| column | type | range |
|---|---|---|
| text | string | length 12 to 1.05M |
| repo_name | string | length 5 to 86 |
| path | string | length 4 to 191 |
| language | string | 1 class |
| license | string | 15 classes |
| size | int32 | 12 to 1.05M |
| keyword | list | length 1 to 23 |
| text_hash | string | length 64 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import queue
import six.moves.cPickle as pickle
import errno
import os
import signal
import sys
import time
import uuid
from random import random, shuffle
from collections import deque
from eventlet import spawn, Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_drive
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache, config_true_value, RateLimitedIterator, split_path, \
eventlet_monkey_patch, get_redirect_data, ContextPool, hash_path, \
non_negative_float, config_positive_int_value, non_negative_int
from swift.common.daemon import Daemon
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.storage_policy import split_policy_string, PolicyError
from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
from swift.obj.diskfile import get_tmp_dir, ASYNCDIR_BASE
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR, \
HTTP_MOVED_PERMANENTLY
class RateLimiterBucket(object):
def __init__(self, update_delta):
self.update_delta = update_delta
self.last_time = 0
self.deque = deque()
@property
def wait_until(self):
return self.last_time + self.update_delta
def __len__(self):
return len(self.deque)
def __bool__(self):
return bool(self.deque)
__nonzero__ = __bool__ # py2
def __lt__(self, other):
# used to sort buckets by readiness
if isinstance(other, RateLimiterBucket):
return self.wait_until < other.wait_until
return self.wait_until < other
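# Illustrative sketch (not part of the module): because __lt__ compares
# ``wait_until`` values, buckets can be ordered by readiness, e.g. in the
# PriorityQueue used below. All values here are made up.
#
#   b1, b2 = RateLimiterBucket(0.02), RateLimiterBucket(0.02)
#   b1.last_time, b2.last_time = 100.0, 99.0
#   assert b2 < b1      # b2 becomes ready sooner (99.02 < 100.02)
#   assert b1 < 100.5   # comparison against a bare timestamp also works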
class BucketizedUpdateSkippingLimiter(object):
"""
Wrap an iterator to rate-limit updates on a per-bucket basis, where updates
are mapped to buckets by hashing their destination path. If an update is
rate-limited then it is placed on a deferral queue and may be sent later if
the wrapped iterator is exhausted before the ``drain_until`` time is
reached.
The deferral queue has constrained size and once the queue is full updates
are evicted using a first-in-first-out policy. This policy is used because
updates on the queue may have been made obsolete by newer updates written
to disk, and this is more likely for updates that have been on the queue
longest.
The iterator increments stats as follows:
    * The `deferrals` stat is incremented for each update that is
      rate-limited. Note that an individual update is rate-limited at most
      once.
* The `skips` stat is incremented for each rate-limited update that is
not eventually yielded. This includes updates that are evicted from the
deferral queue and all updates that remain in the deferral queue when
``drain_until`` time is reached and the iterator terminates.
* The `drains` stat is incremented for each rate-limited update that is
eventually yielded.
Consequently, when this iterator terminates, the sum of `skips` and
`drains` is equal to the number of `deferrals`.
:param update_iterable: an async_pending update iterable
:param logger: a logger instance
:param stats: a SweepStats instance
    :param num_buckets: number of buckets to divide container hashes into; the
                        more buckets there are, the fewer containers map to
                        each bucket (once a busy container slows down a bucket
                        the whole bucket starts deferring)
    :param max_elements_per_group_per_second: tunable maximum rate of updates
        per bucket; deferring kicks in once a bucket exceeds this rate
:param max_deferred_elements: maximum number of deferred elements before
skipping starts. Each bucket may defer updates, but once the total
number of deferred updates summed across all buckets reaches this
value then all buckets will skip subsequent updates.
:param drain_until: time at which any remaining deferred elements must be
skipped and the iterator stops. Once the wrapped iterator has been
exhausted, this iterator will drain deferred elements from its buckets
until either all buckets have drained or this time is reached.
"""
def __init__(self, update_iterable, logger, stats, num_buckets=1000,
max_elements_per_group_per_second=50,
max_deferred_elements=0,
drain_until=0):
self.iterator = iter(update_iterable)
self.logger = logger
self.stats = stats
# if we want a smaller "blast radius" we could make this number bigger
self.num_buckets = max(num_buckets, 1)
try:
self.bucket_update_delta = 1.0 / max_elements_per_group_per_second
except ZeroDivisionError:
self.bucket_update_delta = -1
self.max_deferred_elements = max_deferred_elements
self.deferred_buckets = deque()
self.drain_until = drain_until
self.salt = str(uuid.uuid4())
self.buckets = [RateLimiterBucket(self.bucket_update_delta)
for _ in range(self.num_buckets)]
self.buckets_ordered_by_readiness = None
def __iter__(self):
return self
def _bucket_key(self, update):
acct, cont = split_update_path(update)
return int(hash_path(acct, cont, self.salt), 16) % self.num_buckets
def _get_time(self):
return time.time()
def next(self):
# first iterate over the wrapped iterator...
for update_ctx in self.iterator:
bucket = self.buckets[self._bucket_key(update_ctx['update'])]
now = self._get_time()
if now >= bucket.wait_until:
# no need to ratelimit, just return next update
bucket.last_time = now
return update_ctx
self.stats.deferrals += 1
self.logger.increment("deferrals")
if self.max_deferred_elements > 0:
if len(self.deferred_buckets) >= self.max_deferred_elements:
# create space to defer this update by popping the least
# recent deferral from the least recently deferred bucket;
# updates read from disk recently are preferred over those
# read from disk less recently.
oldest_deferred_bucket = self.deferred_buckets.popleft()
oldest_deferred_bucket.deque.popleft()
self.stats.skips += 1
self.logger.increment("skips")
# append the update to the bucket's queue and append the bucket
# to the queue of deferred buckets
# note: buckets may have multiple entries in deferred_buckets,
# one for each deferred update in that particular bucket
bucket.deque.append(update_ctx)
self.deferred_buckets.append(bucket)
else:
self.stats.skips += 1
self.logger.increment("skips")
if self.buckets_ordered_by_readiness is None:
# initialise a queue of those buckets with deferred elements;
# buckets are queued in the chronological order in which they are
# ready to serve an element
self.buckets_ordered_by_readiness = queue.PriorityQueue()
for bucket in self.buckets:
if bucket:
self.buckets_ordered_by_readiness.put(bucket)
# now drain the buckets...
undrained_elements = []
while not self.buckets_ordered_by_readiness.empty():
now = self._get_time()
bucket = self.buckets_ordered_by_readiness.get_nowait()
if now < self.drain_until:
# wait for next element to be ready
time.sleep(max(0, bucket.wait_until - now))
# drain the most recently deferred element
item = bucket.deque.pop()
if bucket:
# bucket has more deferred elements, re-insert in queue in
# correct chronological position
bucket.last_time = self._get_time()
self.buckets_ordered_by_readiness.put(bucket)
self.stats.drains += 1
self.logger.increment("drains")
return item
else:
# time to stop iterating: gather all un-drained elements
undrained_elements.extend(bucket.deque)
if undrained_elements:
# report final batch of skipped elements
self.stats.skips += len(undrained_elements)
self.logger.update_stats("skips", len(undrained_elements))
raise StopIteration()
__next__ = next
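# A minimal usage sketch (illustrative only; ``pending_iter``, ``logger`` and
# ``process`` are hypothetical stand-ins for the daemon's real plumbing):
#
#   stats = SweepStats()
#   limited = BucketizedUpdateSkippingLimiter(
#       pending_iter, logger, stats,
#       num_buckets=1000,
#       max_elements_per_group_per_second=10,
#       max_deferred_elements=10000,
#       drain_until=time.time() + 300)
#   for update_ctx in limited:
#       process(update_ctx)
#   # when iteration ends: stats.deferrals == stats.skips + stats.drains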
class SweepStats(object):
"""
Stats bucket for an update sweep
A measure of the rate at which updates are being rate-limited is:
deferrals / (deferrals + successes + failures - drains)
A measure of the rate at which updates are not being sent during a sweep
is:
skips / (skips + successes + failures)
"""
def __init__(self, errors=0, failures=0, quarantines=0, successes=0,
unlinks=0, redirects=0, skips=0, deferrals=0, drains=0):
self.errors = errors
self.failures = failures
self.quarantines = quarantines
self.successes = successes
self.unlinks = unlinks
self.redirects = redirects
self.skips = skips
self.deferrals = deferrals
self.drains = drains
def copy(self):
return type(self)(self.errors, self.failures, self.quarantines,
self.successes, self.unlinks, self.redirects,
self.skips, self.deferrals, self.drains)
def since(self, other):
return type(self)(self.errors - other.errors,
self.failures - other.failures,
self.quarantines - other.quarantines,
self.successes - other.successes,
self.unlinks - other.unlinks,
self.redirects - other.redirects,
self.skips - other.skips,
self.deferrals - other.deferrals,
self.drains - other.drains)
def reset(self):
self.errors = 0
self.failures = 0
self.quarantines = 0
self.successes = 0
self.unlinks = 0
self.redirects = 0
self.skips = 0
self.deferrals = 0
self.drains = 0
def __str__(self):
keys = (
(self.successes, 'successes'),
(self.failures, 'failures'),
(self.quarantines, 'quarantines'),
(self.unlinks, 'unlinks'),
(self.errors, 'errors'),
(self.redirects, 'redirects'),
(self.skips, 'skips'),
(self.deferrals, 'deferrals'),
(self.drains, 'drains'),
)
return ', '.join('%d %s' % pair for pair in keys)
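# Sketch of the ratios described in the docstring, with made-up numbers:
#
#   s = SweepStats(successes=90, failures=10, deferrals=25, drains=5,
#                  skips=20)
#   rate_limited = s.deferrals / (
#       s.deferrals + s.successes + s.failures - s.drains)  # 25/120 ~= 0.21
#   not_sent = s.skips / (s.skips + s.successes + s.failures)  # 20/120 ~= 0.17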
def split_update_path(update):
"""
Split the account and container parts out of the async update data.
N.B. updates to shards set the container_path key while the account and
container keys are always the root.
"""
container_path = update.get('container_path')
if container_path:
acct, cont = split_path('/' + container_path, minsegs=2)
else:
acct, cont = update['account'], update['container']
return acct, cont
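# For example (illustrative values): a shard update carries the shard's path
# in 'container_path', while a plain update falls back to the root
# account/container:
#
#   split_update_path({'account': 'AUTH_test', 'container': 'c',
#                      'container_path': '.shards_AUTH_test/c_shard_1'})
#   # -> ('.shards_AUTH_test', 'c_shard_1')
#   split_update_path({'account': 'AUTH_test', 'container': 'c'})
#   # -> ('AUTH_test', 'c')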
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = float(conf.get('interval', 300))
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 8))
self.updater_workers = int(conf.get('updater_workers', 1))
if 'slowdown' in conf:
self.logger.warning(
'The slowdown option is deprecated in favor of '
'objects_per_second. This option may be ignored in a '
'future release.')
objects_per_second = 1 / (
float(conf.get('slowdown', '0.01')) + 0.01)
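            # e.g. the default slowdown of 0.01s maps to
            # 1 / (0.01 + 0.01) = 50 objects per second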
else:
objects_per_second = 50
self.objects_running_time = 0
self.max_objects_per_second = \
float(conf.get('objects_per_second',
objects_per_second))
self.max_objects_per_container_per_second = non_negative_float(
conf.get('max_objects_per_container_per_second', 0))
self.per_container_ratelimit_buckets = config_positive_int_value(
conf.get('per_container_ratelimit_buckets', 1000))
self.node_timeout = float(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.report_interval = float(conf.get('report_interval', 300))
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.rcache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE)
self.stats = SweepStats()
self.max_deferred_updates = non_negative_int(
conf.get('max_deferred_updates', 10000))
self.begin = time.time()
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error('ERROR: Unable to access %(path)s: '
'%(error)s',
{'path': path, 'error': e})
return []
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info('Begin object update sweep')
self.begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device,
self.mount_check)
except ValueError as err:
# We don't count this as an error. The occasional
# unmounted drive is part of normal cluster operations,
# so a simple warning is sufficient.
self.logger.warning('Skipping: %s', err)
continue
while len(pids) >= self.updater_workers:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
eventlet_monkey_patch()
self.stats.reset()
forkbegin = time.time()
self.object_sweep(dev_path)
elapsed = time.time() - forkbegin
self.logger.info(
('Object update sweep of %(device)s '
'completed: %(elapsed).02fs, %(stats)s'),
{'device': device, 'elapsed': elapsed,
'stats': self.stats})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - self.begin
self.logger.info('Object update sweep completed: %.02fs',
elapsed)
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once."""
self.logger.info('Begin object update single threaded sweep')
self.begin = time.time()
self.stats.reset()
for device in self._listdir(self.devices):
try:
dev_path = check_drive(self.devices, device, self.mount_check)
except ValueError as err:
# We don't count this as an error. The occasional unmounted
# drive is part of normal cluster operations, so a simple
# warning is sufficient.
self.logger.warning('Skipping: %s', err)
continue
self.object_sweep(dev_path)
elapsed = time.time() - self.begin
self.logger.info(
('Object update single-threaded sweep completed: '
'%(elapsed).02fs, %(stats)s'),
{'elapsed': elapsed, 'stats': self.stats})
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
def _load_update(self, device, update_path):
try:
return pickle.load(open(update_path, 'rb'))
except Exception as e:
if getattr(e, 'errno', None) == errno.ENOENT:
return
self.logger.exception(
'ERROR Pickle problem, quarantining %s', update_path)
self.stats.quarantines += 1
self.logger.increment('quarantines')
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
try:
# If this was the last async_pending in the directory,
# then this will succeed. Otherwise, it'll fail, and
# that's okay.
os.rmdir(os.path.dirname(update_path))
except OSError:
pass
return
def _iter_async_pendings(self, device):
"""
Locate and yield an update context for all the async pending files on
the device. Each update context contains details of the async pending
file location, its timestamp and the un-pickled update data.
Async pending files that fail to load will be quarantined.
Only the most recent update for the same object is yielded; older
(stale) async pending files are unlinked as they are located.
The iterator tries to clean up empty directories as it goes.
"""
# loop through async pending dirs for all policies
for asyncdir in self._listdir(device):
# we only care about directories
async_pending = os.path.join(device, asyncdir)
if not asyncdir.startswith(ASYNCDIR_BASE):
# skip stuff like "accounts", "containers", etc.
continue
if not os.path.isdir(async_pending):
continue
try:
base, policy = split_policy_string(asyncdir)
except PolicyError as e:
# This isn't an error, but a misconfiguration. Logging a
# warning should be sufficient.
self.logger.warning('Directory %(directory)r does not map '
'to a valid policy (%(error)s)', {
'directory': asyncdir, 'error': e})
continue
prefix_dirs = self._listdir(async_pending)
shuffle(prefix_dirs)
for prefix in prefix_dirs:
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update_file in sorted(self._listdir(prefix_path),
reverse=True):
update_path = os.path.join(prefix_path, update_file)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update_file.split('-')
except ValueError:
self.stats.errors += 1
self.logger.increment('errors')
self.logger.error(
'ERROR async pending file with unexpected '
'name %s', update_path)
continue
# Async pendings are stored on disk like this:
#
# <device>/async_pending/<suffix>/<obj_hash>-<timestamp>
#
# If there are multiple updates for a given object,
# they'll look like this:
#
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp1>
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp2>
# <device>/async_pending/<obj_suffix>/<obj_hash>-<timestamp3>
#
# Async updates also have the property that newer
# updates contain all the information in older updates.
# Since we sorted the directory listing in reverse
# order, we'll see timestamp3 first, yield it, and then
# unlink timestamp2 and timestamp1 since we know they
# are obsolete.
#
# This way, our caller only gets useful async_pendings.
if obj_hash == last_obj_hash:
self.stats.unlinks += 1
self.logger.increment('unlinks')
try:
os.unlink(update_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
last_obj_hash = obj_hash
update = self._load_update(device, update_path)
if update is not None:
yield {'device': device,
'policy': policy,
'update_path': update_path,
'obj_hash': obj_hash,
'timestamp': timestamp,
'update': update}
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
start_time = time.time()
last_status_update = start_time
start_stats = self.stats.copy()
my_pid = os.getpid()
self.logger.info("Object update sweep starting on %s (pid: %d)",
device, my_pid)
ap_iter = RateLimitedIterator(
self._iter_async_pendings(device),
elements_per_second=self.max_objects_per_second)
ap_iter = BucketizedUpdateSkippingLimiter(
ap_iter, self.logger, self.stats,
self.per_container_ratelimit_buckets,
self.max_objects_per_container_per_second,
max_deferred_elements=self.max_deferred_updates,
drain_until=self.begin + self.interval)
with ContextPool(self.concurrency) as pool:
for update_ctx in ap_iter:
pool.spawn(self.process_object_update, **update_ctx)
now = time.time()
if now - last_status_update >= self.report_interval:
this_sweep = self.stats.since(start_stats)
self.logger.info(
('Object update sweep progress on %(device)s: '
'%(elapsed).02fs, %(stats)s (pid: %(pid)d)'),
{'device': device,
'elapsed': now - start_time,
'pid': my_pid,
'stats': this_sweep})
last_status_update = now
pool.waitall()
self.logger.timing_since('timing', start_time)
sweep_totals = self.stats.since(start_stats)
self.logger.info(
('Object update sweep completed on %(device)s '
             'in %(elapsed).02f seconds, '
'%(successes)d successes, %(failures)d failures, '
'%(quarantines)d quarantines, '
'%(unlinks)d unlinks, %(errors)d errors, '
'%(redirects)d redirects, '
'%(skips)d skips, '
'%(deferrals)d deferrals, '
'%(drains)d drains '
'(pid: %(pid)d)'),
{'device': device,
'elapsed': time.time() - start_time,
'pid': my_pid,
'successes': sweep_totals.successes,
'failures': sweep_totals.failures,
'quarantines': sweep_totals.quarantines,
'unlinks': sweep_totals.unlinks,
'errors': sweep_totals.errors,
'redirects': sweep_totals.redirects,
'skips': sweep_totals.skips,
'deferrals': sweep_totals.deferrals,
'drains': sweep_totals.drains
})
def process_object_update(self, update_path, device, policy, update,
**kwargs):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
:param policy: storage policy of object update
:param update: the un-pickled update data
:param kwargs: un-used keys from update_ctx
"""
def do_update():
successes = update.get('successes', [])
headers_out = HeaderKeyDict(update['headers'].copy())
headers_out['user-agent'] = 'object-updater %s' % os.getpid()
headers_out.setdefault('X-Backend-Storage-Policy-Index',
str(int(policy)))
headers_out.setdefault('X-Backend-Accept-Redirect', 'true')
headers_out.setdefault('X-Backend-Accept-Quoted-Location', 'true')
acct, cont = split_update_path(update)
part, nodes = self.get_container_ring().get_nodes(acct, cont)
obj = '/%s/%s/%s' % (acct, cont, update['obj'])
events = [spawn(self.object_update,
node, part, update['op'], obj, headers_out)
for node in nodes if node['id'] not in successes]
success = True
new_successes = rewrite_pickle = False
redirect = None
redirects = set()
for event in events:
event_success, node_id, redirect = event.wait()
if event_success is True:
successes.append(node_id)
new_successes = True
else:
success = False
if redirect:
redirects.add(redirect)
if success:
self.stats.successes += 1
self.logger.increment('successes')
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.stats.unlinks += 1
self.logger.increment('unlinks')
os.unlink(update_path)
try:
# If this was the last async_pending in the directory,
# then this will succeed. Otherwise, it'll fail, and
# that's okay.
os.rmdir(os.path.dirname(update_path))
except OSError:
pass
elif redirects:
# erase any previous successes
update.pop('successes', None)
redirect = max(redirects, key=lambda x: x[-1])[0]
redirect_history = update.setdefault('redirect_history', [])
if redirect in redirect_history:
# force next update to be sent to root, reset history
update['container_path'] = None
update['redirect_history'] = []
else:
update['container_path'] = redirect
redirect_history.append(redirect)
self.stats.redirects += 1
self.logger.increment("redirects")
self.logger.debug(
'Update redirected for %(obj)s %(path)s to %(shard)s',
{'obj': obj, 'path': update_path,
'shard': update['container_path']})
rewrite_pickle = True
else:
self.stats.failures += 1
self.logger.increment('failures')
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes
rewrite_pickle = True
return rewrite_pickle, redirect
rewrite_pickle, redirect = do_update()
if redirect:
# make one immediate retry to the redirect location
rewrite_pickle, redirect = do_update()
if rewrite_pickle:
write_pickle(update, update_path, os.path.join(
device, get_tmp_dir(policy)))
def object_update(self, node, part, op, obj, headers_out):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'PUT' or 'DELETE')
:param obj: object name being updated
:param headers_out: headers to send with the update
:return: a tuple of (``success``, ``node_id``, ``redirect``)
where ``success`` is True if the update succeeded, ``node_id`` is
            the id of the node updated and ``redirect`` is either None or a
tuple of (a path, a timestamp string).
"""
redirect = None
start = time.time()
# Assume an error until we hear otherwise
status = 500
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], part, op, obj, headers_out)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
status = resp.status
if status == HTTP_MOVED_PERMANENTLY:
try:
redirect = get_redirect_data(resp)
except ValueError as err:
self.logger.error(
'Container update failed for %r; problem with '
'redirect location: %s' % (obj, err))
success = is_success(status)
if not success:
self.logger.debug(
'Error code %(status)d is returned from remote '
'server %(ip)s: %(port)s / %(device)s',
{'status': resp.status, 'ip': node['replication_ip'],
'port': node['replication_port'],
'device': node['device']})
return success, node['id'], redirect
except Exception:
self.logger.exception(
'ERROR with remote server '
'%(replication_ip)s:%(replication_port)s/%(device)s', node)
except Timeout as exc:
action = 'connecting to'
if not isinstance(exc, ConnectionTimeout):
# i.e., we definitely made the request but gave up
# waiting for the response
status = 499
action = 'waiting on'
self.logger.info(
'Timeout %(action)s remote server '
'%(replication_ip)s:%(replication_port)s/%(device)s: %(exc)s',
dict(node, exc=exc, action=action))
finally:
elapsed = time.time() - start
self.logger.timing('updater.timing.status.%s' % status,
elapsed * 1000)
return HTTP_INTERNAL_SERVER_ERROR, node['id'], redirect
| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
| openstack/swift | swift/obj/updater.py | Python | apache-2.0 | 33,568 | ["BLAST"] | 9c77a055971cc63c29bacef316753c84d26d6c4ae7c3f35c21fe8a11eb6b1cf0 |
# match.py - filename matching
#
# Copyright 2008, 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function
import bisect
import copy
import itertools
import os
import re
from .i18n import _
from .pycompat import open
from . import (
encoding,
error,
pathutil,
policy,
pycompat,
util,
)
from .utils import stringutil
rustmod = policy.importrust('dirstate')
allpatternkinds = (
b're',
b'glob',
b'path',
b'relglob',
b'relpath',
b'relre',
b'rootglob',
b'listfile',
b'listfile0',
b'set',
b'include',
b'subinclude',
b'rootfilesin',
)
cwdrelativepatternkinds = (b'relpath', b'glob')
propertycache = util.propertycache
def _rematcher(regex):
"""compile the regexp with the best available regexp engine and return a
matcher function"""
m = util.re.compile(regex)
try:
# slightly faster, provided by facebook's re2 bindings
return m.test_match
except AttributeError:
return m.match
def _expandsets(cwd, kindpats, ctx=None, listsubrepos=False, badfn=None):
'''Returns the kindpats list with the 'set' patterns expanded to matchers'''
matchers = []
other = []
for kind, pat, source in kindpats:
if kind == b'set':
if ctx is None:
raise error.ProgrammingError(
b"fileset expression with no context"
)
matchers.append(ctx.matchfileset(cwd, pat, badfn=badfn))
if listsubrepos:
for subpath in ctx.substate:
sm = ctx.sub(subpath).matchfileset(cwd, pat, badfn=badfn)
pm = prefixdirmatcher(subpath, sm, badfn=badfn)
matchers.append(pm)
continue
other.append((kind, pat, source))
return matchers, other
def _expandsubinclude(kindpats, root):
"""Returns the list of subinclude matcher args and the kindpats without the
subincludes in it."""
relmatchers = []
other = []
for kind, pat, source in kindpats:
if kind == b'subinclude':
sourceroot = pathutil.dirname(util.normpath(source))
pat = util.pconvert(pat)
path = pathutil.join(sourceroot, pat)
newroot = pathutil.dirname(path)
matcherargs = (newroot, b'', [], [b'include:%s' % path])
prefix = pathutil.canonpath(root, root, newroot)
if prefix:
prefix += b'/'
relmatchers.append((prefix, matcherargs))
else:
other.append((kind, pat, source))
return relmatchers, other
def _kindpatsalwaysmatch(kindpats):
"""Checks whether the kindspats match everything, as e.g.
'relpath:.' does.
"""
for kind, pat, source in kindpats:
if pat != b'' or kind not in [b'relpath', b'glob']:
return False
return True
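# For instance (illustrative values): an empty relpath or glob pattern matches
# everything, while any non-empty pattern does not:
#
#   _kindpatsalwaysmatch([(b'relpath', b'', b'')])   # True
#   _kindpatsalwaysmatch([(b'glob', b'*.py', b'')])  # False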
def _buildkindpatsmatcher(
matchercls,
root,
cwd,
kindpats,
ctx=None,
listsubrepos=False,
badfn=None,
):
matchers = []
fms, kindpats = _expandsets(
cwd,
kindpats,
ctx=ctx,
listsubrepos=listsubrepos,
badfn=badfn,
)
if kindpats:
m = matchercls(root, kindpats, badfn=badfn)
matchers.append(m)
if fms:
matchers.extend(fms)
if not matchers:
return nevermatcher(badfn=badfn)
if len(matchers) == 1:
return matchers[0]
return unionmatcher(matchers)
def match(
root,
cwd,
patterns=None,
include=None,
exclude=None,
default=b'glob',
auditor=None,
ctx=None,
listsubrepos=False,
warn=None,
badfn=None,
icasefs=False,
):
r"""build an object to match a set of file patterns
arguments:
root - the canonical root of the tree you're matching against
cwd - the current working directory, if relevant
patterns - patterns to find
include - patterns to include (unless they are excluded)
exclude - patterns to exclude (even if they are included)
default - if a pattern in patterns has no explicit type, assume this one
auditor - optional path auditor
ctx - optional changecontext
listsubrepos - if True, recurse into subrepositories
warn - optional function used for printing warnings
badfn - optional bad() callback for this matcher instead of the default
icasefs - make a matcher for wdir on case insensitive filesystems, which
normalizes the given patterns to the case in the filesystem
a pattern is one of:
'glob:<glob>' - a glob relative to cwd
're:<regexp>' - a regular expression
'path:<path>' - a path relative to repository root, which is matched
recursively
'rootfilesin:<path>' - a path relative to repository root, which is
matched non-recursively (will not match subdirectories)
'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
'relpath:<path>' - a path relative to cwd
'relre:<regexp>' - a regexp that needn't match the start of a name
'set:<fileset>' - a fileset expression
'include:<path>' - a file of patterns to read and include
'subinclude:<path>' - a file of patterns to match against files under
the same directory
'<something>' - a pattern of the specified default type
>>> def _match(root, *args, **kwargs):
... return match(util.localpath(root), *args, **kwargs)
Usually a patternmatcher is returned:
>>> _match(b'/foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
<patternmatcher patterns='.*\\.c$|foo/a(?:/|$)|[^/]*\\.py$'>
Combining 'patterns' with 'include' (resp. 'exclude') gives an
intersectionmatcher (resp. a differencematcher):
>>> type(_match(b'/foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
<class 'mercurial.match.intersectionmatcher'>
>>> type(_match(b'/foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
<class 'mercurial.match.differencematcher'>
Notice that, if 'patterns' is empty, an alwaysmatcher is returned:
>>> _match(b'/foo', b'.', [])
<alwaysmatcher>
The 'default' argument determines which kind of pattern is assumed if a
pattern has no prefix:
>>> _match(b'/foo', b'.', [b'.*\.c$'], default=b're')
<patternmatcher patterns='.*\\.c$'>
>>> _match(b'/foo', b'.', [b'main.py'], default=b'relpath')
<patternmatcher patterns='main\\.py(?:/|$)'>
>>> _match(b'/foo', b'.', [b'main.py'], default=b're')
<patternmatcher patterns='main.py'>
The primary use of matchers is to check whether a value (usually a file
    name) matches against one of the patterns given at initialization. There
are two ways of doing this check.
>>> m = _match(b'/foo', b'', [b're:.*\.c$', b'relpath:a'])
1. Calling the matcher with a file name returns True if any pattern
matches that file name:
>>> m(b'a')
True
>>> m(b'main.c')
True
>>> m(b'test.py')
False
2. Using the exact() method only returns True if the file name matches one
of the exact patterns (i.e. not re: or glob: patterns):
>>> m.exact(b'a')
True
>>> m.exact(b'main.c')
False
"""
assert os.path.isabs(root)
cwd = os.path.join(root, util.localpath(cwd))
normalize = _donormalize
if icasefs:
dirstate = ctx.repo().dirstate
dsnormalize = dirstate.normalize
def normalize(patterns, default, root, cwd, auditor, warn):
kp = _donormalize(patterns, default, root, cwd, auditor, warn)
kindpats = []
for kind, pats, source in kp:
if kind not in (b're', b'relre'): # regex can't be normalized
p = pats
pats = dsnormalize(pats)
# Preserve the original to handle a case only rename.
if p != pats and p in dirstate:
kindpats.append((kind, p, source))
kindpats.append((kind, pats, source))
return kindpats
if patterns:
kindpats = normalize(patterns, default, root, cwd, auditor, warn)
if _kindpatsalwaysmatch(kindpats):
m = alwaysmatcher(badfn)
else:
m = _buildkindpatsmatcher(
patternmatcher,
root,
cwd,
kindpats,
ctx=ctx,
listsubrepos=listsubrepos,
badfn=badfn,
)
else:
# It's a little strange that no patterns means to match everything.
# Consider changing this to match nothing (probably using nevermatcher).
m = alwaysmatcher(badfn)
if include:
kindpats = normalize(include, b'glob', root, cwd, auditor, warn)
im = _buildkindpatsmatcher(
includematcher,
root,
cwd,
kindpats,
ctx=ctx,
listsubrepos=listsubrepos,
badfn=None,
)
m = intersectmatchers(m, im)
if exclude:
kindpats = normalize(exclude, b'glob', root, cwd, auditor, warn)
em = _buildkindpatsmatcher(
includematcher,
root,
cwd,
kindpats,
ctx=ctx,
listsubrepos=listsubrepos,
badfn=None,
)
m = differencematcher(m, em)
return m
def exact(files, badfn=None):
return exactmatcher(files, badfn=badfn)
def always(badfn=None):
return alwaysmatcher(badfn)
def never(badfn=None):
return nevermatcher(badfn)
def badmatch(match, badfn):
"""Make a copy of the given matcher, replacing its bad method with the given
one.
"""
m = copy.copy(match)
m.bad = badfn
return m
def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
"""Convert 'kind:pat' from the patterns list to tuples with kind and
normalized and rooted patterns and with listfiles expanded."""
kindpats = []
for kind, pat in [_patsplit(p, default) for p in patterns]:
if kind in cwdrelativepatternkinds:
pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
pat = util.normpath(pat)
elif kind in (b'listfile', b'listfile0'):
try:
files = util.readfile(pat)
if kind == b'listfile0':
files = files.split(b'\0')
else:
files = files.splitlines()
files = [f for f in files if f]
except EnvironmentError:
raise error.Abort(_(b"unable to read file list (%s)") % pat)
for k, p, source in _donormalize(
files, default, root, cwd, auditor, warn
):
kindpats.append((k, p, pat))
continue
elif kind == b'include':
try:
fullpath = os.path.join(root, util.localpath(pat))
includepats = readpatternfile(fullpath, warn)
for k, p, source in _donormalize(
includepats, default, root, cwd, auditor, warn
):
kindpats.append((k, p, source or pat))
except error.Abort as inst:
raise error.Abort(
b'%s: %s'
% (
pat,
inst.message,
) # pytype: disable=unsupported-operands
)
except IOError as inst:
if warn:
warn(
_(b"skipping unreadable pattern file '%s': %s\n")
% (pat, stringutil.forcebytestr(inst.strerror))
)
continue
# else: re or relre - which cannot be normalized
kindpats.append((kind, pat, b''))
return kindpats
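# A rough sketch of the normalization (root/cwd values are illustrative):
# cwd-relative kinds are rooted via canonpath, path-like kinds are merely
# normalized:
#
#   _donormalize([b'glob:*.c', b'path:./lib'], b'glob', b'/repo', b'/repo/sub')
#   # -> [(b'glob', b'sub/*.c', b''), (b'path', b'lib', b'')]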
class basematcher(object):
def __init__(self, badfn=None):
if badfn is not None:
self.bad = badfn
def __call__(self, fn):
return self.matchfn(fn)
# Callbacks related to how the matcher is used by dirstate.walk.
# Subscribers to these events must monkeypatch the matcher object.
def bad(self, f, msg):
"""Callback from dirstate.walk for each explicit file that can't be
found/accessed, with an error message."""
    # If a traversedir callback is set, it will be called when a directory
    # discovered by recursive traversal is visited.
traversedir = None
@propertycache
def _files(self):
return []
def files(self):
"""Explicitly listed files or patterns or roots:
if no patterns or .always(): empty list,
if exact: list exact files,
if not .anypats(): list all files and dirs,
else: optimal roots"""
return self._files
@propertycache
def _fileset(self):
return set(self._files)
def exact(self, f):
'''Returns True if f is in .files().'''
return f in self._fileset
def matchfn(self, f):
return False
def visitdir(self, dir):
"""Decides whether a directory should be visited based on whether it
has potential matches in it or one of its subdirectories. This is
based on the match's primary, included, and excluded patterns.
Returns the string 'all' if the given directory and all subdirectories
should be visited. Otherwise returns True or False indicating whether
the given directory should be visited.
"""
return True
def visitchildrenset(self, dir):
"""Decides whether a directory should be visited based on whether it
has potential matches in it or one of its subdirectories, and
potentially lists which subdirectories of that directory should be
visited. This is based on the match's primary, included, and excluded
patterns.
This function is very similar to 'visitdir', and the following mapping
can be applied:
         visitdir | visitchildrenset
----------+-------------------
False | set()
'all' | 'all'
True | 'this' OR non-empty set of subdirs -or files- to visit
Example:
Assume matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
the following values (assuming the implementation of visitchildrenset
is capable of recognizing this; some implementations are not).
'' -> {'foo', 'qux'}
'baz' -> set()
'foo' -> {'bar'}
# Ideally this would be 'all', but since the prefix nature of matchers
# is applied to the entire matcher, we have to downgrade this to
# 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
# in.
'foo/bar' -> 'this'
'qux' -> 'this'
Important:
Most matchers do not know if they're representing files or
directories. They see ['path:dir/f'] and don't know whether 'f' is a
file or a directory, so visitchildrenset('dir') for most matchers will
return {'f'}, but if the matcher knows it's a file (like exactmatcher
does), it may return 'this'. Do not rely on the return being a set
indicating that there are no files in this dir to investigate (or
equivalently that if there are files to investigate in 'dir' that it
will always return 'this').
"""
return b'this'
def always(self):
"""Matcher will match everything and .files() will be empty --
optimization might be possible."""
return False
def isexact(self):
"""Matcher will match exactly the list of files in .files() --
optimization might be possible."""
return False
def prefix(self):
"""Matcher will match the paths in .files() recursively --
optimization might be possible."""
return False
def anypats(self):
"""None of .always(), .isexact(), and .prefix() is true --
optimizations will be difficult."""
return not self.always() and not self.isexact() and not self.prefix()
class alwaysmatcher(basematcher):
'''Matches everything.'''
def __init__(self, badfn=None):
super(alwaysmatcher, self).__init__(badfn)
def always(self):
return True
def matchfn(self, f):
return True
def visitdir(self, dir):
return b'all'
def visitchildrenset(self, dir):
return b'all'
def __repr__(self):
return r'<alwaysmatcher>'
class nevermatcher(basematcher):
'''Matches nothing.'''
def __init__(self, badfn=None):
super(nevermatcher, self).__init__(badfn)
# It's a little weird to say that the nevermatcher is an exact matcher
# or a prefix matcher, but it seems to make sense to let callers take
# fast paths based on either. There will be no exact matches, nor any
# prefixes (files() returns []), so fast paths iterating over them should
# be efficient (and correct).
def isexact(self):
return True
def prefix(self):
return True
def visitdir(self, dir):
return False
def visitchildrenset(self, dir):
return set()
def __repr__(self):
return r'<nevermatcher>'
class predicatematcher(basematcher):
"""A matcher adapter for a simple boolean function"""
def __init__(self, predfn, predrepr=None, badfn=None):
super(predicatematcher, self).__init__(badfn)
self.matchfn = predfn
self._predrepr = predrepr
@encoding.strmethod
def __repr__(self):
s = stringutil.buildrepr(self._predrepr) or pycompat.byterepr(
self.matchfn
)
        return b'<predicatematcher pred=%s>' % s
def path_or_parents_in_set(path, prefix_set):
"""Returns True if `path` (or any parent of `path`) is in `prefix_set`."""
l = len(prefix_set)
if l == 0:
return False
if path in prefix_set:
return True
# If there's more than 5 paths in prefix_set, it's *probably* quicker to
# "walk up" the directory hierarchy instead, with the assumption that most
# directory hierarchies are relatively shallow and hash lookup is cheap.
if l > 5:
return any(
parentdir in prefix_set for parentdir in pathutil.finddirs(path)
)
# FIXME: Ideally we'd never get to this point if this is the case - we'd
# recognize ourselves as an 'always' matcher and skip this.
if b'' in prefix_set:
return True
if pycompat.ispy3:
sl = ord(b'/')
else:
sl = '/'
# We already checked that path isn't in prefix_set exactly, so
    # `path[len(pf)]` should never raise IndexError.
return any(path.startswith(pf) and path[len(pf)] == sl for pf in prefix_set)
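# Illustrative behaviour (values made up): with prefix_set = {b'foo'}, the
# path b'foo/bar/baz' matches because its parent b'foo' is in the set, while
# b'food/bar' does not (b'foo' is not followed by a slash there):
#
#   path_or_parents_in_set(b'foo/bar/baz', {b'foo'})  # True
#   path_or_parents_in_set(b'food/bar', {b'foo'})     # False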
class patternmatcher(basematcher):
r"""Matches a set of (kind, pat, source) against a 'root' directory.
>>> kindpats = [
... (b're', br'.*\.c$', b''),
... (b'path', b'foo/a', b''),
... (b'relpath', b'b', b''),
... (b'glob', b'*.h', b''),
... ]
>>> m = patternmatcher(b'foo', kindpats)
>>> m(b'main.c') # matches re:.*\.c$
True
>>> m(b'b.txt')
False
>>> m(b'foo/a') # matches path:foo/a
True
>>> m(b'a') # does not match path:b, since 'root' is 'foo'
False
>>> m(b'b') # matches relpath:b, since 'root' is 'foo'
True
>>> m(b'lib.h') # matches glob:*.h
True
>>> m.files()
['', 'foo/a', 'b', '']
>>> m.exact(b'foo/a')
True
>>> m.exact(b'b')
True
>>> m.exact(b'lib.h') # exact matches are for (rel)path kinds
False
"""
def __init__(self, root, kindpats, badfn=None):
super(patternmatcher, self).__init__(badfn)
self._files = _explicitfiles(kindpats)
self._prefix = _prefix(kindpats)
self._pats, self.matchfn = _buildmatch(kindpats, b'$', root)
@propertycache
def _dirs(self):
return set(pathutil.dirs(self._fileset))
def visitdir(self, dir):
if self._prefix and dir in self._fileset:
return b'all'
return dir in self._dirs or path_or_parents_in_set(dir, self._fileset)
def visitchildrenset(self, dir):
ret = self.visitdir(dir)
if ret is True:
return b'this'
elif not ret:
return set()
assert ret == b'all'
return b'all'
def prefix(self):
return self._prefix
@encoding.strmethod
def __repr__(self):
return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
# This is basically a reimplementation of pathutil.dirs that stores the
# children instead of just a count of them, plus a small optional optimization
# to avoid some directories we don't need.
class _dirchildren(object):
def __init__(self, paths, onlyinclude=None):
self._dirs = {}
self._onlyinclude = onlyinclude or []
addpath = self.addpath
for f in paths:
addpath(f)
def addpath(self, path):
if path == b'':
return
dirs = self._dirs
findsplitdirs = _dirchildren._findsplitdirs
for d, b in findsplitdirs(path):
if d not in self._onlyinclude:
continue
dirs.setdefault(d, set()).add(b)
@staticmethod
def _findsplitdirs(path):
# yields (dirname, basename) tuples, walking back to the root. This is
# very similar to pathutil.finddirs, except:
# - produces a (dirname, basename) tuple, not just 'dirname'
# Unlike manifest._splittopdir, this does not suffix `dirname` with a
# slash.
oldpos = len(path)
pos = path.rfind(b'/')
while pos != -1:
yield path[:pos], path[pos + 1 : oldpos]
oldpos = pos
pos = path.rfind(b'/', 0, pos)
yield b'', path[:oldpos]
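    # Example (illustrative): walking b'a/b/c' back to the root yields
    # (b'a/b', b'c'), (b'a', b'b'), (b'', b'a').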
def get(self, path):
return self._dirs.get(path, set())
class includematcher(basematcher):
def __init__(self, root, kindpats, badfn=None):
super(includematcher, self).__init__(badfn)
if rustmod is not None:
# We need to pass the patterns to Rust because they can contain
# patterns from the user interface
self._kindpats = kindpats
self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root)
self._prefix = _prefix(kindpats)
roots, dirs, parents = _rootsdirsandparents(kindpats)
# roots are directories which are recursively included.
self._roots = set(roots)
# dirs are directories which are non-recursively included.
self._dirs = set(dirs)
# parents are directories which are non-recursively included because
# they are needed to get to items in _dirs or _roots.
self._parents = parents
def visitdir(self, dir):
if self._prefix and dir in self._roots:
return b'all'
return (
dir in self._dirs
or dir in self._parents
or path_or_parents_in_set(dir, self._roots)
)
@propertycache
def _allparentschildren(self):
# It may seem odd that we add dirs, roots, and parents, and then
# restrict to only parents. This is to catch the case of:
# dirs = ['foo/bar']
# parents = ['foo']
# if we asked for the children of 'foo', but had only added
# self._parents, we wouldn't be able to respond ['bar'].
return _dirchildren(
itertools.chain(self._dirs, self._roots, self._parents),
onlyinclude=self._parents,
)
def visitchildrenset(self, dir):
if self._prefix and dir in self._roots:
return b'all'
# Note: this does *not* include the 'dir in self._parents' case from
# visitdir, that's handled below.
if (
b'' in self._roots
or dir in self._dirs
or path_or_parents_in_set(dir, self._roots)
):
return b'this'
if dir in self._parents:
return self._allparentschildren.get(dir) or set()
return set()
@encoding.strmethod
def __repr__(self):
return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)
class exactmatcher(basematcher):
r"""Matches the input files exactly. They are interpreted as paths, not
patterns (so no kind-prefixes).
>>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
>>> m(b'a.txt')
True
>>> m(b'b.txt')
False
Input files that would be matched are exactly those returned by .files()
>>> m.files()
['a.txt', 're:.*\\.c$']
So pattern 're:.*\.c$' is not considered as a regex, but as a file name
>>> m(b'main.c')
False
>>> m(br're:.*\.c$')
True
"""
def __init__(self, files, badfn=None):
super(exactmatcher, self).__init__(badfn)
if isinstance(files, list):
self._files = files
else:
self._files = list(files)
matchfn = basematcher.exact
@propertycache
def _dirs(self):
return set(pathutil.dirs(self._fileset))
def visitdir(self, dir):
return dir in self._dirs
@propertycache
def _visitchildrenset_candidates(self):
"""A memoized set of candidates for visitchildrenset."""
return self._fileset | self._dirs - {b''}
@propertycache
def _sorted_visitchildrenset_candidates(self):
"""A memoized sorted list of candidates for visitchildrenset."""
return sorted(self._visitchildrenset_candidates)
def visitchildrenset(self, dir):
if not self._fileset or dir not in self._dirs:
return set()
if dir == b'':
candidates = self._visitchildrenset_candidates
else:
candidates = self._sorted_visitchildrenset_candidates
d = dir + b'/'
# Use bisect to find the first element potentially starting with d
# (i.e. >= d). This should always find at least one element (we'll
# assert later if this is not the case).
first = bisect.bisect_left(candidates, d)
# We need a representation of the first element that is > d that
# does not start with d, so since we added a `/` on the end of dir,
# we'll add whatever comes after slash (we could probably assume
# that `0` is after `/`, but let's not) to the end of dir instead.
dnext = dir + encoding.strtolocal(chr(ord(b'/') + 1))
# Use bisect to find the first element >= d_next
last = bisect.bisect_left(candidates, dnext, lo=first)
dlen = len(d)
candidates = {c[dlen:] for c in candidates[first:last]}
# self._dirs includes all of the directories, recursively, so if
# we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
# 'foo/bar' in it. Thus we can safely ignore a candidate that has a
        # '/' in it, indicating it's for a subdir-of-a-subdir; the
# immediate subdir will be in there without a slash.
ret = {c for c in candidates if b'/' not in c}
# We really do not expect ret to be empty, since that would imply that
# there's something in _dirs that didn't have a file in _fileset.
assert ret
return ret
def isexact(self):
return True
@encoding.strmethod
def __repr__(self):
return b'<exactmatcher files=%r>' % self._files
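# An illustrative sketch of exactmatcher.visitchildrenset (values made up):
#
#   m = exactmatcher([b'a/b/c.txt', b'a/d.txt'])
#   m.visitchildrenset(b'')     # {b'a'}
#   m.visitchildrenset(b'a')    # {b'b', b'd.txt'}
#   m.visitchildrenset(b'a/b')  # {b'c.txt'}
#   m.visitchildrenset(b'x')    # set()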
class differencematcher(basematcher):
"""Composes two matchers by matching if the first matches and the second
does not.
The second matcher's non-matching-attributes (bad, traversedir) are ignored.
"""
def __init__(self, m1, m2):
super(differencematcher, self).__init__()
self._m1 = m1
self._m2 = m2
self.bad = m1.bad
self.traversedir = m1.traversedir
def matchfn(self, f):
return self._m1(f) and not self._m2(f)
@propertycache
def _files(self):
if self.isexact():
return [f for f in self._m1.files() if self(f)]
# If m1 is not an exact matcher, we can't easily figure out the set of
# files, because its files() are not always files. For example, if
# m1 is "path:dir" and m2 is "rootfileins:.", we don't
# want to remove "dir" from the set even though it would match m2,
# because the "dir" in m1 may not be a file.
return self._m1.files()
def visitdir(self, dir):
if self._m2.visitdir(dir) == b'all':
return False
elif not self._m2.visitdir(dir):
# m2 does not match dir, we can return 'all' here if possible
return self._m1.visitdir(dir)
return bool(self._m1.visitdir(dir))
def visitchildrenset(self, dir):
m2_set = self._m2.visitchildrenset(dir)
if m2_set == b'all':
return set()
m1_set = self._m1.visitchildrenset(dir)
# Possible values for m1: 'all', 'this', set(...), set()
# Possible values for m2: 'this', set(...), set()
# If m2 has nothing under here that we care about, return m1, even if
# it's 'all'. This is a change in behavior from visitdir, which would
# return True, not 'all', for some reason.
if not m2_set:
return m1_set
if m1_set in [b'all', b'this']:
# Never return 'all' here if m2_set is any kind of non-empty (either
# 'this' or set(foo)), since m2 might return set() for a
# subdirectory.
return b'this'
# Possible values for m1: set(...), set()
# Possible values for m2: 'this', set(...)
# We ignore m2's set results. They're possibly incorrect:
# m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''):
# m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
# return set(), which is *not* correct, we still need to visit 'dir'!
return m1_set
def isexact(self):
return self._m1.isexact()
@encoding.strmethod
def __repr__(self):
return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)
def intersectmatchers(m1, m2):
"""Composes two matchers by matching if both of them match.
The second matcher's non-matching-attributes (bad, traversedir) are ignored.
"""
if m1 is None or m2 is None:
return m1 or m2
if m1.always():
m = copy.copy(m2)
# TODO: Consider encapsulating these things in a class so there's only
# one thing to copy from m1.
m.bad = m1.bad
m.traversedir = m1.traversedir
return m
if m2.always():
m = copy.copy(m1)
return m
return intersectionmatcher(m1, m2)
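# A small sketch (illustrative): intersecting with an alwaysmatcher copies the
# other matcher rather than paying for an intersectionmatcher wrapper:
#
#   m = intersectmatchers(alwaysmatcher(), exactmatcher([b'a.txt']))
#   m(b'a.txt')                  # True
#   m(b'b.txt')                  # False
#   isinstance(m, exactmatcher)  # True (a copy of m2, not a wrapper)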
class intersectionmatcher(basematcher):
def __init__(self, m1, m2):
super(intersectionmatcher, self).__init__()
self._m1 = m1
self._m2 = m2
self.bad = m1.bad
self.traversedir = m1.traversedir
@propertycache
def _files(self):
if self.isexact():
m1, m2 = self._m1, self._m2
if not m1.isexact():
m1, m2 = m2, m1
return [f for f in m1.files() if m2(f)]
        # If neither m1 nor m2 is an exact matcher, we can't easily intersect
# the set of files, because their files() are not always files. For
# example, if intersecting a matcher "-I glob:foo.txt" with matcher of
# "path:dir2", we don't want to remove "dir2" from the set.
return self._m1.files() + self._m2.files()
def matchfn(self, f):
return self._m1(f) and self._m2(f)
def visitdir(self, dir):
visit1 = self._m1.visitdir(dir)
if visit1 == b'all':
return self._m2.visitdir(dir)
# bool() because visit1=True + visit2='all' should not be 'all'
return bool(visit1 and self._m2.visitdir(dir))
def visitchildrenset(self, dir):
m1_set = self._m1.visitchildrenset(dir)
if not m1_set:
return set()
m2_set = self._m2.visitchildrenset(dir)
if not m2_set:
return set()
if m1_set == b'all':
return m2_set
elif m2_set == b'all':
return m1_set
if m1_set == b'this' or m2_set == b'this':
return b'this'
assert isinstance(m1_set, set) and isinstance(m2_set, set)
return m1_set.intersection(m2_set)
def always(self):
return self._m1.always() and self._m2.always()
def isexact(self):
return self._m1.isexact() or self._m2.isexact()
@encoding.strmethod
def __repr__(self):
return b'<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)
class subdirmatcher(basematcher):
"""Adapt a matcher to work on a subdirectory only.
The paths are remapped to remove/insert the path as needed:
>>> from . import pycompat
>>> m1 = match(util.localpath(b'/root'), b'', [b'a.txt', b'sub/b.txt'], auditor=lambda name: None)
>>> m2 = subdirmatcher(b'sub', m1)
>>> m2(b'a.txt')
False
>>> m2(b'b.txt')
True
>>> m2.matchfn(b'a.txt')
False
>>> m2.matchfn(b'b.txt')
True
>>> m2.files()
['b.txt']
>>> m2.exact(b'b.txt')
True
>>> def bad(f, msg):
... print(pycompat.sysstr(b"%s: %s" % (f, msg)))
>>> m1.bad = bad
>>> m2.bad(b'x.txt', b'No such file')
sub/x.txt: No such file
"""
def __init__(self, path, matcher):
super(subdirmatcher, self).__init__()
self._path = path
self._matcher = matcher
self._always = matcher.always()
self._files = [
f[len(path) + 1 :]
for f in matcher._files
if f.startswith(path + b"/")
]
# If the parent repo had a path to this subrepo and the matcher is
# a prefix matcher, this submatcher always matches.
if matcher.prefix():
self._always = any(f == path for f in matcher._files)
def bad(self, f, msg):
self._matcher.bad(self._path + b"/" + f, msg)
def matchfn(self, f):
# Some information is lost in the superclass's constructor, so we
# can not accurately create the matching function for the subdirectory
# from the inputs. Instead, we override matchfn() and visitdir() to
# call the original matcher with the subdirectory path prepended.
return self._matcher.matchfn(self._path + b"/" + f)
def visitdir(self, dir):
if dir == b'':
dir = self._path
else:
dir = self._path + b"/" + dir
return self._matcher.visitdir(dir)
def visitchildrenset(self, dir):
if dir == b'':
dir = self._path
else:
dir = self._path + b"/" + dir
return self._matcher.visitchildrenset(dir)
def always(self):
return self._always
def prefix(self):
return self._matcher.prefix() and not self._always
@encoding.strmethod
def __repr__(self):
return b'<subdirmatcher path=%r, matcher=%r>' % (
self._path,
self._matcher,
)
class prefixdirmatcher(basematcher):
"""Adapt a matcher to work on a parent directory.
The matcher's non-matching-attributes (bad, traversedir) are ignored.
The prefix path should usually be the relative path from the root of
this matcher to the root of the wrapped matcher.
>>> m1 = match(util.localpath(b'/root/d/e'), b'f', [b'../a.txt', b'b.txt'], auditor=lambda name: None)
>>> m2 = prefixdirmatcher(b'd/e', m1)
>>> m2(b'a.txt')
False
>>> m2(b'd/e/a.txt')
True
>>> m2(b'd/e/b.txt')
False
>>> m2.files()
['d/e/a.txt', 'd/e/f/b.txt']
>>> m2.exact(b'd/e/a.txt')
True
>>> m2.visitdir(b'd')
True
>>> m2.visitdir(b'd/e')
True
>>> m2.visitdir(b'd/e/f')
True
>>> m2.visitdir(b'd/e/g')
False
>>> m2.visitdir(b'd/ef')
False
"""
def __init__(self, path, matcher, badfn=None):
super(prefixdirmatcher, self).__init__(badfn)
if not path:
raise error.ProgrammingError(b'prefix path must not be empty')
self._path = path
self._pathprefix = path + b'/'
self._matcher = matcher
@propertycache
def _files(self):
return [self._pathprefix + f for f in self._matcher._files]
def matchfn(self, f):
if not f.startswith(self._pathprefix):
return False
return self._matcher.matchfn(f[len(self._pathprefix) :])
@propertycache
def _pathdirs(self):
return set(pathutil.finddirs(self._path))
def visitdir(self, dir):
if dir == self._path:
return self._matcher.visitdir(b'')
if dir.startswith(self._pathprefix):
return self._matcher.visitdir(dir[len(self._pathprefix) :])
return dir in self._pathdirs
def visitchildrenset(self, dir):
if dir == self._path:
return self._matcher.visitchildrenset(b'')
if dir.startswith(self._pathprefix):
return self._matcher.visitchildrenset(dir[len(self._pathprefix) :])
if dir in self._pathdirs:
return b'this'
return set()
def isexact(self):
return self._matcher.isexact()
def prefix(self):
return self._matcher.prefix()
@encoding.strmethod
def __repr__(self):
return b'<prefixdirmatcher path=%r, matcher=%r>' % (
pycompat.bytestr(self._path),
self._matcher,
)
class unionmatcher(basematcher):
"""A matcher that is the union of several matchers.
The non-matching-attributes (bad, traversedir) are taken from the first
matcher.
"""
def __init__(self, matchers):
m1 = matchers[0]
super(unionmatcher, self).__init__()
self.traversedir = m1.traversedir
self._matchers = matchers
def matchfn(self, f):
for match in self._matchers:
if match(f):
return True
return False
def visitdir(self, dir):
r = False
for m in self._matchers:
v = m.visitdir(dir)
if v == b'all':
return v
r |= v
return r
def visitchildrenset(self, dir):
r = set()
this = False
for m in self._matchers:
v = m.visitchildrenset(dir)
if not v:
continue
if v == b'all':
return v
if this or v == b'this':
this = True
# don't break, we might have an 'all' in here.
continue
assert isinstance(v, set)
r = r.union(v)
if this:
return b'this'
return r
@encoding.strmethod
def __repr__(self):
return b'<unionmatcher matchers=%r>' % self._matchers
def patkind(pattern, default=None):
r"""If pattern is 'kind:pat' with a known kind, return kind.
>>> patkind(br're:.*\.c$')
're'
>>> patkind(b'glob:*.c')
'glob'
>>> patkind(b'relpath:test.py')
'relpath'
>>> patkind(b'main.py')
>>> patkind(b'main.py', default=b're')
're'
"""
return _patsplit(pattern, default)[0]
def _patsplit(pattern, default):
"""Split a string into the optional pattern kind prefix and the actual
pattern."""
if b':' in pattern:
kind, pat = pattern.split(b':', 1)
if kind in allpatternkinds:
return kind, pat
return default, pattern
def _globre(pat):
r"""Convert an extended glob string to a regexp string.
>>> from . import pycompat
>>> def bprint(s):
... print(pycompat.sysstr(s))
>>> bprint(_globre(br'?'))
.
>>> bprint(_globre(br'*'))
[^/]*
>>> bprint(_globre(br'**'))
.*
>>> bprint(_globre(br'**/a'))
(?:.*/)?a
>>> bprint(_globre(br'a/**/b'))
a/(?:.*/)?b
>>> bprint(_globre(br'[a*?!^][^b][!c]'))
[a*?!^][\^b][^c]
>>> bprint(_globre(br'{a,b}'))
(?:a|b)
>>> bprint(_globre(br'.\*\?'))
\.\*\?
"""
i, n = 0, len(pat)
res = b''
group = 0
escape = util.stringutil.regexbytesescapemap.get
def peek():
return i < n and pat[i : i + 1]
while i < n:
c = pat[i : i + 1]
i += 1
if c not in b'*?[{},\\':
res += escape(c, c)
elif c == b'*':
if peek() == b'*':
i += 1
if peek() == b'/':
i += 1
res += b'(?:.*/)?'
else:
res += b'.*'
else:
res += b'[^/]*'
elif c == b'?':
res += b'.'
elif c == b'[':
j = i
if j < n and pat[j : j + 1] in b'!]':
j += 1
while j < n and pat[j : j + 1] != b']':
j += 1
if j >= n:
res += b'\\['
else:
stuff = pat[i:j].replace(b'\\', b'\\\\')
i = j + 1
if stuff[0:1] == b'!':
stuff = b'^' + stuff[1:]
elif stuff[0:1] == b'^':
stuff = b'\\' + stuff
res = b'%s[%s]' % (res, stuff)
elif c == b'{':
group += 1
res += b'(?:'
elif c == b'}' and group:
res += b')'
group -= 1
elif c == b',' and group:
res += b'|'
elif c == b'\\':
p = peek()
if p:
i += 1
res += escape(p, p)
else:
res += escape(c, c)
else:
res += escape(c, c)
return res
def _regex(kind, pat, globsuffix):
"""Convert a (normalized) pattern of any kind into a
regular expression.
globsuffix is appended to the regexp of globs."""
if not pat and kind in (b'glob', b'relpath'):
return b''
if kind == b're':
return pat
if kind in (b'path', b'relpath'):
if pat == b'.':
return b''
return util.stringutil.reescape(pat) + b'(?:/|$)'
if kind == b'rootfilesin':
if pat == b'.':
escaped = b''
else:
# Pattern is a directory name.
escaped = util.stringutil.reescape(pat) + b'/'
# Anything after the pattern must be a non-directory.
return escaped + b'[^/]+$'
if kind == b'relglob':
globre = _globre(pat)
if globre.startswith(b'[^/]*'):
# When pat has the form *XYZ (common), make the returned regex more
# legible by returning the regex for **XYZ instead of **/*XYZ.
return b'.*' + globre[len(b'[^/]*') :] + globsuffix
return b'(?:|.*/)' + globre + globsuffix
if kind == b'relre':
if pat.startswith(b'^'):
return pat
return b'.*' + pat
if kind in (b'glob', b'rootglob'):
return _globre(pat) + globsuffix
raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))
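# For illustration (hypothetical inputs): _regex(b'glob', b'*.c', b'$') yields the
# bytes regexp [^/]*\.c$ while _regex(b'rootfilesin', b'g/h', b'') yields g/h/[^/]+$
# (the glob suffix is only appended for glob kinds).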
def _buildmatch(kindpats, globsuffix, root):
"""Return regexp string and a matcher function for kindpats.
globsuffix is appended to the regexp of globs."""
matchfuncs = []
subincludes, kindpats = _expandsubinclude(kindpats, root)
if subincludes:
submatchers = {}
def matchsubinclude(f):
for prefix, matcherargs in subincludes:
if f.startswith(prefix):
mf = submatchers.get(prefix)
if mf is None:
mf = match(*matcherargs)
submatchers[prefix] = mf
if mf(f[len(prefix) :]):
return True
return False
matchfuncs.append(matchsubinclude)
regex = b''
if kindpats:
if all(k == b'rootfilesin' for k, p, s in kindpats):
dirs = {p for k, p, s in kindpats}
def mf(f):
i = f.rfind(b'/')
if i >= 0:
dir = f[:i]
else:
dir = b'.'
return dir in dirs
regex = b'rootfilesin: %s' % stringutil.pprint(list(sorted(dirs)))
matchfuncs.append(mf)
else:
regex, mf = _buildregexmatch(kindpats, globsuffix)
matchfuncs.append(mf)
if len(matchfuncs) == 1:
return regex, matchfuncs[0]
else:
return regex, lambda f: any(mf(f) for mf in matchfuncs)
MAX_RE_SIZE = 20000
def _joinregexes(regexps):
"""gather multiple regular expressions into a single one"""
return b'|'.join(regexps)
def _buildregexmatch(kindpats, globsuffix):
"""Build a match function from a list of kinds and kindpats,
return regexp string and a matcher function.
Test too large input
>>> _buildregexmatch([
... (b'relglob', b'?' * MAX_RE_SIZE, b'')
... ], b'$')
Traceback (most recent call last):
...
Abort: matcher pattern is too long (20009 bytes)
"""
try:
allgroups = []
regexps = [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
fullregexp = _joinregexes(regexps)
startidx = 0
groupsize = 0
for idx, r in enumerate(regexps):
piecesize = len(r)
if piecesize > MAX_RE_SIZE:
msg = _(b"matcher pattern is too long (%d bytes)") % piecesize
raise error.Abort(msg)
elif (groupsize + piecesize) > MAX_RE_SIZE:
group = regexps[startidx:idx]
allgroups.append(_joinregexes(group))
startidx = idx
groupsize = 0
groupsize += piecesize + 1
if startidx == 0:
matcher = _rematcher(fullregexp)
func = lambda s: bool(matcher(s))
else:
group = regexps[startidx:]
allgroups.append(_joinregexes(group))
allmatchers = [_rematcher(g) for g in allgroups]
func = lambda s: any(m(s) for m in allmatchers)
return fullregexp, func
except re.error:
for k, p, s in kindpats:
try:
_rematcher(_regex(k, p, globsuffix))
except re.error:
if s:
raise error.Abort(
_(b"%s: invalid pattern (%s): %s") % (s, k, p)
)
else:
raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p))
raise error.Abort(_(b"invalid pattern"))
def _patternrootsanddirs(kindpats):
"""Returns roots and directories corresponding to each pattern.
This calculates the roots and directories exactly matching the patterns and
returns a tuple of (roots, dirs) for each. It does not return other
directories which may also need to be considered, like the parent
directories.
"""
r = []
d = []
for kind, pat, source in kindpats:
if kind in (b'glob', b'rootglob'): # find the non-glob prefix
root = []
for p in pat.split(b'/'):
if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
break
root.append(p)
r.append(b'/'.join(root))
elif kind in (b'relpath', b'path'):
if pat == b'.':
pat = b''
r.append(pat)
elif kind in (b'rootfilesin',):
if pat == b'.':
pat = b''
d.append(pat)
else: # relglob, re, relre
r.append(b'')
return r, d
def _roots(kindpats):
'''Returns root directories to match recursively from the given patterns.'''
roots, dirs = _patternrootsanddirs(kindpats)
return roots
def _rootsdirsandparents(kindpats):
"""Returns roots and exact directories from patterns.
`roots` are directories to match recursively, `dirs` should
be matched non-recursively, and `parents` are the implicitly required
directories to walk to items in either roots or dirs.
Returns a tuple of (roots, dirs, parents).
>>> r = _rootsdirsandparents(
... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
... (b'glob', b'g*', b'')])
>>> print(r[0:2], sorted(r[2])) # the set has an unstable output
(['g/h', 'g/h', ''], []) ['', 'g']
>>> r = _rootsdirsandparents(
... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
>>> print(r[0:2], sorted(r[2])) # the set has an unstable output
([], ['g/h', '']) ['', 'g']
>>> r = _rootsdirsandparents(
... [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
... (b'path', b'', b'')])
>>> print(r[0:2], sorted(r[2])) # the set has an unstable output
(['r', 'p/p', ''], []) ['', 'p']
>>> r = _rootsdirsandparents(
... [(b'relglob', b'rg*', b''), (b're', b're/', b''),
... (b'relre', b'rr', b'')])
>>> print(r[0:2], sorted(r[2])) # the set has an unstable output
(['', '', ''], []) ['']
"""
r, d = _patternrootsanddirs(kindpats)
p = set()
# Add the parents as non-recursive/exact directories, since they must be
# scanned to get to either the roots or the other exact directories.
p.update(pathutil.dirs(d))
p.update(pathutil.dirs(r))
# FIXME: all uses of this function convert these to sets, do so before
# returning.
# FIXME: all uses of this function do not need anything in 'roots' and
# 'dirs' to also be in 'parents', consider removing them before returning.
return r, d, p
def _explicitfiles(kindpats):
"""Returns the potential explicit filenames from the patterns.
>>> _explicitfiles([(b'path', b'foo/bar', b'')])
['foo/bar']
>>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
[]
"""
# Keep only the pattern kinds where one can specify filenames (vs only
# directory names).
filable = [kp for kp in kindpats if kp[0] not in (b'rootfilesin',)]
return _roots(filable)
def _prefix(kindpats):
'''Whether all the patterns match a prefix (i.e. recursively)'''
for kind, pat, source in kindpats:
if kind not in (b'path', b'relpath'):
return False
return True
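# Illustrative examples (hypothetical inputs):
#   _prefix([(b'path', b'a', b''), (b'relpath', b'b', b'')])  ->  True
#   _prefix([(b'glob', b'*.c', b'')])                         ->  False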
_commentre = None
def readpatternfile(filepath, warn, sourceinfo=False):
"""parse a pattern file, returning a list of
patterns. These patterns should be given to compile()
to be validated and converted into a match function.
trailing white space is dropped.
the escape character is backslash.
comments start with #.
empty lines are skipped.
lines can be of the following formats:
syntax: regexp # defaults following lines to non-rooted regexps
syntax: glob # defaults following lines to non-rooted globs
re:pattern # non-rooted regular expression
glob:pattern # non-rooted glob
rootglob:pat # rooted glob (same root as ^ in regexps)
pattern # pattern of the current default type
if sourceinfo is set, returns a list of tuples:
(pattern, lineno, originalline).
This is useful to debug ignore patterns.
"""
syntaxes = {
b're': b'relre:',
b'regexp': b'relre:',
b'glob': b'relglob:',
b'rootglob': b'rootglob:',
b'include': b'include',
b'subinclude': b'subinclude',
}
syntax = b'relre:'
patterns = []
fp = open(filepath, b'rb')
for lineno, line in enumerate(util.iterfile(fp), start=1):
if b"#" in line:
global _commentre
if not _commentre:
_commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
# remove comments prefixed by an even number of escapes
m = _commentre.search(line)
if m:
line = line[: m.end(1)]
# fixup properly escaped comments that survived the above
line = line.replace(b"\\#", b"#")
line = line.rstrip()
if not line:
continue
if line.startswith(b'syntax:'):
s = line[7:].strip()
try:
syntax = syntaxes[s]
except KeyError:
if warn:
warn(
_(b"%s: ignoring invalid syntax '%s'\n") % (filepath, s)
)
continue
linesyntax = syntax
for s, rels in pycompat.iteritems(syntaxes):
if line.startswith(rels):
linesyntax = rels
line = line[len(rels) :]
break
elif line.startswith(s + b':'):
linesyntax = rels
line = line[len(s) + 1 :]
break
if sourceinfo:
patterns.append((linesyntax + line, lineno, line))
else:
patterns.append(linesyntax + line)
fp.close()
return patterns
|
smmribeiro/intellij-community
|
plugins/hg4idea/testData/bin/mercurial/match.py
|
Python
|
apache-2.0
| 53,313
|
[
"VisIt"
] |
e71d1ed0739d829c56dc656e54981b7a12a601fdd423e8d44280200ba3d2413a
|
#!/usr/bin/python
######################################################################
# Author: Andrés Herrera Poyatos
# Universidad de Granada, January, 2015
# Breadth-First Search for Shortest Path Problem in Graph Theory
#######################################################################
# This program reads a graph as an adjacency list from a file, along with two of
# the graph's nodes. Afterwards it executes the BFS algorithm on it and returns
# the length of the shortest path. The user can choose whether to print the path or not.
import sys # For arguments (sys.argv) and exit (sys.exit())
import time # To time the program
import math # log
import copy # To copy the graph
import queue # Python queue module
from random import randrange # Random integer generator
# A Graph class. A graph is represented by its adjacency list.
class Graph(object):
    # The class consists of the following members:
    #   - adj_list  : Adjacency list representing the graph. It is implemented as a dictionary of dictionaries.
    #                 The outer dictionary's keys are the nodes of the graph. Its values are dictionaries mapping
    #                 each key's neighbours to the number of edges between them.
    #   - outdegree : Dictionary which maps each node to its outdegree (the number of edges leaving this node).
    #   - num_edges : Number of edges in the graph.
    #   - type_node : Type of the graph nodes. It is int by default.
    #
    # The graph admits multiple edges from one node to another thanks to the dictionary-of-dictionaries implementation.
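    # Illustrative example (hypothetical input file): a file with the two lines
    # "1 2 2 3" and "2 1" is read as adj_list = {1: {2: 2, 3: 1}, 2: {1: 1}},
    # outdegree = {1: 3, 2: 1} and num_edges = 4.
    # Note that the mutable default arguments of __init__ below are shared between
    # instances, a classic Python pitfall if several graphs are created this way.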
# Initializes a graph object
def __init__(self, adj_list = {}, outdegree = {}, num_edges = 0, type_node = int):
self.adj_list = adj_list
self.outdegree = outdegree
self.num_edges = num_edges
self.type_node = type_node
# Read a graph from a file in adjacency list form
def readGraph(self, file):
data = open(file, "r")
for line in data:
# Get the nodes and initialize the current node adj_list
nodes = [self.type_node(element) for element in line.split()]
self.adj_list[nodes[0]] = {}
# Add the neighbours to node[0] adj_list
for i in range(1, len(nodes)):
self.adj_list[nodes[0]][nodes[i]] = self.adj_list[nodes[0]][nodes[i]]+1 if nodes[i] in self.adj_list[nodes[0]] else 1
# Increment num_edges and add degree(nodes[0]) to outdegree:
self.num_edges += len(nodes)-1; self.outdegree[nodes[0]] = len(nodes)-1
data.close()
# Find the shortest path between a and b using BFS.
# Efficiency: O(|V| + |E|)
def breadthFirstSearch(self, a, b):
# See if parameters are correct:
if a not in self.adj_list:
raise RuntimeError(a)
if b not in self.adj_list:
raise RuntimeError(b)
if a == b: return 0, [a] # If a==b then that's the asked path
# Algorithm:
        visited = {a : (0,a)} # Maps each visited node to (distance from a, parent in the shortest path)
q = queue.Queue() # queue with the nodes to read
q.put(a) # q = {a}
found = False # b hasn't been found yet
        # Visit the nodes level by level (starting from a) until b is reached.
        # The first path found is then the shortest one (provable by induction on levels).
while(not q.empty() and not found):
node = q.get()
# Visit node neighbours:
for neighbour in self.adj_list[node]:
if not neighbour in visited:
# The node is added to visited with its distance to a and the parent node in that path.
visited[neighbour] = (visited[node][0]+1, node)
q.put(neighbour) # Added to the queue
# If it is b we are done
if (neighbour == b):
found = True; break
# If it is found the path is returned.
# Else return -1, []
if found:
sol = [b]; x = visited[b][1]
while(x != a):
sol.append(x); x = visited[x][1]
sol.append(a)
return visited[b][0], sol[::-1]
else:
return -1, []
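    # Usage sketch (hypothetical graph):
    #   g = Graph({1: {2: 1}, 2: {1: 1, 3: 1}, 3: {2: 1}}, {1: 1, 2: 2, 3: 1}, 3)
    #   g.breadthFirstSearch(1, 3)  ->  (2, [1, 2, 3])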
# Returns the edges of the graph
def edges(self):
edges = set()
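        # Note: since edges is a set, parallel edges collapse into a single
        # (node, neighbour) tuple, so edge multiplicities are not preserved here.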
for node in range(1, len(self.adj_list)):
for neighbour in self.adj_list[node]:
if (neighbour, node) not in edges:
for i in range(self.adj_list[node][neighbour]):
edges.add((node, neighbour))
return edges
######################## MAIN ##########################
# See if arguments are correct
if len(sys.argv) < 4 or len(sys.argv) > 5:
print("Sintax: BreadthFirstSearch.py <options> graph.txt NodeA NodeB \n The option -n don't print the path between A and B.")
sys.exit()
print_path = True
if len(sys.argv) > 4:
if sys.argv[1] == "-n":
print_path = False
# Create Graph
try:
graph_file = sys.argv[1 if len(sys.argv) == 4 else 2]
graph = Graph()
graph.readGraph(graph_file)
except IOError:
print("Error: The file", graph_file, "can\'t be read.")
sys.exit()
a = int(sys.argv[2 if len(sys.argv) == 4 else 3])
b = int(sys.argv[3 if len(sys.argv) == 4 else 4])
# Execute Breadth-First Search and measure the elapsed time
start_time = time.time()
try:
length, path = graph.breadthFirstSearch(a,b)
except RuntimeError as element:
print("Error:", element.args[0] , "is not a node.")
sys.exit()
print("--- %f seconds ---" % (time.time() - start_time))
# Print the result
if length < 0:
print("The given nodes are not connected.")
else:
print("Path length: ", length)
if print_path:
print("Path between the nodes ", a, " and ", b, ": ", path)
|
andreshp/Algorithms
|
Graphs/ShortestPath/BreadthFirstSearch/BreadthFirstSearch.py
|
Python
|
gpl-2.0
| 5,870
|
[
"VisIt"
] |
e71ebe603a784c21e7f62c2dc12c120efcbb1cb3fc83d0a2157e8955e3993949
|
#importing some packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def draw_lines(img, lines, color=[255, 0, 0], thickness = 10):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
left_x = []
left_y = []
right_x = []
right_y = []
for line in lines:
for x1,y1,x2,y2 in line:
            # 1. Horizontal segments (slope == 0) are noise, so they are ignored.
            # 2. slope > 0 belongs to the right lane line; slope < 0 to the left.
slope = ((y2-y1)/(x2-x1))
if (slope < 0):
left_x.append(x1)
left_x.append(x2)
left_y.append(y1)
left_y.append(y2)
elif (slope > 0):
right_x.append(x1)
right_x.append(x2)
right_y.append(y1)
right_y.append(y2)
if (len(left_x) > 0 and len(left_y) > 0):
# find coefficient
coeff_left = np.polyfit(left_x, left_y, 1)
            # construct y = a*x + b
func_left = np.poly1d(coeff_left)
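        # Note: func_left maps x -> y, so despite their names x1L and x2L are the
        # y-coordinates of the fitted left lane line at x = 0 and x = 460.
        # The right-lane branch below is analogous at x = 500 and x = img.shape[1].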
x1L = int(func_left(0))
x2L = int(func_left(460))
cv2.line(img, (0, x1L), (460, x2L), color, thickness)
if (len(right_x) > 0 and len(right_y) > 0):
# find coefficient
coeff_right = np.polyfit(right_x, right_y, 1)
            # construct y = a*x + b
func_right = np.poly1d(coeff_right)
x1R = int(func_right(500))
x2R = int(func_right(img.shape[1]))
cv2.line(img, (500, x1R), (img.shape[1], x2R), color, thickness)
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, λ)
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# you should return the final output (image where lines are drawn on lanes)
img = grayscale(image)
# further smoothing to blur image for better result
img = gaussian_blur(img, kernel_size = 3)
img = canny(img, low_threshold = 80, high_threshold = 240)
# This time we are defining a four sided polygon to mask
imshape = image.shape
vertices = np.array([[(0,imshape[0]),(460, 320), (500, 320), (imshape[1],imshape[0])]], dtype=np.int32)
img = region_of_interest(img, vertices)
# Hough transform
line_image = hough_lines(img, rho = 2, theta= np.pi/180, threshold = 50, min_line_len = 40, max_line_gap = 20)
# Draw the lines on the edge image
final = weighted_img(line_image, image, α = 0.8, β = 1., λ = 0.)
return final
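# Single-image usage sketch (hypothetical path):
#   image = mpimg.imread('../test_images/solidWhiteRight.jpg')
#   annotated = process_image(image)
#   plt.imshow(annotated); plt.show()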
if __name__ == '__main__':
white_line_output = '../test_videos_output/solidWhiteRight.mp4'
clip1 = VideoFileClip('../test_videos/solidWhiteRight.mp4')
white_line_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
yellow_output = '../test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip('../test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
challenge_output = '../test_videos_output/challenge.mp4'
clip3 = VideoFileClip('../test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
|
tranlyvu/autonomous-vehicle-projects
|
Finding Lane Lines/src/finding_lane_lines.py
|
Python
|
apache-2.0
| 6,734
|
[
"Gaussian"
] |
48a87a48a2860a29b09acebc0b03b36ed39f24e3ea75a8ca61f125d6eda0f2a4
|
"""Utilities used by AIM
"""
import os, sys
from math import sqrt, pi, sin, cos, acos
from subprocess import Popen, PIPE
from config import update_marker, tephra_output_dir, fall3d_distro
import numpy
import logging
import time
import string
from Scientific.IO.NetCDF import NetCDFFile
def run(cmd,
stdout=None,
stderr=None,
verbose=True):
s = cmd
if stdout:
s += ' > %s' % stdout
if stderr:
s += ' 2> %s' % stderr
if verbose:
print s
err = os.system(s)
if err != 0:
msg = 'Command "%s" failed with errorcode %i. ' % (cmd, err)
if stdout and stderr: msg += 'See logfiles %s and %s for details' % (stdout, stderr)
raise Exception(msg)
return err
def run_with_errorcheck(cmd, name, logdir='.', verbose=False):
"""Run general command with logging and errorchecking
"""
base = os.path.split(name)[-1]
stdout = os.path.join(logdir, '%s.stdout' % base)
stderr = os.path.join(logdir, '%s.stderr' % base)
err = run(cmd,
stdout=stdout,
stderr=stderr,
verbose=verbose)
if err:
msg = 'Command "%s" ended abnormally. Log files are:\n' % cmd
msg += ' %s\n' % stdout
msg += ' %s\n' % stderr
raise Exception(msg)
def pipe(cmd, verbose=False):
"""Simplification of the new style pipe command
One object p is returned and it has
p.stdout, p.stdin and p.stderr
If p.stdout is None an exception will be raised.
"""
if verbose:
print cmd
p = Popen(cmd, shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if p.stdout is None:
        msg = 'Piping of command %s could not be executed' % cmd
raise Exception(msg)
return p
def header(s):
dashes = '-'*len(s)
print
print dashes
print s
print dashes
def write_line(fid, text, indent=0):
fid.write(' '*indent + text + '\n')
def makedir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
Based on
http://code.activestate.com/recipes/82465/
Note os.makedirs does not silently pass if directory exists.
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
msg = 'a file with the same name as the desired ' \
'dir, "%s", already exists.' % newdir
raise OSError(msg)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
makedir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
def get_scenario_parameters(scenario):
"""Extract dictionary of parameters from scenario file
If scenario is already a dictionary it is returned as is.
The file must be in the current working directory.
"""
# Return if already dictionary
if type(scenario) == type({}):
return scenario
# Get all variables from scenario name space
# Get copy so that the original __dict__ isn't modified
scenario_name = scenario.split('.')[0]
if os.path.sep in scenario:
msg = 'Scenario file must reside in current working directory'
raise Exception(msg)
try:
exec('import %s as scenario_module' % scenario_name)
except Exception, e:
msg = 'Argument scenario must be either the name of a '
msg += 'Python script or a dictionary. '
msg += 'Error message was "%s"' % str(e)
raise Exception(msg)
params = scenario_module.__dict__.copy()
# Remove built-in entries
for key in params.keys():
if key.startswith('__'):
params.pop(key)
# Add scenario name to dictionary
params['scenario_name'] = scenario_name
# Return parameter dictionary
return params
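# Usage sketch (hypothetical scenario file): a file 'merapi.py' in the current
# working directory containing the single line
#   wind_profile = 'merapi.profile'
# yields {'wind_profile': 'merapi.profile', 'scenario_name': 'merapi'}.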
def check_presence_of_required_parameters(params):
"""Check that scenario scripts provides the correct parameters
as specified in required_parameters.txt
Input:
scenario: Name of scenario
params: Dictionary of parameters provided in scenario
"""
# Get path
path = os.path.split(__file__)[0]
# Get scenario name
scenario_name = params['scenario_name']
# Get required parameters
filename = os.path.join(path,
'required_parameters.txt')
required_parameters = {}
fid = open(filename, 'r')
for line in fid.readlines():
entry = line.strip()
# Skip comments and blank lines
if entry == '': continue
if entry.startswith('#'): continue
# Check that each line specifies only one parameter
if len(entry.split()) > 1:
msg = 'Only one parameter must be specified per line '
msg += 'in required_parameters.txt. I got %s ' % entry
raise Exception(msg)
# Register parameter name - value on RHS is irrelevant
required_parameters[entry] = None
# Add scenario_name to required parameters
required_parameters['scenario_name'] = None
# Check that all required parameters were provided
for parameter in required_parameters:
if parameter not in params:
# Bad - required parameter was missing
msg = 'Required parameter "%s" was not specified in scenario "%s"'\
% (parameter, scenario_name)
raise Exception(msg)
# Check that all provide parameters were also required
# i.e. alert the user if a new parameter has been introduced
for parameter in params:
if parameter not in required_parameters:
# Bad - a new parameter has been introduced
msg = 'Scenario "%s" provided a new parameter: "%s".\n'\
% (scenario_name, parameter)
msg += 'Consider updating "required_parameters.txt" or '
msg += 'remove "%s" from scenario.' % parameter
raise Exception(msg)
def build_output_dir(tephra_output_dir='tephra', type_name='scenarios', scenario_name='none', dircomment='', store_locally=True, timestamp_output=True):
"""Build output datastructure like
$TEPHRADATA/<scenario>/<user>/<scenario>_user_timestamp
"""
if store_locally:
output_dir = os.path.join(os.getcwd(), tephra_output_dir)
else:
output_dir = get_tephradata()
output_dir = os.path.join(output_dir, type_name)
output_dir = os.path.join(output_dir, scenario_name)
user = get_username()
output_dir = os.path.join(output_dir, user)
if timestamp_output:
scenario_dir = os.path.join(output_dir, 'D' + get_timestamp())
else:
scenario_dir = os.path.join(output_dir, 'run')
if dircomment is not None:
try:
dircomment = string.replace(dircomment, ' ', '_')
except:
msg = 'Dircomment %s could not be appended to output dir' % str(dircomment)
raise Exception(msg)
scenario_dir += '_' + dircomment
output_dir = os.path.join(output_dir, scenario_dir)
if not timestamp_output:
try:
os.listdir(output_dir)
except:
# OK if it doesn't exist
pass
else:
# Clean out any previous files
s = '/bin/rm -rf %s' % output_dir
try:
run(s, verbose=False)
except:
print 'Could not clean up'
return output_dir
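# Illustrative result (hypothetical values): scenario_name='merapi', user 'ole',
# store_locally=True, timestamp_output=True and dircomment='test' produce a path like
#   <cwd>/tephra/scenarios/merapi/ole/D2009-04-01T130102_test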
def get_layers_from_windfield(windfield):
"""Get meteorological wind altitudes from Fall3d wind field
Extension .profile assumed
Format is
814924 9208168
20090101
0 9999999
17
64.0 3.50 0.20 26.65 3.51 266.73
751.0 7.00 1.70 22.05 7.20 256.35
1481.0 10.50 1.90 17.65 10.67 259.74
3119.0 8.90 -2.20 10.25 9.17 283.88
4383.0 10.50 -2.40 2.35 10.77 282.87
5837.0 7.00 -3.80 -5.15 7.96 298.50
7564.0 4.20 3.70 -14.75 5.60 228.62
9689.0 1.40 2.40 -27.65 2.78 210.26
10967.0 -4.10 -1.00 -39.65 4.22 76.29
12446.0 -5.90 -1.30 -54.35 6.04 77.57
14223.0 -13.20 1.40 -70.75 13.27 96.05
16566.0 -17.80 -5.40 -76.15 18.60 73.12
18575.0 -2.50 2.80 -78.35 3.75 138.24
20565.0 1.90 0.40 -65.25 1.94 258.11
23751.0 9.80 -0.10 -59.75 9.80 270.58
26306.0 -11.90 5.50 -55.75 13.11 114.81
30814.0 -28.10 2.20 -47.05 28.19 94.48
"""
if not windfield.endswith('.profile'):
return
fid = open(windfield)
lines = fid.readlines()
fid.close()
altitudes = []
for line in lines:
fields = line.split()
if len(fields) < 4:
continue
else:
altitudes.append(float(fields[0]))
return altitudes
def get_temporal_parameters_from_windfield(windfield):
"""Get eruption year, month and date from Fall3d wind field
Extension .profile assumed
Format is
458662 9129502
20101109
43200 54000
13
90.5 -0.05 1.23 25.36
1013.2 -3.33 1.77 20.47
    Return year, month, date, start_time, end_time, time_step.
    Times are in seconds UTC after midnight.
"""
if not windfield.endswith('.profile'):
msg = 'Windfield %s must be native Fall3d to work' % windfield
raise Exception(msg)
fid = open(windfield)
lines = fid.readlines()
fid.close()
timestamp = lines[1]
year = int(timestamp[:4])
month = int(timestamp[4:6])
date = int(timestamp[6:])
start_time = int(lines[2].split()[0])
# Search backwards for end time and also get time interval for the last step
for line in lines[::-1]:
fields = line.split()
if len(fields) == 2:
end_time = int(fields[1])
step = end_time - int(fields[0])
break
return year, month, date, start_time, end_time, step
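# Worked example using the sample header in the docstring above: the timestamp
# line '20101109' and time line '43200 54000' yield
# (year, month, date, start, end, step) = (2010, 11, 9, 43200, 54000, 10800).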
def get_eruptiontime_from_windfield(windfield):
"""Get eruption year, month and date from Fall3d wind field
Extension .profile assumed
Format is
814924 9208168
20090101
0 9999999
17
71.0 1.50 -0.30 26.35 1.53 281.31
....
"""
# FIXME: This should really be superseded by get_temporal_parameters_from_windfield(windfield):
if not windfield.endswith('.profile'):
return
fid = open(windfield)
lines = fid.readlines()
fid.close()
timestamp = lines[1]
year = int(timestamp[:4])
month = int(timestamp[4:6])
date = int(timestamp[6:])
return year, month, date
def get_fall3d_home(verbose=True):
"""Determine location of Fall3d package
"""
#---------------------
# Determine FALL3DHOME
#---------------------
if 'FALL3DHOME' in os.environ:
FALL3DHOME = os.environ['FALL3DHOME']
else:
FALL3DHOME = os.getcwd()
Fall3d_dir = os.path.join(FALL3DHOME, fall3d_distro)
return Fall3d_dir
def get_tephradata(verbose=True):
"""Determine location of TEPHRADATA environment variable
"""
#---------------------
# Determine TEPHRADATA
#---------------------
if 'TEPHRADATA' in os.environ:
TEPHRADATA = os.environ['TEPHRADATA']
else:
TEPHRADATA = os.path.join(os.getcwd(), tephra_output_dir)
return TEPHRADATA
def get_username():
"""Get username
"""
try:
p = pipe('whoami')
except:
username = 'unknown'
else:
username = p.stdout.read().strip()
return username
def get_timestamp():
"""Get timestamp in the ISO 8601 format
http://www.iso.org/iso/date_and_time_format
Format YYYY-MM-DDThh:mm:ss
where the capital letter T is used to separate the date and time
components.
Example: 2009-04-01T13:01:02 represents one minute and two seconds
after one o'clock in the afternoon on the first of April 2009.
"""
#return time.strftime('%Y-%m-%dT%H:%M:%S') # ISO 8601
return time.strftime('%Y-%m-%dT%H%M%S') # Something Windows can read
def get_shell():
"""Get shell if UNIX platform
Otherwise return None
"""
p = pipe('echo $SHELL')
shell = p.stdout.read().strip()
shell = os.path.split(shell)[-1] # Only last part of path
return shell
def set_bash_variable(envvar, envvalue):
"""Modify ~/.bashrc with specified environment variable
If already exist, append using :
"""
fid = open(os.path.expanduser('~/.bashrc'))
lines = fid.readlines()
fid.close()
fid = open(os.path.expanduser('~/.bashrc'), 'w')
found = False
for line in lines:
patchedline = line
if envvar in line:
if line.startswith('export %s=' % envvar):
# Found - now append
found = True
path = line.split('=')[1].strip()
path += ':' + envvalue
patchedline = 'export %s=%s %s\n' % (envvar, path, update_marker)
fid.write(patchedline)
fid.write('\n') # In case last line did not have a newline
if not found:
# Not found - just add it
patchedline = 'export %s=%s %s\n' % (envvar, envvalue, update_marker)
fid.write(patchedline)
fid.close()
def tail(filename,
count=5,
indent=2,
noblanks=True):
"""Run UNIX tail command but optionally remove blank lines
"""
space = ' '*indent
s = 'tail -%i %s' % (count, filename)
p = pipe(s)
result = []
for line in p.stdout.readlines():
s = line.strip()
if s:
print space + s
def list_to_string(L):
"""Convert list of numerical values suitable for Fall3d
If L is a single number it will be used as such.
"""
try:
s = float(L)
except:
s = ''
for x in L:
s += '%f ' % x
return s
def calculate_extrema(filename, verbose=False):
"""Calculate minimum and maximum value of ASCII file.
Format is ESRI ASCII grid.
"""
import sys
# Read ASCII file
fid = open(filename)
lines = fid.readlines()
fid.close()
# Check header and get number of columns
line = lines[0].strip()
fields = line.split()
msg = 'Input file %s does not look like an ASCII grd file. It must start with ncols' % filename
assert fields[0] == 'ncols', msg
assert len(fields) == 2
# Compute extrema and return
min_val = sys.maxint
max_val = -min_val
for line in lines[6:]:
A = numpy.array([float(x) for x in line.split()])
min_val = min(min_val, A.min())
max_val = max(max_val, A.max())
return min_val, max_val
def _write_ascii(header, data, asciifilename, projection):
"""Internal function to write ASCII data from NetCDF. Used by nc2asc.
"""
rows = data.shape[0]
cols = data.shape[1]
prjfilename = asciifilename[:-4] + '.prj'
outfile = open(asciifilename, 'w')
outfile.write(header)
for j in range(rows)[::-1]: # Rows are upside down
for i in range(cols):
outfile.write('%f ' % data[j, i])
outfile.write('\n')
outfile.close()
if projection:
# Create associated projection file
fid = open(prjfilename, 'w')
fid.write(projection)
fid.close()
def nc2asc(ncfilename,
subdataset,
projection=None,
verbose=False):
"""Extract given subdataset from ncfile name and create one ASCII file for each band.
This function is reading the NetCDF file using the Python Library Scientific.IO.NetCDF
Time is assumed to be in whole hours.
"""
basename, _ = os.path.splitext(ncfilename) # Get rid of .nc
basename, _ = os.path.splitext(basename) # Get rid of .res
if verbose:
print 'Converting layer %s in file %s to ASCII files' % (subdataset,
ncfilename)
infile = NetCDFFile(ncfilename)
layers = infile.variables.keys()
msg = 'Subdataset %s was not found in file %s. Options are %s.' % (subdataset, ncfilename, layers)
assert subdataset in layers, msg
A = infile.variables[subdataset].getValue()
msg = 'Data must have 3 dimensions: Time, X and Y. I got shape: %s' % str(A.shape)
assert len(A.shape) == 3, msg
if 'time' in infile.variables:
units = infile.variables['time'].units
msg = 'Time units must be "h". I got %s' % units
assert units == 'h', msg
times = infile.variables['time'].getValue()
assert A.shape[0] == len(times)
cols = infile.dimensions['x']
rows = infile.dimensions['y']
assert A.shape[1] == rows
assert A.shape[2] == cols
# Header information
xmin = float(infile.XMIN)
xmax = float(infile.XMAX)
ymin = float(infile.YMIN)
ymax = float(infile.YMAX)
# Check that cells are square
cellsize = (xmax-xmin)/cols
assert numpy.allclose(cellsize, (ymax-ymin)/rows)
header = 'ncols %i\n' % cols
header += 'nrows %i\n' % rows
header += 'xllcorner %.1f\n' % xmin
header += 'yllcorner %.1f\n' % ymin
header += 'cellsize %.1f\n' % cellsize
header += 'NODATA_value -9999\n'
if 'time' in infile.variables:
# Loop through time slices and name files by hour.
for k, t in enumerate(times):
hour = str(int(t)).zfill(2) + 'h'
asciifilename = basename + '.' + hour + '.' + subdataset.lower() + '.asc'
_write_ascii(header, A[k,:,:], asciifilename, projection)
else:
# Write the one ASCII file
asciifilename = basename + '.' + subdataset.lower() + '.asc'
_write_ascii(header, A[0,:,:], asciifilename, projection)
infile.close()
def OBSOLETE_nc2asc(ncfilename,
subdataset,
ascii_header_file=None, # If ASCII header is known it can be supplied
projection=None,
verbose=False):
"""Extract given subdataset from ncfile name and create one ASCII file for each band.
The underlying command is of the form
gdal_translate -of AAIGrid -b 4 NETCDF:"merapi.res.nc":THICKNESS merapi.003h.depothick.asc
"""
print 'NC', ncfilename
# First assert that this is a valid NetCDF file and that requested subdataset exists
s = 'gdalinfo %s' % ncfilename
try:
p = pipe(s)
except:
msg = 'Could not read NetCDF file %s' % ncfilename
raise Exception(msg)
else:
lines = p.stdout.readlines()
expected_header = 'Driver: netCDF/Network Common Data Format'
print lines
header = lines[0].strip()
if header != expected_header:
msg = 'File %s does not look like a valid NetCDF file.\n' % ncfilename
msg += 'Expected header: "%s"\n' % expected_header
msg += 'but got instead: "%s"' % header
raise Exception(msg)
# Look for something like: SUBDATASET_3_NAME=NETCDF:"merapi.res.nc":THICKNESS
found = False
for line in lines:
info = line.strip()
if info.startswith('SUBDATASET') and info.find('NAME=NETCDF:') > 0 and info.endswith(':THICKNESS'):
#print 'Found', info
found = True
msg = 'Did not find subdataset %s in %s' % (subdataset, ncfilename)
assert found, msg
# Then extract all bands for this subdataset
# Command is for example: gdalinfo NETCDF:"merapi.res.nc":THICKNESS
#
# FIXME (Ole): There is much more scope for using NetCDF info here if needed
# For now we just assume 'hours' but use the numbers given here
s = 'gdalinfo NETCDF:"%s":%s' % (ncfilename, subdataset)
try:
p = pipe(s)
except:
msg = 'Could not execute command: %s' % s
raise Exception(msg)
else:
bands = {}
lines = p.stdout.readlines()
for line in lines:
info = line.strip()
#print info
# Get new band
if info.startswith('Band'):
fields = info.split()
band_number = int(fields[1])
bands[band_number] = [] # Create new entry
assert band_number == len(bands)
# Get associated time
if info.startswith('NETCDF_DIMENSION_time'):
fields = info.split('=')
# Round to nearest integer!
#FIXME: This is not totally general but handy for naming of files
time = int(float(fields[1]))
bands[band_number].append(time)
# Get associated units
if info.startswith('NETCDF_time_units'):
fields = info.split('=')
unit = fields[1]
bands[band_number].append(unit)
# Extract ASCII file for each band
for key in bands:
time = bands[key][0]
dim = bands[key][1]
# Name each output file
basename = ncfilename.split('.')[0]
bandname = str(time).zfill(3)
output_filename = basename + '.' + bandname + dim + '.' + subdataset.lower() + '.asc'
prjfilename = output_filename[:-4] + '.prj'
#print key, bands[key], output_filename
# Convert NetCDF subdataset and band to ascii file
s = 'gdal_translate -of AAIGrid -b %i NETCDF:"%s":%s %s' % (key, ncfilename, subdataset, output_filename)
if verbose:
run(s, verbose=verbose)
else:
run(s, stdout='/dev/null', stderr='/dev/null', verbose=verbose)
# Now replace the header which GDAL gets wrong
#s = 'ncdump %s' % ncfilename
#p = Popen(s, shell=True,
# stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
#
#if p.stdout is None:
# msg = 'Could not execute command: %s' % s
# raise Exception(msg)
#else:
# lines = p.stdout.readlines()
#
# for line in lines:
# info = line.strip()
# fields = info.split('=')
# if info.startswith('XMIN'):
# xllcorner = float(fields[1]) - cellsize/2
# Now replace the header which GDAL gets wrong
#f = NetCDFFile(ncfilename)
#print f.variables.keys
if ascii_header_file:
#if False:
# Read replacement
f = open(ascii_header_file)
new_header = f.readlines()[:6]
f.close()
print 'Supplied newheader:', new_header
# Read ASCII file
f = open(output_filename)
lines = f.readlines()
f.close()
# Write replacement
f = open(output_filename, 'w')
for i, line in enumerate(lines):
if i < 6:
f.write(new_header[i])
else:
f.write(line)
f.close()
if projection:
# Create associated projection file
fid = open(prjfilename, 'w')
fid.write(projection)
fid.close()
def grd2asc(grdfilename,
nodatavalue=-9999,
projection=None): #1.70141e+38):
"""Convert Surfer grd file to ESRI asc grid format
Input:
grdfilename: Name of Golden Software Surfer grid file
(extension .grd assumed) with format
DSAA
<ncols> <nrows>
<xmin> <xmax>
<ymin> <ymax>
<zmin> <zmax>
z11 z21 z31 .... (rows of z values)
Note: Surfer grids use 1.70141e+38 for no data.
An output file with same basename and the extension .asc
will be generated following the format
ncols <ncols>
nrows <nrows>
xllcorner <x coordinate of lower left corner>
yllcorner <y coordinate of lower left corner>
cellsize <cellsize>
NODATA_value <nodata value, typically -9999 for elevation data or
otherwise 1.70141e+38>
If optional argument projection is specified, a projection file with same basename and the
extension .prj will be generated.
It is assumed that the projection follows the WKT projection format.
"""
basename, extension = os.path.splitext(grdfilename)
msg = 'Grid file %s must have extension grd' % grdfilename
assert extension == '.grd', msg
ascfilename = basename + '.asc'
prjfilename = basename + '.prj'
fid = open(grdfilename)
lines = fid.readlines()
fid.close()
fid = open(ascfilename, 'w')
# Do header first
line = lines[0]
    msg = 'Input file %s does not look like a Surfer grd file. It must start with DSAA' % grdfilename
assert line.strip() == 'DSAA', msg
# Get dimensions
line = lines[1]
ncols, nrows = [int(x) for x in line.split()]
fid.write('ncols %i\n' % ncols)
fid.write('nrows %i\n' % nrows)
# Get origin
line = lines[2]
xmin, xmax = [float(x) for x in line.split()]
line = lines[3]
ymin, ymax = [float(x) for x in line.split()]
# Get cellsize
cellsize = (xmax-xmin)/(ncols-1)
# Put out warning if cells are not square
    msg = 'Cells are not square: %f, %f' % (cellsize, (ymax-ymin)/(nrows-1))
if abs(cellsize - (ymax-ymin)/(nrows-1)) > 1.0e-1: print 'WARNING (grd2asc): %s' % msg
#assert abs(cellsize - (ymax-ymin)/(nrows-1)) < 1.0e-6, msg
# Write origin using pixel registration used by ESRI instead of grid line registration used by Surfer.
fid.write('xllcorner %f\n' % (xmin - cellsize/2.)) # FIXME: CHECK THIS
fid.write('yllcorner %f\n' % (ymin - cellsize/2.))
fid.write('cellsize %f\n' % cellsize)
# Write value for no data
fid.write('NODATA_value %d\n' % nodatavalue)
# Write data reversed
data = lines[5:]
data.reverse()
for line in data:
fid.write(line)
fid.close()
if projection:
# Create associated projection file
fid = open(prjfilename, 'w')
fid.write(projection)
fid.close()
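# Worked example (hypothetical values): a grd file with ncols=3, nrows=2,
# xmin=0, xmax=2, ymin=0, ymax=1 has cellsize = (2-0)/(3-1) = 1.0, so the ASCII
# header gets xllcorner = -0.5 and yllcorner = -0.5 (pixel registration).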
def asc2grd(ascfilename,
nodatavalue=1.70141e+38,
projection=None):
"""Convert ESRI asc grid format to Surfer grd file
Input:
ascfilename: Name of ESRI asc grid (extension .asc assumed) with format
ncols <ncols>
nrows <nrows>
xllcorner <x coordinate of lower left corner>
yllcorner <y coordinate of lower left corner>
cellsize <cellsize>
NODATA_value <nodata value, typically -9999 for elevation data or
otherwise 1.70141e+38>
An output file with same basename and the extension .grd
Golden Software Surfer grid file
will be generated following the format
DSAA
<ncols> <nrows>
<xmin> <xmax>
<ymin> <ymax>
<zmin> <zmax>
z11 z21 z31 .... (rows of z values)
Note: Surfer grids use 1.70141e+38 for no data.
# FIXME: Not done yet
If optional argument projection is specified, a projection file with same basename and the
extension .prj will be generated.
It is assumed that the projection follows the WKT projection format.
"""
basename, extension = os.path.splitext(ascfilename)
msg = 'ASCII file %s must have extension asc' % ascfilename
assert extension == '.asc', msg
grdfilename = basename + '.grd'
prjfilename = basename + '.prj'
fid = open(ascfilename)
lines = fid.readlines()
fid.close()
fid = open(grdfilename, 'w')
# Write header
fid.write('DSAA\n')
# Check header and get number of columns
line = lines[0].strip()
fields = line.split()
msg = 'Input file %s does not look like an ASCII grd file. It must start with ncols' % ascfilename
assert fields[0] == 'ncols', msg
assert len(fields) == 2
ncols = int(fields[1])
# Get number of rows and write
line = lines[1]
fields = line.split()
nrows = int(fields[1])
fid.write('%i %i\n' % (ncols, nrows))
# Get data and compute zmin and zmax
data = lines[6:]
zmin = sys.maxint
zmax = -zmin
for line in data:
for z in [float(x) for x in line.split()]:
if z > zmax: zmax = z
if z < zmin: zmin = z
# Get cellsize
msg = 'ASCII file does not look right. Check Traceback and source code %s.' % __file__
line = lines[4]
assert line.startswith('cellsize'), msg
fields = line.split()
cellsize = float(fields[1])
# Get origin
line = lines[2]
assert line.startswith('xllcorner'), msg
fields = line.split()
xmin = float(fields[1]) + cellsize/2
line = lines[3]
assert line.startswith('yllcorner'), msg
fields = line.split()
ymin = float(fields[1]) + cellsize/2
# Calculate upper bounds and write
xmax = cellsize * (ncols-1) + xmin
ymax = cellsize * (nrows-1) + ymin
assert abs(cellsize - (xmax-xmin)/(ncols-1)) < 1.0e-6
assert abs(cellsize - (ymax-ymin)/(nrows-1)) < 1.0e-6
fid.write('%f %f\n' % (xmin, xmax))
fid.write('%f %f\n' % (ymin, ymax))
fid.write('%e %e\n' % (zmin, zmax))
# Write ASCII data reversed into GRD file
data.reverse()
for line in data:
fid.write(line)
fid.close()
if projection:
# Create associated projection file
fid = open(prjfilename, 'w')
fid.write(projection)
fid.close()
# FIXME: I think this is obsolete as the labeling happens in the contouring now
def label_kml_contours(kmlfile, contours, units):
"""Label contours in KML file as generated by ogr2ogr with specified interval, number of contours and units
kmlfile: Name of kml contour file that is to be patched with labels
interval: Contour interval if fixed, if not interval will be -1
number_of_contours: Number of contours specified. If -1 it means that a fixed interval was used.
contours: The original input. Can be False, True, List or number.
units:
"""
return
level_name = 'Level [%s]' % units
#print kmlfile, interval
fid = open(kmlfile)
lines = fid.readlines()
fid.close()
#if interval > 0:
# # This means a fixed interval was used. Create list of levels
level = number_of_contours*interval
fid = open(kmlfile, 'w')
for line in lines:
# Write existing data back
fid.write(line)
# Add new attribute 'Level' to Schema
if line.strip().startswith('<Schema name='):
fid.write('\t<SimpleField name="%s" type="string"></SimpleField>\n' % level_name)
# Add contours to 'Level' at each contour
if line.strip().startswith('<ExtendedData><SchemaData'):
fid.write('\t\t<SimpleData name="%s">%f</SimpleData>\n' % (level_name, level))
level -= interval
fid.close()
def convert_meteorological_winddirection_to_windfield(s, d):
"""Convert windspeed and meteorological direction to windfield (u, v)
Inputs:
s: Absolute windspeed [m/s]
d: Wind direction [degrees from azimuth (north)].
A direction of 90 degrees means that the wind is 'easterly' i.e. it blows towards the west.
Outputs:
u: Velocity of the east component [m/s]
v: Velocity of the north component [m/s]
"""
# Convert degrees from north to radians
r = pi*(450-d)/180
# Map from meterological wind direction to wind field
r = r+pi
# Create ouput fields
u = s*cos(r)
v = s*sin(r)
return u, v
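# Illustrative check: a 10 m/s easterly wind (d = 90 degrees) gives
# (u, v) ~= (-10.0, 0.0), i.e. flow towards the west, up to floating-point noise:
#   convert_meteorological_winddirection_to_windfield(10, 90)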
def convert_windfield_to_meteorological_winddirection(u, v):
"""Compute wind direction from u and v velocities.
Direction is 'meteorological, i.e. a Northerly wind blows towards the south.
"""
u = float(u)
v = float(v)
speed = sqrt(u*u + v*v)
if speed > 0:
theta = acos(u/speed)
else:
theta = 0 # Set wind direction arbitrarily in case speed is zero
# Correct for quadrant 3 and 4
if v < 0:
theta = 2*pi - theta
# Reverse direction to meteorological interpretation
angle = theta + pi
# Convert radians to degrees
degrees = angle*180/pi
# Convert to degrees from azimuth (from North)
direction = 450 - degrees
# Normalise direction
if direction < 0:
direction += 360
return speed, direction
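# Illustrative check (inverse of the example above):
#   convert_windfield_to_meteorological_winddirection(-10, 0)  ->  (10.0, 90.0)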
def get_wind_direction(x, filename=None):
"""Get wind direction (degrees from azimuth)
Inputs
x: text field that can either be 'N', 'NNE', 'NE', ...
or degrees
filename (optional) for error message
Output
Wind direction in decimal degrees from the North.
"""
# Map from direction to azimuth degrees
direction_table={'N': 0,
'NNE': 22.5,
'NE': 45,
'ENE': 67.5,
'E': 90,
'ESE': 112.5,
'SE': 135,
'SSE': 157.5,
'S': 180,
'SSW': 202.5,
'SW': 225,
'WSW': 247.5,
'W': 270,
'WNW': 292.5,
'NW': 315,
'NNW': 337.5}
try:
d = float(x)
except:
# Direction was not a numeric value
d = float(direction_table[x])
# Check input ranges
if not 0 <= d <= 360:
msg = 'Wind direction must be between 0 and 360 degrees.\n'
msg += 'I got %.0f degrees.\n' % d
if filename:
msg += 'Input file: %s' % filename
raise Exception(msg)
return d
def generate_contours(filename, contours, units, attribute_name,
output_dir='.', meteorological_model=None, WKT_projection=None,
verbose=True):
"""Contour ASCII grid into shp and kml files
The function uses model parameters Load_contours, Thickness_contours and Thickness_units.
"""
if verbose: print 'Processing %s:\t' % filename
logdir = os.path.join(output_dir, 'logs') # Should use same name as log dir in wrapper.py
makedir(logdir)
pathname = os.path.join(output_dir, filename)
basename, ext = os.path.splitext(pathname)
tiffile = basename + '.tif'
shpfile = basename + '.shp'
kmlfile = basename + '.kml'
prjfile = basename + '.prj'
# Get range of data
min, max = calculate_extrema(pathname)
# Establish if interval is constant
if contours is False:
if verbose: print ' No contouring requested'
return
elif contours is True:
interval = (max-min)/8 # Calculate interval automatically
else:
# The variable 'contours' is either a list or a number
try:
interval = float(contours) # Constant interval specified
except:
# The variable 'contours' must be a list
if type(contours) != type([]):
msg = 'Expected list of contours. Must be either True, False, a number or a list of numbers.'
raise Exception(msg)
interval = -1 # Indicate interval is not fixed
# Check for degenerate interval values
if 0 < interval < 1.0e-6:
msg = ' WARNING (generate_contours): Range in file %s is too small to contour: %f' % (pathname, interval)
if verbose: print msg
return
if min + interval >= max:
msg = ' WARNING (generate_contours): No contours generated for range=[%f, %f], interval=%f' % (min, max, interval)
if verbose: print msg
return
# Generate list of contours from input
contour_list = []
if interval < 0:
# A list was specified
for c in contours:
msg = 'Value in contour list %s was not a number. I got %s' % (contours, c)
if c is True or c is False:
# Just catching situation where someone puts boolean values in list.
# The problem is that float(c) below will convert it to 1 or 0
raise Exception(msg)
try:
val = float(c)
except:
raise Exception(msg)
else:
contour_list.append(val)
else:
# A constant interval was given. Build list (exclude both min and max themselves)
level = min + interval
while level < max:
contour_list.append(level)
level += interval
# Generate GeoTIFF raster
s = 'gdal_translate -of GTiff %s %s' % (pathname, tiffile)
run_with_errorcheck(s, tiffile,
logdir=logdir,
verbose=False)
# Clear the way for contours.
s = '/bin/rm -rf %s' % shpfile #
run(s, verbose=False)
# Convert contours into GDAL argument
u = units.lower()
fixed_levels = ''
for c in contour_list:
if c == 0:
# Hack - working around gdal_contour's inability to understand values like 0.0000
fixed_levels += ' 0'
else:
fixed_levels += ' %.6f' % c
#if u == 'mm':
# fixed_levels += ' %.0f' % c
#elif u == 'cm':
# fixed_levels += ' %.2f' % c
#elif u == 'm':
# fixed_levels += ' %.6f' % c
#else:
# # E.g. kg/m^2 for ash load
# fixed_levels += ' %.4f' % c
if verbose:
print ' Units: %s' % units
print ' Range in data: [%f, %f]' % (min, max)
print ' Contour levels: %s' % fixed_levels
# Check that all contour levels are within range
for c in contour_list:
if not min < c < max:
print ' WARNING: Requested contour %f is outside range and will not be shown.' % c
# Run contouring algorithm
s = 'gdal_contour -a %s -fl %s %s %s' % (attribute_name, fixed_levels, tiffile, shpfile)
run_with_errorcheck(s, shpfile,
logdir=logdir,
verbose=True)
# Generate KML
if meteorological_model == 'ncep1':
# FIXME: Test should be about coordinate system rather than meteo model
# Such as params['Coordinates'] == 'UTM' or 'LON-LAT'
s = 'ogr2ogr -f KML -t_srs EPSG:4623 %s %s' % (kmlfile, shpfile)
else:
if WKT_projection:
s = 'ogr2ogr -f KML -t_srs EPSG:4623 -s_srs %s %s %s' % (prjfile, kmlfile, shpfile)
else:
print 'WARNING (generate_contours): Model did not have a projection file'
s = 'ogr2ogr -f KML -t_srs EPSG:4623 %s %s' % (kmlfile, shpfile)
try:
run_with_errorcheck(s, kmlfile,
logdir=logdir,
verbose=False)
except Exception, e:
msg = 'Contour algorithm failed: Error message was %s.\n' % e
msg += 'This error could for example be the result of a missing /usr/lib/libproj.so file, '
msg += 'but it can be fixed by doing something like sudo ln -s /usr/lib/libproj.so.0.6.6 /usr/lib/libproj.so'
raise Exception(msg)
|
GeoscienceAustralia/PF3D
|
source/aim/utilities.py
|
Python
|
gpl-3.0
| 39,189
|
[
"NetCDF"
] |
cd65c7b1d44b828b48db495292a372568524f2adb17c8ad884dc997b8e7196fa
|
# Copyright (c) 2000-2008 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
unittest for visitors.diadefs and extensions.diadefslib modules
"""
import unittest
from logilab.astng.inspector import Linker
from logilab.common.testlib import TestCase, unittest_main
from pylint.pyreverse.diadefslib import DefaultDiadefGenerator, DiadefsHandler
from pylint.pyreverse.diagrams import set_counter
from pylint.pyreverse.writer import DotWriter
from pylint.pyreverse.utils import get_visibility
from utils import FileTC, build_file_case, get_project, Config
project = get_project('data')
linker = Linker(project)
set_counter(0)
config = Config()
handler = DiadefsHandler(config)
dd = DefaultDiadefGenerator(linker, handler).visit(project)
for diagram in dd:
diagram.extract_relationships()
class DotWriterTC(FileTC):
generated_files = ('packages_No_Name.dot', 'classes_No_Name.dot',)
def setUp(self):
FileTC.setUp(self)
writer = DotWriter(config)
writer.write(dd)
build_file_case(DotWriterTC)
class GetVisibilityTC(TestCase):
def test_special(self):
for name in ["__reduce_ex__", "__setattr__"]:
self.assertEquals(get_visibility(name), 'special')
def test_private(self):
for name in ["__g_", "____dsf", "__23_9"]:
got = get_visibility(name)
self.assertEquals(got, 'private',
'got %s instead of private for value %s' % (got, name))
def test_public(self):
self.assertEquals(get_visibility('simple'), 'public')
def test_protected(self):
for name in ["_","__", "___", "____", "_____", "___e__", "_nextsimple", "_filter_it_"]:
got = get_visibility(name)
self.assertEquals(got, 'protected',
'got %s instead of protected for value %s' % (got, name))
if __name__ == '__main__':
unittest.main()
|
dbbhattacharya/kitsune
|
vendor/packages/pylint/test/unittest_pyreverse_writer.py
|
Python
|
bsd-3-clause
| 2,652
|
[
"VisIt"
] |
451c59a5390fb46a18a6699b5a5ec3dbe1fff4fc8f10faf882f76e612dfa7d04
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@file optics.py
Module containing the optical PSF generation routines.
These are just functions; they are used to generate galsim.OpticalPSF() class instances (see
base.py).
Mostly they are solely of use to developers for generating arrays that may be useful in defining
GSObjects with an optical component. They will not therefore be used in a typical image simulation
workflow: users will find most of what they need simply using the OpticalPSF() class.
Glossary of key terms used in function names:
PSF = point spread function
OTF = optical transfer function = FT{PSF}
MTF = modulation transfer function = |FT{PSF}|
PTF = phase transfer function = p, where OTF = MTF * exp(i * p)
Wavefront = the amplitude and phase of the incident light on the telescope pupil, encoded as a
complex number. The OTF is the autocorrelation function of the wavefront.
"""
import numpy as np
import galsim
import utilities
from galsim import GSObject
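# A small self-check sketch (an editorial addition, not part of the GalSim
# API): it exercises the module-level helpers defined further below and
# verifies the glossary relations MTF = |OTF| and OTF[0, 0] = 1 + 0j for the
# default aberration-free circular pupil.
def _check_transfer_function_relations(array_shape=(64, 64)):
    otf_arr = otf(array_shape=array_shape)
    mtf_arr = mtf(array_shape=array_shape)
    assert np.allclose(mtf_arr, np.abs(otf_arr))
    assert np.allclose(otf_arr[0, 0], 1. + 0j)
    return otf_arr, mtf_arr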
class OpticalPSF(GSObject):
"""A class describing aberrated PSFs due to telescope optics. It's underlying implementation
uses an InterpolatedImage to characterize the profile.
Input aberration coefficients are assumed to be supplied in units of wavelength, and correspond
    to the Zernike polynomials in the Noll convention defined in
Noll, J. Opt. Soc. Am. 66, 207-211(1976). For a brief summary of the polynomials, refer to
http://en.wikipedia.org/wiki/Zernike_polynomials#Zernike_polynomials.
You can also optionally specify that the secondary mirror (or prime focus cage, etc.) are held
by some number of support struts. These are taken to be rectangular obscurations extending from
the outer edge of the pupil to the outer edge of the obscuration disk (or the pupil center if
`obscuration = 0.`). You can specify how many struts there are (evenly spaced in angle), how
thick they are as a fraction of the pupil diameter, and what angle they start at relative to
the positive y direction.
Initialization
--------------
>>> optical_psf = galsim.OpticalPSF(lam_over_diam, defocus=0., astig1=0., astig2=0.,
coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0.,
circular_pupil=True, obscuration=0., interpolant=None,
oversampling=1.5, pad_factor=1.5, nstruts=0,
strut_thick=0.05, strut_angle=0.*galsim.degrees)
Initializes optical_psf as a galsim.OpticalPSF() instance.
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil? [default `circular_pupil = True`]
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param interpolant Either an Interpolant2d (or Interpolant) instance or a string indicating
which interpolant should be used. Options are 'nearest', 'sinc',
'linear', 'cubic', 'quintic', or 'lanczosN' where N should be the
integer order to use. [default `interpolant = galsim.Quintic()`]
@param oversampling Optional oversampling factor for the InterpolatedImage. Setting
oversampling < 1 will produce aliasing in the PSF (not good).
[default `oversampling = 1.5`]
@param pad_factor Additional multiple by which to zero-pad the PSF image to avoid folding
compared to what would be employed for a simple galsim.Airy
[default `pad_factor = 1.5`]. Note that `pad_factor` may need to be
increased for stronger aberrations, i.e. those larger than order unity.
@param flux Total flux of the profile [default `flux=1.`].
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`]
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
@param gsparams You may also specify a gsparams argument. See the docstring for
galsim.GSParams using help(galsim.GSParams) for more information about
this option.
Methods
-------
The OpticalPSF is a GSObject, and inherits all of the GSObject methods (draw(), drawShoot(),
applyShear() etc.) and operator bindings.
"""
# Initialization parameters of the object, with type information
_req_params = { "lam_over_diam" : float }
_opt_params = {
"defocus" : float ,
"astig1" : float ,
"astig2" : float ,
"coma1" : float ,
"coma2" : float ,
"trefoil1" : float ,
"trefoil2" : float ,
"spher" : float ,
"circular_pupil" : bool ,
"obscuration" : float ,
"oversampling" : float ,
"pad_factor" : float ,
"interpolant" : str ,
"flux" : float,
"nstruts" : int,
"strut_thick" : float,
"strut_angle" : galsim.Angle }
_single_params = []
_takes_rng = False
# --- Public Class methods ---
def __init__(self, lam_over_diam, defocus=0.,
astig1=0., astig2=0., coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0.,
circular_pupil=True, obscuration=0., interpolant=None, oversampling=1.5,
pad_factor=1.5, flux=1.,
nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees,
gsparams=None):
# Currently we load optics, noise etc in galsim/__init__.py, but this might change (???)
import galsim.optics
# Choose dx for lookup table using Nyquist for optical aperture and the specified
# oversampling factor
dx_lookup = .5 * lam_over_diam / oversampling
# We need alias_threshold here, so don't wait to make this a default GSParams instance
# if the user didn't specify anything else.
if not gsparams:
gsparams = galsim.GSParams()
# Use a similar prescription as SBAiry to set Airy stepK and thus reference unpadded image
# size in physical units
stepk_airy = min(
gsparams.alias_threshold * .5 * np.pi**3 * (1. - obscuration) / lam_over_diam,
np.pi / 5. / lam_over_diam)
        # Boost Airy image size by a user-specified pad_factor to allow for larger, aberrated PSFs,
# also make npix always *odd* so that opticalPSF lookup table array is correctly centred:
npix = 1 + 2 * (np.ceil(pad_factor * (np.pi / stepk_airy) / dx_lookup)).astype(int)
# Make the psf image using this dx and array shape
optimage = galsim.optics.psf_image(
lam_over_diam=lam_over_diam, dx=dx_lookup, array_shape=(npix, npix), defocus=defocus,
astig1=astig1, astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1,
trefoil2=trefoil2, spher=spher, circular_pupil=circular_pupil, obscuration=obscuration,
flux=flux, nstruts=nstruts, strut_thick=strut_thick, strut_angle=strut_angle)
# Initialize the SBProfile
GSObject.__init__(
self, galsim.InterpolatedImage(optimage, x_interpolant=interpolant, dx=dx_lookup,
calculate_stepk=True, calculate_maxk=True,
use_true_center=False, normalization='sb',
gsparams=gsparams))
# The above procedure ends up with a larger image than we really need, which
# means that the default stepK value will be smaller than we need.
# Hence calculate_stepk=True and calculate_maxk=True above.
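# Usage sketch (an illustration only; the parameter values here are
# arbitrary): a mildly defocused PSF with some coma, a 10% central
# obscuration and four support struts. Drawing, shearing etc. then follow
# the usual GSObject methods.
def _example_optical_psf():
    return OpticalPSF(lam_over_diam=2., defocus=0.2, coma1=0.1,
                      obscuration=0.1, nstruts=4)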
def generate_pupil_plane(array_shape=(256, 256), dx=1., lam_over_diam=2., circular_pupil=True,
obscuration=0., nstruts=0, strut_thick=0.05,
strut_angle=0.*galsim.degrees):
"""Generate a pupil plane, including a central obscuration such as caused by a secondary mirror.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units.
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
Returns a tuple (rho, theta, in_pupil), the first two of which are the coordinates of the
pupil in unit disc-scaled coordinates for use by Zernike polynomials for describing the
wavefront across the pupil plane. The array in_pupil is a vector of Bools used to specify
where in the pupil plane described by rho, theta is illuminated. See also optics.wavefront.
"""
kmax_internal = dx * 2. * np.pi / lam_over_diam # INTERNAL kmax in units of array grid spacing
# Build kx, ky coords
kx, ky = utilities.kxky(array_shape)
# Then define unit disc rho and theta pupil coords for Zernike polynomials
rho = np.sqrt((kx**2 + ky**2) / (.5 * kmax_internal)**2)
theta = np.arctan2(ky, kx)
# Cut out circular pupil if desired (default, square pupil optionally supported) and include
# central obscuration
if obscuration >= 1.:
raise ValueError("Pupil fully obscured! obscuration ="+str(obscuration)+" (>= 1)")
if circular_pupil:
in_pupil = (rho < 1.)
if obscuration > 0.:
in_pupil = in_pupil * (rho >= obscuration) # * acts like "and" for boolean arrays
else:
in_pupil = (np.abs(kx) < .5 * kmax_internal) * (np.abs(ky) < .5 * kmax_internal)
if obscuration > 0.:
in_pupil = in_pupil * (
(np.abs(kx) >= .5 * obscuration * kmax_internal) *
(np.abs(ky) >= .5 * obscuration * kmax_internal))
if nstruts > 0:
if not isinstance(strut_angle, galsim.Angle):
raise TypeError("Input kwarg strut_angle must be a galsim.Angle instance.")
# Add the initial rotation if requested, converting to radians
if strut_angle.rad != 0.:
kxs, kys = utilities.rotate_xy(kx, ky, -strut_angle) # strut rotation +=ve, so coords
# rotation -ve!
else:
kxs, kys = kx, ky
# Define the angle between struts for successive use below
rotang = 360. * galsim.degrees / float(nstruts)
# Then loop through struts setting to zero in the pupil regions which lie under the strut
in_pupil *= (
(np.abs(kxs) >= .5 * strut_thick * kmax_internal) +
((kys < 0.) * (np.abs(kxs) < .5 * strut_thick * kmax_internal)))
        for istrut in range(1, nstruts):
kxs, kys = utilities.rotate_xy(kxs, kys, -rotang)
in_pupil *= (
(np.abs(kxs) >= .5 * strut_thick * kmax_internal) +
((kys < 0.) * (np.abs(kxs) < .5 * strut_thick * kmax_internal)))
return rho, theta, in_pupil
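# Illustration for generate_pupil_plane() (an editorial sketch; the values
# are arbitrary): the illuminated fraction of the grid shrinks as the
# central obscuration grows.
def _pupil_fill_fraction(obscuration=0.2, array_shape=(128, 128)):
    rho, theta, in_pupil = generate_pupil_plane(
        array_shape=array_shape, obscuration=obscuration)
    return in_pupil.sum() / float(in_pupil.size)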
def wavefront(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0.,
coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True,
obscuration=0., nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return a complex, aberrated wavefront across a circular (default) or square pupil.
Outputs a complex image (shape=array_shape) of a circular pupil wavefront of unit amplitude
that can be easily transformed to produce an optical PSF with lambda/D = lam_over_diam on an
output grid of spacing dx.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
The pupil sample locations are arranged in standard DFT element ordering format, so that
(kx, ky) = (0, 0) is the [0, 0] array element.
Input aberration coefficients are assumed to be supplied in units of wavelength, and correspond
    to the Zernike polynomials in the Noll convention defined in
Noll, J. Opt. Soc. Am. 66, 207-211(1976). For a brief summary of the polynomials, refer to
http://en.wikipedia.org/wiki/Zernike_polynomials#Zernike_polynomials.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
Outputs the wavefront for kx, ky locations corresponding to kxky(array_shape).
"""
# Define the pupil coordinates and non-zero regions based on input kwargs
rho, theta, in_pupil = generate_pupil_plane(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, circular_pupil=circular_pupil,
obscuration=obscuration, nstruts=nstruts, strut_thick=strut_thick, strut_angle=strut_angle)
pi = np.pi # minor but saves Python checking the entire np. namespace every time I need pi
# Then make wavefront image
wf = np.zeros(array_shape, dtype=complex)
wf[in_pupil] = 1.
# Defocus
wf[in_pupil] *= np.exp(2j * pi * defocus * np.sqrt(3.) * (2. * rho[in_pupil]**2 - 1.))
# Astigmatism (like e2)
wf[in_pupil] *= np.exp(2j * pi * astig1 * np.sqrt(6.) * rho[in_pupil]**2
* np.sin(2. * theta[in_pupil]))
# Astigmatism (like e1)
wf[in_pupil] *= np.exp(2j * pi * astig2 * np.sqrt(6.) * rho[in_pupil]**2
* np.cos(2. * theta[in_pupil]))
# Coma along x2
wf[in_pupil] *= np.exp(2j * pi * coma1 * np.sqrt(8.) * (3. * rho[in_pupil]**2 - 2.)
* rho[in_pupil] * np.sin(theta[in_pupil]))
# Coma along x1
    wf[in_pupil] *= np.exp(2j * pi * coma2 * np.sqrt(8.) * (3. * rho[in_pupil]**2 - 2.)
                           * rho[in_pupil] * np.cos(theta[in_pupil]))
# Trefoil (one of the arrows along x2)
wf[in_pupil] *= np.exp(2j * pi * trefoil1 * np.sqrt(8.) * rho[in_pupil]**3
* np.sin(3. * theta[in_pupil]))
# Trefoil (one of the arrows along x1)
wf[in_pupil] *= np.exp(2j * pi * trefoil2 * np.sqrt(8.) * rho[in_pupil]**3
* np.cos(3. * theta[in_pupil]))
# Spherical aberration
wf[in_pupil] *= np.exp(2j * pi * spher * np.sqrt(5.)
* (6. * rho[in_pupil]**4 - 6. * rho[in_pupil]**2 + 1.))
return wf
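# Sanity-check sketch for wavefront() (editorial addition): every
# illuminated pupil sample keeps unit amplitude, because each Zernike
# aberration applied above is a pure phase factor.
def _check_unit_amplitude(array_shape=(64, 64), defocus=0.3, spher=0.1):
    wf = wavefront(array_shape=array_shape, defocus=defocus, spher=spher)
    assert np.allclose(np.abs(wf[wf != 0.]), 1.)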
def wavefront_image(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0.,
astig1=0., astig2=0., coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0.,
circular_pupil=True, obscuration=0., nstruts=0, strut_thick=0.05,
strut_angle=0.*galsim.degrees):
"""Return wavefront as a (real, imag) tuple of ImageViewD objects rather than complex NumPy
array.
Outputs a circular pupil wavefront of unit amplitude that can be easily transformed to produce
an optical PSF with lambda/diam = lam_over_diam on an output grid of spacing dx.
The ImageView output can be used to directly instantiate an SBInterpolatedImage, and its
scale will reflect the spacing of the output grid in the system of units adopted for
lam_over_diam.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
The pupil sample locations are arranged in standard DFT element ordering format, so that
(kx, ky) = (0, 0) is the [0, 0] array element. The scale of the output ImageViewD is correct in
k space units.
Input aberration coefficients are assumed to be supplied in units of wavelength, and correspond
    to the Zernike polynomials in the Noll convention defined in
Noll, J. Opt. Soc. Am. 66, 207-211(1976). For a brief summary of the polynomials, refer to
http://en.wikipedia.org/wiki/Zernike_polynomials#Zernike_polynomials.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
array = wavefront(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
if array_shape[0] != array_shape[1]:
import warnings
warnings.warn(
"Wavefront Images' scales will not be correct in both directions for non-square "+
"arrays, only square grids currently supported by galsim.Images.")
scale = 2. * np.pi / array_shape[0]
imreal = galsim.ImageViewD(np.ascontiguousarray(array.real.astype(np.float64)), scale=scale)
imimag = galsim.ImageViewD(np.ascontiguousarray(array.imag.astype(np.float64)), scale=scale)
return (imreal, imimag)
def psf(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0., coma1=0.,
coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True, obscuration=0.,
nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees, flux=1.):
"""Return NumPy array containing circular (default) or square pupil PSF with low-order
aberrations.
The PSF is centred on the array[array_shape[0] / 2, array_shape[1] / 2] pixel by default, and
uses surface brightness rather than flux units for pixel values, matching SBProfile.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
    Output NumPy array is C-contiguous.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
@param flux total flux of the profile [default flux=1.].
"""
wf = wavefront(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
ftwf = np.fft.fft2(wf) # I think this (and the below) is quicker than np.abs(ftwf)**2
# The roll operation below restores the c_contiguous flag, so no need for a direct action
    im = utilities.roll2d((ftwf * ftwf.conj()).real, (array_shape[0] // 2, array_shape[1] // 2))
return im * (flux / (im.sum() * dx**2))
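# Sketch verifying the flux normalization of psf() (editorial addition):
# pixel values are surface brightness, so sum(im) * dx**2 recovers the
# requested total flux.
def _check_psf_flux(array_shape=(64, 64), dx=1., flux=2.5):
    im = psf(array_shape=array_shape, dx=dx, flux=flux)
    assert np.allclose(im.sum() * dx**2, flux)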
def psf_image(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0.,
coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True,
obscuration=0., nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees, flux=1.):
"""Return circular (default) or square pupil PSF with low-order aberrations as an ImageViewD.
The PSF is centred on the array[array_shape[0] / 2, array_shape[1] / 2] pixel by default, and
uses surface brightness rather than flux units for pixel values, matching SBProfile.
The ImageView output can be used to directly instantiate an SBInterpolatedImage, and its
scale will reflect the spacing of the output grid in the system of units adopted for
lam_over_diam.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
@param array_shape the NumPy array shape desired for the array view of the ImageViewD.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
@param flux total flux of the profile [default flux=1.].
"""
array = psf(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, flux=flux, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
im = galsim.ImageViewD(array.astype(np.float64), scale=dx)
return im
def otf(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0., coma1=0.,
coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True, obscuration=0.,
nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return the complex OTF of a circular (default) or square pupil with low-order aberrations as
a NumPy array.
OTF array element ordering follows the DFT standard of kxky(array_shape), and has
otf[0, 0] = 1+0j by default.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
Output complex NumPy array is C-contiguous.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
wf = wavefront(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
ftwf = np.fft.fft2(wf) # I think this (and the below) is quicker than np.abs(ftwf)**2
otf = np.fft.ifft2((ftwf * ftwf.conj()).real)
# Make unit flux before returning
return np.ascontiguousarray(otf) / otf[0, 0].real
def otf_image(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0.,
coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True,
obscuration=0., nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return the complex OTF of a circular (default) or square pupil with low-order aberrations as
a (real, imag) tuple of ImageViewD objects, rather than a complex NumPy array.
OTF array element ordering follows the DFT standard of kxky(array_shape), and has
otf[0, 0] = 1+0j by default. The scale of the output ImageViewD is correct in k space units.
The ImageView output can be used to directly instantiate an SBInterpolatedImage, and its
scale will reflect the spacing of the output grid in the system of units adopted for
lam_over_diam.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
@param array_shape the NumPy array shape desired for array views of ImageViewD tuple.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
array = otf(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
if array_shape[0] != array_shape[1]:
import warnings
warnings.warn(
"OTF Images' scales will not be correct in both directions for non-square arrays, "+
"only square grids currently supported by galsim.Images.")
scale = 2. * np.pi / array_shape[0]
imreal = galsim.ImageViewD(np.ascontiguousarray(array.real.astype(np.float64)), scale=scale)
imimag = galsim.ImageViewD(np.ascontiguousarray(array.imag.astype(np.float64)), scale=scale)
return (imreal, imimag)
def mtf(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0., coma1=0.,
coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True, obscuration=0.,
nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return NumPy array containing the MTF of a circular (default) or square pupil with low-order
aberrations.
MTF array element ordering follows the DFT standard of kxky(array_shape), and has
mtf[0, 0] = 1 by default.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
Output double NumPy array is C-contiguous.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
return np.abs(otf(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
obscuration=obscuration, circular_pupil=circular_pupil, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle))
def mtf_image(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0.,
coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True,
obscuration=0., nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return the MTF of a circular (default) or square pupil with low-order aberrations as an
ImageViewD.
MTF array element ordering follows the DFT standard of kxky(array_shape), and has
mtf[0, 0] = 1 by default. The scale of the output ImageViewD is correct in k space units.
The ImageView output can be used to directly instantiate an SBInterpolatedImage, and its
scale will reflect the spacing of the output grid in the system of units adopted for
lam_over_diam.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
@param array_shape the NumPy array shape desired for the array view of the ImageViewD.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.).
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
array = mtf(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
if array_shape[0] != array_shape[1]:
import warnings
warnings.warn(
"MTF Image scale will not be correct in both directions for non-square arrays, only "+
"square grids currently supported by galsim.Images.")
im = galsim.ImageViewD(array.astype(np.float64), scale = 2. * np.pi / array_shape[0])
return im
def ptf(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0., coma1=0.,
coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True, obscuration=0.,
nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return NumPy array containing the PTF [radians] of a circular (default) or square pupil with
low-order aberrations.
PTF array element ordering follows the DFT standard of kxky(array_shape), and has
ptf[0, 0] = 0. by default.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
Output double NumPy array is C-contiguous.
@param array_shape the NumPy array shape desired for the output array.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.)
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
kx, ky = utilities.kxky(array_shape)
k2 = (kx**2 + ky**2)
ptf = np.zeros(array_shape)
kmax_internal = dx * 2. * np.pi / lam_over_diam # INTERNAL kmax in units of array grid spacing
# Try to handle where both real and imag tend to zero...
ptf[k2 < kmax_internal**2] = np.angle(otf(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)[k2 < kmax_internal**2])
return ptf
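# Sketch (editorial addition): the PTF vanishes at the DC element because
# otf[0, 0] = 1 + 0j after the unit-flux normalization in otf().
def _check_ptf_dc(array_shape=(64, 64)):
    assert np.isclose(ptf(array_shape=array_shape)[0, 0], 0.)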
def ptf_image(array_shape=(256, 256), dx=1., lam_over_diam=2., defocus=0., astig1=0., astig2=0.,
coma1=0., coma2=0., trefoil1=0., trefoil2=0., spher=0., circular_pupil=True,
obscuration=0., nstruts=0, strut_thick=0.05, strut_angle=0.*galsim.degrees):
"""Return the PTF [radians] of a circular (default) or square pupil with low-order aberrations
as an ImageViewD.
PTF array element ordering follows the DFT standard of kxky(array_shape), and has
ptf[0, 0] = 0. by default. The scale of the output ImageViewD is correct in k space units.
The ImageView output can be used to directly instantiate an SBInterpolatedImage, and its
scale will reflect the spacing of the output grid in the system of units adopted for
lam_over_diam.
To ensure properly Nyquist sampled output any user should set lam_over_diam >= 2. * dx.
@param array_shape the NumPy array shape desired for the array view of the ImageViewD.
@param dx grid spacing of PSF in real space units
@param lam_over_diam lambda / telescope diameter in the physical units adopted for dx
(user responsible for consistency).
@param defocus defocus in units of incident light wavelength.
@param astig1 astigmatism (like e2) in units of incident light wavelength.
@param astig2 astigmatism (like e1) in units of incident light wavelength.
@param coma1 coma along y in units of incident light wavelength.
@param coma2 coma along x in units of incident light wavelength.
@param trefoil1 trefoil (one of the arrows along y) in units of incident light
wavelength.
@param trefoil2 trefoil (one of the arrows along x) in units of incident light
wavelength.
@param spher spherical aberration in units of incident light wavelength.
@param circular_pupil adopt a circular pupil?
@param obscuration linear dimension of central obscuration as fraction of pupil linear
dimension, [0., 1.)
@param nstruts Number of radial support struts to add to the central obscuration
[default `nstruts = 0`].
@param strut_thick Thickness of support struts as a fraction of pupil diameter
[default `strut_thick = 0.05`].
@param strut_angle Angle made between the vertical and the strut starting closest to it,
defined to be positive in the counter-clockwise direction; must be a
galsim.Angle instance [default `strut_angle = 0. * galsim.degrees`].
"""
array = ptf(
array_shape=array_shape, dx=dx, lam_over_diam=lam_over_diam, defocus=defocus, astig1=astig1,
astig2=astig2, coma1=coma1, coma2=coma2, trefoil1=trefoil1, trefoil2=trefoil2, spher=spher,
circular_pupil=circular_pupil, obscuration=obscuration, nstruts=nstruts,
strut_thick=strut_thick, strut_angle=strut_angle)
if array_shape[0] != array_shape[1]:
import warnings
warnings.warn(
"PTF Image scale will not be correct in both directions for non-square arrays, only "+
"square grids currently supported by galsim.Images.")
im = galsim.ImageViewD(array.astype(np.float64), scale = 2. * np.pi / array_shape[0])
return im
|
mardom/GalSim
|
galsim/optics.py
|
Python
|
gpl-3.0
| 49,279
|
[
"Galaxy"
] |
7427435084f5524fad027023ad91bc32e0f2075cb384df2efb1ce8d2ef8c9276
|
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, subprocess
from os.path import join, dirname, abspath
from zeroinstall import SafeException
from repo import scm
topdir = dirname(dirname(dirname(abspath(__file__))))
def handle(args):
if args.key == '-':
key = None
else:
# Get the fingerprint from the key ID (and check we have the secret key)
try:
keys = subprocess.check_output(['gpg', '-q', '--fixed-list-mode', '--fingerprint', '--with-colons', '--list-secret-keys', args.key])
except subprocess.CalledProcessError as ex:
raise SafeException("GPG key '{key}' not found ({ex})".format(key = args.key, ex = ex))
in_ssb = False
fingerprint = None
for line in keys.split('\n'):
bits = line.split(':')
if bits[0] == 'ssb': in_ssb = True
elif bits[0] == 'sec': in_ssb = False
elif bits[0] == 'fpr':
if in_ssb and fingerprint is not None:
pass # Ignore sub-keys (unless we don't have a primary - can that happen?)
elif fingerprint is None:
fingerprint = bits[9]
else:
raise SafeException("Multiple GPG keys match '{key}':\n{output}".format(
key = args.key, output = keys))
if fingerprint is None:
raise SafeException("GPG key not found '{key}'".format(key = args.key))
key = '0x' + fingerprint
# Create the directory structure
os.mkdir(args.path)
os.chdir(args.path)
os.mkdir('incoming')
os.mkdir('feeds')
os.mkdir('public')
# Write the configuration file, with the GPG key filled in
with open(join(topdir, 'resources', '0repo-config.py.template'), 'rt') as stream:
data = stream.read()
data = data.replace('"{{GPGKEY}}"', '"' + key + '"' if key else "None")
with open('0repo-config.py', 'wt') as stream:
stream.write(data)
# Initialise the Git repository
subprocess.check_call(['git', 'init', '-q', 'feeds'])
scm.commit('feeds', [], 'Created new repository', key, extra_options = ['--allow-empty'])
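# Usage sketch (an editorial illustration; `handle` only reads `key` and
# `path` from its argument, so an argparse-style namespace is enough):
def _example_create_unsigned(path):
	from argparse import Namespace
	handle(Namespace(key = '-', path = path))	# key '-' skips GPG signing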
|
bastianeicher/0repo
|
repo/cmd/create.py
|
Python
|
lgpl-2.1
| 2,001
|
[
"VisIt"
] |
eed2e476b0c9a3f565e989503159ef732262619dfc7c52b43fa9b6e9d4675faf
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides plotting capabilities for battery related applications.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 12, 2012"
from collections import OrderedDict
from pymatgen.util.plotting import pretty_plot
class VoltageProfilePlotter:
"""
A plotter to make voltage profile plots for batteries.
"""
def __init__(self, xaxis="capacity"):
"""
Args:
xaxis: The quantity to use as the xaxis. Can be either capacity (the
default), or the frac_x.
"""
self._electrodes = OrderedDict()
self.xaxis = xaxis
def add_electrode(self, electrode, label=None):
"""
Add an electrode to the plot.
Args:
electrode: An electrode. All electrodes satisfying the
AbstractElectrode interface should work.
label: A label for the electrode. If None, defaults to a counting
system, i.e. 'Electrode 1', 'Electrode 2', ...
"""
if not label:
label = "Electrode {}".format(len(self._electrodes) + 1)
self._electrodes[label] = electrode
def get_plot_data(self, electrode):
"""
Args:
electrode (): Electrode object
Returns:
Plot data in x, y.
"""
x = []
y = []
cap = 0
most_discharged = electrode[-1].frac_discharge
norm = most_discharged / (1 - most_discharged)
for vpair in electrode:
if self.xaxis == "capacity":
x.append(cap)
cap += vpair.mAh / electrode.normalization_mass
x.append(cap)
else:
x.append(vpair.frac_charge / (1 - vpair.frac_charge) / norm)
x.append(vpair.frac_discharge / (1 - vpair.frac_discharge)
/ norm)
y.extend([vpair.voltage] * 2)
x.append(x[-1])
y.append(0)
return x, y
def get_plot(self, width=8, height=8):
"""
Returns a plot object.
Args:
width: Width of the plot. Defaults to 8 in.
            height: Height of the plot. Defaults to 8 in.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(width, height)
for label, electrode in self._electrodes.items():
(x, y) = self.get_plot_data(electrode)
plt.plot(x, y, '-', linewidth=2, label=label)
plt.legend()
if self.xaxis == "capacity":
plt.xlabel('Capacity (mAh/g)')
else:
plt.xlabel('Fraction')
plt.ylabel('Voltage (V)')
plt.tight_layout()
return plt
def show(self, width=8, height=6):
"""
Show the voltage profile plot.
Args:
width: Width of the plot. Defaults to 8 in.
height: Height of the plot. Defaults to 6 in.
"""
self.get_plot(width, height).show()
def save(self, filename, image_format="eps", width=8, height=6):
"""
Save the plot to an image file.
        Args:
            filename: Filename to save to.
            image_format: Format to save to. Defaults to eps.
            width: Width of the plot. Defaults to 8 in.
            height: Height of the plot. Defaults to 6 in.
        """
self.get_plot(width, height).savefig(filename, format=image_format)
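# Usage sketch (editorial addition; the `electrode` argument is assumed to
# be any AbstractElectrode-style object built elsewhere, e.g. an
# InsertionElectrode - it is not constructed here):
def _plot_single_electrode(electrode, label="Electrode 1"):
    plotter = VoltageProfilePlotter(xaxis="capacity")
    plotter.add_electrode(electrode, label=label)
    return plotter.get_plot()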
|
gVallverdu/pymatgen
|
pymatgen/apps/battery/plotter.py
|
Python
|
mit
| 3,550
|
[
"pymatgen"
] |
0ea287ea03bc5e07682c05bc33293ec245cd16e277040b59ef3b78e49105aa27
|
import imgaug as ia
from imgaug import augmenters as iaa
import os
import cv2
import numpy as np
DATASET_DIR = '/media/meerkat/Data/datasets/docs_crlv_plate_augmented'
NUM_AUG_IMAGES = 10
NUM_SCALES = 3
MAX_SCALE = 1.6
IM_HEIGHT = 64
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz/'
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
# execute 0 to 5 of the following (less important) augmenters per image
# don't execute all of them, as that would often be way too strong
im_filters = iaa.SomeOf((0, 5),
[
iaa.OneOf([
            iaa.GaussianBlur((0, 1.5)), # blur images with a sigma between 0 and 1.5
            iaa.AverageBlur(k=(2, 5)), # blur image using local means with kernel sizes between 2 and 5
            iaa.MedianBlur(k=(3, 7)), # blur image using local medians with kernel sizes between 3 and 7
]),
iaa.Sharpen(alpha=(0, 0.1), lightness=(0.75, 1.5)), # sharpen images
iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
iaa.Multiply((0.5, 1.5), per_channel=0.5), # change brightness of images (50-150% of original value)
        iaa.Add((-20, 20)), # change brightness of images (by -20 to +20 of original value)
iaa.OneOf([
iaa.ContrastNormalization((0.75, 1.75), per_channel=0.5), # improve or worsen the contrast
iaa.ContrastNormalization((0.5, 1.5)), # improve or worsen the contrast
]),
iaa.Grayscale(alpha=(0.0, 1.0)),
],
random_order=True
)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq_3 = iaa.Sequential(
[
# apply the following augmenters to most images
sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
sometimes(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)}, # translate by -20 to +20 percent (per axis)
rotate=(-4, 4), # rotate by -45 to +45 degrees
shear=(-6, 6), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode='edge' # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
im_filters
],
random_order=True
)
seq_2 = iaa.Sequential(
[
# apply the following augmenters to most images
        sometimes(iaa.Crop(percent=(0, 0.05))), # crop images by 0-5% of their height/width
sometimes(iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)}, # scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)}, # translate by -20 to +20 percent (per axis)
rotate=(-3, 3), # rotate by -45 to +45 degrees
shear=(-6, 6), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode='edge' # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
im_filters
],
random_order=True
)
seq_1 = iaa.Sequential(
[
sometimes(iaa.Affine(
scale={"x": (0.95, 1.05), "y": (0.95, 1.05)}, # scale images to 80-120% of their size, individually per axis
translate_percent={"x": (-0.025, 0.025), "y": (-0.05, 0.025)}, # translate by -20 to +20 percent (per axis)
rotate=(0, 0), # rotate by -45 to +45 degrees
shear=(-6, 6), # shear by -16 to +16 degrees
order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode='edge' # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
im_filters
],
random_order=True
)
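# Minimal usage sketch for the pipelines above (editorial addition),
# mirroring the scale-based selection in AnnotatorPlugin.process below:
# augment n copies of a single uint8 HxWxC image with the sequence matched
# to the crop scale.
def _augment_copies(im, n=4, scale=1.0):
    batch = np.asarray([im] * n)
    if scale <= 1.2:
        return seq_1.augment_images(batch)
    elif scale < 1.6:
        return seq_2.augment_images(batch)
    return seq_3.augment_images(batch)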
class AnnotatorPlugin:
_VERSION = '0.0.1'
def __init__(self, dataset, partition, additional_params = {}):
self.image_path_list = [[],[]]
self.label_list = [[],[]]
        # os.system returns an exit code rather than raising, so no try/except is needed
        os.system('rm -rf '+DATASET_DIR)
os.system('mkdir '+DATASET_DIR)
os.system('mkdir '+DATASET_DIR+'/train/')
os.system('mkdir '+DATASET_DIR+'/val/')
def is_date(self, text):
return len(text.split('/')) == 3
def process(self, im, anno):
part = anno['partition']
print('partition', part)
if part == 0:
im_name = DATASET_DIR+'/train/'+anno['phash']
else:
im_name = DATASET_DIR+'/val/'+anno['phash']
width = float(im.shape[1])
height = float(im.shape[0])
curr_tag = ''
# print('processing', anno['phash'])
annotations = []
imgs_vec = []
txt_vec = []
curr_im = 0
for bb in anno['anno']:
if bb['ignore']:
continue
if len(bb['labels']) <= 0:
continue
            if bb['labels'][0] not in ('plate', 'plate1', 'plate2', 'plate_ant'):
continue
curr_label = ''
if len(bb['labels']) > 1:
curr_label = bb['labels'][1].lower()
curr_label = curr_label.replace(' ','')
bad_letter = False
for a in curr_label:
if a not in alphabet:
print('invalid char', a, anno['phash'])
bad_letter = True
if bad_letter:
continue
# curr_label = curr_label.replace('-','')
# curr_label = curr_label.replace('.','')
l = int(bb['left'])
t = int(bb['top'])
r = int(bb['right'])
b = int(bb['bottom'])
for i in range(0,NUM_SCALES):
curr_scale = 1+(MAX_SCALE-1)/NUM_SCALES*(i+1)
(nl, nt, nr, nb) = self.safe_scale_bbox(l, t, r, b, curr_scale, im.shape)
# imgs_vec = np.asarray([np.copy(im[nt:nb,nl:nr,:])])
aux_im = np.copy(im[nt:nb,nl:nr,:])
if aux_im.shape[0] <= 0 or aux_im.shape[1] <= 0:
break
scale_im = IM_HEIGHT/float(aux_im.shape[0])
aux_im = cv2.resize(aux_im, (int(aux_im.shape[1]*scale_im), IM_HEIGHT))
imgs_vec = np.asarray([aux_im])
txt_vec.append(curr_label)
for aug in range(0,NUM_AUG_IMAGES):
if curr_scale <= 1.2:
images_aug = seq_1.augment_images(imgs_vec)
elif curr_scale < 1.6:
images_aug = seq_2.augment_images(imgs_vec)
else:
images_aug = seq_3.augment_images(imgs_vec)
for j in range(0,len(images_aug)):
curr_name = im_name+'_'+str(curr_im).zfill(4)
with open(curr_name+'.txt', 'w') as f:
f.write(curr_label+'\n')
cv2.imwrite(curr_name+'.png', images_aug[j,:])
self.image_path_list[part].append(curr_name+'.png')
self.label_list[part].append(curr_label)
curr_im += 1
return (im, anno)
def end(self):
with open(DATASET_DIR+'/train/image_path_list.txt', 'w') as f:
for p in self.image_path_list[0]:
f.write(p+'\n')
with open(DATASET_DIR+'/train/label_list.txt', 'w') as f:
for l in self.label_list[0]:
f.write(l+'\n')
with open(DATASET_DIR+'/val/image_path_list.txt', 'w') as f:
for p in self.image_path_list[1]:
f.write(p+'\n')
with open(DATASET_DIR+'/val/label_list.txt', 'w') as f:
for l in self.label_list[1]:
f.write(l+'\n')
def get_parameters(self):
return {'parameters': []}
def get_version(self):
return self._VERSION
def safe_scale_bbox(self, l, t, r, b, scale, shape):
dw = (r-l)*(scale-1)/6.0
dh = (b-t)*(scale-1)/2.0
l = int(l-dw)
t = int(t-dh)
r = int(r+dw)
b = int(b+dh)
if l<0: l=0
if t<0: t=0
if b>=shape[0]: b=shape[0]-1
if r>=shape[1]: r=shape[1]-1
return (l,t,r,b)
|
meerkat-cv/annotator-supreme
|
annotator_supreme/plugins/export_crlv_plate_augmentation.py
|
Python
|
mit
| 9,056
|
[
"Gaussian"
] |
dcd995400261a74e2d8f2e7785c9b04cd36f0b274b2eb0ecf45107ed8036b26b
|
# -*- coding: utf-8 -*-
{
"'Cancel' will indicate an asset log entry did not occur": "'Cancel' will indicate an asset log entry did not occur",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.",
"A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year",
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organization's name, eg. IFRC.",
"Add Person's Details": "Add Person's Details",
"Add Person's Details": "Add Person's Details",
"Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": "Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).",
"Any geographically-based code to describe a message target area, in the form. The key is a user-assigned string designating the domain of the code, and the content of value is a string (which may represent a number) denoting the value itself (e.g., name='ZIP' and value='54321'). This should be used in concert with an equivalent description in the more universally understood polygon and circle forms whenever possible.": "Any geographically-based code to describe a message target area, in the form. The key is a user-assigned string designating the domain of the code, and the content of value is a string (which may represent a number) denoting the value itself (e.g., name='ZIP' and value='54321'). This should be used in concert with an equivalent description in the more universally understood polygon and circle forms whenever possible.",
"Can't import tweepy": "Can't import tweepy",
"Caution: doesn't respect the framework rules!": "Caution: doesn't respect the framework rules!",
"Click 'Start' to synchronize with this repository now:": "Click 'Start' to synchronize with this repository now:",
"Couldn't open %s!": "Couldn't open %s!",
"Create 'More Info'": "Create 'More Info'",
"Don't Know": "Don't Know",
"Edit 'More Info'": "Edit 'More Info'",
"Edit Person's Details": "Edit Person's Details",
"Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.": "Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.",
"For wardens, select a Zone from the list or click 'Add Zone'": "For wardens, select a Zone from the list or click 'Add Zone'",
"Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.",
"If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": "If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.",
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.",
"If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.": "If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.",
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.",
"If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": "If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.",
"If you don't see the Type in the list, you can add a new one by clicking link 'Add Region'.": "If you don't see the Type in the list, you can add a new one by clicking link 'Add Region'.",
"If you don't see the Type in the list, you can add a new one by clicking link 'Add Warehouse Type'.": "If you don't see the Type in the list, you can add a new one by clicking link 'Add Warehouse Type'.",
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": "If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.",
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": "If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.",
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": "If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.",
"If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": "If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.",
"If you don't see the asset in the list, you can add a new one by clicking link 'Create Asset'.": "If you don't see the asset in the list, you can add a new one by clicking link 'Create Asset'.",
"If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiary'.": "If you don't see the beneficiary in the list, you can add a new one by clicking link 'Add Beneficiary'.",
"If you don't see the campaign in the list, you can add a new one by clicking link 'Create Campaign'.": "If you don't see the campaign in the list, you can add a new one by clicking link 'Create Campaign'.",
"If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": "If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.",
"If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": "If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.",
"If you don't see the milestone in the list, you can add a new one by clicking link 'Create Milestone'.": "If you don't see the milestone in the list, you can add a new one by clicking link 'Create Milestone'.",
"If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": "If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.",
"If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": "If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.",
"If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.": "If you don't see the vehicle in the list, you can add a new one by clicking link 'Add Vehicle'.",
"If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'",
"If you specify a module then this will be used as the text in that module's index page": "If you specify a module then this will be used as the text in that module's index page",
"If you specify a resource then this will be used as the text in that resource's summary page": "If you specify a resource then this will be used as the text in that resource's summary page",
"Level is higher than parent's": "Level is higher than parent's",
"List Persons' Details": "List Persons' Details",
"Nail pullers/cat's claws": "Nail pullers/cat's claws",
"Need a 'url' argument!": "Need a 'url' argument!",
"No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": "No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530",
"Only Categories of type 'Asset' will be seen in the dropdown.": "Only Categories of type 'Asset' will be seen in the dropdown.",
"Only Categories of type 'Vehicle' will be seen in the dropdown.": "Only Categories of type 'Vehicle' will be seen in the dropdown.",
"Only Items whose Category are of type 'Vehicle' will be seen in the dropdown.": "Only Items whose Category are of type 'Vehicle' will be seen in the dropdown.",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": "Parent level should be higher than this record's level. Parent level is",
"Password fields don't match": "Password fields don't match",
"Person's Details added": "Person's Details added",
"Person's Details deleted": "Person's Details deleted",
"Person's Details updated": "Person's Details updated",
"Person's Details": "Person's Details",
"Persons' Details": "Persons' Details",
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
"Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": "Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.",
"Policy or Strategy added, awaiting administrator's approval": "Policy or Strategy added, awaiting administrator's approval",
"Quantity in %s's Warehouse": "Quantity in %s's Warehouse",
"Required when scope is 'Private', optional when scope is 'Public' or 'Restricted'. Each recipient shall be identified by an identifier or an address.": "Required when scope is 'Private', optional when scope is 'Public' or 'Restricted'. Each recipient shall be identified by an identifier or an address.",
"Search Person's Details": "Search Person's Details",
"Select 2 records from this list, then click 'Merge'.": "Select 2 records from this list, then click 'Merge'.",
"Select a Room from the list or click 'Create Room'": "Select a Room from the list or click 'Create Room'",
"Select a Staff Type from the list or click 'Add Staff Type'": "Select a Staff Type from the list or click 'Add Staff Type'",
"Select a Zone Type from the list or click 'Add Zone Type'": "Select a Zone Type from the list or click 'Add Zone Type'",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.",
"Sender's name": "Sender's name",
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": "Sorry, we couldn't find that page.",
"Status 'assigned' requires the %(fieldname)s to not be blank": "Status 'assigned' requires the %(fieldname)s to not be blank",
"The Project module can be used to record Project Information and generate Who's Doing What Where reports.": "The Project module can be used to record Project Information and generate Who's Doing What Where reports.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
"The code representing the digital digest ('hash') computed from the resource file": "The code representing the digital digest ('hash') computed from the resource file",
"The human-readable text describing the type and content, such as 'map' or 'photo', of the resource file.": "The human-readable text describing the type and content, such as 'map' or 'photo', of the resource file.",
"The message note is primarily intended for use with status 'Exercise' and message type 'Error'": "The message note is primarily intended for use with status 'Exercise' and message type 'Error'",
"The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": "The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.",
"The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.",
"The staff member's official job title": "The staff member's official job title",
"The volunteer's role": "The volunteer's role",
"There are no details for this person yet. Add Person's Details.": "There are no details for this person yet. Add Person's Details.",
"This isn't visible to the published site, but is used to allow menu items to point to the page": "This isn't visible to the published site, but is used to allow menu items to point to the page",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a building assessment, enter the name or ID. You may use % as wildcard. Press 'Search' without input to list all assessments.": "To search for a building assessment, enter the name or ID. You may use % as wildcard. Press 'Search' without input to list all assessments.",
"To search for a building canvass assessment, enter the Building Name or Addresss. You may use % as wildcard. Press 'Search' without input to list all assessments.": "To search for a building canvass assessment, enter the Building Name or Addresss. You may use % as wildcard. Press 'Search' without input to list all assessments.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.",
"To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.",
"To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"To search for an assessment, enter any portion of the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "To search for an assessment, enter any portion of the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.",
"Type the first few characters of one of the Participant's names.": "Type the first few characters of one of the Participant's names.",
"Type the first few characters of one of the Person's names.": "Type the first few characters of one of the Person's names.",
"Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.",
"Type the name of an existing catalog kit OR Click 'Create Kit' to add a kit which is not in the catalog.": "Type the name of an existing catalog kit OR Click 'Create Kit' to add a kit which is not in the catalog.",
"Type the name of an existing site OR Click 'Create Warehouse' to add a new warehouse.": "Type the name of an existing site OR Click 'Create Warehouse' to add a new warehouse.",
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
"Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.",
"Used when scope is 'Restricted'.": "Used when scope is 'Restricted'.",
"User's role": "User's role",
"Yes, No, Don't Know": "Yes, No, Don't Know",
"You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets.": "You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets.",
"You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": "You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.",
"You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": "You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.",
"You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents.": "You can search by description. You may use % as wildcard. Press 'Search' without input to list all incidents.",
"You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.",
"You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": "You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.",
"couldn't be parsed so NetworkLinks not followed.": "couldn't be parsed so NetworkLinks not followed.",
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.",
"must not be used except in combination with the 'altitude' element. The ceiling measure is in feet above mean sea level.": "must not be used except in combination with the 'altitude' element. The ceiling measure is in feet above mean sea level.",
'!langcode!': 'sv',
'!langname!': 'sv',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# Results per query': '# Results per query',
'# of Inhabitants': '# of Inhabitants',
'# of International Staff': '# of International Staff',
'# of National Staff': '# of National Staff',
'# selected': '# selected',
'%(GRN)s Number': '%(GRN)s Number',
'%(GRN)s Status': '%(GRN)s Status',
'%(PO)s Number': '%(PO)s Number',
'%(REQ)s Number': '%(REQ)s Number',
'%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s not installed. Ask the Server Administrator to install on Server.',
'%(count)s Entries Found': '%(count)s Entries Found',
'%(count)s Recipients': '%(count)s Recipients',
'%(count)s Roles of the user removed': '%(count)s Roles of the user removed',
'%(count)s Users removed from Role': '%(count)s Users removed from Role',
'%(count_of)d translations have been imported to the %(language)s language file': '%(count_of)d translations have been imported to the %(language)s language file',
'%(item)s requested from %(site)s': '%(item)s requested from %(site)s',
'%(label)s != %(values)s': '%(label)s != %(values)s',
'%(label)s = %(values)s': '%(label)s = %(values)s',
'%(label)s contains %(values)s': '%(label)s contains %(values)s',
'%(label)s contains any of %(values)s': '%(label)s contains any of %(values)s',
'%(label)s does not contain %(values)s': '%(label)s does not contain %(values)s',
'%(label)s is %(values)s': '%(label)s is %(values)s',
'%(label)s like %(values)s': '%(label)s like %(values)s',
'%(label)s not like %(values)s': '%(label)s not like %(values)s',
'%(module)s not installed': '%(module)s not installed',
'%(number)s RDRT members added': '%(number)s RDRT members added',
'%(number)s Recipients added to Alert': '%(number)s Recipients added to Alert',
'%(pe)s in %(location)s': '%(pe)s in %(location)s',
'%(proj4js)s definition': '%(proj4js)s definition',
'%(quantity)s in stock': '%(quantity)s in stock',
'%(resource)s Filter': '%(resource)s Filter',
'%(site)s (Recipient)': '%(site)s (Recipient)',
'%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!': '%(site)s has no items exactly matching this request. There may still be other items in stock which can fulfill this request!',
'%(site_label)s Status added': '%(site_label)s Status added',
'%(site_label)s Status deleted': '%(site_label)s Status deleted',
'%(site_label)s Status updated': '%(site_label)s Status updated',
'%(site_label)s Status': '%(site_label)s Status',
'%(system_name)s - New User Registered': '%(system_name)s - New User Registered',
'%(system_name)s - New User Registration Approval Pending': '%(system_name)s - New User Registration Approval Pending',
'%.1f km': '%.1f km',
'%I:%M %p': '%I:%M %p',
'%m-%d-%Y': '%m-%d-%Y',
'%s %%{row} deleted': '%s %%{row} deleted',
'%s %%{row} updated': '%s %%{row} updated',
'%s AND %s': '%s AND %s',
'%s OR %s': '%s OR %s',
'%s and %s': '%s and %s',
'%s items are attached to this shipment': '%s items are attached to this shipment',
'%s linked to %s': '%s linked to %s',
'%s or %s': '%s or %s',
'%s selected': '%s selected',
'& then click on the map below to adjust the Lat/Lon fields': '& then click on the map below to adjust the Lat/Lon fields',
'(filtered from _MAX_ total entries)': '(filtered from _MAX_ total entries)',
'* Required Fields': '* Required Fields',
'...or add a new bin': '...or add a new bin',
'0-15 minutes': '0-15 minutes',
'1 location, shorter time, can contain multiple Tasks': '1 location, shorter time, can contain multiple Tasks',
'1-3 days': '1-3 days',
'1. Fill the necessary fields in BLOCK CAPITAL letters.': '1. Fill the necessary fields in BLOCK CAPITAL letters.',
'15-30 minutes': '15-30 minutes',
'2 different options are provided here currently:': '2 different options are provided here currently:',
'2. Always use one box per letter and leave one box space to separate words.': '2. Always use one box per letter and leave one box space to separate words.',
'2x4 Car': '2x4 Car',
'3. Fill in the circles completely.': '3. Fill in the circles completely.',
'30-60 minutes': '30-60 minutes',
'3W Report': '3W Report',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 days',
'A Contributor can additionally Post comments to the proposed Solutions & add alternative Solutions': 'A Contributor can additionally Post comments to the proposed Solutions & add alternative Solutions',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.',
'A Moderator can additionally create Problems & control Memberships': 'A Moderator can additionally create Problems & control Memberships',
'A Participant can additionally Vote': 'A Participant can additionally Vote',
'A URL associating additional information with the alert message': 'A URL associating additional information with the alert message',
'A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.': 'A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A brief human-readable headline. Note that some displays (for example, short messaging service devices) may only present this headline; it should be made as direct and actionable as possible while remaining short. 160 characters may be a useful target limit for headline length.': 'A brief human-readable headline. Note that some displays (for example, short messaging service devices) may only present this headline; it should be made as direct and actionable as possible while remaining short. 160 characters may be a useful target limit for headline length.',
'A catalog of different Assessment Templates including summary information': 'A catalog of different Assessment Templates including summary information',
'A file in GPX format taken from a GPS.': 'A file in GPX format taken from a GPS.',
'A full absolute URI, typically a Uniform Resource Locator that can be used to retrieve the resource over the Internet.': 'A full absolute URI, typically a Uniform Resource Locator that can be used to retrieve the resource over the Internet.',
'A full, absolute URI for an HTML page or other text resource with additional or reference information regarding this alert.': 'A full, absolute URI for an HTML page or other text resource with additional or reference information regarding this alert.',
'A list of incident(s) referenced by the alert message': 'A list of incident(s) referenced by the alert message',
'A number or string uniquely identifying this message, assigned by the sender. Must notnclude spaces, commas or restricted characters (< and &).': 'A number or string uniquely identifying this message, assigned by the sender. Must not include spaces, commas or restricted characters (< and &).',
'A point and radius delineating the affected area': 'A point and radius delineating the affected area',
'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.',
'A strict location hierarchy cannot have gaps.': 'A strict location hierarchy cannot have gaps.',
'A system-specific additional parameter associated with the alert message': 'A system-specific additional parameter associated with the alert message',
'A system-specific code identifying the event type of the alert message': 'A system-specific code identifying the event type of the alert message',
'A task is a piece of work that an individual or team can do in 1-2 days.': 'A task is a piece of work that an individual or team can do in 1-2 days.',
'A text description of the affected area.': 'A text description of the affected area.',
'A unique identifier of the alert message': 'A unique identifier of the alert message',
'ABOUT CALCULATIONS': 'ABOUT CALCULATIONS',
'ABOUT THIS MODULE': 'ABOUT THIS MODULE',
'ABSOLUTE%(br)sDEVIATION': 'ABSOLUTE%(br)sDEVIATION',
'ACCESS DATA': 'ACCESS DATA',
'ACTION REQUIRED': 'ACTION REQUIRED',
'ALL REPORTS': 'ALL REPORTS',
'ANY': 'ANY',
'API is documented here': 'API is documented here',
'APPROVE REPORTS': 'APPROVE REPORTS',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Rapid Evaluation modified for New Zealand',
'AUTH TOKEN': 'AUTH TOKEN',
'Abbreviation': 'Abbreviation',
'Able to Respond?': 'Able to Respond?',
'About': 'Om',
'Above %s': 'Above %s',
'Accept Push': 'Accept Push',
'Accept unsolicited data transmissions from the repository.': 'Accept unsolicited data transmissions from the repository.',
'Accepted': 'Accepted',
'Access denied': 'Access denied',
'Access to Shelter': 'Access to Shelter',
'Access to education services': 'Access to education services',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Accompanying Relative': 'Accompanying Relative',
'Account Name': 'Account Name',
'Account Registered - Please Check Your Email': 'Account Registered - Please Check Your Email',
'Account added': 'Account added',
'Ack: Acknowledge receipt and acceptance of the message(s)': 'Ack: Acknowledge receipt and acceptance of the message(s)',
'Acronym': 'Acronym',
'Action': 'Action',
'Actioned': 'Actioned',
'Actioning officer': 'Actioning officer',
'Actions taken as a result of this request.': 'Actions taken as a result of this request.',
'Actions': 'Actions',
'Activate': 'Activate',
'Active Missions': 'Active Missions',
'Active Problems': 'Active Problems',
'Active': 'Active',
'Active?': 'Active?',
'Activities matching Assessments': 'Activities matching Assessments',
'Activities of boys 13-17yrs before disaster': 'Activities of boys 13-17yrs before disaster',
'Activities of boys 13-17yrs now': 'Activities of boys 13-17yrs now',
'Activities of boys <12yrs before disaster': 'Activities of boys <12yrs before disaster',
'Activities of boys <12yrs now': 'Activities of boys <12yrs now',
'Activities of children': 'Activities of children',
'Activities of girls 13-17yrs before disaster': 'Activities of girls 13-17yrs before disaster',
'Activities of girls 13-17yrs now': 'Activities of girls 13-17yrs now',
'Activities of girls <12yrs before disaster': 'Activities of girls <12yrs before disaster',
'Activities of girls <12yrs now': 'Activities of girls <12yrs now',
'Activities': 'Activities',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': 'Activity Details',
'Activity Level': 'Activity Level',
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type Added': 'Activity Type Added',
'Activity Type Deleted': 'Activity Type Deleted',
'Activity Type Updated': 'Activity Type Updated',
'Activity Type added to Activity': 'Activity Type added to Activity',
'Activity Type added to Project Location': 'Activity Type added to Project Location',
'Activity Type removed from Activity': 'Activity Type removed from Activity',
'Activity Type removed from Project Location': 'Activity Type removed from Project Location',
'Activity Type': 'Activity Type',
'Activity Types': 'Activity Types',
'Activity Updated': 'Activity Updated',
'Activity': 'Activity',
'Actual - actionable by all targeted recipients': 'Actual - actionable by all targeted recipients',
'Add %(site_label)s Status': 'Add %(site_label)s Status',
'Add Activity Type to Activity': 'Add Activity Type to Activity',
'Add Activity Type to Project Location': 'Add Activity Type to Project Location',
'Add Address': 'Add Address',
'Add Affiliation': 'Add Affiliation',
'Add Alert': 'Add Alert',
'Add Alternative Item': 'Add Alternative Item',
'Add Annual Budget': 'Add Annual Budget',
'Add Appraisal': 'Add Appraisal',
'Add Assessment Answer': 'Add Assessment Answer',
'Add Assessment Summary': 'Add Assessment Summary',
'Add Assessment': 'Add Assessment',
'Add Baseline Type': 'Add Baseline Type',
'Add Baseline': 'Add Baseline',
'Add Beneficiaries': 'Add Beneficiaries',
'Add Bookmark': 'Add Bookmark',
'Add Branch Organization': 'Add Branch Organization',
'Add Bundle': 'Add Bundle',
'Add Camp Service': 'Add Camp Service',
'Add Camp Status': 'Add Camp Status',
'Add Camp Type': 'Add Camp Type',
'Add Camp': 'Add Camp',
'Add Certificate for Course': 'Add Certificate for Course',
'Add Certification': 'Add Certification',
'Add Contact Information': 'Add Contact Information',
'Add Credential': 'Add Credential',
'Add Data to Theme Layer': 'Add Data to Theme Layer',
'Add Demographic Data': 'Add Demographic Data',
'Add Demographic': 'Add Demographic',
'Add Distribution Item': 'Add Distribution Item',
'Add Distribution': 'Add Distribution',
'Add Donation': 'Add Donation',
'Add Education': 'Add Education',
'Add Email Account': 'Add Email Account',
'Add Evacuation Route': 'Add Evacuation Route',
'Add Fire Station': 'Add Fire Station',
'Add Gauge': 'Add Gauge',
'Add Group Member': 'Add Group Member',
'Add Hazard to Project': 'Add Hazard to Project',
'Add Hours': 'Add Hours',
'Add Human Resource': 'Add Human Resource',
'Add Identity': 'Add Identity',
'Add Image': 'Add Image',
'Add Impact Type': 'Add Impact Type',
'Add Impact': 'Add Impact',
'Add Item to Catalog': 'Add Item to Catalog',
'Add Item to Commitment': 'Add Item to Commitment',
'Add Item to Procurement Plan': 'Add Item to Procurement Plan',
'Add Item to Request': 'Add Item to Request',
'Add Item to Shipment': 'Add Item to Shipment',
'Add Item to Stock': 'Add Item to Stock',
'Add Item': 'Add Item',
'Add Keyword': 'Add Keyword',
'Add Kit': 'Add Kit',
'Add Layer from Catalog': 'Add Layer from Catalog',
'Add Layer to this Profile': 'Add Layer to this Profile',
'Add Level 1 Assessment': 'Add Level 1 Assessment',
'Add Level 2 Assessment': 'Add Level 2 Assessment',
'Add Line': 'Add Line',
'Add Location to Organization': 'Add Location to Organization',
'Add Log Entry': 'Add Log Entry',
'Add Member': 'Add Member',
'Add Membership': 'Add Membership',
'Add Menu Entry': 'Add Menu Entry',
'Add Metadata': 'Add Metadata',
'Add Mobile Commons Settings': 'Add Mobile Commons Settings',
'Add New Alert': 'Add New Alert',
'Add New Assessment Summary': 'Add New Assessment Summary',
'Add New Assessment': 'Add New Assessment',
'Add New Baseline Type': 'Add New Baseline Type',
'Add New Baseline': 'Add New Baseline',
'Add New Beneficiaries': 'Add New Beneficiaries',
'Add New Beneficiary Type': 'Add New Beneficiary Type',
'Add New Branch': 'Add New Branch',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Camp Service': 'Add New Camp Service',
'Add New Camp Status': 'Add New Camp Status',
'Add New Camp Type': 'Add New Camp Type',
'Add New Camp': 'Add New Camp',
'Add New Campaign': 'Add New Campaign',
'Add New Cluster Subsector': 'Add New Cluster Subsector',
'Add New Cluster': 'Add New Cluster',
'Add New Coalition': 'Add New Coalition',
'Add New Commitment Item': 'Add New Commitment Item',
'Add New Community': 'Add New Community',
'Add New Demographic Data': 'Add New Demographic Data',
'Add New Demographic': 'Add New Demographic',
'Add New Detail': 'Add New Detail',
'Add New Distribution Item': 'Add New Distribution Item',
'Add New Distribution': 'Add New Distribution',
'Add New Document': 'Add New Document',
'Add New Donor': 'Add New Donor',
'Add New Email Account': 'Add New Email Account',
'Add New Entry': 'Add New Entry',
'Add New Evacuation Route': 'Add New Evacuation Route',
'Add New Event': 'Add New Event',
'Add New Fire Station': 'Add New Fire Station',
'Add New Gauge': 'Add New Gauge',
'Add New Home': 'Add New Home',
'Add New Human Resource': 'Add New Human Resource',
'Add New Image': 'Add New Image',
'Add New Impact Type': 'Add New Impact Type',
'Add New Impact': 'Add New Impact',
'Add New Information': 'Add New Information',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Item to Stock': 'Add New Item to Stock',
'Add New Keyword': 'Add New Keyword',
'Add New Layer to Symbology': 'Add New Layer to Symbology',
'Add New Level 1 Assessment': 'Add New Level 1 Assessment',
'Add New Level 2 Assessment': 'Add New Level 2 Assessment',
'Add New Mailing List': 'Add New Mailing List',
'Add New Member': 'Add New Member',
'Add New Membership Type': 'Add New Membership Type',
'Add New Membership': 'Add New Membership',
'Add New Menu Entry': 'Add New Menu Entry',
'Add New Mission': 'Add New Mission',
'Add New Network': 'Add New Network',
'Add New Organization Domain': 'Add New Organization Domain',
'Add New Output': 'Add New Output',
'Add New Participant': 'Add New Participant',
'Add New Patient': 'Add New Patient',
'Add New People': 'Add New People',
'Add New Population Statistic': 'Add New Population Statistic',
'Add New Problem': 'Add New Problem',
'Add New Profile Configuration': 'Add New Profile Configuration',
'Add New Rapid Assessment': 'Add New Rapid Assessment',
'Add New Recipient': 'Add New Recipient',
'Add New Record': 'Add New Record',
'Add New Region': 'Add New Region',
'Add New Relative': 'Add New Relative',
'Add New Report': 'Add New Report',
'Add New Request': 'Add New Request',
'Add New Resource Type': 'Add New Resource Type',
'Add New Response Summary': 'Add New Response Summary',
'Add New Response': 'Add New Response',
'Add New Risk': 'Add New Risk',
'Add New River': 'Add New River',
'Add New Scenario': 'Add New Scenario',
'Add New Security-Related Staff': 'Add New Security-Related Staff',
'Add New Shipment Item': 'Add New Shipment Item',
'Add New Site': 'Add New Site',
'Add New Solution': 'Add New Solution',
'Add New Staff Assignment': 'Add New Staff Assignment',
'Add New Staff Type': 'Add New Staff Type',
'Add New Subsector': 'Add New Subsector',
'Add New Team Member': 'Add New Team Member',
'Add New Team': 'Add New Team',
'Add New Type of People': 'Add New Type of People',
'Add New Type of Trained People': 'Add New Type of Trained People',
'Add New Vehicle Assignment': 'Add New Vehicle Assignment',
'Add New Vehicle Type': 'Add New Vehicle Type',
'Add New Vehicle': 'Add New Vehicle',
'Add New Vulnerability Aggregated Indicator': 'Add New Vulnerability Aggregated Indicator',
'Add New Vulnerability Data': 'Add New Vulnerability Data',
'Add New Vulnerability Indicator': 'Add New Vulnerability Indicator',
'Add New Warehouse Type': 'Add New Warehouse Type',
'Add New Zone Type': 'Add New Zone Type',
'Add New Zone': 'Add New Zone',
'Add Order': 'Add Order',
'Add Organization Domain': 'Add Organization Domain',
'Add Organization Needs': 'Add Organization Needs',
'Add Organization to Project': 'Add Organization to Project',
'Add Participant': 'Add Participant',
'Add Partner Organization': 'Add Partner Organization',
'Add People': 'Add People',
'Add People to Commitment': 'Add People to Commitment',
'Add Person to Commitment': 'Add Person to Commitment',
'Add Person': 'Add Person',
'Add Photo': 'Add Photo',
'Add Point': 'Add Point',
'Add Polygon': 'Add Polygon',
'Add Population Statistic': 'Add Population Statistic',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Procurement Plan': 'Add Procurement Plan',
'Add Professional Experience': 'Add Professional Experience',
'Add Profile Configuration for this Layer': 'Add Profile Configuration for this Layer',
'Add Profile Configuration': 'Add Profile Configuration',
'Add Query': 'Add Query',
'Add RDRT Members': 'Add RDRT Members',
'Add RSS Settings': 'Add RSS Settings',
'Add Rapid Assessment': 'Add Rapid Assessment',
'Add Recipient': 'Add Recipient',
'Add Record': 'Add Record',
'Add Region': 'Add Region',
'Add Report': 'Add Report',
'Add Request Template': 'Add Request Template',
'Add Request': 'Add Request',
'Add Resource Type': 'Add Resource Type',
'Add Response Summary': 'Add Response Summary',
'Add Response': 'Add Response',
'Add Risk': 'Add Risk',
'Add River': 'Add River',
'Add Section': 'Add Section',
'Add Sector to Organization': 'Add Sector to Organization',
'Add Sector to Project': 'Add Sector to Project',
'Add Sector to Theme': 'Add Sector to Theme',
'Add Security-Related Staff': 'Add Security-Related Staff',
'Add Service to Organization': 'Add Service to Organization',
'Add Setting': 'Add Setting',
'Add Site Needs': 'Add Site Needs',
'Add Skill Equivalence': 'Add Skill Equivalence',
'Add Skill Provision': 'Add Skill Provision',
'Add Skill to Request': 'Add Skill to Request',
'Add Solution': 'Add Solution',
'Add Staff Assignment': 'Add Staff Assignment',
'Add Staff Member to Project': 'Add Staff Member to Project',
'Add Staff Type': 'Add Staff Type',
'Add Stock to Warehouse': 'Add Stock to Warehouse',
'Add Subsector': 'Add Subsector',
'Add Supplier': 'Add Supplier',
'Add Symbology to Layer': 'Add Symbology to Layer',
'Add Team Member': 'Add Team Member',
'Add Team': 'Add Team',
'Add Template Section': 'Add Template Section',
'Add Theme to Activity': 'Add Theme to Activity',
'Add Theme to Project Location': 'Add Theme to Project Location',
'Add Theme to Project': 'Add Theme to Project',
'Add Trained People': 'Add Trained People',
'Add Training': 'Add Training',
'Add Translation Language': 'Add Translation Language',
'Add Twilio Settings': 'Add Twilio Settings',
'Add Twitter Search Query': 'Add Twitter Search Query',
'Add Type of People': 'Add Type of People',
'Add Type of Trained People': 'Add Type of Trained People',
'Add Unit': 'Add Unit',
'Add Vehicle Assignment': 'Add Vehicle Assignment',
'Add Vehicle Category': 'Add Vehicle Category',
'Add Vehicle Details': 'Add Vehicle Details',
'Add Vehicle Type': 'Add Vehicle Type',
'Add Vehicle': 'Add Vehicle',
'Add Volunteer Role': 'Add Volunteer Role',
'Add Volunteer to Project': 'Add Volunteer to Project',
'Add Vulnerability Aggregated Indicator': 'Add Vulnerability Aggregated Indicator',
'Add Vulnerability Data': 'Add Vulnerability Data',
'Add Vulnerability Indicator': 'Add Vulnerability Indicator',
'Add Warehouse Type': 'Add Warehouse Type',
'Add Zone Type': 'Add Zone Type',
'Add Zone': 'Add Zone',
'Add a Whitelisted Sender': 'Add a Whitelisted Sender',
'Add a description': 'Add a description',
'Add a new Assessment Answer': 'Add a new Assessment Answer',
'Add a new Assessment Question': 'Add a new Assessment Question',
'Add a new Assessment Template': 'Add a new Assessment Template',
'Add a new Completed Assessment Form': 'Add a new Completed Assessment Form',
'Add a new Disaster Assessment': 'Add a new Disaster Assessment',
'Add a new Template Section': 'Add a new Template Section',
'Add a new certificate to the catalog.': 'Add a new certificate to the catalog.',
'Add a new competency rating to the catalog.': 'Add a new competency rating to the catalog.',
'Add a new job role to the catalog.': 'Add a new job role to the catalog.',
'Add a new membership type to the catalog.': 'Add a new membership type to the catalog.',
'Add a new program to the catalog.': 'Add a new program to the catalog.',
'Add a new skill provision to the catalog.': 'Add a new skill provision to the catalog.',
'Add a new skill type to the catalog.': 'Add a new skill type to the catalog.',
'Add a new vehicle category': 'Add a new vehicle category',
'Add a new vehicle type': 'Add a new vehicle type',
'Add alert information': 'Add alert information',
'Add all organizations which are involved in different roles in this project': 'Add all organizations which are involved in different roles in this project',
'Add as RDRT Members': 'Add as RDRT Members',
'Add new Dataset Price': 'Add new Dataset Price',
'Add new Question Meta-Data': 'Add new Question Meta-Data',
'Add new Station Parameter': 'Add new Station Parameter',
'Add saved search': 'Add saved search',
'Add strings manually through a text file': 'Add strings manually through a text file',
'Add this entry': 'Add this entry',
'Add to Bin': 'Add to Bin',
'Add to Bundle': 'Add to Bundle',
'Add to a Team': 'Add to a Team',
'Add to budget': 'Add to budget',
'Add': 'Add',
'Add...': 'Add...',
'Added to Group': 'Added to Group',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Additional Description of Damage': 'Additional Description of Damage',
'Additional Needs': 'Additional Needs',
'Address Details': 'Address Details',
'Address Mapped': 'Address Mapped',
'Address NOT Mapped': 'Address NOT Mapped',
'Address Type': 'Address Type',
'Address added': 'Address added',
'Address deleted': 'Address deleted',
'Address updated': 'Address updated',
'Address': 'Address',
'Addresses': 'Addresses',
'Adequate food and water available': 'Adequate food and water available',
'Adequate': 'Adequate',
'Adjust Item Quantity': 'Adjust Item Quantity',
'Adjust Stock Item': 'Adjust Stock Item',
'Adjust Stock Levels': 'Adjust Stock Levels',
'Adjust Stock': 'Adjust Stock',
'Adjustment created': 'Adjustment created',
'Adjustment deleted': 'Adjustment deleted',
'Adjustment modified': 'Adjustment modified',
'Administration': 'Administration',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adolescent participating in coping activities': 'Adolescent participating in coping activities',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Adult Psychiatric',
'Adult female': 'Adult female',
'Adult male': 'Adult male',
'Adults in prisons': 'Adults in prisons',
'Advanced Search': 'Advanced Search',
'Advisory': 'Advisory',
'Affected Persons': 'Affected Persons',
'Affiliation Details': 'Affiliation Details',
'Affiliation added': 'Affiliation added',
'Affiliation deleted': 'Affiliation deleted',
'Affiliation updated': 'Affiliation updated',
'Affiliations': 'Affiliations',
'Age Group': 'Age Group',
'Age': 'Age',
'Aggregation Type': 'Aggregation Type',
'Agriculture': 'Agriculture',
'Air Transport Service': 'Air Transport Service',
'Air': 'Air',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Aircraft Maximum Size': 'Aircraft Maximum Size',
'Airport Closure': 'Airport Closure',
'Airport Details': 'Airport Details',
'Airport added': 'Airport added',
'Airport deleted': 'Airport deleted',
'Airport updated': 'Airport updated',
'Airport': 'Airport',
'Airports': 'Airports',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert Details updated': 'Alert Details updated',
'Alert Details': 'Alert Details',
'Alert Information': 'Alert Information',
'Alert Qualifiers': 'Alert Qualifiers',
'Alert Sent': 'Alert Sent',
'Alert added': 'Alert added',
'Alert created': 'Alert created',
'Alert deleted': 'Alert deleted',
'Alert information created': 'Alert information created',
'Alert information deleted': 'Alert information deleted',
'Alert information modified': 'Alert information modified',
'Alert information': 'Alert information',
'Alert modified': 'Alert modified',
'Alert': 'Alert',
'Alert: Initial information requiring attention by targeted recipients': 'Alert: Initial information requiring attention by targeted recipients',
'Alerts': 'Alerts',
'Alimentary Support Vehicle': 'Alimentary Support Vehicle',
'All Entities': 'All Entities',
'All Inbound & Outbound Messages are stored here': 'All Inbound & Outbound Messages are stored here',
'All Open Tasks': 'All Open Tasks',
'All Purchased Data': 'All Purchased Data',
'All Records': 'All Records',
'All Resources': 'All Resources',
'All Response Messages': 'All Response Messages',
'All Tasks': 'All Tasks',
'All Teams Must Have Personal Protectivity Equipment': 'All Teams Must Have Personal Protective Equipment',
'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licensed under a Creative Commons Attribution license. However, not all data originates here. Please consult the source field of each entry.',
'All reports': 'All reports',
'All selected': 'All selected',
'All': 'All',
'AllClear - The subject event no longer poses a threat': 'AllClear - The subject event no longer poses a threat',
'Alternative Item Details': 'Alternative Item Details',
'Alternative Item added': 'Alternative Item added',
'Alternative Item deleted': 'Alternative Item deleted',
'Alternative Item updated': 'Alternative Item updated',
'Alternative Items': 'Alternative Items',
'Alternative places for studying': 'Alternative places for studying',
'Ambulance Service': 'Ambulance Service',
'Amount of the Project Budget spent at this location': 'Amount of the Project Budget spent at this location',
'Amount': 'Amount',
'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can be analyzed as tables, charts and maps',
'An ESRI Shapefile (zipped)': 'An ESRI Shapefile (zipped)',
'An Item Category must have a Code OR a Name.': 'An Item Category must have a Code OR a Name.',
'An alert needs to contain at least one info item.': 'An alert needs to contain at least one info item.',
'An extended human readable description of the hazard or event that occasioned this message.': 'An extended human readable description of the hazard or event that occasioned this message.',
'An extended human readable instruction to targeted recipients. If different instructions are intended for different recipients, they should be represented by use of multiple information blocks. You can use a different information block also to specify this information in a different language.': 'An extended human readable instruction to targeted recipients. If different instructions are intended for different recipients, they should be represented by use of multiple information blocks. You can use a different information block also to specify this information in a different language.',
'Analysis': 'Analysis',
'Analyze with KeyGraph': 'Analyze with KeyGraph',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Annual Budget deleted': 'Annual Budget deleted',
'Annual Budget updated': 'Annual Budget updated',
'Annual Budget': 'Annual Budget',
'Annual Budgets': 'Annual Budgets',
'Anonymous': 'Anonymous',
'Antibiotics available': 'Antibiotics available',
'Antibiotics needed per 24h': 'Antibiotics needed per 24h',
'Any system-specific code for events, in the form of key-value pairs. (e.g., SAME, FIPS, ZIP).': 'Any system-specific code for events, in the form of key-value pairs. (e.g., SAME, FIPS, ZIP).',
'Any system-specific datum, in the form of key-value pairs.': 'Any system-specific datum, in the form of key-value pairs.',
'Any user-defined flags or special codes used to flag the alert message for special handling.': 'Any user-defined flags or special codes used to flag the alert message for special handling.',
'Any': 'Any',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Applicable to projects in Pacific countries only': 'Applicable to projects in Pacific countries only',
'Application Deadline': 'Application Deadline',
'Application Permissions': 'Application Permissions',
'Application': 'Application',
'Apply a template': 'Apply a template',
'Appraisal Details': 'Appraisal Details',
'Appraisal added': 'Appraisal added',
'Appraisal deleted': 'Appraisal deleted',
'Appraisal updated': 'Appraisal updated',
'Appraisals': 'Appraisals',
'Approval pending': 'Approval pending',
'Approval request submitted': 'Approval request submitted',
'Approve': 'Approve',
'Approved By': 'Approved By',
'Approved by %(first_name)s.%(last_name)s': 'Approved by %(first_name)s.%(last_name)s',
'Approved': 'Approved',
'Approver': 'Approver',
'Approximate size of the resource file in bytes.': 'Approximate size of the resource file in bytes.',
'ArcGIS REST Layer': 'ArcGIS REST Layer',
'Arctic Outflow': 'Arctic Outflow',
'Are you sure you want to commit to this request and send a shipment?': 'Are you sure you want to commit to this request and send a shipment?',
'Are you sure you want to send this shipment?': 'Are you sure you want to send this shipment?',
'Are you susbscribed?': 'Are you subscribed?',
'Area description': 'Area description',
'Areas inspected': 'Areas inspected',
'Arguments': 'Arguments',
'Arrived': 'Arrived',
'As of yet, no sections have been added to this template.': 'As of yet, no sections have been added to this template.',
'Asbestos': 'Asbestos',
'Assess - Evaluate the information in this message.': 'Assess - Evaluate the information in this message.',
'Assessed': 'Assessed',
'Assessment Answer Details': 'Assessment Answer Details',
'Assessment Answer added': 'Assessment Answer added',
'Assessment Answer deleted': 'Assessment Answer deleted',
'Assessment Answer updated': 'Assessment Answer updated',
'Assessment Answers': 'Assessment Answers',
'Assessment Details': 'Assessment Details',
'Assessment Question Details': 'Assessment Question Details',
'Assessment Question added': 'Assessment Question added',
'Assessment Question deleted': 'Assessment Question deleted',
'Assessment Question updated': 'Assessment Question updated',
'Assessment Questions': 'Assessment Questions',
'Assessment Reported': 'Assessment Reported',
'Assessment Summaries': 'Assessment Summaries',
'Assessment Summary Details': 'Assessment Summary Details',
'Assessment Summary added': 'Assessment Summary added',
'Assessment Summary deleted': 'Assessment Summary deleted',
'Assessment Summary updated': 'Assessment Summary updated',
'Assessment Template Details': 'Assessment Template Details',
'Assessment Template added': 'Assessment Template added',
'Assessment Template deleted': 'Assessment Template deleted',
'Assessment Template updated': 'Assessment Template updated',
'Assessment Templates': 'Assessment Templates',
'Assessment added': 'Assessment added',
'Assessment deleted': 'Assessment deleted',
'Assessment updated': 'Assessment updated',
'Assessment': 'Assessment',
'Assessments': 'Assessments',
'Assessor 1': 'Assessor 1',
'Assessor 2': 'Assessor 2',
'Assessor': 'Assessor',
'Asset Details': 'Asset Details',
'Asset Item': 'Asset Item',
'Asset Log Details': 'Asset Log Details',
'Asset Log Empty': 'Asset Log Empty',
'Asset Log Entry deleted': 'Asset Log Entry deleted',
'Asset Log Entry updated': 'Asset Log Entry updated',
'Asset Log': 'Asset Log',
'Asset Number': 'Asset Number',
'Asset added': 'Asset added',
'Asset deleted': 'Asset deleted',
'Asset removed': 'Asset removed',
'Asset updated': 'Asset updated',
'Asset': 'Asset',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Assets are resources which are not consumable but are expected back, so they need tracking.',
'Assets': 'Tillgångar',
'Assign %(staff)s': 'Assign %(staff)s',
'Assign Asset': 'Assign Asset',
'Assign Facility': 'Assign Facility',
'Assign Human Resource': 'Assign Human Resource',
'Assign New Human Resource': 'Assign New Human Resource',
'Assign Role to a User': 'Assign Role to a User',
'Assign Roles': 'Assign Roles',
'Assign Staff': 'Assign Staff',
'Assign Vehicle': 'Assign Vehicle',
'Assign another Role': 'Assign another Role',
'Assign to Facility/Site': 'Assign to Facility/Site',
'Assign to Organization': 'Assign to Organization',
'Assign to Person': 'Assign to Person',
'Assign': 'Assign',
'Assigned By': 'Assigned By',
'Assigned Human Resources': 'Assigned Human Resources',
'Assigned Roles': 'Assigned Roles',
'Assigned To': 'Assigned To',
'Assigned to Facility/Site': 'Assigned to Facility/Site',
'Assigned to Organization': 'Assigned to Organization',
'Assigned to Person': 'Assigned to Person',
'Assigned to': 'Assigned to',
'Assigned': 'Assigned',
'Assignments': 'Assignments',
'Association': 'Association',
'At or below %s': 'At or below %s',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attachments': 'Attachments',
'Attributes': 'Attributes',
'Attribution': 'Attribution',
'Audience': 'Audience',
'Authentication Required': 'Authentication Required',
'Author': 'Author',
'Auto start': 'Auto start',
'Availability': 'Availability',
'Available Alternative Inventories': 'Available Alternative Inventories',
'Available Beds': 'Available Beds',
'Available Databases and Tables': 'Available Databases and Tables',
'Available Forms': 'Available Forms',
'Available Inventories': 'Available Inventories',
'Available for Location': 'Available for Location',
'Available in Viewer?': 'Available in Viewer?',
'Avalanche': 'Avalanche',
'Average Rating': 'Average Rating',
'Average': 'Average',
'Avoid - Avoid the subject event as per the instruction': 'Avoid - Avoid the subject event as per the instruction',
'Award added': 'Award added',
'Award deleted': 'Award deleted',
'Award updated': 'Award updated',
'Award': 'Award',
'Awards': 'Awards',
'BACK TO %(system_name_short)s': 'BACK TO %(system_name_short)s',
'BACK TO MAP VIEW': 'BACK TO MAP VIEW',
'BROWSE OTHER REGIONS': 'BROWSE OTHER REGIONS',
'Back to Roles List': 'Back to Roles List',
'Back to Top': 'Back to Top',
'Back to Users List': 'Back to Users List',
'Back to the main screen': 'Back to the main screen',
'Back': 'Back',
'Background Color': 'Background Color',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Banana': 'Banana',
'Bank/micro finance': 'Bank/micro finance',
'Barge Capacity': 'Barge Capacity',
'Barricades are needed': 'Barricades are needed',
'Base %(facility)s Set': 'Base %(facility)s Set',
'Base Facility/Site Set': 'Base Facility/Site Set',
'Base Layer?': 'Base Layer?',
'Base Layers': 'Base Layers',
'Base Location': 'Base Location',
'Base Station Details': 'Base Station Details',
'Base Station added': 'Base Station added',
'Base Station deleted': 'Base Station deleted',
'Base Station updated': 'Base Station updated',
'Base Stations': 'Base Stations',
'Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden': 'Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden',
'Based on the DOB/FEMA sticker, the property is': 'Based on the DOB/FEMA sticker, the property is',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline Type Details': 'Baseline Type Details',
'Baseline Type added': 'Baseline Type added',
'Baseline Type deleted': 'Baseline Type deleted',
'Baseline Type updated': 'Baseline Type updated',
'Baseline Type': 'Baseline Type',
'Baseline Types': 'Baseline Types',
'Baseline added': 'Baseline added',
'Baseline deleted': 'Baseline deleted',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Baseline updated': 'Baseline updated',
'Baseline': 'Baseline',
'Baselines Details': 'Baselines Details',
'Baselines': 'Baselines',
'Basement Flooding': 'Basement Flooding',
'Basic Assessment Reported': 'Basic Assessment Reported',
'Basic Assessment': 'Basic Assessment',
'Basic Details': 'Basic Details',
'Basic Search': 'Basic Search',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Baud': 'Baud',
'Beam': 'Beam',
'Bed Capacity': 'Bed Capacity',
'Bed Type': 'Bed Type',
'Bed type already registered': 'Bed type already registered',
'Below ground level': 'Below ground level',
'Beneficiaries Added': 'Beneficiaries Added',
'Beneficiaries Deleted': 'Beneficiaries Deleted',
'Beneficiaries Details': 'Beneficiaries Details',
'Beneficiaries Updated': 'Beneficiaries Updated',
'Beneficiaries': 'Beneficiaries',
'Beneficiary Report': 'Beneficiary Report',
'Beneficiary Type Added': 'Beneficiary Type Added',
'Beneficiary Type Deleted': 'Beneficiary Type Deleted',
'Beneficiary Type Updated': 'Beneficiary Type Updated',
'Beneficiary Type': 'Beneficiary Type',
'Beneficiary Types': 'Beneficiary Types',
'Beneficiary': 'Beneficiary',
'Big Capacity Tank Vehicle': 'Big Capacity Tank Vehicle',
'Bin': 'Bin',
'Bing Layer': 'Bing Layer',
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'Biscuits',
'Blizzard': 'Blizzard',
'Blocked': 'Blocked',
'Blood Type (AB0)': 'Blood Type (ABO)',
'Blowing Snow': 'Blowing Snow',
'Boat': 'Boat',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Bodies': 'Bodies',
'Body Hair': 'Body Hair',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Body': 'Body',
'Bomb Explosion': 'Bomb Explosion',
'Bomb Threat': 'Bomb Threat',
'Bomb': 'Bomb',
'Boots': 'Boots',
'Both': 'Both',
'Branch Coordinator': 'Branch Coordinator',
'Branch Organization Details': 'Branch Organization Details',
'Branch Organization added': 'Branch Organization added',
'Branch Organization deleted': 'Branch Organization deleted',
'Branch Organization updated': 'Branch Organization updated',
'Branch Organizations': 'Branch Organizations',
'Branch': 'Branch',
'Branches': 'Branches',
'Brand Details': 'Brand Details',
'Brand added': 'Brand added',
'Brand deleted': 'Brand deleted',
'Brand updated': 'Brand updated',
'Brand': 'Brand',
'Brands': 'Brands',
'Breakdown': 'Breakdown',
'Brick': 'Brick',
'Bricks': 'Bricks',
'Bridge Closed': 'Bridge Closed',
'Brooms': 'Brooms',
'Bucket': 'Bucket',
'Buddhist': 'Buddhist',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budget': 'Budget',
'Budgets': 'Budgets',
'Buffer': 'Buffer',
'Bug': 'Bug',
'Building Collapsed': 'Building Collapsed',
'Building Name or Address': 'Building Name or Address',
'Building Name': 'Building Name',
'Building Short Name/Business Name': 'Building Short Name/Business Name',
'Building or storey leaning': 'Building or storey leaning',
'Built using the Template agreed by a group of NGOs working together as the': 'Built using the Template agreed by a group of NGOs working together as the',
'Bulk Uploader': 'Bulk Uploader',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Bundle Updated',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundle': 'Bundle',
'Bundles': 'Bundles',
'Burn ICU': 'Burn ICU',
'Burn': 'Burn',
'Burned/charred': 'Burned/charred',
'Business Damaged': 'Business Damaged',
'Business': 'Business',
'Button name': 'Button name',
'By %(site)s': 'By %(site)s',
'By selecting this you agree that we may contact you.': 'By selecting this you agree that we may contact you.',
'CAP': 'CAP',
'CLOSED': 'CLOSED',
'COPY': 'COPY',
'CREATE': 'CREATE',
'CSV file needs to have at least 2 columns!': 'CSV file needs to have at least 2 columns!',
'CTN': 'CTN',
'CV': 'CV',
'Cache Keys': 'Cache Keys',
'Cache': 'Cache',
'Calculated using the Secure Hash Algorithm (SHA-1).': 'Calculated using the Secure Hash Algorithm (SHA-1).',
'Calculation': 'Calculation',
'Calendar': 'Calendar',
'Camp Details': 'Camp Details',
'Camp Service Details': 'Camp Service Details',
'Camp Service added': 'Camp Service added',
'Camp Service deleted': 'Camp Service deleted',
'Camp Service updated': 'Camp Service updated',
'Camp Service': 'Camp Service',
'Camp Services': 'Camp Services',
'Camp Status Details': 'Camp Status Details',
'Camp Status added': 'Camp Status added',
'Camp Status deleted': 'Camp Status deleted',
'Camp Status updated': 'Camp Status updated',
'Camp Status': 'Camp Status',
'Camp Statuses': 'Camp Statuses',
'Camp Type Details': 'Camp Type Details',
'Camp Type added': 'Camp Type added',
'Camp Type deleted': 'Camp Type deleted',
'Camp Type updated': 'Camp Type updated',
'Camp Type': 'Camp Type',
'Camp Types': 'Camp Types',
'Camp added': 'Camp added',
'Camp deleted': 'Camp deleted',
'Camp updated': 'Camp updated',
'Camp': 'Camp',
'Campaign Added': 'Campaign Added',
'Campaign Deleted': 'Campaign Deleted',
'Campaign ID': 'Campaign ID',
'Campaign Message': 'Campaign Message',
'Campaign Updated': 'Campaign Updated',
'Campaign': 'Campaign',
'Campaigns': 'Campaigns',
'Camps': 'Camps',
'Can only approve 1 record at a time!': 'Can only approve 1 record at a time!',
'Can only disable 1 record at a time!': 'Can only disable 1 record at a time!',
'Can only update 1 record at a time!': 'Can only update 1 record at a time!',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.',
'Cancel Crop': 'Cancel Crop',
'Cancel Log Entry': 'Cancel Log Entry',
'Cancel Shipment': 'Cancel Shipment',
'Cancel editing': 'Cancel editing',
'Cancel': 'Cancel',
'Cancel: Cancel earlier message(s)': 'Cancel: Cancel earlier message(s)',
'Canceled': 'Canceled',
'Cancelled': 'Cancelled',
'Candidate Matches for Body %(label)s': 'Candidate Matches for Body %(label)s',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot disable your own account!': 'Cannot disable your own account!',
'Cannot make an Organization a branch of itself!': 'Cannot make an Organization a branch of itself!',
'Cannot open created OSM file!': 'Cannot open created OSM file!',
'Cannot read from file: %(filename)s': 'Cannot read from file: %(filename)s',
'Cannot send messages if Messaging module disabled': 'Cannot send messages if Messaging module disabled',
'Capacity (Day / Evacuation)': 'Capacity (Day / Evacuation)',
'Capacity (Day)': 'Capacity (Day)',
'Capacity (Night / Post-Impact)': 'Capacity (Night / Post-Impact)',
'Capacity (Night)': 'Capacity (Night)',
'Cardiology': 'Cardiology',
'Cargo Pier Depth': 'Cargo Pier Depth',
'Case Details': 'Case Details',
'Case Number': 'Case Number',
'Case added': 'Case added',
'Case deleted': 'Case deleted',
'Case updated': 'Case updated',
'Cases': 'Cases',
'Cassava': 'Cassava',
'Casual Labor': 'Casual Labor',
'Casualties': 'Casualties',
'Catalog Details': 'Catalog Details',
'Catalog Item added': 'Catalog Item added',
'Catalog Item deleted': 'Catalog Item deleted',
'Catalog Item updated': 'Catalog Item updated',
'Catalog Items': 'Catalog Items',
'Catalog added': 'Catalog added',
'Catalog deleted': 'Catalog deleted',
'Catalog updated': 'Catalog updated',
'Catalog': 'Catalog',
'Catalogs': 'Catalogs',
'Categories': 'Categories',
'Category': 'Category',
'Ceilings, light fixtures': 'Ceilings, light fixtures',
'Cell Tower': 'Cell Tower',
'Certainty unknown': 'Certainty unknown',
'Certainty': 'Certainty',
'Certificate Catalog': 'Certificate Catalog',
'Certificate Details': 'Certificate Details',
'Certificate Status': 'Certificate Status',
'Certificate added': 'Certificate added',
'Certificate deleted': 'Certificate deleted',
'Certificate updated': 'Certificate updated',
'Certificate': 'Certificate',
'Certificates': 'Certificates',
'Certification Details': 'Certification Details',
'Certification added': 'Certification added',
'Certification deleted': 'Certification deleted',
'Certification updated': 'Certification updated',
'Certifications': 'Certifications',
'Certifying Organization': 'Certifying Organization',
'Chalk line': 'Chalk line',
'Change Password': 'Byt Lösenord',
'Changes': 'Changes',
'Channel': 'Channel',
'Chart': 'Chart',
'Check Request': 'Check Request',
'Check all': 'Check all',
'Check for errors in the URL, maybe the address was mistyped.': 'Check for errors in the URL, maybe the address was mistyped.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Check if the URL is pointing to a directory instead of a webpage.',
'Check outbox for the message status': 'Check outbox for the message status',
'Check this box when you have read, ': 'Check this box when you have read, ',
'Check this to make your search viewable by others.': 'Check this to make your search viewable by others.',
'Check to delete': 'Check to delete',
'Check': 'Check',
'Check-In': 'Check-In',
'Check-Out': 'Check-Out',
'Check-in at Facility': 'Check-in at Facility',
'Checked': 'Checked',
'Checking your file...': 'Checking your file...',
'Checklist Item': 'Checklist Item',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Checklist': 'Checklist',
'Checklists': 'Checklists',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Child': 'Child',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Choose Country': 'Choose Country',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.',
'Choose': 'Choose',
'Christian': 'Christian',
'Church': 'Church',
'Circumstances of disappearance': 'Circumstances of disappearance',
'Civil Emergency': 'Civil Emergency',
'Clean Instance': 'Clean Instance',
'Clear all': 'Clear all',
'Clear filter': 'Clear filter',
'Clear selection': 'Clear selection',
'Clear': 'Clear',
'Click on the map to add the points that make up your polygon. Double-click to finish drawing.': 'Click on the map to add the points that make up your polygon. Double-click to finish drawing.',
'Click on the slider to choose a value': 'Click on the slider to choose a value',
'Click to dive in to regions or rollover to see more': 'Click to dive into regions or roll over to see more',
'Click to edit': 'Click to edit',
'Click where you want to open Streetview': 'Click where you want to open Streetview',
'Climate Data': 'Climate Data',
'Climate Parameter': 'Climate Parameter',
'Climate': 'Climate',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close': 'Close',
'Closed': 'Closed',
'Closed?': 'Closed?',
'Cloth rags': 'Cloth rags',
'Clothing': 'Clothing',
'Cluster Attribute': 'Cluster Attribute',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Cluster added',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster': 'Cluster',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Coalition Details': 'Coalition Details',
'Coalition added': 'Coalition added',
'Coalition removed': 'Coalition removed',
'Coalition updated': 'Coalition updated',
'Coalition': 'Coalition',
'Coalitions': 'Coalitions',
'Code Share': 'Code Share',
'Code Values: Natural language identifier per [RFC 3066]. If not present, an implicit default value of \'en-US\' will be assumed. Edit settings.cap.languages in 000_config.py to add more languages. See <a href="%s">here</a> for a full list.': 'Code Values: Natural language identifier per [RFC 3066]. If not present, an implicit default value of \'en-US\' will be assumed. Edit settings.cap.languages in 000_config.py to add more languages. See <a href="%s">here</a> for a full list.',
'Code': 'Code',
'Codes for special handling of the message': 'Codes for special handling of the message',
'Codes': 'Codes',
'Cold Wave': 'Cold Wave',
'Collapse, partial collapse, off foundation': 'Collapse, partial collapse, off foundation',
'Collect PIN from Twitter': 'Collect PIN from Twitter',
'Collective center': 'Collective center',
'Columns, pilasters, corbels': 'Columns, pilasters, corbels',
'Combined Method': 'Combined Method',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Come back later.': 'Come back later.',
'Command Tactical Operational Vehicle': 'Command Tactical Operational Vehicle',
'Comment': 'Comment',
'Comments permitted?': 'Comments permitted?',
'Comments': 'Comments',
'Commercial/Offices': 'Commercial/Offices',
'Commit All': 'Commit All',
'Commit Date': 'Commit Date',
'Commit Status': 'Commit Status',
'Commit': 'Commit',
'Commit. Status': 'Commit. Status',
'Commitment Added': 'Commitment Added',
'Commitment Canceled': 'Commitment Canceled',
'Commitment Details': 'Commitment Details',
'Commitment Item Details': 'Commitment Item Details',
'Commitment Item added': 'Commitment Item added',
'Commitment Item deleted': 'Commitment Item deleted',
'Commitment Item updated': 'Commitment Item updated',
'Commitment Items': 'Commitment Items',
'Commitment Updated': 'Commitment Updated',
'Commitment': 'Commitment',
'Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Commitments can be made against these Requests; however, the requests remain open until the requestor confirms that the request is complete.',
'Commitments': 'Commitments',
'Committed By': 'Committed By',
'Committed Items': 'Committed Items',
'Committed People Details': 'Committed People Details',
'Committed People updated': 'Committed People updated',
'Committed People': 'Committed People',
'Committed Person Details': 'Committed Person Details',
'Committed Person updated': 'Committed Person updated',
'Committed Skills': 'Committed Skills',
'Committed': 'Committed',
'Committing Organization': 'Committing Organization',
'Committing Person': 'Committing Person',
'Committing Warehouse': 'Committing Warehouse',
'Commodities Loaded': 'Commodities Loaded',
'Communication problems': 'Communication problems',
'Communities': 'Communities',
'Community Added': 'Community Added',
'Community Centre': 'Community Centre',
'Community Contacts': 'Community Contacts',
'Community Deleted': 'Community Deleted',
'Community Details': 'Community Details',
'Community Health Center': 'Community Health Center',
'Community Member': 'Community Member',
'Community Updated': 'Community Updated',
'Community': 'Community',
'Company': 'Company',
'Competency Rating Catalog': 'Competency Rating Catalog',
'Competency Rating Details': 'Competency Rating Details',
'Competency Rating added': 'Competency Rating added',
'Competency Rating deleted': 'Competency Rating deleted',
'Competency Rating updated': 'Competency Rating updated',
'Competency Rating': 'Competency Rating',
'Competency': 'Competency',
'Complete Adjustment': 'Complete Adjustment',
'Complete Returns': 'Complete Returns',
'Complete': 'Complete',
'Complete? Please call': 'Complete? Please call',
'Completed Assessment Form Details': 'Completed Assessment Form Details',
'Completed Assessment Form deleted': 'Completed Assessment Form deleted',
'Completed Assessment Form entered': 'Completed Assessment Form entered',
'Completed Assessment Form updated': 'Completed Assessment Form updated',
'Completed Assessment Forms': 'Completed Assessment Forms',
'Completed Assessments': 'Completed Assessments',
'Completed tour?': 'Completed tour?',
'Completed': 'Completed',
'Completion Question': 'Completion Question',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Concrete frame': 'Concrete frame',
'Concrete shear wall': 'Concrete shear wall',
'Concrete': 'Concrete',
'Condition': 'Condition',
'Conduct a Disaster Assessment': 'Conduct a Disaster Assessment',
'Config not found!': 'Config not found!',
'Configuration': 'Configuration',
'Configure Layer for this Symbology': 'Configure Layer for this Symbology',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Configure connection details and authentication': 'Configure connection details and authentication',
'Configure resources to synchronize, update methods and policies': 'Configure resources to synchronize, update methods and policies',
'Configure the default proxy server to connect to remote repositories': 'Configure the default proxy server to connect to remote repositories',
'Configure/Monitor Synchronization': 'Configure/Monitor Synchronization',
'Confirm Shipment Received': 'Confirm Shipment Received',
'Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock.': 'Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock.',
'Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.': 'Confirm that the shipment has been received by a destination which will not record the shipment directly into the system, so that it can be confirmed as received.',
'Confirmed': 'Confirmed',
'Confirming Organization': 'Confirming Organization',
'Conflict Policy': 'Conflict Policy',
'Connect New Parser': 'Connect New Parser',
'Connect Parser': 'Connect Parser',
'Consignment Number, Tracking Number, etc': 'Consignment Number, Tracking Number, etc',
'Construction Type (Check all that apply)': 'Construction Type (Check all that apply)',
'Construction': 'Construction',
'Consumable': 'Consumable',
'Contact Added': 'Contact Added',
'Contact Data': 'Contact Data',
'Contact Deleted': 'Contact Deleted',
'Contact Details updated': 'Contact Details updated',
'Contact Details': 'Kontaktinformation',
'Contact Info': 'Contact Info',
'Contact Information Added': 'Contact Information Added',
'Contact Information Deleted': 'Contact Information Deleted',
'Contact Information Updated': 'Contact Information Updated',
'Contact Information': 'Contact Information',
'Contact Method': 'Contact Method',
'Contact Name': 'Contact Name',
'Contact People': 'Contact People',
'Contact Person': 'Contact Person',
'Contact Phone': 'Contact Phone',
'Contact Updated': 'Contact Updated',
'Contact Us': 'Kontakta Oss',
'Contact added': 'Contact added',
'Contact deleted': 'Contact deleted',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact information': 'Contact information',
'Contact us': 'Kontakta oss',
'Contact': 'Contact',
'Contacts': 'Contacts',
'Content Management': 'Webbpublicering',
'Content': 'Content',
'Contents': 'Contents',
'Context': 'Context',
'Contract End Date': 'Contract End Date',
'Contract Expiry Date': 'Contract Expiry Date',
'Contributor': 'Contributor',
'Controller name': 'Controller name',
'Controller tour is activated': 'Controller tour is activated',
'Controller': 'Controller',
'Cook Islands': 'Cook Islands',
'Cooking Oil': 'Cooking Oil',
'Coordinate Layer': 'Coordinate Layer',
'Coping Activities': 'Coping Activities',
'Copy Request': 'Copy Request',
'Copy': 'Copy',
'Corn': 'Corn',
'Corporate Entity': 'Corporate Entity',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
'Could not add person record': 'Could not add person record',
'Could not auto-register at the repository, please register manually.': 'Could not auto-register at the repository, please register manually.',
'Could not create record.': 'Could not create record.',
'Could not generate report': 'Could not generate report',
'Could not initiate manual synchronization.': 'Could not initiate manual synchronization.',
'Could not merge records. (Internal Error: %s)': 'Could not merge records. (Internal Error: %s)',
'Count of Question': 'Count of Question',
'Count': 'Count',
'Countries': 'Countries',
'Country Code': 'Country Code',
'Country in': 'Country in',
'Country is required!': 'Country is required!',
'Country': 'Country',
'Course Catalog': 'Course Catalog',
'Course Certificate Details': 'Course Certificate Details',
'Course Certificate added': 'Course Certificate added',
'Course Certificate deleted': 'Course Certificate deleted',
'Course Certificate updated': 'Course Certificate updated',
'Course Certificates': 'Course Certificates',
'Course Details': 'Course Details',
'Course added': 'Course added',
'Course deleted': 'Course deleted',
'Course updated': 'Course updated',
'Course': 'Course',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Activity Report': 'Create Activity Report',
'Create Activity Type': 'Create Activity Type',
'Create Activity': 'Create Activity',
'Create Airport': 'Create Airport',
'Create Alert': 'Create Alert',
'Create Assessment Template': 'Create Assessment Template',
'Create Asset': 'Create Asset',
'Create Award': 'Create Award',
'Create Base Station': 'Create Base Station',
'Create Bed Type': 'Create Bed Type',
'Create Beneficiary Type': 'Create Beneficiary Type',
'Create Brand': 'Create Brand',
'Create Budget': 'Add Budget',
'Create Campaign': 'Create Campaign',
'Create Case': 'Create Case',
'Create Catalog Item': 'Create Catalog Item',
'Create Catalog': 'Create Catalog',
'Create Certificate': 'Create Certificate',
'Create Checklist': 'Create Checklist',
'Create Cholera Treatment Capability Information': 'Create Cholera Treatment Capability Information',
'Create Cluster Subsector': 'Create Cluster Subsector',
'Create Cluster': 'Create Cluster',
'Create Coalition': 'Create Coalition',
'Create Community': 'Create Community',
'Create Competency Rating': 'Create Competency Rating',
'Create Contact': 'Create Contact',
'Create Course': 'Create Course',
'Create Dead Body Report': 'Create Dead Body Report',
'Create Department': 'Create Department',
'Create Details': 'Create Details',
'Create Event Type': 'Create Event Type',
'Create Event': 'Create Event',
'Create Facility Type': 'Create Facility Type',
'Create Facility': 'Create Facility',
'Create Feature Layer': 'Create Feature Layer',
'Create GPS data': 'Create GPS data',
'Create Group': 'Create Group',
'Create Hazard': 'Create Hazard',
'Create Heliport': 'Create Heliport',
'Create Hospital': 'Create Hospital',
'Create Identification Report': 'Create Identification Report',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Incident Report': 'Create Incident Report',
'Create Incident Type': 'Create Incident Type',
'Create Incident': 'Create Incident',
'Create Item Category': 'Create Item Category',
'Create Item Pack': 'Create Item Pack',
'Create Item': 'Create Item',
'Create Job Title': 'Create Job Title',
'Create Job': 'Create Job',
'Create Kit': 'Create Kit',
'Create Layer': 'Create Layer',
'Create Location Hierarchy': 'Create Location Hierarchy',
'Create Location': 'Create Location',
'Create Mailing List': 'Create Mailing List',
'Create Map Configuration': 'Create Map Configuration',
'Create Marker': 'Create Marker',
'Create Membership Type': 'Create Membership Type',
'Create Milestone': 'Create Milestone',
'Create Mission': 'Create New Mission',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create Morgue': 'Create Morgue',
'Create Network': 'Create Network',
'Create Office Type': 'Create Office Type',
'Create Office': 'Create Office',
'Create Organization Type': 'Create Organization Type',
'Create Organization': 'Create Organization',
'Create Personal Effects': 'Create Personal Effects',
'Create PoI Type': 'Create PoI Type',
'Create Point of Interest': 'Create Point of Interest',
'Create Policy or Strategy': 'Create Policy or Strategy',
'Create Post': 'Create Post',
'Create Program': 'Create Program',
'Create Project': 'Lägg till projekt',
'Create Projection': 'Create Projection',
'Create Question Meta-Data': 'Create Question Meta-Data',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Reference Document': 'Create Reference Document',
'Create Repository': 'Create Repository',
'Create Request': 'Create Request',
'Create Resource': 'Create Resource',
'Create Role': 'Create Role',
'Create Room': 'Create Room',
'Create Scenario': 'Create New Scenario',
'Create Seaport': 'Create Seaport',
'Create Sector': 'Create Sector',
'Create Series': 'Create Series',
'Create Service Profile': 'Create Service Profile',
'Create Service': 'Create Service',
'Create Shelter Service': 'Create Shelter Service',
'Create Shelter Status': 'Create Shelter Status',
'Create Shelter Type': 'Create Shelter Type',
'Create Shelter': 'Create Shelter',
'Create Skill Type': 'Create Skill Type',
'Create Skill': 'Create Skill',
'Create Staff Member': 'Create Staff Member',
'Create Status Report': 'Create Status Report',
'Create Status': 'Create Status',
'Create Symbology': 'Create Symbology',
'Create Tag': 'Create Tag',
'Create Task': 'Create Task',
'Create Team': 'Create Team',
'Create Template': 'Create new Template',
'Create Theme': 'Create Theme',
'Create Tour': 'Create Tour',
'Create Training Event': 'Create Training Event',
'Create User': 'Create User',
'Create Vehicle Detail': 'Create Vehicle Detail',
'Create Vehicle': 'Create New Vehicle',
'Create Volunteer Cluster Position': 'Create Volunteer Cluster Position',
'Create Volunteer Cluster Type': 'Create Volunteer Cluster Type',
'Create Volunteer Cluster': 'Create Volunteer Cluster',
'Create Volunteer': 'Create Volunteer',
'Create Warehouse': 'Create Warehouse',
'Create a Person': 'Create a Person',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Create a new Group.': 'Create a new Group.',
'Create a new Team.': 'Create a new Team.',
'Create a new facility or ensure that you have permissions for an existing facility.': 'Create a new facility or ensure that you have permissions for an existing facility.',
'Create a new organization or ensure that you have permissions for an existing organization.': 'Create a new organization or ensure that you have permissions for an existing organization.',
'Create an Assessment Question': 'Create an Assessment Question',
'Create an information entry': 'Create an information entry',
'Create and broadcast CAP messages': 'Create and broadcast CAP messages',
'Create search': 'Create search',
'Created By': 'Created By',
'Created on %s by %s': 'Created on %s by %s',
'Created on %s': 'Created on %s',
'Credential Details': 'Credential Details',
'Credential added': 'Credential added',
'Credential deleted': 'Credential deleted',
'Credential updated': 'Credential updated',
'Credential': 'Credential',
'Credentialling Organization': 'Credentialling Organization',
'Credentials': 'Credentials',
'Credit Card': 'Credit Card',
'Crime': 'Crime',
'Criteria': 'Criteria',
'Crop Image': 'Crop Image',
'Crowbar': 'Crowbar',
'Currency': 'Currency',
'Current Home Address': 'Current Home Address',
'Current Location Country': 'Current Location Country',
'Current Location Phone Number': 'Current Location Phone Number',
'Current Location Treating Hospital': 'Current Location Treating Hospital',
'Current Location': 'Current Location',
'Current Mileage': 'Current Mileage',
'Current Owned By (Organization/Branch)': 'Current Owned By (Organization/Branch)',
'Current Residence': 'Current Residence',
'Current Status': 'Current Status',
'Current Twitter account': 'Current Twitter account',
'Current community priorities': 'Current community priorities',
'Current general needs': 'Current general needs',
'Current greatest needs of vulnerable groups': 'Current greatest needs of vulnerable groups',
'Current health problems': 'Current health problems',
'Current number of patients': 'Current number of patients',
'Current problems, categories': 'Current problems, categories',
'Current problems, details': 'Current problems, details',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'Current staffing level at the facility.': 'Current staffing level at the facility.',
'Currently no Appraisals entered': 'Currently no Appraisals entered',
'Currently no Certifications registered': 'Currently no Certifications registered',
'Currently no Course Certificates registered': 'Currently no Course Certificates registered',
'Currently no Credentials registered': 'Currently no Credentials registered',
'Currently no Participants registered': 'Currently no Participants registered',
'Currently no Professional Experience entered': 'Currently no Professional Experience entered',
'Currently no Skill Equivalences registered': 'Currently no Skill Equivalences registered',
'Currently no Skills registered': 'Currently no Skills registered',
'Currently no Trainings registered': 'Currently no Trainings registered',
'Currently no entries in the catalog': 'Currently no entries in the catalog',
'Currently no hours recorded for this volunteer': 'Currently no hours recorded for this volunteer',
'Currently no programs registered': 'Currently no programs registered',
'Currently no staff assigned': 'Currently no staff assigned',
'Currently no training events registered': 'Currently no training events registered',
'Customer': 'Customer',
'Customs Capacity': 'Customs Capacity',
'Customs Warehousing Storage Capacity': 'Customs Warehousing Storage Capacity',
'DATA QUALITY': 'DATA QUALITY',
'DATA/REPORT': 'DATA/REPORT',
'DELETE': 'DELETE',
'DISK': 'DISK',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'DONATE': 'DONATE',
'DRRPP Extensions': 'DRRPP Extensions',
'DVI Navigator': 'DVI Navigator',
'Daily': 'Daily',
'Dam Overflow': 'Dam Overflow',
'Damage Assessment': 'Damage Assessment',
'Damage Source': 'Damage Source',
'Damage sustained': 'Damage sustained',
'Damage': 'Damage',
'Damaged': 'Damaged',
'Damages': 'Damages',
'Dangerous Person': 'Dangerous Person',
'Data Purchase In Process': 'Data Purchase In Process',
'Data Quality': 'Data Quality',
'Data Source': 'Data Source',
'Data Type': 'Data Type',
'Data added to Theme Layer': 'Data added to Theme Layer',
'Data import error': 'Data import error',
'Data uploaded': 'Data uploaded',
'Data': 'Data',
'Data/Reports': 'Data/Reports',
'DataTable ID': 'DataTable ID',
'DataTable row': 'DataTable row',
'Database %s select': 'Database %s select',
'Database ID': 'Database ID',
'Database': 'Databas',
'Dataset Price Details': 'Dataset Price Details',
'Dataset Price added': 'Dataset Price added',
'Dataset Price removed': 'Dataset Price removed',
'Dataset Price updated': 'Dataset Price updated',
'Dataset Prices': 'Dataset Prices',
'Date & Time': 'Date & Time',
'Date Available': 'Date Available',
'Date Created': 'Date Created',
'Date Delivered': 'Date Delivered',
'Date Due': 'Date Due',
'Date Expected': 'Date Expected',
'Date Exported:': 'Date Exported:',
'Date Joined': 'Date Joined',
'Date Modified': 'Date Modified',
'Date Needed By': 'Date Needed By',
'Date Printed': 'Date Printed',
'Date Published': 'Date Published',
'Date Question': 'Date Question',
'Date Range': 'Date Range',
'Date Ready': 'Date Ready',
'Date Received': 'Date Received',
'Date Released': 'Date Released',
'Date Repacked': 'Date Repacked',
'Date Requested': 'Date Requested',
'Date Required Until': 'Date Required Until',
'Date Required': 'Date Required',
'Date Sent': 'Date Sent',
'Date Taken': 'Date Taken',
'Date Until': 'Date Until',
'Date and Time': 'Date and Time',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date must be %(max)s or earlier!': 'Date must be %(max)s or earlier!',
'Date must be %(min)s or later!': 'Date must be %(min)s or later!',
'Date must be between %(min)s and %(max)s!': 'Date must be between %(min)s and %(max)s!',
'Date of Birth': 'Date of Birth',
'Date of Recovery': 'Date of Recovery',
'Date of Report': 'Date of Report',
'Date of Treatment': 'Date of Treatment',
'Date of submission': 'Date of submission',
'Date resigned': 'Date resigned',
'Date': 'Date',
'Date/Time of Alert': 'Date/Time of Alert',
'Date/Time of Dispatch': 'Date/Time of Dispatch',
'Date/Time of Find': 'Date/Time of Find',
'Date/Time when found': 'Date/Time when found',
'Date/Time when last seen': 'Date/Time when last seen',
'Date/Time': 'Date/Time',
'Day': 'Day',
'De-duplicate Records': 'De-duplicate Records',
'De-duplicate': 'De-duplicate',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Dead Body Reports',
'Dead Body': 'Dead Body',
'Dead body report added': 'Dead body report added',
'Dead body report deleted': 'Dead body report deleted',
'Dead body report updated': 'Dead body report updated',
'Deaths in the past 24h': 'Deaths in the past 24h',
'Deaths/24hrs': 'Deaths/24hrs',
'Deceased': 'Deceased',
'Decision': 'Decision',
'Decline failed': 'Decline failed',
'Decline': 'Decline',
'Decomposed': 'Decomposed',
'Default Base layer?': 'Default Base layer?',
'Default Location': 'Default Location',
'Default Marker': 'Default Marker',
'Default Realm = All Entities the User is a Staff Member of': 'Default Realm = All Entities the User is a Staff Member of',
'Default Realm': 'Default Realm',
'Default map question': 'Default map question',
'Default': 'Default',
'Default?': 'Default?',
'Defecation area for animals': 'Defecation area for animals',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Defines the icon used for display of features on interactive map & KML exports.',
'Degrees in a latitude must be between -90 to 90.': 'Degrees in a latitude must be between -90 and 90.',
'Degrees in a longitude must be between -180 to 180.': 'Degrees in a longitude must be between -180 and 180.',
'Degrees must be a number.': 'Degrees must be a number.',
'Dehydration': 'Dehydration',
'Delete Affiliation': 'Delete Affiliation',
'Delete Airport': 'Delete Airport',
'Delete Alert': 'Delete Alert',
'Delete Alternative Item': 'Delete Alternative Item',
'Delete Appraisal': 'Delete Appraisal',
'Delete Assessment Summary': 'Delete Assessment Summary',
'Delete Assessment': 'Delete Assessment',
'Delete Asset Log Entry': 'Delete Asset Log Entry',
'Delete Asset': 'Delete Asset',
'Delete Award': 'Delete Award',
'Delete Base Station': 'Delete Base Station',
'Delete Baseline Type': 'Delete Baseline Type',
'Delete Baseline': 'Delete Baseline',
'Delete Branch': 'Delete Branch',
'Delete Brand': 'Delete Brand',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Case': 'Delete Case',
'Delete Catalog Item': 'Delete Catalog Item',
'Delete Catalog': 'Delete Catalog',
'Delete Certificate': 'Delete Certificate',
'Delete Certification': 'Delete Certification',
'Delete Cluster Subsector': 'Delete Cluster Subsector',
'Delete Cluster': 'Delete Cluster',
'Delete Commitment Item': 'Delete Commitment Item',
'Delete Commitment': 'Delete Commitment',
'Delete Competency Rating': 'Delete Competency Rating',
'Delete Contact Information': 'Delete Contact Information',
'Delete Contact': 'Delete Contact',
'Delete Course Certificate': 'Delete Course Certificate',
'Delete Course': 'Delete Course',
'Delete Credential': 'Delete Credential',
'Delete Data from Theme layer': 'Delete Data from Theme layer',
'Delete Department': 'Delete Department',
'Delete Detail': 'Delete Detail',
'Delete Document': 'Delete Document',
'Delete Donation': 'Delete Donation',
'Delete Donor': 'Delete Donor',
'Delete Email': 'Delete Email',
'Delete Evacuation Route': 'Delete Evacuation Route',
'Delete Event': 'Delete Event',
'Delete Facility Type': 'Delete Facility Type',
'Delete Facility': 'Delete Facility',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete Fire Station': 'Delete Fire Station',
'Delete GPS data': 'Delete GPS data',
'Delete Group': 'Delete Group',
'Delete Hazard': 'Delete Hazard',
'Delete Heliport': 'Delete Heliport',
'Delete Home': 'Delete Home',
'Delete Hospital': 'Delete Hospital',
'Delete Hours': 'Delete Hours',
'Delete Image': 'Delete Image',
'Delete Impact Type': 'Delete Impact Type',
'Delete Impact': 'Delete Impact',
'Delete Incident Report': 'Delete Incident Report',
'Delete Item Category': 'Delete Item Category',
'Delete Item Pack': 'Delete Item Pack',
'Delete Item from Request': 'Delete Item from Request',
'Delete Item': 'Delete Item',
'Delete Job Title': 'Delete Job Title',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Delete Layer',
'Delete Level 1 Assessment': 'Delete Level 1 Assessment',
'Delete Level 2 Assessment': 'Delete Level 2 Assessment',
'Delete Location Hierarchy': 'Delete Location Hierarchy',
'Delete Location': 'Delete Location',
'Delete Mailing List': 'Delete Mailing List',
'Delete Map Configuration': 'Delete Map Configuration',
'Delete Marker': 'Delete Marker',
'Delete Member': 'Delete Member',
'Delete Membership Type': 'Delete Membership Type',
'Delete Membership': 'Delete Membership',
'Delete Menu Entry': 'Delete Menu Entry',
'Delete Message': 'Delete Message',
'Delete Mission': 'Delete Mission',
'Delete Morgue': 'Delete Morgue',
'Delete Office Type': 'Delete Office Type',
'Delete Office': 'Delete Office',
'Delete Order': 'Delete Order',
'Delete Organization Domain': 'Delete Organization Domain',
'Delete Organization Needs': 'Delete Organization Needs',
'Delete Organization Type': 'Delete Organization Type',
'Delete Organization': 'Delete Organization',
'Delete Participant': 'Delete Participant',
'Delete Partner Organization': 'Delete Partner Organization',
'Delete Patient': 'Delete Patient',
'Delete People': 'Delete People',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete PoI Type': 'Delete PoI Type',
'Delete Point of Interest': 'Delete Point of Interest',
'Delete Population Statistic': 'Delete Population Statistic',
'Delete Position': 'Delete Position',
'Delete Post': 'Delete Post',
'Delete Problem': 'Delete Problem',
'Delete Procurement Plan': 'Delete Procurement Plan',
'Delete Professional Experience': 'Delete Professional Experience',
'Delete Program': 'Delete Program',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Rapid Assessment': 'Delete Rapid Assessment',
'Delete Received Shipment': 'Delete Received Shipment',
'Delete Recipient': 'Delete Recipient',
'Delete Record': 'Delete Record',
'Delete Region': 'Delete Region',
'Delete Relative': 'Delete Relative',
'Delete Report': 'Delete Report',
'Delete Request Template': 'Delete Request Template',
'Delete Request': 'Delete Request',
'Delete Resource Type': 'Delete Resource Type',
'Delete Resource': 'Delete Resource',
'Delete Role': 'Delete Role',
'Delete Room': 'Delete Room',
'Delete SMS': 'Delete SMS',
'Delete Scenario': 'Delete Scenario',
'Delete Seaport': 'Delete Seaport',
'Delete Section': 'Delete Section',
'Delete Sector': 'Delete Sector',
'Delete Security-Related Staff': 'Delete Security-Related Staff',
'Delete Sent Shipment': 'Delete Sent Shipment',
'Delete Service Profile': 'Delete Service Profile',
'Delete Service': 'Delete Service',
'Delete Shipment Item': 'Delete Shipment Item',
'Delete Site Needs': 'Delete Site Needs',
'Delete Skill Equivalence': 'Delete Skill Equivalence',
'Delete Skill Provision': 'Delete Skill Provision',
'Delete Skill Type': 'Delete Skill Type',
'Delete Skill': 'Delete Skill',
'Delete Solution': 'Delete Solution',
'Delete Staff Assignment': 'Delete Staff Assignment',
'Delete Staff Member': 'Delete Staff Member',
'Delete Staff Type': 'Delete Staff Type',
'Delete Status': 'Delete Status',
'Delete Stock Adjustment': 'Delete Stock Adjustment',
'Delete Stock Count': 'Delete Stock Count',
'Delete Subsector': 'Delete Subsector',
'Delete Supplier': 'Delete Supplier',
'Delete Symbology': 'Delete Symbology',
'Delete Template': 'Delete Template',
'Delete Theme': 'Delete Theme',
'Delete Tour': 'Delete Tour',
'Delete Trained People': 'Delete Trained People',
'Delete Training Event': 'Delete Training Event',
'Delete Training': 'Delete Training',
'Delete Tweet': 'Delete Tweet',
'Delete Type of People': 'Delete Type of People',
'Delete Type of Trained People ': 'Delete Type of Trained People ',
'Delete Unit': 'Delete Unit',
'Delete User': 'Delete User',
'Delete Vehicle Details': 'Delete Vehicle Details',
'Delete Vehicle Type': 'Delete Vehicle Type',
'Delete Vehicle': 'Delete Vehicle',
'Delete Volunteer Cluster Position': 'Delete Volunteer Cluster Position',
'Delete Volunteer Cluster Type': 'Delete Volunteer Cluster Type',
'Delete Volunteer Cluster': 'Delete Volunteer Cluster',
'Delete Volunteer Role': 'Delete Volunteer Role',
'Delete Volunteer': 'Delete Volunteer',
'Delete Warehouse Type': 'Delete Warehouse Type',
'Delete Warehouse': 'Delete Warehouse',
'Delete Zone Type': 'Delete Zone Type',
'Delete Zone': 'Delete Zone',
'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'Delete all data of this type which the user has permission to delete before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.',
'Delete from Server?': 'Delete from Server?',
'Delete saved search': 'Delete saved search',
'Delete this Assessment Answer': 'Delete this Assessment Answer',
'Delete this Assessment Question': 'Delete this Assessment Question',
'Delete this Assessment Template': 'Delete this Assessment Template',
'Delete this Completed Assessment Form': 'Delete this Completed Assessment Form',
'Delete this Disaster Assessment': 'Delete this Disaster Assessment',
'Delete this Filter': 'Delete this Filter',
'Delete this Question Meta-Data': 'Delete this Question Meta-Data',
'Delete this Template Section': 'Delete this Template Section',
'Delete': 'Delete',
'Delete:': 'Delete:',
'Deliver To': 'Deliver To',
'Delivered By': 'Delivered By',
'Delivered To': 'Delivered To',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic Data Details': 'Demographic Data Details',
'Demographic Data added': 'Demographic Data added',
'Demographic Data deleted': 'Demographic Data deleted',
'Demographic Data updated': 'Demographic Data updated',
'Demographic Data': 'Demographic Data',
'Demographic Details': 'Demographic Details',
'Demographic added': 'Demographic added',
'Demographic deleted': 'Demographic deleted',
'Demographic updated': 'Demographic updated',
'Demographic': 'Demographic',
'Demographics': 'Demographics',
'Demolding solution': 'Demolding solution',
'Demolition/Gutting': 'Demolition/Gutting',
'Demolition/gutting': 'Demolition/gutting',
'Demonstrations': 'Demonstrations',
'Denotes the appropriate handling of the alert message': 'Denotes the appropriate handling of the alert message',
'Denotes the category of the subject event of the alert message': 'Denotes the category of the subject event of the alert message',
'Denotes the certainty of the subject event of the alert message': 'Denotes the certainty of the subject event of the alert message',
'Denotes the intended distribution of the alert message': 'Denotes the intended distribution of the alert message',
'Denotes the language of the information': 'Denotes the language of the information',
'Denotes the severity of the subject event of the alert message': 'Denotes the severity of the subject event of the alert message',
'Denotes the type of action recommended for the target audience': 'Denotes the type of action recommended for the target audience',
'Denotes the urgency of the subject event of the alert message': 'Denotes the urgency of the subject event of the alert message',
'Dental Examination': 'Dental Examination',
'Dental Profile': 'Dental Profile',
'Department / Unit': 'Department / Unit',
'Department Catalog': 'Department Catalog',
'Department Details': 'Department Details',
'Department added': 'Department added',
'Department deleted': 'Department deleted',
'Department updated': 'Department updated',
'Deploy this Member': 'Deploy this Member',
'Deployed': 'Deployed',
'Deployment Alert': 'Deployment Alert',
'Deployment Location': 'Deployment Location',
'Deployment Request': 'Deployment Request',
'Deployment': 'Deployment',
'Deployments': 'Deployments',
'Depth (feet)': 'Depth (feet)',
'Describe access points, advice for team leaders': 'Describe access points, advice for team leaders',
'Describe the condition of the roads from/to the facility.': 'Describe the condition of the roads from/to the facility.',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Describe the procedure to which this record relates (e.g. "medical examination")',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Description of defecation area',
'Description of drinking water source': 'Description of drinking water source',
'Description of perimeter fencing, security guards, security lighting.': 'Description of perimeter fencing, security guards, security lighting.',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Description': 'Description',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'Destination',
'Destroyed': 'Destroyed',
'Detail added': 'Detail added',
'Detail deleted': 'Detail deleted',
'Detail updated': 'Detail updated',
'Detailed Description/URL': 'Detailed Description/URL',
'Details field is required!': 'Details field is required!',
'Details of Disaster Assessment': 'Details of Disaster Assessment',
'Details of each question in the Template': 'Details of each question in the Template',
'Details': 'Details',
'Dialysis': 'Dialysis',
'Diaphragms, horizontal bracing': 'Diaphragms, horizontal bracing',
'Diarrhea': 'Diarrhea',
'Dignitary Visit': 'Dignitary Visit',
'Direction': 'Direction',
'Disable': 'Disable',
'Disabled participating in coping activities': 'Disabled participating in coping activities',
'Disabled': 'Disabled',
'Disaster Assessment Chart': 'Disaster Assessment Chart',
'Disaster Assessment Map': 'Disaster Assessment Map',
'Disaster Assessment Summary': 'Disaster Assessment Summary',
'Disaster Assessment added': 'Disaster Assessment added',
'Disaster Assessment deleted': 'Disaster Assessment deleted',
'Disaster Assessment updated': 'Disaster Assessment updated',
'Disaster Assessments': 'Disaster Assessments',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Disaster': 'Disaster',
'Disaster(s)': 'Disaster(s)',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discuss': 'Discuss',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Dispatch Time': 'Dispatch Time',
'Dispatch': 'Dispatch',
'Dispensary': 'Dispensary',
'Displaced Populations': 'Displaced Populations',
'Displaced': 'Displaced',
'Display Chart': 'Display Chart',
'Display Polygons?': 'Display Polygons?',
'Display Question on Map': 'Display Question on Map',
'Display Routes?': 'Display Routes?',
'Display Selected Questions': 'Display Selected Questions',
'Display Tracks?': 'Display Tracks?',
'Display Waypoints?': 'Display Waypoints?',
'Display name': 'Display name',
'Display': 'Display',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance from %s:': 'Distance from %s:',
'Distributed without Record': 'Distributed without Record',
'Distribution Added': 'Distribution Added',
'Distribution Deleted': 'Distribution Deleted',
'Distribution Details': 'Distribution Details',
'Distribution Item Added': 'Distribution Item Added',
'Distribution Item Deleted': 'Distribution Item Deleted',
'Distribution Item Updated': 'Distribution Item Updated',
'Distribution Item': 'Distribution Item',
'Distribution Items': 'Distribution Items',
'Distribution Report': 'Distribution Report',
'Distribution Updated': 'Distribution Updated',
'Distribution': 'Distribution',
'Distributions': 'Distributions',
'Do you really want to approve this record?': 'Do you really want to approve this record?',
'Do you really want to delete these records?': 'Do you really want to delete these records?',
'Do you really want to delete this record? (This action can not be reversed)': 'Do you really want to delete this record? (This action cannot be reversed)',
'Do you want to cancel this received shipment? The items will be removed from the Warehouse. This action CANNOT be undone!': 'Do you want to cancel this received shipment? The items will be removed from the Warehouse. This action CANNOT be undone!',
'Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!': 'Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!',
'Do you want to commit to this request?': 'Do you want to commit to this request?',
'Do you want to complete & close this adjustment?': 'Do you want to complete & close this adjustment?',
'Do you want to complete the return process?': 'Do you want to complete the return process?',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Do you want to send these Committed items?': 'Do you want to send these Committed items?',
'Do you want to send this shipment?': 'Do you want to send this shipment?',
'Document Details': 'Document Details',
'Document Scan': 'Document Scan',
'Document added': 'Document added',
'Document deleted': 'Document deleted',
'Document updated': 'Document updated',
'Document': 'Dokument',
'Documents': 'Dokument',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Doing nothing (no structured activity)',
'Domain': 'Domain',
'Domestic chores': 'Domestic chores',
'Donate to this Request': 'Donate to this Request',
'Donated': 'Donated',
'Donating Organization': 'Donating Organization',
'Donation Added': 'Donation Added',
'Donation Canceled': 'Donation Canceled',
'Donation Details': 'Donation Details',
'Donation Updated': 'Donation Updated',
'Donation': 'Donation',
'Donations': 'Donations',
'Donor Details': 'Donor Details',
'Donor added': 'Donor added',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Donor updated',
'Donor': 'Donor',
'Donors Report': 'Donors Report',
'Donors': 'Donors',
'Doolie Transportation Ambulance': 'Doolie Transportation Ambulance',
'Door frame': 'Door frame',
'Download Assessment Form Document': 'Download Assessment Form Document',
'Download Assessment Form Spreadsheet': 'Download Assessment Form Spreadsheet',
'Download OCR-able PDF Form': 'Download OCR-able PDF Form',
'Download last build': 'Download last build',
'Download': 'Download',
'Download.CSV formatted Template': 'Download .CSV formatted Template',
'Draft - not actionable in its current form': 'Draft - not actionable in its current form',
'Draft Features': 'Draft Features',
'Draft': 'Draft',
'Drag an image below to crop and scale it before uploading it:': 'Drag an image below to crop and scale it before uploading it:',
'Draw a Polygon around the area to which you wish to restrict your search.': 'Draw a Polygon around the area to which you wish to restrict your search.',
'Draw a square to limit the results to just those within the square.': 'Draw a square to limit the results to just those within the square.',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Driver Phone Number': 'Driver Phone Number',
'Drivers': 'Drivers',
'Driving License': 'Driving License',
'Drop-off Location for Goods?': 'Drop-off Location for Goods?',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dry Dock': 'Dry Dock',
'Drywall': 'Drywall',
'Due %(date)s': 'Due %(date)s',
'Dug Well': 'Dug Well',
'Dump': 'Dump',
'Duplicate Locations': 'Duplicate Locations',
'Duplicate label selected': 'Duplicate label selected',
'Duplicate': 'Duplicate',
'Duration (months)': 'Duration (months)',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dust pan': 'Dust pan',
'Dwelling': 'Dwelling',
'EMS Status Reasons': 'EMS Status Reasons',
'EMS Status': 'EMS Status',
'ER Status Reason': 'ER Status Reason',
'ER Status': 'ER Status',
'ESRI Shape File': 'ESRI Shape File',
'ETag': 'ETag',
'EXERCISE': 'EXERCISE',
'Ear plugs': 'Ear plugs',
'Earthquake': 'Earthquake',
'Edit %(site_label)s Status': 'Edit %(site_label)s Status',
'Edit %(type)s': 'Edit %(type)s',
'Edit Activity Type': 'Edit Activity Type',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Adjustment': 'Edit Adjustment',
'Edit Affiliation': 'Edit Affiliation',
'Edit Airport': 'Edit Airport',
'Edit Alert Details': 'Edit Alert Details',
'Edit Alert': 'Edit Alert',
'Edit Alternative Item': 'Edit Alternative Item',
'Edit Annual Budget': 'Edit Annual Budget',
'Edit Appraisal': 'Edit Appraisal',
'Edit Area': 'Edit Area',
'Edit Assessment Answer': 'Edit Assessment Answer',
'Edit Assessment Question': 'Edit Assessment Question',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Assessment Template': 'Edit Assessment Template',
'Edit Assessment': 'Edit Assessment',
'Edit Asset Log Entry': 'Edit Asset Log Entry',
'Edit Asset': 'Edit Asset',
'Edit Award': 'Edit Award',
'Edit Base Station': 'Edit Base Station',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Baseline': 'Edit Baseline',
'Edit Beneficiaries': 'Edit Beneficiaries',
'Edit Beneficiary Type': 'Edit Beneficiary Type',
'Edit Branch Organization': 'Edit Branch Organization',
'Edit Brand': 'Edit Brand',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Camp Service': 'Edit Camp Service',
'Edit Camp Status': 'Edit Camp Status',
'Edit Camp Type': 'Edit Camp Type',
'Edit Camp': 'Edit Camp',
'Edit Campaign': 'Edit Campaign',
'Edit Case': 'Edit Case',
'Edit Catalog Item': 'Edit Catalog Item',
'Edit Catalog': 'Edit Catalog',
'Edit Certificate': 'Edit Certificate',
'Edit Certification': 'Edit Certification',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Cluster': 'Edit Cluster',
'Edit Commitment Item': 'Edit Commitment Item',
'Edit Commitment': 'Edit Commitment',
'Edit Committed People': 'Edit Committed People',
'Edit Committed Person': 'Edit Committed Person',
'Edit Community Details': 'Edit Community Details',
'Edit Competency Rating': 'Edit Competency Rating',
'Edit Completed Assessment Form': 'Edit Completed Assessment Form',
'Edit Contact Details': 'Edit Contact Details',
'Edit Contact Information': 'Edit Contact Information',
'Edit Contact': 'Edit Contact',
'Edit Contents': 'Edit Contents',
'Edit Course Certificate': 'Edit Course Certificate',
'Edit Course': 'Edit Course',
'Edit Credential': 'Edit Credential',
'Edit DRRPP Extensions': 'Edit DRRPP Extensions',
'Edit Dataset Price': 'Edit Dataset Price',
'Edit Dead Body Details': 'Edit Dead Body Details',
'Edit Demographic Data': 'Edit Demographic Data',
'Edit Demographic': 'Edit Demographic',
'Edit Department': 'Edit Department',
'Edit Description': 'Edit Description',
'Edit Details': 'Edit Details',
'Edit Distribution Item': 'Edit Distribution Item',
'Edit Distribution': 'Edit Distribution',
'Edit Document': 'Edit Document',
'Edit Donation': 'Edit Donation',
'Edit Donor': 'Edit Donor',
'Edit Education Details': 'Edit Education Details',
'Edit Email Settings': 'Edit Email Settings',
'Edit Entry': 'Edit Entry',
'Edit Evacuation Route': 'Edit Evacuation Route',
'Edit Event Type': 'Edit Event Type',
'Edit Event': 'Edit Event',
'Edit Experience': 'Edit Experience',
'Edit Facility Type': 'Edit Facility Type',
'Edit Facility': 'Edit Facility',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit GPS data': 'Edit GPS data',
'Edit Gauge': 'Edit Gauge',
'Edit Group': 'Edit Group',
'Edit Hazard': 'Edit Hazard',
'Edit Heliport': 'Edit Heliport',
'Edit Home': 'Edit Home',
'Edit Hospital': 'Edit Hospital',
'Edit Hours': 'Edit Hours',
'Edit Human Resource': 'Edit Human Resource',
'Edit Identification Report': 'Edit Identification Report',
'Edit Identity': 'Edit Identity',
'Edit Image Details': 'Edit Image Details',
'Edit Impact Type': 'Edit Impact Type',
'Edit Impact': 'Edit Impact',
'Edit Incident Report': 'Edit Incident Report',
'Edit Incident Type': 'Edit Incident Type',
'Edit Incident': 'Edit Incident',
'Edit Item Category': 'Edit Item Category',
'Edit Item Pack': 'Edit Item Pack',
'Edit Item in Request': 'Edit Item in Request',
'Edit Item': 'Edit Item',
'Edit Job Title': 'Edit Job Title',
'Edit Job': 'Edit Job',
'Edit Keyword': 'Edit Keyword',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Level %d Locations?': 'Edit Level %d Locations?',
'Edit Level 1 Assessment': 'Edit Level 1 Assessment',
'Edit Level 2 Assessment': 'Edit Level 2 Assessment',
'Edit Location Details': 'Edit Location Details',
'Edit Location Hierarchy': 'Edit Location Hierarchy',
'Edit Location': 'Edit Location',
'Edit Log Entry': 'Edit Log Entry',
'Edit Logged Time': 'Edit Logged Time',
'Edit Mailing List': 'Edit Mailing List',
'Edit Map Configuration': 'Edit Map Configuration',
'Edit Marker': 'Edit Marker',
'Edit Member': 'Edit Member',
'Edit Membership Type': 'Edit Membership Type',
'Edit Membership': 'Edit Membership',
'Edit Menu Entry': 'Edit Menu Entry',
'Edit Message': 'Edit Message',
'Edit Metadata': 'Edit Metadata',
'Edit Milestone': 'Edit Milestone',
'Edit Mission Details': 'Edit Mission Details',
'Edit Mobile Commons Settings': 'Edit Mobile Commons Settings',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Network': 'Edit Network',
'Edit Office Type': 'Edit Office Type',
'Edit Office': 'Edit Office',
'Edit Order': 'Edit Order',
'Edit Organization Domain': 'Edit Organization Domain',
'Edit Organization Needs': 'Edit Organization Needs',
'Edit Organization Type': 'Edit Organization Type',
'Edit Organization': 'Edit Organization',
'Edit Output': 'Edit Output',
'Edit Page': 'Edit Page',
'Edit Parameters': 'Edit Parameters',
'Edit Parser Connection': 'Edit Parser Connection',
'Edit Participant': 'Edit Participant',
'Edit Partner Organization': 'Edit Partner Organization',
'Edit Patient': 'Edit Patient',
'Edit People': 'Edit People',
'Edit Permissions for %(role)s': 'Edit Permissions for %(role)s',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit PoI Type': 'Edit PoI Type',
'Edit Point of Interest': 'Edit Point of Interest',
'Edit Policy or Strategy': 'Edit Policy or Strategy',
'Edit Population Statistic': 'Edit Population Statistic',
'Edit Position': 'Edit Position',
'Edit Post': 'Edit Post',
'Edit Problem': 'Edit Problem',
'Edit Procurement Plan Item': 'Edit Procurement Plan Item',
'Edit Procurement Plan': 'Edit Procurement Plan',
'Edit Professional Experience': 'Edit Professional Experience',
'Edit Profile Configuration': 'Edit Profile Configuration',
'Edit Program': 'Edit Program',
'Edit Project Organization': 'Edit Project Organization',
'Edit Project': 'Edit Project',
'Edit Projection': 'Edit Projection',
'Edit Purchased Data': 'Edit Purchased Data',
'Edit Question Meta-Data': 'Edit Question Meta-Data',
'Edit RSS Settings': 'Edit RSS Settings',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Recipient Details': 'Edit Recipient Details',
'Edit Record': 'Edit Record',
'Edit Region': 'Edit Region',
'Edit Registration': 'Edit Registration',
'Edit Relative': 'Edit Relative',
'Edit Repository Configuration': 'Edit Repository Configuration',
'Edit Request Template': 'Edit Request Template',
'Edit Request': 'Edit Request',
'Edit Requested Skill': 'Edit Requested Skill',
'Edit Resource Configuration': 'Edit Resource Configuration',
'Edit Resource Type': 'Edit Resource Type',
'Edit Resource': 'Edit Resource',
'Edit Response Details': 'Edit Response Details',
'Edit Response Summary': 'Edit Response Summary',
'Edit Response': 'Edit Response',
'Edit Risk': 'Edit Risk',
'Edit River': 'Edit River',
'Edit Role': 'Edit Role',
'Edit Room': 'Edit Room',
'Edit SMS Outbound Gateway': 'Edit SMS Outbound Gateway',
'Edit SMTP to SMS Settings': 'Edit SMTP to SMS Settings',
'Edit Saved Query': 'Edit Saved Query',
'Edit Scenario': 'Edit Scenario',
'Edit Seaport': 'Edit Seaport',
'Edit Sector': 'Edit Sector',
'Edit Security-Related Staff': 'Edit Security-Related Staff',
'Edit Sender Priority': 'Edit Sender Priority',
'Edit Series': 'Edit Series',
'Edit Service': 'Edit Service',
'Edit Shelter Service': 'Edit Shelter Service',
'Edit Shelter Status': 'Edit Shelter Status',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Shelter': 'Edit Shelter',
'Edit Shipment Item': 'Edit Shipment Item',
'Edit Site Needs': 'Edit Site Needs',
'Edit Skill Equivalence': 'Edit Skill Equivalence',
'Edit Skill Provision': 'Edit Skill Provision',
'Edit Skill Type': 'Edit Skill Type',
'Edit Skill': 'Edit Skill',
'Edit Solution': 'Edit Solution',
'Edit Staff Assignment': 'Edit Staff Assignment',
'Edit Staff Member Details': 'Edit Staff Member Details',
'Edit Staff Type': 'Edit Staff Type',
'Edit Station Details': 'Edit Station Details',
'Edit Station Parameter': 'Edit Station Parameter',
'Edit Status Report': 'Edit Status Report',
'Edit Status': 'Edit Status',
'Edit Stock Count': 'Edit Stock Count',
'Edit Subsector': 'Edit Subsector',
'Edit Supplier': 'Edit Supplier',
'Edit Symbology': 'Edit Symbology',
'Edit Synchronization Settings': 'Edit Synchronization Settings',
'Edit Tag': 'Edit Tag',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Template Section': 'Edit Template Section',
'Edit Template': 'Edit Template',
'Edit Theme Data': 'Edit Theme Data',
'Edit Theme': 'Edit Theme',
'Edit Tour': 'Edit Tour',
'Edit Trained People': 'Edit Trained People',
'Edit Training Event': 'Edit Training Event',
'Edit Training': 'Edit Training',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit Twilio Settings': 'Edit Twilio Settings',
'Edit Twitter Search Query': 'Edit Twitter Search Query',
'Edit Twitter account': 'Edit Twitter account',
'Edit Type of People': 'Edit Type of People',
'Edit Type of Trained People': 'Edit Type of Trained People',
'Edit User': 'Edit User',
'Edit Vehicle Assignment': 'Edit Vehicle Assignment',
'Edit Vehicle Details': 'Edit Vehicle Details',
'Edit Vehicle Type': 'Edit Vehicle Type',
'Edit Vehicle': 'Edit Vehicle',
'Edit Volunteer Cluster Position': 'Edit Volunteer Cluster Position',
'Edit Volunteer Cluster Type': 'Edit Volunteer Cluster Type',
'Edit Volunteer Cluster': 'Edit Volunteer Cluster',
'Edit Volunteer Details': 'Edit Volunteer Details',
'Edit Volunteer Role': 'Edit Volunteer Role',
'Edit Vulnerability Aggregated Indicator': 'Edit Vulnerability Aggregated Indicator',
'Edit Vulnerability Data': 'Edit Vulnerability Data',
'Edit Vulnerability Indicator': 'Edit Vulnerability Indicator',
'Edit Warehouse Stock': 'Edit Warehouse Stock',
'Edit Warehouse Type': 'Edit Warehouse Type',
'Edit Warehouse': 'Edit Warehouse',
'Edit Web API Settings': 'Edit Web API Settings',
'Edit Zone Type': 'Edit Zone Type',
'Edit Zone': 'Edit Zone',
'Edit current record': 'Edit current record',
'Edit roles for': 'Edit roles for',
'Edit saved search': 'Edit saved search',
'Edit the OpenStreetMap data for this area': 'Edit the OpenStreetMap data for this area',
'Edit this Disaster Assessment': 'Edit this Disaster Assessment',
'Edit this entry': 'Edit this entry',
'Edit': 'Ändra',
'Editable?': 'Editable?',
'Education Details': 'Education Details',
'Education details added': 'Education details added',
'Education details deleted': 'Education details deleted',
'Education details updated': 'Education details updated',
'Education materials received': 'Education materials received',
'Education materials, source': 'Education materials, source',
'Education': 'Education',
'Effective': 'Effective',
'Effects Inventory': 'Effects Inventory',
'Effort Report': 'Effort Report',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'Either file upload or image URL required.',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Elderly': 'Elderly',
'Electrical': 'Electrical',
'Electrical, gas, sewerage, water, hazmats': 'Electrical, gas, sewerage, water, hazmats',
'Electrician': 'Electrician',
'Electricity': 'Electricity',
'Elevated': 'Elevated',
'Elevators': 'Elevators',
'Email (Inbound)': 'Email (Inbound)',
'Email Account deleted': 'Email Account deleted',
'Email Accounts': 'Email Accounts',
'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address',
'Email Address': 'Email Address',
'Email Details': 'Email Details',
'Email InBox': 'Email InBox',
'Email Settings updated': 'Email Settings updated',
'Email Settings': 'Email Settings',
'Email and SMS': 'Email and SMS',
'Email deleted': 'Email deleted',
'Email': 'Email',
'Embalming': 'Embalming',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Contact': 'Emergency Contact',
'Emergency Contacts': 'Emergency Contacts',
'Emergency Department': 'Emergency Department',
'Emergency Medical Services': 'Emergency Medical Services',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Enable in Default Config?': 'Enable in Default Config?',
'Enable': 'Enable',
'Enabled': 'Enabled',
'Enabled?': 'Enabled?',
'End Date': 'End Date',
'Engineer': 'Engineer',
'Enter Completed Assessment Form': 'Enter Completed Assessment Form',
'Enter Completed Assessment': 'Enter Completed Assessment',
'Enter a new support request.': 'Enter a new support request.',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid email': 'Enter a valid email',
'Enter a valid phone number': 'Enter a valid phone number',
'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'Enter a value carefully, without spelling mistakes; this field needs to match existing data.',
'Enter indicator ratings': 'Enter indicator ratings',
'Enter some characters to bring up ': 'Enter some characters to bring up ',
'Enter some characters to bring up a list of possible matches': 'Enter some characters to bring up a list of possible matches',
'Enter the same password as above': 'Enter the same password as above',
'Enter your first name': 'Enter your first name',
'Enter your organization': 'Enter your organization',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.',
'Entity Information': 'Entity Information',
'Entity': 'Entity',
'Entry added to Asset Log': 'Entry added to Asset Log',
'Environment': 'Environment',
'Error Tickets': 'Felmeddelanden',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Error sending message!': 'Error sending message!',
'Error sending message': 'Error sending message',
'Error: Indicate rejection of the message(s)': 'Error: Indicate rejection of the message(s)',
'Errors': 'Errors',
'Essential Staff?': 'Essential Staff?',
'Estimated # of households who are affected by the emergency': 'Estimated # of households who are affected by the emergency',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Estimated Days': 'Estimated Days',
'Estimated Delivery Date': 'Estimated Delivery Date',
'Estimated Overall Building Damage': 'Estimated Overall Building Damage',
'Estimated Reopening Date': 'Estimated Reopening Date',
'Estimated Value per Pack': 'Estimated Value per Pack',
'Estimated Value': 'Estimated Value',
'Estimated Volunteers': 'Estimated Volunteers',
'Estimated total number of people in institutions': 'Estimated total number of people in institutions',
'Ethnicity': 'Ethnicity',
'Euros': 'Euros',
'Evacuate - Relocate as instructed in the instruction': 'Evacuate - Relocate as instructed in the instruction',
'Evacuating': 'Evacuating',
'Evacuation Route Details': 'Evacuation Route Details',
'Evacuation Route added': 'Evacuation Route added',
'Evacuation Route removed': 'Evacuation Route removed',
'Evacuation Route updated': 'Evacuation Route updated',
'Evacuation Route': 'Evacuation Route',
'Evacuation Routes': 'Evacuation Routes',
'Evacuation is short-term whilst storm passing e.g. 12 hours, hence people need less space.': 'Evacuation is short-term while the storm passes (e.g. 12 hours), hence people need less space.',
'Event Details': 'Event Details',
'Event Type Details': 'Event Type Details',
'Event Type added': 'Event Type added',
'Event Type removed': 'Event Type removed',
'Event Type updated': 'Event Type updated',
'Event Type': 'Event Type',
'Event Types': 'Event Types',
'Event added': 'Event added',
'Event code': 'Event code',
'Event deleted': 'Event deleted',
'Event updated': 'Event updated',
'Event': 'Event',
'Events': 'Events',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Excellent': 'Excellent',
'Exclude contents': 'Exclude contents',
'Execute - Execute a pre-planned activity identified in instruction': 'Execute - Execute a pre-planned activity identified in instruction',
'Exercise - only for designated participants (decribed in note)': 'Exercise - only for designated participants (described in note)',
'Exercise': 'Exercise',
'Exercise?': 'Exercise?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Exercises mean all screens have a watermark & all notifications have a prefix.',
'Existing Placard Type': 'Existing Placard Type',
'Existing food stocks': 'Existing food stocks',
'Exits': 'Exits',
'Expected Return Home': 'Expected Return Home',
'Experience': 'Experience',
'Expiration Date': 'Expiration Date',
'Expiration Details': 'Expiration Details',
'Expiration Report': 'Expiration Report',
'Expired': 'Expired',
'Expired?': 'Expired?',
'Expires at': 'Expires at',
'Expiry (months)': 'Expiry (months)',
'Expiry Date': 'Expiry Date',
'Expiry Date/Time': 'Expiry Date/Time',
'Expiry date': 'Expiry date',
'Explanation about this view': 'Explanation about this view',
'Explosive Hazard': 'Explosive Hazard',
'Export all Completed Assessment Data': 'Export all Completed Assessment Data',
'Export as': 'Export as',
'Export in %(format)s format': 'Export in %(format)s format',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in XLS format': 'Export in XLS format',
'Extension cords': 'Extension cords',
'Exterior Only': 'Exterior Only',
'Exterior and Interior': 'Exterior and Interior',
'Extraordinary threat to life or property': 'Extraordinary threat to life or property',
'Extrapolated': 'Extrapolated',
'Eye Color': 'Eye Color',
'FAIR': 'FAIR',
'FROM': 'FROM',
'Facebook': 'Facebook',
'Facial hair, color': 'Facial hair, color',
'Facial hair, comment': 'Facial hair, comment',
'Facial hair, length': 'Facial hair, length',
'Facial hair, type': 'Facial hair, type',
'Facilitate easy creation using CAP Profiles and templates': 'Facilitate easy creation using CAP Profiles and templates',
'Facilities': 'Facilities',
'Facility Contact': 'Facility Contact',
'Facility Details': 'Facility Details',
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type Details': 'Facility Type Details',
'Facility Type added': 'Facility Type added',
'Facility Type deleted': 'Facility Type deleted',
'Facility Type updated': 'Facility Type updated',
'Facility Type': 'Facility Type',
'Facility Types': 'Facility Types',
'Facility added': 'Facility added',
'Facility deleted': 'Facility deleted',
'Facility removed': 'Facility removed',
'Facility updated': 'Facility updated',
'Facility': 'Facility',
'Fail': 'Fail',
'Failed to approve': 'Failed to approve',
'Failed!': 'Failed!',
'Failed': 'Failed',
'Fair': 'Fair',
'Falling Object Hazard': 'Falling Object Hazard',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family': 'Family',
'Family/friends': 'Family/friends',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fatalities': 'Fatalities',
'Fax': 'Fax',
'Feature Info': 'Feature Info',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Feature Layer added',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Feature Layer updated',
'Feature Layer': 'Feature Layer',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Request': 'Feature Request',
'Feature Type': 'Feature Type',
'Features Include': 'Features Include',
'Feedback': 'Feedback',
'Female headed households': 'Female headed households',
'Female': 'Female',
'Few': 'Few',
'Field Hospital': 'Field Hospital',
'File missing': 'File missing',
'File not found': 'File not found',
'File uploaded': 'File uploaded',
'File': 'File',
'Files': 'Files',
'Fill out online below or ': 'Fill out online below or ',
'Filter Options': 'Filter Options',
'Filter Tweets by the date they were tweeted on': 'Filter Tweets by the date they were tweeted on',
'Filter Tweets by who tweeted them': 'Filter Tweets by who tweeted them',
'Filter by Bookmark': 'Filter by Bookmark',
'Filter by Category': 'Filter by Category',
'Filter by Country': 'Filter by Country',
'Filter by Date': 'Filter by Date',
'Filter by Location': 'Filter by Location',
'Filter by Organization': 'Filter by Organization',
'Filter by Status': 'Filter by Status',
'Filter by Tag': 'Filter by Tag',
'Filter by Type': 'Filter by Type',
'Filter type ': 'Filter type ',
'Filter type': 'Filter type',
'Filter': 'Filter',
'Filters': 'Filters',
'Find Dead Body Report': 'Find Dead Body Report',
'Find Hospital': 'Find Hospital',
'Find Person Record': 'Find Person Record',
'Find more': 'Find more',
'Find on Map': 'Find on Map',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Fingerprinting',
'Fingerprints': 'Fingerprints',
'Finished': 'Finished',
'Fire Fighter Forest Vehicle': 'Fire Fighter Forest Vehicle',
'Fire Fighter Light Vehicle': 'Fire Fighter Light Vehicle',
'Fire Fighter Rural Vehicle': 'Fire Fighter Rural Vehicle',
'Fire Fighter Special Vehicle': 'Fire Fighter Special Vehicle',
'Fire Fighter Urban Vehicle': 'Fire Fighter Urban Vehicle',
'Fire Station Details': 'Fire Station Details',
'Fire Station added': 'Fire Station added',
'Fire Station deleted': 'Fire Station deleted',
'Fire Station updated': 'Fire Station updated',
'Fire Station': 'Fire Station',
'Fire Stations': 'Fire Stations',
'Fire suppression and rescue': 'Fire suppression and rescue',
'Fire': 'Fire',
'First Floor Flooding': 'First Floor Flooding',
'First Name': 'First Name',
'First': 'First',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Flat bar': 'Flat bar',
'Flexible Impact Assessments': 'Flexible Impact Assessments',
'Flood (Contents)': 'Flood (Contents)',
'Flood (Structure)': 'Flood (Structure)',
'Flood Depth': 'Flood Depth',
'Flood Gauge': 'Flood Gauge',
'Flood': 'Flood',
'Flooding': 'Flooding',
'Floor': 'Floor',
'Flow Status': 'Flow Status',
'Focal Person': 'Focal Person',
'Fog': 'Fog',
'Folder': 'Folder',
'Food assistance': 'Food assistance',
'Food': 'Food',
'For Entity': 'For Entity',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For live help from the Sahana community on using this application, go to': 'For live help from the Sahana community on using this application, go to',
'For more details on the Sahana Eden system, see the': 'For more details on the Sahana Eden system, see the',
'For': 'For',
'Foreign Donation': 'Foreign Donation',
'Forest Fire': 'Forest Fire',
'Forest Tank Tactical Vehicle': 'Forest Tank Tactical Vehicle',
'Form Settings': 'Form Settings',
'Formal camp': 'Formal camp',
'Format': 'Format',
'Forms': 'Forms',
'Found': 'Found',
'Foundations': 'Foundations',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'Freight company or organisation providing transport': 'Freight company or organisation providing transport',
'Frequency': 'Frequency',
'Friday': 'Friday',
'From %(site)s': 'From %(site)s',
'From Facility': 'From Facility',
'From': 'From',
'Frost': 'Frost',
'Fulfil. Status': 'Fulfil. Status',
'Fulfill Status': 'Fulfill Status',
'Full beard': 'Full beard',
'Full': 'Full',
'Function Permissions': 'Function Permissions',
'Function name': 'Function name',
'Function tour is activated': 'Function tour is activated',
'Function': 'Function',
'Functions available': 'Functions available',
'Funding Report': 'Funding Report',
'Funds Contributed': 'Funds Contributed',
'Funeral': 'Funeral',
'Further Action Recommended': 'Further Action Recommended',
'GO TO ANALYSIS': 'GO TO ANALYSIS',
'GO TO THE REGION': 'GO TO THE REGION',
'GPS Data': 'GPS Data',
'GPS ID': 'GPS ID',
'GPS Marker': 'GPS Marker',
'GPS Track File': 'GPS Track File',
'GPS Track': 'GPS Track',
'GPS data added': 'GPS data added',
'GPS data deleted': 'GPS data deleted',
'GPS data updated': 'GPS data updated',
'GPS data': 'GPS data',
'GPX Layer': 'GPX Layer',
'GRN Status': 'GRN Status',
'Gale Wind': 'Gale Wind',
'Gap Analysis Map': 'Gap Analysis Map',
'Gap Analysis Report': 'Gap Analysis Report',
'Gap Map': 'Gap Map',
'Gap Report': 'Gap Report',
'Gas Supply Left (in hours)': 'Gas Supply Left (in hours)',
'Gas Supply Type': 'Gas Supply Type',
'Gas': 'Gas',
'Gauge Details': 'Gauge Details',
'Gauge added': 'Gauge added',
'Gauge deleted': 'Gauge deleted',
'Gauge updated': 'Gauge updated',
'Gauge': 'Gauge',
'Gauges': 'Gauges',
'Gender': 'Gender',
'General Comment': 'General Comment',
'General Medical/Surgical': 'General Medical/Surgical',
'General Person Transportation Vehicle': 'General Person Transportation Vehicle',
'General emergency and public safety': 'General emergency and public safety',
'General information on demographics': 'General information on demographics',
'Generate portable application': 'Generate portable application',
'Generator': 'Generator',
'GeoJSON Layer': 'GeoJSON Layer',
'GeoRSS Layer': 'GeoRSS Layer',
'Geocode': 'Geocode',
'Geocoder Selection': 'Geocoder Selection',
'Geometry Name': 'Geometry Name',
'Geonames.org search requires Internet connectivity!': 'Geonames.org search requires Internet connectivity!',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geotechnical Hazards': 'Geotechnical Hazards',
'Geotechnical': 'Geotechnical',
'Get Feature Info': 'Get Feature Info',
'Get incoming recovery requests as RSS feed': 'Get incoming recovery requests as RSS feed',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen them': 'Give information about where and when you have seen them',
'Go to Request': 'Go to Request',
'Go to the': 'Go to the',
'Go': 'Go',
'Goatee': 'Goatee',
'Goggles': 'Goggles',
'Good Condition': 'Good Condition',
'Good': 'Good',
'Google Layer': 'Google Layer',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Government': 'Government',
'Grade': 'Grade',
'Graph Model': 'Graph Model',
'Graph': 'Graph',
'Great British Pounds': 'Great Britain Pounds',
'Green': 'Green',
'Grid': 'Grid',
'Grinder': 'Grinder',
'Ground movement, fissures': 'Ground movement, fissures',
'Ground movement, settlement, slips': 'Ground movement, settlement, slips',
'Group Description': 'Group Description',
'Group Details': 'Group Details',
'Group Head': 'Group Head',
'Group Leader': 'Group Leader',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Name': 'Group Name',
'Group Title': 'Group Title',
'Group Type': 'Group Type',
'Group added': 'Group added',
'Group deleted': 'Group deleted',
'Group description': 'Group description',
'Group updated': 'Group updated',
'Group': 'Group',
'Grouped by': 'Grouped by',
'Groups': 'Groups',
'Guest': 'Guest',
'Guests can view all details': 'Guests can view all details',
'Guide': 'Guide',
'Guided Tours': 'Guided Tours',
'Gutting Status': 'Gutting Status',
'HFA Priorities': 'HFA Priorities',
'HFA': 'HFA',
'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.',
'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'HFA2: Identify, assess and monitor disaster risks and enhance early warning.',
'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.',
'HFA4: Reduce the underlying risk factors.': 'HFA4: Reduce the underlying risk factors.',
'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'HFA5: Strengthen disaster preparedness for effective response at all levels.',
'HIGH RESILIENCE': 'HIGH RESILIENCE',
'HIGH': 'HIGH',
'HTML ID': 'HTML ID',
'HTML class': 'HTML class',
'HTML': 'HTML',
'Hail': 'Hail',
'Hair Color': 'Hair Color',
'Hair Comments': 'Hair Comments',
'Hair Length': 'Hair Length',
'Hair Style': 'Hair Style',
'Hammers': 'Hammers',
'Hard hats': 'Hard hats',
'Has the %(GRN)s (%(GRN_name)s) form been completed?': 'Has the %(GRN)s (%(GRN_name)s) form been completed?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Has the Certificate for receipt of the shipment been given to the sender?',
'Hazard Details': 'Hazard Details',
'Hazard Pay': 'Hazard Pay',
'Hazard added to Project': 'Hazard added to Project',
'Hazard added': 'Hazard added',
'Hazard deleted': 'Hazard deleted',
'Hazard removed from Project': 'Hazard removed from Project',
'Hazard removed': 'Hazard removed',
'Hazard updated': 'Hazard updated',
'Hazard': 'Hazard',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Hazards': 'Hazards',
'Hazmat': 'Hazmat',
'Headlamps': 'Headlamps',
'Headline': 'Headline',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Health center': 'Health center',
'Health services status': 'Health services status',
'Health': 'Health',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height (cm)': 'Height (cm)',
'Height (m)': 'Height (m)',
'Height': 'Height',
'Helipad Information': 'Helipad Information',
'Heliport Details': 'Heliport Details',
'Heliport added': 'Heliport added',
'Heliport deleted': 'Heliport deleted',
'Heliport updated': 'Heliport updated',
'Heliports': 'Heliports',
'Help': 'Hjälp',
'Here are the solution options related to this problem. You should drag the solutions which you wish to vote on to the right-hand side & then order them with the preferred solution at the top. Note that you do not need to vote on all options.': 'Here are the solution options related to this problem. You should drag the solutions which you wish to vote on to the right-hand side & then order them with the preferred solution at the top. Note that you do not need to vote on all options.',
'Heritage Listed': 'Heritage Listed',
'Hide Chart': 'Hide Chart',
'Hide Pivot Table': 'Hide Pivot Table',
'Hide Table': 'Hide Table',
'Hide': 'Hide',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarchy Level 1 Name (e.g. State or Province)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarchy Level 2 Name (e.g. District or County)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarchy Level 3 Name (e.g. City / Town / Village)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarchy Level 4 Name (e.g. Neighbourhood)',
'Hierarchy Level 5 Name': 'Hierarchy Level 5 Name',
'High Tide Depth': 'High Tide Depth',
'High Water': 'High Water',
'High': 'High',
'Highest Priority Open Requests': 'Highest Priority Open Requests',
'Hindu': 'Hindu',
'Hit the back button on your browser to try again.': 'Hit the back button on your browser to try again.',
'Holiday Address': 'Holiday Address',
'Home Address': 'Home Address',
'Home City': 'Home City',
'Home Country': 'Home Country',
'Home Crime': 'Home Crime',
'Home Details': 'Home Details',
'Home Phone Number': 'Home Phone Number',
'Home Phone': 'Home Phone',
'Home Relative': 'Home Relative',
'Home added': 'Home added',
'Home deleted': 'Home deleted',
'Home phone': 'Home phone',
'Home updated': 'Home updated',
'Home': 'Hem',
'Homeowner Availability': 'Homeowner Availability',
'Homes': 'Homes',
'Hospital Details': 'Hospital Details',
'Hospital Status Report': 'Hospital Status Report',
'Hospital information added': 'Hospital information added',
'Hospital information deleted': 'Hospital information deleted',
'Hospital information updated': 'Hospital information updated',
'Hospital status assessment.': 'Hospital status assessment.',
'Hospital': 'Hospital',
'Hospitals': 'Hospitals',
'Host National Society': 'Host National Society',
'Host': 'Host',
'Hot Spot': 'Hot Spot',
'Hour': 'Hour',
'Hourly': 'Hourly',
'Hours Details': 'Hours Details',
'Hours added': 'Hours added',
'Hours deleted': 'Hours deleted',
'Hours updated': 'Hours updated',
'Hours': 'Hours',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'Households below %(br)s poverty line': 'Households below %(br)s poverty line',
'Households below poverty line': 'Households below poverty line',
'Households': 'Households',
'How data shall be transferred': 'How data shall be transferred',
'How local records shall be updated': 'How local records shall be updated',
'How long will the food last?': 'How long will the food last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'How many Boys (0-17 yrs) are Missing due to the crisis',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'How many Men (18 yrs+) are Dead due to the crisis',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'How many Women (18 yrs+) are Injured due to the crisis',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many new cases have been admitted to this facility in the past 24h?': 'How many new cases have been admitted to this facility in the past 24h?',
'How many of the patients with the disease died in the past 24h at this facility?': 'How many of the patients with the disease died in the past 24h at this facility?',
'How many patients with the disease are currently hospitalized at this facility?': 'How many patients with the disease are currently hospitalized at this facility?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means a lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'How often you want to be notified. If there are no changes, no notification will be sent.': 'How often you want to be notified. If there are no changes, no notification will be sent.',
'How you want to be notified.': 'How you want to be notified.',
'Human Resource Assignment updated': 'Human Resource Assignment updated',
'Human Resource Assignments': 'Human Resource Assignments',
'Human Resource Details': 'Human Resource Details',
'Human Resource added': 'Human Resource added',
'Human Resource assigned': 'Human Resource assigned',
'Human Resource removed': 'Human Resource removed',
'Human Resource unassigned': 'Human Resource unassigned',
'Human Resource updated': 'Human Resource updated',
'Human Resource': 'Human Resource',
'Human Resources': 'Human Resources',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hurricane': 'Hurricane',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Hygiene kits, source',
'Hygiene problems': 'Hygiene problems',
'Hygiene': 'Hygiene',
'I agree to the %(terms_of_service)s': 'I agree to the %(terms_of_service)s',
'ICONS': 'ICONS',
'ID Tag Number': 'ID Tag Number',
'ID Tag': 'ID Tag',
'ID type': 'ID type',
'ID': 'ID',
'INDICATOR RATINGS': 'INDICATOR RATINGS',
'INDICATORS': 'INDICATORS',
'Ice Pressure': 'Ice Pressure',
'Iceberg': 'Iceberg',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identification': 'Identification',
'Identified as': 'Identified as',
'Identified by': 'Identified by',
'Identifier Name for your Twilio Account.': 'Identifier Name for your Twilio Account.',
'Identifier which the remote site uses to authenticate at this site when sending synchronization requests.': 'Identifier which the remote site uses to authenticate at this site when sending synchronization requests.',
'Identifier': 'Identifier',
'Identities': 'Identities',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'Identity': 'Identity',
'If a ticket was issued then please provide the Ticket ID.': 'If a ticket was issued then please provide the Ticket ID.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.',
'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'If checked, the notification will contain all modified records. If not checked, a notification will be sent for each modified record.',
'If it is a URL leading to HTML, then this will downloaded.': 'If it is a URL leading to HTML, then this will be downloaded.',
'If neither are defined, then the Default Marker is used.': 'If neither are defined, then the Default Marker is used.',
'If no, specify why': 'If no, specify why',
'If none are selected, then all are searched.': 'If none are selected, then all are searched.',
'If not found, you can have a new location created.': 'If not found, you can have a new location created.',
'If not specified, the effective time shall be assumed to be the same the time the alert was sent.': 'If not specified, the effective time shall be assumed to be the same as the time the alert was sent.',
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If the person counts as essential staff when evacuating all non-essential staff.': 'If the person counts as essential staff when evacuating all non-essential staff.',
'If the service requries HTTP BASIC Auth (e.g. Mobile Commons)': 'If the service requires HTTP BASIC Auth (e.g. Mobile Commons)',
'If there are multiple configs for a person, which should be their default?': 'If there are multiple configs for a person, which should be their default?',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization',
'If this is a request template to be added repeatedly then the schedule can be set on the next page.': 'If this is a request template to be added repeatedly then the schedule can be set on the next page.',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
'If this item is not provided, each recipient is free to enforce its own policy as to when the message is no longer in effect.': 'If this item is not provided, each recipient is free to enforce its own policy as to when the message is no longer in effect.',
'If this record should be restricted then select which role is required to access the record here.': 'If this record should be restricted then select which role is required to access the record here.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'If this record should be restricted then select which role(s) are permitted to access the record here.',
'If used with the ceiling element this value is the lower limit of a range. Otherwise, this value specifies a specific altitude. The altitude measure is in feet above mean sea level.': 'If used with the ceiling element this value is the lower limit of a range. Otherwise, this value specifies a specific altitude. The altitude measure is in feet above mean sea level.',
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'If yes, which and how',
'If you would like to help, then please %(sign_up_now)s': 'If you would like to help, then please %(sign_up_now)s',
'Ignore Errors?': 'Ignore Errors?',
'Illegal Immigrant': 'Illegal Immigrant',
'Image Details': 'Image Details',
'Image File(s), one image per page': 'Image File(s), one image per page',
'Image Type': 'Image Type',
'Image URL': 'Image URL',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Image': 'Image',
'Images': 'Images',
'Immediately': 'Immediately',
'Immigration and Customs Capabilities': 'Immigration and Customs Capabilities',
'Impact Details': 'Impact Details',
'Impact Type Details': 'Impact Type Details',
'Impact Type added': 'Impact Type added',
'Impact Type deleted': 'Impact Type deleted',
'Impact Type updated': 'Impact Type updated',
'Impact Type': 'Impact Type',
'Impact Types': 'Impact Types',
'Impact added': 'Impact added',
'Impact deleted': 'Impact deleted',
'Impact updated': 'Impact updated',
'Impact': 'Impact',
'Impacts': 'Impacts',
'Import Activity Data': 'Import Activity Data',
'Import Activity Type data': 'Import Activity Type data',
'Import Airports': 'Import Airports',
'Import Alerts': 'Import Alerts',
'Import Annual Budget data': 'Import Annual Budget data',
'Import Assets': 'Import Assets',
'Import Awards': 'Import Awards',
'Import Base Stations': 'Import Base Stations',
'Import Branch Organizations': 'Import Branch Organizations',
'Import Catalog Items': 'Import Catalog Items',
'Import Certificates': 'Import Certificates',
'Import Community Data': 'Import Community Data',
'Import Completed Assessment Forms': 'Import Completed Assessment Forms',
'Import Contacts': 'Import Contacts',
'Import Courses': 'Import Courses',
'Import Data for Theme Layer': 'Import Data for Theme Layer',
'Import Data': 'Import Data',
'Import Demographic Data': 'Import Demographic Data',
'Import Demographics': 'Import Demographics',
'Import Departments': 'Import Departments',
'Import Event Types': 'Import Event Types',
'Import Facilities': 'Import Facilities',
'Import Facility Types': 'Import Facility Types',
'Import Hazard data': 'Import Hazard data',
'Import Hazards': 'Import Hazards',
'Import Heliports': 'Import Heliports',
'Import Hours': 'Import Hours',
'Import Incident Reports from Ushahidi': 'Import Incident Reports from Ushahidi',
'Import Incident Reports': 'Import Incident Reports',
'Import Incident Types': 'Import Incident Types',
'Import Layers': 'Import Layers',
'Import Location Data': 'Import Location Data',
'Import Location data': 'Import Location data',
'Import Locations': 'Import Locations',
'Import Logged Time data': 'Import Logged Time data',
'Import Members': 'Import Members',
'Import Membership Types': 'Import Membership Types',
'Import Milestones': 'Import Milestones',
'Import Missions': 'Import Missions',
'Import Offices': 'Import Offices',
'Import Organizations': 'Import Organizations',
'Import Participants': 'Import Participants',
'Import Partner Organizations': 'Import Partner Organizations',
'Import People': 'Import People',
'Import PoI Types': 'Import PoI Types',
'Import Points of Interest': 'Import Points of Interest',
'Import Policies & Strategies': 'Import Policies & Strategies',
'Import Posts': 'Import Posts',
'Import Project Organizations': 'Import Project Organizations',
'Import Projects': 'Import Projects',
'Import Recipients': 'Import Recipients',
'Import Resource Types': 'Import Resource Types',
'Import Resources': 'Import Resources',
'Import Scenarios': 'Import Scenarios',
'Import Seaports': 'Import Seaports',
'Import Sector data': 'Import Sector data',
'Import Security-Related Staff': 'Import Security-Related Staff',
'Import Series': 'Import Series',
'Import Service data': 'Import Service data',
'Import Services': 'Import Services',
'Import Staff Types': 'Import Staff Types',
'Import Staff': 'Import Staff',
'Import Statuses': 'Import Statuses',
'Import Suppliers': 'Import Suppliers',
'Import Tags': 'Import Tags',
'Import Tasks': 'Import Tasks',
'Import Template Layout': 'Import Template Layout',
'Import Templates': 'Import Templates',
'Import Theme data': 'Import Theme data',
'Import Themes': 'Import Themes',
'Import Trained People': 'Import Trained People',
'Import Training Events': 'Import Training Events',
'Import Training Participants': 'Import Training Participants',
'Import Type of Peoples': 'Import Types of People',
'Import Types of Trained People': 'Import Types of Trained People',
'Import Users': 'Import Users',
'Import Volunteer Cluster Positions': 'Import Volunteer Cluster Positions',
'Import Volunteer Cluster Types': 'Import Volunteer Cluster Types',
'Import Volunteer Clusters': 'Import Volunteer Clusters',
'Import Volunteers': 'Import Volunteers',
'Import Vulnerability Aggregated Indicator': 'Import Vulnerability Aggregated Indicator',
'Import Vulnerability Data': 'Import Vulnerability Data',
'Import Vulnerability Indicators': 'Import Vulnerability Indicators',
'Import Warehouse Stock': 'Import Warehouse Stock',
'Import Warehouses': 'Import Warehouses',
'Import Zone Types': 'Import Zone Types',
'Import Zones': 'Import Zones',
'Import alert information': 'Import alert information',
'Import from OpenStreetMap': 'Import from OpenStreetMap',
'Import': 'Importera',
'Import/Export': 'Import/Export',
'Important': 'Important',
'Imported data': 'Imported data',
'Imported': 'Imported',
'Improper decontamination': 'Improper decontamination',
'Improper handling of dead bodies': 'Improper handling of dead bodies',
'In Catalogs': 'In Catalogs',
'In Inventories': 'In Inventories',
'In Process': 'In Process',
'In Progress': 'In Progress',
'In Stock': 'In Stock',
'In error': 'In error',
'In transit': 'In transit',
'In': 'In',
'In-Progress': 'In-Progress',
'InBox': 'InBox',
'Inactive': 'Inactive',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Inbound?': 'Inbound?',
'Incident Commander': 'Incident Commander',
'Incident Details': 'Incident Details',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report removed': 'Incident Report removed',
'Incident Report updated': 'Incident Report updated',
'Incident Report': 'Incident Report',
'Incident Reports': 'Incident Reports',
'Incident Timeline': 'Incident Timeline',
'Incident Type Details': 'Incident Type Details',
'Incident Type added': 'Incident Type added',
'Incident Type removed': 'Incident Type removed',
'Incident Type updated': 'Incident Type updated',
'Incident Type': 'Incident Type',
'Incident Types': 'Incident Types',
'Incident added': 'Incident added',
'Incident removed': 'Incident removed',
'Incident updated': 'Incident updated',
'Incident': 'Incident',
'Incidents': 'Incidents',
'Include Entity Information?': 'Include Entity Information?',
'Include any special requirements such as equipment which they need to bring.': 'Include any special requirements such as equipment which they need to bring.',
'Include core files': 'Include core files',
'Include only items purchased within the specified dates.': 'Include only items purchased within the specified dates.',
'Include only items that expire within the specified dates.': 'Include only items that expire within the specified dates.',
'Include only items where quantity is in this range.': 'Include only items where quantity is in this range.',
'Incoming Shipments': 'Incoming Shipments',
'Incoming': 'Incoming',
'Incomplete': 'Incomplete',
'Incorrect parameters': 'Incorrect parameters',
'Indicator Comparison': 'Indicator Comparison',
'Indicator ratings': 'Indicator ratings',
'Indicator': 'Indicator',
'Indicators': 'Indicators',
'Industrial Crime': 'Industrial Crime',
'Industrial': 'Industrial',
'Industry Fire': 'Industry Fire',
'Infant (0-1)': 'Infant (0-1)',
'Infectious Disease (Hazardous Material)': 'Infectious Disease (Hazardous Material)',
'Infectious Disease': 'Infectious Disease',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Info template': 'Info template',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Information -Mold -FEMA -Legal': 'Information -Mold -FEMA -Legal',
'Information Source': 'Information Source',
'Information entries': 'Information entries',
'Information gaps': 'Information gaps',
'Information template': 'Information template',
'Information': 'Information',
'Infusion catheters available': 'Infusion catheters available',
'Infusion catheters need per 24h': 'Infusion catheters needed per 24h',
'Infusion catheters needed per 24h': 'Infusion catheters needed per 24h',
'Infusions available': 'Infusions available',
'Infusions needed per 24h': 'Infusions needed per 24h',
'Inherited?': 'Inherited?',
'Initials': 'Initials',
'Inspected': 'Inspected',
'Inspection Date': 'Inspection Date',
'Inspection date and time': 'Inspection date and time',
'Inspection time': 'Inspection time',
'Inspector ID': 'Inspector ID',
'Instant Porridge': 'Instant Porridge',
'Instruction': 'Instruction',
'Instructor': 'Instructor',
'Instrument Landing System': 'Instrument Landing System',
'Insufficient Privileges': 'Insufficient Privileges',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Insufficient': 'Insufficient',
'Insurance Renewal Due': 'Insurance Renewal Due',
'Insurance': 'Insurance',
'Intake Date': 'Intake Date',
'Integrity error: record can not be deleted while it is referenced by other records': 'Integrity error: record cannot be deleted while it is referenced by other records',
'Intention to Stay Home': 'Intention to Stay Home',
'Interior walls, partitions': 'Interior walls, partitions',
'Internal Shipment': 'Internal Shipment',
'Internal State': 'Internal State',
'Interview taking place at': 'Interview taking place at',
'Invalid Location!': 'Invalid Location!',
'Invalid Query': 'Invalid Query',
'Invalid Site!': 'Invalid Site!',
'Invalid data: record %(id)s not accessible in table %(table)s': 'Invalid data: record %(id)s not accessible in table %(table)s',
'Invalid end selector: %(selector)s': 'Invalid end selector: %(selector)s',
'Invalid fact selector: %(selector)s': 'Invalid fact selector: %(selector)s',
'Invalid form (re-opened in another window?)': 'Invalid form (re-opened in another window?)',
'Invalid phone number!': 'Invalid phone number!',
'Invalid phone number': 'Invalid phone number',
'Invalid request': 'Invalid request',
'Invalid source': 'Invalid source',
'Invalid start selector: %(selector)s': 'Invalid start selector: %(selector)s',
'Invalid ticket': 'Invalid ticket',
'Invalid': 'Invalid',
'Inventory Adjustment Item': 'Inventory Adjustment Item',
'Inventory Adjustment': 'Inventory Adjustment',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.',
'Inventory Items': 'Inventory Items',
'Inventory of Effects': 'Inventory of Effects',
'Inventory': 'Inventory',
'Is editing level L%d locations allowed?': 'Is editing level L%d locations allowed?',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Is this a strict hierarchy?': 'Is this a strict hierarchy?',
'Issued without Record': 'Issued without Record',
'Issuing Authority': 'Issuing Authority',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'It captures not only the places where they are active, but also information on the range of projects they are providing in each area.',
'Item Added to Shipment': 'Item Added to Shipment',
'Item Catalog Details': 'Item Catalog Details',
'Item Categories': 'Item Categories',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Item Category deleted',
'Item Category updated': 'Item Category updated',
'Item Category': 'Item Category',
'Item Code': 'Item Code',
'Item Details': 'Item Details',
'Item Pack Details': 'Item Pack Details',
'Item Pack added': 'Item Pack added',
'Item Pack deleted': 'Item Pack deleted',
'Item Pack updated': 'Item Pack updated',
'Item Packs': 'Item Packs',
'Item Source Tracking Number': 'Item Source Tracking Number',
'Item Status': 'Item Status',
'Item Tracking Status': 'Item Tracking Status',
'Item added to Procurement Plan': 'Item added to Procurement Plan',
'Item added to stock': 'Item added to stock',
'Item added': 'Item added',
'Item already in Bundle!': 'Item already in Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item name': 'Item name',
'Item quantity adjusted': 'Item quantity adjusted',
'Item removed from Procurement Plan': 'Item removed from Procurement Plan',
'Item removed from Stock': 'Item removed from Stock',
'Item updated': 'Item updated',
'Item': 'Item',
'Item(s) added to Request': 'Item(s) added to Request',
'Item(s) deleted from Request': 'Item(s) deleted from Request',
'Item(s) updated on Request': 'Item(s) updated on Request',
'Item/Description': 'Item/Description',
'Items in Category are Vehicles': 'Items in Category are Vehicles',
'Items in Category can be Assets': 'Items in Category can be Assets',
'Items in Procurement Plan': 'Items in Procurement Plan',
'Items in Request': 'Items in Request',
'Items in Stock': 'Items in Stock',
'Items': 'Items',
'Items/Description': 'Items/Description',
'JNAP Priorities': 'JNAP Priorities',
'JNAP-1: Strategic Area 1: Governance': 'JNAP-1: Strategic Area 1: Governance',
'JNAP-2: Strategic Area 2: Monitoring': 'JNAP-2: Strategic Area 2: Monitoring',
'JNAP-3: Strategic Area 3: Disaster Management': 'JNAP-3: Strategic Area 3: Disaster Management',
'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation',
'JS Layer': 'JS Layer',
'Jerry can': 'Jerry can',
'Jewish': 'Jewish',
'Job Schedule': 'Job Schedule',
'Job Title Catalog': 'Job Title Catalog',
'Job Title Details': 'Job Title Details',
'Job Title added': 'Job Title added',
'Job Title deleted': 'Job Title deleted',
'Job Title updated': 'Job Title updated',
'Job Title': 'Job Title',
'Job added': 'Job added',
'Job deleted': 'Job deleted',
'Job reactivated': 'Job reactivated',
'Job updated': 'Job updated',
'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only': 'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only',
'Journal Entry Details': 'Journal Entry Details',
'Journal entry added': 'Journal entry added',
'Journal entry deleted': 'Journal entry deleted',
'Journal entry updated': 'Journal entry updated',
'Journal': 'Journal',
'KML Layer': 'KML Layer',
'Keep Duplicate': 'Keep Duplicate',
'Keep Original': 'Keep Original',
'Key Value pairs': 'Key Value pairs',
'Key': 'Key',
'Keyword Added': 'Keyword Added',
'Keyword Deleted': 'Keyword Deleted',
'Keyword Updated': 'Keyword Updated',
'Keyword': 'Keyword',
'Keywords': 'Keywords',
'Kit Contents': 'Kit Contents',
'Kit Created': 'Kit Created',
'Kit Details': 'Kit Details',
'Kit Item': 'Kit Item',
'Kit Items': 'Kit Items',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit canceled': 'Kit canceled',
'Kit deleted': 'Kit deleted',
'Kit updated': 'Kit updated',
'Kit': 'Kit',
'Kit?': 'Kit?',
'Kits': 'Kits',
'Kllz': 'Kllz',
'Known Locations': 'Known Locations',
'Known incidents of violence against women/girls': 'Known incidents of violence against women/girls',
'Known incidents of violence since disaster': 'Known incidents of violence since disaster',
'LEGEND': 'LEGEND',
'LICENSE': 'LICENSE',
'LOW RESILIENCE': 'LOW RESILIENCE',
'LOW': 'LOW',
'Lack of material': 'Lack of material',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Ladder Vehicle 30': 'Ladder Vehicle 30',
'Ladder/step stool': 'Ladder/step stool',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language Code': 'Language Code',
'Language': 'Språk',
'Large pails': 'Large pails',
'Last Checked': 'Last Checked',
'Last Contacted': 'Last Contacted',
'Last Data Collected on': 'Last Data Collected on',
'Last Downloaded': 'Last Downloaded',
'Last Modification': 'Last Modification',
'Last Name': 'Last Name',
'Last Polled': 'Last Polled',
'Last Pull': 'Last Pull',
'Last Push': 'Last Push',
'Last Updated': 'Last Updated',
'Last known location': 'Last known location',
'Last pull on': 'Last pull on',
'Last push on': 'Last push on',
'Last run': 'Last run',
'Last status': 'Last status',
'Last updated ': 'Last updated ',
'Last updated by': 'Last updated by',
'Last updated on': 'Last updated on',
'Last': 'Last',
'Latest Information': 'Latest Information',
'Latest Offers': 'Latest Offers',
'Latest Requests': 'Latest Requests',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude and Longitude are required': 'Latitude and Longitude are required',
'Latitude is Invalid!': 'Latitude is Invalid!',
'Latitude is North - South (Up-Down).': 'Latitude is North - South (Up-Down).',
'Latitude is North-South (Up-Down).': 'Latitude is North-South (Up-Down).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude must be between -90 and 90.': 'Latitude must be between -90 and 90.',
'Latitude of Map Center': 'Latitude of Map Center',
'Latitude of far northern end of the region of interest.': 'Latitude of far northern end of the region of interest.',
'Latitude of far southern end of the region of interest.': 'Latitude of far southern end of the region of interest.',
'Latitude should be between': 'Latitude should be between',
'Latitude': 'Latitude',
'Latrines': 'Latrines',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer Details': 'Layer Details',
'Layer Name': 'Layer Name',
'Layer Properties': 'Layer Properties',
'Layer added': 'Layer added',
'Layer deleted': 'Layer deleted',
'Layer has been Disabled': 'Layer has been Disabled',
'Layer has been Enabled': 'Layer has been Enabled',
'Layer removed from Symbology': 'Layer removed from Symbology',
'Layer updated': 'Layer updated',
'Layer': 'Layer',
'Layers': 'Layers',
'Lead Implementer for this project is already set, please choose another role.': 'Lead Implementer for this project is already set, please choose another role.',
'Lead Implementer': 'Lead Implementer',
'Lead Organization': 'Lead Organization',
'Leader': 'Leader',
'Leave blank to request an unskilled person': 'Leave blank to request an unskilled person',
'Left-side is fully transparent (0), right-side is opaque (1.0).': 'Left-side is fully transparent (0), right-side is opaque (1.0).',
'Legend Format': 'Legend Format',
'Legend URL': 'Legend URL',
'Legend': 'Legend',
'Length (m)': 'Length (m)',
'Less Options': 'Less Options',
'Level 1 Assessment Details': 'Level 1 Assessment Details',
'Level 1 Assessment added': 'Level 1 Assessment added',
'Level 1 Assessment deleted': 'Level 1 Assessment deleted',
'Level 1 Assessment updated': 'Level 1 Assessment updated',
'Level 1 Assessments': 'Level 1 Assessments',
'Level 1': 'Level 1',
'Level 2 Assessment Details': 'Level 2 Assessment Details',
'Level 2 Assessment added': 'Level 2 Assessment added',
'Level 2 Assessment deleted': 'Level 2 Assessment deleted',
'Level 2 Assessment updated': 'Level 2 Assessment updated',
'Level 2 Assessments': 'Level 2 Assessments',
'Level 2 or detailed engineering evaluation recommended': 'Level 2 or detailed engineering evaluation recommended',
'Level 2': 'Level 2',
'Level of Award': 'Level of Award',
'Level of competency this person has with this skill.': 'Level of competency this person has with this skill.',
'Level': 'Level',
'License Number': 'License Number',
'License Plate': 'License Plate',
'Lighting': 'Lighting',
'Likely (p > ~50%)': 'Likely (p > ~50%)',
'Link (or refresh link) between User, Person & HR Record': 'Link (or refresh link) between User, Person & HR Record',
'Link for the RSS Feed.': 'Link for the RSS Feed.',
'Link to Mission': 'Link to Mission',
'Link to this result': 'Link to this result',
'Link': 'Link',
'Links': 'Links',
'List %(site_label)s Status': 'List %(site_label)s Status',
'List Activities': 'List Activities',
'List Activity Reports': 'List Activity Reports',
'List Activity Types': 'List Activity Types',
'List Addresses': 'List Addresses',
'List Affiliations': 'List Affiliations',
'List Airports': 'List Airports',
'List Alerts': 'List Alerts',
'List All Catalogs & Add Items to Catalogs': 'List All Catalogs & Add Items to Catalogs',
'List All Commitments': 'List All Commitments',
'List All Entries': 'List All Entries',
'List All Item Categories': 'List All Item Categories',
'List All Items': 'List All Items',
'List All Requested Items': 'List All Requested Items',
'List All Requested Skills': 'List All Requested Skills',
'List All Requests': 'List All Requests',
'List All Vehicles': 'List All Vehicles',
'List All': 'Visa Alla',
'List Alternative Items': 'List Alternative Items',
'List Annual Budgets': 'List Annual Budgets',
'List Assessment Answers': 'List Assessment Answers',
'List Assessment Questions': 'List Assessment Questions',
'List Assessment Summaries': 'List Assessment Summaries',
'List Assessment Templates': 'List Assessment Templates',
'List Assessments': 'List Assessments',
'List Assets': 'List Assets',
'List Assigned Human Resources': 'List Assigned Human Resources',
'List Awards': 'List Awards',
'List Base Stations': 'List Base Stations',
'List Baseline Types': 'List Baseline Types',
'List Baselines': 'List Baselines',
'List Beneficiaries': 'List Beneficiaries',
'List Beneficiary Types': 'List Beneficiary Types',
'List Branch Organizations': 'List Branch Organizations',
'List Brands': 'List Brands',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Camp Services': 'List Camp Services',
'List Camp Statuses': 'List Camp Statuses',
'List Camp Types': 'List Camp Types',
'List Campaigns': 'List Campaigns',
'List Camps': 'List Camps',
'List Cases': 'List Cases',
'List Catalog Items': 'List Catalog Items',
'List Catalogs': 'List Catalogs',
'List Certificates': 'List Certificates',
'List Certifications': 'List Certifications',
'List Checklists': 'List Checklists',
'List Cluster Subsectors': 'List Cluster Subsectors',
'List Clusters': 'List Clusters',
'List Coalitions': 'List Coalitions',
'List Commitment Items': 'List Commitment Items',
'List Commitments': 'List Commitments',
'List Committed People': 'List Committed People',
'List Communities': 'List Communities',
'List Competency Ratings': 'List Competency Ratings',
'List Completed Assessment Forms': 'List Completed Assessment Forms',
'List Contact Information': 'List Contact Information',
'List Contacts': 'List Contacts',
'List Course Certificates': 'List Course Certificates',
'List Courses': 'List Courses',
'List Credentials': 'List Credentials',
'List Data in Theme Layer': 'List Data in Theme Layer',
'List Dataset Prices': 'List Dataset Prices',
'List Demographic Data': 'List Demographic Data',
'List Demographics': 'List Demographics',
'List Departments': 'List Departments',
'List Details': 'List Details',
'List Disaster Assessments': 'List Disaster Assessments',
'List Distribution Items': 'List Distribution Items',
'List Distributions': 'List Distributions',
'List Documents': 'List Documents',
'List Donations': 'List Donations',
'List Donors': 'List Donors',
'List Education Details': 'List Education Details',
'List Evacuation Routes': 'List Evacuation Routes',
'List Event Types': 'List Event Types',
'List Events': 'List Events',
'List Facilities': 'List Facilities',
'List Facility Types': 'List Facility Types',
'List Feature Layers': 'List Feature Layers',
'List Fire Stations': 'List Fire Stations',
'List GPS data': 'List GPS data',
'List Gauges': 'List Gauges',
'List Groups': 'List Groups',
'List Hazards': 'List Hazards',
'List Heliports': 'List Heliports',
'List Homes': 'List Homes',
'List Hospitals': 'List Hospitals',
'List Hours': 'List Hours',
'List Human Resources': 'List Human Resources',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Impact Assessments': 'List Impact Assessments',
'List Impact Types': 'List Impact Types',
'List Impacts': 'List Impacts',
'List Incident Reports': 'List Incident Reports',
'List Incident Types': 'List Incident Types',
'List Incidents': 'List Incidents',
'List Item Categories': 'List Item Categories',
'List Item Packs': 'List Item Packs',
'List Items in Procurement Plan': 'List Items in Procurement Plan',
'List Items in Request': 'List Items in Request',
'List Items in Stock': 'List Items in Stock',
'List Items': 'List Items',
'List Job Titles': 'List Job Titles',
'List Jobs': 'List Jobs',
'List Keywords': 'List Keywords',
'List Kits': 'List Kits',
'List Layers in Profile': 'List Layers in Profile',
'List Layers in Symbology': 'List Layers in Symbology',
'List Layers': 'List Layers',
'List Level 1 Assessments': 'List Level 1 Assessments',
'List Level 1 assessments': 'List Level 1 assessments',
'List Level 2 Assessments': 'List Level 2 Assessments',
'List Level 2 assessments': 'List Level 2 assessments',
'List Location Hierarchies': 'List Location Hierarchies',
'List Locations': 'List Locations',
'List Log Entries': 'List Log Entries',
'List Logged Time': 'List Logged Time',
'List Mailing Lists': 'List Mailing Lists',
'List Map Configurations': 'List Map Configurations',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Membership Types': 'List Membership Types',
'List Memberships': 'List Memberships',
'List Menu Entries': 'List Menu Entries',
'List Milestones': 'List Milestones',
'List Missing Persons': 'List Missing Persons',
'List Missions': 'List Missions',
'List Morgues': 'List Morgues',
'List Networks': 'List Networks',
'List Office Types': 'List Office Types',
'List Offices': 'List Offices',
'List Orders': 'List Orders',
'List Organization Domains': 'List Organization Domains',
'List Organization Types': 'List Organization Types',
'List Organizations': 'List Organizations',
'List Outputs': 'List Outputs',
'List Participants': 'List Participants',
'List Partner Organizations': 'List Partner Organizations',
'List Patients': 'List Patients',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List PoI Types': 'List PoI Types',
'List Points of Interest': 'List Points of Interest',
'List Policies & Strategies': 'List Policies & Strategies',
'List Population Statistics': 'List Population Statistics',
'List Positions': 'List Positions',
'List Posts': 'List Posts',
'List Problems': 'List Problems',
'List Procurement Plans': 'List Procurement Plans',
'List Profiles configured for this Layer': 'List Profiles configured for this Layer',
'List Programs': 'List Programs',
'List Project Organizations': 'List Project Organizations',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Question Meta-Data': 'List Question Meta-Data',
'List Rapid Assessments': 'List Rapid Assessments',
'List Received/Incoming Shipments': 'List Received/Incoming Shipments',
'List Recipients': 'List Recipients',
'List Records': 'List Records',
'List Regions': 'List Regions',
'List Registrations': 'List Registrations',
'List Relatives': 'List Relatives',
'List Reports': 'List Reports',
'List Repositories': 'List Repositories',
'List Request Templates': 'List Request Templates',
'List Requested Skills': 'List Requested Skills',
'List Requests': 'List Requests',
'List Resources': 'List Resources',
'List Response Summaries': 'List Response Summaries',
'List Responses': 'List Responses',
'List Risks': 'List Risks',
'List Rivers': 'List Rivers',
'List Roles': 'List Roles',
'List Rooms': 'List Rooms',
'List Saved Queries': 'List Saved Queries',
'List Scenarios': 'List Scenarios',
'List Seaports': 'List Seaports',
'List Sections': 'List Sections',
'List Sectors': 'List Sectors',
'List Security-Related Staff': 'List Security-Related Staff',
'List Sent Shipments': 'List Sent Shipments',
'List Series': 'List Series',
'List Service Profiles': 'List Service Profiles',
'List Services': 'List Services',
'List Shelter Services': 'List Shelter Services',
'List Shelter Statuses': 'List Shelter Statuses',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Shipment Items': 'List Shipment Items',
'List Skill Equivalences': 'List Skill Equivalences',
'List Skill Provisions': 'List Skill Provisions',
'List Skill Types': 'List Skill Types',
'List Skills': 'List Skills',
'List Solutions': 'List Solutions',
'List Staff & Volunteers': 'List Staff & Volunteers',
'List Staff Assignments': 'List Staff Assignments',
'List Staff Members': 'List Staff Members',
'List Staff Types': 'List Staff Types',
'List Station Parameters': 'List Station Parameters',
'List Status Reports': 'List Status Reports',
'List Statuses': 'List Statuses',
'List Stock Adjustments': 'List Stock Adjustments',
'List Stock Counts': 'List Stock Counts',
'List Stock in Warehouse': 'List Stock in Warehouse',
'List Subsectors': 'List Subsectors',
'List Suppliers': 'List Suppliers',
'List Support Requests': 'List Support Requests',
'List Symbologies for Layer': 'List Symbologies for Layer',
'List Symbologies': 'List Symbologies',
'List Tagged Posts': 'List Tagged Posts',
'List Tags': 'List Tags',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Template Sections': 'List Template Sections',
'List Templates': 'List Templates',
'List Themes': 'List Themes',
'List Tours': 'List Tours',
'List Training Events': 'List Training Events',
'List Trainings': 'List Trainings',
'List Units': 'List Units',
'List Users': 'List Users',
'List Vehicle Assignments': 'List Vehicle Assignments',
'List Vehicle Details': 'List Vehicle Details',
'List Vehicle Types': 'List Vehicle Types',
'List Vehicles': 'List Vehicles',
'List Volunteer Cluster Positions': 'List Volunteer Cluster Positions',
'List Volunteer Cluster Types': 'List Volunteer Cluster Types',
'List Volunteer Clusters': 'List Volunteer Clusters',
'List Volunteer Roles': 'List Volunteer Roles',
'List Volunteers': 'List Volunteers',
'List Vulnerability Aggregated Indicators': 'List Vulnerability Aggregated Indicators',
'List Vulnerability Data': 'List Vulnerability Data',
'List Vulnerability Indicators': 'List Vulnerability Indicators',
'List Warehouse Types': 'List Warehouse Types',
'List Warehouses': 'List Warehouses',
'List Zone Types': 'List Zone Types',
'List Zones': 'List Zones',
'List all Entries': 'List all Entries',
'List available Scenarios': 'List available Scenarios',
'List information entries': 'List information entries',
'List of Appraisals': 'List of Appraisals',
'List of Facilities': 'List of Facilities',
'List of Professional Experience': 'List of Professional Experience',
'List of Roles': 'List of Roles',
'List saved searches': 'List saved searches',
'List': 'List',
'Listing of alert information items': 'Listing of alert information items',
'Live Help': 'Live Help',
'Livelihood': 'Livelihood',
'Load Cleaned Data into Database': 'Load Cleaned Data into Database',
'Load Raw File into Grid': 'Load Raw File into Grid',
'Load': 'Load',
'Loaded By': 'Loaded By',
'Loading Equipment': 'Loading Equipment',
'Loading report details': 'Loading report details',
'Loading': 'Loading',
'Local Currency': 'Local Currency',
'Local Name': 'Local Name',
'Local Names': 'Local Names',
'Location (Site)': 'Location (Site)',
'Location 1': 'Location 1',
'Location 2': 'Location 2',
'Location 3': 'Location 3',
'Location Added': 'Location Added',
'Location Deleted': 'Location Deleted',
'Location Detail': 'Location Detail',
'Location Details': 'Location Details',
'Location Found': 'Location Found',
'Location Group': 'Location Group',
'Location Hierarchies': 'Location Hierarchies',
'Location Hierarchy Level 1 Name': 'Location Hierarchy Level 1 Name',
'Location Hierarchy Level 2 Name': 'Location Hierarchy Level 2 Name',
'Location Hierarchy Level 3 Name': 'Location Hierarchy Level 3 Name',
'Location Hierarchy Level 4 Name': 'Location Hierarchy Level 4 Name',
'Location Hierarchy Level 5 Name': 'Location Hierarchy Level 5 Name',
'Location Hierarchy added': 'Location Hierarchy added',
'Location Hierarchy deleted': 'Location Hierarchy deleted',
'Location Hierarchy updated': 'Location Hierarchy updated',
'Location Hierarchy': 'Location Hierarchy',
'Location NOT Found': 'Location NOT Found',
'Location added to Organization': 'Location added to Organization',
'Location added': 'Location added',
'Location deleted': 'Location deleted',
'Location is Required!': 'Location is Required!',
'Location needs to have WKT!': 'Location needs to have WKT!',
'Location removed from Organization': 'Location removed from Organization',
'Location updated': 'Location updated',
'Location': 'Location',
'Locations of this level need to have a parent of level': 'Locations of this level need to have a parent of level',
'Locations': 'Locations',
'Lockdown': 'Lockdown',
'Locked': 'Locked',
'Location of tip': 'Location of tip',
'Log Entry Deleted': 'Log Entry Deleted',
'Log Entry Details': 'Log Entry Details',
'Log Entry': 'Log Entry',
'Log New Time': 'Log New Time',
'Log Time Spent': 'Log Time Spent',
'Log entry added': 'Log entry added',
'Log entry updated': 'Log entry updated',
'Log': 'Log',
'Logged By': 'Logged By',
'Logged Time Details': 'Logged Time Details',
'Logged Time': 'Logged Time',
'Login using Facebook account': 'Login using Facebook account',
'Login using Google account': 'Login using Google account',
'Login with Facebook': 'Login with Facebook',
'Login with Google': 'Login with Google',
'Login': 'Logga in',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400',
'Logo': 'Logo',
'Logout': 'Logga ut',
'Long Name': 'Long Name',
'Long Text': 'Long Text',
'Long pry bar': 'Long pry bar',
'Long-term care': 'Long-term care',
'Longitude is Invalid!': 'Longitude is Invalid!',
'Longitude is West - East (sideways).': 'Longitude is West - East (sideways).',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.',
'Longitude must be between -180 and 180.': 'Longitude must be between -180 and 180.',
'Longitude of Map Center': 'Longitude of Map Center',
'Longitude of far eastern end of the region of interest.': 'Longitude of far eastern end of the region of interest.',
'Longitude of far western end of the region of interest.': 'Longitude of far western end of the region of interest.',
'Longitude should be between': 'Longitude should be between',
'Longitude': 'Longitude',
'Looting': 'Looting',
'Lost Password': 'Lost Password',
'Lost': 'Lost',
'Low Tide Depth': 'Low Tide Depth',
'Low': 'Low',
'MEDIAN': 'MEDIAN',
'MGRS Layer': 'MGRS Layer',
'MIME content type and sub-type as described in [RFC 2046]. (As of this document, the current IANA registered MIME types are listed at http://www.iana.org/assignments/media-types/)': 'MIME content type and sub-type as described in [RFC 2046]. (As of this document, the current IANA registered MIME types are listed at http://www.iana.org/assignments/media-types/)',
'MODERATE': 'MODERATE',
'MY REPORTS': 'MY REPORTS',
'Magnetic Storm': 'Magnetic Storm',
'Mailing List Details': 'Mailing List Details',
'Mailing List Name': 'Mailing List Name',
'Mailing Lists': 'Mailing Lists',
'Mailing list added': 'Mailing list added',
'Mailing list deleted': 'Mailing list deleted',
'Mailing list updated': 'Mailing list updated',
'Mailing list': 'Mailing list',
'Main?': 'Main?',
'Major Damage': 'Major Damage',
'Major expenses': 'Major expenses',
'Major outward damage': 'Major outward damage',
'Major': 'Major',
'Make Commitment': 'Make Commitment',
'Make Donation': 'Make Donation',
'Make New Commitment': 'Make New Commitment',
'Make People Request': 'Make People Request',
'Make Request': 'Make Request',
'Make Supplies Request': 'Make Supplies Request',
'Male': 'Male',
'Manage Cache': 'Manage Cache',
'Manage Events': 'Manage Events',
'Manage Incidents': 'Manage Incidents',
'Manage Layers in Catalog': 'Manage Layers in Catalog',
'Manage Members': 'Manage Members',
'Manage Missions': 'Manage Missions',
'Manage Returns': 'Manage Returns',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Your Facilities': 'Manage Your Facilities',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Managing Office': 'Managing Office',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?',
'Manual Synchronization': 'Manual Synchronization',
'Manual synchronization completed.': 'Manual synchronization completed.',
'Manual synchronization scheduled - refresh page to update status.': 'Manual synchronization scheduled - refresh page to update status.',
'Manual synchronization started in the background.': 'Manual synchronization started in the background.',
'Many': 'Many',
'Map Center Latitude': 'Map Center Latitude',
'Map Center Longitude': 'Map Center Longitude',
'Map Configuration Details': 'Map Configuration Details',
'Map Configuration added': 'Map Configuration added',
'Map Configuration deleted': 'Map Configuration deleted',
'Map Configuration removed': 'Map Configuration removed',
'Map Configuration updated': 'Map Configuration updated',
'Map Configuration': 'Map Configuration',
'Map Configurations': 'Map Configurations',
'Map Settings': 'Map Settings',
'Map Viewing Client': 'Map Viewing Client',
'Map Zoom': 'Map Zoom',
'Map cannot display without prepop data!': 'Map cannot display without prepop data!',
'Map has been copied and set as Default': 'Map has been copied and set as Default',
'Map has been set as Default': 'Map has been set as Default',
'Map is already your Default': 'Map is already your Default',
'Map not available: Cannot write projection file - %s': 'Map not available: Cannot write projection file - %s',
'Map not available: No Projection configured': 'Map not available: No Projection configured',
'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'Map not available: Projection %(projection)s not supported - please add definition to %(path)s',
'Map of Base Stations': 'Map of Base Stations',
'Map of Communities': 'Map of Communities',
'Map of Facilities': 'Map of Facilities',
'Map of Fire Stations': 'Map of Fire Stations',
'Map of Gauges': 'Map of Gauges',
'Map of Hospitals': 'Map of Hospitals',
'Map of Incident Reports': 'Map of Incident Reports',
'Map of Offices': 'Map of Offices',
'Map of Projects': 'Map of Projects',
'Map of Requests': 'Map of Requests',
'Map of Resources': 'Map of Resources',
'Map of Vehicles': 'Map of Vehicles',
'Map of Warehouses': 'Map of Warehouses',
'Map': 'Karta',
'Marine Security': 'Marine Security',
'Marital Status': 'Marital Status',
'Mark Sender': 'Mark Sender',
'Mark as duplicate': 'Mark as duplicate',
'Marker Details': 'Marker Details',
'Marker Levels': 'Marker Levels',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Marker': 'Marker',
'Markers': 'Markers',
'Master Message Log': 'Master Message Log',
'Master': 'Master',
'Match Requests': 'Match Requests',
'Match?': 'Match?',
'Matching Catalog Items': 'Matching Catalog Items',
'Matching Items': 'Matching Items',
'Matching Records': 'Matching Records',
'Matching Vehicle Types': 'Matching Vehicle Types',
'Max Height': 'Max Height',
'Max': 'Max',
'Maximum Extent': 'Maximum Extent',
'Maximum Location Latitude': 'Maximum Location Latitude',
'Maximum Location Longitude': 'Maximum Location Longitude',
'Maximum must be greater than minimum': 'Maximum must be greater than minimum',
'Maximum': 'Maximum',
'Mean Absolute Deviation': 'Mean Absolute Deviation',
'Mean': 'Mean',
'Measure Area: Click the points around the polygon & end with a double-click': 'Measure Area: Click the points around the polygon & end with a double-click',
'Measure Length: Click the points along the path & end with a double-click': 'Measure Length: Click the points along the path & end with a double-click',
'Median Absolute Deviation': 'Median Absolute Deviation',
'Median': 'Median',
'Medical Conditions': 'Medical Conditions',
'Medical and public health': 'Medical and public health',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Member Deployed': 'Member Deployed',
'Member Details': 'Member Details',
'Member ID': 'Member ID',
'Member Organizations': 'Member Organizations',
'Member added': 'Member added',
'Member deleted': 'Member deleted',
'Member updated': 'Member updated',
'Members': 'Members',
'Membership Details': 'Membership Details',
'Membership Fee': 'Membership Fee',
'Membership Paid': 'Membership Paid',
'Membership Type Details': 'Membership Type Details',
'Membership Type added': 'Membership Type added',
'Membership Type deleted': 'Membership Type deleted',
'Membership Type updated': 'Membership Type updated',
'Membership Types': 'Membership Types',
'Membership updated': 'Membership updated',
'Membership': 'Membership',
'Memberships': 'Memberships',
'Menu Entries': 'Menu Entries',
'Menu Entry Details': 'Menu Entry Details',
'Menu Entry added': 'Menu Entry added',
'Menu Entry deleted': 'Menu Entry deleted',
'Menu Entry updated': 'Menu Entry updated',
'Merge records': 'Merge records',
'Merge': 'Merge',
'Message Details': 'Message Details',
'Message Log': 'Message Log',
'Message Sent': 'Message Sent',
'Message Source': 'Message Source',
'Message Text': 'Message Text',
'Message Type': 'Message Type',
'Message Variable': 'Message Variable',
'Message deleted': 'Message deleted',
'Message sending failed! Reason:': 'Message sending failed! Reason:',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Message': 'Message',
'Messaging': 'Messaging',
'Metadata': 'Metadata',
'Metal Stud': 'Metal Stud',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method disabled': 'Method disabled',
'Method used': 'Method used',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Migrants or ethnic minorities',
'Mileage': 'Mileage',
'Milestone Added': 'Milestone Added',
'Milestone Deleted': 'Milestone Deleted',
'Milestone Details': 'Milestone Details',
'Milestone Updated': 'Milestone Updated',
'Milestone': 'Milestone',
'Milestones': 'Milestones',
'Min': 'Min',
'Minimal to no known threat to life or property': 'Minimal to no known threat to life or property',
'Minimum Location Latitude': 'Minimum Location Latitude',
'Minimum Location Longitude': 'Minimum Location Longitude',
'Minimum': 'Minimum',
'Minor Damage': 'Minor Damage',
'Minor/None': 'Minor/None',
'Minorities participating in coping activities': 'Minorities participating in coping activities',
'Minutes must be a number.': 'Minutes must be a number.',
'Minutes must be less than 60.': 'Minutes must be less than 60.',
'Minutes per Month': 'Minutes per Month',
'Miscellaneous': 'Miscellaneous',
'Missing Person Details': 'Missing Person Details',
'Missing Person': 'Missing Person',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons': 'Missing Persons',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Missing': 'Missing',
'Mission Details updated': 'Mission Details updated',
'Mission added': 'Mission added',
'Mission deleted': 'Mission deleted',
'Mission': 'Mission',
'Missions': 'Missions',
'Mobile Basic Assessment': 'Mobile Basic Assessment',
'Mobile Commons (Inbound)': 'Mobile Commons (Inbound)',
'Mobile Commons Setting Details': 'Mobile Commons Setting Details',
'Mobile Commons Setting added': 'Mobile Commons Setting added',
'Mobile Commons Setting deleted': 'Mobile Commons Setting deleted',
'Mobile Commons Settings': 'Mobile Commons Settings',
'Mobile Commons settings updated': 'Mobile Commons settings updated',
'Mobile Phone Number': 'Mobile Phone Number',
'Mobile Phone': 'Mobile Phone',
'Mobile': 'Mobile',
'Mode': 'Mode',
'Model/Type': 'Model/Type',
'Modem settings updated': 'Modem settings updated',
'Modem': 'Modem',
'Moderate': 'Moderate',
'Moderator': 'Moderator',
'Module prefix': 'Module prefix',
'Module': 'Module',
'Module-wise Percentage of Translated Strings': 'Module-wise Percentage of Translated Strings',
'Mold Notes': 'Mold Notes',
'Mold Status': 'Mold Status',
'Mold removal equipment': 'Mold removal equipment',
'Mold removal': 'Mold removal',
'Monday': 'Monday',
'Monetization Details': 'Monetization Details',
'Monetization Report': 'Monetization Report',
'Monetization': 'Monetization',
'Monitor - Attend to information sources as described in instruction': 'Monitor - Attend to information sources as described in instruction',
'Month': 'Month',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Monthly': 'Monthly',
'Months': 'Months',
'More Info': 'More Info',
'More Options': 'More Options',
'Morgue Details': 'Morgue Details',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Morgue added': 'Morgue added',
'Morgue deleted': 'Morgue deleted',
'Morgue updated': 'Morgue updated',
'Morgue': 'Morgue',
'Morgues': 'Morgues',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'Mud/sand removal': 'Mud/sand removal',
'Multi-Family/Apts': 'Multi-Family/Apts',
'Multi-Option': 'Multi-Option',
'Multiple response types can be selected by holding down control and then selecting the items': 'Multiple response types can be selected by holding down control and then selecting the items',
'Multiple': 'Multiple',
'Muslim': 'Muslim',
'Must a location have a parent location?': 'Must a location have a parent location?',
'My Bookmarks': 'My Bookmarks',
'My Logged Hours': 'My Logged Hours',
'My Maps': 'My Maps',
'My Open Tasks': 'My Open Tasks',
'My reports': 'My reports',
'MyLabel': 'MyLabel',
'N/A': 'N/A',
'N95 Dust masks': 'N95 Dust masks',
'NO': 'NO',
'NONE': 'NONE',
'NOT %s AND NOT %s': 'NOT %s AND NOT %s',
'NOT %s OR NOT %s': 'NOT %s OR NOT %s',
'NRs per datum': 'NRs per datum',
'Name and/or ID': 'Name and/or ID',
'Name field is required!': 'Name field is required!',
'Name for your Twilio Account.': 'Name for your Twilio Account.',
'Name of Award': 'Name of Award',
'Name of Driver': 'Name of Driver',
'Name of Father': 'Name of Father',
'Name of Institute': 'Name of Institute',
'Name of Map': 'Name of Map',
'Name of Mother': 'Name of Mother',
'Name of a programme or another project which this project is implemented as part of': 'Name of a programme or another project which this project is implemented as part of',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the repository (for your own reference)': 'Name of the repository (for your own reference)',
'Name': 'Name',
'Name, and/or ID': 'Name, and/or ID',
'National ID Card': 'National ID Card',
'Nationality of the person.': 'Nationality of the person.',
'Nationality': 'Nationality',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
'Need to be logged-in to be able to submit assessments': 'Need to be logged-in to be able to submit assessments',
'Need to configure Twitter Authentication': 'Need to configure Twitter Authentication',
'Need to configure an Email Address!': 'Need to configure an Email Address!',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a table!': 'Need to specify a table!',
'Needs Maintenance': 'Needs Maintenance',
'Needs to reduce vulnerability to violence': 'Needs to reduce vulnerability to violence',
'Needs': 'Needs',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neighborhood': 'Neighborhood',
'Neighbouring building hazard': 'Neighbouring building hazard',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network Details': 'Network Details',
'Network added': 'Network added',
'Network removed': 'Network removed',
'Network updated': 'Network updated',
'Network': 'Network',
'Networks': 'Networks',
'Neurology': 'Neurology',
'Never': 'Never',
'New Activity Type': 'New Activity Type',
'New Alert': 'New Alert',
'New Annual Budget created': 'New Annual Budget created',
'New Assessment reported from': 'New Assessment reported from',
'New Checklist': 'New Checklist',
'New Entry in Asset Log': 'New Entry in Asset Log',
'New Entry': 'New Entry',
'New Event': 'New Event',
'New Hazard': 'New Hazard',
'New Home': 'New Home',
'New Location': 'New Location',
'New Mission': 'New Mission',
'New Organization': 'Lägg Till Organisation',
'New Output': 'New Output',
'New Page': 'New Page',
'New Patient': 'New Patient',
'New Post': 'New Post',
'New Recipient': 'New Recipient',
'New Record': 'New Record',
'New Records': 'New Records',
'New Relative': 'New Relative',
'New Role': 'New Role',
'New Scenario': 'New Scenario',
'New Sector': 'New Sector',
'New Service': 'New Service',
'New Status Report': 'New Status Report',
'New Stock Adjustment': 'New Stock Adjustment',
'New Stock Count': 'New Stock Count',
'New Support Request': 'New Support Request',
'New Theme': 'New Theme',
'New cases in the past 24h': 'New cases in the past 24h',
'New updates are available.': 'New updates are available.',
'New': 'Lägg till',
'News': 'News',
'Next View': 'Next View',
'Next run': 'Next run',
'Next': 'Next',
'No Accounts currently defined': 'No Accounts currently defined',
'No Activities Found': 'No Activities Found',
'No Activity Types Found': 'No Activity Types Found',
'No Activity Types found for this Activity': 'No Activity Types found for this Activity',
'No Activity Types found for this Project Location': 'No Activity Types found for this Project Location',
'No Affiliations defined': 'No Affiliations defined',
'No Airports currently registered': 'No Airports currently registered',
'No Alerts currently registered': 'No Alerts currently registered',
'No Alternative Items currently registered': 'No Alternative Items currently registered',
'No Appraisals found': 'No Appraisals found',
'No Assessment Answers': 'No Assessment Answers',
'No Assessment Questions': 'No Assessment Questions',
'No Assessment Summaries currently registered': 'No Assessment Summaries currently registered',
'No Assessment Templates': 'No Assessment Templates',
'No Assessments currently registered': 'No Assessments currently registered',
'No Assessments found': 'No Assessments found',
'No Assets currently registered in this incident': 'No Assets currently registered in this incident',
'No Assets currently registered': 'No Assets currently registered',
'No Awards found': 'No Awards found',
'No Base Layer': 'No Base Layer',
'No Base Stations currently registered': 'No Base Stations currently registered',
'No Baseline Types currently registered': 'No Baseline Types currently registered',
'No Baselines currently registered': 'No Baselines currently registered',
'No Beneficiaries Found': 'No Beneficiaries Found',
'No Beneficiary Types Found': 'No Beneficiary Types Found',
'No Branch Organizations currently registered': 'No Branch Organizations currently registered',
'No Brands currently registered': 'No Brands currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Camp Services currently registered': 'No Camp Services currently registered',
'No Camp Statuses currently registered': 'No Camp Statuses currently registered',
'No Camp Types currently registered': 'No Camp Types currently registered',
'No Campaigns Found': 'No Campaigns Found',
'No Camps currently registered': 'No Camps currently registered',
'No Cases found': 'No Cases found',
'No Catalog Items currently registered': 'No Catalog Items currently registered',
'No Catalogs currently registered': 'No Catalogs currently registered',
'No Checklist available': 'No Checklist available',
'No Cluster Subsectors currently registered': 'No Cluster Subsectors currently registered',
'No Clusters currently registered': 'No Clusters currently registered',
'No Coalitions currently recorded': 'No Coalitions currently recorded',
'No Commitment Items currently registered': 'No Commitment Items currently registered',
'No Commitments': 'No Commitments',
'No Communities Found': 'No Communities Found',
'No Completed Assessment Forms': 'No Completed Assessment Forms',
'No Contacts Found': 'No Contacts Found',
'No Contacts currently registered': 'No Contacts currently registered',
'No Data Purchased': 'No Data Purchased',
'No Data currently defined for this Theme Layer': 'No Data currently defined for this Theme Layer',
'No Data': 'No Data',
'No Dataset Prices': 'No Dataset Prices',
'No Details currently registered': 'No Details currently registered',
'No Disaster Assessments': 'No Disaster Assessments',
'No Distribution Items Found': 'No Distribution Items Found',
'No Distributions Found': 'No Distributions Found',
'No Documents found': 'No Documents found',
'No Donations': 'No Donations',
'No Donors currently registered': 'No Donors currently registered',
'No Emails currently in InBox': 'No Emails currently in InBox',
'No Emails currently in Outbox': 'No Emails currently in Outbox',
'No Entries Found': 'No Entries Found',
'No Evacuation Routes currently registered for this event': 'No Evacuation Routes currently registered for this event',
'No Event Types currently registered': 'No Event Types currently registered',
'No Events currently registered': 'No Events currently registered',
'No Facilities currently registered in this incident': 'No Facilities currently registered in this incident',
'No Facilities currently registered': 'No Facilities currently registered',
'No Facility Types currently registered': 'No Facility Types currently registered',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Fire Stations could be found': 'No Fire Stations could be found',
'No Fire Stations currently registered': 'No Fire Stations currently registered',
'No GPS data currently registered': 'No GPS data currently registered',
'No Gauges currently registered': 'No Gauges currently registered',
'No Groups currently defined': 'No Groups currently defined',
'No Groups currently registered': 'No Groups currently registered',
'No Hazards currently recorded': 'No Hazards currently recorded',
'No Hazards currently registered': 'No Hazards currently registered',
'No Hazards found for this Project': 'No Hazards found for this Project',
'No Heliports currently registered': 'No Heliports currently registered',
'No Homes currently registered': 'No Homes currently registered',
'No Hospitals currently registered': 'No Hospitals currently registered',
'No Human Resources currently assigned to this incident': 'No Human Resources currently assigned to this incident',
'No Human Resources currently registered in this scenario': 'No Human Resources currently registered in this scenario',
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Images currently registered': 'No Images currently registered',
'No Impact Types currently registered': 'No Impact Types currently registered',
'No Impacts currently registered': 'No Impacts currently registered',
'No Incident Reports currently registered for this event': 'No Incident Reports currently registered for this event',
'No Incident Reports currently registered in this incident': 'No Incident Reports currently registered in this incident',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incident Types currently registered in this event': 'No Incident Types currently registered in this event',
'No Incident Types currently registered': 'No Incident Types currently registered',
'No Incidents currently registered in this event': 'No Incidents currently registered in this event',
'No Inventories currently have suitable alternative items in stock': 'No Inventories currently have suitable alternative items in stock',
'No Inventories currently have this item in stock': 'No Inventories currently have this item in stock',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Packs currently registered': 'No Item Packs currently registered',
'No Items currently registered in this Procurement Plan': 'No Items currently registered in this Procurement Plan',
'No Items currently registered': 'No Items currently registered',
'No Items currently requested': 'No Items currently requested',
'No Keywords Found': 'No Keywords Found',
'No Kits currently registered': 'No Kits currently registered',
'No Kits': 'No Kits',
'No Layers currently configured in this Profile': 'No Layers currently configured in this Profile',
'No Layers currently defined in this Symbology': 'No Layers currently defined in this Symbology',
'No Layers currently defined': 'No Layers currently defined',
'No Level 1 Assessments currently registered': 'No Level 1 Assessments currently registered',
'No Level 2 Assessments currently registered': 'No Level 2 Assessments currently registered',
'No Location Hierarchies currently defined': 'No Location Hierarchies currently defined',
'No Locations Found': 'No Locations Found',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Locations found for this Organization': 'No Locations found for this Organization',
'No Mailing List currently established': 'No Mailing List currently established',
'No Map Configurations currently defined': 'No Map Configurations currently defined',
'No Map Configurations currently registered in this incident': 'No Map Configurations currently registered in this incident',
'No Map Configurations currently registered in this scenario': 'No Map Configurations currently registered in this scenario',
'No Markers currently available': 'No Markers currently available',
'No Matching Catalog Items': 'No Matching Catalog Items',
'No Matching Items': 'No Matching Items',
'No Matching Records': 'No Matching Records',
'No Matching Vehicle Types': 'No Matching Vehicle Types',
'No Member Selected!': 'No Member Selected!',
'No Members currently registered': 'No Members currently registered',
'No Memberships currently defined': 'No Memberships currently defined',
'No Menu Entries currently defined': 'No Menu Entries currently defined',
'No Messages currently in InBox': 'No Messages currently in InBox',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Messages currently in the Message Log': 'No Messages currently in the Message Log',
'No Messages found': 'No Messages found',
'No Milestones Found': 'No Milestones Found',
'No Missions currently registered': 'No Missions currently registered',
'No Mobile Commons Settings currently defined': 'No Mobile Commons Settings currently defined',
'No Networks currently recorded': 'No Networks currently recorded',
'No Office Types currently registered': 'No Office Types currently registered',
'No Offices currently registered': 'No Offices currently registered',
'No Open Tasks for %(project)s': 'No Open Tasks for %(project)s',
'No Orders registered': 'No Orders registered',
'No Organization Domains currently registered': 'No Organization Domains currently registered',
'No Organization Types currently registered': 'No Organization Types currently registered',
'No Organizations currently registered': 'No Organizations currently registered',
'No Organizations for Project(s)': 'No Organizations for Project(s)',
'No Organizations found for this Policy/Strategy': 'No Organizations found for this Policy/Strategy',
'No Packs for Item': 'No Packs for Item',
'No Parsers currently connected': 'No Parsers currently connected',
'No Partner Organizations currently registered': 'No Partner Organizations currently registered',
'No Patients currently registered': 'No Patients currently registered',
'No People currently committed': 'No People currently committed',
'No People currently registered in this camp': 'No People currently registered in this camp',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No People defined': 'No People defined',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'No Photos found',
'No PoI Types currently available': 'No PoI Types currently available',
'No PoIs available.': 'No PoIs available.',
'No Points of Interest currently available': 'No Points of Interest currently available',
'No Policies or Strategies found': 'No Policies or Strategies found',
'No Population Statistics currently registered': 'No Population Statistics currently registered',
'No Posts available': 'No Posts available',
'No Posts currently tagged to this event': 'No Posts currently tagged to this event',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Procurement Plans currently registered': 'No Procurement Plans currently registered',
'No Professional Experience found': 'No Professional Experience found',
'No Profiles currently have Configurations for this Layer': 'No Profiles currently have Configurations for this Layer',
'No Projections currently defined': 'No Projections currently defined',
'No Projects currently registered': 'Inga projekt registrerade',
'No Query currently defined': 'No Query currently defined',
'No Question Meta-Data': 'No Question Meta-Data',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Ratings for Skill Type': 'No Ratings for Skill Type',
'No Received Shipments': 'No Received Shipments',
'No Recipients Selected!': 'No Recipients Selected!',
'No Recipients Selected': 'No Recipients Selected',
'No Recipients currently defined': 'No Recipients currently defined',
'No Records currently available': 'No Records currently available',
'No Region': 'No Region',
'No Regions currently registered': 'No Regions currently registered',
'No Relatives currently registered': 'No Relatives currently registered',
'No Request Templates': 'No Request Templates',
'No Requests': 'No Requests',
'No Resource Types defined': 'No Resource Types defined',
'No Resources in Inventory': 'No Resources in Inventory',
'No Response Summaries Found': 'No Response Summaries Found',
'No Response': 'No Response',
'No Responses Found': 'No Responses Found',
'No Restrictions': 'No Restrictions',
'No Risks currently registered for this event': 'No Risks currently registered for this event',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles defined': 'No Roles defined',
'No Rooms currently registered': 'No Rooms currently registered',
'No SMS currently in InBox': 'No SMS currently in InBox',
'No SMS currently in Outbox': 'No SMS currently in Outbox',
'No Saved Queries': 'No Saved Queries',
'No Scenarios currently registered': 'No Scenarios currently registered',
'No Seaports currently registered': 'No Seaports currently registered',
'No Search saved': 'No Search saved',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sectors found for this Organization': 'No Sectors found for this Organization',
'No Sectors found for this Project': 'No Sectors found for this Project',
'No Sectors found for this Theme': 'No Sectors found for this Theme',
'No Security-Related Staff currently registered': 'No Security-Related Staff currently registered',
'No Senders Whitelisted': 'No Senders Whitelisted',
'No Sent Shipments': 'No Sent Shipments',
'No Services currently registered': 'No Services currently registered',
'No Services found for this Organization': 'No Services found for this Organization',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Statuses currently registered': 'No Shelter Statuses currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'No Shelters currently registered',
'No Shipment Items': 'No Shipment Items',
'No Skills Required': 'No Skills Required',
'No Skills currently requested': 'No Skills currently requested',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Staff currently registered': 'No Staff currently registered',
'No Station Parameters': 'No Station Parameters',
'No Statuses currently registered': 'No Statuses currently registered',
'No Stock currently registered in this Warehouse': 'No Stock currently registered in this Warehouse',
'No Stock currently registered': 'No Stock currently registered',
'No Subsectors currently registered': 'No Subsectors currently registered',
'No Suppliers currently registered': 'No Suppliers currently registered',
'No Support Requests currently registered': 'No Support Requests currently registered',
'No Symbologies currently defined for this Layer': 'No Symbologies currently defined for this Layer',
'No Symbologies currently defined': 'No Symbologies currently defined',
'No Tasks Assigned': 'No Tasks Assigned',
'No Tasks currently registered in this incident': 'No Tasks currently registered in this incident',
'No Teams currently registered': 'No Teams currently registered',
'No Template Sections': 'No Template Sections',
'No Themes currently registered': 'No Themes currently registered',
'No Themes found for this Activity': 'No Themes found for this Activity',
'No Themes found for this Project Location': 'No Themes found for this Project Location',
'No Themes found for this Project': 'No Themes found for this Project',
'No Time Logged': 'No Time Logged',
'No Tours currently registered': 'No Tours currently registered',
'No Trained People defined': 'No Trained People defined',
'No Tweets Available.': 'No Tweets Available.',
'No Tweets currently in InBox': 'No Tweets currently in InBox',
'No Tweets currently in Outbox': 'No Tweets currently in Outbox',
'No Twilio Settings currently defined': 'No Twilio Settings currently defined',
'No Types of People defined': 'No Types of People defined',
'No Types of Trained People defined': 'No Types of Trained People defined',
'No Users currently registered': 'No Users currently registered',
'No Vehicle Details currently defined': 'No Vehicle Details currently defined',
'No Vehicle Types currently registered': 'No Vehicle Types currently registered',
'No Vehicles could be found': 'No Vehicles could be found',
'No Vehicles currently assigned to this incident': 'No Vehicles currently assigned to this incident',
'No Vehicles currently registered': 'No Vehicles currently registered',
'No Volunteer Cluster Positions': 'No Volunteer Cluster Positions',
'No Volunteer Cluster Types': 'No Volunteer Cluster Types',
'No Volunteer Clusters': 'No Volunteer Clusters',
'No Volunteers currently registered': 'No Volunteers currently registered',
'No Warehouse Types currently registered': 'No Warehouse Types currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No Zone Types currently registered': 'No Zone Types currently registered',
'No Zones currently registered': 'No Zones currently registered',
'No access at all': 'No access at all',
'No access to this record!': 'No access to this record!',
'No alert information to show': 'No alert information to show',
'No alerts to show': 'No alerts to show',
'No annual budgets found': 'No annual budgets found',
'No assets currently registered in this scenario': 'No assets currently registered in this scenario',
'No contact information available': 'No contact information available',
'No contact method found': 'No contact method found',
'No contacts currently registered': 'No contacts currently registered',
'No contacts yet defined for this site': 'No contacts yet defined for this site',
'No data available in table': 'No data available in table',
'No data available': 'No data available',
'No data in this table - cannot create PDF!': 'No data in this table - cannot create PDF!',
'No databases in this application': 'No databases in this application',
'No dead body reports available': 'No dead body reports available',
'No demographic data currently defined': 'No demographic data currently defined',
'No demographics currently defined': 'No demographics currently defined',
'No education details currently registered': 'No education details currently registered',
'No entries currently available': 'No entries currently available',
'No entries found': 'No entries found',
'No entry available': 'No entry available',
'No facilities currently registered in this scenario': 'No facilities currently registered in this scenario',
'No forms for the corresponding resource have been downloaded yet.': 'No forms for the corresponding resource have been downloaded yet.',
'No further users can be assigned.': 'No further users can be assigned.',
'No items currently in stock': 'No items currently in stock',
'No items have been selected for shipping.': 'No items have been selected for shipping.',
'No jobs configured yet': 'No jobs configured yet',
'No jobs configured': 'No jobs configured',
'No location information defined!': 'No location information defined!',
'No matching element found in the data source': 'No matching element found in the data source',
'No matching records found': 'No matching records found',
'No matching result': 'No matching result',
'No membership types currently registered': 'No membership types currently registered',
'No more items may be added to this request': 'No more items may be added to this request',
'No morgues found': 'No morgues found',
'No options available': 'No options available',
'No options currently available': 'No options currently available',
'No organizations currently registered in this scenario': 'No organizations currently registered in this scenario',
'No outputs found': 'No outputs found',
'No posts currently available': 'No posts currently available',
'No posts currently set as module/resource homepages': 'No posts currently set as module/resource homepages',
'No posts currently tagged': 'No posts currently tagged',
'No problem group defined yet': 'No problem group defined yet',
'No records in this resource': 'No records in this resource',
'No records in this resource. Add one or more records manually and then retry.': 'No records in this resource. Add one or more records manually and then retry.',
'No records to review': 'No records to review',
'No report specified.': 'No report specified.',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No repositories configured': 'No repositories configured',
'No requests found': 'No requests found',
'No resources configured yet': 'No resources configured yet',
'No resources currently reported': 'No resources currently reported',
'No role to delete': 'No role to delete',
'No roles currently assigned to this user.': 'No roles currently assigned to this user.',
'No series currently defined': 'No series currently defined',
'No service profile available': 'No service profile available',
'No staff or volunteers currently registered': 'No staff or volunteers currently registered',
'No status information available': 'No status information available',
'No status information currently available': 'No status information currently available',
'No stock adjustments have been done': 'No stock adjustments have been done',
'No stock counts have been done': 'No stock counts have been done',
'No tags currently defined': 'No tags currently defined',
'No tasks currently registered in this scenario': 'No tasks currently registered in this scenario',
'No tasks currently registered': 'No tasks currently registered',
'No templates to show': 'No templates to show',
'No time stamps found in this resource': 'No time stamps found in this resource',
'No translations exist in spreadsheet': 'No translations exist in spreadsheet',
'No units currently registered': 'No units currently registered',
'No users have taken a tour': 'No users have taken a tour',
'No users with this role at the moment.': 'No users with this role at the moment.',
'No valid data in the file': 'No valid data in the file',
'No vulnerability aggregated indicators currently defined': 'No vulnerability aggregated indicators currently defined',
'No vulnerability data currently defined': 'No vulnerability data currently defined',
'No vulnerability indicators currently defined': 'No vulnerability indicators currently defined',
'No': 'No',
'Non-structural Hazards': 'Non-structural Hazards',
'None (no such record)': 'None (no such record)',
'None - No action recommended': 'None - No action recommended',
'None of the above': 'None of the above',
'None': 'None',
'Nonexistent or invalid resource': 'Nonexistent or invalid resource',
'Noodles': 'Noodles',
'Normal Job': 'Normal Job',
'Normal': 'Normal',
'Not Applicable': 'Not Applicable',
'Not Authorized': 'Not Authorized',
'Not Parsed': 'Not Parsed',
'Not Possible': 'Not Possible',
'Not Started': 'Not Started',
'Not actioned': 'Not actioned',
'Not allowed to Donate without matching to a Request!': 'Not allowed to Donate without matching to a Request!',
'Not expected to occur (p ~ 0)': 'Not expected to occur (p ~ 0)',
'Not implemented': 'Not implemented',
'Not installed or incorrectly configured.': 'Not installed or incorrectly configured.',
'Not yet a Member of any Group': 'Not yet a Member of any Group',
'Not you?': 'Not you?',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead',
'Note that when using geowebcache, this can be set in the GWC config.': 'Note that when using geowebcache, this can be set in the GWC config.',
'Note': 'Note',
'Note: Make sure that all the text cells are quoted in the csv file before uploading': 'Note: Make sure that all the text cells are quoted in the csv file before uploading',
'Notice to Airmen': 'Notice to Airmen',
'Notification frequency': 'Notification frequency',
'Notification method': 'Notification method',
'Notification': 'Notification',
'Notify': 'Notify',
'Number of Barges': 'Number of Barges',
'Number of Beneficiaries': 'Number of Beneficiaries',
'Number of Completed Assessment Forms': 'Number of Completed Assessment Forms',
'Number of Facilities': 'Number of Facilities',
'Number of Incidents': 'Number of Incidents',
'Number of Items': 'Number of Items',
'Number of Patients': 'Number of Patients',
'Number of People Affected': 'Number of People Affected',
'Number of People Dead': 'Number of People Dead',
'Number of People Displaced': 'Number of People Displaced',
'Number of People Injured': 'Number of People Injured',
'Number of People Missing': 'Number of People Missing',
'Number of People Required': 'Number of People Required',
'Number of People': 'Number of People',
'Number of Responses': 'Number of Responses',
'Number of Trained People': 'Number of Trained People',
'Number of Tugboats': 'Number of Tugboats',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Number of alternative places for studying',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of bodies found': 'Number of bodies found',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of items': 'Number of items',
'Number of newly admitted patients during the past 24 hours.': 'Number of newly admitted patients during the past 24 hours.',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of private schools': 'Number of private schools',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of residential units not habitable': 'Number of residential units not habitable',
'Number of residential units': 'Number of residential units',
'Number of vacant/available beds in this facility. Automatically updated from daily reports.': 'Number of vacant/available beds in this facility. Automatically updated from daily reports.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)',
'Number': 'Number',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Number/Percentage of affected population that is Male & Aged 0-5',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Number/Percentage of affected population that is Male & Aged 26-60',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numeric': 'Numeric',
'Nursery Beds': 'Nursery Beds',
'Nutrition problems': 'Nutrition problems',
'Nutrition': 'Nutrition',
'OCR Form Review': 'OCR Form Review',
'OCR module is disabled. Ask the Server Administrator to enable it.': 'OCR module is disabled. Ask the Server Administrator to enable it.',
'OCR review data has been stored into the database successfully.': 'OCR review data has been stored into the database successfully.',
'OK': 'OK',
'OR Status Reason': 'OR Status Reason',
'OR Status': 'OR Status',
'OSM file generation failed!': 'OSM file generation failed!',
'OSM file generation failed: %s': 'OSM file generation failed: %s',
'OTHER DATA': 'OTHER DATA',
'OTHER REPORTS': 'OTHER REPORTS',
'OVERALL RESILIENCE': 'OVERALL RESILIENCE',
'Object': 'Object',
'Objectives': 'Objectives',
'Observed: determined to have occurred or to be ongoing': 'Observed: determined to have occurred or to be ongoing',
'Observer': 'Observer',
'Obsolete': 'Obsolete',
'Obstetrics/Gynecology': 'Obstetrics/Gynecology',
'Off': 'Off',
'Office Address': 'Office Address',
'Office Details': 'Office Details',
'Office Phone': 'Office Phone',
'Office Type Details': 'Office Type Details',
'Office Type added': 'Office Type added',
'Office Type deleted': 'Office Type deleted',
'Office Type updated': 'Office Type updated',
'Office Type': 'Office Type',
'Office Types': 'Office Types',
'Office added': 'Office added',
'Office deleted': 'Office deleted',
'Office updated': 'Office updated',
'Office': 'Office',
'Offices': 'Offices',
'Oil Terminal Depth': 'Oil Terminal Depth',
'Older people as primary caregivers of children': 'Older people as primary caregivers of children',
'Older people in care homes': 'Older people in care homes',
'Older people participating in coping activities': 'Older people participating in coping activities',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On Hold': 'On Hold',
'On Order': 'On Order',
'On by default?': 'On by default?',
'On': 'On',
'One Time Cost': 'One Time Cost',
'One item is attached to this shipment': 'One item is attached to this shipment',
'One time cost': 'One time cost',
'One-time costs': 'One-time costs',
'One-time': 'One-time',
'Only showing accessible records!': 'Only showing accessible records!',
'Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system': 'Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system',
'Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system': 'Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system',
'Only visible to Email recipients': 'Only visible to Email recipients',
'Onset': 'Onset',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! Something went wrong on our side.',
'Opacity': 'Opacity',
'Open Appraisal': 'Open Appraisal',
'Open Chart': 'Open Chart',
'Open Incidents': 'Open Incidents',
'Open Map': 'Open Map',
'Open Member Profile (in a new tab)': 'Open Member Profile (in a new tab)',
'Open Report': 'Open Report',
'Open Table': 'Open Table',
'Open Tasks for %(project)s': 'Open Tasks for %(project)s',
'Open Tasks for Project': 'Open Tasks for Project',
'Open area': 'Open area',
'Open recent': 'Open recent',
'Open the merged record': 'Open the merged record',
'Open': 'Open',
'OpenStreetMap Layer': 'OpenStreetMap Layer',
'OpenStreetMap OAuth Consumer Key': 'OpenStreetMap OAuth Consumer Key',
'OpenStreetMap OAuth Consumer Secret': 'OpenStreetMap OAuth Consumer Secret',
'OpenWeatherMap Layer': 'OpenWeatherMap Layer',
'Opening Times': 'Opening Times',
'Operating Rooms': 'Operating Rooms',
'Operation not permitted': 'Operation not permitted',
'Operational': 'Operational',
'Operator': 'Operator',
'Opportunities to Volunteer On-Site?': 'Opportunities to Volunteer On-Site?',
'Opportunities to Volunteer Remotely?': 'Opportunities to Volunteer Remotely?',
'Optical Character Recognition': 'Optical Character Recognition',
'Option Other': 'Option Other',
'Option': 'Option',
'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Optional Subject to put into Email - can be used as a Security Password by the service provider',
'Optional link to an Incident which this Assessment was triggered by.': 'Optional link to an Incident which this Assessment was triggered by.',
'Optional password for HTTP Basic Authentication.': 'Optional password for HTTP Basic Authentication.',
'Optional selection of a MapServer map.': 'Optional selection of a MapServer map.',
'Optional selection of a background color.': 'Optional selection of a background color.',
'Optional selection of an alternate style.': 'Optional selection of an alternate style.',
'Optional username for HTTP Basic Authentication.': 'Optional username for HTTP Basic Authentication.',
'Optional': 'Optional',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon (:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. The name of an element whose contents should be put into Popups.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&typename=workspace_name:layer_name.',
'Options': 'Options',
'Or add a new language code': 'Or add a new language code',
'Order Created': 'Order Created',
'Order Date': 'Order Date',
'Order Details': 'Order Details',
'Order Due %(date)s': 'Order Due %(date)s',
'Order Item': 'Order Item',
'Order canceled': 'Order canceled',
'Order updated': 'Order updated',
'Order': 'Order',
'Orders': 'Orders',
'Organization Details': 'Organization Details',
'Organization Domain Details': 'Organization Domain Details',
'Organization Domain added': 'Organization Domain added',
'Organization Domain deleted': 'Organization Domain deleted',
'Organization Domain updated': 'Organization Domain updated',
'Organization Domains': 'Organization Domains',
'Organization Group': 'Organization Group',
'Organization Needs added': 'Organization Needs added',
'Organization Needs deleted': 'Organization Needs deleted',
'Organization Needs updated': 'Organization Needs updated',
'Organization Needs': 'Organization Needs',
'Organization Resource Type': 'Organization Resource Type',
'Organization Resource': 'Organization Resource',
'Organization Type Details': 'Organization Type Details',
'Organization Type added': 'Organization Type added',
'Organization Type deleted': 'Organization Type deleted',
'Organization Type updated': 'Organization Type updated',
'Organization Type': 'Organization Type',
'Organization Types': 'Organization Types',
'Organization Units': 'Organization Units',
'Organization added to Policy/Strategy': 'Organization added to Policy/Strategy',
'Organization added to Project': 'Organization added to Project',
'Organization added': 'Organization added',
'Organization deleted': 'Organization deleted',
'Organization group': 'Organization group',
'Organization removed from Policy/Strategy': 'Organization removed from Policy/Strategy',
'Organization removed from Project': 'Organization removed from Project',
'Organization removed': 'Organization removed',
'Organization updated': 'Organization updated',
'Organization': 'Organization',
'Organization(s)': 'Organization(s)',
'Organization/Branch': 'Organization/Branch',
'Organization/Supplier': 'Organization/Supplier',
'Organizations / Teams / Facilities': 'Organizations / Teams / Facilities',
'Organizations': 'Organisationer',
'Origin of the separated children': 'Origin of the separated children',
'Origin': 'Origin',
'Original Quantity': 'Original Quantity',
'Original Value per Pack': 'Original Value per Pack',
'Original': 'Original',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Address': 'Other Address',
'Other Contact Information': 'Other Contact Information',
'Other Data': 'Other Data',
'Other Details': 'Other Details',
'Other Evidence': 'Other Evidence',
'Other Faucet/Piped Water': 'Other Faucet/Piped Water',
'Other Inventories': 'Other Inventories',
'Other Isolation': 'Other Isolation',
'Other Name': 'Other Name',
'Other Reports': 'Other Reports',
'Other Users': 'Other Users',
'Other Warehouse': 'Other Warehouse',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys 13-17yrs': 'Other activities of boys 13-17yrs',
'Other activities of boys <12yrs before disaster': 'Other activities of boys <12yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Other alternative places for study',
'Other assistance needed': 'Other assistance needed',
'Other assistance, Rank': 'Other assistance, Rank',
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Other factors affecting school attendance',
'Other major expenses': 'Other major expenses',
'Other non-food items': 'Other non-food items',
'Other recommendations': 'Other recommendations',
'Other reports': 'Other reports',
'Other residential': 'Other residential',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other settings can only be set by editing a file on the server': 'Other settings can only be set by editing a file on the server',
'Other side dishes in stock': 'Other side dishes in stock',
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Other': 'Other',
'Others': 'Others',
'Out': 'Out',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbound?': 'Outbound?',
'Outbox': 'Outbox',
'Outcomes, Impact, Challenges': 'Outcomes, Impact, Challenges',
'Outgoing SMS Handler': 'Outgoing SMS Handler',
'Output added': 'Output added',
'Output removed': 'Output removed',
'Output updated': 'Output updated',
'Output': 'Output',
'Outputs': 'Outputs',
'Over 60': 'Over 60',
'Overall Hazards': 'Overall Hazards',
'Overall Resilience': 'Overall Resilience',
'Overall status of the clinical operations.': 'Overall status of the clinical operations.',
'Overall status of the facility operations.': 'Overall status of the facility operations.',
'Overhead falling hazard': 'Overhead falling hazard',
'Overland Flow Flood': 'Overland Flow Flood',
'Overview': 'Overview',
'Own': 'Own',
'Owned By (Organization/Branch)': 'Owned By (Organization/Branch)',
'Owned Records': 'Owned Records',
'Owned Resources': 'Owned Resources',
'Ownership': 'Ownership',
'Owning Organization': 'Owning Organization',
'Owning organization': 'Owning organization',
'PAHO UID': 'PAHO UID',
'PDAM': 'PDAM',
'PDF File': 'PDF File',
'PDF': 'PDF',
'PIFACC Priorities': 'PIFACC Priorities',
'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures',
'PIFACC-2: Governance and Decision Making': 'PIFACC-2: Governance and Decision Making',
'PIFACC-3: Improving our understanding of climate change': 'PIFACC-3: Improving our understanding of climate change',
'PIFACC-4: Education, Training and Awareness': 'PIFACC-4: Education, Training and Awareness',
'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions',
'PIFACC-6: Partnerships and Cooperation': 'PIFACC-6: Partnerships and Cooperation',
'PIN number from Twitter (leave empty to detach account)': 'PIN number from Twitter (leave empty to detach account)',
'PIN': 'PIN',
'POOR': 'POOR',
'POPULATION DENSITY': 'POPULATION DENSITY',
'POPULATION:': 'POPULATION:',
'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only': 'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only',
'Pack': 'Pack',
'Packs': 'Packs',
'Page': 'Page',
'Paid': 'Paid',
'Paint brushes': 'Paint brushes',
'Pan Map: keep the left mouse button pressed and drag the map': 'Pan Map: keep the left mouse button pressed and drag the map',
'Parameters': 'Parameters',
'Parapets, ornamentation': 'Parapets, ornamentation',
'Parent Item': 'Parent Item',
'Parent Project': 'Parent Project',
'Parent needs to be of the correct level': 'Parent needs to be of the correct level',
'Parent needs to be set for locations of level': 'Parent needs to be set for locations of level',
'Parent needs to be set': 'Parent needs to be set',
'Parent': 'Parent',
'Parents/Caregivers missing children': 'Parents/Caregivers missing children',
'Parking Area': 'Parking Area',
'Parking/Tarmac Space Capacity': 'Parking/Tarmac Space Capacity',
'Parking/Tarmac Space Units': 'Parking/Tarmac Space Units',
'Parse': 'Parse',
'Parsed': 'Parsed',
'Parser Connection Details': 'Parser Connection Details',
'Parser Connections': 'Parser Connections',
'Parser connected': 'Parser connected',
'Parser connection removed': 'Parser connection removed',
'Parser connection updated': 'Parser connection updated',
'Parser': 'Parser',
'Parsing Status': 'Parsing Status',
'Part of the URL to call to access the Features': 'Part of the URL to call to access the Features',
'Partial': 'Partial',
'Participant Details': 'Participant Details',
'Participant added': 'Participant added',
'Participant deleted': 'Participant deleted',
'Participant updated': 'Participant updated',
'Participant': 'Participant',
'Participants': 'Participants',
'Partner National Society': 'Partner National Society',
'Partner Organization Details': 'Partner Organization Details',
'Partner Organization added': 'Partner Organization added',
'Partner Organization deleted': 'Partner Organization deleted',
'Partner Organization updated': 'Partner Organization updated',
'Partner Organizations': 'Partner Organizations',
'Partner': 'Partner',
'Partners': 'Partners',
'Pass': 'Pass',
'Passport': 'Passport',
'Password to use for authentication at the remote site.': 'Password to use for authentication at the remote site.',
'Password': 'Password',
'Pathology': 'Pathology',
'Patient Details': 'Patient Details',
'Patient Transportation Ambulance': 'Patient Transportation Ambulance',
'Patient added': 'Patient added',
'Patient deleted': 'Patient deleted',
'Patient updated': 'Patient updated',
'Patient': 'Patient',
'Patients': 'Patients',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Pediatrics',
'Pending': 'Pending',
'People Details': 'People Details',
'People Trapped': 'People Trapped',
'People added to Commitment': 'People added to Commitment',
'People added': 'People added',
'People deleted': 'People deleted',
'People removed from Commitment': 'People removed from Commitment',
'People updated': 'People updated',
'People': 'People',
'Percentage': 'Percentage',
'Performance Rating': 'Performance Rating',
'Permanent Home Address': 'Permanent Home Address',
'Permission from Owner to Work': 'Permission from Owner to Work',
'Person Details': 'Person Details',
'Person Entity': 'Person Entity',
'Person Registry': 'Personer',
'Person Transportation Tactical Vehicle': 'Person Transportation Tactical Vehicle',
'Person added to Commitment': 'Person added to Commitment',
'Person added': 'Person added',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person interviewed': 'Person interviewed',
'Person must be specified!': 'Person must be specified!',
'Person or OU': 'Person or OU',
'Person removed from Commitment': 'Person removed from Commitment',
'Person who has actually seen the person/group.': 'Person who has actually seen the person/group.',
'Person': 'Person',
'Person.': 'Person.',
'Person/Group': 'Person/Group',
'Personal Data': 'Persondata',
'Personal Effects Details': 'Personal Effects Details',
'Personal Effects': 'Personal Effects',
'Personal Profile': 'Personal Profile',
'Personal': 'Personal',
'Persons in institutions': 'Persons in institutions',
'Persons with disability (mental)': 'Persons with disability (mental)',
'Persons with disability (physical)': 'Persons with disability (physical)',
'Persons': 'Persons',
'Phone #': 'Phone #',
'Phone 1': 'Phone 1',
'Phone 2': 'Phone 2',
'Phone Number': 'Phone Number',
'Phone number is required': 'Phone number is required',
'Phone': 'Phone',
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange (Switchboard)': 'Phone/Exchange (Switchboard)',
'Photo Details': 'Photo Details',
'Photo Taken?': 'Photo Taken?',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'Photos',
'Physical Description': 'Physical Description',
'Physical Safety': 'Physical Safety',
'Pick axe': 'Pick axe',
'Place of Birth': 'Place of Birth',
'Place of Recovery': 'Place of Recovery',
'Place on Map': 'Place on Map',
'Place': 'Place',
'Places for defecation': 'Places for defecation',
'Places the children have been sent to': 'Places the children have been sent to',
'Planned %(date)s': 'Planned %(date)s',
'Planned Procurement Item': 'Planned Procurement Item',
'Planned Procurement': 'Planned Procurement',
'Planned Procurements': 'Planned Procurements',
'Planned': 'Planned',
'Playing': 'Playing',
'Please Select a Facility': 'Please Select a Facility',
'Please choose a type': 'Please choose a type',
'Please do not remove this sheet': 'Please do not remove this sheet',
'Please enter a %(site)s OR an Organization': 'Please enter a %(site)s OR an Organization',
'Please enter a %(site)s': 'Please enter a %(site)s',
'Please enter a first name': 'Please enter a first name',
'Please enter a last name': 'Please enter a last name',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter an Organization/Supplier': 'Please enter an Organization/Supplier',
'Please enter details of the Request': 'Please enter details of the Request',
'Please enter request details here.': 'Please enter request details here.',
'Please enter the details of the data you wish to purchase': 'Please enter the details of the data you wish to purchase',
'Please enter the details on the next screen.': 'Please enter the details on the next screen.',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Please enter the first few letters of the Person/Group for the autocomplete.',
'Please enter the recipient(s)': 'Please enter the recipient(s)',
'Please fill this!': 'Please fill this!',
'Please give an estimated figure about how many bodies have been found.': 'Please give an estimated figure about how many bodies have been found.',
'Please make your payment in person at the DHM office, or by bank Transfer to:': 'Please make your payment in person at the DHM office, or by bank transfer to:',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.',
'Please record Beneficiary according to the reporting needs of your project': 'Please record Beneficiary according to the reporting needs of your project',
'Please report here where you are:': 'Please report here where you are:',
'Please review demographic data for': 'Please review demographic data for',
'Please review indicator ratings for': 'Please review indicator ratings for',
'Please select a valid image!': 'Please select a valid image!',
'Please select exactly two records': 'Please select exactly two records',
'Please select': 'Please select',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions on how the situation could be improved.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Please use this field to record any additional information, including a history of the record if it is updated.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge Support': 'Pledge Support',
'Plumber': 'Plumber',
'PoI Type Details': 'PoI Type Details',
'PoI Type added': 'PoI Type added',
'PoI Type deleted': 'PoI Type deleted',
'PoI Type updated': 'PoI Type updated',
'PoI Types': 'PoI Types',
'PoI': 'PoI',
'PoIs successfully imported.': 'PoIs successfully imported.',
'Point of Interest Details': 'Point of Interest Details',
'Point of Interest added': 'Point of Interest added',
'Point of Interest deleted': 'Point of Interest deleted',
'Point of Interest updated': 'Point of Interest updated',
'Points of Interest': 'Points of Interest',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'Police',
'Policies & Strategies': 'Policies & Strategies',
'Policy or Strategy added': 'Policy or Strategy added',
'Policy or Strategy deleted': 'Policy or Strategy deleted',
'Policy or Strategy updated': 'Policy or Strategy updated',
'Policy or Strategy': 'Policy or Strategy',
'Poll': 'Poll',
'Pollution and other environmental': 'Pollution and other environmental',
'Polygon': 'Polygon',
'Poor': 'Poor',
'Population Statistic Details': 'Population Statistic Details',
'Population Statistic added': 'Population Statistic added',
'Population Statistic deleted': 'Population Statistic deleted',
'Population Statistic updated': 'Population Statistic updated',
'Population Statistic': 'Population Statistic',
'Population Statistics': 'Population Statistics',
'Population and number of households': 'Population and number of households',
'Population': 'Population',
'Popup Fields': 'Popup Fields',
'Popup Label': 'Popup Label',
'Porridge': 'Porridge',
'Port Closure': 'Port Closure',
'Port': 'Port',
'Portable lights': 'Portable lights',
'Portal at': 'Portal at',
'Position Catalog': 'Position Catalog',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position in tour': 'Position in tour',
'Position updated': 'Position updated',
'Position': 'Position',
'Positions': 'Positions',
'Possible but not likely (p <= ~50%)': 'Possible but not likely (p <= ~50%)',
'Possible threat to life or property': 'Possible threat to life or property',
'Post Details': 'Post Details',
'Post Tagged': 'Post Tagged',
'Post added': 'Post added',
'Post deleted': 'Post deleted',
'Post removed': 'Post removed',
'Post set as Module/Resource homepage': 'Post set as Module/Resource homepage',
'Post updated': 'Post updated',
'Post': 'Post',
'Post-impact shelterees are there for a longer time, so need more space to Sleep.': 'Post-impact shelterees are there for a longer time, so need more space to sleep.',
'Postcode': 'Postcode',
'Posted on': 'Posted on',
'Posts': 'Posts',
'Poultry restocking, Rank': 'Poultry restocking, Rank',
'Poultry': 'Poultry',
'Power Failure': 'Power Failure',
'Power Outage': 'Power Outage',
'Power Supply Type': 'Power Supply Type',
'Power': 'Power',
'Pre-cast connections': 'Pre-cast connections',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Prepare - Make preparations per the instruction': 'Prepare - Make preparations per the instruction',
'Prepare Shipment': 'Prepare Shipment',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
'Presence': 'Presence',
'Previous Deployments': 'Previous Deployments',
'Previous View': 'Previous View',
'Previous': 'Previous',
'Primarily Flood': 'Primarily Flood',
'Primary Occupancy': 'Primary Occupancy',
'Print Work Order': 'Print Work Order',
'Print': 'Print',
'Priority from 1 to 9. 1 is most preferred.': 'Priority from 1 to 9. 1 is most preferred.',
'Priority': 'Priority',
'Privacy': 'Privacy',
'Private - only to specified addresses (mentioned as recipients)': 'Private - only to specified addresses (mentioned as recipients)',
'Private': 'Private',
'Problem Code': 'Problem Code',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem connecting to twitter.com - please refresh': 'Problem connecting to twitter.com - please refresh',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Problem updated',
'Problem': 'Problem',
'Problems': 'Problems',
'Problems? Please call': 'Problems? Please call',
'Procedure': 'Procedure',
'Process Received Shipment': 'Process Received Shipment',
'Process Shipment to Send': 'Process Shipment to Send',
'Processed with KeyGraph?': 'Processed with KeyGraph?',
'Processing': 'Processing',
'Procured': 'Procured',
'Procurement Plan Details': 'Procurement Plan Details',
'Procurement Plan Item Details': 'Procurement Plan Item Details',
'Procurement Plan Item updated': 'Procurement Plan Item updated',
'Procurement Plan added': 'Procurement Plan added',
'Procurement Plan deleted': 'Procurement Plan deleted',
'Procurement Plan updated': 'Procurement Plan updated',
'Procurement Plan': 'Procurement Plan',
'Procurement Plans': 'Procurement Plans',
'Profession': 'Profession',
'Professional Experience Details': 'Professional Experience Details',
'Professional Experience added': 'Professional Experience added',
'Professional Experience deleted': 'Professional Experience deleted',
'Professional Experience updated': 'Professional Experience updated',
'Professional Experience': 'Professional Experience',
'Profile Configuration removed': 'Profile Configuration removed',
'Profile Configuration updated': 'Profile Configuration updated',
'Profile Configuration': 'Profile Configuration',
'Profile Configurations': 'Profile Configurations',
'Profile Configured': 'Profile Configured',
'Profile Details': 'Profile Details',
'Profile Page': 'Profile Page',
'Profile Picture': 'Profile Picture',
'Profile Picture?': 'Profile Picture?',
'Profiles': 'Profiles',
'Program Details': 'Program Details',
'Program Hours (Month)': 'Program Hours (Month)',
'Program Hours (Year)': 'Program Hours (Year)',
'Program added': 'Program added',
'Program deleted': 'Program deleted',
'Program updated': 'Program updated',
'Program': 'Program',
'Programs': 'Programs',
'Progress and Notes': 'Progress and Notes',
'Project Activity': 'Project Activity',
'Project Beneficiary Type': 'Project Beneficiary Type',
'Project Beneficiary': 'Project Beneficiary',
'Project Calendar': 'Project Calendar',
'Project Campaign Keyword': 'Project Campaign Keyword',
'Project Campaign Response Summary': 'Project Campaign Response Summary',
'Project Details': 'Project Details',
'Project Framework': 'Project Framework',
'Project Name': 'Project Name',
'Project Organization Details': 'Project Organization Details',
'Project Organization updated': 'Project Organization updated',
'Project Organizations': 'Project Organizations',
'Project Report': 'Project Report',
'Project Task': 'Project Task',
'Project Time Report': 'Project Time Report',
'Project added': 'Project added',
'Project deleted': 'Project deleted',
'Project not Found': 'Project not Found',
'Project updated': 'Project updated',
'Project': 'Projekt',
'Projection Details': 'Projection Details',
'Projection Type': 'Projection Type',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Projection updated',
'Projection': 'Projection',
'Projections': 'Projections',
'Projects Map': 'Projects Map',
'Projects': 'Projekt',
'Property': 'Property',
'Protection': 'Protection',
'Protocol': 'Protocol',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Provide an optional sketch of the entire building or damage points. Indicate damage points.',
'Proxy Server URL': 'Proxy Server URL',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Psychiatrics/Pediatric',
'Public - unrestricted audiences': 'Public - unrestricted audiences',
'Public Event': 'Public Event',
'Public and private transportation': 'Public and private transportation',
'Public assembly': 'Public assembly',
'Public': 'Public',
'Published on': 'Published on',
'Published': 'Published',
'Pump equipment': 'Pump equipment',
'Pump out water': 'Pump out water',
'Pump/Hoses': 'Pump/Hoses',
'Purchase Date': 'Purchase Date',
'Purchase New Data': 'Purchase New Data',
'Purchase date': 'Purchase date',
'Purchase': 'Purchase',
'Purchased Data Details': 'Purchased Data Details',
'Purchased Data removed': 'Purchased Data removed',
'Purchased Data updated': 'Purchased Data updated',
'Purpose': 'Purpose',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Pyroclastic Surge',
'Python GDAL required for Shapefile support!': 'Python GDAL required for Shapefile support!',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity Committed': 'Quantity Committed',
'Quantity Delivered': 'Quantity Delivered',
'Quantity Fulfilled': 'Quantity Fulfilled',
'Quantity Needed': 'Quantity Needed',
'Quantity Received': 'Quantity Received',
'Quantity Returned': 'Quantity Returned',
'Quantity Sent': 'Quantity Sent',
'Quantity in Transit': 'Quantity in Transit',
'Quantity range': 'Quantity range',
'Quantity': 'Quantity',
'Quarantine': 'Quarantine',
'Query Saved': 'Query Saved',
'Query added': 'Query added',
'Query deleted': 'Query deleted',
'Query updated': 'Query updated',
'Query': 'Query',
'Query:': 'Query:',
'Queryable?': 'Queryable?',
'Question Details': 'Question Details',
'Question Meta-Data Details': 'Question Meta-Data Details',
'Question Meta-Data added': 'Question Meta-Data added',
'Question Meta-Data deleted': 'Question Meta-Data deleted',
'Question Meta-Data updated': 'Question Meta-Data updated',
'Question Meta-Data': 'Question Meta-Data',
'Question Summary': 'Question Summary',
'Question': 'Question',
'RAM Cache Keys': 'RAM Cache Keys',
'RAM': 'RAM',
'RC frame with masonry infill': 'RC frame with masonry infill',
'READ': 'READ',
'REPORTS': 'REPORTS',
'RESET': 'RESET',
'RESILIENCE': 'RESILIENCE',
'REST Filter': 'REST Filter',
'RFA Priorities': 'RFA Priorities',
'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework',
'RFA2: Knowledge, Information, Public Awareness and Education': 'RFA2: Knowledge, Information, Public Awareness and Education',
'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk',
'RFA4: Planning for Effective Preparedness, Response and Recovery': 'RFA4: Planning for Effective Preparedness, Response and Recovery',
'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'RFA5: Effective, Integrated and People-Focused Early Warning Systems',
'RFA6: Reduction of Underlying Risk Factors': 'RFA6: Reduction of Underlying Risk Factors',
'RSS Feed': 'RSS Feed',
'RSS Post Details': 'RSS Post Details',
'RSS Post deleted': 'RSS Post deleted',
'RSS Posts': 'RSS Posts',
'RSS Setting Details': 'RSS Setting Details',
'RSS Setting deleted': 'RSS Setting deleted',
'RSS Settings': 'RSS Settings',
'RSS settings updated': 'RSS settings updated',
'RSS': 'RSS',
'Race': 'Race',
'Radio Callsign': 'Radio Callsign',
'Radio Details': 'Radio Details',
'Radio': 'Radio',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Rail': 'Rail',
'Railway Accident': 'Railway Accident',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Ram Cleared': 'RAM Cleared',
'Range From': 'Range From',
'Range To': 'Range To',
'Rank': 'Rank',
'Rapid Assessment Details': 'Rapid Assessment Details',
'Rapid Assessment added': 'Rapid Assessment added',
'Rapid Assessment deleted': 'Rapid Assessment deleted',
'Rapid Assessment updated': 'Rapid Assessment updated',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessments': 'Rapid Assessments',
'Rapid Close Lead': 'Rapid Close Lead',
'Rapid Data Entry': 'Rapid Data Entry',
'Rating': 'Rating',
'Ready to Start': 'Ready to Start',
'Ready': 'Ready',
'Reason': 'Reason',
'Receipt number / Student ID / other notes': 'Receipt number / Student ID / other notes',
'Receive %(opt_in)s updates:': 'Receive %(opt_in)s updates:',
'Receive New Shipment': 'Receive New Shipment',
'Receive Shipment': 'Receive Shipment',
'Receive updates': 'Receive updates',
'Receive': 'Receive',
'Receive/Incoming': 'Receive/Incoming',
'Received By': 'Received By',
'Received Shipment Details': 'Received Shipment Details',
'Received Shipment canceled': 'Received Shipment canceled',
'Received Shipment updated': 'Received Shipment updated',
'Received Shipments': 'Received Shipments',
'Received date': 'Received date',
'Received': 'Received',
'Received/Incoming Shipments': 'Received/Incoming Shipments',
'Receiving Inventory': 'Receiving Inventory',
'Reception': 'Reception',
'Recipient Details updated': 'Recipient Details updated',
'Recipient Details': 'Recipient Details',
'Recipient added': 'Recipient added',
'Recipient deleted': 'Recipient deleted',
'Recipient': 'Recipient',
'Recipient(s)': 'Recipient(s)',
'Recipients (%(number)s Total)': 'Recipients (%(number)s Total)',
'Recipients': 'Recipients',
'Recommendations for Repair and Reconstruction or Demolition': 'Recommendations for Repair and Reconstruction or Demolition',
'Record Details': 'Record Details',
'Record Updates': 'Record Updates',
'Record added': 'Record added',
'Record already exists': 'Record already exists',
'Record any restriction on use or entry': 'Record any restriction on use or entry',
'Record approved': 'Record approved',
'Record could not be approved.': 'Record could not be approved.',
'Record could not be deleted.': 'Record could not be deleted.',
'Record deleted': 'Record deleted',
'Record id': 'Record id',
'Record not found!': 'Record not found!',
'Record not found': 'Record not found',
'Record updated': 'Record updated',
'Record': 'Record',
'Records merged successfully.': 'Records merged successfully.',
'Records': 'Records',
'Recovery Request added': 'Recovery Request added',
'Recovery Request deleted': 'Recovery Request deleted',
'Recovery Request updated': 'Recovery Request updated',
'Recovery Request': 'Recovery Request',
'Recovery': 'Recovery',
'Recurring Cost': 'Recurring Cost',
'Recurring Request?': 'Recurring Request?',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Recurring costs',
'Recurring': 'Recurring',
'Red (unsafe)': 'Red (unsafe)',
'Red': 'Red',
'Redirect URL': 'Redirect URL',
'Reference': 'Reference',
'Refresh Rate (seconds)': 'Refresh Rate (seconds)',
'Region Details': 'Region Details',
'Region Location': 'Region Location',
'Region added': 'Region added',
'Region deleted': 'Region deleted',
'Region updated': 'Region updated',
'Region': 'Region',
'Regions': 'Regions',
'Register As': 'Register As',
'Register Person into this Camp': 'Register Person into this Camp',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Register Person': 'Register Person',
'Register for Account': 'Register for Account',
'Register': 'Register',
'Registered People': 'Registered People',
'Registered users can %(login)s to access the system': 'Registered users can %(login)s to access the system',
'Registration Details': 'Registration Details',
'Registration added': 'Registration added',
'Registration entry deleted': 'Registration entry deleted',
'Registration not permitted': 'Registration not permitted',
'Registration updated': 'Registration updated',
'Registration': 'Registration',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Reinforced masonry': 'Reinforced masonry',
'Reject request submitted': 'Reject request submitted',
'Reject': 'Reject',
'Rejected': 'Rejected',
'Related to Solution (optional)': 'Related to Solution (optional)',
'Relationship': 'Relationship',
'Relative Details': 'Relative Details',
'Relative added': 'Relative added',
'Relative deleted': 'Relative deleted',
'Relative updated': 'Relative updated',
'Relatives': 'Relatives',
'Reliability': 'Reliability',
'Relief Team': 'Relief Team',
'Relief': 'Relief',
'Religion': 'Religion',
'Religious Leader': 'Religious Leader',
'Religious': 'Religious',
'Reload': 'Reload',
'Remarks': 'Remarks',
'Remote Error': 'Remote Error',
'Remote ID': 'Remote ID',
'Remove Asset from this incident': 'Remove Asset from this incident',
'Remove Asset from this scenario': 'Remove Asset from this scenario',
'Remove Bookmark': 'Remove Bookmark',
'Remove Coalition': 'Remove Coalition',
'Remove Dataset Price': 'Remove Dataset Price',
'Remove Event Type from this event': 'Remove Event Type from this event',
'Remove Facility from this incident': 'Remove Facility from this incident',
'Remove Facility from this scenario': 'Remove Facility from this scenario',
'Remove Furniture': 'Remove Furniture',
'Remove Hazard': 'Remove Hazard',
'Remove Human Resource from this incident': 'Remove Human Resource from this incident',
'Remove Human Resource from this scenario': 'Remove Human Resource from this scenario',
'Remove Incident Report from this event': 'Remove Incident Report from this event',
'Remove Incident Report from this incident': 'Remove Incident Report from this incident',
'Remove Incident Type from this event': 'Remove Incident Type from this event',
'Remove Incident from this event': 'Remove Incident from this event',
'Remove Item from Procurement Plan': 'Remove Item from Procurement Plan',
'Remove Item from Stock': 'Remove Item from Stock',
'Remove Layer from Profile': 'Remove Layer from Profile',
'Remove Layer from Symbology': 'Remove Layer from Symbology',
'Remove Loose Debris': 'Remove Loose Debris',
'Remove Major Appliances': 'Remove Major Appliances',
'Remove Map Configuration from this incident': 'Remove Map Configuration from this incident',
'Remove Map Configuration from this scenario': 'Remove Map Configuration from this scenario',
'Remove Membership': 'Remove Membership',
'Remove Network': 'Remove Network',
'Remove Organization from Project': 'Remove Organization from Project',
'Remove Organization from this scenario': 'Remove Organization from this scenario',
'Remove People from Commitment': 'Remove People from Commitment',
'Remove Person from Commitment': 'Remove Person from Commitment',
'Remove Profile Configuration for Layer': 'Remove Profile Configuration for Layer',
'Remove Purchased Data': 'Remove Purchased Data',
'Remove Risk from this event': 'Remove Risk from this event',
'Remove Saved Query': 'Remove Saved Query',
'Remove Skill from Request': 'Remove Skill from Request',
'Remove Skill': 'Remove Skill',
'Remove Station Parameter': 'Remove Station Parameter',
'Remove Stock from Warehouse': 'Remove Stock from Warehouse',
'Remove Symbology from Layer': 'Remove Symbology from Layer',
'Remove Tag for this Event from this Post': 'Remove Tag for this Event from this Post',
'Remove Task from this incident': 'Remove Task from this incident',
'Remove Task from this scenario': 'Remove Task from this scenario',
'Remove Vehicle from this incident': 'Remove Vehicle from this incident',
'Remove Water Heater': 'Remove Water Heater',
'Remove all log entries': 'Remove all log entries',
'Remove existing data before import': 'Remove existing data before import',
'Remove selection': 'Remove selection',
'Remove this entry': 'Remove this entry',
'Remove': 'Remove',
'Removed from Group': 'Removed from Group',
'Rent': 'Rent',
'Reopened': 'Reopened',
'Repacked By': 'Repacked By',
'Repair': 'Repair',
'Repaired': 'Repaired',
'Repairs': 'Repairs',
'Repeat your password': 'Repeat your password',
'Repeat': 'Repeat',
'Replace': 'Replace',
'Replies': 'Replies',
'Reply Message': 'Reply Message',
'Reply': 'Reply',
'Report Another Assessment...': 'Report Another Assessment...',
'Report Date': 'Report Date',
'Report Details': 'Report Details',
'Report Options': 'Report Options',
'Report Resource': 'Report Resource',
'Report To': 'Report To',
'Report added': 'Report added',
'Report deleted': 'Report deleted',
'Report my location': 'Report my location',
'Report of': 'Report of',
'Report on Annual Budgets': 'Report on Annual Budgets',
'Report the contributing factors for the current EMS status.': 'Report the contributing factors for the current EMS status.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report them as found': 'Report them as found',
'Report them missing': 'Report them missing',
'Report updated': 'Report updated',
'Report': 'Report',
'Reported By (Not Staff)': 'Reported By (Not Staff)',
'Reported By (Staff)': 'Reported By (Staff)',
'Reported By': 'Reported By',
'Reported To': 'Reported To',
'Reported': 'Reported',
'Reports': 'Reports',
'Repositories': 'Repositories',
'Repository Base URL': 'Repository Base URL',
'Repository Configuration': 'Repository Configuration',
'Repository Name': 'Repository Name',
'Repository Type': 'Repository Type',
'Repository UUID': 'Repository UUID',
'Repository configuration deleted': 'Repository configuration deleted',
'Repository configuration updated': 'Repository configuration updated',
'Repository configured': 'Repository configured',
'Repository': 'Repository',
'Request Added': 'Request Added',
'Request Canceled': 'Request Canceled',
'Request Details': 'Request Details',
'Request From': 'Request From',
'Request Item Details': 'Request Item Details',
'Request Item': 'Request Item',
'Request Items': 'Request Items',
'Request Job': 'Request Job',
'Request New People': 'Request New People',
'Request Schedule': 'Request Schedule',
'Request Status updated': 'Request Status updated',
'Request Status': 'Request Status',
'Request Stock from Available Warehouse': 'Request Stock from Available Warehouse',
'Request Template Added': 'Request Template Added',
'Request Template Deleted': 'Request Template Deleted',
'Request Template Details': 'Request Template Details',
'Request Template Updated': 'Request Template Updated',
'Request Templates': 'Request Templates',
'Request Type': 'Request Type',
'Request Updated': 'Request Updated',
'Request added': 'Request added',
'Request deleted': 'Request deleted',
'Request from Facility': 'Request from Facility',
'Request updated': 'Request updated',
'Request': 'Request',
'Requested By Facility': 'Requested By Facility',
'Requested By': 'Requested By',
'Requested For Facility': 'Requested For Facility',
'Requested For Site': 'Requested For Site',
'Requested For': 'Requested For',
'Requested From': 'Requested From',
'Requested Items': 'Requested Items',
'Requested Skill Details': 'Requested Skill Details',
'Requested Skill updated': 'Requested Skill updated',
'Requested Skills': 'Requested Skills',
'Requested for Site': 'Requested for Site',
'Requested': 'Requested',
'Requester': 'Requester',
'Requests Management': 'Requests Management',
'Requests Report': 'Requests Report',
'Requests': 'Requests',
'Required Skills (optional)': 'Required Skills (optional)',
'Required Skills': 'Required Skills',
'Requires Login!': 'Requires Login!',
'Requires Login': 'Requires Login',
'Rescue Ambulance': 'Rescue Ambulance',
'Rescue Vehicle Tactical Assistance': 'Rescue Vehicle Tactical Assistance',
'Rescue and recovery': 'Rescue and recovery',
'Rescue': 'Rescue',
'Reset all filters': 'Reset all filters',
'Reset': 'Reset',
'Residence is Vacation Home': 'Residence is Vacation Home',
'Resolved': 'Resolved',
'Resource Configuration': 'Resource Configuration',
'Resource Details': 'Resource Details',
'Resource Files': 'Resource Files',
'Resource Inventory': 'Resource Inventory',
'Resource Name': 'Resource Name',
'Resource Type Details': 'Resource Type Details',
'Resource Type added': 'Resource Type added',
'Resource Type deleted': 'Resource Type deleted',
'Resource Type updated': 'Resource Type updated',
'Resource Type': 'Resource Type',
'Resource Types': 'Resource Types',
'Resource added': 'Resource added',
'Resource configuration deleted': 'Resource configuration deleted',
'Resource configuration updated': 'Resource configuration updated',
'Resource configured': 'Resource configured',
'Resource deleted': 'Resource deleted',
'Resource name': 'Resource name',
'Resource updated': 'Resource updated',
'Resource': 'Resource',
'Resources': 'Resources',
'Respirators': 'Respirators',
'Respiratory Infections': 'Respiratory Infections',
'Responded': 'Responded',
'Responder(s)': 'Responder(s)',
'Respone action should be taken immediately': 'Response action should be taken immediately',
'Response Added': 'Response Added',
'Response Deleted': 'Response Deleted',
'Response Details': 'Response Details',
'Response Message': 'Response Message',
'Response Messages': 'Response Messages',
'Response Report': 'Response Report',
'Response Summaries': 'Response Summaries',
'Response Summary Added': 'Response Summary Added',
'Response Summary Deleted': 'Response Summary Deleted',
'Response Summary Details': 'Response Summary Details',
'Response Summary Report': 'Response Summary Report',
'Response Summary Updated': 'Response Summary Updated',
'Response Updated': 'Response Updated',
'Response action should be taken soon (within next hour)': 'Response action should be taken soon (within next hour)',
'Response linked to %(mission)s': 'Response linked to %(mission)s',
'Response linked to Mission': 'Response linked to Mission',
'Response type': 'Response type',
'Response': 'Response',
'Responses': 'Responses',
'Responsive action is no longer required': 'Responsive action is no longer required',
'Responsive action should be taken in the near future': 'Responsive action should be taken in the near future',
'Restricted - to users with a known operational requirement (described in restriction)': 'Restricted - to users with a known operational requirement (described in restriction)',
'Restricted Access': 'Restricted Access',
'Restricted Use': 'Restricted Use',
'Restriction': 'Restriction',
'Restrictions': 'Restrictions',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Return to Request': 'Return to Request',
'Return': 'Return',
'Returned From': 'Returned From',
'Returned': 'Returned',
'Returning': 'Returning',
'Revert Entry': 'Revert Entry',
'Review Incoming Shipment to Receive': 'Review Incoming Shipment to Receive',
'Review Requests': 'Review Requests',
'Review next': 'Review next',
'Review': 'Review',
'Revised Quantity': 'Revised Quantity',
'Revised Status': 'Revised Status',
'Revised Value per Pack': 'Revised Value per Pack',
'Rice': 'Rice',
'Rich Text?': 'Rich Text?',
'Riot': 'Riot',
'Risk Details': 'Risk Details',
'Risk added': 'Risk added',
'Risk removed': 'Risk removed',
'Risk updated': 'Risk updated',
'Risk': 'Risk',
'Risks': 'Risks',
'River Details': 'River Details',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'River': 'River',
'Rivers': 'Rivers',
'Road Accident': 'Road Accident',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Road Usage Condition',
'Road': 'Road',
'Role Details': 'Role Details',
'Role Name': 'Role Name',
'Role Required': 'Role Required',
'Role added': 'Role added',
'Role assigned to User': 'Role assigned to User',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Role': 'Role',
'Roles Permitted': 'Roles Permitted',
'Roles currently assigned': 'Roles currently assigned',
'Roles of User': 'Roles of User',
'Roles updated': 'Roles updated',
'Roles': 'Roles',
'Roll On Roll Off Berth': 'Roll On Roll Off Berth',
'Roof tile': 'Roof tile',
'Roofs, floors (vertical load)': 'Roofs, floors (vertical load)',
'Room Details': 'Room Details',
'Room added': 'Room added',
'Room deleted': 'Room deleted',
'Room updated': 'Room updated',
'Room': 'Room',
'Rooms': 'Rooms',
'Roster': 'Roster',
'Rows in Table': 'Rows in Table',
'Rows selected': 'Rows selected',
'Rubber Boots': 'Rubber Boots',
'Rubber gloves': 'Rubber gloves',
'Run Now': 'Run Now',
'Run every': 'Run every',
'Running Cost': 'Running Cost',
'Rural Tank Tactical Vehicle': 'Rural Tank Tactical Vehicle',
'S3PivotTable unresolved dependencies': 'S3PivotTable unresolved dependencies',
'SEARCH': 'SÖK',
'SMS Details': 'SMS Details',
'SMS InBox': 'SMS InBox',
'SMS Modem': 'SMS Modem',
'SMS Modems (Inbound & Outbound)': 'SMS Modems (Inbound & Outbound)',
'SMS Outbound Gateway updated': 'SMS Outbound Gateway updated',
'SMS Outbound Gateway': 'SMS Outbound Gateway',
'SMS WebAPI (Outbound)': 'SMS WebAPI (Outbound)',
'SMS deleted': 'SMS deleted',
'SMS via SMTP (Outbound)': 'SMS via SMTP (Outbound)',
'SMS': 'SMS',
'SMTP to SMS settings updated': 'SMTP to SMS settings updated',
'SMTP': 'SMTP',
'STRONG': 'STRONG',
'SUBMIT DATA': 'SUBMIT DATA',
'Safe environment for vulnerable groups': 'Safe environment for vulnerable groups',
'Safety Assessment Form': 'Safety Assessment Form',
'Safety glasses': 'Safety glasses',
'Safety of children and women affected by disaster?': 'Safety of children and women affected by disaster?',
'Sahana Community Chat': 'Sahana Community Chat',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Humanitarian Management Platform',
'Sahana Eden Website': 'Sahana Eden Website',
'Sahana Eden portable application generator': 'Sahana Eden portable application generator',
'Sahana Eden': 'Sahana Eden',
'Sale': 'Sale',
'Salted Fish': 'Salted Fish',
'Sanitation problems': 'Sanitation problems',
'Sanitization': 'Sanitization',
'Satellite': 'Satellite',
'Saturday': 'Saturday',
'Save Changes': 'Save Changes',
'Save Map': 'Save Map',
'Save Query': 'Save Query',
'Save Vote': 'Save Vote',
'Save and Continue Editing': 'Save and Continue Editing',
'Save and add Items': 'Save and add Items',
'Save and add People': 'Save and add People',
'Save and add another language...': 'Save and add another language...',
'Save and edit information': 'Save and edit information',
'Save as New Map?': 'Save as New Map?',
'Save model as...': 'Save model as...',
'Save search': 'Save search',
'Save this search': 'Save this search',
'Save': 'Save',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Save: Default Lat, Lon & Zoom for the Viewport',
'Saved Filters': 'Saved Filters',
'Saved Filters...': 'Saved Filters...',
'Saved Maps': 'Saved Maps',
'Saved Queries': 'Saved Queries',
'Saved Query Details': 'Saved Query Details',
'Saved Query removed': 'Saved Query removed',
'Saved Query updated': 'Saved Query updated',
'Saved Searches': 'Saved Searches',
'Saved filters': 'Saved filters',
'Saved search added': 'Saved search added',
'Saved search deleted': 'Saved search deleted',
'Saved search details': 'Saved search details',
'Saved search updated': 'Saved search updated',
'Saved searches': 'Saved searches',
'Saved': 'Saved',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Sawzall': 'Sawzall',
'Scale of Results': 'Scale of Results',
'Scale': 'Scale',
'Scanned Copy': 'Scanned Copy',
'Scanned Forms Upload': 'Scanned Forms Upload',
'Scenario Details': 'Scenario Details',
'Scenario added': 'Scenario added',
'Scenario deleted': 'Scenario deleted',
'Scenario updated': 'Scenario updated',
'Scenario': 'Scenario',
'Scenarios': 'Scenarios',
'Schedule synchronization jobs': 'Schedule synchronization jobs',
'Schedule': 'Schedule',
'Scheduled Jobs': 'Scheduled Jobs',
'Schema': 'Schema',
'School Closure': 'School Closure',
'School Holidays only': 'School Holidays only',
'School Lockdown': 'School Lockdown',
'School Teacher': 'School Teacher',
'School activities': 'School activities',
'School assistance': 'School assistance',
'School attendance': 'School attendance',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School': 'School',
'School/studying': 'School/studying',
'Scope': 'Scope',
'Screwdrivers': 'Screwdrivers',
'Scubadiving Support Vehicle': 'Scubadiving Support Vehicle',
'Sea': 'Sea',
'Seaport Details': 'Seaport Details',
'Seaport added': 'Seaport added',
'Seaport deleted': 'Seaport deleted',
'Seaport updated': 'Seaport updated',
'Seaport': 'Seaport',
'Seaports': 'Seaports',
'Search %(site_label)s Status': 'Search %(site_label)s Status',
'Search Activities': 'Search Activities',
'Search Activity Report': 'Search Activity Report',
'Search Activity Types': 'Search Activity Types',
'Search Addresses': 'Search Addresses',
'Search Affiliations': 'Search Affiliations',
'Search After Save?': 'Search After Save?',
'Search Airports': 'Search Airports',
'Search Alerts': 'Search Alerts',
'Search Alternative Items': 'Search Alternative Items',
'Search Annual Budgets': 'Search Annual Budgets',
'Search Appraisals': 'Search Appraisals',
'Search Assessment Summaries': 'Search Assessment Summaries',
'Search Assessments': 'Search Assessments',
'Search Asset Log': 'Search Asset Log',
'Search Assets': 'Search Assets',
'Search Assigned Human Resources': 'Search Assigned Human Resources',
'Search Awards': 'Search Awards',
'Search Base Stations': 'Search Base Stations',
'Search Baseline Type': 'Search Baseline Type',
'Search Baselines': 'Search Baselines',
'Search Beneficiaries': 'Search Beneficiaries',
'Search Beneficiary Types': 'Search Beneficiary Types',
'Search Branch Organizations': 'Search Branch Organizations',
'Search Brands': 'Search Brands',
'Search Budgets': 'Search Budgets',
'Search Bundles': 'Search Bundles',
'Search Camp Services': 'Search Camp Services',
'Search Camp Statuses': 'Search Camp Statuses',
'Search Camp Types': 'Search Camp Types',
'Search Campaigns': 'Search Campaigns',
'Search Camps': 'Search Camps',
'Search Cases': 'Search Cases',
'Search Catalog Items': 'Search Catalog Items',
'Search Catalogs': 'Search Catalogs',
'Search Certificates': 'Search Certificates',
'Search Certifications': 'Search Certifications',
'Search Checklists': 'Search Checklists',
'Search Cluster Subsectors': 'Search Cluster Subsectors',
'Search Clusters': 'Search Clusters',
'Search Coalitions': 'Search Coalitions',
'Search Commitment Items': 'Search Commitment Items',
'Search Commitments': 'Search Commitments',
'Search Committed People': 'Search Committed People',
'Search Communities': 'Search Communities',
'Search Competency Ratings': 'Search Competency Ratings',
'Search Contact Information': 'Search Contact Information',
'Search Contacts': 'Search Contacts',
'Search Course Certificates': 'Search Course Certificates',
'Search Courses': 'Search Courses',
'Search Credentials': 'Search Credentials',
'Search Criteria': 'Search Criteria',
'Search Dataset Prices': 'Search Dataset Prices',
'Search Demographic Data': 'Search Demographic Data',
'Search Demographics': 'Search Demographics',
'Search Departments': 'Search Departments',
'Search Details': 'Search Details',
'Search Distribution Items': 'Search Distribution Items',
'Search Distributions': 'Search Distributions',
'Search Documents': 'Search Documents',
'Search Donations': 'Search Donations',
'Search Donors': 'Search Donors',
'Search Education Details': 'Search Education Details',
'Search Entries': 'Search Entries',
'Search Evacuation Routes': 'Search Evacuation Routes',
'Search Event Types': 'Search Event Types',
'Search Events': 'Search Events',
'Search Facilities': 'Search Facilities',
'Search Facility Types': 'Search Facility Types',
'Search Feature Layers': 'Search Feature Layers',
'Search Filters': 'Search Filters',
'Search GPS data': 'Search GPS data',
'Search Gauges': 'Search Gauges',
'Search Groups': 'Search Groups',
'Search Hazards': 'Search Hazards',
'Search Heliports': 'Search Heliports',
'Search Homes': 'Search Homes',
'Search Hours': 'Search Hours',
'Search Human Resources': 'Search Human Resources',
'Search Identity': 'Search Identity',
'Search Images': 'Search Images',
'Search Impact Type': 'Search Impact Type',
'Search Impacts': 'Search Impacts',
'Search Incident Reports': 'Search Incident Reports',
'Search Incident Types': 'Search Incident Types',
'Search Incidents': 'Search Incidents',
'Search Item Categories': 'Search Item Categories',
'Search Item Packs': 'Search Item Packs',
'Search Items': 'Search Items',
'Search Job Titles': 'Search Job Titles',
'Search Keywords': 'Search Keywords',
'Search Kits': 'Search Kits',
'Search Layers': 'Search Layers',
'Search Level 1 Assessments': 'Search Level 1 Assessments',
'Search Level 2 Assessments': 'Search Level 2 Assessments',
'Search Location Hierarchies': 'Search Location Hierarchies',
'Search Location': 'Search Location',
'Search Locations': 'Search Locations',
'Search Log Entry': 'Search Log Entry',
'Search Logged Time': 'Search Logged Time',
'Search Mailing Lists': 'Search Mailing Lists',
'Search Map Configurations': 'Search Map Configurations',
'Search Markers': 'Search Markers',
'Search Members': 'Search Members',
'Search Membership Types': 'Search Membership Types',
'Search Membership': 'Search Membership',
'Search Memberships': 'Search Memberships',
'Search Menu Entries': 'Search Menu Entries',
'Search Milestones': 'Search Milestones',
'Search Missions': 'Search Missions',
'Search Morgues': 'Search Morgues',
'Search Networks': 'Search Networks',
'Search Office Types': 'Search Office Types',
'Search Offices': 'Search Offices',
'Search Open Tasks for %(project)s': 'Search Open Tasks for %(project)s',
'Search Orders': 'Search Orders',
'Search Organization Domains': 'Search Organization Domains',
'Search Organization Types': 'Search Organization Types',
'Search Organizations': 'Search Organizations',
'Search Participants': 'Search Participants',
'Search Partner Organizations': 'Search Partner Organizations',
'Search Patients': 'Search Patients',
'Search People': 'Search People',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Search Persons',
'Search Photos': 'Search Photos',
'Search PoI Types': 'Search PoI Types',
'Search Points of Interest': 'Search Points of Interest',
'Search Policies & Strategies': 'Search Policies & Strategies',
'Search Population Statistics': 'Search Population Statistics',
'Search Positions': 'Search Positions',
'Search Posts': 'Search Posts',
'Search Problems': 'Search Problems',
'Search Procurement Plan Items': 'Search Procurement Plan Items',
'Search Procurement Plans': 'Search Procurement Plans',
'Search Professional Experience': 'Search Professional Experience',
'Search Programs': 'Search Programs',
'Search Project Organizations': 'Search Project Organizations',
'Search Projections': 'Search Projections',
'Search Projects': 'Search Projects',
'Search Purchased Data': 'Search Purchased Data',
'Search Query': 'Search Query',
'Search Rapid Assessments': 'Search Rapid Assessments',
'Search Received/Incoming Shipments': 'Search Received/Incoming Shipments',
'Search Recipients': 'Search Recipients',
'Search Records': 'Search Records',
'Search Regions': 'Search Regions',
'Search Registrations': 'Search Registrations',
'Search Relatives': 'Search Relatives',
'Search Report': 'Search Report',
'Search Request': 'Search Request',
'Search Requested Items': 'Search Requested Items',
'Search Requested Skills': 'Search Requested Skills',
'Search Requests': 'Search Requests',
'Search Resource Types': 'Search Resource Types',
'Search Resource Inventory': 'Search Resource Inventory',
'Search Resources': 'Search Resources',
'Search Response Messages': 'Search Response Messages',
'Search Response Summaries': 'Search Response Summaries',
'Search Responses': 'Search Responses',
'Search Results': 'Search Results',
'Search Risks': 'Search Risks',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Search Roles',
'Search Rooms': 'Search Rooms',
'Search Saved Queries': 'Search Saved Queries',
'Search Scenarios': 'Search Scenarios',
'Search Seaports': 'Search Seaports',
'Search Sections': 'Search Sections',
'Search Sectors': 'Search Sectors',
'Search Security-Related Staff': 'Search Security-Related Staff',
'Search Sent Shipments': 'Search Sent Shipments',
'Search Series': 'Search Series',
'Search Service Profiles': 'Search Service Profiles',
'Search Services': 'Search Services',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Statuses': 'Search Shelter Statuses',
'Search Shelter Types': 'Search Shelter Types',
'Search Shelters': 'Search Shelters',
'Search Shipment Items': 'Search Shipment Items',
'Search Shipped Items': 'Search Shipped Items',
'Search Skill Equivalences': 'Search Skill Equivalences',
'Search Skill Provisions': 'Search Skill Provisions',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Staff & Volunteers': 'Search Staff & Volunteers',
'Search Staff Assignments': 'Search Staff Assignments',
'Search Staff Types': 'Search Staff Types',
'Search Staff': 'Search Staff',
'Search Station Parameters': 'Search Station Parameters',
'Search Status Reports': 'Search Status Reports',
'Search Status': 'Search Status',
'Search Stock Adjustments': 'Search Stock Adjustments',
'Search Stock Counts': 'Search Stock Counts',
'Search Stock Items': 'Search Stock Items',
'Search Subsectors': 'Search Subsectors',
'Search Suppliers': 'Search Suppliers',
'Search Support Requests': 'Search Support Requests',
'Search Symbologies': 'Search Symbologies',
'Search Tags': 'Search Tags',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Search Teams',
'Search Templates': 'Search Templates',
'Search Theme Data': 'Search Theme Data',
'Search Themes': 'Search Themes',
'Search Tours': 'Search Tours',
'Search Trained People': 'Search Trained People',
'Search Trained Type of Peoples': 'Search Trained Type of Peoples',
'Search Training Events': 'Search Training Events',
'Search Training Participants': 'Search Training Participants',
'Search Type of Peoples': 'Search Type of Peoples',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Vehicle Assignments': 'Search Vehicle Assignments',
'Search Vehicle Details': 'Search Vehicle Details',
'Search Vehicle Types': 'Search Vehicle Types',
'Search Vehicles': 'Search Vehicles',
'Search Volunteer Cluster Positions': 'Search Volunteer Cluster Positions',
'Search Volunteer Cluster Types': 'Search Volunteer Cluster Types',
'Search Volunteer Clusters': 'Search Volunteer Clusters',
'Search Volunteer Roles': 'Search Volunteer Roles',
'Search Volunteers': 'Search Volunteers',
'Search Vulnerability Aggregated Indicators': 'Search Vulnerability Aggregated Indicators',
'Search Vulnerability Data': 'Search Vulnerability Data',
'Search Vulnerability Indicators': 'Search Vulnerability Indicators',
'Search Warehouse Stock': 'Search Warehouse Stock',
'Search Warehouse Types': 'Search Warehouse Types',
'Search Warehouses': 'Search Warehouses',
'Search Zone Types': 'Search Zone Types',
'Search Zones': 'Search Zones',
'Search alert information': 'Search alert information',
'Search for Job': 'Search for Job',
'Search for Repository': 'Search for Repository',
'Search for Resource': 'Search for Resource',
'Search for Vehicles': 'Search for Vehicles',
'Search for a Person': 'Search for a Person',
'Search for a Policy or Strategy by name or description.': 'Search for a Policy or Strategy by name or description.',
'Search for a Project Community by name.': 'Search for a Project Community by name.',
'Search for a Project by name, code, location, or description.': 'Search for a Project by name, code, location, or description.',
'Search for a Project by name, code, or description.': 'Search for a Project by name, code, or description.',
'Search for a commitment by Committer name, Request ID, Site or Organization.': 'Search for a commitment by Committer name, Request ID, Site or Organization.',
'Search for a request by Site name, Requester name or free text.': 'Search for a request by Site name, Requester name or free text.',
'Search for a shipment by looking for text in any field.': 'Search for a shipment by looking for text in any field.',
'Search for a shipment received between these dates': 'Search for a shipment received between these dates',
'Search for a shipment sent between these dates.': 'Search for a shipment sent between these dates.',
'Search for a shipment which has an estimated delivery between these dates.': 'Search for a shipment which has an estimated delivery between these dates.',
'Search for an Alert by sender, incident, headline or event.': 'Search for an Alert by sender, incident, headline or event.',
'Search for an item by Year of Manufacture.': 'Search for an item by Year of Manufacture.',
'Search for an item by brand.': 'Search for an item by brand.',
'Search for an item by catalog.': 'Search for an item by catalog.',
'Search for an item by category.': 'Search for an item by category.',
'Search for an item by its code, name, model and/or comment.': 'Search for an item by its code, name, model and/or comment.',
'Search for an item by text.': 'Search for an item by text.',
'Search for an order by looking for text in any field.': 'Search for an order by looking for text in any field.',
'Search for an order expected between these dates': 'Search for an order expected between these dates',
'Search for commitments available between these dates.': 'Search for commitments available between these dates.',
'Search for commitments made between these dates.': 'Search for commitments made between these dates.',
'Search for items by owning organization.': 'Search for items by owning organization.',
'Search for items with this text in the name.': 'Search for items with this text in the name.',
'Search for office by organization or branch.': 'Search for office by organization or branch.',
'Search for office by organization.': 'Search for office by organization.',
'Search for requests made between these dates.': 'Search for requests made between these dates.',
'Search for requests required between these dates.': 'Search for requests required between these dates.',
'Search for warehouse by organization.': 'Search for warehouse by organization.',
'Search for warehouse by text.': 'Search for warehouse by text.',
'Search here for a person record in order to:': 'Search here for a person record in order to:',
'Search location in Geonames': 'Search location in Geonames',
'Search saved searches': 'Search saved searches',
'Search': 'Sök',
'Searched?': 'Searched?',
'Searches': 'Searches',
'Secondary Server (Optional)': 'Secondary Server (Optional)',
'Seconds must be a number.': 'Seconds must be a number.',
'Seconds must be less than 60.': 'Seconds must be less than 60.',
'Section Details': 'Section Details',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Section': 'Section',
'Sections that are part of this template': 'Sections that are part of this template',
'Sections': 'Sections',
'Sector Details': 'Sector Details',
'Sector added to Organization': 'Sector added to Organization',
'Sector added to Project': 'Sector added to Project',
'Sector added to Theme': 'Sector added to Theme',
'Sector added': 'Sector added',
'Sector deleted': 'Sector deleted',
'Sector removed from Organization': 'Sector removed from Organization',
'Sector removed from Project': 'Sector removed from Project',
'Sector removed from Theme': 'Sector removed from Theme',
'Sector updated': 'Sector updated',
'Sector': 'Sector',
'Sector(s)': 'Sector(s)',
'Sectors to which this Activity Type can apply': 'Sectors to which this Activity Type can apply',
'Sectors to which this Theme can apply': 'Sectors to which this Theme can apply',
'Sectors': 'Sectors',
'Secure Storage Capacity': 'Secure Storage Capacity',
'Security Description': 'Security Description',
'Security Required': 'Security Required',
'Security Status': 'Security Status',
'Security problems': 'Security problems',
'Security': 'Security',
'Security-Related Staff Details': 'Security-Related Staff Details',
'Security-Related Staff added': 'Security-Related Staff added',
'Security-Related Staff deleted': 'Security-Related Staff deleted',
'Security-Related Staff updated': 'Security-Related Staff updated',
'Security-Related Staff': 'Security-Related Staff',
'See All Entries': 'See All Entries',
'See a detailed description of the module on the Sahana Eden wiki': 'See a detailed description of the module on the Sahana Eden wiki',
'See options.': 'See options.',
'See the universally unique identifier (UUID) of this repository': 'See the universally unique identifier (UUID) of this repository',
'See unassigned recovery requests': 'See unassigned recovery requests',
'Seen': 'Seen',
'Select %(location)s': 'Select %(location)s',
'Select %(up_to_3_locations)s to compare overall resilience': 'Select %(up_to_3_locations)s to compare overall resilience',
'Select All': 'Select All',
'Select Existing Location': 'Select Existing Location',
'Select Items from the Request': 'Select Items from the Request',
'Select Label Question': 'Select Label Question',
'Select Member': 'Select Member',
'Select Mission': 'Select Mission',
'Select Modules which are to be translated': 'Select Modules which are to be translated',
'Select Numeric Questions (one or more):': 'Select Numeric Questions (one or more):',
'Select Project': 'Select Project',
'Select Recipients': 'Select Recipients',
'Select Stock from this Warehouse': 'Select Stock from this Warehouse',
'Select This Location': 'Select This Location',
'Select a Country': 'Select a Country',
'Select a commune to': 'Select a commune to',
'Select a label question and at least one numeric question to display the chart.': 'Select a label question and at least one numeric question to display the chart.',
'Select all templates (All modules included)': 'Select all templates (All modules included)',
'Select all that apply': 'Select all that apply',
'Select all': 'Select all',
'Select an existing bin': 'Select an existing bin',
'Select an image to upload. You can crop this later by opening this record.': 'Select an image to upload. You can crop this later by opening this record.',
'Select as Recipients': 'Select as Recipients',
'Select data type': 'Select data type',
'Select from registry': 'Select from registry',
'Select language code': 'Select language code',
'Select one or more option(s) that apply': 'Select one or more option(s) that apply',
'Select resources to import': 'Select resources to import',
'Select the Question': 'Select the Question',
'Select the default site.': 'Select the default site.',
'Select the language file': 'Select the language file',
'Select the option that applies': 'Select the option that applies',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
'Select the required modules': 'Select the required modules',
'Select this %(location)s': 'Select this %(location)s',
'Select this if you need this resource to be mapped from site_id instead of location_id.': 'Select this if you need this resource to be mapped from site_id instead of location_id.',
'Selected OCR Form has no pages. Use another revision or create a new revision by downloading a new Form.': 'Selected OCR Form has no pages. Use another revision or create a new revision by downloading a new Form.',
'Selected Questions for all Completed Assessment Forms': 'Selected Questions for all Completed Assessment Forms',
'Selects what type of gateway to use for outbound SMS': 'Selects what type of gateway to use for outbound SMS',
'Send Alert': 'Send Alert',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send By': 'Send By',
'Send Commitment as Shipment': 'Send Commitment as Shipment',
'Send Dispatch Update': 'Send Dispatch Update',
'Send Message': 'Send Message',
'Send New Shipment': 'Send New Shipment',
'Send Notification': 'Send Notification',
'Send Shipment': 'Send Shipment',
'Send Task Notification': 'Send Task Notification',
'Send a message to this person': 'Send a message to this person',
'Send a message to this team': 'Send a message to this team',
'Send batch': 'Send batch',
'Send from %s': 'Send from %s',
'Send message': 'Send message',
'Send this Alert': 'Send this Alert',
'Send': 'Send',
'Sender Priority updated': 'Sender Priority updated',
'Sender Whitelisted': 'Sender Whitelisted',
'Sender deleted': 'Sender deleted',
'Sender': 'Sender',
'Senior (50+)': 'Senior (50+)',
'Sent By Person': 'Sent By Person',
'Sent By': 'Sent By',
'Sent Emails': 'Sent Emails',
'Sent SMS': 'Sent SMS',
'Sent Shipment Details': 'Sent Shipment Details',
'Sent Shipment canceled and items returned to Warehouse': 'Sent Shipment canceled and items returned to Warehouse',
'Sent Shipment canceled': 'Sent Shipment canceled',
'Sent Shipment has returned, indicate how many items will be returned to Warehouse.': 'Sent Shipment has returned, indicate how many items will be returned to Warehouse.',
'Sent Shipment updated': 'Sent Shipment updated',
'Sent Shipments': 'Sent Shipments',
'Sent Tweets': 'Sent Tweets',
'Sent date': 'Sent date',
'Sent': 'Sent',
'Separated children, caregiving arrangements': 'Separated children, caregiving arrangements',
'Serial Number': 'Serial Number',
'Series Details': 'Series Details',
'Series added': 'Series added',
'Series deleted': 'Series deleted',
'Series details missing': 'Series details missing',
'Series updated': 'Series updated',
'Series': 'Series',
'Server': 'Server',
'Service Details': 'Service Details',
'Service Due': 'Service Due',
'Service Record': 'Service Record',
'Service added to Organization': 'Service added to Organization',
'Service added': 'Service added',
'Service deleted': 'Service deleted',
'Service or Facility': 'Service or Facility',
'Service profile added': 'Service profile added',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Service removed from Organization': 'Service removed from Organization',
'Service updated': 'Service updated',
'Service': 'Service',
'Services Available': 'Services Available',
'Services': 'Services',
'Set Base Facility/Site': 'Set Base Facility/Site',
'Set By': 'Set By',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.',
'Set as default Site': 'Set as default Site',
'Set as my Default': 'Set as my Default',
'Setting Details': 'Setting Details',
'Setting added': 'Setting added',
'Setting deleted': 'Setting deleted',
'Settings were reset because authenticating with Twitter failed': 'Settings were reset because authenticating with Twitter failed',
'Settings which can be configured through the web interface are available here.': 'Settings which can be configured through the web interface are available here.',
'Settings': 'Inställningar',
'Severe': 'Severe',
'Severity unknown': 'Severity unknown',
'Severity': 'Severity',
'Sewer Back-up': 'Sewer Back-up',
'Sex': 'Sex',
'Shapefile Layer': 'Shapefile Layer',
'Share': 'Share',
'Shelter & Essential NFIs': 'Shelter & Essential NFIs',
'Shelter - Take shelter in place or per instruction': 'Shelter - Take shelter in place or per instruction',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Service Details': 'Shelter Service Details',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Service': 'Shelter Service',
'Shelter Services': 'Shelter Services',
'Shelter Status Details': 'Shelter Status Details',
'Shelter Status added': 'Shelter Status added',
'Shelter Status deleted': 'Shelter Status deleted',
'Shelter Status updated': 'Shelter Status updated',
'Shelter Status': 'Shelter Status',
'Shelter Statuses': 'Shelter Statuses',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Type': 'Shelter Type',
'Shelter Types': 'Shelter Types',
'Shelter added': 'Shelter added',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelter': 'Shelter',
'Shelter/NFI Assistance': 'Shelter/NFI Assistance',
'Shelters': 'Shelters',
'Shift': 'Shift',
'Shifts': 'Shifts',
'Shipment Created': 'Shipment Created',
'Shipment Item Details': 'Shipment Item Details',
'Shipment Item deleted': 'Shipment Item deleted',
'Shipment Item updated': 'Shipment Item updated',
'Shipment Items Received': 'Shipment Items Received',
'Shipment Items sent from Warehouse': 'Shipment Items sent from Warehouse',
'Shipment Items': 'Shipment Items',
'Shipment Type': 'Shipment Type',
'Shipment received': 'Shipment received',
'Shipment to Receive': 'Shipment to Receive',
'Shipment to Send': 'Shipment to Send',
'Shipment': 'Shipment',
'Shipments': 'Shipments',
'Shipping Method': 'Shipping Method',
'Shipping Organization': 'Shipping Organization',
'Shooting': 'Shooting',
'Shop vac + HEPA filter': 'Shop vac + HEPA filter',
'Short Assessment': 'Short Assessment',
'Short Description': 'Short Description',
'Short Text': 'Short Text',
'Short Title / ID': 'Short Title / ID',
'Shovels': 'Shovels',
'Show Location?': 'Show Location?',
'Show Pivot Table': 'Show Pivot Table',
'Show Table': 'Show Table',
'Show _MENU_ entries': 'Show _MENU_ entries',
'Show author picture?': 'Show author picture?',
'Show less': 'Show less',
'Show more': 'Show more',
'Show on Map': 'Show on Map',
'Show totals': 'Show totals',
'Show': 'Show',
'Showing 0 to 0 of 0 entries': 'Showing 0 to 0 of 0 entries',
'Showing _START_ to _END_ of _TOTAL_ entries': 'Showing _START_ to _END_ of _TOTAL_ entries',
'Showing latest entries first': 'Showing latest entries first',
'Signature / Stamp': 'Signature / Stamp',
'Signature': 'Signature',
'Significant threat to life or property': 'Significant threat to life or property',
'Simple Search': 'Simple Search',
'Single Family': 'Single Family',
'Single Female Head of Household': 'Single Female Head of Household',
'Single PDF File': 'Single PDF File',
'Site Contact': 'Site Contact',
'Site Key which this site uses to authenticate at the remote site (if required for this type of repository).': 'Site Key which this site uses to authenticate at the remote site (if required for this type of repository).',
'Site Key': 'Site Key',
'Site Name': 'Site Name',
'Site Needs added': 'Site Needs added',
'Site Needs deleted': 'Site Needs deleted',
'Site Needs updated': 'Site Needs updated',
'Site Needs': 'Site Needs',
'Site': 'Site',
'Situation': 'Situation',
'Size of cache:': 'Size of cache:',
'Skeleton Example': 'Skeleton Example',
'Sketch': 'Sketch',
'Skill Catalog': 'Skill Catalog',
'Skill Details': 'Skill Details',
'Skill Equivalence Details': 'Skill Equivalence Details',
'Skill Equivalence added': 'Skill Equivalence added',
'Skill Equivalence deleted': 'Skill Equivalence deleted',
'Skill Equivalence updated': 'Skill Equivalence updated',
'Skill Equivalence': 'Skill Equivalence',
'Skill Equivalences': 'Skill Equivalences',
'Skill Provision Catalog': 'Skill Provision Catalog',
'Skill Provision Details': 'Skill Provision Details',
'Skill Provision added': 'Skill Provision added',
'Skill Provision deleted': 'Skill Provision deleted',
'Skill Provision updated': 'Skill Provision updated',
'Skill Provision': 'Skill Provision',
'Skill Type Catalog': 'Skill Type Catalog',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Type': 'Skill Type',
'Skill added to Request': 'Skill added to Request',
'Skill added': 'Skill added',
'Skill deleted': 'Skill deleted',
'Skill removed from Request': 'Skill removed from Request',
'Skill removed': 'Skill removed',
'Skill updated': 'Skill updated',
'Skill': 'Skill',
'Skills Required': 'Skills Required',
'Skills': 'Skills',
'Skin Marks': 'Skin Marks',
'Skype ID': 'Skype ID',
'Skype': 'Skype',
'Sledgehammer': 'Sledgehammer',
'Slope failure, debris': 'Slope failure, debris',
'Small Children': 'Small Children',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snapshot Report': 'Snapshot Report',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Soil bulging, liquefaction': 'Soil bulging, liquefaction',
'Soliciting Cash Donations?': 'Soliciting Cash Donations?',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Solution deleted',
'Solution updated': 'Solution updated',
'Solution': 'Solution',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry, location %(location)s appears to be outside the area of parent %(parent)s.': 'Sorry, location %(location)s appears to be outside the area of parent %(parent)s.',
'Sorry, location %(location)s appears to be outside the area supported by this deployment.': 'Sorry, location %(location)s appears to be outside the area supported by this deployment.',
'Sorry, location appears to be outside the area of parent %(parent)s.': 'Sorry, location appears to be outside the area of parent %(parent)s.',
'Sorry, location appears to be outside the area supported by this deployment.': 'Sorry, location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Sorry, I could not understand your request',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Sorry, that page is forbidden for some reason.',
'Sorry, that service is temporarily unavailable.': 'Sorry, that service is temporarily unavailable.',
'Sorry, there are no addresses to display': 'Sorry, there are no addresses to display',
'Source Name': 'Source Name',
'Source URL': 'Source URL',
'Source': 'Source',
'Sources of income': 'Sources of income',
'Space Debris': 'Space Debris',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Special Multirisk Protection Vehicle': 'Special Multirisk Protection Vehicle',
'Special Skills Required': 'Special Skills Required',
'Special Tools and Skills': 'Special Tools and Skills',
'Specialized Hospital': 'Specialized Hospital',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.',
'Specific Operations Vehicle': 'Specific Operations Vehicle',
'Specific locations need to have a parent of level': 'Specific locations need to have a parent of level',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the number of available sets': 'Specify the number of available sets',
'Specify the number of available units (adult doses)': 'Specify the number of available units (adult doses)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions',
'Specify the number of sets needed per 24h': 'Specify the number of sets needed per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specify the number of units (adult doses) needed per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h',
'Speed': 'Speed',
'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.',
'Spreadsheet': 'Spreadsheet',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff & Volunteers': 'Staff & Volunteers',
'Staff Assigned': 'Staff Assigned',
'Staff Assignment Details': 'Staff Assignment Details',
'Staff Assignment removed': 'Staff Assignment removed',
'Staff Assignment updated': 'Staff Assignment updated',
'Staff Assignments': 'Staff Assignments',
'Staff ID': 'Staff ID',
'Staff Member Details updated': 'Staff Member Details updated',
'Staff Member Details': 'Staff Member Details',
'Staff Member added': 'Staff Member added',
'Staff Member deleted': 'Staff Member deleted',
'Staff Record': 'Staff Record',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff member added': 'Staff member added',
'Staff present and caring for residents': 'Staff present and caring for residents',
'Staff with Contracts Expiring in the next Month': 'Staff with Contracts Expiring in the next Month',
'Staff': 'Medarbetare',
'Staff/Volunteer Record': 'Staff/Volunteer Record',
'Staff/Volunteer': 'Staff/Volunteer',
'Staff2': 'Staff2',
'Staffing Level': 'Staffing Level',
'Stairs': 'Stairs',
'Standard Deviation': 'Standard Deviation',
'Start Date': 'Start Date',
'Start': 'Start',
'Station Details': 'Station Details',
'Station Parameter Details': 'Station Parameter Details',
'Station Parameter added': 'Station Parameter added',
'Station Parameter removed': 'Station Parameter removed',
'Station Parameter updated': 'Station Parameter updated',
'Station Parameters': 'Station Parameters',
'Station': 'Station',
'Stationery': 'Stationery',
'Statistics': 'Statistics',
'Status Details': 'Status Details',
'Status Report added': 'Status Report added',
'Status Report deleted': 'Status Report deleted',
'Status Report updated': 'Status Report updated',
'Status Report': 'Status Report',
'Status Updated': 'Status Updated',
'Status added': 'Status added',
'Status deleted': 'Status deleted',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations/availability of emergency medical services at this facility.': 'Status of operations/availability of emergency medical services at this facility.',
'Status of security procedures/access restrictions for the facility.': 'Status of security procedures/access restrictions for the facility.',
'Status of the clinical departments.': 'Status of the clinical departments.',
'Status of the facility.': 'Status of the facility.',
'Status of the operating rooms of this facility.': 'Status of the operating rooms of this facility.',
'Status updated': 'Status updated',
'Status': 'Status',
'Statuses': 'Statuses',
'Steel frame': 'Steel frame',
'Stock Adjustment Details': 'Stock Adjustment Details',
'Stock Adjustment': 'Stock Adjustment',
'Stock Adjustments': 'Stock Adjustments',
'Stock Count Details': 'Stock Count Details',
'Stock Count created': 'Stock Count created',
'Stock Count deleted': 'Stock Count deleted',
'Stock Count modified': 'Stock Count modified',
'Stock Counts': 'Stock Counts',
'Stock Expires %(date)s': 'Stock Expires %(date)s',
'Stock added to Warehouse': 'Stock added to Warehouse',
'Stock in Warehouse': 'Stock in Warehouse',
'Stock removed from Warehouse': 'Stock removed from Warehouse',
'Stock': 'Stock',
'Stolen': 'Stolen',
'Storage Capacity (m3)': 'Storage Capacity (m3)',
'Storage Type': 'Storage Type',
'Storeys at and above ground level': 'Storeys at and above ground level',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Strategy': 'Strategy',
'Street Address': 'Street Address',
'Street View': 'Street View',
'String used to configure Proj4js. Can be found from %(url)s': 'String used to configure Proj4js. Can be found from %(url)s',
'Strong Wind': 'Strong Wind',
'Strong': 'Strong',
'Structural Hazards': 'Structural Hazards',
'Structural': 'Structural',
'Style invalid': 'Style invalid',
'Style': 'Style',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submit Data': 'Submit Data',
'Submit a request for recovery': 'Submit a request for recovery',
'Submit all': 'Submit all',
'Submit data to the region': 'Submit data to the region',
'Submit more': 'Submit more',
'Submit new Level 1 assessment (full form)': 'Submit new Level 1 assessment (full form)',
'Submit new Level 1 assessment (triage)': 'Submit new Level 1 assessment (triage)',
'Submit new Level 2 assessment': 'Submit new Level 2 assessment',
'Submit online': 'Submit online',
'Submit': 'Submit',
'Submitted by': 'Submitted by',
'Subscribe': 'Subscribe',
'Subscriptions Status': 'Subscriptions Status',
'Subsector Details': 'Subsector Details',
'Subsector added': 'Subsector added',
'Subsector deleted': 'Subsector deleted',
'Subsector updated': 'Subsector updated',
'Subsector': 'Subsector',
'Subsectors': 'Subsectors',
'Subsistence Cost': 'Subsistence Cost',
'Suburb': 'Suburb',
'Successfully registered at the repository.': 'Successfully registered at the repository.',
'Suggested By': 'Suggested By',
'Sum': 'Sum',
'Summary Details': 'Summary Details',
'Summary by Administration Level': 'Summary by Administration Level',
'Summary by Question Type - (The fewer text questions, the better the analysis can be)': 'Summary by Question Type - (The fewer text questions, the better the analysis can be)',
'Summary of Completed Assessment Forms': 'Summary of Completed Assessment Forms',
'Summary of Incoming Supplies': 'Summary of Incoming Supplies',
'Summary of Releases': 'Summary of Releases',
'Summary': 'Summary',
'Sunday': 'Sunday',
'Supervisor': 'Supervisor',
'Supplier Details': 'Supplier Details',
'Supplier added': 'Supplier added',
'Supplier deleted': 'Supplier deleted',
'Supplier updated': 'Supplier updated',
'Supplier': 'Supplier',
'Supplier/Donor': 'Supplier/Donor',
'Suppliers': 'Suppliers',
'Supply Chain Management': 'Supply Chain Management',
'Support Request': 'Support Request',
'Support Requests': 'Support Requests',
'Surgery': 'Surgery',
'Surplus': 'Surplus',
'Survey Answer': 'Survey Answer',
'Survey Question Type': 'Survey Question Type',
'Survey': 'Survey',
'Switch to 3D': 'Switch to 3D',
'Symbologies': 'Symbologies',
'Symbology Details': 'Symbology Details',
'Symbology added': 'Symbology added',
'Symbology deleted': 'Symbology deleted',
'Symbology removed from Layer': 'Symbology removed from Layer',
'Symbology updated': 'Symbology updated',
'Symbology': 'Symbology',
'Synchronization Job': 'Synchronization Job',
'Synchronization Log': 'Synchronization Log',
'Synchronization Schedule': 'Synchronization Schedule',
'Synchronization Settings': 'Synchronization Settings',
'Synchronization currently active - refresh page to update status.': 'Synchronization currently active - refresh page to update status.',
'Synchronization mode': 'Synchronization mode',
'Synchronization settings updated': 'Synchronization settings updated',
'Synchronization': 'Synkronisering',
'System - for internal functions': 'System - for internal functions',
'TMS Layer': 'TMS Layer',
'TO': 'TO',
'Table Permissions': 'Table Permissions',
'Table name of the resource to synchronize': 'Table name of the resource to synchronize',
'Table': 'Table',
'Tablename': 'Tablename',
'Tag Details': 'Tag Details',
'Tag Post': 'Tag Post',
'Tag added': 'Tag added',
'Tag deleted': 'Tag deleted',
'Tag removed': 'Tag removed',
'Tag updated': 'Tag updated',
'Tag': 'Tag',
'Tags': 'Tags',
'Task Details': 'Task Details',
'Task Status': 'Task Status',
'Task added': 'Task added',
'Task deleted': 'Task deleted',
'Task removed': 'Task removed',
'Task updated': 'Task updated',
'Task': 'Task',
'Tasks': 'Tasks',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team Leader': 'Team Leader',
'Team Member added': 'Team Member added',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Team Type',
'Team added': 'Team added',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Team': 'Team',
'Teams': 'Teams',
'Technical Support Vehicle': 'Technical Support Vehicle',
'Telephone Details': 'Telephone Details',
'Telephone': 'Telephone',
'Telephony': 'Telephony',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.',
'Template Name': 'Template Name',
'Template Section Details': 'Template Section Details',
'Template Section added': 'Template Section added',
'Template Section deleted': 'Template Section deleted',
'Template Section updated': 'Template Section updated',
'Template Sections': 'Template Sections',
'Template Summary': 'Template Summary',
'Template Title': 'Template Title',
'Template created': 'Template created',
'Template deleted': 'Template deleted',
'Template modified': 'Template modified',
'Template': 'Template',
'Templates': 'Templates',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Term for the primary within-country administrative division (e.g. State or Province).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Term for the secondary within-country administrative division (e.g. District or County).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Term for the third-level within-country administrative division (e.g. City or Town).',
'Terms of Service': 'Terms of Service',
'Territorial Authority': 'Territorial Authority',
'Terrorism': 'Terrorism',
'Tertiary Server (Optional)': 'Tertiary Server (Optional)',
'Test - testing, all recipients disregard': 'Test - testing, all recipients disregard',
'Test Results': 'Testresultat',
'Text': 'Text',
'Thank you for your approval': 'Thank you for your approval',
'Thank you, the submission%(br)shas been declined': 'Thank you, the submission%(br)shas been declined',
'Thanks for your assistance': 'Thanks for your assistance',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Area which this Site is located within.': 'The Area which this Site is located within.',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed',
'The Assessments module allows field workers to send in assessments.': 'The Assessments module allows field workers to send in assessments.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The Bin in which the Item is being stored (optional).': 'The Bin in which the Item is being stored (optional).',
'The Building Assessments module allows building safety to be assessed, e.g. after an Earthquake.': 'The Building Assessments module allows building safety to be assessed, e.g. after an Earthquake.',
'The Camp this Request is from': 'The Camp this Request is from',
'The Camp this person is checking into.': 'The Camp this person is checking into.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.',
'The Fire Stations Module can be used to record information on Fire Stations.': 'The Fire Stations Module can be used to record information on Fire Stations.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Maximum valid bounds, in projected coordinates': 'The Maximum valid bounds, in projected coordinates',
'The Media Library provides a catalog of digital media.': 'The Media Library provides a catalog of digital media.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'The Organization Registry keeps track of all the relief organizations working in the area.',
'The POST variable containing the phone number': 'The POST variable containing the phone number',
'The POST variable on the URL used for sending messages': 'The POST variable on the URL used for sending messages',
'The POST variables other than the ones containing the message and the phone number': 'The POST variables other than the ones containing the message and the phone number',
'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.',
'The Role this person plays within this hospital.': 'The Role this person plays within this hospital.',
'The Shelter this Request is from': 'The Shelter this Request is from',
'The Shelter this person is checking into.': 'The Shelter this person is checking into.',
'The Tracking Number %s ': 'The Tracking Number %s ',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.',
'The URL of your web gateway without the POST parameters': 'The URL of your web gateway without the POST parameters',
'The URL to access the service.': 'The URL to access the service.',
'The URL with field query. Used to fetch the search results.': 'The URL with field query. Used to fetch the search results.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The affected area of the alert message': 'The affected area of the alert message',
'The alert information': 'The alert information',
'The alert message containing this information': 'The alert message containing this information',
'The answers are missing': 'The answers are missing',
'The area is': 'The area is',
'The attribute used to determine which features to cluster together (optional).': 'The attribute used to determine which features to cluster together (optional).',
'The attribute which is used for the title of popups.': 'The attribute which is used for the title of popups.',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The circular area is represented by a central point given as a coordinate pair followed by a radius value in kilometers.': 'The circular area is represented by a central point given as a coordinate pair followed by a radius value in kilometers.',
'The contact for follow-up and confirmation of the alert message': 'The contact for follow-up and confirmation of the alert message',
'The effective time of the information of the alert message': 'The effective time of the information of the alert message',
'The expected time of the beginning of the subject event of the alert message': 'The expected time of the beginning of the subject event of the alert message',
'The expiry time of the information of the alert message': 'The expiry time of the information of the alert message',
'The extended message identifier(s) (in the form sender,identifier,sent) of an earlier CAP message or messages referenced by this one.': 'The extended message identifier(s) (in the form sender,identifier,sent) of an earlier CAP message or messages referenced by this one.',
'The facility where this position is based.': 'The facility where this position is based.',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The following %(new)s %(resource)s have been added': 'The following %(new)s %(resource)s have been added',
'The following %(upd)s %(resource)s have been updated': 'The following %(upd)s %(resource)s have been updated',
'The following %s have been added': 'The following %s have been added',
'The following %s have been updated': 'The following %s have been updated',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.',
'The geographic code delineating the affected area': 'The geographic code delineating the affected area',
'The group listing identifying earlier message(s) referenced by the alert message': 'The group listing identifying earlier message(s) referenced by the alert message',
'The group listing of intended recipients of the alert message': 'The group listing of intended recipients of the alert message',
'The human-readable name of the agency or authority issuing this alert.': 'The human-readable name of the agency or authority issuing this alert.',
'The identifier of the MIME content type and sub-type describing the resource file': 'The identifier of the MIME content type and sub-type describing the resource file',
'The identifier of the hyperlink for the resource file': 'The identifier of the hyperlink for the resource file',
'The identifier of the sender of the alert message': 'The identifier of the sender of the alert message',
'The integer indicating the size of the resource file': 'The integer indicating the size of the resource file',
'The intended audience of the alert message': 'The intended audience of the alert message',
'The language you wish the site to be displayed in.': 'The language you wish the site to be displayed in.',
'The length is': 'The length is',
'The list of Brands are maintained by the Administrators.': 'The list of Brands is maintained by the Administrators.',
'The list of Catalogs are maintained by the Administrators.': 'The list of Catalogs is maintained by the Administrators.',
'The map will be displayed initially with this latitude at the center.': 'The map will be displayed initially with this latitude at the center.',
'The map will be displayed initially with this longitude at the center.': 'The map will be displayed initially with this longitude at the center.',
'The maximum altitude of the affected area': 'The maximum altitude of the affected area',
'The minimum number of features to form a cluster. 0 to disable.': 'The minimum number of features to form a cluster. 0 to disable.',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The nature of the alert message': 'The nature of the alert message',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item',
'The number of aggregated records': 'The number of aggregated records',
'The number of geographical units within the aggregation area': 'The number of geographical units within the aggregation area',
'The number of pixels apart that features need to be before they are clustered.': 'The number of pixels apart that features need to be before they are clustered.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The parent project or programme which this project is implemented under': 'The parent project or programme which this project is implemented under',
'The parse request has been submitted': 'The parse request has been submitted',
'The particular source of this alert; e.g., an operator or a specific device.': 'The particular source of this alert; e.g., an operator or a specific device.',
'The poll request has been submitted, so new messages should appear shortly - refresh to see them': 'The poll request has been submitted, so new messages should appear shortly - refresh to see them',
'The recommended action to be taken by recipients of the alert message': 'The recommended action to be taken by recipients of the alert message',
'The search request has been submitted, so new messages should appear shortly - refresh to see them': 'The search request has been submitted, so new messages should appear shortly - refresh to see them',
'The search results are now being processed with KeyGraph': 'The search results are now being processed with KeyGraph',
'The search results should appear shortly - refresh to see them': 'The search results should appear shortly - refresh to see them',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The specific or minimum altitude of the affected area': 'The specific or minimum altitude of the affected area',
'The staff responsibile for Facilities can make Requests for assistance.': 'The staff responsible for Facilities can make Requests for assistance.',
'The subject event of the alert message': 'The subject event of the alert message',
'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'The synchronization module allows the synchronization of data resources between Sahana Eden instances.',
'The system supports 2 projections by default:': 'The system supports 2 projections by default:',
'The text denoting the type of the subject event of the alert message': 'The text denoting the type of the subject event of the alert message',
'The text describing the purpose or significance of the alert message': 'The text describing the purpose or significance of the alert message',
'The text describing the rule for limiting distribution of the restricted alert message': 'The text describing the rule for limiting distribution of the restricted alert message',
'The text headline of the alert message': 'The text headline of the alert message',
'The text identifying the source of the alert message': 'The text identifying the source of the alert message',
'The text naming the originator of the alert message': 'The text naming the originator of the alert message',
'The time at which the Event started.': 'The time at which the Event started.',
'The time at which the Incident started.': 'The time at which the Incident started.',
'The title of the page, as seen in the browser (optional)': 'The title of the page, as seen in the browser (optional)',
'The token associated with this application on': 'The token associated with this application on',
'The type and content of the resource file': 'The type and content of the resource file',
'The uploaded Form is unreadable, please do manual data entry.': 'The uploaded Form is unreadable; please do manual data entry.',
'The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages.': 'The urgency, severity, and certainty elements collectively distinguish less emphatic from more emphatic messages.',
'The urgency, severity, and certainty of the information collectively distinguish less emphatic from more emphatic messages.': 'The urgency, severity, and certainty of the information collectively distinguish less emphatic from more emphatic messages.',
'The way in which an item is normally distributed': 'The way in which an item is normally distributed',
'The weight in kg.': 'The weight in kg.',
'The': 'The',
'Theme Data deleted': 'Theme Data deleted',
'Theme Data updated': 'Theme Data updated',
'Theme Data': 'Theme Data',
'Theme Details': 'Theme Details',
'Theme Layer': 'Theme Layer',
'Theme added to Activity': 'Theme added to Activity',
'Theme added to Project Location': 'Theme added to Project Location',
'Theme added to Project': 'Theme added to Project',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme removed from Activity': 'Theme removed from Activity',
'Theme removed from Project Location': 'Theme removed from Project Location',
'Theme removed from Project': 'Theme removed from Project',
'Theme updated': 'Theme updated',
'Theme': 'Theme',
'Themes': 'Themes',
'There are multiple records at this location': 'There are multiple records at this location',
'There are no contacts available for this person!': 'There are no contacts available for this person!',
'There are too many features, please Zoom In': 'There are too many features, please Zoom In',
'There is a problem with your file.': 'There is a problem with your file.',
'There is insufficient data to draw a chart from the questions selected': 'There is insufficient data to draw a chart from the questions selected',
'There is no address for this person yet. Add new address.': 'There is no address for this person yet. Add new address.',
'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'There is no status for this %(site_label)s yet. Add %(site_label)s Status.',
'These are settings for Inbound Mail.': 'These are settings for Inbound Mail.',
'These are the Incident Categories visible to normal End-Users': 'These are the Incident Categories visible to normal End-Users',
'These are the filters being used by the search.': 'These are the filters being used by the search.',
'These need to be added in Decimal Degrees.': 'These need to be added in Decimal Degrees.',
'This Alert has already been sent!': 'This Alert has already been sent!',
'This Alert has no Recipients yet!': 'This Alert has no Recipients yet!',
'This adjustment has already been closed.': 'This adjustment has already been closed.',
'This email-address is already registered.': 'This email-address is already registered.',
'This file already exists on the server as': 'This file already exists on the server as',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.',
'This is guaranteed by assigner to be unique globally; e.g., may be based on an Internet domain name. Must not include spaces, commas or restricted characters (< and &).': 'This is guaranteed by assigner to be unique globally; e.g., may be based on an Internet domain name. Must not include spaces, commas or restricted characters (< and &).',
'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.',
'This is required if analyzing with KeyGraph.': 'This is required if analyzing with KeyGraph.',
'This is the Common Alerting Protocol Broker interface.': 'This is the Common Alerting Protocol Broker interface.',
'This is the full name of the language and will be displayed to the user when selecting the template language.': 'This is the full name of the language and will be displayed to the user when selecting the template language.',
'This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.': 'This is the short code of the language and will be used as the name of the file. This should be the ISO 639 code.',
'This job has already been finished successfully.': 'This job has already been finished successfully.',
'This level is not open for editing.': 'This level is not open for editing.',
'This might be due to a temporary overloading or maintenance of the server.': 'This might be due to a temporary overloading or maintenance of the server.',
'This module allows Warehouse Stock to be managed, requested & shipped between the Warehouses and Other Inventories': 'This module allows Warehouse Stock to be managed, requested & shipped between the Warehouses and Other Inventories',
'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.',
'This module allows you to plan scenarios for both Exercises & Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to plan scenarios for both Exercises & Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.',
'This module can be used to send alerts to regional disaster response teams and track their deployments.': 'This module can be used to send alerts to regional disaster response teams and track their deployments.',
'This resource is already configured for this repository': 'This resource is already configured for this repository',
'This role can not be assigned to users.': 'This role can not be assigned to users.',
'This screen allows you to upload a collection of photos to the server.': 'This screen allows you to upload a collection of photos to the server.',
'This shipment contains %s items': 'This shipment contains %s items',
'This shipment contains one line item': 'This shipment contains one line item',
'This shipment has already been received & subsequently canceled.': 'This shipment has already been received & subsequently canceled.',
'This shipment has already been received.': 'This shipment has already been received.',
'This shipment has already been sent.': 'This shipment has already been sent.',
'This shipment has not been received - it has NOT been canceled because can still be edited.': 'This shipment has not been received - it has NOT been canceled because it can still be edited.',
'This shipment has not been returned.': 'This shipment has not been returned.',
'This shipment has not been sent - it cannot be returned because it can still be edited.': 'This shipment has not been sent - it cannot be returned because it can still be edited.',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'This shipment has not been sent - it has NOT been canceled because it can still be edited.',
'This should be an export service URL, see': 'This should be an export service URL, see',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket ID': 'Ticket ID',
'Ticket': 'Ticket',
'Tiled': 'Tiled',
'Tilt-up concrete': 'Tilt-up concrete',
'Timber frame': 'Timber frame',
'Time Actual': 'Time Actual',
'Time Estimate': 'Time Estimate',
'Time Estimated': 'Time Estimated',
'Time Frame': 'Time Frame',
'Time In': 'Time In',
'Time Log Deleted': 'Time Log Deleted',
'Time Log Updated': 'Time Log Updated',
'Time Log': 'Time Log',
'Time Logged': 'Time Logged',
'Time Out': 'Time Out',
'Time Question': 'Time Question',
'Time Taken': 'Time Taken',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Time': 'Time',
'Timeline Report': 'Timeline Report',
'Timeline': 'Timeline',
'Times Changed': 'Times Changed',
'Times Completed': 'Times Completed',
'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'Title to show for the Web Map Service panel in the Tools panel.': 'Title to show for the Web Map Service panel in the Tools panel.',
'Title': 'Title',
'To %(site)s': 'To %(site)s',
'To Organization': 'To Organization',
'To Person': 'To Person',
'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'To Print or Share the Map you will have to take a screenshot. If you need help taking a screenshot, have a look at these instructions for %(windows)s or %(mac)s',
'To activate Freehand mode, hold down the shift key.': 'To activate Freehand mode, hold down the shift key.',
'To be done': 'To be done',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.',
'To variable': 'To variable',
'To': 'To',
'Tools Other': 'Tools Other',
'Tools and materials required': 'Tools and materials required',
'Tools': 'Tools',
'Tornado': 'Tornado',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Affected': 'Total Affected',
'Total Annual Budget': 'Total Annual Budget',
'Total Beds': 'Total Beds',
'Total Cost per Megabyte': 'Total Cost per Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Cost': 'Total Cost',
'Total Dead': 'Total Dead',
'Total Funding (Local Currency)': 'Total Funding (Local Currency)',
'Total Funding Amount': 'Total Funding Amount',
'Total Injured': 'Total Injured',
'Total Locations': 'Total Locations',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly': 'Total Monthly',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Population': 'Total Population',
'Total Records: %(numrows)s': 'Total Records: %(numrows)s',
'Total Recurring Costs': 'Total Recurring Costs',
'Total Unit Cost': 'Total Unit Cost',
'Total Units': 'Total Units',
'Total Value': 'Total Value',
'Total gross floor area (square meters)': 'Total gross floor area (square meters)',
'Total number of beds in this facility. Automatically updated from daily reports.': 'Total number of beds in this facility. Automatically updated from daily reports.',
'Total number of houses in the area': 'Total number of houses in the area',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Total': 'Total',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tour Configuration': 'Tour Configuration',
'Tour Details': 'Tour Details',
'Tour Name': 'Tour Name',
'Tour User': 'Tour User',
'Tour added': 'Tour added',
'Tour deleted': 'Tour deleted',
'Tour updated': 'Tour updated',
'Tourist Group': 'Tourist Group',
'Tours': 'Tours',
'Traceback': 'Traceback',
'Track Shipment': 'Track Shipment',
'Track with this Person?': 'Track with this Person?',
'Trackable': 'Trackable',
'Traffic Report': 'Traffic Report',
'Trained People Details': 'Trained People Details',
'Trained People added': 'Trained People added',
'Trained People deleted': 'Trained People deleted',
'Trained People updated': 'Trained People updated',
'Trained People': 'Trained People',
'Training Details': 'Training Details',
'Training Event Details': 'Training Event Details',
'Training Event added': 'Training Event added',
'Training Event deleted': 'Training Event deleted',
'Training Event updated': 'Training Event updated',
'Training Event': 'Training Event',
'Training Events': 'Training Events',
'Training Facility': 'Training Facility',
'Training Hours (Month)': 'Training Hours (Month)',
'Training Hours (Year)': 'Training Hours (Year)',
'Training Report': 'Training Report',
'Training added': 'Training added',
'Training deleted': 'Training deleted',
'Training updated': 'Training updated',
'Training': 'Training',
'Trainings': 'Trainings',
'Transfer Ownership To (Organization/Branch)': 'Transfer Ownership To (Organization/Branch)',
'Transfer Ownership': 'Transfer Ownership',
'Transfer': 'Transfer',
'Transit Status': 'Transit Status',
'Transit': 'Transit',
'Translate the System': 'Translate the System',
'Translate': 'Translate',
'Translated File': 'Translated File',
'Translation': 'Översättning',
'Transparent?': 'Transparent?',
'Transport Reference': 'Transport Reference',
'Transportation Required': 'Transportation Required',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Transported By': 'Transported By',
'Transported by': 'Transported by',
'Trash bags': 'Trash bags',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tropical Storm': 'Tropical Storm',
'Tropo Messaging Token': 'Tropo Messaging Token',
'Tropo Voice Token': 'Tropo Voice Token',
'Tropo settings updated': 'Tropo settings updated',
'Tropo': 'Tropo',
'Truck': 'Truck',
'Try checking the URL for errors, maybe it was mistyped.': 'Try checking the URL for errors, maybe it was mistyped.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Try hitting refresh/reload button or trying the URL from the address bar again.',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Tugboat Capacity': 'Tugboat Capacity',
'Tweet Details': 'Tweet Details',
'Tweet ID': 'Tweet ID',
'Tweet deleted': 'Tweet deleted',
'Tweet': 'Tweet',
'Tweeted By': 'Tweeted By',
'Tweeted On': 'Tweeted On',
'Tweeted by': 'Tweeted by',
'Tweeted on': 'Tweeted on',
'Twilio (Inbound)': 'Twilio (Inbound)',
'Twilio Setting Details': 'Twilio Setting Details',
'Twilio Setting added': 'Twilio Setting added',
'Twilio Setting deleted': 'Twilio Setting deleted',
'Twilio Settings': 'Twilio Settings',
'Twilio settings updated': 'Twilio settings updated',
'Twilio': 'Twilio',
'Twitter InBox': 'Twitter InBox',
'Twitter Search Queries': 'Twitter Search Queries',
'Twitter Search Results': 'Twitter Search Results',
'Twitter Timeline': 'Twitter Timeline',
'Twitter account updated': 'Twitter account updated',
'Twitter': 'Twitter',
'Type of Construction': 'Type of Construction',
'Type of Insurance': 'Type of Insurance',
'Type of People Details': 'Type of People Details',
'Type of People added': 'Type of People added',
'Type of People deleted': 'Type of People deleted',
'Type of People updated': 'Type of People updated',
'Type of People': 'Type of People',
'Type of Peoples': 'Type of Peoples',
'Type of Property': 'Type of Property',
'Type of Trained People Details': 'Type of Trained People Details',
'Type of Trained People added': 'Type of Trained People added',
'Type of Trained People deleted': 'Type of Trained People deleted',
'Type of Trained People updated': 'Type of Trained People updated',
'Type of Trained People': 'Type of Trained People',
'Type of Transport': 'Type of Transport',
'Type of water source before the disaster': 'Type of water source before the disaster',
'Type the name of an existing catalog kit': 'Type the name of an existing catalog kit',
'Type': 'Type',
'Types of People': 'Types of People',
'Types of Trained People': 'Types of Trained People',
'Tyvek suits': 'Tyvek suits',
'UPDATE': 'UPDATE',
'URL for the Mobile Commons API': 'URL for the Mobile Commons API',
'URL for the twilio API.': 'URL for the twilio API.',
'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.': 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.',
'URL of the proxy server to connect to the repository (leave empty for default proxy)': 'URL of the proxy server to connect to the repository (leave empty for default proxy)',
'URL to a Google Calendar to display on the project timeline.': 'URL to a Google Calendar to display on the project timeline.',
'URL to resume tour': 'URL to resume tour',
'URL': 'URL',
'URL/Link': 'URL/Link',
'UUID': 'UUID',
'Un-Repairable': 'Un-Repairable',
'Unable to find sheet %(sheet_name)s in uploaded spreadsheet': 'Unable to find sheet %(sheet_name)s in uploaded spreadsheet',
'Unable to open spreadsheet': 'Unable to open spreadsheet',
'Unable to parse CSV file or file contains invalid data': 'Unable to parse CSV file or file contains invalid data',
'Unable to parse CSV file!': 'Unable to parse CSV file!',
'Unassigned': 'Unassigned',
'Uncertainty': 'Uncertainty',
'Uncheck all': 'Uncheck all',
'Undefined Table': 'Undefined Table',
'Undefined': 'Undefined',
'Under 5': 'Under 5',
'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization',
'Under which conditions local records shall be updated': 'Under which conditions local records shall be updated',
'Understaffed': 'Understaffed',
'Unidentified': 'Unidentified',
'Unique Locations': 'Unique Locations',
'Unique code': 'Unique code',
'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.',
'Unit Cost': 'Unit Cost',
'Unit Value': 'Unit Value',
'Unit added': 'Unit added',
'Unit deleted': 'Unit deleted',
'Unit of Measure': 'Unit of Measure',
'Unit updated': 'Unit updated',
'Unit': 'Unit',
'United States Dollars': 'United States Dollars',
'Units': 'Units',
'Unknown Locations': 'Unknown Locations',
'Unknown question code': 'Unknown question code',
'Unknown type of facility': 'Unknown type of facility',
'Unknown': 'Unknown',
'Unloading': 'Unloading',
'Unmark as duplicate': 'Unmark as duplicate',
'Unranked Options': 'Unranked Options',
'Unreinforced masonry': 'Unreinforced masonry',
'Unsafe': 'Unsafe',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unselect to disable this API service': 'Unselect to disable this API service',
'Unselect to disable this SMTP service': 'Unselect to disable this SMTP service',
'Unsent': 'Unsent',
'Unskilled': 'Unskilled',
'Unspecified': 'Unspecified',
'Unsubscribe': 'Unsubscribe',
'Unsupported data format': 'Unsupported data format',
'Unsupported method': 'Unsupported method',
'Upcoming Features Include': 'Upcoming Features Include',
'Update Activity Report': 'Update Activity Report',
'Update Cholera Treatment Capability Information': 'Update Cholera Treatment Capability Information',
'Update Coalition': 'Update Coalition',
'Update Map': 'Update Map',
'Update Master file': 'Update Master file',
'Update Method': 'Update Method',
'Update Morgue Details': 'Update Morgue Details',
'Update Notification': 'Update Notification',
'Update Policy': 'Update Policy',
'Update Report': 'Update Report',
'Update Request': 'Update Request',
'Update Service Profile': 'Update Service Profile',
'Update Status': 'Update Status',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update alert information': 'Update alert information',
'Update this entry': 'Update this entry',
'Update': 'Update',
'Update: Update and supercede earlier message(s)': 'Update: Update and supersede earlier message(s)',
'Update:': 'Update:',
'Updated By': 'Updated By',
'Upload .CSV': 'Upload .CSV',
'Upload Appraisal': 'Upload Appraisal',
'Upload Completed Assessment Form': 'Upload Completed Assessment Form',
'Upload Fire Stations List': 'Upload Fire Stations List',
'Upload Format': 'Upload Format',
'Upload Photos': 'Upload Photos',
'Upload Scanned OCR Form': 'Upload Scanned OCR Form',
'Upload Shapefile': 'Upload Shapefile',
'Upload Vehicles List': 'Upload Vehicles List',
'Upload Web2py portable build as a zip file': 'Upload Web2py portable build as a zip file',
'Upload a (completely or partially) translated CSV file': 'Upload a (completely or partially) translated CSV file',
'Upload a Question List import file': 'Upload a Question List import file',
'Upload a text file containing new-line separated strings:': 'Upload a text file containing new-line separated strings:',
'Upload an Assessment Template import file': 'Upload an Assessment Template import file',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!',
'Upload an image file (png or jpeg), max. 400x400 pixels!': 'Upload an image file (png or jpeg), max. 400x400 pixels!',
'Upload demographic data': 'Upload demographic data',
'Upload file': 'Upload file',
'Upload indicators': 'Upload indicators',
'Upload successful': 'Upload successful',
'Upload the Completed Assessment Form': 'Upload the Completed Assessment Form',
'Upload': 'Upload',
'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'Uploaded PDF file has more or fewer pages than required. Check that you have provided the appropriate revision of your Form and that the Form contains the appropriate number of pages.',
'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.',
'Uploading report details': 'Uploading report details',
'Urban Fire': 'Urban Fire',
'Urban Tank Tactical Vehicle': 'Urban Tank Tactical Vehicle',
'Urban area': 'Urban area',
'Urgency': 'Urgency',
'Urgent': 'Urgent',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'Use Geocoder for address lookups?': 'Use Geocoder for address lookups?',
'Use Site?': 'Use Site?',
'Use decimal': 'Use decimal',
'Use default': 'Use default',
'Use deg, min, sec': 'Use deg, min, sec',
'Use these links to download data that is currently in the database.': 'Use these links to download data that is currently in the database.',
'Use this to set the starting location for the Location Selector.': 'Use this to set the starting location for the Location Selector.',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Used in onHover Tooltip & Cluster Popups to differentiate between types.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.',
'Used to collate multiple messages referring to different aspects of the same incident. If multiple incident identifiers are referenced, they SHALL be separated by whitespace. Incident names including whitespace SHALL be surrounded by double-quotes.': 'Used to collate multiple messages referring to different aspects of the same incident. If multiple incident identifiers are referenced, they SHALL be separated by whitespace. Incident names including whitespace SHALL be surrounded by double-quotes.',
'Used to populate feature attributes which can be used for Styling.': 'Used to populate feature attributes which can be used for Styling.',
'User Account has been Approved': 'User Account has been Approved',
'User Account has been Disabled': 'User Account has been Disabled',
'User Account': 'User Account',
'User Details': 'User Details',
'User Guidelines Synchronization': 'User Guidelines Synchronization',
'User Profile': 'Användarprofil',
'User Roles': 'User Roles',
'User added to Role': 'User added to Role',
'User added': 'User added',
'User deleted': 'User deleted',
'User has been (re)linked to Person and Human Resource record': 'User has been (re)linked to Person and Human Resource record',
'User updated': 'User updated',
'User with Role': 'User with Role',
'User': 'User',
'Username to use for authentication at the remote site.': 'Username to use for authentication at the remote site.',
'Username': 'Username',
'Users in my Organizations': 'Users in my Organizations',
'Users with this Role': 'Users with this Role',
'Users': 'Användare',
'Uses the REST Query Format defined in': 'Uses the REST Query Format defined in',
'Utilities': 'Utilities',
'Utility knives': 'Utility knives',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Utilization Details': 'Utilization Details',
'Utilization Report': 'Utilization Report',
'VCA REPORTS': 'VCA REPORTS',
'VCA Report': 'VCA Report',
'VCA Reports': 'VCA Reports',
'Valid From': 'Valid From',
'Valid Until': 'Valid Until',
'Valid': 'Valid',
'Validation error': 'Validation error',
'Value per Pack': 'Value per Pack',
'Value': 'Value',
'Variance': 'Variance',
'Vehicle Assignment updated': 'Vehicle Assignment updated',
'Vehicle Assignments': 'Vehicle Assignments',
'Vehicle Categories': 'Vehicle Categories',
'Vehicle Category': 'Vehicle Category',
'Vehicle Crime': 'Vehicle Crime',
'Vehicle Deployments': 'Vehicle Deployments',
'Vehicle Details added': 'Vehicle Details added',
'Vehicle Details deleted': 'Vehicle Details deleted',
'Vehicle Details updated': 'Vehicle Details updated',
'Vehicle Details': 'Vehicle Details',
'Vehicle Management': 'Vehicle Management',
'Vehicle Plate Number': 'Vehicle Plate Number',
'Vehicle Type Details': 'Vehicle Type Details',
'Vehicle Type added': 'Vehicle Type added',
'Vehicle Type deleted': 'Vehicle Type deleted',
'Vehicle Type updated': 'Vehicle Type updated',
'Vehicle Type': 'Vehicle Type',
'Vehicle Types': 'Vehicle Types',
'Vehicle added': 'Vehicle added',
'Vehicle assigned': 'Vehicle assigned',
'Vehicle deleted': 'Vehicle deleted',
'Vehicle unassigned': 'Vehicle unassigned',
'Vehicle updated': 'Vehicle updated',
'Vehicle': 'Vehicle',
'Vehicles are assets with some extra details.': 'Vehicles are assets with some extra details.',
'Vehicles': 'Vehicles',
'Verified': 'Verified',
'Verified?': 'Verified?',
'Verify password': 'Verify password',
'Version': 'Version',
'Very Good': 'Very Good',
'Very High': 'Very High',
'Very Strong': 'Very Strong',
'Vessel Max Length': 'Vessel Max Length',
'Video Tutorials': 'Video Tutorials',
'View Alerts received using either Email or SMS': 'View Alerts received using either Email or SMS',
'View Email Accounts': 'View Email Accounts',
'View Email InBox': 'View Email InBox',
'View Error Tickets': 'View Error Tickets',
'View Fullscreen Map': 'View Fullscreen Map',
'View InBox': 'View InBox',
'View Items': 'View Items',
'View Location Details': 'View Location Details',
'View Message Log': 'View Message Log',
'View Mobile Commons Settings': 'View Mobile Commons Settings',
'View Outbox': 'View Outbox',
'View Parser Connections': 'View Parser Connections',
'View Queries': 'View Queries',
'View RSS Posts': 'View RSS Posts',
'View RSS Settings': 'View RSS Settings',
'View Reports': 'View Reports',
'View SMS InBox': 'View SMS InBox',
'View Sender Priority': 'View Sender Priority',
'View Sent Emails': 'View Sent Emails',
'View Sent SMS': 'View Sent SMS',
'View Sent Tweets': 'View Sent Tweets',
'View Settings': 'View Settings',
'View Skills': 'View Skills',
'View Test Result Reports': 'View Test Result Reports',
'View Tweet': 'View Tweet',
'View Twilio Settings': 'View Twilio Settings',
'View Twitter InBox': 'View Twitter InBox',
'View all log entries': 'View all log entries',
'View and/or update their details': 'View and/or update their details',
'View full screen': 'View full screen',
'View full size': 'View full size',
'View log entries per repository': 'View log entries per repository',
'View on Map': 'View on Map',
'View or update the status of a hospital.': 'View or update the status of a hospital.',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'View the hospitals on a map.',
'View the module-wise percentage of translated strings': 'View the module-wise percentage of translated strings',
'View': 'View',
'View/Edit the Database directly': 'View/Edit the Database directly',
'Village Leader': 'Village Leader',
'Village': 'Village',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Volcanic Ash Cloud',
'Volcanic Event': 'Volcanic Event',
'Volume (m3)': 'Volume (m3)',
'Volunteer Cluster Position added': 'Volunteer Cluster Position added',
'Volunteer Cluster Position deleted': 'Volunteer Cluster Position deleted',
'Volunteer Cluster Position updated': 'Volunteer Cluster Position updated',
'Volunteer Cluster Position': 'Volunteer Cluster Position',
'Volunteer Cluster Type added': 'Volunteer Cluster Type added',
'Volunteer Cluster Type deleted': 'Volunteer Cluster Type deleted',
'Volunteer Cluster Type updated': 'Volunteer Cluster Type updated',
'Volunteer Cluster Type': 'Volunteer Cluster Type',
'Volunteer Cluster added': 'Volunteer Cluster added',
'Volunteer Cluster deleted': 'Volunteer Cluster deleted',
'Volunteer Cluster updated': 'Volunteer Cluster updated',
'Volunteer Cluster': 'Volunteer Cluster',
'Volunteer Contact': 'Volunteer Contact',
'Volunteer Details updated': 'Volunteer Details updated',
'Volunteer Details': 'Volunteer Details',
'Volunteer Hours': 'Volunteer Hours',
'Volunteer Record': 'Volunteer Record',
'Volunteer Request': 'Volunteer Request',
'Volunteer Role Catalog': 'Volunteer Role Catalog',
'Volunteer Role Details': 'Volunteer Role Details',
'Volunteer Role added': 'Volunteer Role added',
'Volunteer Role deleted': 'Volunteer Role deleted',
'Volunteer Role updated': 'Volunteer Role updated',
'Volunteer Role': 'Volunteer Role',
'Volunteer Service Record': 'Volunteer Service Record',
'Volunteer added': 'Volunteer added',
'Volunteer deleted': 'Volunteer deleted',
'Volunteer': 'Volunteer',
'Volunteers': 'Volontärer',
'Vote': 'Vote',
'Voted on': 'Voted on',
'Votes': 'Votes',
'Vulnerabilities': 'Vulnerabilities',
'Vulnerability Aggregated Indicator Details': 'Vulnerability Aggregated Indicator Details',
'Vulnerability Aggregated Indicator added': 'Vulnerability Aggregated Indicator added',
'Vulnerability Aggregated Indicator deleted': 'Vulnerability Aggregated Indicator deleted',
'Vulnerability Aggregated Indicator updated': 'Vulnerability Aggregated Indicator updated',
'Vulnerability Aggregated Indicator': 'Vulnerability Aggregated Indicator',
'Vulnerability Aggregated Indicators': 'Vulnerability Aggregated Indicators',
'Vulnerability Data Details': 'Vulnerability Data Details',
'Vulnerability Data added': 'Vulnerability Data added',
'Vulnerability Data deleted': 'Vulnerability Data deleted',
'Vulnerability Data updated': 'Vulnerability Data updated',
'Vulnerability Data': 'Vulnerability Data',
'Vulnerability Document': 'Vulnerability Document',
'Vulnerability Indicator Details': 'Vulnerability Indicator Details',
'Vulnerability Indicator added': 'Vulnerability Indicator added',
'Vulnerability Indicator deleted': 'Vulnerability Indicator deleted',
'Vulnerability Indicator updated': 'Vulnerability Indicator updated',
'Vulnerability Indicator': 'Vulnerability Indicator',
'Vulnerability Indicators': 'Vulnerability Indicators',
'Vulnerability Mapping': 'Vulnerability Mapping',
'Vulnerability': 'Vulnerability',
'WARNING': 'WARNING',
'WFS Layer': 'WFS Layer',
'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4326) is required for many WMS servers.',
'WKT is Invalid!': 'WKT is Invalid!',
'WMS Layer': 'WMS Layer',
'Walking Only': 'Walking Only',
'Wall or other structural damage': 'Wall or other structural damage',
'Warehouse Details': 'Warehouse Details',
'Warehouse Stock Details': 'Warehouse Stock Details',
'Warehouse Stock Report': 'Warehouse Stock Report',
'Warehouse Stock updated': 'Warehouse Stock updated',
'Warehouse Stock': 'Warehouse Stock',
'Warehouse Type Details': 'Warehouse Type Details',
'Warehouse Type added': 'Warehouse Type added',
'Warehouse Type deleted': 'Warehouse Type deleted',
'Warehouse Type updated': 'Warehouse Type updated',
'Warehouse Type': 'Warehouse Type',
'Warehouse Types': 'Warehouse Types',
'Warehouse added': 'Warehouse added',
'Warehouse deleted': 'Warehouse deleted',
'Warehouse updated': 'Warehouse updated',
'Warehouse': 'Warehouse',
'Warehouses': 'Warehouses',
'Warehousing Storage Capacity': 'Warehousing Storage Capacity',
'WatSan': 'WatSan',
'Water collection': 'Water collection',
'Water gallon': 'Water gallon',
'Water storage containers in households': 'Water storage containers in households',
'Water supply': 'Water supply',
'Waterspout': 'Waterspout',
'Weak': 'Weak',
'Web API settings updated': 'Web API settings updated',
'Web API': 'Web API',
'Web Form': 'Web Form',
'Web Map Service Browser Name': 'Web Map Service Browser Name',
'Web Map Service Browser URL': 'Web Map Service Browser URL',
'Web2py executable zip file found - Upload to replace the existing file': 'Web2py executable zip file found - Upload to replace the existing file',
'Web2py executable zip file needs to be uploaded first to use this function.': 'Web2py executable zip file needs to be uploaded first to use this function.',
'Web2py executable zip file needs to be uploaded to use this function.': 'Web2py executable zip file needs to be uploaded to use this function.',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Week': 'Week',
'Weekends only': 'Weekends only',
'Weekly': 'Weekly',
'Weight (kg)': 'Weight (kg)',
'Weight': 'Weight',
'Welcome to the': 'Welcome to the',
'Well-Known Text': 'Well-Known Text',
'What are you submitting?': 'What are you submitting?',
'What order to be contacted in.': 'What order to be contacted in.',
'What the Items will be used for': 'What the Items will be used for',
'Wheat': 'Wheat',
'Wheelbarrows': 'Wheelbarrows',
'When reports were entered': 'When reports were entered',
'When this search was last checked for changes.': 'When this search was last checked for changes.',
'Where reached': 'Where reached',
'Whether calls to this resource should use this configuration as the default one': 'Whether calls to this resource should use this configuration as the default one',
'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.',
'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'Whether the resource should be tracked using S3Track rather than just using the Base Location',
'Which methods to apply when importing data to the local repository': 'Which methods to apply when importing data to the local repository',
'Whiskers': 'Whiskers',
'Whitelist a Sender': 'Whitelist a Sender',
'Whitelisted Senders': 'Whitelisted Senders',
'Who is doing What Where': 'Who is doing What Where',
'Who is this alert for?': 'Who is this alert for?',
'Who usually collects water for the family?': 'Who usually collects water for the family?',
'Width (m)': 'Width (m)',
'Wild Fire': 'Wild Fire',
'Will be filled automatically when the Item has been Repacked': 'Will be filled automatically when the Item has been Repacked',
'Will be filled automatically when the Shipment has been Received': 'Will be filled automatically when the Shipment has been Received',
'Will create and link your user account to the following records': 'Will create and link your user account to the following records',
'Wind Chill': 'Wind Chill',
'Wind/Hurricane': 'Wind/Hurricane',
'Wind/Wind driven rain': 'Wind/Wind driven rain',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'Wire brush': 'Wire brush',
'Women participating in coping activities': 'Women participating in coping activities',
'Womens Focus Groups': 'Women\'s Focus Groups',
'Wood Frame': 'Wood Frame',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Work Order': 'Work Order',
'Work Plan': 'Work Plan',
'Work Requested': 'Work Requested',
'Work gloves': 'Work gloves',
'Work on Program': 'Work on Program',
'Work phone': 'Work phone',
'Work': 'Work',
'Working or other to provide money/food': 'Working or other to provide money/food',
'Wrench': 'Wrench',
'X-Ray': 'X-Ray',
'XML parse error': 'XML parse error',
'XSLT stylesheet not found': 'XSLT stylesheet not found',
'XSLT transformation error': 'XSLT transformation error',
'XYZ Layer': 'XYZ Layer',
'YES': 'YES',
'Year Built': 'Year Built',
'Year built': 'Year built',
'Year of Manufacture': 'Year of Manufacture',
'Year that the organization was founded': 'Year that the organization was founded',
'Year': 'Year',
'Yellow': 'Yellow',
'Yes': 'Yes',
'Yes, No': 'Yes, No',
'You are a recovery team?': 'You are a recovery team?',
'You are about to submit indicator ratings for': 'You are about to submit indicator ratings for',
'You are attempting to delete your own account - are you sure you want to proceed?': 'You are attempting to delete your own account - are you sure you want to proceed?',
'You are currently reported missing!': 'You are currently reported missing!',
'You are not allowed to Vote': 'You are not allowed to Vote',
'You are not permitted to approve documents': 'You are not permitted to approve documents',
'You are not permitted to upload files': 'You are not permitted to upload files',
'You are viewing': 'You are viewing',
'You can click on the map below to select the Lat/Lon fields': 'You can click on the map below to select the Lat/Lon fields',
'You can only make %d kit(s) with the available stock': 'You can only make %d kit(s) with the available stock',
'You can search by name, acronym or comments': 'You can search by name, acronym or comments',
'You can search by name, acronym, comments or parent name or acronym.': 'You can search by name, acronym, comments or parent name or acronym.',
'You can select an area on the image and save to crop it.': 'You can select an area on the image and save to crop it.',
'You can select the Draw tool': 'You can select the Draw tool',
'You can set the modem settings for SMS here.': 'You can set the modem settings for SMS here.',
'You do not have permission for any facility to add an order.': 'You do not have permission for any facility to add an order.',
'You do not have permission for any facility to make a commitment.': 'You do not have permission for any facility to make a commitment.',
'You do not have permission for any facility to make a request.': 'You do not have permission for any facility to make a request.',
'You do not have permission for any facility to perform this action.': 'You do not have permission for any facility to perform this action.',
'You do not have permission for any facility to receive a shipment.': 'You do not have permission for any facility to receive a shipment.',
'You do not have permission for any facility to send a shipment.': 'You do not have permission for any facility to send a shipment.',
'You do not have permission for any organization to perform this action.': 'You do not have permission for any organization to perform this action.',
'You do not have permission for any site to add an inventory item.': 'You do not have permission for any site to add an inventory item.',
'You do not have permission to adjust the stock level in this warehouse.': 'You do not have permission to adjust the stock level in this warehouse.',
'You do not have permission to cancel this received shipment.': 'You do not have permission to cancel this received shipment.',
'You do not have permission to cancel this sent shipment.': 'You do not have permission to cancel this sent shipment.',
'You do not have permission to make this commitment.': 'You do not have permission to make this commitment.',
'You do not have permission to receive this shipment.': 'You do not have permission to receive this shipment.',
'You do not have permission to return this sent shipment.': 'You do not have permission to return this sent shipment.',
'You do not have permission to send messages': 'You do not have permission to send messages',
'You do not have permission to send this shipment.': 'You do not have permission to send this shipment.',
'You have committed for all people in this Request. Please check that all details are correct and update as-required.': 'You have committed for all people in this Request. Please check that all details are correct and update as required.',
'You have committed to all items in this Request. Please check that all details are correct and update as-required.': 'You have committed to all items in this Request. Please check that all details are correct and update as required.',
'You have committed to this Request. Please check that all details are correct and update as-required.': 'You have committed to this Request. Please check that all details are correct and update as required.',
'You have found a dead body?': 'You have found a dead body?',
'You have unsaved changes. You need to press the Save button to save them': 'You have unsaved changes. You need to press the Save button to save them',
'You may select multiple categories by holding down control and then selecting the items.': 'You may select multiple categories by holding down control and then selecting the items.',
'You must %(login)s to Vote': 'You must %(login)s to Vote',
'You must agree to the Terms of Service': 'You must agree to the Terms of Service',
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You must enter a minimum of %d characters': 'You must enter a minimum of %d characters',
'You need to check all item quantities and allocate to bins before you can receive the shipment': 'You need to check all item quantities and allocate to bins before you can receive the shipment',
'You need to check all item quantities before you can complete the return process': 'You need to check all item quantities before you can complete the return process',
'You need to check all the revised quantities before you can close this adjustment': 'You need to check all the revised quantities before you can close this adjustment',
'You need to create a template before you can create a series': 'You need to create a template before you can create a series',
'You need to create at least one alert information item in order to be able to broadcast this alert!': 'You need to create at least one alert information item in order to be able to broadcast this alert!',
'You need to have at least 2 records in this list in order to merge them.': 'You need to have at least 2 records in this list in order to merge them.',
'You need to use the spreadsheet which you can download from this page': 'You need to use the spreadsheet which you can download from this page',
'You should edit Twitter settings in models/000_config.py': 'You should edit Twitter settings in models/000_config.py',
'Your current ordered list of solution options is shown in the right-hand column. You can change your vote by reordering this list. You may choose to ignore any solution options by having these in the left-hand column.': 'Your current ordered list of solution options is shown in the right-hand column. You can change your vote by reordering this list. You may choose to ignore any solution options by having these in the left-hand column.',
'Your name for this search. Notifications will use this name.': 'Your name for this search. Notifications will use this name.',
'access granted': 'access granted',
'activate to sort column ascending': 'activate to sort column ascending',
'activate to sort column descending': 'activate to sort column descending',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'always update': 'always update',
'an individual/team to do in 1-2 days': 'an individual/team to do in 1-2 days',
'and': 'and',
'anonymous user': 'anonymous user',
'appadmin is disabled because insecure channel': 'appadmin is disabled because of an insecure channel',
'average': 'average',
'black': 'black',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'by %(person)s': 'by %(person)s',
'by': 'by',
'cache': 'cache',
'cannot be deleted.': 'cannot be deleted.',
'caucasoid': 'caucasoid',
'characters left': 'characters left',
'check all': 'check all',
'clear': 'clear',
'covered': 'covered',
'created': 'created',
'curly': 'curly',
'current': 'current',
'dark': 'dark',
'data uploaded': 'data uploaded',
'db': 'db',
'deceased': 'deceased',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'e.g. Census 2010': 'e.g. Census 2010',
'eg. gas, electricity, water': 'e.g. gas, electricity, water',
'enclosed area': 'enclosed area',
'enter date and time in range %(min)s %(max)s': 'enter date and time in range %(min)s %(max)s',
'enter date and time on or after %(min)s': 'enter date and time on or after %(min)s',
'enter date and time on or before %(max)s': 'enter date and time on or before %(max)s',
'enter date and time': 'enter date and time',
'expired': 'expired',
'export as csv file': 'export as csv file',
'fair': 'fair',
'fat': 'fat',
'female': 'female',
'fill in order: day(2) month(2) year(4)': 'fill in order: day(2) month(2) year(4)',
'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'fill in order: hour(2) min(2) day(2) month(2) year(4)',
'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'fill in order: hour(2) min(2) month(2) day(2) year(4)',
'fill in order: month(2) day(2) year(4)': 'fill in order: month(2) day(2) year(4)',
'flush latrine with septic tank': 'flush latrine with septic tank',
'food_sources': 'food_sources',
'forehead': 'forehead',
'found': 'found',
'ft': 'ft',
'green': 'green',
'grey': 'grey',
'hours': 'hours',
'households': 'households',
'identified': 'identified',
'ignore': 'ignore',
'import': 'importera',
'in Stock': 'in Stock',
'in this': 'in this',
'in': 'in',
'injured': 'injured',
'insufficient number of pages provided': 'insufficient number of pages provided',
'invalid request': 'invalid request',
'latrines': 'latrines',
'less': 'less',
'light': 'light',
'login': 'logga in',
'long': 'long',
'long>12cm': 'long>12cm',
'm': 'm',
'm3': 'm3',
'male': 'male',
'mandatory fields': 'mandatory fields',
'married': 'married',
'medium': 'medium',
'medium<12cm': 'medium<12cm',
'meters': 'meters',
'missing': 'missing',
'moderate': 'moderate',
'module allows the site administrator to configure various options.': 'module allows the site administrator to configure various options.',
'module helps monitoring the status of hospitals.': 'module helps monitoring the status of hospitals.',
'mongoloid': 'mongoloid',
'more': 'mer',
'more...': 'mer...',
'n/a': 'n/a',
'negroid': 'negroid',
'never update': 'never update',
'never': 'never',
'new ACL': 'new ACL',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'no options available': 'no options available',
'no': 'no',
'none': 'none',
'not accessible - no cached version available!': 'not accessible - no cached version available!',
'not accessible - using cached version from': 'not accessible - using cached version from',
'not sent': 'not sent',
'not specified': 'not specified',
'number of planes': 'number of planes',
'obsolete': 'obsolete',
'of total data reported': 'of total data reported',
'of': 'of',
'on %(date)s': 'on %(date)s',
'on': 'on',
'open defecation': 'open defecation',
'optional': 'optional',
'or import from csv file': 'or import from csv file',
'or': 'or',
'other': 'other',
'out of': 'out of',
'over one hour': 'over one hour',
'overdue': 'overdue',
'paid': 'paid',
'people': 'people',
'per': 'per',
'piece': 'piece',
'pit latrine': 'pit latrine',
'pit': 'pit',
'poor': 'poor',
'previous 100 rows': 'previous 100 rows',
'pull and push': 'pull and push',
'pull': 'pull',
'push': 'push',
'pygraphviz library not found': 'pygraphviz library not found',
'record does not exist': 'record does not exist',
'records deleted': 'records deleted',
'red': 'red',
'replace': 'replace',
'reports successfully imported.': 'reports successfully imported.',
'representation of the Polygon/Line.': 'representation of the Polygon/Line.',
'river': 'river',
'seconds': 'seconds',
'see comment': 'see comment',
'see more': 'see more',
'sent': 'sent',
'separated from family': 'separated from family',
'separated': 'separated',
'shaved': 'shaved',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'sign-up now',
'single': 'single',
'slim': 'slim',
'specify': 'specify',
'state location': 'state location',
'state': 'state',
'straight': 'straight',
'strong': 'strong',
'submit': 'submit',
'submitted by': 'submitted by',
'suffered financial losses': 'suffered financial losses',
'tall': 'tall',
'times (0 = unlimited)': 'times (0 = unlimited)',
'times': 'times',
'to download a OCR Form.': 'to download a OCR Form.',
'to reset your password': 'to reset your password',
'tonsure': 'tonsure',
'total': 'total',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!',
'unable to parse csv file': 'unable to parse csv file',
'uncheck all': 'uncheck all',
'uncovered': 'uncovered',
'unidentified': 'unidentified',
'unknown': 'unknown',
'unlimited': 'unlimited',
'unspecified': 'unspecified',
'up to 3 locations': 'up to 3 locations',
'update if master': 'update if master',
'update if newer': 'update if newer',
'update': 'update',
'updated': 'updated',
'using default': 'using default',
'wavy': 'wavy',
'white': 'white',
'wider area, longer term, usually contain multiple Activities': 'wider area, longer term, usually contain multiple Activities',
'widowed': 'widowed',
'within human habitat': 'within human habitat',
'xlwt not installed, so cannot export as a Spreadsheet': 'xlwt not installed, so cannot export as a Spreadsheet',
'yes': 'yes',
}
|
code-for-india/sahana_shelter_worldbank
|
languages/sv.py
|
Python
|
mit
| 403,965
|
[
"VisIt"
] |
a9adc002bc84f637c2cc4c4e457bb6ab4ea1126e8936719de947a683e9b3058b
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
from __future__ import absolute_import
import re
import math
from collections import defaultdict
from .exceptions import *
from . import qcformat
#import molpro_basissets
from . import options
from .pdict import PreservingDict
def harvest_output(outtext):
"""Function to separate portions of a Psi4 output file *outtext*.
"""
psivar = PreservingDict()
psivar_coord = None
psivar_grad = None
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
# # Process NRE
# mobj = re.search(r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*$',
# outtext, re.MULTILINE)
# if mobj:
# print('matched nre')
# psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# Process HF UNTESTED
mobj = re.search(
r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
r'(?:.*?)' +
r'(?:Hartree-Fock SCF calculation)' +
r'(?:.*?)' +
r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
outtext, re.MULTILINE | re.DOTALL)
if mobj:
print('matched hf')
psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
psivar['HF TOTAL ENERGY'] = mobj.group(2)
# Process DFT-D2 UNTESTED
# mobj = re.search(
# r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
# r'(?:.*?)' +
# r'(?:HF-DFT SCF calculation)' +
# r'(?:.*?)' +
# r'^\s+' + r'(?:Empirical dispersion =)' + r'\s+' + NUMBER + r'\s+hartree\s*' +
# r'(?:.*?)' +
# r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
# outtext, re.MULTILINE | re.DOTALL)
# if mobj:
# print('matched dft-d2')
# psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# psivar['DISPERSION CORRECTION ENERGY'] = mobj.group(2)
# psivar['DFT TOTAL ENERGY'] = mobj.group(3)
# psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(3) - mboj.group(2)
# Process DFT-D3 UNTESTED
# mobj = re.search(
# r'(?:grimme3)' + r'\s*' +
# r'(?:.*?)' +
# r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
# r'(?:.*?)' +
# r'(?:HF-DFT SCF calculation)' +
# r'(?:.*?)' +
# r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
# outtext, re.MULTILINE | re.DOTALL)
# if mobj:
# print('matched dft-d3')
# psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
# psivar['DISPERSION CORRECTION ENERGY'] = None
# psivar['DFT TOTAL ENERGY'] = mobj.group(2)
# psivar['DFT FUNCTIONAL TOTAL ENERGY'] = None
# /^((?!PART).)*$/
# Process DFT no-D or internal-D
mobj = re.search(
# r'((?!grimme3).)*' + r'\s*' + # severe negative performance impact
# r'(?:.*?)' +
r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
r'(?:.*?)' +
r'(?:HF-DFT SCF calculation)' +
r'(?:.*?)' +
r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if mobj:
print('matched dft')
psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
#psivar['DFT TOTAL ENERGY'] = mobj.group(2)
psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(2)
# with negative lookahead
#psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(2)
#psivar['DFT TOTAL ENERGY'] = mobj.group(3)
#psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(3)
# Process DHDFT no-D or internal-D
mobj = re.search(
# negative grimme3 lookahead goes here
#r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
#r'(?:.*?)' +
r'(?:HF-DFT SCF calculation)' +
r'(?:.*?)' +
#r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*' +
#r'(?:.*?)' +
# need a not "Hartree-Fock SCF calculation" here so DFT @@@ MP2 not caught?
r'^\s*' + r'(?:Total (?:RI)?MP2 correlation energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*' +
r'^\s+' + r'(?:(?:RI)?MP2 total energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*$',
outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if mobj:
print('matched dhdft')
#psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
#psivar['DFT TOTAL ENERGY'] = mobj.group(2)
#psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(2)
psivar['DOUBLE-HYBRID CORRECTION ENERGY'] = mobj.group(1)
# Process MP2
mobj = re.search(
r'(?:Hartree-Fock SCF calculation)' +
r'(?:.*?)' +
#r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*' +
#r'(?:.*?)' +
# need a not "Hartree-Fock SCF calculation" here so DFT @@@ MP2 not caught?
r'^\s*' + r'(?:Total RIMP2 correlation energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*' +
r'^\s+' + r'(?:RIMP2 total energy =)' + r'\s+' + NUMBER + r'\s+' + r'au' + r'\s*$',
outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if mobj:
print('matched mp2')
#psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1)
#psivar['DFT TOTAL ENERGY'] = mobj.group(2)
#psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(2)
psivar['MP2 CORRELATION ENERGY'] = mobj.group(1)
#psivar['DOUBLE-HYBRID CORRECTION ENERGY'] = mobj.group(1)
print(psivar)
# TODO: need to split on 'Q-Chem begins' or 'Quantum Leap' or something
# # Process DFT no-D or internal-D WORKS BUT LOOKAHEAD VERY SLOW
# mobj = re.search(
# r'((?!grimme3).)*' + r'\s*' + # severe negative performance impact
# TODO note neg lookahead insufficient since option could be negated
# r'(?:.*?)' +
# r'^\s+' + r'(?:Nuclear Repulsion Energy =)' + r'\s+' + NUMBER + r'\s+hartrees\s*' +
# r'(?:.*?)' +
# r'(?:HF-DFT SCF calculation)' +
# r'(?:.*?)' +
# r'^\s+\d+\s+' + NUMBER + r'\s+' + NUMBER + r'\s+' + 'Convergence criterion met' + r'\s*$',
# outtext, re.MULTILINE | re.DOTALL | re.IGNORECASE)
# if mobj:
# print('matched dft')
# psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(2)
# psivar['DFT TOTAL ENERGY'] = mobj.group(3)
# psivar['DFT FUNCTIONAL TOTAL ENERGY'] = mobj.group(3)
# # Process PsiVariables
# mobj = re.search(r'^(?: Variable Map:)\s*' +
# r'^\s*(?:-+)\s*' +
# r'^(.*?)' +
# r'^(?:\s*?)$',
# outtext, re.MULTILINE | re.DOTALL)
#
# if mobj:
# for pv in mobj.group(1).split('\n'):
# submobj = re.search(r'^\s+' + r'"(.+?)"' + r'\s+=>\s+' + NUMBER + r'\s*$', pv)
# if submobj:
# psivar['%s' % (submobj.group(1))] = submobj.group(2)
# Process Completion
mobj = re.search(r'Thank you very much for using Q-Chem. Have a nice day.',
outtext, re.MULTILINE)
if mobj:
psivar['SUCCESS'] = True
return psivar, psivar_coord, psivar_grad
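# Illustrative sketch (not part of the original module): the completion probe
# above can be exercised on its own. With only the sign-off line present, none
# of the energy patterns match and just SUCCESS is set (assuming PreservingDict
# supports plain indexing):
#
#     pv, coord, grad = harvest_output(
#         'Thank you very much for using Q-Chem. Have a nice day.')
#     assert pv['SUCCESS'] is True and coord is None and grad is None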
def muster_memory(mem):
"""Transform input *mem* in MB into psi4-type options.
"""
text = ''
# prepare memory keywords to be set as c-side keywords
options = defaultdict(lambda: defaultdict(dict))
options['QCHEM']['QCHEM_MEM_TOTAL']['value'] = int(mem)
#options['QCHEM']['QCHEM_CC_MEMORY']['value'] = int(mem)
#options['QCHEM']['QCHEM_MEM_STATIC']['value'] = int(mem)
for item in options['QCHEM']:
options['QCHEM'][item]['clobber'] = True
return text, options
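# Illustrative sketch (hypothetical value): muster_memory returns an empty
# text block plus a nested options dict, e.g.
#
#     text, opts = muster_memory(4000)
#     # text == ''
#     # opts['QCHEM']['QCHEM_MEM_TOTAL'] == {'value': 4000, 'clobber': True}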
def muster_basis(bas):
"""Transform input *mem* in MB into psi4-type options.
"""
text = ''
# prepare basis keyword to be set as c-side keywords
options = defaultdict(lambda: defaultdict(dict))
options['QCHEM']['QCHEM_BASIS']['value'] = bas
for item in options['QCHEM']:
options['QCHEM'][item]['clobber'] = True
return text, options
class Infile(qcformat.InputFormat2):
def __init__(self, mem, mol, mtd, der, opt):
qcformat.InputFormat2.__init__(self, mem, mol, mtd, der, opt)
# #print self.method, self.molecule.nactive_fragments()
# if 'sapt' in self.method and self.molecule.nactive_fragments() != 2:
# raise FragmentCountError("""Requested molecule has %d, not 2, fragments.""" % (self.molecule.nactive_fragments()))
#
## # memory in MB --> MW
## self.memory = int(math.ceil(mem / 8.0))
## # auxiliary basis sets
## [self.unaugbasis, self.augbasis, self.auxbasis] = self.corresponding_aux_basis()
def format_infile_string(self):
"""
"""
# Handle memory and comment
cmtcmd = """$comment\n%s\n$end\n\n""" % (self.molecule.tagline)
memcmd, memkw = muster_memory(self.memory)
# Handle molecule and basis set
molcmd, molkw = self.molecule.format_molecule_for_qchem(mixedbas=False)
# TODO mixedbas=True once handling basis sets
# not translating basis at present
_bascmd, baskw = muster_basis(self.basis)
# format global convergence directions
_cdscmd, cdskw = muster_cdsgroup_options()
# Handle calc type and quantum chemical method
mdccmd, mdckw = procedures['energy'][self.method](self.method, self.dertype)
## make options from imdb only user options (currently non-existent). set basis and castup from here.
# Handle driver vs input/default keyword reconciliation
userkw = self.options # p4util.prepare_options_for_modules()
userkw = options.reconcile_options2(userkw, memkw)
userkw = options.reconcile_options2(userkw, molkw)
userkw = options.reconcile_options2(userkw, baskw)
#userkw = qcdb.options.reconcile_options(userkw, psikw)
userkw = options.reconcile_options2(userkw, cdskw)
userkw = options.reconcile_options2(userkw, mdckw)
# Handle conversion of psi4 keyword structure into qchem format
optcmd = options.prepare_options_for_qchem(userkw)
# Handle text to be passed untouched to psi4
litcmd = ''
# Assemble infile pieces
return cmtcmd + memcmd + molcmd + optcmd + mdccmd + litcmd
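# Illustrative shape of the assembled infile (hedged sketch; memcmd and mdccmd
# are empty strings above, and the exact $rem keyword block comes from
# options.prepare_options_for_qchem, which is not reproduced here):
#
#     $comment
#     <molecule tagline>
#     $end
#
#     <molecule section from format_molecule_for_qchem>
#     <$rem keyword block>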
#'hf'
#'df-hf'
#'b3lyp'
#'blyp'
#'bp86'
#'fno-ccsd(t)'
#'df-ccsd(t)'
#'fno-df-ccsd(t)'
#'df-b97-d'
#'df-b97-d3'
#'pbe0-2'
#'dsd-pbep86'
#'wb97x-2'
#'DLdf+d'
#'DLdf+d09'
#'df-b3lyp'
#'df-b3lyp-d'
#'df-b3lyp-d3'
#'df-wb97x-d'
def muster_cdsgroup_options():
text = ''
options = defaultdict(lambda: defaultdict(dict))
# options['GLOBALS']['E_CONVERGENCE']['value'] = 8
# options['SCF']['GUESS']['value'] = 'sad'
# options['SCF']['MAXITER']['value'] = 200
options['QCHEM']['QCHEM_MEM_STATIC']['value'] = 512
options['QCHEM']['QCHEM_XC_GRID']['value'] = '000100000302'
options['QCHEM']['QCHEM_THRESH']['value'] = 12
options['QCHEM']['QCHEM_SCF_CONVERGENCE']['value'] = 7
#options['QCHEM']['QCHEM_INTEGRALS_BUFFER']['value'] = 512
options['QCHEM']['QCHEM_MAX_SCF_CYCLES']['value'] = 200
options['QCHEM']['QCHEM_SYM_IGNORE']['value'] = True
options['QCHEM']['QCHEM_SYMMETRY']['value'] = False
options['QCHEM']['QCHEM_INTEGRALS_BUFFER']['value'] = 512
return text, options
def muster_modelchem(name, dertype):
"""Transform calculation method *name* and derivative level *dertype*
into options for cfour. While deliberately requested pieces,
generally |cfour__cfour_deriv_level| and |cfour__cfour_calc_level|,
are set to complain if contradicted ('clobber' set to True), other
'recommended' settings, like |cfour__cfour_cc_program|, can be
countermanded by keywords in input file ('clobber' set to False).
Occasionally, want these pieces to actually overcome keywords in
input file ('superclobber' set to True).
"""
text = ''
lowername = name.lower()
options = defaultdict(lambda: defaultdict(dict))
if dertype == 0:
options['QCHEM']['QCHEM_JOBTYPE']['value'] = 'SP'
# text += """energy('"""
else:
raise ValidationError("""Requested Psi4 dertype %d is not available.""" % (dertype))
if lowername == 'wb97x-v':
options['QCHEM']['QCHEM_EXCHANGE']['value'] = 'omegaB97X-V'
# text += """mp2')\n\n"""
#
# elif lowername == 'df-mp2':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['MP2']['MP2_TYPE']['value'] = 'df'
# text += """mp2')\n\n"""
#
# elif lowername == 'sapt0':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['SCF']['SCF_TYPE']['value'] = 'df'
# text += """sapt0')\n\n"""
#
# elif lowername == 'sapt2+':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SAPT']['NAT_ORBS_T2']['value'] = True
# options['SAPT']['NAT_ORBS_T3']['value'] = True
# options['SAPT']['NAT_ORBS_V4']['value'] = True
# options['SAPT']['OCC_TOLERANCE']['value'] = 1.0e-6
# text += """sapt2+')\n\n"""
#
# elif lowername == 'sapt2+(3)':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SAPT']['NAT_ORBS_T2']['value'] = True
# options['SAPT']['NAT_ORBS_T3']['value'] = True
# options['SAPT']['NAT_ORBS_V4']['value'] = True
# options['SAPT']['OCC_TOLERANCE']['value'] = 1.0e-6
# text += """sapt2+(3)')\n\n"""
#
# elif lowername == 'sapt2+3(ccd)':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SAPT']['NAT_ORBS_T2']['value'] = True
# options['SAPT']['NAT_ORBS_T3']['value'] = True
# options['SAPT']['NAT_ORBS_V4']['value'] = True
# options['SAPT']['OCC_TOLERANCE']['value'] = 1.0e-6
# options['SAPT']['DO_MBPT_DISP']['value'] = True
# text += """sapt2+3(ccd)')\n\n"""
#
# elif lowername == 'df-b97-d3':
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
# options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
# text += """b97-d3')\n\n"""
#
# elif lowername == 'df-wb97x-d':
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
# options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
# text += """wb97x-d')\n\n"""
#
# elif lowername == 'df-b3lyp-d3':
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
# options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
# text += """b3lyp-d3')\n\n"""
#
# elif lowername == 'dfdf-b2plyp-d3':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['DFMP2']['MP2_TYPE']['value'] = 'df'
# options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
# options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
# text += """b2plyp-d3')\n\n"""
#
# elif lowername == 'df-wpbe':
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
# options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
# text += """wpbe')\n\n"""
#
# elif lowername == 'ccsd-polarizability':
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# text = """property('ccsd', properties=['polarizability'])\n\n"""
#
# elif lowername == 'mrccsdt(q)':
# options['SCF']['SCF_TYPE']['value'] = 'pk'
# options['GLOBALS']['FREEZE_CORE']['value'] = True
# options['GLOBALS']['NAT_ORBS']['value'] = True # needed by mrcc but not recognized by mrcc
# options['FNOCC']['OCC_TOLERANCE']['value'] = 6
# text += """mrccsdt(q)')\n\n"""
#
# elif lowername == 'c4-ccsdt(q)':
# options['CFOUR']['CFOUR_SCF_CONV']['value'] = 11
# options['CFOUR']['CFOUR_CC_CONV']['value'] = 10
# options['CFOUR']['CFOUR_FROZEN_CORE']['value'] = True
# text += """c4-ccsdt(q)')\n\n"""
#
# elif lowername == 'df-m05-2x':
# options['SCF']['SCF_TYPE']['value'] = 'df'
# options['SCF']['DFT_SPHERICAL_POINTS']['value'] = 302
# options['SCF']['DFT_RADIAL_POINTS']['value'] = 100
# text += """m05-2x')\n\n"""
else:
raise ValidationError("""Requested Psi4 computational methods %d is not available.""" % (lowername))
# # Set clobbering
# if 'CFOUR_DERIV_LEVEL' in options['CFOUR']:
# options['CFOUR']['CFOUR_DERIV_LEVEL']['clobber'] = True
# options['CFOUR']['CFOUR_DERIV_LEVEL']['superclobber'] = True
# if 'CFOUR_CALC_LEVEL' in options['CFOUR']:
# options['CFOUR']['CFOUR_CALC_LEVEL']['clobber'] = True
# options['CFOUR']['CFOUR_CALC_LEVEL']['superclobber'] = True
# if 'CFOUR_CC_PROGRAM' in options['CFOUR']:
# options['CFOUR']['CFOUR_CC_PROGRAM']['clobber'] = False
return text, options
procedures = {
'energy': {
'wb97x-v' : muster_modelchem,
}
}
qcmtdIN = procedures['energy']
def psi4_list():
"""Return an array of Psi4 methods with energies.
"""
return sorted(procedures['energy'].keys())
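# Illustrative sketch: with the procedures table as defined above, the only
# registered energy method is 'wb97x-v', so
#
#     psi4_list()   # -> ['wb97x-v']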
|
rmcgibbo/psi4public
|
psi4/driver/qcdb/qchem.py
|
Python
|
lgpl-3.0
| 18,475
|
[
"CFOUR",
"Psi4",
"Q-Chem"
] |
0baa13a55ebd7b3d29de8f253f7d911a91013d0e0664a499b31ec7b93779c427
|
"""Support for creating packages of data.
Ambry creates packages of data that simplify the process of finding,
cleaning, transforming and loading popular datasets. The data bundle format,
tools and management processes are designed to make common public data sets easy
to use and share, while allowing users to audit how the data they use has been
acquired and processed. The Data Bundle concept includes the data format, a
definition for bundle configuration and meta data, tools for manipulating
bundles, and a process for acquiring, processing and managing data. The goal of
a data bundle is for data analysts to be able to run a few simple commands to find
a dataset and load it into a relational database.
Visit http://ambry.io for more information.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from ._meta import *
from ambry.util import memoize
@memoize
def config(path=None, root=None, db=None):
"""Return the default run_config object for this installation."""
import ambry.run
return ambry.run.load(path=path, root=root, db=db)
@memoize
def get_library(path=None, root=None, db=None):
"""Return the default library for this installation."""
import ambry.library as _l
rc = config(path=path, root=root, db=db)
return _l.new_library(rc)
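# Illustrative usage sketch (assumes a configured Ambry installation; the
# config path is hypothetical):
#
#     import ambry
#     lib = ambry.get_library()             # library from the default config
#     rc = ambry.config(path='/etc/ambry')  # or point at an explicit config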
|
CivicKnowledge/ambry
|
ambry/__init__.py
|
Python
|
bsd-2-clause
| 1,391
|
[
"VisIt"
] |
df5ec87fedb094d85bccf6979dc5f9a6495276248cfa344bcd20998959c2af5f
|
'''A set of utilities to interact with gromacs'''
# Need to add a parser to insert this contrib script
# $ chemlab gromacs energy
# it should show a little interface to view the energy
# Let's launch the program and determine what happens
from chemlab.io import datafile
from pylab import *
from chemlab.molsim.analysis import rdf
import difflib
import sys, re
import numpy as np
def setup_commands(subparsers):
groparser = subparsers.add_parser("gromacs")
subparsers2 = groparser.add_subparsers()
eparser = subparsers2.add_parser("energy")
eparser.add_argument('filenames', metavar='filenames', type=str, nargs='+')
eparser.add_argument('-e', metavar='energies', type=str, nargs='+',
help='Properties to display in the energy viewer.')
eparser.add_argument('-o', help='Do not display GUI and save the plot')
eparser.set_defaults(func=lambda args: energy(args, args.o))
rdfparser = subparsers2.add_parser("rdf")
rdfparser.add_argument('selection', metavar='selection', type=str)
rdfparser.add_argument('filename', metavar='filename', type=str)
rdfparser.add_argument('trajectory', metavar='trajectory', type=str)
rdfparser.add_argument('-t', metavar='t', type=str)
rdfparser.set_defaults(func=rdffunc)
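# Illustrative CLI shapes implied by the subparsers above (file names are
# hypothetical):
#
#     chemlab gromacs energy ener1.edr ener2.edr -e Potential Temperature
#     chemlab gromacs rdf O-O system.gro traj.xtc -t 0,500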
def energy(args, output=None):
ens = args.e
fns = args.filenames
datafiles = [datafile(fn) for fn in fns]
quants = datafiles[0].read('avail quantities')
for i,e in enumerate(ens):
if e not in quants:
match = difflib.get_close_matches(e, quants)
print('Quantity %s not present, taking close match: %s'
% (e, match[0]))
ens[i] = match[0]
toplot = []
for df in datafiles:
for e in ens:
plotargs = {}
plotargs['points'] = df.read('quantity', e)
plotargs['filename'] = df.fd.name
plotargs['quantity'] = e
toplot.append(plotargs)
plots = []
legends = []
for arg in toplot:
p, = plot(arg['points'][0], arg['points'][1])
plots.append(p)
legends.append(arg['filename'])
xlabel('Time(ps)')
ylabel(ens[0])
ticklabel_format(style='sci', axis='y', scilimits=(0,0))
grid()
figlegend(plots, legends, 'upper right')
show()
def get_rdf(arguments):
return rdf(arguments[0], arguments[1], periodic=arguments[2])
def rdffunc(args):
import multiprocessing
type_a, type_b = args.selection.split('-')
syst = datafile(args.filename).read("system")
sel_a = syst.type_array == type_a
sel_b = syst.type_array == type_b
df = datafile(args.trajectory)
t, coords = df.read("trajectory")
boxes = df.read("boxes")
times = [int(tim) for tim in args.t.split(',')]
ind = np.searchsorted(t, times)
arguments = ((coords[i][sel_a], coords[i][sel_b], boxes[i]) for i in ind)
rds = map(get_rdf, arguments)
for rd in rds:
plot(rd[0], rd[1])
ticklabel_format(style='sci', axis='y', scilimits=(0,0))
xlabel('Time(ps)')
ylabel(args.selection)
grid()
show()
if __name__ == '__main__':
main(['pressure'])
|
GabrielNicolasAvellaneda/chemlab
|
chemlab/contrib/gromacs.py
|
Python
|
gpl-3.0
| 3,251
|
[
"Gromacs"
] |
8f926999158b8244fdd2e215a159e6d2ab2cc9948f8bea6700ef8e93f5d25d1e
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors: Teng Lin
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Test the cython dtr module
Note: this file cannot be located in the dtr subdirectory, because that
directory is not a python package (it has no __init__.py), and thus tests
there are not discovered by nose.
"""
import tempfile, os
import numpy as np
from mdtraj.formats import DTRTrajectoryFile, DCDTrajectoryFile
from nose.tools import assert_raises
from mdtraj.testing import get_fn, eq, raises
from shutil import rmtree
fn_dtr = get_fn('frame0.dtr')
fn_dcd = get_fn('frame0.dcd')
fn_pdb = get_fn('native.pdb')
temp = tempfile.mkdtemp(suffix='.dtr')
def teardown_module(module):
"""
Remove the temporary trajectory directory created by tests
in this file; this gets called automatically by nose
"""
try:
rmtree(temp)
except OSError:
pass
def test_read():
"""
test the default read and compare against reference trajectory in dcd format
"""
dtr_traj = DTRTrajectoryFile(fn_dtr)
eq(len(dtr_traj), 501)
xyz, times, cell_lens, cell_angles = dtr_traj.read()
xyz2, cell_lens2, cell_angles2 = DCDTrajectoryFile(fn_dcd).read()
eq(xyz, xyz2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_read_1():
""" test read with n_frame"""
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(fn_dtr).read(n_frames=501)
eq(xyz, xyz2)
eq(times, times2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_read_2():
""" test read with atom indices"""
indices = np.array([0, 3, 12, 4])
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(fn_dtr).read(atom_indices=indices)
eq(xyz[:,indices,:], xyz2)
eq(times, times2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_read_3():
"""test read with n_frames"""
dtr_traj = DTRTrajectoryFile(fn_dtr)
dtr_traj.seek(1)
xyz, times, cell_lens, cell_angles = dtr_traj.read(n_frames=900)
eq(len(xyz), 500)
def test_read_stride():
"Read dtr with stride"
with DTRTrajectoryFile(fn_dtr) as f:
xyz1, times1, box_lengths1, box_angles1 = f.read()
with DTRTrajectoryFile(fn_dtr) as f:
xyz2, times2, box_lengths2, box_angles2 = f.read(stride=2)
yield lambda: eq(xyz1[::2], xyz2)
yield lambda: eq(times1[::2], times2)
yield lambda: eq(box_lengths1[::2], box_lengths2)
yield lambda: eq(box_angles1[::2], box_angles2)
def test_read_4():
"""Read dtr with stride and n_frames"""
# dtr_traj = DTRTrajectoryFile(fn_dtr)
# dtr_traj.seek(1)
# xyz, times, cell_lens, cell_angles = dtr_traj.read(n_frames=300, stride=2)
# eq(len(xyz), 251)
with DTRTrajectoryFile(fn_dtr) as f:
xyz1, times1, box_lengths1, box_angles1 = f.read()
with DTRTrajectoryFile(fn_dtr) as f:
xyz2, times2, box_lengths2, box_angles2 = f.read(n_frames=300, stride=2)
yield lambda: eq(xyz1[::2], xyz2)
yield lambda: eq(times1[::2], times2)
yield lambda: eq(box_lengths1[::2], box_lengths2)
yield lambda: eq(box_angles1[::2], box_angles2)
def test_read_5():
"check streaming read of frames 1 at a time"
xyz_ref, times_ref, box_lengths_ref, box_angles_ref = DTRTrajectoryFile(fn_dtr).read()
reader = DTRTrajectoryFile(fn_dtr)
for i in range(len(xyz_ref)):
xyz, times, box_lengths, box_angles = reader.read(1)
eq(xyz_ref[np.newaxis, i], xyz)
eq(times_ref[np.newaxis, i], times)
eq(box_lengths_ref[np.newaxis, i], box_lengths)
eq(box_angles_ref[np.newaxis, i], box_angles)
def test_read_6():
"DTRReader: check streaming read followed by reading the 'rest'"
xyz_ref, times_ref, box_lengths_ref, box_angles_ref = DTRTrajectoryFile(fn_dtr).read()
reader = DTRTrajectoryFile(fn_dtr)
for i in range(int(len(xyz_ref)/2)):
xyz, times, box_lengths, box_angles = reader.read(1)
eq(xyz_ref[np.newaxis, i], xyz)
eq(times_ref[np.newaxis, i], times)
eq(box_lengths_ref[np.newaxis, i], box_lengths)
eq(box_angles_ref[np.newaxis, i], box_angles)
xyz_rest, times_rest, box_rest, angles_rest = reader.read()
yield lambda: eq(xyz_ref[i+1:], xyz_rest)
yield lambda: eq(times_ref[i+1:], times_rest)
yield lambda: eq(box_lengths_ref[i+1:], box_rest)
yield lambda: eq(box_angles_ref[i+1:], angles_rest)
yield lambda: len(xyz_ref) == i + len(xyz_rest)
def test_read_7():
'test two full read'
reader = DTRTrajectoryFile(fn_dtr)
xyz, times, cell_lens, cell_angles = reader.read()
xyz, times, cell_lens, cell_angles = reader.read()
eq(len(xyz), 0)
eq(len(times), 0)
eq(len(cell_lens), 0)
eq(len(cell_angles), 0)
def test_read_8():
with DTRTrajectoryFile(fn_dtr) as f:
xyz_ref, times_ref, box_lengths_ref, box_angles_ref = f.read()
with DTRTrajectoryFile(fn_dtr) as f:
xyz, times, box_lengths, box_angles = f.read(atom_indices=slice(None, None, 2))
yield lambda: eq(xyz_ref[:, ::2, :], xyz)
def test_write_1():
"test write"
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
xyz += 1
DTRTrajectoryFile(temp, 'w').write(xyz,cell_lengths=cell_lens,
cell_angles=cell_angles, times=times)
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(temp).read()
eq(xyz, xyz2)
eq(times, times2)
eq(cell_lens, cell_lens2)
eq(cell_angles, cell_angles2)
def test_write_2():
"""
test two separate write call
"""
xyz, times, cell_lens, cell_angles = DTRTrajectoryFile(fn_dtr).read()
writer = DTRTrajectoryFile(temp, 'w')
writer.write(xyz,cell_lengths=cell_lens,
cell_angles=cell_angles, times=times)
n_frames = len(xyz)
times += 50.0
writer.write(xyz,cell_lengths=cell_lens,
cell_angles=cell_angles, times=times)
# # try to write frames with different number of atoms
# assert_raises(ValueError, writer.write, xyz[:,10:,:],
# cell_lengths=cell_lens,
# cell_angles=cell_angles,
# times=times)
writer.close()
xyz2, times2, cell_lens2, cell_angles2 = DTRTrajectoryFile(temp).read()
eq(len(xyz2), n_frames*2)
eq(xyz, xyz2[n_frames:])
eq(times, times2[n_frames:])
eq(cell_lens, cell_lens2[n_frames:])
eq(cell_angles, cell_angles2[n_frames:])
def test_write_3():
"test a random write operation"
xyz = np.array(np.random.uniform(low=-50, high=-50, size=(3, 17, 3)), dtype=np.float32)
times = np.array([1, 23.0, 48.0], dtype=np.float64)
cell_lengths=np.array(np.random.uniform(low=100, high=200, size=(3, 3)), dtype=np.float32)
cell_angles=np.array([[90, 90, 90],
[80, 100, 120],
[120, 90, 80]],
dtype=np.float32)
with DTRTrajectoryFile(temp, 'w') as f:
f.write(xyz, cell_lengths=cell_lengths,
cell_angles=cell_angles, times=times)
with DTRTrajectoryFile(temp) as f:
xyz2, times2, cell_lengths2, cell_angles2 = f.read()
eq(xyz, xyz2)
def test_write_4():
"test write error"
xyz = np.array(np.random.uniform(low=-50, high=-50, size=(3, 17, 3)), dtype=np.float32)
times = np.array([1, 23.0, 48.0], dtype=np.float64)
cell_lengths=np.array(np.random.uniform(low=100, high=200, size=(3, 3)), dtype=np.float32)
cell_angles=np.array([[90, 90, 90],
[80, 100, 120],
[120, 90, 80]],
dtype=np.float32)
bad_times = np.array([21, 3.0, 48.0], dtype=np.float64)
f = DTRTrajectoryFile(temp, 'w')
assert_raises(ValueError, f.write, xyz, cell_lengths=cell_lengths)
assert_raises(ValueError, f.write, xyz, cell_angles=cell_angles)
assert_raises(ValueError, f.write, xyz, times=times)
assert_raises(ValueError, f.write, xyz,
cell_lengths=cell_lengths,
cell_angles=cell_angles,
times=bad_times)
f.close()
# assert_raises(IOError, f.write, xyz,
# cell_lengths=cell_lengths,
# cell_angles=cell_angles,
# times=times)
def test_seek():
reference = DTRTrajectoryFile(fn_dtr).read()[0]
with DTRTrajectoryFile(fn_dtr) as f:
eq(f.tell(), 0)
eq(f.read(1)[0][0], reference[0])
eq(f.tell(), 1)
xyz = f.read(1)[0][0]
eq(xyz, reference[1])
eq(f.tell(), 2)
f.seek(0)
eq(f.tell(), 0)
xyz = f.read(1)[0][0]
eq(f.tell(), 1)
eq(xyz, reference[0])
f.seek(5)
eq(f.read(1)[0][0], reference[5])
eq(f.tell(), 6)
f.seek(-5, 1)
eq(f.tell(), 1)
eq(f.read(1)[0][0], reference[1])
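# Note on the seek calls above: with no whence argument the offset is
# absolute, while f.seek(-5, 1) moves relative to the current frame
# (6 - 5 == 1), mirroring the file-object seek convention.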
@raises(IOError)
def test_read_closed():
f = DTRTrajectoryFile(fn_dtr)
f.close()
f.read()
# @raises(IOError)
# def test_write_closed():
# f = DTRTrajectoryFile(fn_dtr, 'w')
# f.close()
# xyz = np.array(np.random.uniform(low=-50, high=-50, size=(3, 17, 3)), dtype=np.float32)
# times = np.array([1, 23.0, 48.0], dtype=np.float64)
# cell_lengths=np.array(np.random.uniform(low=100, high=200, size=(3, 3)), dtype=np.float32)
# cell_angles=np.array([[90, 90, 90],
# [80, 100, 120],
# [120, 90, 80]],
# dtype=np.float32)
#
# f.write(xyz, cell_lengths=cell_lengths,
# cell_angles=cell_angles,
# times=times)
def test_tell():
with DTRTrajectoryFile(fn_dtr) as f:
last = len(f)
eq(f.tell(), 0)
f.read(2)
eq(f.tell(), 2)
f.read(100)
eq(f.tell(), 102)
f.seek(600)
eq(f.tell(), last)
test_read_7()
|
ctk3b/mdtraj
|
mdtraj/tests/test_dtr.py
|
Python
|
lgpl-2.1
| 10,946
|
[
"MDTraj"
] |
02b9746f56b899f5e012abf3e65d23b3e0c7070e2dceca86861694f8033a1d10
|
"""
Otsu's binarization
uses parameter retVal
global thresholding -- uses arbitrary value for threshold (trial and error)
bimodal image (histogram has 2 peaks) - can approximately take value between the 2 peaks as threshold
- Otsu binarization - automatically calculates threshold value from image histogram for bimodal image
automatically calculates threshold value from image histogram for bimodal image
function:
cv2.threshold(), pass extra flag: cv2.THRESH_OTSU
--> for threshold value, PASS ZERO
algorithm finds optimal threshold value, returns it as 2nd output
(Otsu thresholding not used, retVal same as threshold value used)
"""
# example - input image is a noisy image.
# 1st case - global thresholding of 127
# 2nd case - Otsu's thresholding applied directly
# 3rd case - image filtered with 5x5 Gaussian kernel to remove noise, then Otsu thresholding applied
# (noise filtering improves the result)
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('noisy2.png', 0)
# global thresholding
ret1, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# Otsu's thresholding
ret2, th2 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(img, (5,5), 0)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
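# ret1 is simply the 127 passed in; ret2 and ret3 are the threshold values
# Otsu's method computed from the raw and the Gaussian-blurred image
# histograms, respectively.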
# plot all the images and their histograms
images = [img, 0, th1,
img, 0, th2,
img, 0, th3]
titles = [ 'Original Noisy Image', 'Histogram', 'Global Thresholding (v=127)',
'Original Noisy Image', 'Histogram', "Otsu's Thresholding",
'Gaussian filtered Image','Histogram', "Otsu's Thresholding"]
for i in range(3):
plt.subplot(3,3,i*3+1),plt.imshow(images[i*3],'gray')
plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+2),plt.hist(images[i*3].ravel(),256)
plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])
plt.subplot(3,3,i*3+3),plt.imshow(images[i*3+2],'gray')
plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])
plt.show()
# SEE: Digital Image Processing, Rafael C. Gonzalez
|
SSG-DRD-IOT/commercial-iot-security-system
|
opencv/tutorials/imageProcessing/thresholding/otsu_binarization.py
|
Python
|
mit
| 2,182
|
[
"Gaussian"
] |
63a4a0582f53f577cf27f7b5b39ced709f08886f5156dfd94f334dba547f829c
|
# This program computes the delay between the different gene segments using the
# convolved Gaussian process framework for RNA pol-II dynamics
#
# Ciira wa Maina, 2013
# Dedan Kimathi University of Technology.
# Nyeri-Kenya
import sys
import numpy as np
import pylab as pb
import conv_gp_funcs as cgf
import scipy as sp
from scipy.optimize import fmin_tnc
import argparse
#Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', dest='input_file', required=True,help='Properly Formatted Input file. It is assumed that the file name is in the form <gene name>.txt')
parser.add_argument('-l', '--gene_length', dest='gene_len', required=True,type=float, help='Gene length')
parser.add_argument('-n', '--num_try', dest='num_try', type=int,default=1,help='Number of random initializations when performing maximum likelihood optimization')
parser.add_argument('-t', '--trans', dest='trans', type=bool,default=True,help='Parameter transformation flag. When true, the parameters are transformed using a logit function before optimization.')
parser.add_argument('-o', '--out_dir', dest='out_dir', default='',help='The complete path of the output directory to store program output. The outputs are a plot of the inferred pol-II segment profiles, <gene name>.pdf, a text file with the delays of each segment <gene name_delay>.txt and a text file with the gene transcription speed in kilobases per second <gene name_speed>.txt. If not supplied the outputs are stored in the current directory.')
parser.add_argument('-s', '--rnd_seed', dest='rnd_seed',type=int, help='Random Seed')
args = parser.parse_args()
Data=np.genfromtxt(args.input_file)#Load the properly formatted data
obs_time=Data[:,0]#Extract the observation times
num_obs=len(obs_time)#Number of observations
num_seg=Data.shape[1]-1#Number of gene segments or observation streams
per=1.0/num_seg#percentage of gene corresponding to each segment
num_param=1+3*(num_seg) #number of parameters in the model
bound=10.0#Bound on the transformed variable to prevent numerical instability
#Obtain the parameter bounds
a=cgf.lowerbound_tied_fsf(num_seg,args.gene_len,per)
b= cgf.upperbound_tied_fsf(num_seg,args.gene_len,per)
gene=args.input_file.split('/')[-1].split('.')[0]#assume the input file is in the form /home/.../Data Directory/<gene name>.txt
#set the random seed if supplied
if args.rnd_seed!=None:
np.random.seed(args.rnd_seed)
#Form a vector of the observed time series
Y=[]
for i in range(0,num_seg):
Y=np.concatenate((Y,Data[0:num_obs,i+1]))
opt=np.zeros((args.num_try,num_param+1))#store the final parameters and final loglikelihood
diag=0
for i in range(0,args.num_try):
#we try a number of random initializations and chose the one leading to maximum loglikelihood
x0=cgf.init_param_tied_fsf(num_seg,args.trans,a,b,args.gene_len,per)
if args.trans==1:
xopt=sp.optimize.fmin_tnc(cgf.loglik_tied_fsf, x0, cgf.grad_loglik_tied_fsf, args=(obs_time,Y,num_seg,args.trans,a,b,diag),approx_grad=0, bounds=[(-bound,bound) for j in range(0,len(x0))],messages=0)[0]
opt[i,:]=np.concatenate((xopt,np.array([ -cgf.loglik_tied_fsf(xopt,obs_time,Y,num_seg,args.trans,a,b,diag)])))
#get the optimum parameters
xopt=opt[np.argmax(opt[:,num_param]),0:num_param]
#make some predictions
t_pred=np.linspace(obs_time[0],obs_time[len(obs_time)-1],500)#prediction times
pb.figure()
seg_color=['b','g','c','r','m','y','b','g','c','r','m','y']
seg_mark=['o','s','d','p','*','>','o','s','d','p','*','>']
ymax=np.ceil(np.max(Data))
yy=np.linspace(0,1,int(np.ceil(1/per))+1)*100
pb.subplot(num_seg+1,1,1)
Res=cgf.pred_Lat_tied_fsf(obs_time,t_pred,Y,xopt,num_seg,args.trans,a,b)
pb.plot(t_pred,Res['mu'],seg_color[0],linewidth=2)
mu=Res['mu']
Cov=Res['Cov']
pb.plot(t_pred,mu[:,0]+2*np.sqrt(np.diag(Cov)),'--'+seg_color[0])
pb.plot(t_pred,mu[:,0]-2*np.sqrt(np.diag(Cov)),'--'+seg_color[0])
pb.yticks([])
pb.xticks([])
pb.title("Pol-II activity over different segments of the "+gene+" gene ")
for i in range(0,num_seg):
pb.subplot(num_seg+1,1,i+2)
pb.plot(obs_time,Data[0:num_obs,i+1],seg_color[i+1]+seg_mark[i+1])
Res=cgf.pred_Cov_tied_fsf(obs_time,t_pred,Y,xopt,num_seg,i,args.trans,a,b)
pb.plot(t_pred,Res['mu'],seg_color[i+1],linewidth=2)
mu=Res['mu']
Cov=Res['Cov']
pb.plot(t_pred,mu[:,0]+2*np.sqrt(np.diag(Cov)),'--'+seg_color[i+1])
pb.plot(t_pred,mu[:,0]-2*np.sqrt(np.diag(Cov)),'--'+seg_color[i+1])
pb.yticks([])
if i!=num_seg-1:
pb.xticks([])
pb.ylabel((str(int(yy[i]))+'-'+str(int(yy[i+1]))+'%'))
pb.xlabel("Time (min)")
figname=args.out_dir+gene+'.pdf'
pb.savefig(figname)
#pb.show()
if args.trans==1:
xopt=cgf.paramInvTrans(xopt,a,b)
#Obtain the delay parameters
ind=1+num_seg
D=xopt[ind:ind+num_seg-1]
np.savetxt(args.out_dir+gene+'_delay.txt',D)
#Compute the transcription speed by performing a linear regression through the origin
lengths_gene=args.gene_len*per*(np.arange(num_seg-1)+1)
B=np.ones((len(D),1))
B[:,0]=D
TransSpeed=np.dot(np.dot(np.linalg.inv(np.dot(B.T,B)),B.T),lengths_gene)[0]
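# Equivalently, for a single regressor through the origin the normal-equation
# expression above reduces to sum(D_i * L_i) / sum(D_i ** 2), the
# least-squares slope of segment length against delay.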
np.savetxt(args.out_dir+gene+'_speed.txt',np.array([np.round(TransSpeed/1000,2)]),fmt='%3.2f')
print(gene, np.round(TransSpeed/1000, 1), 'kilobases per second')
|
ciiram/PyPol_II
|
PyPolII.py
|
Python
|
bsd-3-clause
| 5,281
|
[
"Gaussian"
] |
19df8641f1fe470811aa27f0933f0511055520a4c0800225d289025ecbb27872
|
import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence, RandomState
random = Generator(MT19937())
JUMP_TEST_DATA = [
{
"seed": 0,
"steps": 10,
"initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9},
"jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598},
},
{
"seed":384908324,
"steps":312,
"initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311},
"jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276},
},
{
"seed": [839438204, 980239840, 859048019, 821],
"steps": 511,
"initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510},
"jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475},
},
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
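# The module-scoped `endpoint` fixture reruns every test that requests it in
# both modes of Generator.integers: endpoint=True treats the upper bound as
# inclusive, endpoint=False (the default) as exclusive.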
class TestSeed:
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_multidimensional_pvals(self):
assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
def test_multinomial_pvals_float32(self):
x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09,
1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32)
pvals = x / x.sum()
random = Generator(MT19937(1432985819))
match = r"[\w\s]*pvals array is cast to 64-bit floating"
with pytest.raises(ValueError, match=match):
random.multinomial(1, pvals)
class TestMultivariateHypergeometric:
def setup(self):
self.seed = 8675309
def test_argument_validation(self):
# Error cases...
# `colors` must be a 1-d sequence
assert_raises(ValueError, random.multivariate_hypergeometric,
10, 4)
# Negative nsample
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], -1)
# Negative color
assert_raises(ValueError, random.multivariate_hypergeometric,
[-1, 2, 3], 2)
# nsample exceeds sum(colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], 10)
# nsample exceeds sum(colors) (edge case of empty colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[], 1)
# Validation errors associated with very large values in colors.
assert_raises(ValueError, random.multivariate_hypergeometric,
[999999999, 101], 5, 1, 'marginals')
int64_info = np.iinfo(np.int64)
max_int64 = int64_info.max
max_int64_index = max_int64 // int64_info.dtype.itemsize
assert_raises(ValueError, random.multivariate_hypergeometric,
[max_int64_index - 100, 101], 5, 1, 'count')
@pytest.mark.parametrize('method', ['count', 'marginals'])
def test_edge_cases(self, method):
# Set the seed, but in fact, all the results in this test are
# deterministic, so we don't really need this.
random = Generator(MT19937(self.seed))
x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([], 0, method=method)
assert_array_equal(x, [])
x = random.multivariate_hypergeometric([], 0, size=1, method=method)
assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
assert_array_equal(x, [3, 0, 0])
colors = [1, 1, 0, 1, 1]
x = random.multivariate_hypergeometric(colors, sum(colors),
method=method)
assert_array_equal(x, colors)
x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
method=method)
assert_array_equal(x, [[3, 4, 5]]*3)
# Cases for nsample:
# nsample < 10
# 10 <= nsample < colors.sum()/2
# colors.sum()/2 < nsample < colors.sum() - 10
# colors.sum() - 10 < nsample < colors.sum()
@pytest.mark.parametrize('nsample', [8, 25, 45, 55])
@pytest.mark.parametrize('method', ['count', 'marginals'])
@pytest.mark.parametrize('size', [5, (2, 3), 150000])
def test_typical_cases(self, nsample, method, size):
random = Generator(MT19937(self.seed))
colors = np.array([10, 5, 20, 25])
sample = random.multivariate_hypergeometric(colors, nsample, size,
method=method)
if isinstance(size, int):
expected_shape = (size,) + colors.shape
else:
expected_shape = size + colors.shape
assert_equal(sample.shape, expected_shape)
assert_((sample >= 0).all())
assert_((sample <= colors).all())
assert_array_equal(sample.sum(axis=-1),
np.full(size, fill_value=nsample, dtype=int))
if isinstance(size, int) and size >= 100000:
# This sample is large enough to compare its mean to
# the expected values.
assert_allclose(sample.mean(axis=0),
nsample * colors / colors.sum(),
rtol=1e-3, atol=0.005)
def test_repeatability1(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
method='count')
expected = np.array([[2, 1, 2],
[2, 1, 2],
[1, 1, 3],
[2, 0, 3],
[2, 1, 2]])
assert_array_equal(sample, expected)
def test_repeatability2(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 50,
size=5,
method='marginals')
expected = np.array([[ 9, 17, 24],
[ 7, 13, 30],
[ 9, 15, 26],
[ 9, 17, 24],
[12, 14, 24]])
assert_array_equal(sample, expected)
def test_repeatability3(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 12,
size=5,
method='marginals')
expected = np.array([[2, 3, 7],
[5, 3, 4],
[2, 5, 5],
[5, 3, 4],
[1, 5, 6]])
assert_array_equal(sample, expected)
class TestSetState:
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3',
'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1',
'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4',
'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b',
'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1',
'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
@pytest.mark.parametrize(
'bound, expected',
[(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
3769704066, 1170797179, 4108474671])),
(2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
3769704067, 1170797180, 4108474672])),
(2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
1831631863, 1215661561, 3869512430]))]
)
def test_repeatability_32bit_boundary(self, bound, expected):
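        # Loose contextual note (an assumption about the implementation,
        # not taken from this file): bounds straddling 2**32 are where a
        # bounded-integer sampler may switch between drawing 32-bit and
        # 64-bit words, so these expected values pin down repeatability
        # across that region.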
for size in [None, len(expected)]:
random = Generator(MT19937(1234))
x = random.integers(bound, size=size)
assert_equal(x, expected if size is not None else expected[0])
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[1622936284, 3620788691, 1659384060],
[1417365545, 760222891, 1909653332],
[3788118662, 660249498, 4092002593]],
[[3625610153, 2979601262, 3844162757],
[ 685800658, 120261497, 2694012896],
[1207779440, 1586594375, 3854335050]],
[[3004074748, 2310761796, 3012642217],
[2067714190, 2786677879, 1363865881],
[ 791663441, 1867303284, 2169727960]],
[[1939603804, 1250951100, 298950036],
[1040128489, 3791912209, 3317053765],
[3155528714, 61360675, 2305155588]],
[[ 817688762, 1335621943, 3288952434],
[1770890872, 1102951817, 1957607470],
[3099996017, 798043451, 48334215]]])
for size in [None, (5, 3, 3)]:
random = Generator(MT19937(12345))
x = random.integers([[-1], [0], [1]],
[2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
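        # Every (low, high) pair above is invalid for its dtype: either a
        # bound falls outside the dtype's representable range, or
        # low >= high once the endpoint adjustment below is applied.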
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=object)
high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
        # When stored in NumPy arrays, `lbnd` is cast to np.int64 and
        # `ubnd` is cast to np.uint64. Checking whether `lbnd` >= `ubnd`
        # used to be done solely via direct comparison, which is
        # incorrect: when NumPy compares the two numbers it casts both
        # to np.float64, since there is no integer type that is a
        # superset of np.int64 and np.uint64. However, `ubnd` is too
        # large to be represented exactly in np.float64, causing it to
        # be rounded down to np.iinfo(np.int64).max, which made `lbnd`
        # equal the new `ubnd` and raised a spurious ValueError.
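        # A minimal illustration of the pitfall (assuming NumPy's classic
        # int64/uint64 promotion rules):
        #     np.int64(2**63 - 1) < np.uint64(2**63)
        # compares via float64, where both operands round to
        # 9223372036854775808.0, so the result is False even though the
        # comparison is mathematically True.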
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
# chi2max is the maximum acceptable chi-squared value.
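    # Under the null hypothesis, the statistic computed below is
    # approximately chi-squared distributed with `high - 1` degrees of
    # freedom; each quoted p-value is roughly the survival function of
    # that distribution evaluated at chi2max (e.g.
    # scipy.stats.chi2.sf(chi2max, high - 1); scipy is mentioned only
    # for illustration and is not a dependency of this test).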
@pytest.mark.slow
@pytest.mark.parametrize('sample_size,high,dtype,chi2max',
[(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
(5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
(10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
(50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
])
def test_integers_small_dtype_chisquared(self, sample_size, high,
dtype, chi2max):
# Regression test for gh-14774.
samples = random.integers(high, size=sample_size, dtype=dtype)
values, counts = np.unique(samples, return_counts=True)
expected = sample_size / high
chi2 = ((counts - expected)**2 / expected).sum()
assert chi2 < chi2max
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
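        # Sketch of the idea (illustrative only, not necessarily the
        # exact implementation): pick the smallest mask of the form
        # 2**k - 1 that covers the interval width, AND each raw draw
        # with the mask, and reject any masked value falling outside
        # the interval.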
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
        # Tests whether integers with endpoint=True can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method threw an OverflowError when attempting
        # to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.sha256(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
        # Test lists, arrays (of various dtypes), and multidimensional
        # versions of both, C-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_custom_axis_empty(self):
random = Generator(MT19937(self.seed))
desired = np.array([]).reshape((0, 6))
for axis in (0, 1):
actual = np.array([]).reshape((0, 6))
random.shuffle(actual, axis=axis)
assert_array_equal(actual, desired)
def test_shuffle_axis_nonsquare(self):
y1 = np.arange(20).reshape(2, 10)
y2 = y1.copy()
random = Generator(MT19937(self.seed))
random.shuffle(y1, axis=1)
random = Generator(MT19937(self.seed))
random.shuffle(y2.T)
assert_array_equal(y1, y2)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
arr = np.array(3)
assert_raises(TypeError, random.shuffle, arr)
arr = np.ones((3, 2))
assert_raises(np.AxisError, random.shuffle, arr, 2)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
@pytest.mark.parametrize("dtype", [int, object])
@pytest.mark.parametrize("axis, expected",
[(None, np.array([[3, 7, 0, 9, 10, 11],
[8, 4, 2, 5, 1, 6]])),
(0, np.array([[6, 1, 2, 9, 10, 11],
[0, 7, 8, 3, 4, 5]])),
(1, np.array([[ 5, 3, 4, 0, 2, 1],
[11, 9, 10, 6, 8, 7]]))])
def test_permuted(self, dtype, axis, expected):
random = Generator(MT19937(self.seed))
x = np.arange(12).reshape(2, 6).astype(dtype)
random.permuted(x, axis=axis, out=x)
assert_array_equal(x, expected)
random = Generator(MT19937(self.seed))
x = np.arange(12).reshape(2, 6).astype(dtype)
y = random.permuted(x, axis=axis)
assert y.dtype == dtype
assert_array_equal(y, expected)
def test_permuted_with_strides(self):
random = Generator(MT19937(self.seed))
x0 = np.arange(22).reshape(2, 11)
x1 = x0.copy()
x = x0[:, ::3]
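        # x is a non-contiguous view into x0, so permuting with out=x
        # exercises the strided-output path and writes results back
        # through the view into x0.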
y = random.permuted(x, axis=1, out=x)
expected = np.array([[0, 9, 3, 6],
[14, 20, 11, 17]])
assert_array_equal(y, expected)
x1[:, ::3] = expected
# Verify that the original x0 was modified in-place as expected.
assert_array_equal(x1, x0)
def test_permuted_empty(self):
y = random.permuted([])
assert_array_equal(y, [])
@pytest.mark.parametrize('outshape', [(2, 3), 5])
def test_permuted_out_with_wrong_shape(self, outshape):
a = np.array([1, 2, 3])
out = np.zeros(outshape, dtype=a.dtype)
with pytest.raises(ValueError, match='same shape'):
random.permuted(a, out=out)
def test_permuted_out_with_wrong_type(self):
out = np.zeros((3, 5), dtype=np.int32)
x = np.ones((3, 5))
with pytest.raises(TypeError, match='Cannot cast'):
random.permuted(x, axis=1, out=out)
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_dirichlet_small_alpha(self):
eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
alpha = eps * np.array([1., 1.0e-3])
random = Generator(MT19937(self.seed))
actual = random.dirichlet(alpha, size=(3, 2))
expected = np.array([
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]],
[[1., 0.],
[1., 0.]]
])
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
# Use alpha.max() < 0.1 to trigger stick breaking code path
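        # Context (a loose summary for orientation, not a specification):
        # with very small alpha, naive gamma-based sampling can underflow
        # to all-zero vectors, so an alternative stick-breaking
        # construction built from beta variates is used instead.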
alpha = np.array([0.02, 0.04, 0.03])
exact_mean = alpha / alpha.sum()
random = Generator(MT19937(self.seed))
sample = random.dirichlet(alpha, size=20000000)
sample_mean = sample.mean(axis=0)
assert_allclose(sample_mean, exact_mean, rtol=1e-3)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[1, 11],
[1, 12],
[11, 17]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non-symmetric covariance input raises a ValueError
        # when check_valid='raise' with the default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
        # Check that non-positive-semidefinite covariance warns with
        # RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
method='eigh')
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
        # and that it doesn't warn with check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises a ValueError with check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise', method='eigh')
# check degenerate samples from singular covariance matrix
cov = [[1, 1], [1, 1]]
if method in ('svd', 'eigh'):
samples = random.multivariate_normal(mean, cov, size=(3, 2),
method=method)
assert_array_almost_equal(samples[..., 0], samples[..., 1],
decimal=6)
else:
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
random = Generator(MT19937(self.seed))
n_s = 1000
mean = np.array([1, 2])
cov = np.array([[2, 1], [1, 2]])
s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
s_center = s - mean
cov_emp = (s_center.T @ s_center) / (n_s - 1)
# these are pretty loose and are only designed to detect major errors
assert np.all(np.abs(s_center.mean(-2)) < 0.1)
assert np.all(np.abs(cov_emp - cov) < 0.2)
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
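        # (A nulp is one "unit in the last place": the check below passes
        # iff abs(actual - desired) <= nulp * np.spacing(max(abs(actual),
        # abs(desired))), elementwise.)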
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[4.19494429102666, 16.66920198906598],
[3.67184544902662, 17.74695521962917],
[16.27935397855501, 21.08355560691792]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gammma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gammma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_uniform_zero_range(self):
func = random.uniform
result = func(1.5, 1.5)
assert_allclose(result, 1.5)
result = func([0.0, np.pi], [0.0, np.pi])
assert_allclose(result, [0.0, np.pi])
result = func([[2145.12], [2145.12]], [2145.12, 2145.12])
assert_allclose(result, 2145.12 + np.zeros((2, 2)))
def test_uniform_neg_range(self):
func = random.uniform
assert_raises(ValueError, func, 2, 1)
assert_raises(ValueError, func, [1, 2], [1, 1])
assert_raises(ValueError, func, [[0, 1],[2, 3]], 2)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
        # Regression test for gh-8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
@pytest.mark.parametrize("kappa", [1e4, 1e15])
def test_vonmises_large_kappa(self, kappa):
random = Generator(MT19937(self.seed))
rs = RandomState(random.bit_generator)
state = random.bit_generator.state
random_state_vals = rs.vonmises(0, kappa, size=10)
random.bit_generator.state = state
gen_vals = random.vonmises(0, kappa, size=10)
if kappa < 1e6:
assert_allclose(random_state_vals, gen_vals)
else:
assert np.all(random_state_vals != gen_vals)
@pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2])
@pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15])
def test_vonmises_large_kappa_range(self, mu, kappa):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu, kappa, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast:
    # Tests that functions that broadcast behave correctly when
    # presented with non-scalar arguments.
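    # Convention used throughout this class: each parameter starts as a
    # one-element list, and `arg * 3` tiles it to length 3 so that one
    # argument is non-scalar and must broadcast against the others.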
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array(
[1.1597068009872629,
0.6539188836253857,
1.1981526554349398]
)
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
# Each config contains the initial seed, a number of raw steps, the
# sha256 hashes of the initial and the final states' keys, and the
# positions of the initial and the final state.
# These were produced using the original C implementation.
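# For orientation, each entry is shaped roughly like this (hypothetical
# values, for illustration only; the real entries live in JUMP_TEST_DATA):
# {"seed": 1234, "steps": 312,
#  "initial": {"pos": 11, "key_sha256": "..."},
#  "jumped": {"pos": 11, "key_sha256": "..."}}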
seed = config["seed"]
steps = config["steps"]
mt19937 = MT19937(seed)
# Burn step
mt19937.random_raw(steps)
key = mt19937.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
assert sha256.hexdigest() == config["initial"]["key_sha256"]
jumped = mt19937.jumped()
key = jumped.state["state"]["key"]
if sys.byteorder == 'big':
key = key.byteswap()
sha256 = hashlib.sha256(key)
assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
assert sha256.hexdigest() == config["jumped"]["key_sha256"]
def test_broadcast_size_error():
mu = np.ones(3)
sigma = np.ones((4, 3))
size = (10, 4, 2)
assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=size)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(1, 3))
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(4, 1, 1))
# 1 arg
shape = np.ones((4, 3))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=size)
with pytest.raises(ValueError):
random.standard_gamma(shape, size=(3,))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=3)
# Check out
out = np.empty(size)
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
# 2 arg
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.multinomial([2, 2], [.3, .7], size=(2, 1))
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
c = random.chisquare(5, size=(5, 4, 3))
assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
def test_broadcast_size_scalar():
mu = np.ones(3)
sigma = np.ones(3)
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
def test_ragged_shuffle():
# GH 18142
seq = [[], [], 1]
gen = Generator(MT19937(0))
assert_no_warnings(gen.shuffle, seq)
assert seq == [1, [], []]
@pytest.mark.parametrize("high", [-2, [-2]])
@pytest.mark.parametrize("endpoint", [True, False])
def test_single_arg_integer_exception(high, endpoint):
# GH 14333
gen = Generator(MT19937(0))
msg = 'high < 0' if endpoint else 'high <= 0'
with pytest.raises(ValueError, match=msg):
gen.integers(high, endpoint=endpoint)
msg = 'low > high' if endpoint else 'low >= high'
with pytest.raises(ValueError, match=msg):
gen.integers(-1, high, endpoint=endpoint)
with pytest.raises(ValueError, match=msg):
gen.integers([-1], high, endpoint=endpoint)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
def test_c_contig_req_out(dtype):
# GH 18704
out = np.empty((2, 3), order="F", dtype=dtype)
shape = [1, 2, 3]
with pytest.raises(ValueError, match="Supplied output array"):
random.standard_gamma(shape, out=out, dtype=dtype)
with pytest.raises(ValueError, match="Supplied output array"):
random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype)
@pytest.mark.parametrize("dtype", ["f4", "f8"])
@pytest.mark.parametrize("order", ["F", "C"])
@pytest.mark.parametrize("dist", [random.standard_normal, random.random])
def test_contig_req_out(dist, order, dtype):
# GH 18704
out = np.empty((2, 3), dtype=dtype, order=order)
variates = dist(out=out, dtype=dtype)
assert variates is out
variates = dist(out=out, dtype=dtype, size=out.shape)
assert variates is out
|
simongibbons/numpy
|
numpy/random/tests/test_generator_mt19937.py
|
Python
|
bsd-3-clause
| 109,525
|
[
"Gaussian"
] |
d5d906e33f77cd8866cc3080a0b885e90616d09d0ef4a956880503fbbb98dc4e
|
""" Unit Test of Workflow Modules
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import itertools
import os
import copy
import shutil
from mock import MagicMock as Mock
from DIRAC import gLogger
class ModulesTestCase(unittest.TestCase):
""" Base class for the Modules test cases
"""
def setUp(self):
gLogger.setLevel('ERROR')
# import sys
# sys.modules["DIRAC"] = DIRAC.ResourceStatusSystem.test.fake_Logger
# sys.modules["DIRAC.ResourceStatusSystem.Utilities.CS"] = DIRAC.ResourceStatusSystem.test.fake_Logger
self.jr_mock = Mock()
self.jr_mock.setApplicationStatus.return_value = {'OK': True, 'Value': ''}
self.jr_mock.generateRequest.return_value = {'OK': True, 'Value': 'pippo'}
self.jr_mock.setJobParameter.return_value = {'OK': True, 'Value': 'pippo'}
self.jr_mock.generateForwardDISET.return_value = {'OK': True, 'Value': 'pippo'}
# self.jr_mock.setJobApplicationStatus.return_value = {'OK': True, 'Value': 'pippo'}
self.fr_mock = Mock()
self.fr_mock.getFiles.return_value = {}
self.fr_mock.setFileStatus.return_value = {'OK': True, 'Value': ''}
self.fr_mock.commit.return_value = {'OK': True, 'Value': ''}
self.fr_mock.generateRequest.return_value = {'OK': True, 'Value': ''}
rc_mock = Mock()
rc_mock.update.return_value = {'OK': True, 'Value': ''}
rc_mock.setDISETRequest.return_value = {'OK': True, 'Value': ''}
rc_mock.isEmpty.return_value = {'OK': True, 'Value': ''}
rc_mock.toXML.return_value = {'OK': True, 'Value': ''}
rc_mock.getDigest.return_value = {'OK': True, 'Value': ''}
rc_mock.__len__.return_value = 1
self.rc_mock = rc_mock
ar_mock = Mock()
ar_mock.commit.return_value = {'OK': True, 'Value': ''}
self.rm_mock = Mock()
self.rm_mock.getReplicas.return_value = {'OK': True, 'Value': {'Successful': {'pippo': 'metadataPippo'},
'Failed': None}}
self.rm_mock.getCatalogFileMetadata.return_value = {'OK': True, 'Value': {'Successful': {'pippo': 'metadataPippo'},
'Failed': None}}
self.rm_mock.removeFile.return_value = {'OK': True, 'Value': {'Failed': False}}
self.rm_mock.putStorageDirectory.return_value = {'OK': True, 'Value': {'Failed': False}}
self.rm_mock.addCatalogFile.return_value = {'OK': True, 'Value': {'Failed': False}}
self.rm_mock.putAndRegister.return_value = {'OK': True, 'Value': {'Failed': False}}
self.rm_mock.getFile.return_value = {'OK': True, 'Value': {'Failed': False}}
self.jsu_mock = Mock()
self.jsu_mock.setJobApplicationStatus.return_value = {'OK': True, 'Value': ''}
request_mock = Mock()
request_mock.addSubRequest.return_value = {'OK': True, 'Value': ''}
request_mock.setSubRequestFiles.return_value = {'OK': True, 'Value': ''}
request_mock.getNumSubRequests.return_value = {'OK': True, 'Value': ''}
request_mock._getLastOrder.return_value = 1
self.ft_mock = Mock()
self.ft_mock.transferAndRegisterFile.return_value = {'OK': True, 'Value': {'uploadedSE': ''}}
self.ft_mock.transferAndRegisterFileFailover.return_value = {'OK': True, 'Value': {}}
self.nc_mock = Mock()
self.nc_mock.sendMail.return_value = {'OK': True, 'Value': ''}
self.prod_id = 123
self.prod_job_id = 456
self.wms_job_id = 0
self.workflowStatus = {'OK': True}
self.stepStatus = {'OK': True}
self.wf_commons = [{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'eventType': '123456789',
'jobType': 'merge',
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'numberOfEvents': '100',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'merge',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'numberOfEvents': '100',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'merge',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'numberOfEvents': '100',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'merge',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'numberOfEvents': '100',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'InputData': '',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'InputData': 'foo;bar',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'InputData': 'foo;bar',
'ParametricInputData': '',
'appSteps': ['someApp_1']},
{'PRODUCTION_ID': str(self.prod_id),
'JOB_ID': str(self.prod_job_id),
'configName': 'aConfigName',
'configVersion': 'aConfigVersion',
'outputDataFileMask': '',
'jobType': 'reco',
'BookkeepingLFNs': 'aa',
'ProductionOutputData': 'ProductionOutputData',
'JobReport': self.jr_mock,
'Request': rc_mock,
'AccountingReport': ar_mock,
'FileReport': self.fr_mock,
'LogFilePath': 'someDir',
'LogTargetPath': 'someOtherDir',
'runNumber': 'Unknown',
'InputData': 'foo;bar',
'ParametricInputData': 'pid1;pid2;pid3',
'appSteps': ['someApp_1']},
]
self.step_commons = [{'applicationName': 'someApp',
'applicationVersion': 'v1r0',
'eventType': '123456789',
'applicationLog': 'appLog',
'extraPackages': '',
'XMLSummary': 'XMLSummaryFile',
'numberOfEvents': '100',
'BKStepID': '123',
'StepProcPass': 'Sim123',
'outputFilePrefix': 'pref_',
'STEP_INSTANCE_NAME': 'someApp_1',
'listoutput': [{'outputDataName': str(self.prod_id) + '_' + str(self.prod_job_id) + '_',
'outputDataSE': 'aaa',
'outputDataType': 'bbb'}]},
{'applicationName': 'someApp',
'applicationVersion': 'v1r0',
'eventType': '123456789',
'applicationLog': 'appLog',
'extraPackages': '',
'XMLSummary': 'XMLSummaryFile',
'numberOfEvents': '100',
'BKStepID': '123',
'StepProcPass': 'Sim123',
'outputFilePrefix': 'pref_',
'optionsLine': '',
'STEP_INSTANCE_NAME': 'someApp_1',
'listoutput': [{'outputDataName': str(self.prod_id) + '_' + str(self.prod_job_id) + '_',
'outputDataSE': 'aaa',
'outputDataType': 'bbb'}]},
{'applicationName': 'someApp',
'applicationVersion': 'v1r0',
'eventType': '123456789',
'applicationLog': 'appLog',
'extraPackages': '',
'XMLSummary': 'XMLSummaryFile',
'numberOfEvents': '100',
'BKStepID': '123',
'StepProcPass': 'Sim123',
'outputFilePrefix': 'pref_',
'extraOptionsLine': 'blaBla',
'STEP_INSTANCE_NAME': 'someApp_1',
'listoutput': [{'outputDataName': str(self.prod_id) + '_' + str(self.prod_job_id) + '_',
'outputDataSE': 'aaa',
'outputDataType': 'bbb'}]}]
self.step_number = '321'
self.step_id = '%s_%s_%s' % (self.prod_id, self.prod_job_id, self.step_number)
from DIRAC.Workflow.Modules.ModuleBase import ModuleBase
self.mb = ModuleBase()
self.mb.rm = self.rm_mock
self.mb.request = self.rc_mock
self.mb.jobReport = self.jr_mock
self.mb.fileReport = self.fr_mock
self.mb.workflow_commons = self.wf_commons[0]
from DIRAC.Workflow.Modules.FailoverRequest import FailoverRequest
self.fr = FailoverRequest()
self.fr.request = self.rc_mock
self.fr.jobReport = self.jr_mock
self.fr.fileReport = self.fr_mock
from DIRAC.Workflow.Modules.Script import Script
self.script = Script()
self.script.request = self.rc_mock
self.script.jobReport = self.jr_mock
self.script.fileReport = self.fr_mock
def tearDown(self):
for fileProd in ['appLog', 'foo.txt', 'aaa.Bhadron.dst', 'bbb.Calibration.dst', 'bar_2.py', 'foo_1.txt',
'ccc.charm.mdst', 'prova.txt', 'BAR.txt', 'FooBAR.ext.txt', 'applicationLog.txt',
'ErrorLogging_Step1_coredump.log', '123_00000456_request.xml', 'lfn1', 'lfn2',
'aaa.bhadron.dst', 'bbb.calibration.dst', 'ProductionOutputData', 'data.py',
'00000123_00000456.tar', 'someOtherDir', 'DISABLE_WATCHDOG_CPU_WALLCLOCK_CHECK',
]:
try:
os.remove(fileProd)
except OSError:
continue
for directory in ['./job', 'job']:
try:
shutil.rmtree(directory)
except Exception:
continue
#############################################################################
# ModuleBase.py
#############################################################################
class ModuleBaseSuccess(ModulesTestCase):
#################################################
def test__checkLocalExistance(self):
self.assertRaises(OSError, self.mb._checkLocalExistance, ['aaa', 'bbb'])
#################################################
def test__applyMask(self):
candidateFiles = {'00012345_00012345_4.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_4.dst',
'type': 'dst',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_2.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_3.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_5.AllStreams.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_5.AllStreams.dst',
'type': 'allstreams.dst',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}}
fileMasks = (['dst'], 'dst', ['sim'], ['digi'], ['digi', 'sim'], 'allstreams.dst')
stepMasks = ('', '5', '', ['2'], ['1', '3'], '')
results = ({'00012345_00012345_4.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_4.dst',
'type': 'dst',
'workflowSE': 'Tier1_MC_M-DST'}
},
{},
{'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}
},
{'00012345_00012345_2.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
},
{'00012345_00012345_3.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}
},
{'00012345_00012345_5.AllStreams.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_5.AllStreams.dst',
'type': 'allstreams.dst',
'workflowSE': 'Tier1_MC_M-DST'}
}
)
for fileMask, result, stepMask in zip(fileMasks, results, stepMasks):
res = self.mb._applyMask(candidateFiles, fileMask, stepMask)
self.assertEqual(res, result)
#################################################
def test__checkSanity(self):
candidateFiles = {'00012345_00012345_4.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_4.dst',
'type': 'dst',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_2.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_3.digi': {'type': 'digi', 'workflowSE': 'Tier1-RDST'},
'00012345_00012345_5.AllStreams.dst':
{'lfn': '/lhcb/MC/2010/DST/00012345/0001/00012345_00012345_5.AllStreams.dst',
'type': 'DST',
'workflowSE': 'Tier1_MC_M-DST'},
'00012345_00012345_1.sim': {'type': 'sim', 'workflowSE': 'Tier1-RDST'}}
self.assertRaises(ValueError, self.mb._checkSanity, candidateFiles)
#################################################
def test_getCandidateFiles(self):
# this needs to avoid the "checkLocalExistance"
open('foo_1.txt', 'w').close()
open('bar_2.py', 'w').close()
outputList = [{'outputDataType': 'txt', 'outputDataSE': 'Tier1-RDST', 'outputDataName': 'foo_1.txt'},
{'outputDataType': 'py', 'outputDataSE': 'Tier1-RDST', 'outputDataName': 'bar_2.py'}]
outputLFNs = ['/lhcb/MC/2010/DST/00012345/0001/foo_1.txt', '/lhcb/MC/2010/DST/00012345/0001/bar_2.py']
fileMask = 'txt'
stepMask = ''
result = {'foo_1.txt': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/foo_1.txt',
'type': outputList[0]['outputDataType'],
'workflowSE': outputList[0]['outputDataSE']}}
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
self.assertEqual(res, result)
fileMask = ['txt', 'py']
stepMask = None
result = {'foo_1.txt': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/foo_1.txt',
'type': outputList[0]['outputDataType'],
'workflowSE': outputList[0]['outputDataSE']},
'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']},
}
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
self.assertEqual(res, result)
fileMask = ['aa']
stepMask = None
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
result = {}
self.assertEqual(res, result)
fileMask = ''
stepMask = '2'
result = {'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']}}
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
self.assertEqual(res, result)
fileMask = ''
stepMask = 2
result = {'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']}}
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
self.assertEqual(res, result)
fileMask = ''
stepMask = ['2', '3']
result = {'bar_2.py': {'lfn': '/lhcb/MC/2010/DST/00012345/0001/bar_2.py',
'type': outputList[1]['outputDataType'],
'workflowSE': outputList[1]['outputDataSE']}}
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
self.assertEqual(res, result)
fileMask = ''
stepMask = ['3']
result = {}
res = self.mb.getCandidateFiles(outputList, outputLFNs, fileMask, stepMask)
self.assertEqual(res, result)
def test__enableModule(self):
self.mb.production_id = self.prod_id
self.mb.prod_job_id = self.prod_job_id
self.mb.jobID = self.wms_job_id
self.mb.workflowStatus = self.workflowStatus
self.mb.stepStatus = self.stepStatus
self.mb.workflow_commons = self.wf_commons[0] # APS: this is needed
self.mb.step_commons = self.step_commons[0]
self.mb.step_number = self.step_number
self.mb.step_id = self.step_id
self.mb.execute()
self.assertFalse(self.mb._enableModule())
self.mb.jobID = 1
self.mb.execute()
self.assertTrue(self.mb._enableModule())
def test__determineStepInputData(self):
self.mb.stepName = 'DaVinci_2'
inputData = 'previousStep'
self.mb.appSteps = ['Brunel_1', 'DaVinci_2']
self.mb.workflow_commons = {'outputList': [{'stepName': 'Brunel_1',
'outputDataType': 'brunelhist',
'outputBKType': 'BRUNELHIST',
'outputDataSE': 'CERN-HIST',
'outputDataName': 'Brunel_00012345_00006789_1_Hist.root'},
{'stepName': 'Brunel_1',
'outputDataType': 'sdst',
'outputBKType': 'SDST',
'outputDataSE': 'Tier1-BUFFER',
'outputDataName': '00012345_00006789_1.sdst'}
]
}
self.mb.inputDataType = 'SDST'
first = self.mb._determineStepInputData(inputData)
second = ['00012345_00006789_1.sdst']
self.assertEqual(first, second)
inputData = 'previousStep'
self.mb.appSteps = ['Brunel_1', 'DaVinci_2']
self.mb.workflow_commons['outputList'] = [{'stepName': 'Brunel_1',
'outputDataType': 'brunelhist',
'outputBKType': 'BRUNELHIST',
'outputDataSE': 'CERN-HIST',
'outputDataName': 'Brunel_00012345_00006789_1_Hist.root'},
{'stepName': 'Brunel_1',
'outputDataType': 'sdst',
'outputBKType': 'SDST',
'outputDataSE': 'Tier1-BUFFER',
'outputDataName': 'some.sdst'},
{'stepName': 'Brunel_1',
'outputDataType': 'sdst',
'outputBKType': 'SDST',
'outputDataSE': 'Tier1-BUFFER',
'outputDataName': '00012345_00006789_1.sdst'}
]
self.mb.inputDataType = 'SDST'
first = self.mb._determineStepInputData(inputData)
second = ['some.sdst', '00012345_00006789_1.sdst']
self.assertEqual(first, second)
inputData = 'LFN:123.raw'
first = self.mb._determineStepInputData(inputData)
second = ['123.raw']
self.assertEqual(first, second)
#############################################################################
# FailoverRequest.py
#############################################################################
class FailoverRequestSuccess(ModulesTestCase):
#################################################
def test_execute(self):
self.fr.jobType = 'merge'
self.fr.stepInputData = ['foo', 'bar']
self.fr.production_id = self.prod_id
self.fr.prod_job_id = self.prod_job_id
self.fr.jobID = self.wms_job_id
self.fr.workflowStatus = self.workflowStatus
self.fr.stepStatus = self.stepStatus
self.fr.workflow_commons = self.wf_commons
self.fr.step_commons = self.step_commons[0]
self.fr.step_number = self.step_number
self.fr.step_id = self.step_id
# no errors, no input data
for wf_commons in copy.deepcopy(self.wf_commons):
for step_commons in self.step_commons:
self.fr.workflow_commons = wf_commons
self.fr.step_commons = step_commons
res = self.fr.execute()
self.assertTrue(res['OK'])
#############################################################################
# Script.py
#############################################################################
class ScriptSuccess(ModulesTestCase):
#################################################
def test_execute(self):
self.script.jobType = 'merge'
self.script.stepInputData = ['foo', 'bar']
self.script.production_id = self.prod_id
self.script.prod_job_id = self.prod_job_id
self.script.jobID = self.wms_job_id
self.script.workflowStatus = self.workflowStatus
self.script.stepStatus = self.stepStatus
self.script.workflow_commons = self.wf_commons
self.script.step_commons = self.step_commons[0]
self.script.step_number = self.step_number
self.script.step_id = self.step_id
self.script.executable = 'ls'
self.script.applicationLog = 'applicationLog.txt'
# no errors, no input data
for wf_commons in copy.deepcopy(self.wf_commons):
for step_commons in self.step_commons:
self.script.workflow_commons = wf_commons
self.script.step_commons = step_commons
self.script._setCommand()
self.script._executeCommand()
class ScriptFailure(ModulesTestCase):
#################################################
def test_execute(self):
self.script.jobType = 'merge'
self.script.stepInputData = ['foo', 'bar']
self.script.production_id = self.prod_id
self.script.prod_job_id = self.prod_job_id
self.script.jobID = self.wms_job_id
self.script.workflowStatus = self.workflowStatus
self.script.stepStatus = self.stepStatus
self.script.workflow_commons = self.wf_commons
self.script.step_commons = self.step_commons[0]
self.script.step_number = self.step_number
self.script.step_id = self.step_id
# no errors, no input data
for wf_commons in copy.deepcopy(self.wf_commons):
for step_commons in self.step_commons:
self.script.workflow_commons = wf_commons
self.script.step_commons = step_commons
res = self.script.execute()
self.assertFalse(res['OK'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(ModulesTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ModuleBaseSuccess))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FailoverRequestSuccess))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ScriptSuccess))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ScriptFailure))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
yujikato/DIRAC
|
src/DIRAC/Workflow/Modules/test/Test_Modules.py
|
Python
|
gpl-3.0
| 31,932
|
[
"DIRAC"
] |
0d65bf898316b5fc0ae8938c179a8a54f746aabac2592d20ccf760989f420cd7
|
#!/usr/bin/env python
"""
crate_anon/anonymise/eponyms.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Medical eponym handling.**
Eponyms from 2018-03-27 snapshot of:
- https://en.wikipedia.org/wiki/List_of_eponymously_named_diseases
Remember the following:
- Patient names should be removed using their identifiable information.
Removing all known names is just an additional experimental safety measure.
- The eponyms are removed from the name lists, before the name lists are
used to scrub text -- so we don't scrub "Parkinson's disease", as an
obvious example.
- Consequently, we can be quite liberal here. Including "Turner", for example
(a common UK name but also in Turner's syndrome) won't prevent a Mr Turner
from being anonymised.
- However, the point is to scrub out some inadvertent names, so maybe not
too liberal!
"""
from typing import Dict, List, Optional
from unidecode import unidecode
class EponymInfo(object):
"""
Reserved for future use, the intention being maybe some classification by
how rare or common (a) the eponymous disease is, and (b) the name itself
is.
"""
pass
EPONYM_DICT = {} # type: Dict[str, Optional[EponymInfo]]
def get_plain_eponyms(add_unaccented_versions: bool = True) -> List[str]:
"""
Returns a list of all names to be used as medical eponyms -- that is,
people who've had syndromes named after them.
Args:
add_unaccented_versions:
Add unaccented (mangled) versions of names, too? For example, do
you want Sjogren as well as Sjögren?
Returns:
alphabetically sorted list of strings
"""
eponyms = list(EPONYM_DICT.keys())
if add_unaccented_versions:
ep_set = set(eponyms)
for proper in eponyms:
deaccented = unidecode(proper)
ep_set.add(deaccented)
return sorted(ep_set)
else:
return sorted(eponyms)
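# A minimal usage sketch (illustrative only, and assuming that "Sjögren",
# the docstring's own example, appears in the fully populated EPONYM_DICT):
#
#     names = get_plain_eponyms(add_unaccented_versions=True)
#     assert "Sjögren" in names and "Sjogren" in names
#
# With add_unaccented_versions=False only the accented originals are returned.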
def _add_eponym(composite: str,
sep: str = "–",
info: Optional[EponymInfo] = None) -> None:
"""
Adds an eponym to the global eponym dictionary.
If a composite eponym is supplied, adds each part of it.
Args:
composite:
an eponym like ``"Parkinson"``, or a composite eponym like
``"Beckwith–Wiedemann"``
sep:
the string that separates parts of a composite eponym
info:
optional :class:`EponymInfo` instance; reserved for future
functionality
"""
global EPONYM_DICT
for name in composite.split(sep):
if name not in EPONYM_DICT:
EPONYM_DICT[name] = info
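# Illustrative sketch of the splitting behaviour described in the docstring
# (hypothetical call; the name is taken from the docstring's own example):
#
#     _add_eponym("Beckwith–Wiedemann")
#     # ... ensures both "Beckwith" and "Wiedemann" are keys of EPONYM_DICT,
#     # without overwriting any entry that is already present.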
# noinspection PyPep8
SIMPLE_EPONYM_LIST = [
# -------------------------------------------------------------------------
# A
# -------------------------------------------------------------------------
# Aarskog–Scott syndrome (a.k.a. Aarskog syndrome)
# – Dagfinn Aarskog, Charles I. Scott, Jr.
"Aarskog–Scott",
# Aase–Smith syndrome (a.k.a. Aase syndrome)
# – Jon Morton Aase, David Weyhe Smith
"Aase–Smith",
# Abdallat–Davis–Farrage syndrome
# – Adnan Al Abdallat, S.M. Davis, James Robert Farrage
"Abdallat–Davis–Farrage",
# Abderhalden–Kaufmann–Lignac syndrome (a.k.a. Abderhalden–Lignac–Kaufmann
# disease) – Emil Abderhalden, Eduard Kauffman, George Lignac
"Abderhalden–Kaufmann–Lignac",
# Abercrombie disease (a.k.a. Abercrombie syndrome) – John Abercrombie
"Abercrombie",
# Achard–Thiers syndrome – Emile Achard, Joseph Thiers
"Achard–Thiers",
# Ackerman tumor – Lauren Ackerman
"Ackerman",
# Adams–Oliver syndrome – Robert Adams, William Oliver
"Adams–Oliver",
# Adams–Stokes syndrome (a.k.a. Gerbec–Morgagni–Adams–Stokes syndrome,
# Gerbezius–Morgagni–Adams–Stokes syndrome, Stokes–Adams syndrome)
# – Robert Adams, William Stokes
"Adams–Stokes",
# Addison disease – Thomas Addison
"Addison",
# Adson–Caffey syndrome – Alfred Washington Adson, I. R. Caffey
"Adson–Caffey",
# Ahumada–Del Castillo syndrome
# – Juan Carlos Ahumada Sotomayor, Enrique Benjamin Del Castillo
"Ahumada–Del Castillo",
# Aicardi syndrome – Jean Aicardi
"Aicardi",
# Aicardi–Goutières syndrome – Jean Aicardi, Francoise Goutieres
"Aicardi–Goutières",
# Alagille syndrome – Daniel Alagille
"Alagille",
# Albers-Schönberg disease – Heinrich Albers-Schönberg
"Albers-Schönberg",
# Albright disease (a.k.a. Albright hereditary osteodystrophy, Albright
# syndrome, McCune–Albright syndrome) – Fuller Albright
"Albright",
# Albright–Butler–Bloomberg disease – Fuller Albright, Allan Macy Butler,
# Esther Bloomberg
"Albright–Butler–Bloomberg",
# Albright–Hadorn syndrome – Fuller Albright, Walter Hadorn
"Albright–Hadorn",
# Albright IV syndrome (a.k.a. Martin–Albright syndrome) – Fuller Albright
"Albright",
# Alexander disease – William Stuart Alexander
"Alexander",
# Alibert–Bazin syndrome
# – Jean-Louis-Marc Alibert, Pierre-Antoine-Ernest Bazin
"Alibert–Bazin",
# Alpers–Huttenlocher syndrome (a.k.a. Alpers disease, Alpers syndrome)
# – Bernard Jacob Alpers, Peter Huttenlocher
"Alpers–Huttenlocher",
# Alport syndrome – Arthur Cecil Alport
"Alport",
# Alström syndrome – Carl Henry Alström
"Alström",
# Alvarez' syndrome – Walter C. Alvarez
"Alvarez",
# Alzheimer disease – Alois Alzheimer
"Alzheimer",
# Anders disease – James Meschter Anders
"Anders",
# Andersen disease – Dorothy Andersen
"Andersen",
# Andersen–Tawil syndrome (a.k.a. Andersen syndrome)
# – Ellen Andersen, Al-Rabi Tawil
"Andersen–Tawil",
# Anderson–Fabry disease – William Anderson, Johannes Fabry
"Anderson–Fabry",
# Angelman syndrome – Harry Angelman
"Angelman",
# Angelucci syndrome – Arnaldo Angelucci
"Angelucci",
# Anton–Babinski syndrome (a.k.a. Anton syndrome)
# – Gabriel Anton, Joseph Babinski
"Anton–Babinski",
# Apert syndrome – Eugène Apert
"Apert",
# Aran–Duchenne disease (a.k.a. Aran–Duchenne spinal muscular atrophy)
# – François-Amilcar Aran, Guillaume Duchenne
"Aran–Duchenne",
# Armanni–Ebstein nephropathic change – Luciano Armanni, Wilhelm Ebstein
"Armanni–Ebstein",
# Arnold–Chiari malformation – Julius Arnold, Hans Chiari
"Arnold–Chiari",
# Arthus phenomenon – Nicolas Maurice Arthus
"Arthus",
# Asherman syndrome – Joseph G. Asherman
"Asherman",
# Asperger syndrome (a.k.a. Asperger disorder) – Hans Asperger
"Asperger",
# Avellis syndrome – Georg Avellis
"Avellis",
# Ayerza–Arrillaga syndrome (a.k.a. Ayerza–Arrillaga disease, Ayerza
# syndrome, Ayerza disease) – Abel Ayerza, Francisco Arrillaga
"Ayerza–Arrillaga",
# -------------------------------------------------------------------------
# B
# -------------------------------------------------------------------------
# Baastrup syndrome – Christian Ingerslev Baastrup
"Baastrup",
# Babesiosis – Victor Babeş
# ... noun derived from the name, not the name itself
# Babington disease – Benjamin Babington
"Babington",
# Babinski–Fröhlich syndrome – Joseph Babinski, Alfred Fröhlich
"Babinski–Fröhlich",
# Babinski–Froment syndrome – Joseph Babinski, Jules Froment
"Babinski–Froment",
# Babinski–Nageotte syndrome – Joseph Babinski, Jean Nageotte
"Babinski–Nageotte",
# Baker cyst – William Morrant Baker
"Baker",
# Baller–Gerold syndrome – Friedrich Baller, M Gerold
"Baller–Gerold",
# Balo concentric sclerosis (a.k.a. Balo disease) – József Mátyás Baló
"Balo",
# Bamberger disease – Heinrich von Bamberger
"Bamberger",
# Bamberger–Marie disease – Eugen von Bamberger, Pierre Marie
"Bamberger–Marie",
# Bancroft filariasis – Joseph Bancroft
"Bancroft",
# Bang's disease – Bernhard Bang
"Bang",
# Bankart lesion – Arthur Bankart
"Bankart",
# Bannayan–Riley–Ruvalcaba syndrome
# – George A. Bannayan, Harris D. Riley, Jr., Rogelio H. A. Ruvalcaba
"Bannayan–Riley–Ruvalcaba",
# Bannayan–Zonana syndrome – George A. Bannayan, Jonathan X. Zonana
"Bannayan–Zonana",
# Banti's syndrome – Guido Banti
"Banti",
# Bárány syndrome – Robert Bárány
"Bárány",
# Bardet–Biedl syndrome (formerly, a.k.a. Laurence–Moon–Bardet–Biedl
# syndrome, now deemed an invalid synonym) – Georges Bardet, Arthur Biedl
"Bardet–Biedl", # other names e.g. under Laurence
# Barlow disease – Thomas Barlow
"Barlow",
# Barlow's syndrome – John Barlow
"Barlow",
# Barraquer–Simons syndrome – Luis Barraquer Roviralta, Arthur Simons
"Barraquer–Simons",
# Barré–Liéou syndrome – Jean Alexandre Barré, Yang-Choen Liéou
"Barré–Liéou",
# Barrett's ulcer – Norman Barrett
"Barrett",
# Bart–Pumphrey syndrome – R. S. Bart, R. E. Pumphrey
"Bart–Pumphrey",
# Barth syndrome – Peter Barth
"Barth",
# Bartholin cyst – Caspar Bartholin
"Bartholin",
# Bartter syndrome – Frederic Bartter
"Bartter",
# Basedow disease – Karl Adolph von Basedow
"Basedow",
# Basedow syndrome – Karl Adolph von Basedow
"Basedow",
# Bassen–Kornzweig syndrome – Frank Bassen, Abraham Kornzweig
"Bassen–Kornzweig",
# Batten disease – Frederick Batten
"Batten",
# Bazin disease – Pierre-Antoine-Ernest Bazin
"Bazin",
# Becker muscular dystrophy – Peter Emil Becker
"Becker",
# Beckwith–Wiedemann syndrome – John Bruce Beckwith, Hans-Rudolf Wiedemann
"Beckwith–Wiedemann",
# Behçet disease – Hulusi Behçet
"Behçet",
# Bekhterev disease – Vladimir Bekhterev
"Bekhterev",
# Bell palsy – Charles Bell
"Bell",
# Benedikt syndrome – Moritz Benedikt
"Benedikt",
# Benjamin syndrome – Erich Benjamin
"Benjamin",
# Berardinelli–Seip congenital lipodystrophy – W Berardinelli, M Seip
"Berardinelli–Seip",
# Berdon syndrome – Walter Berdon
"Berdon",
# Berger disease – Jean Berger
"Berger",
# Bergeron disease – Etienne-Jules Bergeron
"Bergeron",
# Bernard syndrome – Claude Bernard
"Bernard",
# Bernard–Soulier syndrome – Jean Bernard, Jean Pierre Soulier
"Bernard–Soulier",
# Bernhardt–Roth paraesthesia – Martin Bernhardt, Vladimir Karlovich Roth
"Bernhardt–Roth",
# Bernheim syndrome – P. I. Bernheim
"Bernheim",
# Besnier prurigo – Ernest Henri Besnier
"Besnier",
# Besnier–Boeck–Schaumann disease
# – Ernest Henri Besnier, Cæsar Peter Møller Boeck, Jörgen Nilsen Schaumann
"Besnier–Boeck–Schaumann",
# Biermer anaemia – Michael Anton Biermer
"Biermer",
# Bietti crystalline dystrophy – G. Bietti
"Bietti",
# Bickerstaff brainstem encephalitis – Edwin Bickerstaff
"Bickerstaff",
# Bilharzia – Theodor Maximilian Bilharz
# ... the disease is its own noun, not the name
# Binder syndrome – K.H. Binder
"Binder",
# Bing–Horton syndrome – Paul Robert Bing, Bayard Taylor Horton
"Bing–Horton",
# Bing–Neel syndrome – Jens Bing, Axel Valdemar Neel
"Bing–Neel",
# Binswanger dementia – Otto Binswanger
"Binswanger",
# Birt–Hogg–Dubé syndrome – Arthur Birt, Georgina Hogg, William Dubé
"Birt–Hogg–Dubé",
# Bland–White–Garland syndrome
# – Edward Franklin Bland, Paul Dudley White, Joseph Garland
"Bland–White–Garland",
# Bloch–Sulzberger syndrome – Bruno Bloch, Marion Baldur Sulzberger
"Bloch–Sulzberger",
# Bloom syndrome – David Bloom
"Bloom",
# Blount syndrome – Walter Putnam Blount
"Blount",
# Boerhaave syndrome – Herman Boerhaave
"Boerhaave",
# Bogorad syndrome – F. A. Bogorad
"Bogorad",
# Bonnevie–Ullrich syndrome – Kristine Bonnevie, Otto Ullrich
"Bonnevie–Ullrich",
# Bourneville–Pringle disease
# – Désiré-Magloire Bourneville, John James Pringle
"Bourneville–Pringle",
# Bowen disease – John T. Bowen
"Bowen",
# Brachman de Lange syndrome
# – Winfried Robert Clemens Brachmann, Cornelia Catharina de Lange
"Brachman–de Lange",
# Brailsford–Morquio syndrome – James Frederick Brailsford, Luís Morquio
"Brailsford–Morquio",
# Brandt syndrome – Thore Edvard Brandt
"Brandt",
# Brenner tumour – Fritz Brenner
"Brenner",
# Brewer kidney – George Emerson Brewer
"Brewer",
# Bright disease – Richard Bright
"Bright",
# Brill–Symmers disease – Nathan Brill, Douglas Symmers
"Brill–Symmers",
# Brill–Zinsser disease – Nathan Brill, Hans Zinsser
"Brill–Zinsser",
# Briquet syndrome – Paul Briquet
"Briquet",
# Brissaud disease – Édouard Brissaud
"Brissaud",
# Brissaud–Sicard syndrome – Édouard Brissaud, Jean-Athanase Sicard
"Brissaud–Sicard",
# Broadbent apoplexy – William Broadbent
"Broadbent",
# Brock syndrome – Russell Claude Brock
"Brock",
# Brodie abscess – Benjamin Collins Brodie
"Brodie",
# Brodie syndrome – Benjamin Collins Brodie
"Brodie",
# Brooke epithelioma – Henry Ambrose Grundy Brooke
"Brooke",
# Brown-Séquard syndrome – Charles-Édouard Brown-Séquard
"Brown-Séquard",
# Brucellosis – David Bruce
# ... its own noun
# Bruck–de Lange disease – Franz Bruck, Cornelia Catharina de Lange
"Bruck–de Lang",
# Brugada syndrome – Pedro Brugada, Josep Brugada
"Brugada",
# Bruns syndrome – Ludwig Bruns
"Bruns",
# Bruton–Gitlin syndrome – Ogden Carr Bruton, David Gitlin
"Bruton–Gitlin",
# Budd–Chiari syndrome – George Budd, Hans Chiari
"Budd–Chiari",
# Buerger disease – Leo Buerger
"Buerger",
# Bumke syndrome – Oswald Conrad Edouard Bumke
"Bumke",
# Bürger–Grütz syndrome – Max Burger, Otto Grutz
"Bürger–Grütz",
# Burkitt lymphoma – Denis Parsons Burkitt
"Burkitt",
# Burnett syndrome – Charles Hoyt Burnett
"Burnett",
# Bywaters syndrome – Eric Bywaters
"Bywaters",
# -------------------------------------------------------------------------
# C
# -------------------------------------------------------------------------
# Caffey–Silverman syndrome – John Patrick Caffey, William Silverman
"Caffey–Silverman",
# Calvé disease – Jacques Calvé
"Calvé",
# Camurati–Engelmann disease (a.k.a. Camurati–Engelmann syndrome, Engelmann
# disease, Engelmann syndrome) – M. Camurati, G. Engelmann
"Camurati–Engelmann",
# Canavan disease – Myrtelle Canavan
"Canavan",
# Cannon disease – Walter Cannon
"Cannon",
# Cantú syndrome – José María Cantú
"Cantú",
# Capgras delusion (a.k.a. Capgras syndrome) – Joseph Capgras
"Capgras",
# Caplan's syndrome – Anthony Caplan
"Caplan",
# Carney complex – J. Aidan Carney
"Carney",
# Carney triad – J. Aidan Carney
"Carney",
# Carney–Stratakis syndrome – J. Aidan Carney, C. A. Stratakis
"Carney–Stratakis",
# Caroli syndrome – Jacques Caroli
"Caroli",
# Carrión's disease – Daniel Alcides Carrión
"Carrión",
# Castleman disease – Benjamin Castleman
"Castleman",
# Céstan–Chenais syndrome
# – Étienne Jacques Marie Raymond Céstan, Louis Jean Chennais
"Céstan–Chenais",
# Chagas disease – Carlos Chagas
"Chagas",
# Charcot's disease – Jean-Martin Charcot
"Charcot",
# Charcot–Marie–Tooth disease
# – Jean-Martin Charcot, Pierre Marie, Howard Henry Tooth
"Charcot–Marie–Tooth",
# Charles Bonnet syndrome – Charles Bonnet
"Charles Bonnet"
# Cheadle's disease – Walter Butler Cheadle
"Cheadle",
# Chédiak–Higashi syndrome – Alexander Chédiak, Otokata Higashi
"Chédiak–Higashi",
# Chiari malformation – Hans Chiari
"Chiari",
# Chiari–Frommel syndrome – Johann Baptist Chiari, Richard Frommel
"Chiari–Frommel",
# Chilaiditi syndrome – Demetrius Chilaiditi
"Chilaiditi",
# Christ–Siemens–Touraine syndrome
# – Josef Christ, Hermann Werner Siemens, Albert Touraine
"Christ–Siemens–Touraine",
# Christensen–Krabbe disease – Erna Christensen, Knud Krabbe
"Christensen–Krabbe",
# Christmas disease – Stephen Christmas
"Christmas",
# Churg–Strauss syndrome – Jacob Churg, Lotte Strauss
"Churg–Strauss",
# Claude syndrome – Henri Claude
"Claude",
# Claude Bernard–Horner syndrome – Claude Bernard, Johann Friedrich Horner
"Claude Bernard–Horner",
# Clerambault syndrome – Gaëtan Gatian de Clerambault
"Clerambault",
# Clerambault–Kandinsky syndrome
# – Gaëtan Gatian de Clerambault, Victor Khrisanfovich Kandinsky
"Clerambault–Kandinsky",
# Coats' disease – George Coats
"Coats",
# Cock's peculiar tumour – Edward Cock
"Cock",
# Cockayne syndrome – Edward Alfred Cockayne
"Cockayne",
# Coffin–Lowry syndrome – Grange Coffin, Robert Lowry
"Coffin–Lowry",
# Coffin–Siris syndrome – Grange Coffin, Evelyn Siris
"Coffin–Siris",
# Cogan's syndrome – David Glendenning Cogan
"Cogan",
# Cohen syndrome – Michael Cohen
"Cohen",
# Collet–Sicard syndrome – Frédéric Justin Collet, Jean-Athanase Sicard
"Collet–Sicard",
# Concato disease – Luigi Maria Concato
"Concato",
# Conn's syndrome – Jerome Conn
"Conn",
# Cooley's anemia – Thomas Benton Cooley
"Cooley",
# Cori Disease – Carl Ferdinand Cori, Gerty Cori
"Cori",
# Cornelia de Lange syndrome – Cornelia Catharina de Lange
"Cornelia de Lange",
# Costello syndrome – Jack Costello
"Costello",
# Costen syndrome – James Bray Costen
"Costen",
# Cotard's Syndrome – Jules Cotard
"Cotard",
# Cowden's syndrome (a.k.a. Cowden's disease) – Rachel Cowden
"Cowden",
# Crigler–Najjar syndrome – John Fielding Crigler, Victor Assad Najjar
"Crigler–Najjar",
# Creutzfeldt–Jakob disease – Hans Gerhard Creutzfeldt, Alfons Maria Jakob
"Creutzfeldt–Jakob",
# Crocq–Cassirer syndrome – Jean Crocq, Richard Cassirer
"Crocq–Cassirer",
# Crohn's disease – Burrill Bernard Crohn
"Crohn",
# Cronkhite–Canada syndrome – L. W. Cronkhite, Wilma Canada
"Cronkhite–Canada",
# Crouzon syndrome – Octave Crouzon
"Crouzon",
# Cruveilhier–Baumgarten disease
# – Jean Cruveilhier, Paul Clemens von Baumgarten
"Cruveilhier–Baumgarten",
# Cruz disease – Osvaldo Gonçalves Cruz
"Cruz",
# Curling's ulcer – Thomas Blizard Curling
"Curling",
# Curschmann–Batten–Steinert syndrome
# – Hans Curschmann, Frederick Batten, Hans Gustav Steinert
"Curschmann–Batten–Steinert",
# Cushing's disease – Harvey Cushing
"Cushing",
# Cushing's ulcer – Harvey Cushing
"Cushing",
# -------------------------------------------------------------------------
# D
# -------------------------------------------------------------------------
# Da Costa syndrome – Jacob Mendez Da Costa
"Da Costa",
# Dalrymple disease – John Dalrymple
"Dalrymple",
# Danbolt–Closs syndrome
# – Niels Christian Gauslaa Danbolt, Karl Philipp Closs
"Danbolt–Closs",
# Dandy–Walker syndrome – Walter Dandy, Arthur Earl Walker
"Dandy–Walker",
# De Clérambault syndrome – Gaëtan Gatian de Clérambault
"de Clérambault",
# de Quervain disease – Fritz de Quervain
"de Quervain",
# de Quervain thyroiditis – Fritz de Quervain
"de Quervain",
# Dejerine–Sottas disease – Joseph Jules Dejerine, Jules Sottas
"Dejerine–Sottas",
# Dennie–Marfan syndrome – Charles Clayton Dennie, Antoine Marfan
"Dennie–Marfan",
# Dent disease – Charles Enrique Dent
"Dent",
# Denys–Drash syndrome – Pierre Denys, Allan L. Drash
"Denys–Drash",
# Dercum disease – Francis Xavier Dercum
"Dercum",
# Devic disease (a.k.a. Devic syndrome) – Eugène Devic
"Devic",
# Diamond–Blackfan anemia – Louis Diamond, Kenneth Blackfan
"Diamond–Blackfan",
# DiGeorge syndrome – Angelo DiGeorge
"DiGeorge",
# Di Guglielmo disease – Giovanni Di Guglielmo
"Di Guglielmo",
# Diogenes syndrome (a.k.a. Havisham syndrome, Miss Havisham syndrome,
# Plyushkin syndrome) – Diogenes of Sinope (the particular usage, Diogenes
# syndrome, is deemed to be a misnomer)
"Diogenes",
# Doege–Potter syndrome – Karl W. Doege, Roy P. Potter
"Doege–Potter",
# Donnai–Barrow syndrome – Dian Donnai, Margaret Barrow
"Donnai–Barrow",
# Donovanosis – Charles Donovan
"Donovanosis",
# Down syndrome – John Langdon Down
"Down",
# Dravet syndrome – Charlotte Dravet
"Dravet",
# Dressler syndrome – William Dressler
"Dressler",
# Duane syndrome – Alexander Duane
"Duane",
# Dubin–Johnson syndrome
"Dubin–Johnson",
# Duchenne–Aran disease
# – Guillaume-Benjamin-Amand Duchenne de Boulogne, François-Amilcar Aran
"Duchenne–Aran",
# Duchenne muscular dystrophy
# – Guillaume-Benjamin-Amand Duchenne de Boulogne
"Duchenne",
# Dukes disease – Clement Dukes
"Dukes",
# Duncan disease (a.k.a. Duncan syndrome, Purtilo syndrome)
# – David Theodore Purtilo
"Duncan",
# Dupuytren contracture (a.k.a. Dupuytren disease)
# – Baron Guillaume Dupuytren
"Dupuytren",
# Duroziez disease – Paul Louis Duroziez
"Duroziez",
# -------------------------------------------------------------------------
# E
# -------------------------------------------------------------------------
# Eales disease – Henry Eales
"Eales",
# Early-onset Alzheimer disease – Alois Alzheimer
"Alzheimer",
# Ebstein's anomaly – Wilhelm Ebstein
"Ebstein",
# Edwards syndrome – John H. Edwards
"Edwards",
# Ehlers–Danlos syndrome – Edvard Ehlers, Henri-Alexandre Danlos
"Ehlers–Danlos",
# Ehrlichiosis – Paul Ehrlich
# ... noun, not name
# Eisenmenger's syndrome – Victor Eisenmenger
"Eisenmenger",
# Ekbom's Syndrome – Karl-Axel Ekbom
"Ekbom",
# Emanuel syndrome – Beverly Emanuel
"Emanuel",
# Emery–Dreifuss muscular dystrophy
# – Alan Eglin H. Emery, Fritz E. Dreifuss
"Emery–Dreifuss",
# Erb–Duchenne palsy (a.k.a. Erb palsy)
# – Wilhelm Heinrich Erb, Guillaume-Benjamin-Amand Duchenne de Boulogne
"Erb–Duchenne",
# Erdheim–Chester disease – Jakob Erdheim, William Chester
"Erdheim–Chester",
# Evans syndrome – R. S. Evans
"Evans",
# Extramammary Paget's disease – Sir James Paget
"Paget",
# -------------------------------------------------------------------------
# F
# -------------------------------------------------------------------------
# Fabry disease – Johannes Fabry
"Fabry",
# Fanconi anemia – Guido Fanconi
"Fanconi",
# Fanconi syndrome – Guido Fanconi
"Fanconi",
# Farber disease – Sidney Farber
"Farber",
# Felty's syndrome – Augustus Roi Felty
"Felty",
# Fitz-Hugh–Curtis syndrome – Thomas Fitz-Hugh Jr., Arthur Hale Curtis
"Fitz-Hugh–Curtis",
# Foix–Alajouanine syndrome – Charles Foix, Théophile Alajouanine
"Foix–Alajouanine",
# Fournier gangrene – Jean Alfred Fournier
"Fournier",
# Forbes–Albright syndrome – Anne E. Forbes, Fuller Albright
"Forbes–Albright",
# WAS: Forbe's Disease – Gilbert Burnett Forbes
# ... typo in Wikipedia; name is Forbes; FIXED 2018-03-27
# ... see also https://rarediseases.org/rare-diseases/forbes-disease/
"Forbes",
# Fregoli delusion – Leopoldo Fregoli, an Italian actor
"Fregoli",
# Frey's syndrome – Lucja Frey-Gottesman, Jewish neurosurgeon
"Frey",
# Friedreich's ataxia – Nikolaus Friedreich
"Friedreich",
# Fritsch–Asherman syndrome (a.k.a. Fritsch syndrome)
# – Heinrich Fritsch, Joseph Asherman
"Fritsch–Asherman",
# Fuchs' dystrophy – Ernst Fuchs
"Fuchs",
# -------------------------------------------------------------------------
# G
# -------------------------------------------------------------------------
# Ganser syndrome – Sigbert Ganser
"Ganser",
# Gaucher's disease – Philippe Gaucher
"Gaucher",
# Gerbec–Morgagni–Adams–Stokes syndrome (a.k.a. Adams–Stokes syndrome,
# Gerbezius–Morgagni–Adams–Stokes syndrome, Stokes–Adams syndrome)
# – Marko Gerbec, Giovanni Battista Morgagni, Robert Adams, William Stokes
"Gerbec–Morgagni–Adams–Stokes",
# Gerbezius–Morgagni–Adams–Stokes syndrome (a.k.a. Adams–Stokes syndrome,
# Gerbec–Morgagni–Adams–Stokes syndrome, Stokes–Adams syndrome)
# – Marko Gerbec (Latinized as Gerbezius), Giovanni Battista Morgagni,
# Robert Adams, William Stokes
"Gerbezius–Morgagni–Adams–Stokes",
# Ghon's complex – Anton Ghon
"Ghon",
# Ghon focus – Anton Ghon
"Ghon",
# Gilbert syndrome – Augustin Nicolas Gilbert
"Gilbert",
# Gitelman syndrome – Hillel J. Gitelman
"Gitelman",
# Glanzmann's thrombasthenia – Eduard Glanzmann
"Glanzmann",
# Goodpasture's syndrome – Ernest Goodpasture
"Goodpasture",
# Goldenhar syndrome – Maurice Goldenhar
"Goldenhar",
# Gorlin–Goltz syndrome – Robert J. Gorlin, Robert W. Goltz
"Gorlin–Goltz",
# Gouverneur’s syndrome – R. Gouverneur
"Gouverneur",
# Graves' disease – Robert James Graves
"Graves",
# Graves–Basedow disease – Robert James Graves, Karl Adolph von Basedow
"Graves–Basedow",
# Grawitz tumor – Paul Albert Grawitz
"Grawitz",
# Grinker myelinopathy – Roy R. Grinker, Sr.
"Grinker",
# Gruber syndrome – Georg Gruber
"Gruber",
# Guillain–Barré syndrome – Georges Guillain, Jean Alexandre Barré
"Guillain–Barré",
# Gunther's disease – Hans Gunther
"Gunther",
# -------------------------------------------------------------------------
# H
# -------------------------------------------------------------------------
# Hailey–Hailey disease – Hugh Edward Hailey, William Howard Hailey
"Hailey–Hailey",
# Hallervorden–Spatz disease – Julius Hallervorden, Hugo Spatz
"Hallervorden–Spatz",
# Hand–Schüller–Christian disease
# – Alfred Hand, Artur Schüller, Henry Asbury Christian
"Hand–Schüller–Christian",
# Hansen's disease – Gerhard Armauer Hansen
"Hansen",
# Hardikar Syndrome – Winita Hardikar
"Hardikar",
# Hartnup disease (a.k.a. Hartnup disorder) – Hartnup family of London, U.K.
"Hartnup",
# Hashimoto thyroiditis – Hakaru Hashimoto
"Hashimoto",
# Havisham syndrome (a.k.a. Diogenes syndrome, Miss Havisham syndrome, and
# Plyushkin syndrome)
# – Miss Havisham, a fictional character in Charles Dickens' Great
# Expectations
"Havisham",
# Hecht–Scott syndrome – Jacqueline T. Hecht, Charles I. Scott, Jr
"Hecht–Scott",
# Henoch–Schönlein purpura – Eduard Heinrich Henoch, Johann Lukas Schönlein
"Henoch–Schönlein",
# Heyde's syndrome – Edward C. Heyde
"Heyde",
# Hirschsprung disease – Harald Hirschsprung
"Hirschsprung",
# Hodgkin disease – Thomas Hodgkin
"Hodgkin",
# Holt–Oram syndrome – Mary Clayton Holt, Samuel Oram
"Holt–Oram",
# Horner syndrome – Johann Friedrich Horner
"Horner",
# Horton headache – Bayard Taylor Horton
"Horton",
# Huntington's disease – George Huntington
"Huntington",
# Hurler syndrome – Gertrud Hurler
"Hurler",
# Hurler–Scheie syndrome – Gertrud Hurler, Harold Glendon Scheie
"Hurler–Scheie",
# Hutchinson–Gilford progeria syndrome
# – Jonathan Hutchinson, Hastings Gilford
"Hutchinson–Gilford",
# -------------------------------------------------------------------------
# I
# -------------------------------------------------------------------------
# Illig syndrome – Ruth Illig
"Illig",
# Irvine–Gass syndrome – S. Rodman Irvine, J. Donald M. Gass
"Irvine–Gass",
# -------------------------------------------------------------------------
# J
# -------------------------------------------------------------------------
# Jaeken's disease – Jaak Jaeken
"Jaeken",
# Jakob–Creutzfeldt disease – Alfons Maria Jakob, Hans Gerhard Creutzfeldt
"Jakob–Creutzfeldt",
# Jarvi–Nasu–Hakola disease – O. Jarvi, T. Nasu, P. Hakola
"Jarvi–Nasu–Hakola",
# Johanson–Blizzard syndrome – Ann Johanson, Robert M. Blizzard
"Johanson–Blizzard",
# Julian syndrome – Frankie Julian, Ron Kendall, Abe Charara
"Julian",
# -------------------------------------------------------------------------
# K
# -------------------------------------------------------------------------
# Kahler's disease – Otto Kahler
"Kahler",
# Kallmann syndrome – Franz Josef Kallmann
"Kallmann",
# Kanner syndrome – Leo Kanner
"Kanner",
# Kaposi sarcoma – Moritz Kaposi
"Kaposi",
# Kartagener syndrome – Manes Kartagener
"Kartagener",
# Kasabach–Merritt syndrome
# – Haig Haigouni Kasabach, Katharine Krom Merritt
"Kasabach–Merritt",
# Kashin–Beck disease – Nicolai Ivanowich Kashin, Evgeny Vladimirovich Bek
"Kashin–Beck",
# Kawasaki disease – Tomisaku Kawasaki
"Kawasaki",
# Kearns–Sayre syndrome – Thomas P. Kearns, George Pomeroy Sayre
"Kearns–Sayre",
# Kennedy's disease – William R. Kennedy
"Kennedy",
# Kennedy's syndrome – Robert Foster Kennedy
"Kennedy",
# Kenny syndrome – Frederic Marshal Kenny
"Kenny",
# Kienbock's disease – Robert Kienböck
"Kienbock",
# Kikuchi's disease – Masahiro Kikuchi, Y. Fujimoto
"Kikuchi",
# Kimmelstiel–Wilson disease – Paul Kimmelstiel, Clifford Wilson
"Kimmelstiel–Wilson",
# Kimura's disease – T. Kimura
"Kimura",
# King–Kopetzky syndrome – P. F. King, Samuel J. Kopetzky
"King–Kopetzky",
# Kinsbourne syndrome – Marcel Kinsbourne
"Kinsbourne",
# Kjer's optic neuropathy – Poul Kjer
"Kjer",
# Klatskin's tumor – Gerald Klatskin
"Klatskin",
# Klinefelter syndrome – Harry Klinefelter
"Klinefelter",
# Klüver–Bucy syndrome – Heinrich Klüver, Paul Bucy
"Klüver–Bucy",
# Köhler disease – Alban Köhler
"Köhler",
# Korsakoff syndrome – Sergei Korsakoff
"Korsakoff",
# Kounis syndrome – Nicholas Kounis
"Kounis",
# Krabbe's disease – Knud Haraldsen Krabbe
"Krabbe",
# Krukenberg tumor – Friedrich Ernst Krukenberg
"Krukenberg",
# Kugelberg–Welander disease – Erik Klas Henrik Kugelberg, Lisa Welander
"Kugelberg–Welander",
# Kuttner's tumor – Hermann Küttner
"Kuttner",
# -------------------------------------------------------------------------
# L
# -------------------------------------------------------------------------
# Lafora's disease – Gonzalo Rodriguez Lafora
"Lafora",
# Laron syndrome – Zvi Laron
"Laron",
# Laurence–Moon syndrome – John Zachariah Laurence, Robert Charles Moon
"Laurence–Moon",
# Laurence–Moon–Bardet–Biedl syndrome (a.k.a. Laurence–Moon–Biedl–Bardet
# syndrome, a.k.a. Laurence–Moon–Biedl syndrome)
# – John Zachariah Laurence, Robert Charles Moon, Georges Bardet,
# Arthur Biedl – all now deemed invalid constructs, see instead
# Bardet–Biedl syndrome
"Laurence–Moon–Bardet–Biedl",
# Legg–Calvé–Perthes syndrome – Arthur Legg, Jacques Calvé, Georg Perthes
"Legg–Calvé–Perthes",
# Leigh's disease – Denis Archibald Leigh
"Leigh",
# Leiner syndrome – Karl Leiner, André Moussous
"Leiner",
# Leishmaniasis – Sir William Boog Leishman
# ... noun, not name
# Lejeune’s syndrome – Jérôme Lejeune
"Lejeune",
# Lemierre's syndrome – André Lemierre
"Lemierre",
# Lenègre's disease – Jean Lenègre
"Lenègre",
# Lesch–Nyhan syndrome – Michael Lesch, William Leo Nyhan
"Lesch–Nyhan",
# Letterer–Siwe disease – Erich Letterer, Sture Siwe
"Letterer–Siwe",
# Lev's disease – Maurice Lev, Jean Lenègre
"Lev",
# Lewandowsky–Lutz dysplasia – Felix Lewandowsky, Wilhelm Lutz
"Lewandowsky–Lutz",
# Li–Fraumeni syndrome – Frederick Pei Li, Joseph F. Fraumeni, Jr.
"Li–Fraumeni",
# Libman–Sacks disease – Emanuel Libman, Benjamin Sacks
"Libman–Sacks",
# Liddle's syndrome – Grant Liddle
"Liddle",
# Lisfranc injury (a.k.a. Lisfranc dislocation, a.k.a. Lisfranc fracture)
# – Jacques Lisfranc de St. Martin
"Lisfranc",
# Listeriosis – Joseph Lister
# ... noun, not name
# Lobomycosis – Jorge Lobo
# ... noun, not name
# Löffler's eosinophilic endocarditis – Wilhelm Löffler
"Löffler",
# Löfgren syndrome – Sven Halvar Löfgren
"Löfgren",
# Lou Gehrig's disease – Lou Gehrig
"Lou Gehrig",
# Lowe Syndrome – Charles Upton Lowe
"Lowe",
# Ludwig's angina – Wilhelm Friedrich von Ludwig
"Ludwig",
# Lynch syndrome – Henry T. Lynch
"Lynch",
# -------------------------------------------------------------------------
# M
# -------------------------------------------------------------------------
# Machado–Joseph disease (a.k.a. Machado–Joseph Azorean disease, Machado
# disease, Joseph's disease) – named for William Machado and Antone Joseph,
# patriarchs of families in which it was first identified
"Machado–Joseph",
# Marie–Foix–Alajouanine syndrome
# – Pierre Marie, Charles Foix, Théophile Alajouanine
"Marie–Foix–Alajouanine",
# Maladie de Charcot – Jean-Martin Charcot
"Charcot",
# Mallory–Weiss syndrome – G. Kenneth Mallory, Soma Weiss
"Mallory–Weiss",
# Mansonelliasis – Sir Patrick Manson
# ... noun, not name
# Marburg multiple sclerosis – Otto Marburg
"Marburg",
# Marfan syndrome – Antoine Marfan
"Marfan",
# Marshall syndrome – Richard E. Marshall
"Marshall",
# Marshall–Smith–Weaver syndrome (a.k.a. Marshall–Smith syndrome, Greig
# syndrome) – Richard E. Marshall, David Weyhe Smith
"Marshall–Smith–Weaver",
# Not otherwise listed; it is a person; https://www.omim.org/entry/175700
"Greig",
# Martin–Albright syndrome (a.k.a. Albright IV syndrome)
# – August E. Martin, Fuller Albright
"Martin–Albright",
# May–Hegglin anomaly – Richard May, Robert Hegglin
"May–Hegglin",
# Maydl's hernia — Karel Maydl
"Maydl",
# Mazzotti reaction – Luigi Mazzotti
"Mazzotti",
# McArdle's Disease – Brian McArdle
"McArdle",
# McCune–Albright syndrome – Donovan James McCune, Fuller Albright
"McCune–Albright",
# Meckel–Gruber syndrome (a.k.a. Meckel syndrome)
# – Johann Meckel, Georg Gruber
"Meckel–Gruber",
# Meigs' syndrome – Joe Vincent Meigs
"Meigs",
# Ménétrier's disease – Pierre Eugène Ménétrier
"Ménétrier",
# Ménière’s disease – Prosper Ménière
"Ménière",
# Menkes disease – John Hans Menkes
"Menkes",
# Middleton syndrome – Stephen John Middleton
"Middleton",
# Mikulicz's disease – Jan Mikulicz-Radecki
"Mikulicz",
# Miss Havisham syndrome (a.k.a. Diogenes syndrome, Havisham syndrome, and
# Plyushkin syndrome)
# – Miss Havisham, a fictional character in Charles Dickens' Great
# Expectations
"Havisham",
# Mondor's disease – Henri Mondor
"Mondor",
# Monge's disease – Carlos Monge
"Monge",
# Mortimer's disease – First documented by Jonathan Hutchinson, named for
# his patient Mrs. Mortimer
"Mortimer",
# Moschcowitz syndrome – Eli Moschcowitz
"Moschcowitz",
# Mowat–Wilson syndrome – David Mowat, Meredith Wilson
"Mowat–Wilson",
# Mucha–Habermann disease – Viktor Mucha, Rudolf Habermann
"Mucha–Habermann",
# Mulvihill–Smith syndrome – John J. Mulvihill, David Weyhe Smith
"Mulvihill–Smith",
# Munchausen syndrome – Baron Munchausen
"Munchausen",
# Munchausen syndrome by proxy – Baron Munchausen
# Myhre–Riley–Smith syndrome – S. Myhre, Harris D. Riley, Jr.
"Myhre–Riley–Smith",
# -------------------------------------------------------------------------
# N
# -------------------------------------------------------------------------
# Nasu–Hakola disease – T. Nasu, P. Hakola
"Nasu–Hakola",
# Non-Hodgkin's lymphoma – Thomas Hodgkin
"Hodgkin",
# Noonan syndrome – Jacqueline Noonan
"Noonan",
# -------------------------------------------------------------------------
# O
# -------------------------------------------------------------------------
# Ormond's disease – John Kelso Ormond
"Ormond",
# Osgood–Schlatter disease – Robert Bayley Osgood, Carl B. Schlatter
"Osgood–Schlatter",
# Osler–Weber–Rendu syndrome
# – William Osler, Frederick Parkes Weber, Henri Jules Louis Marie Rendu
"Osler–Weber–Rendu",
# -------------------------------------------------------------------------
# P
# -------------------------------------------------------------------------
# Paget's disease of bone (a.k.a. Paget's disease) – James Paget
# Paget's disease of the breast (a.k.a. Paget's disease of the nipple)
# – James Paget
# Paget's disease of the penis – James Paget
# Paget's disease of the vulva – James Paget
"Paget",
# Paget–Schroetter disease (a.k.a. Paget–Schroetter syndrome and Paget–von
# Schrötter disease) – James Paget, Leopold von Schrötter
"Paget–Schroetter",
# Parkinson's disease – James Parkinson
"Parkinson",
# Patau syndrome – Klaus Patau
"Patau",
# Pearson syndrome – Howard Pearson
"Pearson",
# Pelizaeus–Merzbacher disease
# – Friedrich Christoph Pelizaeus, Ludwig Merzbacher
"Pelizaeus–Merzbacher",
# Perthes syndrome – Arthur Legg, Jacques Calvé, Georg Perthes
"Perthes",
# Peutz–Jeghers syndrome – Jan Peutz, Harold Jeghers
"Peutz–Jeghers",
# Peyronie's disease – François Gigot de la Peyronie
"Peyronie",
# Pfaundler–Hurler syndrome – Meinhard von Pfaundler, Gertrud Hurler
"Pfaundler–Hurler",
# Pick's disease – Arnold Pick
"Pick",
# Pickardt syndrome – C. R. Pickardt
"Pickardt",
# Plummer's disease – Henry Stanley Plummer
"Plummer",
# Plummer–Vinson syndrome (a.k.a. Kelly–Patterson syndrome,
# Paterson–Brown–Kelly syndrome, and Waldenstrom–Kjellberg syndrome)
# – Henry Stanley Plummer and Porter Paisley Vinson
"Plummer–Vinson",
# Plyushkin syndrome (a.k.a. Diogenes syndrome, Havisham syndrome, and Miss
# Havisham syndrome) – Stepan Plyushkin, a fictional character in Nikolai
# Gogol's Dead Souls
"Plyushkin",
# Poland's syndrome – Alfred Poland
"Poland",
# Pompe's disease – Johann Cassianius Pompe
"Pompe",
# Pott's disease – Percivall Pott
# Pott's puffy tumor – Percivall Pott
"Pott",
# Potocki–Lupski syndrome – Lorraine Potocki, James R. Lupski
"Potocki–Lupski",
# Potocki–Shaffer syndrome – Lorraine Potocki, Lisa G. Shaffer
"Potocki–Shaffer",
# Potter sequence – Edith Potter
"Potter",
# Prader–Willi syndrome – Andrea Prader, Heinrich Willi
"Prader–Willi",
# Prasad's Syndrome – Ashok Prasad
"Prasad",
# Primrose syndrome – D. A. Primrose
"Primrose",
# Prinzmetal angina – Myron Prinzmetal
"Prinzmetal",
# Purtilo syndrome (a.k.a. Duncan disease and Duncan syndrome)
# – David Theodore Purtilo
"Purtilo",
# -------------------------------------------------------------------------
# Q
# -------------------------------------------------------------------------
# Quarelli syndrome – G. Quarelli
"Quarelli",
# -------------------------------------------------------------------------
# R
# -------------------------------------------------------------------------
# Ramsay Hunt syndrome – James Ramsay Hunt
"Ramsay Hunt",
# Ranke complex – Karl Ernst Ranke
"Ranke",
# Raymond Céstan syndrome – Étienne Jacques Marie Raymond Céstan
"Raymond Céstan",
# Raynaud's disease – Maurice Raynaud
"Raynaud",
# Refsum's disease – Sigvald Bernhard Refsum
"Refsum",
# Reiter's syndrome – Hans Conrad Julius Reiter (This is now a discouraged
# eponym due to Dr. Reiter's Nazi party ties. The disease is now known as
# reactive arthritis.)
"Reiter",
# Rett Syndrome – Andreas Rett
"Rett",
# Reye's syndrome – R. Douglas Reye
"Reye",
# Rickettsiosis – Howard Taylor Ricketts
# ... noun, not name
# Riddoch syndrome – Dr. George Riddoch
"Riddoch",
# Riedel's thyroiditis – Bernhard Riedel
"Riedel",
# Riggs' disease – John M. Riggs (dentist)
"Riggs",
# Riley–Day syndrome – Conrad Milton Riley, Richard Lawrence Day
"Riley–Day",
# Riley–Smith syndrome – Harris D. Riley, Jr., William R. Smith
"Riley–Smith",
# Ritter's disease – Baron Gottfried Ritter von Rittershain
"Ritter",
# Robles disease – Rodolfo Robles
"Robles",
# Roger's disease – Henri Louis Roger
"Roger",
# Rotor syndrome – Arturo Belleza Rotor
"Rotor",
# Rubinstein–Taybi syndrome – Jack Herbert Rubinstein, Hooshang Taybi
"Rubinstein–Taybi",
# Russell–Silver syndrome – Alexander Russell, Henry Silver
"Russell–Silver",
# Ruvalcaba–Myhre syndrome – Rogelio H. A. Ruvalcaba, S. Myhre
"Ruvalcaba–Myhre",
# Ruvalcaba–Myhre–Smith syndrome
# – Rogelio H. A. Ruvalcaba, S. Myhre, David Weyhe Smith
"Ruvalcaba–Myhre–Smith",
# Ruzicka–Goerz–Anton syndrome – T. Ruzicka, G. Goerz, I. Anton-Lamprecht
"Ruzicka–Goerz–Anton",
# -------------------------------------------------------------------------
# S
# -------------------------------------------------------------------------
# Saint's triad – C. F. M. Saint
"Saint",
# Sandhoff disease – Konrad Sandhoff
"Sandhoff",
# Sandifer syndrome – Paul Sandifer
"Sandifer",
# Schamberg's disease – Jay Frank Schamberg
"Schamberg",
# Scheie syndrome – Harold Glendon Scheie
"Scheie",
# Scheuermann's disease – Holger Scheuermann
"Scheuermann",
# Schilder's disease – Paul Ferdinand Schilder
"Schilder",
# Schinzel–Giedion syndrome – Albert Schinzel, Andreas Giedion
"Schinzel–Giedion",
# Schnitzler syndrome – Liliane Schnitzler
"Schnitzler",
# Seaver Cassidy syndrome – Laurie Seaver, Suzanne Cassidy
"Seaver–Cassidy"
# Seligmann's disease – Maxime Seligmann
"Seligmann",
# Sever's disease – J. W. Sever
"Sever",
# Shabbir syndrome – G. Shabbir
"Shabbir",
# Sheehan's syndrome – Harold Leeming Sheehan
"Sheehan",
# Shprintzen's syndrome – Robert Shprintzen
"Shprintzen",
# Shwachman–Bodian–Diamond syndrome
# – Harry Shwachman, Martin Bodian, Louis Klein Diamond
"Shwachman–Bodian–Diamond",
# Silver–Russell syndrome (a.k.a. Silver–Russell dwarfism)
# – Henry Silver, Alexander Russell
"Silver–Russell",
# Simmonds' syndrome – Moritz Simmonds
"Simmonds",
# Sipple's syndrome – John H. Sipple
"Sipple",
# Sjögren's syndrome – Henrik Sjögren
"Sjögren",
# Sjögren–Larsson syndrome – Torsten Sjögren, Tage Konrad Leopold Larsson
"Sjögren–Larsson",
# Smith–Lemli–Opitz syndrome – David Weyhe Smith
"Smith–Lemli–Opitz",
# Stargardt disease – Karl Stargardt
"Stargardt",
# Steele–Richardson–Olszewski syndrome
# – John C. Steele, J. Clifford Richardson, Jerzy Olszewski
"Steele–Richardson–Olszewski",
# Stevens–Johnson syndrome – Albert Mason Stevens, Frank Chambliss Johnson
"Stevens–Johnson",
# Sturge–Weber syndrome – William Allen Sturge, Frederick Parkes Weber
"Sturge–Weber",
# Still's disease – Sir George Frederic Still
"Sturge–Weber",
# Susac's syndrome – John Susac
"Susac",
# Sutton's disease – Richard Lightburn Sutton
"Sutton",
# -------------------------------------------------------------------------
# T
# -------------------------------------------------------------------------
# TAN syndrome – Tan Aik Kah
"TAN",
# ... Odd one! https://en.wikipedia.org/wiki/TAN_syndrome
# Takayasu's arteritis – Mikito Takayasu
"Takayasu",
# Tay–Sachs disease – Warren Tay, Bernard Sachs
"Tay–Sachs",
# Theileriosis – Sir Arnold Theiler
# ... noun, not name
# Thomsen's disease – Julius Thomsen
"Thomsen",
# Tietz syndrome – Walter Tietz
"Tietz",
# Tietze's syndrome – Alexander Tietze
"Tietze",
# Tourette syndrome – Georges Albert Édouard Brutus Gilles de la Tourette
"Tourette",
# Treacher Collins syndrome – Edward Treacher Collins
"Treacher Collins",
# Turcot syndrome – Jacques Turcot
"Turcot",
# Turner's syndrome – Henry Turner
"Turner",
# -------------------------------------------------------------------------
# U
# -------------------------------------------------------------------------
# Unverricht–Lundborg disease
# – Heinrich Unverricht, Herman Bernhard Lundborg
"Unverricht–Lundborg",
# Usher syndrome – Charles Usher
"Usher",
# -------------------------------------------------------------------------
# V
# -------------------------------------------------------------------------
# Valentino syndrome – Rudolph Valentino
"Valentino",
# Verner Morrison syndrome – J. V. Verner, A. B. Morrison
"Verner–Morrison",
# Vincent's angina – Henri Vincent
"Vincent",
# Virchow's syndrome – Rudolf Virchow
"Virchow",
# Von Gierke's disease – Edgar von Gierke
"von Gierke",
# Von Hippel–Lindau disease – Eugen von Hippel, Arvid Vilhelm Lindau
"von Hippel–Lindau",
# Von Recklinghausen's disease – Friedrich Daniel von Recklinghausen
"von Recklinghausen",
# Von Willebrand's disease – Erik Adolf von Willebrand
"von Willebrand",
# -------------------------------------------------------------------------
# W
# -------------------------------------------------------------------------
# Waardenburg syndrome – Petrus Johannes Waardenburg
"Waardenburg",
# Waldenstrom–Kjellberg syndrome – Jan G. Waldenström, S. R. Kjellberg
"Waldenström–Kjellberg",
# Waldenstrom macroglobulinaemia – Jan G. Waldenström
"Waldenström",
# Warkany syndrome 1 – Joseph Warkany
"Warkany",
# Warkany syndrome 2 – Joseph Warkany
"Warkany",
# Warthin's tumor – Aldred Scott Warthin
"Warthin",
# Waterhouse–Friderichsen syndrome – Rupert Waterhouse, Carl Friderichsen
"Waterhouse–Friderichsen",
# Watson syndrome – G. H. Watson
"Watson",
# Weber–Christian disease – Frederick Parkes Weber, Henry Asbury Christian
"Weber–Christian",
# Wegener's granulomatosis – Friedrich Wegener (This usage is now formally
# discouraged by professional medical societies due to the Nazi
# associations of the eponymous physician. The disease is now known as
# granulomatosis with polyangiitis.)
"Wegener",
# Weil's disease – Adolf Weil
"Weil",
# Welander distal myopathy – Lisa Welander
"Welander",
# Wells syndrome – George Crichton Wells
"Wells",
# Werdnig–Hoffmann disease – Guido Werdnig, Johann Hoffmann
"Werdnig–Hoffmann",
# Wermer's syndrome – Paul Wermer
"Wermer",
# Werner's syndrome – Otto Werner
"Werner",
# Wernicke's encephalopathy – Karl Wernicke
"Wernicke",
# Westerhof syndrome – Wiete Westerhof
"Westerhof",
# Westerhof–Beemer–Cormane syndrome
# – Wiete Westerhof, Frederikus Antonius Beemer, R. H. Cormane
"Westerhof–Beemer–Cormane",
# Whipple's disease – George Hoyt Whipple
"Whipple",
# Williams syndrome – J. C. P. Williams [typo fixed in Wikipedia]
"Williams",
# Wilms' tumor – Max Wilms
"Wilms",
# Wilson's disease – Samuel Alexander Kinnier Wilson
"Wilson",
# Willis–Ekbom syndrome – Thomas Willis, Karl-Axel Ekbom
"Willis–Ekbom",
# Wiskott–Aldrich syndrome – Alfred Wiskott, Robert Aldrich
"Wiskott–Aldrich",
# Wittmaack–Ekbom syndrome – Theodur Wittmaack, Karl-Axel Ekbom
"Wittmaack–Ekbom",
# Wohlfart–Kugelberg–Welander disease
# – Karl Gunnar Vilhelm Wohlfart, Erik Klas Henrik Kugelberg, Lisa Welander
"Wohlfart–Kugelberg–Welander",
# Wolff–Parkinson–White syndrome
# – Louis Wolff, John Parkinson, Paul Dudley White
"Wolff–Parkinson–White",
# Wolman disease – Moshe Wolman
"Wolman",
# -------------------------------------------------------------------------
# Y
# -------------------------------------------------------------------------
# Yesudian syndrome – Paul Yesudian
"Yesudian",
# -------------------------------------------------------------------------
# Z
# -------------------------------------------------------------------------
# Zahorsky syndrome I – John Zahorsky (a.k.a. John Von Zahorsky)
"Zahorsky",
# Zahorsky syndrome II (a.k.a. Mikulicz' Aphthae, Mikulicz' Disease, Sutton
# disease 2, Von Mikulicz' Aphthae, Von Zahorsky disease)
# – John Zahorsky (a.k.a. John Von Zahorsky)
"Zahorsky",
# Zellweger syndrome – Hans Ulrich Zellweger
"Zellweger",
# Zenker diverticulum – Friedrich Albert von Zenker
"Zenker",
# Zenker paralysis – Friedrich Albert von Zenker
"Zenker",
# Zieve syndrome – Leslie Zieve
"Zieve",
# Zimmermann–Laband syndrome (a.k.a. Laband syndrome, Laband–Zimmermann
# syndrome) – Karl Wilhelm Zimmermann
"Zimmermann–Laband",
# Zollinger–Ellison syndrome – Robert Zollinger, Edwin Ellison
"Zollinger–Ellison",
# Zondek–Bromberg–Rozin syndrome (a.k.a. Zondek syndrome)
# – Bernhard Zondek, Yehuda M. Bromberg, R. Rozin
"Zondek–Bromberg–Rozin",
# Zuelzer syndrome – Wolf William Zuelzer
"Zuelzer",
# Zuelzer–Kaplan syndrome II (a.k.a. Crosby syndrome)
# – Wolf William Zuelzer, E. Kaplan
"Zuelzer–Kaplan",
# Zuelzer–Ogden syndrome – Wolf William Zuelzer, Frank Nevin Ogden
"Zuelzer–Ogden",
# Zumbusch psoriasis – Leo Ritter von Zumbusch
"Zumbusch",
# Zumbusch syndrome (a.k.a. Csillag disease, Csillag syndrome, Hallopeau
# disease, von Zumbusch syndrome) – Leo Ritter von Zumbusch
"Zumbusch",
]
for _composite in SIMPLE_EPONYM_LIST:
_add_eponym(_composite)
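# Illustrative sketch — an assumption, not the _add_eponym implementation
# defined earlier in this module — of one plausible way to fold a name list
# like SIMPLE_EPONYM_LIST into a single case-sensitive scrubbing regex:
#
#     import re
#
#     def build_eponym_regex(names):
#         # Longest-first alternation so "Zuelzer–Kaplan" wins over "Zuelzer".
#         escaped = sorted((re.escape(n) for n in names), key=len, reverse=True)
#         return re.compile(r"\b(?:" + "|".join(escaped) + r")\b")
#
#     scrubber = build_eponym_regex(SIMPLE_EPONYM_LIST)
#     scrubber.sub("[EPONYM]", "Diagnosis: Down syndrome")
#     # -> 'Diagnosis: [EPONYM] syndrome'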
|
RudolfCardinal/crate
|
crate_anon/anonymise/eponyms.py
|
Python
|
gpl-3.0
| 53,538
|
[
"Brian"
] |
f86798a5fc01c7a1bdd53a66d4b58da5cec31b23c8ef33a3fb4a9477985485c1
|
# -*- coding: utf-8 -*-
# options.py
# Global options file
# Copyright 2006 Giuseppe Venturini
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
"""This module contains options and configuration switches the user
may tune to meet their needs.
The default values are sensible options for the general case.
"""
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
import numpy as np
try:
from matplotlib import rcParams as plotting_rcParams
except ImportError:
plotting_rcParams = {}
#: Encoding of the netlist files.
encoding = 'utf8'
#: Cache size to be used in :func:`ahkab.utilities.memoize`, defaults to 512MB
cache_len = 67108864 # 512MB
#: A boolean to differentiate command line execution from module import
#: When cli is False, no printing and no weird stdout stuff.
cli = False
# global: errors
#: Voltage absolute tolerance.
vea = 1e-6
#: Voltage relative tolerance.
ver = 1e-3
#: Current absolute tolerance.
iea = 1e-9
#: Current relative tolerance.
ier = 1e-3
# global: circuit
#: Minimum conductance to ground.
gmin = 1e-12
#: Should we show to the user results pertaining to nodes introduced by
#: components or by the simulator?
print_int_nodes = True
# global: solving
#: Dense matrix limit: if the dimensions of the square MNA matrix are bigger,
#: use sparse matrices.
dense_matrix_limit = 400
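# Illustrative sketch (an assumption, not ahkab's actual assembly code) of how
# a threshold like this is typically consumed when allocating the MNA matrix:
def _alloc_mna_sketch(n, limit=dense_matrix_limit):
    """Allocate an n-by-n MNA matrix, sparse above ``limit`` (sketch only)."""
    import numpy as _np
    if n > limit:
        import scipy.sparse as _sp
        return _sp.lil_matrix((n, n))
    return _np.zeros((n, n))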
#: Should we damp artificially the first NR iterations? See also
#: :func:`ahkab.dc_analysis.get_td`.
nr_damp_first_iters = False
#: In all NR iterations, lock the nodes controlling non-linear elements. See
#: also :func:`ahkab.dc_analysis.get_td`.
nl_voltages_lock = True # Apply damping - slows down solution.
#: Non-linear nodes lock factor:
#: if we allow the voltage on controlling ports to change too much, we may
#: have current/voltage overflows. Think about the diode characteristic.
#: So we allow them to change of ``nl_voltages_lock_factor``
#: :math:`\cdot V_{th}` at most and damp all variables accordingly.
nl_voltages_lock_factor = 4
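# Minimal sketch, in the spirit of :func:`ahkab.dc_analysis.get_td` but not
# its actual code (an assumption for illustration): compute a damping factor
# that keeps controlling-port voltage updates within
# ``nl_voltages_lock_factor * Vth``.
def _damping_factor_sketch(controlling_dv, vth=0.026):
    """Return a factor in (0, 1] to scale the whole NR step by (sketch)."""
    if not controlling_dv:
        return 1.0
    max_dv = max(abs(v) for v in controlling_dv)
    limit = nl_voltages_lock_factor * vth
    return 1.0 if max_dv <= limit else limit / max_dv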
#: Whether the standard solving method can be used.
use_standard_solve_method = True
#: Whether the gmin-stepping homotopy can be used.
use_gmin_stepping = True
#: Whether the source-stepping homotopy can be used.
use_source_stepping = True
#: When printing out to the user, whether we can suppress trailing zeros.
print_suppress = False
#: When printing out to the user, how many decimal digits to show at maximum.
print_precision = np.get_printoptions()['precision']
# dc
#: Maximum allowed NR iterations during a DC analysis.
dc_max_nr_iter = 10000
#: Enable guessing to init the NR solver during a DC analysis.
dc_use_guess = True
#: Do not perform an initial DC guess if its effort is higher than
#: this value.
dc_max_guess_effort = 250000
# shorthand to set logarithmic stepping in DC analyses.
dc_log_step = 'LOG'
# shorthand to set linear stepping in DC analyses.
dc_lin_step = 'LIN'
#: Can we skip troublesome points during DC sweeps?
dc_sweep_skip_allowed = True
# transient
#: The default differentiation method for transient analyses.
default_tran_method = "TRAP"
#: Minimum allowed discretization step for time.
hmin = 1e-20
#: Maximum number of time iterations for transient analyses
#: Notice the default (0) means no limit is enforced.
transient_max_time_iter = 0 # disabled
#: Maximum number of NR iterations for transient analyses.
transient_max_nr_iter = 20
#: In a transient analysis, if a prediction value is available,
#: use it as the first guess for ``x(n+1)``; otherwise ``x(n)`` is used.
transient_prediction_as_x0 = True
#: Use a posteriori step control?
transient_use_aposteriori_step_control = True
#: Step change threshold:
#: we do not want to redo the iteration if the a posteriori check suggests a
#: step that is very close to the one we already used. A value of 0.9 seems to
#: that is very close to the one we already used. A value of 0.9 seems to be a
#: good idea.
transient_aposteriori_step_threshold = 0.9
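# Sketch of the check implied by the threshold above (an assumption, not the
# simulator's actual step-control code): only redo the point when the
# a posteriori estimate shrinks the step meaningfully.
def _keep_step_sketch(used_step, suggested_step,
                      threshold=transient_aposteriori_step_threshold):
    """Return True if ``used_step`` can be kept, False to redo the point."""
    return suggested_step >= threshold * used_step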
#: Disable all step control in transient analyses.
transient_no_step_control = False
#: Minimum capacitance to ground.
cmin = 1e-18
# pss
BFPSS = 'brute-force'
SHOOTINGPSS = 'shooting'
# shooting
#: Default number of points for a shooting analysis.
shooting_default_points = 100
#: Maximum number of NR iterations for shooting analyses.
shooting_max_nr_iter = 10000
# bfpss
#: Default number of points for a BFPSS analysis.
bfpss_default_points = 100
#: Maximum number of NR iterations for BFPSS analyses.
bfpss_max_nr_iter = 10000
# symbolic
#: Enable the manual solver:
#: solve the circuit equations one at a time as you might do "manually".
symb_sympy_manual_solver = False
#: Formulate the equations with conductances and at the last moment
#: swap resistor symbols back in. It seems to make sympy play nicer.
#: Sometimes.
symb_formulate_with_gs = False
# ac
ac_log_step = 'LOG'
ac_lin_step = 'LIN'
#: Maximum number of NR iterations for AC analyses.
ac_max_nr_iter = 20
#: Use degrees instead of rads in AC phase results.
ac_phase_in_deg = False
#pz
#: Maximum considered angular frequency in rad/s for PZ analyses.
pz_max = 1e12
# plotting
# Set to None to disable writing plots to disk
#: Should plots be shown to the user? This variable is set to ``True``
#: automatically if a screen is detected on Unix systems.
#:
#: Notice that by default ahkab both shows plots *and* saves them to disk.
plotting_show_plots = ('DISPLAY' in os.environ)
#: Wait for the user to close the plot? If set to ``False``, plots are created
#: and immediately destroyed.
plotting_wait_after_plot = True
#: Format to be used when writing plots to disk.
plotting_outtype = "png"
#: Matplotlib line plot style: see matplotlib's doc.
plotting_style = "-o"
#: Plotting line width.
plotting_lw = 1.25
#: Default size for plots showed to the user, in inches.
plotting_display_figsize = (12.94, 8)
#: Default size for plots saved to disk.
plotting_save_figsize = (20, 10)
# plotting_rcParams['font.family'] = 'Baskerville'
plotting_rcParams['axes.labelsize'] = 11
plotting_rcParams['xtick.labelsize'] = 11
plotting_rcParams['ytick.labelsize'] = 11
plotting_rcParams['legend.fontsize'] = 11
# Windows
RECT_WINDOW = 'RECT'
BART_WINDOW = 'BART'
HANN_WINDOW = 'HANN'
HAMM_WINDOW = 'HAMM'
BLACK_WINDOW = 'BLACK'
HARRIS_WINDOW = 'HARRIS'
GAUSS_WINDOW = 'GAUSS'
KAISER_WINDOW = 'KAISER'
WINDOWS_NAMES = dict(RECT='RECTANGULAR', BART='BARTLETT',
HANN='HANNING', HAMM='HAMMING',
BLACK='BLACKMAN-HARRIS', HARRIS='HARRIS',
GAUSS='GAUSSIAN', KAISER='KAISER')
|
ahkab/ahkab
|
ahkab/options.py
|
Python
|
gpl-2.0
| 7,088
|
[
"Gaussian"
] |
04127cdf0b347e60e24916650833ffa82712fa68fadae4e9e0d7eded27ff89d8
|
"""Backscraper for Supreme Court of Maine 2013
CourtID: me
Court Short Name: Me.
Author: Brian W. Carver
Date created: June 20, 2014
"""
from datetime import date
from datetime import datetime
from lxml import html
from juriscraper.OpinionSite import OpinionSite
from juriscraper.opinions.united_states.state import me
class Site(me.Site):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.url = 'http://www.courts.maine.gov/opinions_orders/supreme/lawcourt/2013/2013_index.shtml'
|
brianwc/juriscraper
|
opinions/united_states_backscrapers/state/me_2013.py
|
Python
|
bsd-2-clause
| 548
|
[
"Brian"
] |
671da93643dd5fe53d9be5f7d19cc44c15d3aa7ec48f4d0ec7209ded41e7a7a6
|
"""
Blueprint which provides the RESTful web API for JPER
"""
from flask import Blueprint, make_response, url_for, request, abort, redirect, current_app
from flask import stream_with_context, Response
import json, csv
from octopus.core import app
from octopus.lib import webapp, dates
from flask.ext.login import login_user, logout_user, current_user, login_required
from service.api import JPER, ValidationException, ParameterException, UnauthorisedException
from service import models
blueprint = Blueprint('webapi', __name__)
def _not_found():
"""
Construct a response object to represent a 404 (Not Found)
:return: Flask response for a 404, with an empty response body
"""
app.logger.debug("Sending 404 Not Found")
resp = make_response("")
resp.mimetype = "application/json"
resp.status_code = 404
return resp
def _unauthorised():
"""
Construct a response object to represent a 401 (Unauthorised)
:return: Flask response for a 401, with an empty response body
"""
app.logger.debug("Sending 401 Unauthorised")
resp = make_response("")
resp.status_code = 401
return resp
def _bad_request(message):
"""
Construct a response object to represent a 400 (Bad Request) around the supplied message
:return: Flask response for a 400 with a json response body containing the error
"""
app.logger.info("Sending 400 Bad Request from client: {x}".format(x=message))
resp = make_response(json.dumps({"status" : "error", "error" : message}))
resp.mimetype = "application/json"
resp.status_code = 400
return resp
def _accepted(obj):
"""
Construct a response object to represent a 202 (Accepted) for the supplied object
:param obj: the object that was accepted
:return: Flask response for a 202 with the id of the object in the json body, and the Location header set correctly
"""
app.logger.debug("Sending 202 Accepted: {x}".format(x=obj.id))
root = request.url_root
if root.endswith("/"):
root = root[:-1]
url = root + url_for("webapi.retrieve_notification", notification_id=obj.id)
resp = make_response(json.dumps({"status" : "accepted", "id" : obj.id, "location" : url }))
resp.mimetype = "application/json"
resp.headers["Location"] = url
resp.status_code = 202
return resp
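# Example of the 202 body produced above (the id and mount point are
# illustrative assumptions):
#
#   {"status": "accepted",
#    "id": "a1b2c3",
#    "location": "https://example.com/api/v1/notification/a1b2c3"}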
@blueprint.before_request
def standard_authentication():
"""Check remote_user on a per-request basis."""
remote_user = request.headers.get('REMOTE_USER', '')
#tp, apik = request.headers.get('Authorization', '').lower().split(None, 1)
apik = False
if not apik:
try:
apik = request.values.get('API_KEY', request.values.get('api_key', False))
except:
try:
apik = request.json.get('API_KEY', request.json.get('api_key', False))
except:
pass
if remote_user:
print "remote user present " + remote_user
app.logger.debug("Remote user connecting: {x}".format(x=remote_user))
user = models.Account.pull(remote_user)
if user:
login_user(user, remember=False)
else:
abort(401)
elif apik:
print "API key provided " + apik
app.logger.debug("API key connecting: {x}".format(x=apik))
res = models.Account.query(q='api_key:"' + apik + '"')['hits']['hits']
if len(res) == 1:
user = models.Account.pull(res[0]['_source']['id'])
if user is not None:
login_user(user, remember=False)
else:
abort(401)
else:
abort(401)
else:
# FIXME: this is not ideal, as it requires knowing where the blueprint is mounted
if (request.path.startswith("/api/v1/notification") and "/content" not in request.path) or request.path.startswith("/api/v1/routed"):
return
print "aborting, no user"
app.logger.debug("Standard authentication failed")
abort(401)
class BadRequest(Exception):
"""
Generic Exception for a bad request
"""
pass
def _get_parts():
"""
Used to extract metadata and content from an incoming request
:return: a tuple containing the metadata parsed out of the incoming json, and a file-handle (read-once) for the binary content
"""
md = None
zipfile = None
if len(request.files) > 0:
# this is a multipart request, so extract the data accordingly
metadata = request.files["metadata"]
content = request.files["content"]
# now, do some basic validation on the incoming http request (not validating the content,
# that's for the underlying validation API to do)
if metadata.mimetype != "application/json":
raise BadRequest("Content-Type for metadata part of multipart request must be application/json")
rawmd = metadata.stream.read()
try:
md = json.loads(rawmd)
except:
raise BadRequest("Unable to parse metadata part of multipart request as valid json")
if content.mimetype != "application/zip":
raise BadRequest("Content-Type for content part of multipart request must be application/zip")
zipfile = content.stream
else:
if "content-type" not in request.headers or request.headers["content-type"] != "application/json":
raise BadRequest("Content-Type must be application/json")
try:
md = json.loads(request.data)
except:
raise BadRequest("Unable to parse request body as valid json")
return md, zipfile
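# Illustrative client for the multipart branch above (an assumption: the URL,
# api_key and filenames are placeholders; uses the `requests` library):
#
#   import json, requests
#   resp = requests.post(
#       "https://example.com/api/v1/notification?api_key=KEY",
#       files={
#           "metadata": ("metadata.json", json.dumps(md), "application/json"),
#           "content": ("content.zip", open("content.zip", "rb"), "application/zip"),
#       })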
@blueprint.route("/validate", methods=["POST"])
@webapp.jsonp
def validate():
"""
Receive a POST to the /validate endpoint and process it
:return: A 400 (Bad Request) if not valid, or a 204 if successful
"""
try:
md, zipfile = _get_parts()
except BadRequest as e:
return _bad_request(e.message)
try:
JPER.validate(current_user, md, zipfile)
except ValidationException as e:
return _bad_request(e.message)
return '', 204
@blueprint.route("/notification", methods=["POST"])
@webapp.jsonp
def create_notification():
"""
Receive a POST to the /notification endpoint to create a notification, and process it
:return: A 400 (Bad Request) if not valid, or a 202 (Accepted) if successful
"""
try:
md, zipfile = _get_parts()
except BadRequest as e:
return _bad_request(e.message)
try:
notification = JPER.create_notification(current_user, md, zipfile)
if not notification:
abort(401)
except ValidationException as e:
return _bad_request(e.message)
return _accepted(notification)
@blueprint.route("/notification/<notification_id>", methods=["GET"])
@webapp.jsonp
def retrieve_notification(notification_id):
"""
Receive a GET on a specific notification, as identified by the notification id, and return the body
of the notification
:param notification_id: the id of the notification to retrieve
:return: 404 (Not Found) if not found, else 200 (OK) and the outgoing notification as a json body
"""
notification = JPER.get_notification(current_user, notification_id)
if notification is None:
return _not_found()
resp = make_response(notification.json())
resp.mimetype = "application/json"
resp.status_code = 200
return resp
@blueprint.route("/notification/<notification_id>/content", methods=["GET"])
@blueprint.route("/notification/<notification_id>/content/<filename>", methods=["GET"])
@webapp.jsonp
def retrieve_content(notification_id, filename=None):
"""
Receive a GET against the default content or a specific content file in a notification and supply the binary
in return
:param notification_id: the notification whose content to retrieve
:param filename: the filename of the content file in the notification
:return: 404 (Not Found) if either the notification or the content is not found, or 200 (OK) and the binary content
"""
app.logger.debug("{x} {y} content requested".format(x=notification_id, y=filename))
if filename is None:
fn = "none"
else:
fn = filename
nt = None
try:
filestream = JPER.get_content(current_user, notification_id, filename)
nt = models.ContentLog({"user":current_user.id,"notification":notification_id,"filename":fn,"delivered_from":"store"})
return Response(stream_with_context(filestream))
except UnauthorisedException as e:
nt = models.ContentLog({"user":current_user.id,"notification":notification_id,"filename":fn,"delivered_from":"unauthorised"})
return _unauthorised()
except Exception as e:
nt = models.ContentLog({"user":current_user.id,"notification":notification_id,"filename":fn,"delivered_from":"notfound"})
return _not_found()
finally:
if nt is not None:
nt.save()
@blueprint.route("/notification/<notification_id>/proxy/<pid>", methods=["GET"])
def proxy_content(notification_id, pid):
app.logger.debug("{x} {y} proxy requested".format(x=notification_id, y=pid))
purl = JPER.get_proxy_url(current_user, notification_id, pid)
if purl is not None:
nt = models.ContentLog({"user":current_user.id,"notification":notification_id,"filename":pid,"delivered_from":"proxy"})
nt.save()
return redirect(purl)
else:
nt = models.ContentLog({"user":current_user.id,"notification":notification_id,"filename":pid,"delivered_from":"notfound"})
nt.save()
return _not_found()
def _list_request(repo_id=None):
"""
Process a list request, either against the full dataset or the specific repo_id supplied
This function will pull the arguments it requires out of the Flask request object. See the API documentation
for the parameters of these kinds of requests.
:param repo_id: the repo id to limit the request to
:return: Flask response containing the list of notifications that are appropriate to the parameters
"""
since = request.values.get("since")
page = request.values.get("page", app.config.get("DEFAULT_LIST_PAGE_START", 1))
page_size = request.values.get("pageSize", app.config.get("DEFAULT_LIST_PAGE_SIZE", 25))
if since is None or since == "":
return _bad_request("Missing required parameter 'since'")
try:
since = dates.reformat(since)
except ValueError as e:
return _bad_request("Unable to understand since date '{x}'".format(x=since))
try:
page = int(page)
except:
return _bad_request("'page' parameter is not an integer")
try:
page_size = int(page_size)
except:
return _bad_request("'pageSize' parameter is not an integer")
try:
nlist = JPER.list_notifications(current_user, since, page=page, page_size=page_size, repository_id=repo_id)
except ParameterException as e:
return _bad_request(e.message)
resp = make_response(nlist.json())
resp.mimetype = "application/json"
resp.status_code = 200
return resp
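# Example request handled above (values and mount point are illustrative):
#
#   GET /api/v1/routed?since=2015-01-01T00:00:00Z&page=1&pageSize=25
#
# and the 400 body returned when 'since' is missing:
#
#   {"status": "error", "error": "Missing required parameter 'since'"}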
@blueprint.route("/routed", methods=["GET"])
@webapp.jsonp
def list_all_routed():
"""
List all the notifications that have been routed to any repository, limited by the parameters supplied
in the URL.
See the API documentation for more details.
:return: a list of notifications appropriate to the parameters
"""
return _list_request()
@blueprint.route("/routed/<repo_id>", methods=["GET"])
@webapp.jsonp
def list_repository_routed(repo_id):
"""
List all the notifications that have been routed to the specified repository, limited by the parameters supplied
in the URL.
See the API documentation for more details.
:param repo_id: the id of the repository whose notifications to retrieve
:return: a list of notifications appropriate to the parameters
"""
return _list_request(repo_id)
@blueprint.route("/config", methods=["GET","POST"])
@blueprint.route("/config/<repoid>", methods=["GET","POST"])
@webapp.jsonp
def config(repoid=None):
app.logger.debug(current_user.id + " " + request.method + " to config route")
if repoid is None:
if current_user.has_role('repository'):
repoid = current_user.id
elif current_user.has_role('admin'):
return '' # the admin cannot do anything at /config, but gets a 200 so it is clear they are allowed
else:
abort(400)
elif not current_user.has_role('admin'): # only the superuser can set a repo id directly
abort(401)
rec = models.RepositoryConfig().pull_by_repo(repoid)
if rec is None:
rec = models.RepositoryConfig()
rec.repository = repoid
if request.method == 'GET':
# get the config for the current user and return it
# this route may not actually be needed, but is convenient during development
# also it should be more than just the strings data once complex configs are accepted
resp = make_response(json.dumps(rec.data))
resp.mimetype = "application/json"
return resp
elif request.method == 'POST':
if request.json:
saved = rec.set_repo_config(jsoncontent=request.json,repository=repoid)
else:
try:
if request.files['file'].filename.endswith('.csv'):
saved = rec.set_repo_config(csvfile=request.files['file'],repository=repoid)
elif request.files['file'].filename.endswith('.txt'):
saved = rec.set_repo_config(textfile=request.files['file'],repository=repoid)
except:
saved = False
if saved:
return ''
else:
abort(400)
|
JiscPER/jper
|
service/views/webapi.py
|
Python
|
apache-2.0
| 13,902
|
[
"Octopus"
] |
1ac4bda890abb683585248c9063cbb9ba5aacc8b50701f5423ae69ed63a8e61b
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "f 10 0.033, s, f 20 0.033, s, f 30 0.033, s, f 30 0.033, s, q"
tags = "particles, Sun"
import pyglet
import summa
from summa.director import director
from summa.actions import *
from summa.layer import *
from summa.particle_systems import *
from summa.particle import *
class L(Layer):
is_event_handler = True
def __init__(self):
super( L, self).__init__()
# p = Fireworks()
# p = Explosion()
# p = Fire()
# p = Flower()
p = Sun()
# p = Spiral()
# p = Meteor()
# p = Galaxy()
p.position = (320,240)
self.add( p )
p.position_type = ParticleSystem.POSITION_FREE
self.sun = p
def on_mouse_drag( self, x, y, dx, dy, buttons, modifiers ):
(x,y) = director.get_virtual_coordinates(x,y)
x,y = self.sun.position
self.sun.position = (x+dx, y+dy)
def main():
director.init( resizable=True )
main_scene = summa.scene.Scene()
main_scene.add( L() )
director.run( main_scene )
if __name__ == '__main__':
main()
|
shackra/thomas-aquinas
|
tests/test_particle_sun.py
|
Python
|
bsd-3-clause
| 1,236
|
[
"Galaxy"
] |
ae8d36dc2372ca944460ff2991b60828fd46b47bcff4a2448507f2cd3900ae6a
|
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
eps = np.finfo(float).eps #smallest float
#(0) Set parameters:
np.random.seed(0)
nResponses = 12
nNodes = 101
nIterations = 2000
FWHM = 8.5
interp = True
wrap = True
heights = [2.0, 2.2, 2.4]
c = 2
### derived parameters:
df = nResponses-1
sqrtN = np.sqrt(nResponses)
### initialize RFT calculators:
calc = rft1d.geom.ClusterMetricCalculator()
rftcalc = rft1d.prob.RFTCalculator(STAT='T', df=(1,df), nodes=nNodes, FWHM=FWHM)
#(1) Generate Gaussian 1D fields, compute test stat:
T = []
generator = rft1d.random.Generator1D(nResponses, nNodes, FWHM)
for i in range(nIterations):
y = generator.generate_sample()
t = y.mean(axis=0) / y.std(ddof=1, axis=0) * sqrtN
T.append( t )
T = np.asarray(T)
#(2) Maximum region size:
K0 = np.linspace(eps, 8, 21)
K = [[calc.cluster_extents(yy, h, interp, wrap) for yy in T] for h in heights]
### compute number of upcrossings above a threshold:
C = np.array([[[ sum([kkk>=k0 for kkk in kk]) for kk in k] for k in K] for k0 in K0])
P = np.mean(C>=c, axis=2).T
P0 = np.array([[rftcalc.p.set(c, k0, h) for h in heights] for k0 in K0/FWHM]).T
#(3) Plot results:
pyplot.close('all')
colors = ['b', 'g', 'r']
ax = pyplot.axes()
for color,p,p0,u in zip(colors,P,P0,heights):
ax.plot(K0, p, 'o', color=color)
ax.plot(K0, p0, '-', color=color, label='u = %.1f'%u)
ax.set_xlabel('x', size=16)
ax.set_ylabel('P(c, k_min) > x', size=16)
ax.set_title('Set-level inference validations (t fields)', size=20)
pyplot.show()
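# Interpretation note (added): the circles are the empirical set-level
# probabilities P computed from the simulated t fields, and the solid lines
# are the RFT predictions P0 from rftcalc; the validation passes when the
# circles track the lines across thresholds u and extents k_min.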
|
0todd0000/spm1d
|
spm1d/rft1d/examples/val_upx_1_t_set.py
|
Python
|
gpl-3.0
| 1,698
|
[
"Gaussian"
] |
225fc069002fffb90ff47e2ba7d4d1d405624862f08a30510b6f0d424de1fee3
|
"""
SLSClient is a client for the SLS DB, looking up the status of a given service.
"""
import socket
import urllib2
from xml.dom import minidom
from DIRAC import S_OK, S_ERROR
def getAvailabilityStatus( sls_id, timeout = None ):
"""
Return actual SLS availability status of entity in sls_id.
Use SLS API: fast!!
:params:
:attr:`sls_id`: string - sls_id of the service
Returns: { "Availability": <int>, "Weblink": <str> }
"""
socket.setdefaulttimeout( timeout )
try:
res = urllib2.urlopen("http://sls.cern.ch/sls/getServiceAvailability.php?id=" + sls_id).read()
except urllib2.URLError as exc:
return S_ERROR(str(exc))
if "ERROR: Couldn't find service" in res:
return S_ERROR( "The service is not monitored with SLS" )
elif "ERROR:" in res:
return S_ERROR("Unknown SLS error")
else:
return S_OK( { "Availability": int(res), "Weblink": "https://sls.cern.ch/sls/service.php?id=" + sls_id})
def getServiceInfo( sls_id, timeout = 120.0 ):
"""
Return actual SLS "additional service information" as a dict.
(Parse SLS update XML)
:params:
:attr:`sls_id` : string - sls_id of the service
"""
socket.setdefaulttimeout( timeout )
try:
sls = urllib2.urlopen("http://sls.cern.ch/sls/update/" + sls_id + '.xml')
doc = minidom.parse( sls )
numericValues = doc.getElementsByTagName( "numericvalue" )
except Exception as exc:
return S_ERROR(str(exc))
return S_OK(dict([(nv.getAttribute("name"), float(nv.firstChild.nodeValue)) for nv in numericValues]))
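# Minimal usage sketch (assumes DIRAC's usual S_OK/S_ERROR result dicts; the
# sls_id value is a placeholder):
#
#   res = getAvailabilityStatus("some-sls-id", timeout=10)
#   if res["OK"]:
#       print res["Value"]["Availability"], res["Value"]["Weblink"]
#   else:
#       print res["Message"]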
|
vmendez/DIRAC
|
Core/LCG/SLSClient.py
|
Python
|
gpl-3.0
| 1,533
|
[
"DIRAC"
] |
39932ad0ed206ad54f729de86fb7ae9fbe820ba3be3b16fd8ed3fbb09a4c21cc
|
#!/usr/bin/python3
LICENSE= """
/* VECTORBLOX ORCA
*
* Copyright (C) 2012-2018 VectorBlox Computing Inc., Vancouver, British Columbia, Canada.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of VectorBlox Computing Inc. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This agreement shall be governed in all respects by the laws of the Province
* of British Columbia and by the laws of Canada.
*
* This file is part of the VectorBlox ORCA
*
*/
"""
from collections import namedtuple
import sys
import itertools
instruction = namedtuple('instruction',['name','bit40','bit30','bit25','bit14_12'])
arith_instr=[instruction("vadd" ,0,0,0,0),
instruction("vsub" ,0,1,0,0),
instruction("vsll" ,0,0,0,1),
instruction("vshl" ,0,0,0,1), #alias
instruction("vslt" ,0,0,0,2),
instruction("vsltu" ,0,0,0,3),
instruction("vxor" ,0,0,0,4),
instruction("vsrl" ,0,0,0,5),
instruction("vsra" ,0,1,0,5),
instruction("vshr" ,0,0,0,5), #alias
instruction("vor" ,0,0,0,6),
instruction("vand" ,0,0,0,7),
instruction("vmul" ,0,0,1,0),
instruction("vmulh" ,0,0,1,1),
instruction("vmulhi" ,0,0,1,1), #alias
instruction("vmulhus" ,0,0,1,2), #oposite order of riscv
instruction("vmulhu" ,0,0,1,3),
instruction("vdiv" ,0,0,1,4),
instruction("vdivu" ,0,0,1,5),
instruction("vrem" ,0,0,1,6),
instruction("vremu" ,0,0,1,7),
#non-riscv lve instructions
instruction('vcmv_nz' ,0,1,1,0),
instruction('vcmv_z' ,0,1,1,1),
instruction('vmov' ,0,1,1,2),
instruction('vcustom0' ,0,1,1,3),
instruction('vcustom1' ,0,1,1,4),
instruction('vcustom2' ,0,1,1,5),
instruction('vcustom3' ,0,1,1,6),
instruction('vcustom4' ,0,1,0,1),
instruction('vsgt' ,0,1,0,2),
instruction('vsgtu' ,0,1,0,3),
instruction('vcustom5' ,0,1,0,4),
instruction('vcustom6' ,0,1,0,6),
instruction('vcustom7' ,0,1,0,7),
#mxp instructions
instruction('vcmv_lez' ,1,0,0,0),
instruction('vcmv_gtz' ,1,0,0,1),
instruction('vcmv_ltz' ,1,0,0,2),
instruction('vcmv_gez' ,1,0,0,3),
instruction('vsubb' ,1,0,0,4),
instruction('vaddc' ,1,0,0,5),
instruction('vabsdiff' ,1,0,0,6),
instruction('vmulfxp' ,1,0,0,7),
instruction('vset_msk_lez' ,1,0,1,0),
instruction('vset_msk_gtz' ,1,0,1,1),
instruction('vset_msk_ltz' ,1,0,1,2),
instruction('vset_msk_gez' ,1,0,1,3),
instruction('vset_msk_nz' ,1,0,1,4),
instruction('vset_msk_z' ,1,0,1,5),
instruction('vaddfxp' ,1,0,1,6),
instruction('vsubfxp' ,1,0,1,7),
instruction('vcustom8' ,1,1,0,0),
instruction('vcustom9' ,1,1,0,1),
instruction('vcustom10' ,1,1,0,2),
instruction('vcustom11' ,1,1,0,3),
instruction('vcustom12' ,1,1,0,4),
instruction('vcustom13' ,1,1,0,5),
instruction('vcustom14' ,1,1,0,6),
instruction('vcustom15' ,1,1,0,7),
]
type_bits={'vv':0,
'sv':1,
've':2,
'se':3}
size_bits={"b":0,
"h":1,
"w":2,}
sign_bits={'u':0,
's':1}
acc_bits={".acc":1,
"":0}
lve_extension_template = '{{"{name}", "X{ext}", "d,s,t", MATCH_{uname}, MASK_{uname}, match_opcode, 0 }},\n'
def generate_arithmetic_instr( define_file,lve_extension_file):
def make_mask(instruction_tpl,
srca_size,
srcb_size,
dest_size,
srca_sign,
srcb_sign,
dest_sign):
if (srca_sign == 's' and
srcb_sign == 's' and
dest_sign == 's' and
srca_size == dest_size and
srca_size == srcb_size and
instruction_tpl.bit40 == 0):
#32bit instruction
return 0xFE00707F
else:
return 0xFFFFFFFFFE00707F
def make_match( instruction_tpl,
acc,
type_spec,
srca_size,
srcb_size,
dest_size,
srca_sign,
srcb_sign,
dest_sign):
dest_size_bits= (size_bits[dest_size]&1) | ((size_bits[dest_size]&2) << 1)
instruction = 0
#32bit instruction
instruction |= 0x2B
instruction |= (instruction_tpl.bit14_12 << 12)
instruction |= (instruction_tpl.bit25 << 25)
instruction |= (type_bits[type_spec] << 26)
instruction |= (acc_bits[acc] << 28)
instruction |= (dest_size_bits << 29)
instruction |= (instruction_tpl.bit30 << 30)
instruction |= (instruction_tpl.bit40 << 40)
if not (srca_sign == 's' and
srcb_sign == 's' and
dest_sign == 's' and
srca_size == dest_size and
srca_size == srcb_size and
instruction_tpl.bit40 == 0):
# 64bit instruction
instruction|= 0x3F
instruction|= size_bits[srca_size] <<32
instruction|= size_bits[srcb_size] <<34
instruction|= sign_bits[dest_sign] <<36
instruction|= sign_bits[srca_sign] <<37
instruction|= sign_bits[srcb_sign] <<38
return instruction
for ai in arith_instr:
for acc in acc_bits:
for type_spec in type_bits:
for sd,sa,sb in itertools.product(sign_bits.keys(),repeat=3):
for dsz,asz,bsz in itertools.product(size_bits.keys(),repeat=3):
name="{name}.{type}{size}{acc}".format(name=ai.name,
type=type_spec,
size=dsz+asz+bsz+sd+sa+sb,
acc=acc)
uname=name.replace('.','_').upper()
mask=make_mask(ai,asz,bsz,dsz,sa,sb,sd)
match=make_match(instruction_tpl=ai,
acc=acc,
type_spec=type_spec,
srca_size=asz,
srcb_size=bsz,
dest_size=dsz,
srca_sign=sa,
srcb_sign=sb,
dest_sign=sd)
# perhaps we will want to separate these in the future
ext = "lve" if mask < 0xFFFFFFFF else "lve"
define_file.write("#define MATCH_{} 0x{:X}\n".format(uname,match))
define_file.write("#define MASK_{} 0x{:X}\n".format(uname,mask))
lve_extension_file.write(lve_extension_template.format(name=name,
ext=ext,
uname=uname))
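def _matches_sketch(word, match, mask):
    """Sketch (an assumption, not binutils code) of how the MATCH/MASK pairs
    emitted above are typically consumed by an assembler or disassembler: an
    encoded instruction word corresponds to an entry iff the masked bits agree.
    """
    return (word & mask) == match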
def generate_special_instr( define_file,lve_extension_file):
instruction = namedtuple('instruction',['name','bit29_26','registers','alias'])
special_inst = [instruction('vbx_set_vl',0,'"s,t,d"',0),
instruction('vbx_set_2d',1,'"s,t,d"',0),
instruction('vbx_set_3d',2,'"s,t,d"',0),
instruction('vbx_sync',3,'""',"INSN_ALIAS"),
instruction('vbx_get',3,'"d,s"',0),
instruction('vbx_dma_tohost',4,'"s,t,d"',0),
instruction('vbx_dma_tovec',5,'"s,t,d"',0),
instruction('vbx_dma_tohost2d',0xC,'"s,t,d"',0),
instruction('vbx_dma_tovec2d',0xD,'"s,t,d"',0),
instruction('vbx_dma_2dsetup',6,'"s,t,d"',0)]
special_inst_template='{{"{name}", "X{ext}", {regs}, MATCH_{uname}, MASK_{uname}, match_opcode, {alias} }},\n'
for si in special_inst:
mask=0xFE00707F
if "t" not in si.registers:
mask |= (0x1F << 20)
if "s" not in si.registers:
mask |= (0x1F << 15)
if "d" not in si.registers:
mask |= (0x1F << 7)
match=0x4200702B | (si.bit29_26 <<26)
name = si.name
uname = name.upper()
ext = "lve"
define_file.write("#define MATCH_{} 0x{:X}\n".format(uname,match))
define_file.write("#define MASK_{} 0x{:X}\n".format(uname,mask))
lve_extension_file.write(special_inst_template.format(name=name,
regs=si.registers,
ext=ext,
uname=uname,
alias=si.alias))
if __name__ == '__main__':
with open("riscv-lve.h","w") as define_file, open("lve-extensions.h","w") as lve_extension_file:
define_file.write(LICENSE)
lve_extension_file.write(LICENSE)
generate_arithmetic_instr(define_file,lve_extension_file)
generate_special_instr(define_file,lve_extension_file)
|
VectorBlox/risc-v
|
tools/riscv-toolchain/opcodes-lve.py
|
Python
|
bsd-3-clause
| 11,209
|
[
"ORCA"
] |
be12a45b2cd90723b9c0d98bfcf15f15459ea0adadd3923c49fa7acffa9ce4f4
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkJPEGReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkJPEGReader(), 'Reading vtkJPEG.',
(), ('vtkJPEG',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkJPEGReader.py
|
Python
|
bsd-3-clause
| 468
|
[
"VTK"
] |
f486fa2b544b2ca21aa8549bc1a9b25b7554e14aaa409a220cd4c695591e7e29
|
# -*- coding: utf-8 -*-
# Copyright © 2014 Casey Dahlin
#
# This file is part of foobar.
#
# foobar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with foobar. If not, see <http://www.gnu.org/licenses/>.
from scratchwire.model import db, User, VerifyUrl, Alias
from flask import session, abort, flash
from scratchwire.form import Form, element, hidden_element
from validate_email import validate_email
from scratchwire.util import bail_redirect
from datetime import datetime
class LoginForm(Form):
"""
Standard issue login form. Nothing exciting here.
"""
@element(label="E-Mail", ftype="email")
def email(self, field):
"""
Email address form field. We use this as our primary user identifier.
"""
if not validate_email(field.content):
field.complaints.append("You must enter a valid e-mail address")
else:
field.value = field.content
@element(label="Password", ftype="password")
def password(self, field):
"""
Password form field.
"""
if len(field.content) < 6:
field.complaints.append("Password must be at least 6 characters")
else:
field.value = field.content
def global_validate(self, valid_so_far):
"""
        Validate the submitted credentials and fetch the user's database entry.
"""
if not valid_so_far:
return
email = self.email.value
password = self.password.value
user = User.query.filter_by(email=email).first()
        if user is None or not user.check_pass(password):
            self.email.complaints.append("Invalid email or password")
        elif not user.email_verified:
            self.password.complaints.append(
                "You have not yet validated your email address")
if len(self.email.complaints) == 0:
self.user = user
return
def handle_valid(self):
"""
Handle a valid submission of this form
"""
session["User"] = self.user
return bail_redirect()
action = 'login'
template = 'login'
class RegistrationForm(LoginForm):
@element(label="Confirm Password", ftype="password")
def confirm_password(self, field):
"""
Confirm password form field.
"""
field.value = field.content
def global_validate(self, valid_so_far):
"""
        Validate the new user and build the account entry.
"""
email = self.email.value
password = self.password.value
confirm_password = self.confirm_password.value
user = User.query.filter_by(email=email).first()
        if user is None and password == confirm_password and valid_so_far:
user = User()
user.email = email
user.set_pass(password)
self.user = user
return
        if user is not None:
self.email.complaints.append("E-mail address already in use")
if password != confirm_password:
self.password.complaints.append("Passwords do not match")
def handle_valid(self):
verify_url = VerifyUrl(self.user)
db.session.add(self.user)
db.session.add(verify_url)
db.session.commit()
verify_url.send_email()
        flash("We have sent you an email to confirm your email address. "
              "Please click on the link to confirm your registration.")
return bail_redirect()
action = 'register'
template = 'register'
class VerifyForm(LoginForm):
email = None
def setup(self):
id = self.action_vars['verify_id']
self.verify = VerifyUrl.query.filter(VerifyUrl.id == id,
VerifyUrl.expires > datetime.utcnow()).first()
if not self.verify:
abort(404)
def global_validate(self, valid_so_far):
"""
Validate the verified user
"""
password = self.password.value
if not valid_so_far:
return
if not self.verify.user.check_pass(password):
self.password.complaints.append("Invalid password")
def handle_valid(self):
"""
Verify the user
"""
session['User'] = self.verify.user
session['User'].email_verified = True
db.session.add(session['User'])
db.session.commit()
flash("Your email address has been verified successfully")
return bail_redirect()
action = 'verify'
template = 'verify'
class DeleteAlias(Form):
meth = hidden_element('_method', 'DELETE')
action = 'delete_alias'
template = 'delete_alias'
|
sadmac7000/Scratchwire.com
|
scratchwire/app_forms.py
|
Python
|
gpl-3.0
| 5,131
|
[
"exciting"
] |
122315ee9a35d26f50aa0d85eecb2eafcd99800292118ad7ab5111efb0b13747
|
from __future__ import absolute_import
from .Errors import error, message
from . import ExprNodes
from . import Nodes
from . import Builtin
from . import PyrexTypes
from .. import Utils
from .PyrexTypes import py_object_type, unspecified_type
from .Visitor import CythonTransform, EnvTransform
class TypedExprNode(ExprNodes.ExprNode):
# Used for declaring assignments of a specified type without a known entry.
def __init__(self, type):
self.type = type
object_expr = TypedExprNode(py_object_type)
class MarkParallelAssignments(EnvTransform):
    # Collects assignments inside parallel blocks (prange, with parallel).
# Perhaps it's better to move it to ControlFlowAnalysis.
# tells us whether we're in a normal loop
in_loop = False
parallel_errors = False
def __init__(self, context):
# Track the parallel block scopes (with parallel, for i in prange())
self.parallel_block_stack = []
super(MarkParallelAssignments, self).__init__(context)
def mark_assignment(self, lhs, rhs, inplace_op=None):
if isinstance(lhs, (ExprNodes.NameNode, Nodes.PyArgDeclNode)):
if lhs.entry is None:
# TODO: This shouldn't happen...
return
if self.parallel_block_stack:
parallel_node = self.parallel_block_stack[-1]
previous_assignment = parallel_node.assignments.get(lhs.entry)
# If there was a previous assignment to the variable, keep the
# previous assignment position
if previous_assignment:
pos, previous_inplace_op = previous_assignment
if (inplace_op and previous_inplace_op and
inplace_op != previous_inplace_op):
# x += y; x *= y
t = (inplace_op, previous_inplace_op)
error(lhs.pos,
"Reduction operator '%s' is inconsistent "
"with previous reduction operator '%s'" % t)
else:
pos = lhs.pos
parallel_node.assignments[lhs.entry] = (pos, inplace_op)
parallel_node.assigned_nodes.append(lhs)
elif isinstance(lhs, ExprNodes.SequenceNode):
for i, arg in enumerate(lhs.args):
if not rhs or arg.is_starred:
item_node = None
else:
item_node = rhs.inferable_item_node(i)
self.mark_assignment(arg, item_node)
else:
# Could use this info to infer cdef class attributes...
pass
def visit_WithTargetAssignmentStatNode(self, node):
self.mark_assignment(node.lhs, node.with_node.enter_call)
self.visitchildren(node)
return node
def visit_SingleAssignmentNode(self, node):
self.mark_assignment(node.lhs, node.rhs)
self.visitchildren(node)
return node
def visit_CascadedAssignmentNode(self, node):
for lhs in node.lhs_list:
self.mark_assignment(lhs, node.rhs)
self.visitchildren(node)
return node
def visit_InPlaceAssignmentNode(self, node):
self.mark_assignment(node.lhs, node.create_binop_node(), node.operator)
self.visitchildren(node)
return node
def visit_ForInStatNode(self, node):
# TODO: Remove redundancy with range optimization...
is_special = False
sequence = node.iterator.sequence
target = node.target
if isinstance(sequence, ExprNodes.SimpleCallNode):
function = sequence.function
if sequence.self is None and function.is_name:
entry = self.current_env().lookup(function.name)
if not entry or entry.is_builtin:
if function.name == 'reversed' and len(sequence.args) == 1:
sequence = sequence.args[0]
elif function.name == 'enumerate' and len(sequence.args) == 1:
if target.is_sequence_constructor and len(target.args) == 2:
iterator = sequence.args[0]
if iterator.is_name:
iterator_type = iterator.infer_type(self.current_env())
if iterator_type.is_builtin_type:
# assume that builtin types have a length within Py_ssize_t
self.mark_assignment(
target.args[0],
ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
target = target.args[1]
sequence = sequence.args[0]
if isinstance(sequence, ExprNodes.SimpleCallNode):
function = sequence.function
if sequence.self is None and function.is_name:
entry = self.current_env().lookup(function.name)
if not entry or entry.is_builtin:
if function.name in ('range', 'xrange'):
is_special = True
for arg in sequence.args[:2]:
self.mark_assignment(target, arg)
if len(sequence.args) > 2:
self.mark_assignment(
target,
ExprNodes.binop_node(node.pos,
'+',
sequence.args[0],
sequence.args[2]))
if not is_special:
# A for-loop basically translates to subsequent calls to
# __getitem__(), so using an IndexNode here allows us to
# naturally infer the base type of pointers, C arrays,
# Python strings, etc., while correctly falling back to an
# object type when the base type cannot be handled.
self.mark_assignment(target, ExprNodes.IndexNode(
node.pos,
base=sequence,
index=ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type)))
self.visitchildren(node)
return node
def visit_ForFromStatNode(self, node):
self.mark_assignment(node.target, node.bound1)
if node.step is not None:
self.mark_assignment(node.target,
ExprNodes.binop_node(node.pos,
'+',
node.bound1,
node.step))
self.visitchildren(node)
return node
def visit_WhileStatNode(self, node):
self.visitchildren(node)
return node
def visit_ExceptClauseNode(self, node):
if node.target is not None:
self.mark_assignment(node.target, object_expr)
self.visitchildren(node)
return node
def visit_FromCImportStatNode(self, node):
pass # Can't be assigned to...
def visit_FromImportStatNode(self, node):
for name, target in node.items:
if name != "*":
self.mark_assignment(target, object_expr)
self.visitchildren(node)
return node
def visit_DefNode(self, node):
# use fake expressions with the right result type
if node.star_arg:
self.mark_assignment(
node.star_arg, TypedExprNode(Builtin.tuple_type))
if node.starstar_arg:
self.mark_assignment(
node.starstar_arg, TypedExprNode(Builtin.dict_type))
EnvTransform.visit_FuncDefNode(self, node)
return node
def visit_DelStatNode(self, node):
for arg in node.args:
self.mark_assignment(arg, arg)
self.visitchildren(node)
return node
def visit_ParallelStatNode(self, node):
if self.parallel_block_stack:
node.parent = self.parallel_block_stack[-1]
else:
node.parent = None
nested = False
if node.is_prange:
if not node.parent:
node.is_parallel = True
else:
node.is_parallel = (node.parent.is_prange or not
node.parent.is_parallel)
nested = node.parent.is_prange
else:
node.is_parallel = True
# Note: nested with parallel() blocks are handled by
# ParallelRangeTransform!
# nested = node.parent
nested = node.parent and node.parent.is_prange
self.parallel_block_stack.append(node)
nested = nested or len(self.parallel_block_stack) > 2
if not self.parallel_errors and nested and not node.is_prange:
error(node.pos, "Only prange() may be nested")
self.parallel_errors = True
if node.is_prange:
child_attrs = node.child_attrs
node.child_attrs = ['body', 'target', 'args']
self.visitchildren(node)
node.child_attrs = child_attrs
self.parallel_block_stack.pop()
if node.else_clause:
node.else_clause = self.visit(node.else_clause)
else:
self.visitchildren(node)
self.parallel_block_stack.pop()
self.parallel_errors = False
return node
def visit_YieldExprNode(self, node):
if self.parallel_block_stack:
error(node.pos, "Yield not allowed in parallel sections")
return node
def visit_ReturnStatNode(self, node):
node.in_parallel = bool(self.parallel_block_stack)
return node
class MarkOverflowingArithmetic(CythonTransform):
# It may be possible to integrate this with the above for
# performance improvements (though likely not worth it).
might_overflow = False
def __call__(self, root):
self.env_stack = []
self.env = root.scope
return super(MarkOverflowingArithmetic, self).__call__(root)
def visit_safe_node(self, node):
self.might_overflow, saved = False, self.might_overflow
self.visitchildren(node)
self.might_overflow = saved
return node
def visit_neutral_node(self, node):
self.visitchildren(node)
return node
def visit_dangerous_node(self, node):
self.might_overflow, saved = True, self.might_overflow
self.visitchildren(node)
self.might_overflow = saved
return node
def visit_FuncDefNode(self, node):
self.env_stack.append(self.env)
self.env = node.local_scope
self.visit_safe_node(node)
self.env = self.env_stack.pop()
return node
def visit_NameNode(self, node):
if self.might_overflow:
entry = node.entry or self.env.lookup(node.name)
if entry:
entry.might_overflow = True
return node
def visit_BinopNode(self, node):
if node.operator in '&|^':
return self.visit_neutral_node(node)
else:
return self.visit_dangerous_node(node)
visit_UnopNode = visit_neutral_node
visit_UnaryMinusNode = visit_dangerous_node
visit_InPlaceAssignmentNode = visit_dangerous_node
visit_Node = visit_safe_node
def visit_assignment(self, lhs, rhs):
if (isinstance(rhs, ExprNodes.IntNode)
and isinstance(lhs, ExprNodes.NameNode)
and Utils.long_literal(rhs.value)):
entry = lhs.entry or self.env.lookup(lhs.name)
if entry:
entry.might_overflow = True
def visit_SingleAssignmentNode(self, node):
self.visit_assignment(node.lhs, node.rhs)
self.visitchildren(node)
return node
def visit_CascadedAssignmentNode(self, node):
for lhs in node.lhs_list:
self.visit_assignment(lhs, node.rhs)
self.visitchildren(node)
return node
class PyObjectTypeInferer(object):
"""
If it's not declared, it's a PyObject.
"""
def infer_types(self, scope):
"""
Given a dict of entries, map all unspecified types to a specified type.
"""
for name, entry in scope.entries.items():
if entry.type is unspecified_type:
entry.type = py_object_type
class SimpleAssignmentTypeInferer(object):
"""
Very basic type inference.
Note: in order to support cross-closure type inference, this must be
    applied to nested scopes in top-down order.
"""
def set_entry_type(self, entry, entry_type):
entry.type = entry_type
for e in entry.all_entries():
e.type = entry_type
def infer_types(self, scope):
enabled = scope.directives['infer_types']
verbose = scope.directives['infer_types.verbose']
if enabled == True:
spanning_type = aggressive_spanning_type
elif enabled is None: # safe mode
spanning_type = safe_spanning_type
else:
for entry in scope.entries.values():
if entry.type is unspecified_type:
self.set_entry_type(entry, py_object_type)
return
        # Set of assignments
assignments = set()
assmts_resolved = set()
dependencies = {}
assmt_to_names = {}
for name, entry in scope.entries.items():
for assmt in entry.cf_assignments:
names = assmt.type_dependencies()
assmt_to_names[assmt] = names
assmts = set()
for node in names:
assmts.update(node.cf_state)
dependencies[assmt] = assmts
if entry.type is unspecified_type:
assignments.update(entry.cf_assignments)
else:
assmts_resolved.update(entry.cf_assignments)
def infer_name_node_type(node):
types = [assmt.inferred_type for assmt in node.cf_state]
if not types:
node_type = py_object_type
else:
entry = node.entry
node_type = spanning_type(
types, entry.might_overflow, entry.pos)
node.inferred_type = node_type
def infer_name_node_type_partial(node):
types = [assmt.inferred_type for assmt in node.cf_state
if assmt.inferred_type is not None]
if not types:
return
entry = node.entry
return spanning_type(types, entry.might_overflow, entry.pos)
def resolve_assignments(assignments):
resolved = set()
for assmt in assignments:
deps = dependencies[assmt]
                # All dependencies of this assignment are resolved
if assmts_resolved.issuperset(deps):
for node in assmt_to_names[assmt]:
infer_name_node_type(node)
# Resolve assmt
inferred_type = assmt.infer_type()
assmts_resolved.add(assmt)
resolved.add(assmt)
assignments.difference_update(resolved)
return resolved
def partial_infer(assmt):
partial_types = []
for node in assmt_to_names[assmt]:
partial_type = infer_name_node_type_partial(node)
if partial_type is None:
return False
partial_types.append((node, partial_type))
for node, partial_type in partial_types:
node.inferred_type = partial_type
assmt.infer_type()
return True
partial_assmts = set()
def resolve_partial(assignments):
# try to handle circular references
partials = set()
for assmt in assignments:
if assmt in partial_assmts:
continue
if partial_infer(assmt):
partials.add(assmt)
assmts_resolved.add(assmt)
partial_assmts.update(partials)
return partials
# Infer assignments
while True:
if not resolve_assignments(assignments):
if not resolve_partial(assignments):
break
inferred = set()
# First pass
for entry in scope.entries.values():
if entry.type is not unspecified_type:
continue
entry_type = py_object_type
if assmts_resolved.issuperset(entry.cf_assignments):
types = [assmt.inferred_type for assmt in entry.cf_assignments]
if types and all(types):
entry_type = spanning_type(
types, entry.might_overflow, entry.pos)
inferred.add(entry)
self.set_entry_type(entry, entry_type)
def reinfer():
dirty = False
for entry in inferred:
types = [assmt.infer_type()
for assmt in entry.cf_assignments]
new_type = spanning_type(types, entry.might_overflow, entry.pos)
if new_type != entry.type:
self.set_entry_type(entry, new_type)
dirty = True
return dirty
# types propagation
while reinfer():
pass
if verbose:
for entry in inferred:
message(entry.pos, "inferred '%s' to be of type '%s'" % (
entry.name, entry.type))
def find_spanning_type(type1, type2):
if type1 is type2:
result_type = type1
elif type1 is PyrexTypes.c_bint_type or type2 is PyrexTypes.c_bint_type:
# type inference can break the coercion back to a Python bool
# if it returns an arbitrary int type here
return py_object_type
else:
result_type = PyrexTypes.spanning_type(type1, type2)
if result_type in (PyrexTypes.c_double_type, PyrexTypes.c_float_type,
Builtin.float_type):
# Python's float type is just a C double, so it's safe to
# use the C type instead
return PyrexTypes.c_double_type
return result_type
def simply_type(result_type, pos):
if result_type.is_reference:
result_type = result_type.ref_base_type
if result_type.is_const:
result_type = result_type.const_base_type
if result_type.is_cpp_class:
result_type.check_nullary_constructor(pos)
if result_type.is_array:
result_type = PyrexTypes.c_ptr_type(result_type.base_type)
return result_type
def aggressive_spanning_type(types, might_overflow, pos):
return simply_type(reduce(find_spanning_type, types), pos)
def safe_spanning_type(types, might_overflow, pos):
result_type = simply_type(reduce(find_spanning_type, types), pos)
if result_type.is_pyobject:
# In theory, any specific Python type is always safe to
# infer. However, inferring str can cause some existing code
# to break, since we are also now much more strict about
# coercion from str to char *. See trac #553.
if result_type.name == 'str':
return py_object_type
else:
return result_type
elif result_type is PyrexTypes.c_double_type:
# Python's float type is just a C double, so it's safe to use
# the C type instead
return result_type
elif result_type is PyrexTypes.c_bint_type:
# find_spanning_type() only returns 'bint' for clean boolean
# operations without other int types, so this is safe, too
return result_type
elif result_type.is_ptr:
# Any pointer except (signed|unsigned|) char* can't implicitly
# become a PyObject, and inferring char* is now accepted, too.
return result_type
elif result_type.is_cpp_class:
# These can't implicitly become Python objects either.
return result_type
elif result_type.is_struct:
# Though we have struct -> object for some structs, this is uncommonly
# used, won't arise in pure Python, and there shouldn't be side
# effects, so I'm declaring this safe.
return result_type
# TODO: double complex should be OK as well, but we need
# to make sure everything is supported.
elif (result_type.is_int or result_type.is_enum) and not might_overflow:
return result_type
return py_object_type
def get_type_inferer():
return SimpleAssignmentTypeInferer()
|
jos4uke/getSeqFlankBlatHit
|
lib/python2.7/site-packages/Cython/Compiler/TypeInference.py
|
Python
|
gpl-2.0
| 20,899
|
[
"VisIt"
] |
a87d51b1a43c006c893980bcd942a7bf996acda0952b45571617e4f707fa08e4
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test the Hirshfeld Method in cclib"""
import sys
import os
import logging
import unittest
import numpy
from cclib.method import Hirshfeld, volume
from cclib.parser import Psi4
from cclib.io import ccread
from cclib.method.calculationmethod import MissingAttributeError
from numpy.testing import assert_allclose
from ..test_data import getdatafile
class HirshfeldTest(unittest.TestCase):
"""Hirshfeld method tests."""
def setUp(self):
super(HirshfeldTest, self).setUp()
self.parse()
def parse(self):
self.data, self.logfile = getdatafile(Psi4, "basicPsi4-1.2.1", ["water_mp2.out"])
self.volume = volume.Volume((-4, -4, -4), (4, 4, 4), (0.2, 0.2, 0.2))
def testmissingrequiredattributes(self):
"""Is an error raised when required attributes are missing?"""
for missing_attribute in Hirshfeld.required_attrs:
self.parse()
delattr(self.data, missing_attribute)
with self.assertRaises(MissingAttributeError):
trialBader = Hirshfeld(
self.data, self.volume, os.path.dirname(os.path.realpath(__file__))
)
def test_proatom_read(self):
"""Are proatom densities imported correctly?"""
self.parse()
self.analysis = Hirshfeld(self.data, self.volume, os.path.dirname(os.path.realpath(__file__)))
refH_den = [
2.66407645e-01,
2.66407645e-01,
2.66407643e-01,
2.66407612e-01,
2.66407322e-01,
] # Hydrogen first five densities
refH_r = [
1.17745807e-07,
4.05209491e-06,
3.21078677e-05,
1.39448474e-04,
4.35643929e-04,
] # Hydrogen first five radii
refO_den = [
2.98258510e02,
2.98258510e02,
2.98258509e02,
2.98258487e02,
2.98258290e02,
] # Oxygen first five densities
refO_r = [
5.70916728e-09,
1.97130512e-07,
1.56506399e-06,
6.80667366e-06,
2.12872046e-05,
] # Oxygen first five radii
assert_allclose(self.analysis.proatom_density[0][0:5], refO_den, rtol=1e-3)
assert_allclose(self.analysis.proatom_density[1][0:5], refH_den, rtol=1e-3)
assert_allclose(self.analysis.proatom_density[2][0:5], refH_den, rtol=1e-3)
def test_water_charges(self):
""" Are Hirshfeld charges calculated correctly for water?
Note. Table 1 in doi:10.1007/BF01113058 reports Hirshfeld charge for Hydrogen atom as
0.11 when STO-3G basis set was used and
0.18 when 6-311G** basis set was used.
Here, Psi4 calculation was done using STO-3G.
"""
self.parse()
# use precalculated fine cube file
imported_vol = volume.read_from_cube(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "water_fine.cube")
)
analysis = Hirshfeld(self.data, imported_vol, os.path.dirname(os.path.realpath(__file__)))
analysis.calculate()
# Check assigned charges
assert_allclose(analysis.fragcharges, [-0.29084274, 0.14357639, 0.14357639], atol=0.1)
def test_chgsum_h2(self):
""" Are Hirshfeld charges for hydrogen atoms in nonpolar H2 small as expected?
"""
h2path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "h2.out")
data = ccread(h2path)
vol = volume.Volume((-3, -3, -3), (3, 3, 3), (0.1, 0.1, 0.1))
analysis = Hirshfeld(data, vol, os.path.dirname(os.path.realpath(__file__)))
analysis.calculate()
self.assertAlmostEqual(numpy.sum(analysis.fragcharges), 0, delta=1e-2)
self.assertAlmostEqual(analysis.fragcharges[0], analysis.fragcharges[1], delta=1e-6)
def test_chgsum_co(self):
""" Are Hirshfeld charges for carbon monoxide reported as expected?
Note. Table 1 in doi:10.1007/BF01113058 reports Hirshfeld charge for Carbon atom as
0.06 when STO-3G basis set was used and
0.14 when 6-311G** basis set was used.
Here, Psi4 calculation was done using STO-3G.
"""
copath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "co.out")
data = ccread(copath)
vol = volume.read_from_cube(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "co.cube")
)
analysis = Hirshfeld(data, vol, os.path.dirname(os.path.realpath(__file__)))
analysis.calculate()
self.assertAlmostEqual(numpy.sum(analysis.fragcharges), 0, delta=1e-2)
assert_allclose(analysis.fragcharges, [ 0.10590126, -0.11277786], atol=1e-3)
|
ATenderholt/cclib
|
test/method/testhirshfeld.py
|
Python
|
bsd-3-clause
| 5,023
|
[
"Psi4",
"cclib"
] |
5f6724ffb6c4dc44976f6c8e44404a6f33b8aad205eb0ecb413ae13cf958721e
|
# -*- coding: utf-8 -*-
# This file is part of MOOSE simulator: http://moose.ncbs.res.in.
# MOOSE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MOOSE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MOOSE. If not, see <http://www.gnu.org/licenses/>.
"""setup.py:
Script to install python targets.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import os
from distutils.core import setup
script_dir = os.path.dirname( os.path.abspath( __file__ ) )
version = '3.1'
try:
with open( os.path.join( script_dir, '..', 'VERSION'), 'r' ) as f:
version = f.read( )
except Exception as e:
print( 'Failed to read VERSION %s' % e )
print( 'Using default 3.1' )
try:
import importlib.machinery
suffix = importlib.machinery.EXTENSION_SUFFIXES[0]
except Exception as e:
suffix = '.so'
setup(
name='moose',
version=version,
description='MOOSE python scripting module.',
author='MOOSERes',
author_email='bhalla@ncbs.res.in',
maintainer='Dilawar Singh',
maintainer_email='dilawars@ncbs.res.in',
url='http://moose.ncbs.res.in',
packages=[
'rdesigneur'
, 'moose'
, 'moose.SBML'
, 'moose.neuroml'
, 'moose.genesis'
, 'moose.chemUtil'
, 'moose.merge'
],
package_dir = {
'moose' : 'moose'
, 'rdesigneur' : 'rdesigneur'
},
package_data = { 'moose' : ['_moose' + suffix] },
)
|
dharmasam9/moose-core
|
python/setup.py
|
Python
|
gpl-3.0
| 2,269
|
[
"MOOSE"
] |
edc1791e50aea84680c3faf32323467a72fcac0bcff5909442c9b46619a052fd
|
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1
import utils
import scipy.sparse
import tensorflow as tf
# Global parameters
dataset_name = 'standard'
patch_size = (32, 32) # (patch_height, patch_width)
compression_percent = 60
# Data acquisition
# Generate original images
utils.generate_original_images(dataset_name)
# Generate training set
train_name_list = ['airplane'] # optional, default is in generate_train_images()
utils.generate_train_images(dataset_name, train_name_list)
# utils.generate_train_images(dataset_name)
# Generate test set
# test_name_list = ['fruits', 'frymire']
# utils.generate_test_images(dataset_name, test_name_list)
utils.generate_test_images(dataset_name)
# Pre-process data
# Load train and test sets
data_paths = utils.get_data_paths(dataset_name)
train_image_list, train_name_list = utils.load_images(data_paths['train'], file_ext='.png')
test_image_list, test_name_list = utils.load_images(data_paths['test'], file_ext='.png')
# Split in non-overlapping patches and vectorize
test_set_ref = utils.generate_vec_set(test_image_list, patch_size)
full_train_set_ref = utils.generate_vec_set(train_image_list, patch_size)
train_set_ref, val_set_ref \
= utils.generate_cross_validation_sets(full_train_set_ref, fold_number=5, fold_combination=5)
# Mix and compress train and test sets
mm_type = 'gaussian-rip' # or 'bernoulli-rip'
M = utils.create_measurement_model(mm_type, patch_size, compression_percent)
train_set = np.matmul(M, train_set_ref)
val_set = np.matmul(M, val_set_ref)
test_set = np.matmul(M, test_set_ref) # in this case identical to np.dot()
# Wavelet parameters
patch_vec = train_set_ref[:, 0]  # reference patch (immediately overridden below)
patch_vec = np.dot(M.transpose(), train_set[:, 0])  # back-projection of the measurements
patch_mat = utils.reshape_vec_in_patch(patch_vec, patch_size)
# TODO: configuration of the network
algorithm = 'ISTA' # -> to create the network
transform_name = ['wavelet', 'wavelet', 'wavelet', 'wavelet']
wavelet_type = ['db1', 'db2', 'db4', 'db4']
level = [1, 2, 2, 1]
mode = ['symmetric', 'symmetric', 'symmetric', 'periodization']
transform_list = utils.generate_transform_list(patch_size, transform_name, wavelet_type, level, mode)
# Parameters set up (ISTA)
lmdb = 1e-2
# L: Lipschitz constant of the data-fidelity gradient, i.e. the largest eigenvalue of M^T M
L, _ = scipy.sparse.linalg.eigsh(np.dot(M.transpose(), M), k=1, which='LM')
# Filter matrix
We = 1/L*M.transpose()
# Mutual inhibition matrix
S = np.eye(M.shape[1]) - 1/L*np.dot(M.transpose(), M)
# TODO: bounds for theta?
theta = lmdb/L
# theta = lmdb/(2*L)?
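# Added sketch (assumption: plain ISTA written with the We, S and scalar theta
# defined above; this helper is not part of the original script). One iteration
# refines the sparse estimate x from a measurement vector y:
def ista_step(x, y, We=We, S=S, theta=theta):
    # x_{k+1} = soft_threshold(We @ y + S @ x_k, theta)
    z = np.dot(We, y) + np.dot(S, x)
    return np.sign(z) * np.maximum(np.abs(z) - theta, 0.0)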
# Wavelet decomposition
coeffs_vec = []
bk_mat = []
scale_factor = np.sqrt(len(transform_list))
# scale factor can be applied either to the patch_vec or to the coeffs (i.e. cv)
for transform in transform_list:
cv, bk = utils.wavelet_decomposition(patch_vec/scale_factor, transform)
coeffs_vec.append(cv)
bk_mat.append(bk)
# THRESHOLDING
# Theoretical threshold
theta = [] # MUST always be a list even if there is only one decomposition basis
threshold_fact = 0.4
for cv in coeffs_vec:
#theta.append(np.linspace(0, 1, cv.shape[0])*threshold_fact*np.linalg.norm(cv, ord=np.inf))
theta.append(threshold_fact*np.linalg.norm(cv, ord=np.inf))
# Plot coeffs and thresholds
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
for i, (cv, transform, th) in enumerate(zip(coeffs_vec, transform_list, theta)):
utils.plot_decomposition_coeffs(cv, th,
label='{}, level={}, mode={}'.format(transform['wavelet_type'],
transform['level'],
transform['mode']))
ax.legend()
# plt.show()
# Wavelet reconstruction
patch_vec_rec = np.zeros((np.prod(patch_size)))
scale_factor = np.sqrt(len(transform_list))
for i, (cv, bk, tl) in enumerate(zip(coeffs_vec, bk_mat, transform_list)):
patch_vec_rec += utils.wavelet_reconstruction(cv, bk, tl)/scale_factor
patch_mat_rec = utils.reshape_vec_in_patch(patch_vec_rec, patch_size)
fig = plt.figure(num=2, figsize=(15, 5))
fig.suptitle('Wavelet dec + rec check', fontsize=18)
# Patch before wavelet decomposition
ax1 = fig.add_subplot(1, 3, 1)
p1 = ax1.imshow(patch_mat, cmap='gray')
ax1.set_axis_off()
ax1.set_title('Reference patch', fontsize=12)
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax1)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(p1, cax=cax)
# Patch after wavelet decompostion and reconstruction
ax2 = fig.add_subplot(1, 3, 2)
p2 = ax2.imshow(patch_mat_rec, cmap='gray')
ax2.set_axis_off()
ax2.set_title('Reconstructed patch', fontsize=12)
ax2.set_ylim(ax1.get_ylim())
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(p2, cax=cax)
# Diff
ax3 = fig.add_subplot(1, 3, 3)
p3 = ax3.imshow(np.abs(patch_mat - patch_mat_rec), cmap='gray')
ax3.set_axis_off()
ax3.set_title('Absolute difference', fontsize=12)
ax3.set_ylim(ax1.get_ylim())
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax3)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(p3, cax=cax)
plt.show()
print(np.linalg.norm(patch_mat_rec))
print(np.linalg.norm(patch_mat))
print('done')
# print(np.linalg.norm(M, axis=0))
# Normalize by the norm of the columns
# normc_M = np.linalg.norm(M, axis=0)
|
mdeff/ntds_2016
|
project/reports/compressed_sensing/main.py
|
Python
|
mit
| 5,839
|
[
"Gaussian"
] |
b124de1832f8e8caddc35b13721431b7c141407e01ec366dc85e6949cb121c74
|
import os
import logging
import unittest
import deepchem as dc
logger = logging.getLogger(__name__)
class TestDrop(unittest.TestCase):
"""
Test how loading of malformed compounds is handled.
Called TestDrop since these compounds were silently and erroneously dropped.
"""
def test_drop(self):
"""Test on dataset where RDKit fails on some strings."""
current_dir = os.path.dirname(os.path.realpath(__file__))
logger.info("About to load emols dataset.")
dataset_file = os.path.join(current_dir, "mini_emols.csv")
# Featurize emols dataset
logger.info("About to featurize datasets.")
featurizer = dc.feat.CircularFingerprint(size=1024)
emols_tasks = ['activity']
loader = dc.data.CSVLoader(
tasks=emols_tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(dataset_file)
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
assert len(X) == len(y) == len(w) == len(ids)
|
deepchem/deepchem
|
deepchem/data/tests/test_drop.py
|
Python
|
mit
| 982
|
[
"RDKit"
] |
9fe94eb55d4d55484a2ea2527f09f120d197a335c8316f0e9742ce6594daef83
|
# coding: utf-8
# In[ ]:
#!/bin/env python
"""
This solves for the terms common scalar phase, tec, and complex gain using MH algorithm.
author: Joshua Albert
albert@strw.leidenuniv.nl
"""
import numpy as np
import pylab as plt
from dask import delayed
def phase_unwrapp1d(theta):
    '''The difference between two timesteps is assumed unaliased, i.e.
    |theta_i+1 - theta_i| < pi, so wrap(theta_i+1 - theta_i) is the true
    gradient and can be integrated to recover the unwrapped phase.'''
def wrap(phi):
res = 1j*phi
np.exp(res,out=res)
return np.atleast_1d(np.angle(res))
grad = wrap(theta[1:] - theta[:-1])
unwrapped_theta = np.zeros(len(theta),dtype=np.double)
np.cumsum(grad,out=unwrapped_theta[1:])
unwrapped_theta += theta[0]
return unwrapped_theta
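# Added sanity check (not in the original script): a smooth ramp wrapped into
# (-pi, pi] is recovered exactly, since successive samples differ by < pi.
_ramp = np.linspace(0., 6.*np.pi, 50)
assert np.allclose(phase_unwrapp1d(np.angle(np.exp(1j*_ramp))), _ramp)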
TECU = 10e16
def vertex(x1,x2,x3,y1,y2,y3):
'''Given three pairs of (x,y) points return the vertex of the
parabola passing through the points. Vectorized and common expression reduced.'''
#Define a sequence of sub expressions to reduce redundant flops
x0 = 1/x2
x4 = x1 - x2
x5 = 1/x4
x6 = x1**2
x7 = 1/x6
x8 = x2**2
x9 = -x7*x8 + 1
x10 = x0*x1*x5*x9
x11 = 1/x1
x12 = x3**2
x13 = x11*x12
x14 = 1/(x0*x13 - x0*x3 - x11*x3 + 1)
x15 = x14*y3
x16 = x10*x15
x17 = x0*x5
x18 = -x13 + x3
x19 = y2*(x1*x17 + x14*x18*x6*x9/(x4**2*x8))
x20 = x2*x5
x21 = x11*x20
x22 = x14*(-x12*x7 + x18*x21)
x23 = y1*(-x10*x22 - x21)
x24 = x16/2 - x19/2 - x23/2
x25 = -x17*x9 + x7
x26 = x0*x1*x14*x18*x5
x27 = 1/(-x15*x25 + y1*(x20*x7 - x22*x25 + x7) + y2*(-x17 + x25*x26))
x28 = x24*x27
return x28,x15 + x22*y1 + x24**2*x27 - x26*y2 + x28*(-x16 + x19 + x23)
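# Added sanity check (not in the original script): the parabola
# y = (x - 2)**2 + 1 passes through (1, 2), (2, 1), (3, 2) with vertex (2, 1).
_xv, _yv = vertex(1., 2., 3., 2., 1., 2.)
assert np.allclose([_xv, _yv], [2., 1.])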
def clock_tec_solveMH(obs_phase, freqs, times, m0, cov, Cd_error, Ct_ratio, plot = False):
'''Solves for the terms phase(CS,TEC,delay) = CS + e^2/(4pi ep0 me c) * TEC/nu + 2pi*nu*delay
Assumes phase is in units of radians, freqs in is units of Hz,
and times is in units of seconds with arbitrary offset
obs_phase is shape (num_freqs, num_times)'''
binning = 50
convergence = binning**2 * 3
def calc_phase(m, freqs):
phase = np.multiply.outer(np.ones(len(freqs)),m[:,2]) + 8.44797256e-7*TECU * np.multiply.outer(1./freqs,m[:,1]) + 2.*np.pi*np.multiply.outer(freqs,m[:,0])
return phase
def neglogL(obs_phase,phase,CdCt):
L2 = obs_phase - phase
L2 *= L2
L2 /= (CdCt+1e-15)
return np.sum(L2,axis=0)/2.
def sample_prior(last, cov):
"""Last is tau,tec,cs in matrix of size [len(times),3], return similar shaped next point"""
return last + np.random.multivariate_normal(mean = [0,0,0], cov=cov,size = last.shape[0])
cs = m0[:,2]
tec = m0[:,1]
tau = m0[:,0]
print("Initial CS: {}".format(cs))
print("Initial TEC: {}".format(tec))
print("Initial delay: {}".format(tau))
m = m0.copy()
# if plot:
# plt.plot(times,cs0,label="CS0")
# plt.plot(times,tec0,label="TEC0")
# plt.plot(times,delay0,label="delay0")
# plt.legend(frameon=False)
# plt.show()
Ct = (Ct_ratio*np.abs(obs_phase))**2
Cd = (Cd_error*np.pi/180.)**2
CdCt = Cd+Ct
Si = neglogL(obs_phase,calc_phase(m,freqs),CdCt)
print("Initial Si: {}".format(Si))
max_iter = 100*convergence
posterior = np.zeros([convergence,len(times),3],dtype=np.double)
multiplicity = np.zeros([convergence,len(times)],dtype=np.double)
posterior[0,:,:] = m
minS = Si
minSol = m.copy()
    accepted = np.ones(len(times), dtype=int)  # np.int was removed in NumPy >= 1.24
cov_prior = np.diag([1e-10, 1e-6,0.5])**2 + cov
iter = 1
while np.max(accepted) < convergence and iter < max_iter:
#sample
last = np.array([posterior[accepted[i] - 1,i,:] for i in range(len(times))])
m_j = sample_prior(last,cov_prior)
Sj = neglogL(obs_phase,calc_phase(m_j,freqs),CdCt)
Lj = np.exp(-Sj)
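        # Metropolis rule: always accept a lower misfit Sj; otherwise accept
        # with probability exp(Si - Sj), compared in log space to avoid
        # underflow for large misfits.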
accept_mask = np.bitwise_or(Sj < Si, np.log(np.random.uniform(size=len(Sj))) < Si - Sj)
#print(accept_mask)
Si[accept_mask] = Sj[accept_mask]
for i in range(len(times)):
if accept_mask[i]:
posterior[accepted[i],i,:] = m_j[i,:]
multiplicity[accepted[i],i] += 1
accepted[i] += 1
else:
multiplicity[accepted[i]-1,i] += 1
if np.any(accept_mask):
#print(m_j)
#print("{} accepted".format(np.sum(accept_mask)))
pass
maxL_mask = Sj < minS
minSol[maxL_mask,:] = m_j[maxL_mask]
minS[maxL_mask] = Sj[maxL_mask]
iter += 1
if iter != max_iter:
print("Converged in {} steps with mean acceptance rate of {}".format(iter,np.mean(accepted)/iter))
posteriors = []
multiplicities = []
means = []
stds = []
maxLs = []
for i in range(len(times)):
posteriors.append(posterior[:accepted[i],i,:])
multiplicities.append(multiplicity[:accepted[i],i])
means.append(np.sum(posteriors[i].T*multiplicities[i],axis=1)/np.sum(multiplicities[i]))
stds.append(np.sqrt(np.sum(posteriors[i].T**2*multiplicities[i],axis=1)/np.sum(multiplicities[i]) - means[i]**2))
maxLs.append(minSol[i,:])
print ("Sol {}, (Gaussian) sol is {} +- {}".format(i, means[i],stds[i]))
print(" maxL sol is {}".format(maxLs[i]))
if plot:
plt.hist(posteriors[0][:,0],weights = multiplicities[0],label='tau')
plt.legend(frameon=False)
plt.show()
plt.hist(posteriors[0][:,1],weights = multiplicities[0],label='tec')
plt.legend(frameon=False)
plt.show()
plt.hist(posteriors[0][:,2],weights = multiplicities[0],label='cs')
plt.legend(frameon=False)
plt.show()
return maxLs
@delayed
def clock_tec_solve_dask(obs_phase, freqs, m0, cov, Cd_error, Ct_ratio, plot=False):
'''Solves for the terms phase(CS,TEC,delay) = CS + e^2/(4pi ep0 me c) * TEC/nu + 2pi*nu*delay
Assumes phase is in units of radians, freqs in is units of Hz
obs_phase is shape (num_freqs)'''
binning = 50
convergence = binning**2 * 3
def calc_phase(m, freqs):
tau,tec,cs = m[:]
phase = freqs*(tau * np.pi * 2.)
phase += (8.44797256e-7 * TECU * tec)/freqs
phase += cs
return phase
def neglogL(obs_phase,phase,CdCt):
L2 = obs_phase - phase
L2 *= L2
L2 /= (CdCt+1e-15)
return np.sum(L2)/2.
def sample_prior(last, cov):
"""Last is tau,tec,cs in matrix of size [len(times),3], return similar shaped next point"""
return last + np.random.multivariate_normal(mean = [0,0,0], cov=cov)
#m0 = last[0] + np.random.uniform(low = -np.sqrt(cov[0,0])/2., high = np.sqrt(cov[0,0])/2.)
#m1 = last[1] + np.random.uniform(low = -np.sqrt(cov[1,1])/2., high = np.sqrt(cov[1,1])/2.)
#m2 = last[2] + np.random.uniform(low = -np.sqrt(cov[2,2])/2., high = np.sqrt(cov[2,2])/2.)
#return np.array([m0,m1,m2])
cs = m0[2]
tec = m0[1]
tau = m0[0]
print("Initial CS: {}".format(cs))
print("Initial TEC: {}".format(tec))
print("Initial delay: {}".format(tau))
m = m0.copy()
Ct = (Ct_ratio*np.abs(obs_phase))**2
Cd = (Cd_error*np.pi/180.)**2
CdCt = Cd+Ct
Si = neglogL(obs_phase,calc_phase(m,freqs),CdCt)
print("Initial Si: {}".format(Si))
max_iter = 100*convergence
posterior = np.zeros([convergence,3],dtype=np.double)
multiplicity = np.zeros([convergence],dtype=np.double)
posterior[0,:] = m
minS = Si
minSol = m.copy()
accepted = 1
cov_prior = np.diag([1e-9, 1e-4,1e-1])**2 + cov
iter = 1
while accepted < convergence and iter < max_iter:
#sample
#m_j = sample_prior(posterior[accepted-1],cov_prior)
m_j = sample_prior(minSol,cov_prior)
#m_j = sample_prior(m0,cov_prior)
Sj = neglogL(obs_phase,calc_phase(m_j,freqs),CdCt)
Lj = np.exp(-Sj)
if Sj < Si or np.log(np.random.uniform()) < Si - Sj:
Si = Sj
posterior[accepted,:] = m_j
multiplicity[accepted] += 1
accepted += 1
else:
multiplicity[accepted-1] += 1
if Sj < minS:
minSol = m_j
minS = Sj
iter += 1
if iter != max_iter:
print("Converged in {} steps with mean acceptance rate of {}".format(iter,np.mean(accepted)/iter))
posterior = posterior[:accepted,:]
multiplicity = multiplicity[:accepted,]
mean = np.sum(posterior.T*multiplicity,axis=1)/np.sum(multiplicity)
std = np.sqrt(np.sum(posterior.T**2*multiplicity,axis=1)/np.sum(multiplicity) - mean**2)
maxLsol = minSol
print ("(Gaussian) sol is {} +- {}".format(mean,std))
print(" maxL sol is {}".format(maxLsol))
    if plot:
        plt.hist(posterior[:, 0], weights=multiplicity, label='tau')
        plt.legend(frameon=False)
        plt.show()
        plt.hist(posterior[:, 1], weights=multiplicity, label='tec')
        plt.legend(frameon=False)
        plt.show()
        plt.hist(posterior[:, 2], weights=multiplicity, label='cs')
        plt.legend(frameon=False)
        plt.show()
return maxLsol
def clock_tec_solve_both(obs_phase, freqs, times, m0, cov, Cd_error, Ct_ratio):
    m, cov = least_squares_solve(obs_phase, freqs, times, Cd_error, Ct_ratio)
    m = clock_tec_solveMH(obs_phase, freqs, times, m, np.mean(cov, axis=0),
                          Cd_error, Ct_ratio, plot=True)
    return m
def test_clock_tec_solveMH():
times = np.arange(2)
freqs = np.linspace(110e6,170e6,100)
cs = times*0.01
tec = np.random.uniform(size=len(times))*0.01
delay = np.ones(len(times)) * 1e-9# 10ns
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec) + 2.*np.pi*np.multiply.outer(freqs,delay)
phase += 10.*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
plt.imshow(phase,origin='lower',extent=(times[0],times[-1],freqs[0],freqs[-1]),aspect='auto')
plt.colorbar()
plt.xlabel('times (s)')
plt.ylabel('freqs (Hz)')
plt.show()
    # Placeholder starting model and proposal covariance (assumed values; the
    # original call omitted these required arguments and would not run).
    m0 = np.zeros([len(times), 3])
    cov = np.diag([1e-18, 1e-4, 1e-2])**2
    clock_tec_solveMH(phase, freqs, times, m0, cov, Cd_error=10., Ct_ratio=0.01, plot=True)
def test_clock_tec_solve_l1l2():
times = np.arange(2)
freqs = np.linspace(110e6,170e6,100)
cs = times*0.00001
tec = np.random.uniform(size=len(times))*0.01
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec)
phase += 10.*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
plt.imshow(phase,origin='lower',extent=(times[0],times[-1],freqs[0],freqs[-1]),aspect='auto')
plt.colorbar()
plt.xlabel('times (s)')
plt.ylabel('freqs (Hz)')
plt.show()
m0,cov = least_squares_solve(phase, freqs, times,10,Ct_ratio=0.01)
print(m0)
l1data_l2model_solve(phase,freqs,times,10,Ct_ratio=0.01,m0=m0)
def test_clock_tec_solve_error():
import pylab as plt
times = np.arange(100)
Cd_errors = np.linspace(1,100,100)
f,(ax1,ax2,ax3) = plt.subplots(3,1)
ax1.set_yscale('log')
ax2.set_yscale('log')
ax3.set_yscale('log')
for num_freq in [10,100,1000,10000]:
freqs = np.linspace(110e6,170e6,num_freq)
sol_acc = []
for Cd_error in Cd_errors:
cs = times
tec = np.random.uniform(size=len(times))*0.01
delay = np.ones(len(times)) * 1e-9# 10ns
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec) + 2.*np.pi*np.multiply.outer(freqs,delay)
phase += Cd_error*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
#plt.imshow(phase,origin='lower',extent=(times[0],times[-1],freqs[0],freqs[-1]),aspect='auto')
#plt.colorbar()
#plt.xlabel('times (s)')
#plt.ylabel('freqs (Hz)')
#plt.show()
m,cov = least_squares_solve(phase, freqs, times,Cd_error,Ct_ratio=0.01)
m_exact = np.array([delay,tec,cs]).T
sol_acc.append(np.mean(np.abs(m - m_exact),axis=0))
sol_acc_ = np.array(sol_acc)
ax1.plot(Cd_errors,sol_acc_[:,0])
#plt.show()
ax2.plot(Cd_errors,sol_acc_[:,1])
#plt.show()
ax3.plot(Cd_errors,sol_acc_[:,2])
plt.show()
def test_clock_tec_solve():
import pylab as plt
times = np.arange(2)
freqs = np.linspace(110e6,170e6,1000)
cs = np.array([1,1])*0
tec = np.array([0.1,0.2])
delay = np.ones(len(times)) * 2e-9# 10ns
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec)# + 2.*np.pi*np.multiply.outer(freqs,delay)
phase += 5*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
#plt.imshow(phase,origin='lower',extent=(times[0],times[-1],freqs[0],freqs[-1]),aspect='auto')
#plt.colorbar()
#plt.xlabel('times (s)')
#plt.ylabel('freqs (Hz)')
#plt.show()
m,cov = least_squares_solve(phase, freqs, times,5,Ct_ratio=0.01)
print(m)
#m_exact = np.array([delay,tec,cs]).T
#clock_tec_solveMH(phase, freqs, times, m, np.max(cov,axis=0), 5, 0.01, plot = True)
def test_clock_tec_solve_dask():
np.random.seed(1234)
import pylab as plt
times = np.arange(2)
freqs = np.linspace(110e6,170e6,1000)
cs = np.array([1,1])
tec = np.array([0.1,0.2])
delay = np.ones(len(times)) * 2e-9# 10ns
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec) + 2.*np.pi*np.multiply.outer(freqs,delay)
phase += 15*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
#plt.imshow(phase,origin='lower',extent=(times[0],times[-1],freqs[0],freqs[-1]),aspect='auto')
#plt.colorbar()
#plt.xlabel('times (s)')
#plt.ylabel('freqs (Hz)')
#plt.show()
m,cov = least_squares_solve(phase, freqs, times,15,Ct_ratio=0.01)
m_exact = np.array([delay,tec,cs]).T
import dask.array as da
solsMH = [da.from_delayed(clock_tec_solve_dask(phase[:,i],freqs,m[i,:], cov[i,:,:],15,0.01),shape=(3,),dtype=np.double) for i in range(len(times))]
sol_stacked = da.stack(solsMH, axis = 0)
sol = sol_stacked.compute()
print(sol)
if __name__ == '__main__':
    # test_clock_tec_solve()
    # test_clock_tec_solve_dask()
    # Note: test_clock_tec_solve_l1l2 relies on least_squares_solve and
    # l1data_l2model_solve, which are defined in a later notebook cell, so run
    # the whole notebook (or reorder the cells) before invoking it here.
    test_clock_tec_solve_l1l2()
# In[ ]:
def derive_solution():
from sympy import symbols, Matrix, cse, cos, sin, Abs, Rational,acos,asin
cs,K,tec,nu,phase,sigma_phase,alpha,beta,tec_p,cs_p,sigma_tec,sigma_cs = symbols('cs K tec nu phase sigma_phase alpha beta tec_p cs_p sigma_tec sigma_cs', real=True)
g = K*tec/nu + cs*alpha
L = Abs(g - phase)/sigma_phase + beta*((tec - tec_p)**Rational(2)/sigma_tec**Rational(2)/Rational(2) + (cs - cs_p)**Rational(2)/sigma_cs**Rational(2)/Rational(2))
req,res = cse(L,optimizations='basic')
for line in req:
print("{} = {}".format(line[0],line[1]).replace("Abs","np.abs").replace("cos","np.cos").replace("sin","np.sin").replace("sign","np.sign"))
print("{}".format(res[0]).replace("Abs","np.abs").replace("cos","np.cos").replace("sin","np.sin").replace("sign","np.sign"))
print()
grad = Matrix([sigma_tec**Rational(2)*L.diff(tec), sigma_cs**Rational(2)*L.diff(cs)])
req,res = cse(grad,optimizations='basic')
for line in req:
print("{} = {}".format(line[0],line[1]).replace("Abs","np.abs").replace("cos","np.cos").replace("sin","np.sin").replace("sign","np.sign"))
print("{}".format(res[0]).replace("Abs","np.abs").replace("cos","np.cos").replace("sin","np.sin").replace("sign","np.sign"))
print()
H = Matrix([[L.diff(tec).diff(tec),L.diff(tec).diff(cs)],[L.diff(cs).diff(tec),L.diff(cs).diff(cs)]])
req,res = cse(H,optimizations='basic')
for line in req:
print("{} = {}".format(line[0],line[1]).replace("Abs","np.abs").replace("cos","np.cos").replace("sin","np.sin").replace("sign","np.sign"))
print("{}".format(res[0]).replace("Abs","np.abs").replace("cos","np.cos").replace("sin","np.sin").replace("sign","np.sign"))
derive_solution()
# In[ ]:
import numpy as np
import pylab as plt
def phase_unwrapp1d(theta):
    '''The difference between two timesteps is assumed unaliased, i.e.
    |theta_i+1 - theta_i| < pi, so wrap(theta_i+1 - theta_i) is the true
    gradient and can be integrated to recover the unwrapped phase.'''
def wrap(phi):
res = 1j*phi
np.exp(res,out=res)
return np.atleast_1d(np.angle(res))
grad = wrap(theta[1:] - theta[:-1])
unwrapped_theta = np.zeros(len(theta),dtype=np.double)
np.cumsum(grad,out=unwrapped_theta[1:])
unwrapped_theta += theta[0]
return unwrapped_theta
TECU = 10e16
def least_squares_solve(obs_phase,freqs,times,Cd_error,Ct_ratio=0.01,m0=None):
'''Solves for the terms phase(CS,TEC,delay) = CS + e^2/(4pi ep0 me c) * TEC/nu + 2pi*nu*delay
Assumes phase is in units of radians, freqs in is units of Hz,
and times is in units of seconds with arbitrary offset
obs_phase is shape (num_freqs, num_times)'''
f = np.multiply.outer(freqs,np.ones(len(times)))
def calc_phase(cs,tec,delay, freqs):
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU * np.multiply.outer(1./freqs,tec) + 2.*np.pi*np.multiply.outer(freqs,delay)
return phase
def neglogL(obs_phase,phase,CdCt):
'''Return per timestep'''
L2 = obs_phase - phase
L2 *= L2
L2 /= (CdCt + 1e-15)
return np.sum(L2,axis=0)/2.
def calc_grad(cs,tec,delay,f,obs_phase,CdCt):
grad = np.zeros([len(times),3],dtype=np.double)
phase = calc_phase(cs,tec,delay,f[:,0])
dd = obs_phase - phase
dd /= CdCt
#tau comp
gtau = dd*f
gtau *= -2.*np.pi
gtau = np.sum(gtau,axis=0)
#tec comp
gtec = dd/f
gtec *= -8.44797256e-7*TECU
gtec = np.sum(gtec,axis=0)
#cs comp
gcs = -dd
gcs = np.sum(gcs,axis=0)
grad[:,0] = gtau
grad[:,1] = gtec
grad[:,2] = gcs
return grad
def calc_Hessian(f,CdCt):
H = np.zeros([len(times),3,3],dtype = np.double)
x0 = f/CdCt
H[:,0,0] = np.sum(4*np.pi**2 * x0*f,axis=0)
H[:,0,1] = np.sum(2*np.pi*8.44797256e-7*TECU/CdCt,axis=0)
H[:,0,2] = np.sum(2*np.pi*x0,axis=0)
H[:,1,1] = np.sum((8.44797256e-7*TECU)**2/(f**2*CdCt),axis=0)
H[:,1,2] = np.sum(8.44797256e-7*TECU/(f*CdCt),axis=0)
H[:,2,2] = np.sum(1./CdCt,axis=0)
H[:,1,0] = H[:,0,1]
H[:,2,0] = H[:,0,2]
H[:,2,1] = H[:,1,2]
return H
def inv_Hessian(H):
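        # Closed-form inverse of the symmetric 3x3 Hessian with common
        # subexpressions factored out; row/column order is (tau, tec, cs).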
a = H[:,0,0]
b = H[:,0,1]
c = H[:,0,2]
d = H[:,1,1]
e = H[:,1,2]
f = H[:,2,2]
x0 = 1/a
x1 = b**2
x2 = x0*x1
x3 = 1/(d - x2)
x4 = b*c
x5 = e - x0*x4
x6 = x3*x5
x7 = b*x6 - c
x8 = a*d
x9 = -x1 + x8
x10 = 1/(a*e**2 + c**2*d - 2*e*x4 + f*x1 - f*x8)
x11 = x0*x10*x9
x12 = x10*x9
x13 = x0*x3*(-b + x12*x5*x7)
x14 = -x11*x7
x15 = x12*x6
Hinv = np.zeros([len(a),3,3])
Hinv[:,0,0] = x0*(-x11*x7**2 + x2*x3 + 1)
Hinv[:,0,1] = x13
Hinv[:,0,2] = x14
Hinv[:,1,0] = Hinv[:,0,1]
Hinv[:,1,1] = x3*(-x12*x3*x5**2 + 1)
Hinv[:,1,2] = x15
Hinv[:,2,0] = Hinv[:,0,2]
Hinv[:,2,1] = Hinv[:,1,2]
Hinv[:,2,2] = -x12
return Hinv
def calc_epsilon_n(dir,tau_i,tec_i,cs_i,freqs,CdCt,obs_phase,step=1e-3):
"""Approximate stepsize"""
g0 = calc_phase(cs_i,tec_i,tau_i, freqs)
gp = calc_phase(cs_i+step*dir[:,2], tec_i + step*dir[:,1], tau_i + step*dir[:,0], freqs)
Gm = (gp - g0)/step
dd = obs_phase - g0
epsilon_n = (np.sum(Gm*dd/CdCt,axis=0)/np.sum(Gm/CdCt*Gm,axis=0))
return epsilon_n
if m0 is None:
# come up with initial guess
cs0 = np.zeros(len(times),dtype=np.double)
delay0 = np.zeros(len(times),dtype=np.double)
tec0 = np.zeros(len(times),dtype=np.double)
# # d/dnu (phase*nu) = cs + 4pi*nu*delay
x0 = (freqs*obs_phase.T).T
x1 = ((x0[1:,:] - x0[:-1,:]).T/(freqs[1:] - freqs[:-1])).T
# # d^2/dnu^2 (phase*nu) = 4pi*delay
x2 = ((x1[1:,:] - x1[:-1,:]).T/(freqs[1:-1] - freqs[:-2])).T
tau0 = np.mean(x2,axis=0)/4./np.pi
        x3 = 2*np.pi*np.multiply.outer(freqs, tau0)  # use the estimated tau0 (delay0 is all zeros)
cs0 = np.mean(x1 - 2.*x3[1:,:],axis=0)
cs = cs0*0
tau = tau0
tec = tec0
else:
cs = m0[:,2]
tec = m0[:,1]
tau = m0[:,0]
#print("Initial CS: {}".format(cs0))
#print("Initial TEC: {}".format(tec0))
#print("Initial delay: {}".format(delay0))
Ct = (Ct_ratio*np.abs(obs_phase))**2
Cd = (Cd_error*np.pi/180.)**2
CdCt = Cd+Ct
S = neglogL(obs_phase,calc_phase(cs,tec,tau,freqs),CdCt)
#print("Initial neglogL: {}".format(S))
iter = 0
Nmax = 1
while iter < Nmax:
grad = calc_grad(cs,tec,tau,f,obs_phase,CdCt)
H = calc_Hessian(f,CdCt)
Hinv = inv_Hessian(H)
dir = np.einsum("ijk,ik->ij",Hinv,grad)
epsilon_n = calc_epsilon_n(dir,tau,tec,cs,freqs,CdCt,obs_phase,step=1e-3)
#print("epsilon_n: {}".format(epsilon_n))
cs, tec, tau = cs+epsilon_n*dir[:,2], tec + epsilon_n*dir[:,1], tau + epsilon_n*dir[:,0]
S = neglogL(obs_phase,calc_phase(cs,tec,tau,freqs),CdCt)
m = np.array([tau,tec,cs]).T
#print("Model: {}".format(m))
#print("iter {}: neglogL: {}, log|dm/m|: {}, |grad|: {}".format(iter, S, np.mean(np.log(np.abs(np.einsum("i,ij->ij",epsilon_n,dir)/m))),np.sum(np.abs(grad))))
iter += 1
#print(Hinv)
print("Final neglogL: {}".format(S))
return m,Hinv
def l1data_l2model_solve(obs_phase,freqs,times,Cd_error,Ct_ratio=0.01,m0=None):
'''Solves for the terms phase(CS,TEC,delay) = CS + e^2/(4pi ep0 me c) * TEC/nu
Delay is taken out.
Assumes phase is in units of radians, freqs in is units of Hz,
and times is in units of seconds with arbitrary offset
obs_phase is shape (num_freqs, num_times)'''
alpha = 1.
beta = 0.
def calc_phase(m, freqs):
tec = m[:,0]
cs = m[:,1]
phase = 8.44797256e-7*TECU * np.multiply.outer(1./freqs,tec) + alpha*cs
return phase
def neglogL(obs_phase,m,CdCt_phase,m0,cov_m,freqs):
'''Return per timestep'''
K = 8.44797256e-7*TECU
nu = np.multiply.outer(1./freqs,np.ones(obs_phase.shape[1]))
tec = m[:,0]
cs = m[:,1]
tec_p = m0[:,0]
cs_p = m0[:,1]
sigma_tec2 = cov_m[0]
sigma_cs2 = cov_m[1]
sigma_phase = np.sqrt(CdCt_phase)
phase = obs_phase
#return np.sum(np.abs(K*np.multiply.outer(1./freqs,tec) - phase)/sigma_phase,axis=0)
return beta*((tec - tec_p)**2/sigma_tec2 + (cs - cs_p)**2/sigma_cs2)/2 + np.sum(np.abs(K*np.multiply.outer(1./freqs,tec) + alpha*cs - phase)/sigma_phase,axis=0)
def calc_grad(obs_phase,m,CdCt_phase,m0,cov_m,freqs):
K = 8.44797256e-7*TECU
nu = np.multiply.outer(1./freqs,np.ones(obs_phase.shape[1]))
tec = m[:,0]
cs = m[:,1]
tec_p = m0[:,0]
cs_p = m0[:,1]
sigma_tec2 = cov_m[0]
sigma_cs2 = cov_m[1]
sigma_phase = np.sqrt(CdCt_phase)
phase = obs_phase
x0 = sigma_tec2
        x1 = K*nu  # d(model)/d(tec) = K/frequency; note `nu` holds 1/freqs
        x1_ = K*np.multiply.outer(1./freqs, tec)
x2 = np.sign(alpha*cs - phase + x1_)/sigma_phase
x3 = sigma_cs2
grad = np.zeros([obs_phase.shape[1],2])
grad[:,0] = x0*(beta*(tec - tec_p)/x0 + np.sum((x1*x2),axis=0))
grad[:,1] = x3 * (beta*(cs - cs_p)/x3 + np.sum(alpha*x2,axis=0))
return grad
def calc_epsilon_n(dir,m,freqs,CdCt,obs_phase,step=1e-3):
"""Approximate stepsize"""
g0 = calc_phase(m, freqs)
gp = calc_phase(m + step*dir, freqs)
Gm = (gp - g0)/step
dd = obs_phase - g0
epsilon_n = (np.sum(Gm*dd/CdCt,axis=0)/np.sum(Gm/CdCt*Gm,axis=0))
return epsilon_n
    if m0 is None:
        m = np.zeros([len(times), 2], dtype=np.double)
        m0 = m.copy()  # keep the prior terms below well-defined
    else:
        m = m0.copy()
cov_m = np.array([1e-4,1e-4])
#print( calc_phase(m,freqs) - obs_phase)
Ct = (Ct_ratio*np.abs(obs_phase))**2
Cd = (Cd_error*np.pi/180.)**2
CdCt = Cd+Ct
#print(np.sqrt(CdCt))
#print( np.sum(np.abs(calc_phase(m,freqs) - obs_phase)/np.sqrt(CdCt),axis=0))
S = neglogL(obs_phase,m,CdCt,m0,cov_m,freqs)
print("Initial neglogL: {}".format(S))
iter = 0
Nmax = 3
while iter < Nmax:
grad = calc_grad(obs_phase,m,CdCt,m0,cov_m,freqs)
dir = grad
epsilon_n = calc_epsilon_n(dir,m,freqs,CdCt,obs_phase,step=1e-3)
#print("epsilon_n: {}".format(epsilon_n))
m += dir*epsilon_n
S = neglogL(obs_phase,m,CdCt,m0,cov_m,freqs)
#print("Model: {}".format(m))
#print("iter {}: neglogL: {}, log|dm/m|: {}, |grad|: {}".format(iter, S, np.mean(np.log(np.abs(np.einsum("i,ij->ij",epsilon_n,dir)/m))),np.sum(np.abs(grad))))
iter += 1
#print(Hinv)
print("Final neglogL: {}".format(S))
return m
def test_clock_tec_solve_l1l2():
np.random.seed(1234)
times = np.arange(1)
freqs = np.linspace(110e6,170e6,100)
cs = times*0.00
tec = times + 0.01
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec)
phase += 10*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
#phase = np.angle(np.exp(1j*phase))
m0 = np.array([[0.,0.]])
#m0,cov = least_squares_solve(phase, freqs, times,10,Ct_ratio=1)
#print(m0)
    # m = m0[:, 1:3]  # only meaningful with the least_squares_solve m0 above; dead code otherwise
m = l1data_l2model_solve(phase,freqs,times,10,Ct_ratio=1,m0=m0)
rec_phase = np.multiply.outer(np.ones(len(freqs)),m[:,1]) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,m[:,0])
print(m)
plt.plot(freqs,phase)
plt.plot(freqs,rec_phase)
plt.show()
def test_clock_tec_solve_l1l2_tolerance():
times = np.arange(1)
freqs = np.linspace(110e6,170e6,100)
cs = times+0
tec0 = 0.0001
tec = times+tec0
phase = np.multiply.outer(np.ones(len(freqs)),cs) + 8.44797256e-7*TECU*np.multiply.outer(1./freqs,tec)
phase += 40*np.pi/180.*np.random.normal(size=[len(freqs),len(times)])
#phase = np.angle(np.exp(1j*phase))
m0,cov = least_squares_solve(phase, freqs, times,10,Ct_ratio=1)
dist = []
final = []
m0 = m0[:,1:3]
for tec in np.linspace(tec0-5,tec0 + 5,1000):
dist.append(tec - tec0)
m0[:,0] = tec
m0[:,1] = 0.
m = l1data_l2model_solve(phase,freqs,times,40,Ct_ratio=0.01,m0=m0)
#print(m)
final.append(m[0,0] - tec0)
plt.plot(dist,final)
plt.show()
test_clock_tec_solve_l1l2()
# In[ ]:
# In[ ]:
|
Joshuaalbert/RadioAstronomyThings
|
src/rathings/notebooks/tec_solver.py
|
Python
|
apache-2.0
| 27,398
|
[
"Gaussian"
] |
aa79c4075432f2b2682582a2c2d3a61ab24743e14e11a01c9f56f4400e267347
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @file gd_algorithm.py
# @brief
# @author QRS
# @blog qrsforever.github.io
# @version 1.0
# @date 2019-09-23 11:25:05
################################ jupyter-vim #######################################
# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim
# %pylab --no-import-all # noqa
#####################################################################################
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
np.random.seed(678)
#####################################################################################
# <codecell> activity function and derivative function
#####################################################################################
def sigmoid(x):
return 1 / (1 + np.exp(-1 * x))
def d_sigmoid(x):
return sigmoid(x) * (1 - sigmoid(x))
##
def tanh(x):
return np.tanh(x)
def d_tanh(x):
return 1 - np.tanh(x) ** 2
##
def relu(x):
mask = (x > 0.0) * 1.0
return x * mask
def d_relu(x):
mask = (x > 0.0) * 1.0
return mask
##
def elu(matrix):
mask = (matrix <= 0) * 1.0
less_zero = matrix * mask
safe = (matrix > 0) * 1.0
greater_zero = matrix * safe
    final = 3.0 * (np.exp(less_zero) - 1) * mask  # ELU branch for x <= 0; * mask (not * less_zero) is consistent with d_elu below
return greater_zero + final
def d_elu(matrix):
safe = (matrix > 0) * 1.0
mask2 = (matrix <= 0) * 1.0
temp = matrix * mask2
final = (3.0 * np.exp(temp))*mask2
return (matrix * safe) + final
#####################################################################################
# <codecell> train data
#####################################################################################
mnist = input_data.read_data_sets("/home/lidong/Datasets/ML/mnist", one_hot=False)
train = mnist.test
images, labels = train.images, train.labels
images.shape, labels.shape, labels[0:5]
## select 0,1 labels and images
zero_index, one_index = np.where(labels == 0)[0], np.where(labels == 1)[0]
zero_image, one_image = images[zero_index], images[one_index]
zero_label, one_label = np.expand_dims(labels[zero_index], axis=1), np.expand_dims(labels[one_index], axis=1)
zero_image.shape, one_image.shape, zero_label.shape, one_label.shape
## merge the 0 and 1 labels and images
images_org = np.vstack((zero_image, one_image))
labels_org = np.vstack((zero_label, one_label))
images_org.shape, labels_org.shape, labels_org[2:5], labels[2:5]
## shuffle method 1: sklearn.utils.shuffle
images, labels = shuffle(images_org, labels_org)
images.shape, labels.shape
## shuffle method 2: np.random.shuffle
# images_labels = np.hstack((images_org, labels_org))
# np.random.shuffle(images_labels)
# images, labels = images_labels[:, 0:-1], np.expand_dims(images_labels[:, -1], axis=1)
# images.shape, labels.shape
## train / test data
train_num, test_num = 50, 20
train_images, train_labels = images[0:train_num, :], labels[0:train_num, :]
test_images, test_labels = images[-test_num-1:-1, :], labels[-test_num-1:-1, :]
train_images.shape, test_images.shape
#####################################################################################
# <codecell> Graph
#####################################################################################
#
# *****
# * x * elu
# ***** ***** l1A
# * * tanh
# ***** ***** l2A
# ***** * *
# * * ***** ***** sigmoid
# ***** * * ***** l3A
# --------> ***** --------> --------> * *
# . . *****
# w1:784x256 . w2:256x128 . w3:128x1
# . . .
# .
# . *****
# ***** * *
# * * *****
# ***** *****
# * *
# *****
# 1x784 1x256 1x128 1x1
# input layer-1 layer-2 layer-3
#
# Loss function:
#
# (sigmoid(w3 * tanh(w2 * elu(w1 * x))) - label)^2 * 0.2
# | | | | | ------ x d(w1)
# | | | | | l1
# | | | | +-------- d_elu(l1) d(l1)
# | | | | l1A
# | | | +-------------- l1A d(w2)
# | | | l2
# | | +------------------ d_tanh(l2) d(l2)
# | | l2A
# | +------------------------ l2A d_sigmoid(l3) (l3A - label) d(w3) |
# | l3 |
# +------------------------------ d_sigmoid(l3) (l3A - label) d(l3) |w3
# l3A |
# --------------------------------------------------- (l3A - label) d(l3A)|
# cost
#
#
# Only the digits 0 and 1 are selected from the 0-9 images, so the model is kept simple:
# fully connected layers with a sigmoid on the last layer instead of a softmax.
# Matrix calculus is the hard part here; without a basic grasp of it the code is
# difficult to follow (when to transpose, when to multiply element-wise, and so on).
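# --- Added sketch (not part of the original script) ---
# A minimal finite-difference check of the hand-derived chain rule above. The helper
# name and the suggested call are illustrative assumptions; compare its output against
# g3 = l2A.T.dot(g31 * g32) from the training loops below on a tiny random case, e.g.
#   _numeric_grad_w3(np.random.randn(1, 784), np.ones((1, 1)), _w1, _w2, _w3)
def _numeric_grad_w3(image, label, w1, w2, w3, h=1e-5):
    def cost(w3_):
        l3A_ = sigmoid(tanh(elu(image.dot(w1)).dot(w2)).dot(w3_))
        return np.square(l3A_ - label).sum() * 0.5
    num = np.zeros_like(w3)
    it = np.nditer(w3, flags=['multi_index'])
    while not it.finished:
        i = it.multi_index
        wp, wm = w3.copy(), w3.copy()
        wp[i] += h
        wm[i] -= h
        num[i] = (cost(wp) - cost(wm)) / (2.0 * h)  # central difference
        it.iternext()
    return num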
#####################################################################################
# <codecell> Global param
#####################################################################################
## weight
_w1 = np.random.randn(784, 256) * 0.2
_w2 = np.random.randn(256, 128) * 0.2
_w3 = np.random.randn(128, 1) * 0.2
## hyper parameters
learn_rate = 0.0003
num_epoch = 100
cost_array = {}
#####################################################################################
# <codecell> SGD
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
# layer1
l1 = image.dot(w1)
l1A = elu(l1)
# layer2
l2 = l1A.dot(w2)
l2A = tanh(l2)
# layer3
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
# loss
total_cost += np.square(l3A - label).sum() * 0.5
# eval gradient
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32) # 128x1
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22) # 256x128
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12) # 784x256
# update weight
w3 = w3 - learn_rate * g3
w2 = w2 - learn_rate * g2
w1 = w1 - learn_rate * g1
if iter % 10 == 0:
print("SGD current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['sgd'] = cost_temp_array
#####################################################################################
# <codecell> Momentum
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
v1, v2, v3 = 0, 0, 0
alpha = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
v3 = v3 * alpha + learn_rate * g3
v2 = v2 * alpha + learn_rate * g2
v1 = v1 * alpha + learn_rate * g1
w3 = w3 - v3
w2 = w2 - v2
w1 = w1 - v1
if iter % 10 == 0:
print("Momentum current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Momentum'] = cost_temp_array
#####################################################################################
# <codecell> NAG: Nesterov accelerated gradient
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
v1, v2, v3 = 0, 0, 0
alpha = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
        # look-ahead: apply the momentum step first, then evaluate the gradient at the predicted next position
fake_w3 = w3 - alpha * v3
fake_w2 = w2 - alpha * v2
fake_w1 = w1 - alpha * v1
l1 = image.dot(fake_w1)
l1A = elu(l1)
l2 = l1A.dot(fake_w2)
l2A = tanh(l2)
l3 = l2A.dot(fake_w3)
l3A = sigmoid(l3)
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3_fake = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(fake_w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2_fake = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(fake_w2.T)
g12 = d_elu(l1)
g13 = image
g1_fake = g13.T.dot(g11 * g12)
v3 = v3 * alpha + learn_rate * g3_fake
v2 = v2 * alpha + learn_rate * g2_fake
v1 = v1 * alpha + learn_rate * g1_fake
w3 = w3 - v3
w2 = w2 - v2
w1 = w1 - v1
if iter % 10 == 0:
print("Nesterov accelerated gradient current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['NAG'] = cost_temp_array
#####################################################################################
# <codecell> Adagrad
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
vlr_1, vlr_2, vlr_3 = 0, 0, 0
epsilon = 0.00000001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # accumulate squared gradients for a per-parameter adaptive learning rate
vlr_3 = vlr_3 + g3 ** 2 # 128x1
vlr_2 = vlr_2 + g2 ** 2 # 256x128
vlr_1 = vlr_1 + g1 ** 2 # 784x256
w3 = w3 - (learn_rate / np.sqrt(vlr_3 + epsilon)) * g3
w2 = w2 - (learn_rate / np.sqrt(vlr_2 + epsilon)) * g2
w1 = w1 - (learn_rate / np.sqrt(vlr_1 + epsilon)) * g1
if iter % 10 == 0:
print("Adagrad current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adagrad'] = cost_temp_array
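# Note (illustrative, added): because vlr accumulates g**2 without decay, Adagrad's
# effective step shrinks monotonically; e.g. after 100 equal gradients g the update
# magnitude is learn_rate*|g|/sqrt(100*g**2) = learn_rate/10 (ignoring epsilon).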
#####################################################################################
# <codecell> Adadelta
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, gamma = 0.000001, 0.001
vlr_1, vlr_2, vlr_3 = 0, 0, 0
wlr_1, wlr_2, wlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # exponentially decaying average of the squared gradients
vlr_3 = gamma * vlr_3 + (1-gamma) * g3 ** 2
vlr_2 = gamma * vlr_2 + (1-gamma) * g2 ** 2
vlr_1 = gamma * vlr_1 + (1-gamma) * g1 ** 2
delta_3 = - (np.sqrt(wlr_3 + epsilon) / np.sqrt(vlr_3 + epsilon)) * g3
delta_2 = - (np.sqrt(wlr_2 + epsilon) / np.sqrt(vlr_2 + epsilon)) * g2
delta_1 = - (np.sqrt(wlr_1 + epsilon) / np.sqrt(vlr_1 + epsilon)) * g1
        # exponentially decaying average of the squared parameter updates
wlr_3 = gamma * wlr_3 + (1-gamma) * delta_3 ** 2
wlr_2 = gamma * wlr_2 + (1-gamma) * delta_2 ** 2
wlr_1 = gamma * wlr_1 + (1-gamma) * delta_1 ** 2
w3 = w3 + delta_3
w2 = w2 + delta_2
w1 = w1 + delta_1
if iter % 10 == 0:
print("Adadelta current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adadelta'] = cost_temp_array
#####################################################################################
# <codecell> RMSprop: a special case of Adadelta
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, gamma = 0.00000001, 0.9
vlr_1, vlr_2, vlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
vlr_3 = gamma * vlr_3 + (1 - gamma) * g3 ** 2
vlr_2 = gamma * vlr_2 + (1 - gamma) * g2 ** 2
vlr_1 = gamma * vlr_1 + (1 - gamma) * g1 ** 2
w3 = w3 - (learn_rate/np.sqrt(vlr_3 + epsilon)) * g3
w2 = w2 - (learn_rate/np.sqrt(vlr_2 + epsilon)) * g2
w1 = w1 - (learn_rate/np.sqrt(vlr_1 + epsilon)) * g1
if iter % 10 == 0:
print("RMSprop current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['RMSprop'] = cost_temp_array
#####################################################################################
# <codecell> Adam (adaptive moment estimation: first moment = mean, second moment = uncentered variance)
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, beta_1, beta_2 = 0.00000001, 0.9, 0.999
mlr_1, mlr_2, mlr_3 = 0, 0, 0
vlr_1, vlr_2, vlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # first moment: exponentially decaying mean of the gradients
mlr_3 = beta_1 * mlr_3 + (1 - beta_1) * g3
mlr_2 = beta_1 * mlr_2 + (1 - beta_1) * g2
mlr_1 = beta_1 * mlr_1 + (1 - beta_1) * g1
        # second moment: exponentially decaying mean of the squared gradients
vlr_3 = beta_2 * vlr_3 + (1 - beta_2) * g3 ** 2
vlr_2 = beta_2 * vlr_2 + (1 - beta_2) * g2 ** 2
vlr_1 = beta_2 * vlr_1 + (1 - beta_2) * g1 ** 2
        # bias correction; standard Adam divides by (1 - beta**t) for step count t
        # (the original divided by the constant (1 - beta), which is only correct at t = 1)
        t = iter * len(train_images) + index + 1
        mlr_3_hat = mlr_3 / (1 - beta_1 ** t)
        mlr_2_hat = mlr_2 / (1 - beta_1 ** t)
        mlr_1_hat = mlr_1 / (1 - beta_1 ** t)
        vlr_3_hat = vlr_3 / (1 - beta_2 ** t)
        vlr_2_hat = vlr_2 / (1 - beta_2 ** t)
        vlr_1_hat = vlr_1 / (1 - beta_2 ** t)
w3 = w3 - (learn_rate / (np.sqrt(vlr_3_hat) + epsilon)) * mlr_3_hat
w2 = w2 - (learn_rate / (np.sqrt(vlr_2_hat) + epsilon)) * mlr_2_hat
w1 = w1 - (learn_rate / (np.sqrt(vlr_1_hat) + epsilon)) * mlr_1_hat
if iter % 10 == 0:
print("Adam current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adam'] = cost_temp_array
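# Worked check (illustrative, added): with beta_1 = 0.9 and a constant gradient of 1,
# the first moment after one step is m_1 = (1 - 0.9)*1 = 0.1; dividing by
# (1 - beta_1**1) = 0.1 recovers 1.0, so the corrected estimate is unbiased from the
# first step, whereas the uncorrected moment starts roughly 10x too small.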
#####################################################################################
# <codecell> Nadam (incorporate NAG into Adam)
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, beta_1, beta_2 = 0.00000001, 0.9, 0.999
mlr_1, mlr_2, mlr_3 = 0, 0, 0
vlr_1, vlr_2, vlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
mlr_3 = beta_1 * mlr_3 + (1 - beta_1) * g3
mlr_2 = beta_1 * mlr_2 + (1 - beta_1) * g2
mlr_1 = beta_1 * mlr_1 + (1 - beta_1) * g1
vlr_3 = beta_2 * vlr_3 + (1 - beta_2) * g3 ** 2
vlr_2 = beta_2 * vlr_2 + (1 - beta_2) * g2 ** 2
vlr_1 = beta_2 * vlr_1 + (1 - beta_2) * g1 ** 2
        t = iter * len(train_images) + index + 1  # step count for bias correction
        mlr_3_hat = mlr_3 / (1 - beta_1 ** t)
        mlr_2_hat = mlr_2 / (1 - beta_1 ** t)
        mlr_1_hat = mlr_1 / (1 - beta_1 ** t)
        vlr_3_hat = vlr_3 / (1 - beta_2 ** t)
        vlr_2_hat = vlr_2 / (1 - beta_2 ** t)
        vlr_1_hat = vlr_1 / (1 - beta_2 ** t)
        # Nadam update: Adam step with the Nesterov look-ahead term folded in
        w3 = w3 - (learn_rate/(np.sqrt(vlr_3_hat) + epsilon)) * (beta_1 * mlr_3_hat + (((1 - beta_1) * g3) / (1 - beta_1 ** t)))
        w2 = w2 - (learn_rate/(np.sqrt(vlr_2_hat) + epsilon)) * (beta_1 * mlr_2_hat + (((1 - beta_1) * g2) / (1 - beta_1 ** t)))
        w1 = w1 - (learn_rate/(np.sqrt(vlr_1_hat) + epsilon)) * (beta_1 * mlr_1_hat + (((1 - beta_1) * g1) / (1 - beta_1 ** t)))
if iter % 10 == 0:
print("Nadam current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Nadam'] = cost_temp_array
#####################################################################################
# <codecell> SGD with Gaussian Noise
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
eta = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index],axis=0)
label = np.expand_dims(train_labels[index],axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
noise = np.random.normal(loc=0, scale=eta / (np.power((1 + iter), 0.55)))
w3 = w3 - learn_rate * (g3 + noise)
w2 = w2 - learn_rate * (g2 + noise)
w1 = w1 - learn_rate * (g1 + noise)
if iter % 10 == 0:
print("SGD with Gaussian Noise current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['SGDGN'] = cost_temp_array
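# Note (illustrative, added): the noise scale anneals as eta/(1+iter)**0.55, i.e.
# 0.001 at iter 0 and about 0.001/100**0.55 ~= 7.9e-5 at iter 99, so the extra
# exploration fades as training proceeds. A single scalar noise sample per step is
# shared by all weights here.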
len(cost_array)
#####################################################################################
# <codecell> Plot
#####################################################################################
colors = ['r', 'g', 'b', 'c', 'k', 'y', 'm', 'gold', 'peru']
for i, (name, costs) in enumerate(cost_array.items()):
plt.plot(np.arange(num_epoch), costs,
color=colors[i], linewidth=3, label=name)
plt.title("Total Cost per Training")
plt.legend()
|
qrsforever/workspace
|
ML/ai/gd_algorithm.py
|
Python
|
mit
| 22,271
|
[
"Gaussian"
] |
4a5a0b4dc731f76c0a3749699d9b301d2c9e647c0fdda3e282ef8561062dfecd
|
"""
.. module:: utilities
:platform: Unix
:synopsis: Helpful function for ScatterBrane
.. moduleauthor:: Katherine Rosenfeld <krosenf@gmail.com>
.. moduleauthor:: Michael Johnson
"""
from __future__ import print_function
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage.filters import gaussian_filter
from astropy.io import fits
def smoothImage(img,dx,fwhm):
'''
Returns Image smoothed by a gaussian kernel.
:param img: ``(n, n)``
numpy array
:param dx: scalar
Pixel scale in microarcseconds
:param fwhm: scalar
Gaussian full width at half maximum in microarcseconds
'''
return gaussian_filter(img,fwhm/(2*np.sqrt(np.log(4)))/dx)
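# Example (illustrative, added): a 20 uas FWHM beam on a 2 uas/pixel grid uses
# sigma = FWHM/(2*sqrt(2*ln 2)) ~= 20/2.3548 ~= 8.49 uas, i.e. ~4.25 pixels, which is
# exactly the argument passed to gaussian_filter above (2*sqrt(log(4)) = 2*sqrt(2*ln 2)).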
def getCoherenceLength(theta,wavelength=1.3e-3,magnification=0.448):
'''
:param theta: scalar
FWHM of scattering kernel at 1 cm in milli-arcseconds.
:param wavelength: (optional) scalar
Observing wavelength in meters
:param magnification: (optional) scalar
Magnification factor (scatterer-observer)/(source-scatterer).
:returns: scalar
Coherence length in km.
'''
#return (wavelength*1e-3)*np.sqrt(np.log(4))/(np.pi*np.sqrt(1+magnification)**2*np.radians(1e-3/3600*theta*(wavelength*1e2)**2))
return (wavelength*1e-3)*np.sqrt(np.log(4))/(np.pi*(1+magnification)*np.radians(1e-3/3600*theta*(wavelength*1e2)**2))
def ensembleSmooth(img,dx,brane,return_kernel=False):
'''
Generates ensemble averaged image given scattering kernel parameters.
:param img: ``(n, n)``
numpy array
:param dx: scalar
Pixel scale in microarcseconds
:param brane: Brane object.
:param return_kernel: (optional) bool
    Return tuple with uv kernel (:func:`numpy.fft.rfft2` format). See :func:`getUVKernel` for an alternate method.
'''
nx = img.shape[0]
# scattering kernel parameters in wavelengths
sigma_maj = brane.wavelength*np.sqrt(np.log(4)) / (np.pi*(1.+brane.m)*brane.r0) / (2*np.sqrt(np.log(4)))
sigma_min = sigma_maj / brane.anisotropy
  v = np.dot(np.transpose([np.fft.fftfreq(nx,d=dx*np.radians(1.)/(3600*1e6))]),[np.ones(nx//2 + 1)])  # // keeps this an integer under Python 3
u = np.dot(np.transpose([np.ones(nx)]),[np.fft.rfftfreq(nx,d=dx*np.radians(1.)/(3600*1e6))])
# rotate
  if brane.pa is not None:
theta = np.radians(90-brane.pa)
else:
theta = np.radians(0.)
u_ = np.cos(theta)*u - np.sin(theta)*v
v = np.sin(theta)*u + np.cos(theta)*v
  # Gaussian ensemble-average kernel in the visibility plane
G = np.exp(-2*np.pi**2*(u_**2*sigma_maj**2 + v**2*sigma_min**2))
V = np.fft.rfft2(img)
if return_kernel:
return (np.fft.irfft2(V*G,s=img.shape),G)
else:
return np.fft.irfft2(V*G,s=img.shape)
def getUVKernel(u,v,brane):
'''
  Get ensemble kernel in visibility plane for specified uv points. See :func:`ensembleSmooth` for an alternate method.
:param u: ``(n, )``
Samples of u in units of wavelengths.
:param v: ``(n, )``
Samples of v in units of wavelengths.
:param brane: Brane object
:returns: ``(n, )`` Ensemble kernel complex visibility
'''
# scattering kernel parameters in wavelengths
sigma_maj = brane.wavelength*np.sqrt(np.log(4)) / (np.pi*(1.+brane.m)*brane.r0) / (2*np.sqrt(np.log(4)))
sigma_min = sigma_maj / brane.anisotropy
# rotate
  if brane.pa is not None:
theta = np.radians(90-brane.pa)
else:
theta = np.radians(0.)
u_ = np.cos(theta)*u - np.sin(theta)*v
v_ = np.sin(theta)*u + np.cos(theta)*v
# rotate and return
return np.exp(-2*np.pi**2*(u_**2*sigma_maj**2 + v_**2*sigma_min**2))
def loadSettings(filename):
'''
Loads simulation settings from a file generated by :func:`Brane.save_settings`.
:param filename: string
File name that contains simulation settings.
:returns: A dictionary with simulation settings.
'''
return dict(np.genfromtxt(filename,\
dtype=[('a','|S10'),('f','float')],delimiter='\t',autostrip=True))
def regrid(a,inx,idx,onx,odx):
'''
Regrids array with a new resolution and pixel number.
:param a: ``(n, n)``
Input numpy image
:param inx: int
Number of input pixels on a side
:param idx: scalar
Input resolution element
:param onx: int
Number of output pixels on a side
:param odx: scalar
Output resolution element
:returns: Array regridded to the new resolution and field of view.
'''
x = idx * (np.arange(inx) - 0.5 * (inx - 1))
f = RectBivariateSpline(x,x,a)
x_ = odx * (np.arange(onx) - 0.5 * (onx - 1))
xx_,yy_ = np.meshgrid(x_,x_,indexing='xy')
m = f.ev(yy_.flatten(),xx_.flatten()).reshape((onx,onx))
return m*(odx/idx)**2
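# Note (illustrative, added): the (odx/idx)**2 factor converts "flux per input pixel"
# to "flux per output pixel", so the summed image is approximately preserved when the
# input and output grids cover the same field of view.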
def writefits(m,dx,dest='image.fits',obsra=266.4168370833333,obsdec=-29.00781055555555,freq=230e9):
'''
Write fits file with header. Defaults are set for Sgr A* at 1.3mm.
:param m: ``(n, n)``
numpy image array
:param dx: scalar
Pixel size in microarcseconds
:param dest: (optional) string
Output fits file name
:param obsra: (optional) scalar
Source right ascension
:param obsdec: (optional) scalar
Source declination
'''
hdu = fits.PrimaryHDU(m)
hdu.header['CDELT1'] = -1*dx*np.radians(1.)/(3600.*1e6)
hdu.header['CDELT2'] = dx*np.radians(1.)/(3600.*1e6)
hdu.header['OBSRA'] = obsra
hdu.header['OBSDEC'] = obsdec
hdu.header['FREQ'] = freq
hdu.writeto(dest,clobber=True)
def FTElementFast(img,dx,baseline):
'''
Return complex visibility.
:param img: ``(n, n)``
numpy image array
:param dx: scalar
Pixel size in microarcseconds
:param baseline: ``(2, )``
(u,v) point in wavelengths
  .. note:: To shift center try multiplying by :math:`\\mathrm{exp}(\\pi i u n_x\\Delta_x)` and watch out for the axis orientation.
'''
nx = img.shape[-1]
du = 1./(nx * dx * np.radians(1.)/(3600*1e6))
ind = np.arange(nx)
return np.sum(img * np.dot(\
np.transpose([np.exp(-2j*np.pi/du/nx*baseline[1]*np.flipud(ind))]),\
[np.exp(-2j*np.pi/du/nx*baseline[0]*ind)]))
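# Sanity check (illustrative, added): at zero baseline the phase factors are all 1,
# so FTElementFast(img, dx, baseline=(0., 0.)) reduces to img.sum(), the total flux.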
|
krosenfeld/scatterbrane
|
scatterbrane/utilities.py
|
Python
|
mit
| 6,159
|
[
"Gaussian"
] |
d60a6912070b799491adbaa8a63ecdf241f8bb991d439754c0f40ab4d40eb51b
|
import numpy as np
from ..antechamber.atomtype import atomtype
import math
def pdb(molecule, ff='amber', fn='cnt.pdb', save_dir='.'):
"""Creates a .pdb (protein data bank) type list for use with molecular dynamics packages.
pdb_lines returns list holding every line of .pdb file."""
amber_2_opls = {"CA": "CA", "HC": "HA"}
atom_lines = []
conect_lines = []
    inc_index_bondList = molecule.bondList + 1  # shift to 1-based indices to match "serial"
conect_header_bare = "CONECT"
for i in range(len(molecule.posList)):
serial = i + 1
if ff == 'amber':
name = molecule.atomtypes[i]
elif ff == 'oplsaa':
name = amber_2_opls[molecule.atomtypes[i]]
else:
print('Check ff input')
raise SystemExit
altloc = " "
#resname = "CNT"
resname = name
chainid = "A"
resseq = serial
icode = " "
x = round(molecule.posList[i][0], 3)
y = round(molecule.posList[i][1], 3)
z = round(molecule.posList[i][2], 3)
occupancy = 1.00
tempfactor = 0.00
        element = atomtype.inv_atomicSymDict[molecule.zList[i]]  # element symbol looked up from atomic number
charge = " "
atom_header = "ATOM {0:>5} {1:<3}{2}{3:>3} {4}{5:>4}{6} {7:>8.3f}{8:>8.3f}{9:>8.3f}{10:>6.2f}{11:>6.2f} " \
" {12:>2}{13:>2}".format(serial, name, altloc, resname,
chainid, resseq, icode, x, y, z,
occupancy, tempfactor, element, charge)
atom_lines.append(atom_header)
for j in range(len(inc_index_bondList)):
conect_header_temp = conect_header_bare
for k in range(len(inc_index_bondList[j])): # builds variable size conect header
conect_adder = "{0:>5}".format(inc_index_bondList[j][k])
conect_header_temp += conect_adder
conect_lines.append(conect_header_temp)
pdb_lines = atom_lines + conect_lines
pdb_lines.append("TER")
pdb_lines.append("END")
save_file(pdb_lines, save_dir, fn)
print('Successfully exported %s to %s' % (fn, save_dir))
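# Usage sketch (illustrative, added; `my_cnt` is a hypothetical molecule object): any
# object exposing posList (Nx3), bondList (Mx2, 0-based), atomtypes and zList in the
# shapes used above can be exported, e.g. pdb(my_cnt, ff='amber', fn='cnt.pdb', save_dir='out').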
def gro(molecule, scale=2.0, fn='cnt.gro', save_dir='.', periodic=False):
"""Creates a .gro file for gromacs, holds atom coordinates and unit cell size
Coordinates exported in nm, originally in angstroms"""
gro_lines = []
res_num = 1
res_name = "CNT"
elemtypes = []
a_num = []
for i in range(len(molecule.atomtypes)):
        a_num_temp = atomtype.inv_atomicSymDict[molecule.zList[i]]  # element symbol looked up from atomic number
elemtypes_temp = molecule.atomtypes[i][0] # element only (first char.)
a_num.append(a_num_temp)
elemtypes.append(elemtypes_temp)
dist_to_orig = []
for i in range(len(molecule.posList)):
temp_dist = np.sqrt(molecule.posList[i][0]**2 + molecule.posList[i][1]**2 + molecule.posList[i][2]**2)
dist_to_orig.append(temp_dist)
index_min = np.argmin(dist_to_orig) # closest pt in object to origin
    # move tube to true origin at 0,0,0
    move_dist = np.abs(molecule.posList[index_min])
    posList_cent = molecule.posList.copy()  # copy so the molecule's coordinates are not mutated in place below
#posList_cent += move_dist # now centered at origin
x_list, y_list, z_list = zip(*molecule.posList)
# center tube in quadrant 1 box
max_x = np.max(x_list)
max_y = np.max(y_list)
max_z = np.max(z_list)
min_x = np.min(x_list)
min_y = np.min(y_list)
min_z = np.min(z_list)
length_x = np.abs(max_x-min_x)
length_y = np.abs(max_y-min_y)
length_z = np.abs(max_z-min_z)
dist_to_move = np.abs([min_x, min_y, min_z])
max_length = np.max([length_x, length_y, length_z])
idx_max_dim = np.argmax([length_x, length_y, length_z])
dims = ['X','Y','Z']
dims_dict = {0:x_list, 1:y_list, 2:z_list}
length_str = dims[idx_max_dim]
print('Length of tube is in the %s direction.' % length_str)
box_dim = scale * max_length
new_move_dist = box_dim/2.0
# measure dist to new box origin in move in every direction
posDist_new = posList_cent
posDist_new += (new_move_dist + dist_to_move)
# now tube is centered in quadrant 0 box
if periodic:
dir_to_cut = dims_dict[idx_max_dim]
left = np.min(dir_to_cut) # picks first smallest value in list
right = np.max(dir_to_cut)
split = (right-left) / 2.0
print('Splitting %s direction around %.2f' % (length_str, split))
# find an atom closest to split line
dist_to_split = []
for i in range(len(molecule.posList)):
temp_dist = np.abs(posDist_new[i][idx_max_dim] - split)
dist_to_split.append(temp_dist)
index_split = np.argmin(dist_to_split) # closest pt in object to split
    # convert coordinates and box size from angstroms to nm
posDist_new *= 0.1
box_dim *= 0.1
print("Box with be %dX the maximum dimension of the object.\nUsing a %.2fX%.2fX%.2f box." %
(scale, box_dim, box_dim, box_dim))
# lets write to list
tit = "SWCNT armchair"
tot_atoms = len(posDist_new)
num_atoms_line = "{0:>5}".format(tot_atoms)
gro_lines.append(tit)
gro_lines.append(num_atoms_line)
for i in range(len(posDist_new)):
_index = i + 1
temp_dist = np.sqrt(posDist_new[i][0] ** 2 + posDist_new[i][1] ** 2 + posDist_new[i][2] ** 2)
temp_line = "{0:>5}{1:<5}{2:>5}{3:>5}{4:>8.3f}{5:>8.3f}{6:>8.3f}"\
.format(res_num, res_name, a_num[i], _index,
posDist_new[i][0], posDist_new[i][1], posDist_new[i][2])
gro_lines.append(temp_line)
box_line = "{0:>8.3f}{1:>8.3f}{2:>8.3f}".format(box_dim, box_dim, box_dim)
gro_lines.append(box_line)
save_file(gro_lines, save_dir, fn)
print('Successfully exported %s to %s' % (fn, save_dir))
def restrains(mol, fn='posre.itp', save_dir='.', fc=1000):
"""Generates posre.itp file used by GROMACS to restrain atoms to a location, can be read by x2top"""
# force constant of position restraint (kJ mol^-1 nm^-2)
# IF NEEDED. REMOVING CM TRANSLATION AND ROTATION IS PROBABLY BEST.
# **********MAKE SURE MOLECULE IS HYDROGENATED FIRST********** #
itp_lines = []
funct = 1
itp_lines.extend(["; file for defining restraints in CNT, read in through X.top", ""])
itp_lines.extend(["[ position_restraints ]", "; ai funct fcx fcy fcz"])
for i in mol.hcap:
index = i + 1
temp_line = "{0:>4}{1:>6}{2:>9}{3:>8}{4:>8}".format(index, funct, fc, fc, fc)
itp_lines.append(temp_line)
itp_lines.append("") # EOL
save_file(itp_lines, save_dir, fn)
print('Successfully exported %s to %s' % (fn, save_dir))
def lammps(molecule, fn='cnt.lammps', save_dir='.', type_list=None):
"""Generates data file for use in LAMMPS
Assuming 'real' units (the unit type)
mass = grams/mole
distance = Angstroms
time = femtoseconds
energy = Kcal/mole
velocity = Angstroms/femtosecond
force = Kcal/mole-Angstrom
torque = Kcal/mole
temperature = Kelvin
pressure = atmospheres
dynamic viscosity = Poise
charge = multiple of electron charge (1.0 is a proton)
dipole = charge*Angstroms
electric field = volts/Angstrom
density = gram/cm^dim
bond_const_K = Kcal/(mole*Angstrom^2)
bond_const_r0 = Angstrom
"""
vdwDict = {1: 1.2, 6: 1.7, 7: 1.55, 8: 1.52, 9: 1.47, 15: 1.8, 16: 1.8, 17: 2.75}
amuDict = {1: 1.008, 6: 12.01, 7: 14.01, 8: 16.00, 9: 19.00,
15: 30.79, 16: 32.065, 17: 35.45}
if type_list is None:
type_list = np.ones(len(molecule.posList))
l_lines = []
l_lines.append('LAMMPS Description')
l_lines.append('')
l_lines.append('%d atoms' % len(molecule.posList))
l_lines.append('%d bonds' % len(molecule.bondList))
l_lines.append('%d angles' % len(molecule.angleList))
l_lines.append('%d dihedrals' % 0)
l_lines.append('%d impropers' % 0)
l_lines.append('')
l_lines.append('%d atom types' % len(molecule.zList))
l_lines.append('%d bond types' % len(molecule.bondList))
l_lines.append('%d angle types' % len(molecule.angleList))
l_lines.append('')
# find box dims
box_min = np.ceil(np.min(molecule.posList)) - 5.0
box_max = np.ceil(np.max(molecule.posList)) + 5.0
l_lines.append('%d %d xlo xhi' % (box_min, box_max))
l_lines.append('%d %d ylo yhi' % (box_min, box_max))
l_lines.append('%d %d zlo zhi' % (box_min, box_max))
l_lines.append('')
l_lines.append('Masses')
l_lines.append('')
for i in range(len(molecule.mass)):
l_lines.append('%d %.5f' % ((i+1), molecule.mass[i]))
l_lines.append('')
l_lines.append('Bond Coeffs')
l_lines.append('')
for i in range(len(molecule.bondList)):
l_lines.append('%d %.5f %.5f' % ((i+1), molecule.kb[i], molecule.b0[i]))
l_lines.append('')
l_lines.append('Angle Coeffs')
l_lines.append('')
for i in range(len(molecule.angleList)):
l_lines.append('%d %.5f %.5f' % ((i + 1), molecule.kt[i], molecule.t0[i]))
l_lines.append('')
l_lines.append('Atoms')
l_lines.append('')
for i in range(len(molecule.posList)):
l_lines.append('%d %d %d %.5f %.5f %.5f' % ((i+1), type_list[i], (i+1), molecule.posList[i,0], molecule.posList[i,1], molecule.posList[i,2]))
l_lines.append('')
l_lines.append('Bonds')
l_lines.append('')
for i in range(len(molecule.bondList)):
l_lines.append('%d 1 %d %d' % ((i+1), (molecule.bondList[i,0]+1), (molecule.bondList[i,1]+1)))
l_lines.append('')
l_lines.append('Angles')
l_lines.append('')
for i in range(len(molecule.angleList)):
l_lines.append('%d 1 %d %d %d' % ((i+1), (molecule.angleList[i,0]+1), (molecule.angleList[i,1]+1), (molecule.angleList[i,2]+1)))
l_lines.append('')
save_file(l_lines, save_dir, fn)
print('Successfully exported %s to %s' % (fn, save_dir))
def top(mol, ff='amber', fn='cnt.top', save_dir='.'):
"""Creates a .top (topology) type list for use in MD packages
    AMBER99SB or OPLS-AA forcefields can be used"""
print("Recommended that MD export is done using .gro file only.")
if ff == 'amber':
tit = 'AMBER99SB'
ffloc = './amber99sb.ff/forcefield.itp'
elif ff == 'oplsaa':
tit = 'OPLS-AA'
ffloc = './oplsaa.ff/forcefield.itp'
else:
print('Check ff input')
raise SystemExit
top_lines = [";", "; Topology file for %s" % mol.name, ";%s force field" % ff,";"]
top_lines.extend(["; Include forcefield parameters", '#include "%s"' % ffloc, ""])
# we call our molecule or residue CNT, encompassing all atoms of the tube/functionalized ends
top_lines.extend(["[ moleculetype ]", "; Name nrexcl", "CNT 3", ""])
# ATOMS
top_lines.extend(["[ atoms ]", "; nr type resnr residue atom cgnr charge"])
for i in range(len(mol.atomtypes)):
index = i + 1
if (ff == 'oplsaa') and (mol.atomtypes[i] == 'HC'):
temp_atomtype = 'HA'
else:
temp_atomtype = mol.atomtypes[i]
        a_num = atomtype.inv_atomicSymDict[mol.zList[i]]  # element symbol looked up from atomic number
temp_line = "{0:>6}{1:>8}{2:>8}{3:>8}{4:>8}{5:>8}{6:>7.3f}"\
.format(index, temp_atomtype, 1, 'CNT', a_num, index, 0.000)
top_lines.append(temp_line)
# BONDS
top_lines.extend(["", "[ bonds ]", "; ai aj funct c0 c1"])
for i in range(len(mol.bondList)):
funct = 1
temp_line = "{0:>5}{1:>6}{2:>6}{3:>13}{4:>13}"\
.format(mol.bondList[i][0]+1, mol.bondList[i][1]+1, funct, "", "")
top_lines.append(temp_line)
# PAIRS
# Let L-J and Coulomb pairs auto generate from the cutoffs
# ANGLES
top_lines.extend(["", "[ angles ]", "; ai aj ak funct c0 c1"])
for i in range(len(mol.angleList)):
funct = 1
temp_line = "{0:>5}{1:>6}{2:>6}{3:>6}{4:>13}{5:>13}"\
.format(mol.angleList[i][0]+1, mol.angleList[i][1]+1, mol.angleList[i][2]+1, funct, "", "")
top_lines.append(temp_line)
# DIHEDRALS
top_lines.extend(["", "[ dihedrals ]", "; ai aj ak al funct c0 c1"])
for i in range(len(mol.dihList)):
if ff == 'amber':
funct = 9
elif ff == 'oplsaa':
funct = 3
temp_line = "{0:>5}{1:>6}{2:>6}{3:>6}{4:>6}{5:>13}{6:>13}"\
.format(mol.dihList[i][0]+1, mol.dihList[i][1]+1, mol.dihList[i][2]+1, mol.dihList[i][3]+1, funct, "", "")
top_lines.append(temp_line)
top_lines.extend(["", "[ system ]", "CNT"])
top_lines.extend(["", "[ molecules ]", "CNT 1", ""])
save_file(top_lines, save_dir, fn)
print('Successfully exported %s to %s' % (fn, save_dir))
def _build_lines(columns, spaces, size, innerColumns):
listSection = []
line = ";"
for column in columns:
line += ' '*spaces
line += column
listSection.append(line)
for i in range(size):
line = ' '
for count, col in enumerate([x[i] for x in innerColumns]):
entryLength = spaces+len(columns[count])
line += col.rjust(entryLength, ' ')
listSection.append(line)
listSection.append(' ')
return listSection
def save_file(txt_object, save_dir, name):
f = open(save_dir + "/%s" % name, 'w')
for i in range(len(txt_object)):
f.write(txt_object[i] + "\n")
f.close()
|
ajkerr0/kappa
|
kappa/md/generate.py
|
Python
|
mit
| 13,545
|
[
"Amber",
"Gromacs",
"LAMMPS"
] |
300ed0f225ea9e51dbfcb43fc5a821cb309f62623003276bed08fb578cf69ffc
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=deprecated-module
import optparse
import threading
import py_utils
from devil.android import device_utils
from systrace import trace_result
from systrace import tracing_agents
from py_trace_event import trace_time as trace_time_module
TRACE_FILE_PATH = \
'/sdcard/Android/data/org.chromium.latency.walt/files/trace.txt'
CLOCK_DOMAIN_MARKER = '# clock_type=LINUX_CLOCK_MONOTONIC\n'
def try_create_agent(options):
if options.is_walt_enabled:
return WaltAgent()
return None
class WaltConfig(tracing_agents.TracingConfig):
def __init__(self, device_serial_number, is_walt_enabled):
tracing_agents.TracingConfig.__init__(self)
self.device_serial_number = device_serial_number
self.is_walt_enabled = is_walt_enabled
def add_options(parser):
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=deprecated-module
options = optparse.OptionGroup(parser, 'WALT trace options')
options.add_option('--walt', dest='is_walt_enabled', default=False,
action='store_true', help='Use the WALT tracing agent. '
'WALT is a device for measuring latency of physical '
'sensors on phones and computers. '
'See https://github.com/google/walt')
return options
def get_config(options):
return WaltConfig(options.device_serial_number, options.is_walt_enabled)
class WaltAgent(tracing_agents.TracingAgent):
"""
This tracing agent requires the WALT app to be installed on the Android phone,
and requires the WALT device to be attached to the phone. WALT is a device
for measuring latency of physical sensors and outputs on phones and
computers. For more information, visit https://github.com/google/walt
"""
def __init__(self):
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=super-with-arguments
super(WaltAgent, self).__init__()
self._trace_contents = None
self._config = None
self._device_utils = None
self._clock_sync_marker = None
self._collection_thread = None
def __repr__(self):
return 'WaltAgent'
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
del timeout # unused
self._config = config
self._device_utils = device_utils.DeviceUtils(
self._config.device_serial_number)
if self._device_utils.PathExists(TRACE_FILE_PATH):
# clear old trace events so they are not included in the current trace
self._device_utils.WriteFile(TRACE_FILE_PATH, '')
return True
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
del timeout # unused
self._collection_thread = threading.Thread(
target=self._collect_trace_data)
self._collection_thread.start()
return True
def _collect_trace_data(self):
self._trace_contents = self._device_utils.ReadFile(TRACE_FILE_PATH)
def SupportsExplicitClockSync(self):
return True
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=arguments-differ
def RecordClockSyncMarker(self, sync_id, did_record_clock_sync_callback):
cmd = 'cat /proc/timer_list | grep now'
t1 = trace_time_module.Now()
command_result = self._device_utils.RunShellCommand(cmd, shell=True)
nsec = command_result[0].split()[2]
self._clock_sync_marker = format_clock_sync_marker(sync_id, nsec)
did_record_clock_sync_callback(t1, sync_id)
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
del timeout # unused
self._collection_thread.join()
self._collection_thread = None
return trace_result.TraceResult('waltTrace', self._get_trace_result())
def _get_trace_result(self):
result = '# tracer: \n' + CLOCK_DOMAIN_MARKER + self._trace_contents
if self._clock_sync_marker is not None:
result += self._clock_sync_marker
return result
def format_clock_sync_marker(sync_id, nanosec_time):
return ('<0>-0 (-----) [001] ...1 ' + str(float(nanosec_time) / 1e9)
+ ': tracing_mark_write: trace_event_clock_sync: name='
+ sync_id + '\n')
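# Example (illustrative, added): format_clock_sync_marker('sync_1', '1234567890')
# yields an ftrace-style line whose timestamp is 1234567890 ns / 1e9 = 1.23456789 s.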
|
catapult-project/catapult
|
systrace/systrace/tracing_agents/walt_agent.py
|
Python
|
bsd-3-clause
| 4,648
|
[
"VisIt"
] |
87b2612c9ea0b24b56c642f78669488c13eaa227f103bf546a01138aa1d13948
|
from __future__ import division
from Bio.Blast import NCBIXML
from utils_parse_BLAST import *
from utils_analysis import *
import pysam
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from numpy import *
import rpy2.robjects as ro
from rpy2.robjects import r
from rpy2.robjects.numpy2ri import numpy2ri
import os, os.path
import sys
directory_simulated_data=sys.argv[1]
simulated_files=[]
for name in os.listdir(directory_simulated_data):
files = re.search('.sam',name)
if files is not None:
simulated_files.append(directory_simulated_data + name)
simulated_files=sorted(simulated_files)
directory_aligner_data=sys.argv[2]
aligner_files=[]
for name in os.listdir(directory_aligner_data):
files = re.search('.xml',name)
if files is not None:
aligner_files.append(directory_aligner_data + name)
aligner_files=sorted(aligner_files)
if len(simulated_files) != len(aligner_files):
    print 'Error: the simulated and aligner file lists must be the same length.'
exit()
# read the junction info file
junction_file = open(sys.argv[3], 'r')
t_strand={}
exon_j_pos={}
exonStart={}
exonEnd={}
intron_pos={}
for line in junction_file:
jdata = line.split('\t')
t_strand[jdata[0]] = jdata[1]
exon_j_pos[jdata[0]] = jdata[2].split(',')
exonStart[jdata[0]] = jdata[3].split(',')
exonEnd[jdata[0]] = jdata[4].split(',')
intron_pos[jdata[0]] = jdata[5].rstrip('\n').split(',')
junction_file.close()
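# Illustrative line format, inferred from the parsing above (tab-separated):
#   transcript_id  strand  exon_junction_positions(csv)  exonStarts(csv)  exonEnds(csv)  intron_positions(csv)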
tot_reads=[]
tot_reads_jctn=[]
mapped_aln_tot_non_jctn=[]
mapped_aln_tot_jctn=[]
base_accu_non_jctn=[]
base_accu_jctn=[]
stats_aln_mean_non_jctn=[]
stats_aln_mean_jctn=[]
stats_aln_mean_non_jctn2=[]
stats_aln_mean_jctn2=[]
for index00, file00 in enumerate(simulated_files):
print 'reading file:', index00, file00
samfile_true = pysam.Samfile(file00, 'r')
readTrueStart = []
readTrueEnd = []
readTrueId = []
readTrueRef = []
readTrueStrand = []
readTrueSeq = []
readTrueCig = []
readTrueLength = []
readMDTrue = []
scoresTrue=[]
avgQualTrue=[]
low_base_quality=[]
for align in samfile_true.fetch():
scoresTrue = [ord(m)-33 for m in align.qqual]
scores_a = array(scoresTrue)
# python base 0 need to add 1 to the start position
readTrueStart.append(int(align.pos) + 1)
align_length = align.rlen
readTrueLength.append(align.rlen)
read_end = int(align.pos) + align_length
readTrueEnd.append(read_end)
readTrueId.append(align.qname)
readTrueRef.append(samfile_true.getrname(align.tid))
strand = -1 if align.is_reverse else 1
readTrueStrand.append(strand)
readTrueSeq.append(align.query)
readTrueCig.append(align.cigar)
try:
readMDTrue.append(align.opt('MD'))
except KeyError:
readMDTrue.append(None)
# read the aligner file
result_handle = open(aligner_files[index00])
blast_records = NCBIXML.parse(result_handle)
print 'Read aligner file:', aligner_files[index00]
readstart = []
readend = []
readid = []
readRef = []
readstrand = []
readseq = []
readcig = []
readAS=[]
readqstart=[]
readqend=[]
for blast_record in blast_records:
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
readstrand.append(hsp.frame[1])
if hsp.frame[1] == -1:
readstart.append(hsp.sbjct_end)
readend.append(hsp.sbjct_start)
elif hsp.frame[1] == 1:
readstart.append(hsp.sbjct_start)
readend.append(hsp.sbjct_end)
readqstart.append(hsp.query_start)
readqend.append(hsp.query_end)
readid.append(str(blast_record.query))
readRef.append(str(alignment.title))
readcig.append(blast_to_cigar(hsp,int(sys.argv[5])))
readAS.append(hsp.bits)
read_junction=[]
junction_type=[]
junction_adjoint=[]
stats_o=[]
stats_no=[]
base_accu=[]
strand_accuracy=[]
junction_type_lstats=[]
junction_adjoint_lstats=[]
mapped_read=[]
for read in readTrueId:
# in case of multiple alignments
# readid.index(read) will get the first alignment which is the most sig one
try:
indx_aln = find_index_all(readid,read)
if len(indx_aln) == 0:
mapped_read.append(False)
indx_aln=''
else:
mapped_read.append(True)
except ValueError:
indx_aln = ''
mapped_read.append(False)
indx = readTrueId.index(read)
        coordinate_change = transfrom_MRNA_to_DNA_ref_frame(
            readTrueStart[indx], readTrueEnd[indx], exonEnd[readTrueRef[indx]],
            exonStart[readTrueRef[indx]], exon_j_pos[readTrueRef[indx]],
            readTrueLength[indx], int(intron_pos[readTrueRef[indx]][0]))
if len(indx_aln) == 1:
indx_aln = indx_aln[0]
elif len(indx_aln) > 1:
max_AS_score = max([readAS[int(x)] for x in indx_aln])
AS_scores_a = array([readAS[int(x)] for x in indx_aln])
indx_aln_a = array(indx_aln)
for indx_AS in indx_aln:
if (readAS[int(indx_AS)] == max_AS_score) and (readstart[indx_AS] - int(coordinate_change[0])) == 0:
# take the read with max AS and best position
indx_aln = int(indx_AS)
break
else:
# take the first read with max AS
indx_aln = indx_aln_a[AS_scores_a == max_AS_score][0]
if indx_aln != '':
read_junction.append(coordinate_change[3])
junction_adjoint.append(coordinate_change[4])
if readstrand[indx_aln] == readTrueStrand[indx]:
strand_accuracy.append(True)
else:
strand_accuracy.append(False)
if coordinate_change[3]:
junction_type.append(coordinate_change[2])
else:
junction_type.append('')
cig_aln = array(readcig[indx_aln])
if readstrand[indx_aln] == readTrueStrand[indx]:
if readstrand[indx_aln] == -1:
cig_aln = cig_aln[::-1]
cig_true = array(readTrueCig[indx])
stats=base_accuracy_blast(cig_aln,cig_true,readstart[indx_aln],readend[indx_aln],coordinate_change)
base_accu.append(stats[0])
stats_o.append(stats[1])
stats_no.append(stats[2])
else:
base_accu.append(0)
stats_o.append([(1,0,0,0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)])
stats_no.append([(1,0,0,0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)])
else:
read_junction.append(coordinate_change[3])
if coordinate_change[3]:
junction_type.append(coordinate_change[2])
junction_adjoint.append(coordinate_change[4])
else:
junction_type.append('')
junction_adjoint.append(True)
    print 'Running calc on stats.'
# Generate stats:
# total number of simulated reads
tot_nmb_reads=len(readTrueId)
# total number of simulated junctions
jt=array(junction_type)
j_adj=array(junction_adjoint)
j_adj_ee=j_adj[jt == 'EE']
jt_ee=jt[jt == 'EE']
tot_nmb_reads_jctn=len(jt_ee[j_adj_ee == False])
tot_reads.append(tot_nmb_reads)
tot_reads_jctn.append(tot_nmb_reads_jctn)
# total numb of mapped reads across non-junctions
strand_accuracy_a=array(strand_accuracy)
mapped_read_a=array(mapped_read)
jt_mapped=jt[mapped_read_a == True][strand_accuracy_a == True]
mapped_aln_tot_non_jctn.append(len(jt_mapped[jt_mapped == ''])/(tot_nmb_reads - tot_nmb_reads_jctn)*100)
if len(mapped_read_a[mapped_read_a == True]) == 0 or len(base_accu) == 0:
mapped_aln_tot_jctn.append(0)
base_accu_non_jctn.append(0)
base_accu_jctn.append(0)
stats_aln_mean_jctn.append([0,0,0,0,0,0,0,0])
stats_aln_mean_non_jctn.append([0,0,0,0,0,0,0,0])
stats_aln_mean_non_jctn2.append([0,0,0,0,0,0,0,0])
stats_aln_mean_jctn2.append([0,0,0,0,0,0,0,0])
continue
# total number of mapped reads across junctions
j_adj_mapped=j_adj[mapped_read_a == True][strand_accuracy_a == True]
j_adj_mapped_ee=j_adj_mapped[jt_mapped == 'EE']
jt_mapped_ee=jt_mapped[jt_mapped == 'EE']
mapped_aln_tot_jctn.append(len(jt_mapped_ee[j_adj_mapped_ee == False])/tot_nmb_reads_jctn*100)
# base accuracy across non-junctions
base_accu_a=array(base_accu)
base_accu_non_jctn.append(base_accu_a[strand_accuracy_a == True][jt_mapped == ''].mean())
if mapped_aln_tot_jctn[index00] == 0:
base_accu_jctn.append(0)
else:
base_accu_jctn.append(base_accu_a[strand_accuracy_a == True][jt_mapped == 'EE'][j_adj_mapped_ee == False].mean())
del base_accu_a,jt,j_adj,mapped_read_a
# calculate stats on alignments
# inside of alignment
stats_aln=[]
stats_o_a = array(stats_o)
for x in stats_o_a:
stats_aln.append([x[0][1] - x[0][3], x[1][1], x[2][1], x[3][1], x[4][1], x[5][1], x[6][1], x[7][1]])
stats_aln_a=array(stats_aln)
# stats on alignment across non_junctions
    sel = stats_aln_a[strand_accuracy_a == True][jt_mapped == '']
    stats_aln_mean_non_jctn.append([sel[:, k].mean() for k in range(8)])
if mapped_aln_tot_jctn[index00] == 0:
stats_aln_mean_jctn.append([0,0,0,0,0,0,0,0])
else:
        sel = stats_aln_a[strand_accuracy_a == True][jt_mapped == 'EE'][j_adj_mapped_ee == False]
        stats_aln_mean_jctn.append([sel[:, k].mean() for k in range(8)])
del stats_aln_a
# outside of alignment
stats_aln2=[]
stats_no_a = array(stats_no)
for x in stats_no_a:
stats_aln2.append([x[0][1] - x[0][3], x[1][1], x[2][1], x[3][1], x[4][1], x[5][1], x[6][1], x[7][1]])
stats_aln_a2=array(stats_aln2)
# stats on alignment across non_junctions
    sel2 = stats_aln_a2[strand_accuracy_a == True][jt_mapped == '']
    stats_aln_mean_non_jctn2.append([sel2[:, k].mean() for k in range(8)])
# stats on alignment across junctions
if mapped_aln_tot_jctn[index00] == 0:
stats_aln_mean_jctn2.append([0,0,0,0,0,0,0,0])
else:
        sel2 = stats_aln_a2[strand_accuracy_a == True][jt_mapped == 'EE'][j_adj_mapped_ee == False]
        stats_aln_mean_jctn2.append([sel2[:, k].mean() for k in range(8)])
del stats_aln_a2
# get stats out as R data file
r_out_file=sys.argv[4] + '/Rdata_multi/aligner_stats.gzip'
stats1 = r['cbind'](tot_reads,tot_reads_jctn,mapped_aln_tot_non_jctn,mapped_aln_tot_jctn,base_accu_non_jctn,base_accu_jctn)
stats1.colnames=ro.StrVector(['Total reads(non-jctn)','Total reads(jctn)','Aligned reads(non-jctn)','Aligned reads(jctn)','Base Accu(non-jctn)','Base Accu(jctn)'])
r.assign("aligner_stats", stats1)
r("save(aligner_stats," + "file=" "'" + r_out_file + "'" + ", compress=TRUE)")
r_out_file=sys.argv[4] + '/Rdata_multi/stats_o.gzip'
stats2=r['cbind'](stats_aln_mean_non_jctn,stats_aln_mean_jctn)
stats2.colnames=ro.StrVector(['Non-jctn','Jctn'])
robj = numpy2ri(stats2)
r.assign("stats_o", robj)
r("save(stats_o," + "file=" "'" + r_out_file + "'" + ", compress=TRUE)")
r_out_file=sys.argv[4] + '/Rdata_multi/stats_no.gzip'
stats3=r['cbind'](stats_aln_mean_non_jctn2,stats_aln_mean_jctn2)
stats3.colnames=ro.StrVector(['Non-jctn','Jctn'])
robj = numpy2ri(stats3)
r.assign("stats_no", robj)
r("save(stats_no," + "file=" "'" + r_out_file + "'" + ", compress=TRUE)")
|
oicr-ibc/riser
|
bin/analysis_BLAST.py
|
Python
|
gpl-3.0
| 13,325
|
[
"BLAST",
"pysam"
] |
48f53ff96a2fd834f20fa72a93fa9ea74c86cc6e4a9d0de9ca864da5a24ae531
|
#!/usr/bin/env python
# CREATED:2013-03-11 18:14:30 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.beat
from __future__ import print_function
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except:
pass
import pytest
import numpy as np
import scipy.stats
import librosa
from test_core import files, load
__EXAMPLE_FILE = os.path.join('tests', 'data', 'test1_22050.wav')
@pytest.mark.parametrize('infile', files(os.path.join('data', 'beat-onset-*.mat')))
def test_onset_strength(infile):
DATA = load(infile)
# Compute onset envelope using the same spectrogram
onsets = librosa.onset.onset_strength(y=None,
sr=8000,
S=DATA['D'],
lag=1,
max_size=1,
center=False,
detrend=True,
aggregate=np.mean)
assert np.allclose(onsets[1:], DATA['onsetenv'][0])
@pytest.mark.parametrize('tempo', [60, 80, 110, 160])
@pytest.mark.parametrize('sr', [22050, 44100])
@pytest.mark.parametrize('hop_length', [512, 1024])
@pytest.mark.parametrize('ac_size', [4, 8])
@pytest.mark.parametrize('aggregate', [None, np.mean])
@pytest.mark.parametrize('prior', [None, scipy.stats.uniform(60, 240)])
def test_tempo(tempo, sr, hop_length, ac_size, aggregate, prior):
y = np.zeros(20 * sr)
delay = librosa.time_to_samples(60./tempo, sr=sr).item()
y[::delay] = 1
tempo_est = librosa.beat.tempo(y=y, sr=sr, hop_length=hop_length,
ac_size=ac_size,
aggregate=aggregate,
prior=prior)
# Being within 5% for the stable frames is close enough
if aggregate is None:
win_size = int(ac_size * sr // hop_length)
assert np.all(np.abs(tempo_est[win_size:-win_size] - tempo) <= 0.05 * tempo)
else:
assert np.abs(tempo_est - tempo) <= 0.05 * tempo, (tempo, tempo_est)
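# Illustrative check of the impulse train built in test_tempo above: at tempo=60 and
# sr=22050, delay = time_to_samples(1.0) = 22050 samples, i.e. one click per second.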
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_beat_no_input():
librosa.beat.beat_track(y=None, onset_envelope=None)
def test_beat_no_onsets():
sr = 22050
hop_length = 512
duration = 30
onsets = np.zeros(duration * sr // hop_length)
tempo, beats = librosa.beat.beat_track(onset_envelope=onsets,
sr=sr,
hop_length=hop_length)
assert np.allclose(tempo, 0)
assert len(beats) == 0
@pytest.mark.parametrize('start_bpm', [40, 60, 117, 235])
@pytest.mark.parametrize('aggregate', [None, np.mean])
@pytest.mark.parametrize('onsets', [np.zeros(30 * 22050 // 512)])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [512])
def test_tempo_no_onsets(start_bpm, aggregate, onsets, sr, hop_length):
tempo = librosa.beat.tempo(onset_envelope=onsets, sr=sr,
hop_length=hop_length,
start_bpm=start_bpm,
aggregate=aggregate)
# Depending on bin resolution, we might not be able to match exactly
assert np.allclose(tempo, start_bpm, atol=1e0)
def test_beat():
y, sr = librosa.load(__EXAMPLE_FILE)
hop_length = 512
onset_env = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
def __test(with_audio, with_tempo, start_bpm, bpm, trim, tightness, prior):
if with_audio:
_y = y
_ons = None
else:
_y = None
_ons = onset_env
tempo, beats = librosa.beat.beat_track(y=_y,
sr=sr,
onset_envelope=_ons,
hop_length=hop_length,
start_bpm=start_bpm,
tightness=tightness,
trim=trim,
bpm=bpm,
prior=prior)
assert tempo >= 0
if len(beats) > 0:
assert beats.min() >= 0
assert beats.max() <= len(onset_env)
for with_audio in [False, True]:
for with_tempo in [False, True]:
for trim in [False, True]:
for start_bpm in [-20, 0, 60, 120, 240]:
for bpm in [-20, 0, None, 150, 360]:
for tightness in [0, 100, 10000]:
for prior in [None, scipy.stats.uniform(60, 240)]:
if (tightness <= 0 or
(bpm is not None and bpm <= 0) or
(start_bpm is not None and
bpm is None and start_bpm <= 0)):
tf = pytest.mark.xfail(__test, raises=librosa.ParameterError)
else:
tf = __test
yield (tf, with_audio, with_tempo,
start_bpm, bpm, trim, tightness, prior)
@pytest.mark.parametrize('sr', [None, 44100])
@pytest.mark.parametrize('hop_length', [512, 1024])
@pytest.mark.parametrize('units', ['frames', 'time', 'samples',
pytest.mark.xfail('bad units', raises=librosa.ParameterError)])
def test_beat_units(sr, hop_length, units):
y, sr = librosa.load(__EXAMPLE_FILE, sr=sr)
tempo, b1 = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length)
_, b2 = librosa.beat.beat_track(y=y, sr=sr, hop_length=hop_length,
units=units)
t1 = librosa.frames_to_time(b1, sr=sr, hop_length=hop_length)
if units == 'time':
t2 = b2
elif units == 'samples':
t2 = librosa.samples_to_time(b2, sr=sr)
elif units == 'frames':
t2 = librosa.frames_to_time(b2, sr=sr, hop_length=hop_length)
assert np.allclose(t1, t2)
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [256, 512])
@pytest.mark.parametrize('win_length', [192, 384])
@pytest.mark.parametrize('use_onset', [False, True])
@pytest.mark.parametrize('tempo_min, tempo_max', [(30, 300),
(None, 240),
(60, None),
pytest.mark.xfail((120, 80),
raises=librosa.ParameterError)])
@pytest.mark.parametrize('prior', [None, scipy.stats.lognorm(s=1, loc=np.log(120), scale=120)])
def test_plp(sr, hop_length, win_length, tempo_min, tempo_max, use_onset, prior):
y, sr = librosa.load(__EXAMPLE_FILE, sr=sr)
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
if use_onset:
pulse = librosa.beat.plp(y=y, sr=sr, onset_envelope=oenv,
hop_length=hop_length,
win_length=win_length,
tempo_min=tempo_min,
tempo_max=tempo_max,
prior=prior)
else:
pulse = librosa.beat.plp(y=y, sr=sr,
hop_length=hop_length,
win_length=win_length,
tempo_min=tempo_min,
tempo_max=tempo_max,
prior=prior)
assert len(pulse) == len(oenv)
assert np.all(pulse >= 0)
assert np.all(pulse <= 1)
# The beat tracking regression test is no longer enabled: corrections to
# librosa's implementation mean its output no longer matches the legacy data
@pytest.mark.skip
@pytest.mark.parametrize('infile', files(os.path.join('data', 'beat-beat-*.mat')))
def deprecated_test_beat(infile):
DATA = load(infile)
(bpm, beats) = librosa.beat.beat_track(y=None,
sr=8000,
hop_length=32,
onset_envelope=DATA['onsetenv'][0])
beat_times = librosa.frames_to_time(beats, sr=8000, hop_length=32)
assert np.allclose(beat_times, DATA['beats'])
| carlthome/librosa | tests/test_beat.py | Python | isc | 8,484 | ["Brian"] | 6b1302dc0b71ecb8f8ada9f3048bb96cd1895cd0b8abe02614676c0e7e54b0ed |
"""
Quatfit routines for PDB2PQR
This module is used to find the coordinates of a new
atom based on a reference set of
coordinates and a definition set of coordinates.
Original Code by David J. Heisterberg
The Ohio Supercomputer Center
1224 Kinnear Rd.
Columbus, OH 43212-1163
(614)292-6036
djh@osc.edu djh@ohstpy.bitnet ohstpy::djh
Translated to C from fitest.f program and interfaced with
Xmol program by Jan Labanowski, jkl@osc.edu jkl@ohstpy.bitnet
ohstpy::jkl
----------------------------
PDB2PQR -- An automated pipeline for the setup, execution, and analysis of
Poisson-Boltzmann electrostatics calculations
Nathan A. Baker (baker@biochem.wustl.edu)
Todd Dolinsky (todd@ccb.wustl.edu)
Dept. of Biochemistry and Molecular Biophysics
Center for Computational Biology
Washington University in St. Louis
Jens Nielsen (Jens.Nielsen@ucd.ie)
University College Dublin
Additional contributing authors listed in documentation and supporting
package licenses.
Copyright (c) 2003-2005. Washington University in St. Louis.
All Rights Reserved.
This file is part of PDB2PQR.
PDB2PQR is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
PDB2PQR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PDB2PQR; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
----------------------------
"""
__date__ = "30 September 2003"
__author__ = "David Heisterberg, Jan Labanowski, Jens Erik Nielsen, Todd Dolinsky"
import math
from utilities import *
def findCoordinates(numpoints, refcoords, defcoords, defatomcoords):
"""
Driver for the quaternion file. Provide the coordinates as inputs
and obtain the coordinates for the new atom as output.
Parameters
numpoints: The number of points in each list (int)
refcoords: The reference coordinates, a list of lists of form
[x,y,z] (list)
defcoords: The definition coordinates, a list of lists of form
[x,y,z] (list)
defatomcoords: The definition coordinates for the atom to be
placed in the reference frame (list)
Returns
newcoords: The coordinates of the new atom in the
reference frame (list)
"""
refcenter, fitcenter, rotation = qfit(numpoints, refcoords, defcoords)
newcoords = qtransform(1, defatomcoords, refcenter, fitcenter, rotation)
# Only return the first coordinates
return newcoords[0]
def qtransform(numpoints, defcoords, refcenter, fitcenter, rotation):
"""
Transform the set of defcoords using the reference center, the fit
center, and a rotation matrix.
Parameters
numpoints: The number of points in each list (int)
defcoords: Definition coordinates (list)
refcenter: The reference center (list)
        fitcenter: The definition (fit) center (list)
rotation: The rotation matrix (list)
Returns
newcoords: The coordinates of the new point (list)
"""
if numpoints == 1:
defcoords = [defcoords]
fitcoords = translate(numpoints, defcoords, fitcenter, 1)
rotated = rotmol(numpoints, fitcoords, rotation)
newcoords = translate(numpoints, rotated, refcenter, 2)
return newcoords
def qfit(numpoints, refcoords, defcoords):
"""
    Find the centers of the reference and definition coordinate sets and the
    best-fit rotation between them.
    Parameters
        numpoints: The number of points in each list (int)
        refcoords: List of reference coordinates, with each set
                   a list of form [x,y,z] (list)
        defcoords: List of definition coordinates, with each set
                   a list of form [x,y,z] (list)
    Returns
        refcenter: Center of the reference coordinates (list)
        defcenter: Center of the definition coordinates (list)
        u        : The best-fit left rotation matrix (list)
    """
nrot = 30
refcenter, refcoords = center(numpoints, refcoords)
defcenter, defcoords = center(numpoints, defcoords)
q, u = qtrfit(numpoints, defcoords, refcoords, nrot)
#rotated = rotmol(numpoints, defcoords, u)
#newcoords = translate(numpoints, rotated, refcenter, 2)
return refcenter, defcenter, u
def qchichange(initcoords, refcoords, angle):
"""
Change the chiangle of the reference coordinate using the
initcoords and the given angle
Parameters
initcoords: Coordinates based on the point and basis atoms
(one dimensional list)
difchi : The angle to use (float)
refcoords : The atoms to analyze (list of many coordinates)
Returns
newcoords : The new coordinates of the atoms (list of many coords)
"""
# Initialize
L,R = [],[]
for i in range(3):
L.append(0.0)
R.append([0.0,0.0,0.0])
# Convert to radians and normalize
radangle = math.pi * angle/180.0
normalized = normalize(initcoords)
L[0] = normalized[0]
L[1] = normalized[1]
L[2] = normalized[2]
# Construct the rotation matrix
R[0][0] = math.cos(radangle) + L[0]*L[0] * (1.0 - math.cos(radangle))
R[1][1] = math.cos(radangle) + L[1]*L[1] * (1.0 - math.cos(radangle))
R[2][2] = math.cos(radangle) + L[2]*L[2] * (1.0 - math.cos(radangle))
R[1][0] = L[0]*L[1]*(1.0 - math.cos(radangle)) - L[2] * math.sin(radangle)
R[2][0] = L[0]*L[2]*(1.0 - math.cos(radangle)) + L[1] * math.sin(radangle)
R[0][1] = L[1]*L[0]*(1.0 - math.cos(radangle)) + L[2] * math.sin(radangle)
R[2][1] = L[1]*L[2]*(1.0 - math.cos(radangle)) - L[0] * math.sin(radangle)
R[0][2] = L[2]*L[0]*(1.0 - math.cos(radangle)) - L[1] * math.sin(radangle)
R[1][2] = L[2]*L[1]*(1.0 - math.cos(radangle)) + L[0] * math.sin(radangle)
numpoints = len(refcoords)
newcoords = rotmol(numpoints, refcoords, R)
return newcoords
def rotmol(numpoints, x, u):
"""
Rotate a molecule
Parameters
numpoints: The number of points in the list (int)
x: The input coordinates (list)
u: The left rotation matrix (list)
Returns
out: The rotated coordinates out=u * x (list)
"""
out = []
for i in range(numpoints):
out.append([])
out[i].append(u[0][0] *x[i][0] + u[1][0] * x[i][1] + u[2][0] * x[i][2])
out[i].append(u[0][1] *x[i][0] + u[1][1] * x[i][1] + u[2][1] * x[i][2])
out[i].append(u[0][2] *x[i][0] + u[1][2] * x[i][1] + u[2][2] * x[i][2])
return out
def qtrfit(numpoints, defcoords, refcoords, nrot):
"""
Find the quaternion, q, [and left rotation matrix, u] that minimizes
| qTXq - Y | ^ 2 [|uX - Y| ^ 2]
This is equivalent to maximizing Re (qTXTqY)
The left rotation matrix, u, is obtained from q by
u = qT1q
Parameters
numpoints: The number of points in each list (int)
defcoords: List of definition coordinates, with each set
a list of form [x,y,z] (list)
refcoords: List of fitted coordinates, with each set
a list of form [x,y,z] (list)
nrot : The maximum number of jacobi sweeps
Returns
q : The best-fit quaternion
u : The best-fit left rotation matrix
"""
xxyx = 0.0
xxyy = 0.0
xxyz = 0.0
xyyx = 0.0
xyyy = 0.0
xyyz = 0.0
xzyx = 0.0
xzyy = 0.0
xzyz = 0.0
q = []
c = []
for i in range(numpoints):
xxyx = xxyx + defcoords[i][0] * refcoords[i][0]
xxyy = xxyy + defcoords[i][0] * refcoords[i][1]
xxyz = xxyz + defcoords[i][0] * refcoords[i][2]
xyyx = xyyx + defcoords[i][1] * refcoords[i][0]
xyyy = xyyy + defcoords[i][1] * refcoords[i][1]
xyyz = xyyz + defcoords[i][1] * refcoords[i][2]
xzyx = xzyx + defcoords[i][2] * refcoords[i][0]
xzyy = xzyy + defcoords[i][2] * refcoords[i][1]
xzyz = xzyz + defcoords[i][2] * refcoords[i][2]
for i in range(4):
c.append([])
for j in range(4):
c[i].append(0.0)
c[0][0] = xxyx + xyyy + xzyz
c[0][1] = xzyy - xyyz
c[1][1] = xxyx - xyyy - xzyz
c[0][2] = xxyz - xzyx
c[1][2] = xxyy + xyyx
c[2][2] = xyyy - xzyz - xxyx
c[0][3] = xyyx - xxyy
c[1][3] = xzyx + xxyz
c[2][3] = xyyz + xzyy
c[3][3] = xzyz - xxyx - xyyy
d,v = jacobi(c, nrot) # diagonalize c
for i in range(4):
q.append(v[i][3])
u = q2mat(q)
return q,u
def jacobi(a, nrot):
"""
Jacobi diagonalizer with sorted output, only good for 4x4 matrices
Parameters
a: Matrix to diagonalize (4x4 list)
nrot: Maximum number of sweeps
Returns
d: Eigenvalues
v: Eigenvectors
"""
v = []
d = []
for j in range(4):
d.append(0)
v.append([])
for i in range(4):
v[j].append(0.0)
v[j][j] = 1.0
d[j] = a[j][j]
for l in range(nrot):
dnorm = 0.0
onorm = 0.0
for j in range(4):
dnorm = dnorm + abs(d[j])
for i in range(j):
onorm = onorm + abs(a[i][j])
if dnorm != 0:
if onorm/dnorm <= 1e-12: break
for j in range(1,4):
for i in range(j):
b = a[i][j]
if abs(b) > 0.0:
dma = d[j] - d[i]
if abs(dma) + abs(b) <= abs(dma):
t = b / dma
else:
q = 0.5 * dma/b
t = 1.0/(abs(q) + math.sqrt(1 + q*q))
if q < 0:
t = t * -1
c = 1.0/math.sqrt(t*t + 1)
s = t*c
a[i][j] = 0.0
for k in range(i):
atemp = c * a[k][i] - s * a[k][j]
a[k][j] = s * a[k][i] + c * a[k][j]
a[k][i] = atemp
for k in range(i+1 ,j):
atemp = c * a[i][k] - s * a[k][j]
a[k][j] = s * a[i][k] + c * a[k][j]
a[i][k] = atemp
for k in range(j+1, 4):
atemp = c * a[i][k] - s * a[j][k]
a[j][k] = s * a[i][k] + c * a[j][k]
a[i][k] = atemp
for k in range(4):
vtemp = c * v[k][i] - s * v[k][j]
v[k][j] = s * v[k][i] + c * v[k][j]
v[k][i] = vtemp
dtemp = c*c*d[i] + s*s*d[j] - 2.0*c*s*b
d[j] = s*s*d[i] + c*c*d[j] + 2.0*c*s*b
d[i] = dtemp
nrot = l
for j in range(3):
k = j
dtemp = d[k]
for i in range(j+1,4):
if d[i] < dtemp:
k = i
dtemp = d[k]
if k > j:
d[k] = d[j]
d[j] = dtemp
for i in range(4):
dtemp = v[i][k]
v[i][k] = v[i][j]
v[i][j] = dtemp
return d,v
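# Hedged cross-check sketch (added for illustration, not part of the original
# module; assumes numpy is available, which quatfit itself does not require):
#
#     import numpy as np
#     m = [[4.0, 1.0, 0.0, 0.0],
#          [1.0, 3.0, 0.0, 0.0],
#          [0.0, 0.0, 2.0, 0.0],
#          [0.0, 0.0, 0.0, 1.0]]
#     d, v = jacobi([row[:] for row in m], 30)  # pass a copy: jacobi mutates a
#     # d is sorted ascending, so it should match numpy's eigvalsh output:
#     assert np.allclose(d, np.linalg.eigvalsh(np.array(m)))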
def q2mat(q):
"""
Generate a left rotation matrix from a normalized quaternion
Parameters
q: The normalized quaternion (list)
Returns
u: The rotation matrix (2-dimensional list)
"""
u = []
for i in range(3):
u.append([])
for j in range(3):
u[i].append(0.0)
u[0][0] = q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3]
u[0][1] = 2.0 * (q[1] * q[2] - q[0] * q[3])
u[0][2] = 2.0 * (q[1] * q[3] + q[0] * q[2])
u[1][0] = 2.0 * (q[2] * q[1] + q[0] * q[3])
u[1][1] = q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3]
u[1][2] = 2.0 * (q[2] * q[3] - q[0] * q[1])
u[2][0] = 2.0 *(q[3] * q[1] - q[0] * q[2])
u[2][1] = 2.0 * (q[3] * q[2] + q[0] * q[1])
u[2][2] = q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]
return u
def center(numpoints, refcoords):
"""
Center a molecule using equally weighted points
Parameters
numpoints: Number of points
refcoords: List of reference coordinates, with each set
a list of form [x,y,z] (list)
Returns
refcenter: Center of the set of points (list)
relcoords: Moved refcoords relative to refcenter (list)
"""
refcenter = []
relcoords = []
for i in range(3):
refcenter.append(0.0)
for i in range(numpoints):
refcenter[0] += refcoords[i][0]
refcenter[1] += refcoords[i][1]
refcenter[2] += refcoords[i][2]
for i in range(3):
refcenter[i] = refcenter[i] / numpoints
for i in range(numpoints):
relcoords.append([])
relcoords[i].append(refcoords[i][0] - refcenter[0])
relcoords[i].append(refcoords[i][1] - refcenter[1])
relcoords[i].append(refcoords[i][2] - refcenter[2])
return refcenter, relcoords
def translate(numpoints, refcoords, center, mode):
"""
Translate a molecule using equally weighted points
Parameters
numpoints: Number of points
refcoords: List of reference coordinates, with each set
a list of form [x,y,z] (list)
center: Center of the system(list)
mode: If 1, center will be subtracted from refcoords
If 2, center will be added to refcoords
Returns
relcoords: Moved refcoords relative to refcenter (list)
"""
relcoords = []
if mode == 1:
modif = -1
elif mode == 2:
modif = 1
for i in range(numpoints):
relcoords.append([])
relcoords[i].append(refcoords[i][0] + modif * center[0])
relcoords[i].append(refcoords[i][1] + modif * center[1])
relcoords[i].append(refcoords[i][2] + modif * center[2])
return relcoords
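# --- Hedged usage sketch (added for illustration; not part of the original
# PDB2PQR module). The coordinate values below are made up; any three or more
# matched reference/definition points will do. Running this file directly
# still requires the `utilities` module imported at the top.
if __name__ == "__main__":
    refcoords = [[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [0.0, 1.5, 0.0]]
    defcoords = [[0.1, 0.0, 0.0], [1.6, 0.1, 0.0], [0.0, 1.4, 0.1]]
    defatomcoords = [1.0, 1.0, 1.0]
    newatom = findCoordinates(3, refcoords, defcoords, defatomcoords)
    print("New atom placed at %s" % (newatom,))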
| dmnfarrell/peat | Protool/quatfit.py | Python | mit | 14,790 | ["COLUMBUS"] | c13e260ced31224966343da85f7dd3e2608f1a2648f0e801bf12eb70e216a6ed |
from __future__ import print_function
import mdtraj as md
from mdtraj.testing import get_fn, eq
import os
import shutil
import tarfile
import tempfile
from fahmunge import fah
def test_fah_core17_1():
filename = get_fn('frame0.xtc')
tempdir = tempfile.mkdtemp()
tar_filename = os.path.join(tempdir, "results-000.tar.bz2")
    tar = tarfile.open(tar_filename, mode='w:bz2')
tar.add(filename, arcname="positions.xtc")
tar.close()
shutil.copy(tar_filename, os.path.join(tempdir, "results-001.tar.bz2"))
trj0 = md.load(get_fn("frame0.xtc"), top=get_fn("frame0.h5"))
output_filename = os.path.join(tempdir, "traj.h5")
fah.concatenate_core17(tempdir, trj0, output_filename)
trj = md.load(output_filename)
eq(trj.n_atoms, trj0.n_atoms)
eq(trj.n_frames, trj0.n_frames * 2)
shutil.copy(tar_filename, os.path.join(tempdir, "results-002.tar.bz2"))
fah.concatenate_core17(tempdir, trj0, output_filename)
# Should notice the new file and append it to the HDF file.
trj = md.load(output_filename)
eq(trj.n_atoms, trj0.n_atoms)
eq(trj.n_frames, trj0.n_frames * 3)
| steven-albanese/FAHMunge | FAHMunge/tests/test_fah.py | Python | lgpl-2.1 | 1,251 | ["MDTraj"] | 26d7b88e94f3d1884b97786ec58d54f2adccf00191cb706b9ceadc687f8dd45d |
""" An utility to load modules and objects in DIRAC and extensions, being sure that the extensions are considered
"""
import collections
import os
import re
import pkgutil
import DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
from DIRAC.Core.Utilities.Extensions import extensionsByPriority, recurseImport
class ObjectLoader(metaclass=DIRACSingleton):
"""Class for loading objects. Example:
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
ol = ObjectLoader()
ol.loadObject('TransformationSystem.Client.TransformationClient')
"""
def __init__(self, baseModules=False):
"""init"""
# We save the original arguments in case
# we need to reinitialize the rootModules
        # CAUTION: we can't do it after doing
# baseModules = ['DIRAC']
# because then baseModules, self.baseModules, and __rootModules
# are the same and edited in place by __generateRootModules !!
# (Think of it, it's a binding to a list)
self.originalBaseModules = baseModules
self._init(baseModules)
def _init(self, baseModules):
"""Actually performs the initialization"""
if not baseModules:
baseModules = ["DIRAC"]
self.__rootModules = baseModules
self.__objs = {}
self.__generateRootModules(baseModules)
def reloadRootModules(self):
"""Retrigger the initialization of the rootModules.
This should be used with care.
Currently, its only use is (and should stay) to retrigger
the initialization after the CS has been fully initialized in
LocalConfiguration.enableCS
"""
# Load the original baseModule argument that was given
# to the constructor
baseModules = self.originalBaseModules
# and replay the init sequence
self._init(baseModules)
def __rootImport(self, modName, hideExceptions=False):
"""Auto search which root module has to be used"""
for rootModule in self.__rootModules:
impName = modName
if rootModule:
impName = "%s.%s" % (rootModule, impName)
gLogger.debug("Trying to load %s" % impName)
result = recurseImport(impName, hideExceptions=hideExceptions)
if not result["OK"]:
return result
if result["Value"]:
return S_OK((impName, result["Value"]))
return S_OK()
def __generateRootModules(self, baseModules):
"""Iterate over all the possible root modules"""
self.__rootModules = baseModules
for rootModule in reversed(extensionsByPriority()):
if rootModule not in self.__rootModules:
self.__rootModules.append(rootModule)
self.__rootModules.append("")
# Reversing the order because we want first to look in the extension(s)
self.__rootModules.reverse()
def loadModule(self, importString, hideExceptions=False):
"""Load a module from an import string"""
result = self.__rootImport(importString, hideExceptions=hideExceptions)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR(DErrno.EIMPERR, "No module %s found" % importString)
return S_OK(result["Value"][1])
def loadObject(self, importString, objName=False, hideExceptions=False):
"""Load an object from inside a module"""
if not objName:
objName = importString.split(".")[-1]
result = self.loadModule(importString, hideExceptions=hideExceptions)
if not result["OK"]:
return result
modObj = result["Value"]
try:
result = S_OK(getattr(modObj, objName))
result["ModuleFile"] = modObj.__file__
return result
except AttributeError:
return S_ERROR(DErrno.EIMPERR, "%s does not contain a %s object" % (importString, objName))
def getObjects(self, modulePath, reFilter=None, parentClass=None, recurse=False, continueOnError=False):
"""Search for modules under a certain path
modulePath is the import string needed to access the parent module.
Root modules will be included automatically (like DIRAC). For instance "ConfigurationSystem.Service"
reFilter is a regular expression to filter what to load. For instance ".*Handler"
parentClass is a class object from which the loaded modules have to import from. For instance RequestHandler
:param continueOnError: if True, continue loading further module even if one fails
"""
modules = collections.OrderedDict()
if isinstance(reFilter, str):
reFilter = re.compile(reFilter)
for rootModule in self.__rootModules:
impPath = modulePath
if rootModule:
impPath = "%s.%s" % (rootModule, impPath)
gLogger.debug("Trying to load %s" % impPath)
result = recurseImport(impPath)
if not result["OK"]:
return result
if not result["Value"]:
continue
parentModule = result["Value"]
gLogger.verbose("Loaded module %s at %s" % (impPath, parentModule.__path__))
for _modLoader, modName, isPkg in pkgutil.walk_packages(parentModule.__path__):
if reFilter and not reFilter.match(modName):
continue
if isPkg:
if recurse:
result = self.getObjects(
"%s.%s" % (modulePath, modName), reFilter=reFilter, parentClass=parentClass, recurse=recurse
)
if not result["OK"]:
return result
modules.update(result["Value"])
continue
modKeyName = "%s.%s" % (modulePath, modName)
if modKeyName in modules:
continue
fullName = "%s.%s" % (impPath, modName)
result = recurseImport(fullName)
if not result["OK"]:
if continueOnError:
gLogger.error(
"Error loading module but continueOnError is true",
"module %s error %s" % (fullName, result),
)
continue
return result
if not result["Value"]:
continue
modClass = getattr(result["Value"], modName, None)
if not modClass:
gLogger.warn("%s does not contain a %s object" % (fullName, modName))
continue
if parentClass and not issubclass(modClass, parentClass):
continue
modules[modKeyName] = modClass
return S_OK(modules)
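# Hedged usage sketch (illustrative only; needs a configured DIRAC install.
# The path and filter below mirror the examples in the getObjects docstring):
#
#     ol = ObjectLoader()
#     res = ol.getObjects("ConfigurationSystem.Service", reFilter=".*Handler")
#     if res["OK"]:
#         for name, cls in res["Value"].items():
#             gLogger.verbose("discovered %s -> %s" % (name, cls))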
def loadObjects(path, reFilter=None, parentClass=None):
"""
    :param str path: the path to the system, for example: DIRAC/AccountingSystem
    :param object reFilter: regular expression used to find the class
:param object parentClass: class instance
:return: dictionary containing the name of the class and its instance
"""
if not reFilter:
reFilter = re.compile(r".*[a-z1-9]\.py$")
pathList = List.fromChar(path, "/")
objectsToLoad = {}
# Find which object files match
for parentModule in extensionsByPriority():
objDir = os.path.join(os.path.dirname(os.path.dirname(DIRAC.__file__)), parentModule, *pathList)
if not os.path.isdir(objDir):
continue
for objFile in os.listdir(objDir):
if reFilter.match(objFile):
pythonClassName = objFile[:-3]
if pythonClassName not in objectsToLoad:
gLogger.info("Adding to load queue %s/%s/%s" % (parentModule, path, pythonClassName))
objectsToLoad[pythonClassName] = parentModule
# Load them!
loadedObjects = {}
for pythonClassName in objectsToLoad:
parentModule = objectsToLoad[pythonClassName]
try:
# Where parentModule can be DIRAC, pathList is something like [ "AccountingSystem", "Client", "Types" ]
# And the python class name is.. well, the python class name
objPythonPath = "%s.%s.%s" % (parentModule, ".".join(pathList), pythonClassName)
objModule = __import__(objPythonPath, globals(), locals(), pythonClassName)
objClass = getattr(objModule, pythonClassName)
except Exception as e:
gLogger.error("Can't load type", "%s/%s: %s" % (parentModule, pythonClassName, str(e)))
continue
if parentClass == objClass:
continue
if parentClass and not issubclass(objClass, parentClass):
gLogger.warn("%s is not a subclass of %s. Skipping" % (objClass, parentClass))
continue
gLogger.info("Loaded %s" % objPythonPath)
loadedObjects[pythonClassName] = objClass
return loadedObjects
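# Hedged usage sketch for loadObjects (illustrative only; the path mirrors the
# example in the docstring and assumes a DIRAC installation is available):
#
#     types = loadObjects("AccountingSystem/Client/Types")
#     for name, cls in types.items():
#         gLogger.info("loaded accounting type %s" % name)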
| DIRACGrid/DIRAC | src/DIRAC/Core/Utilities/ObjectLoader.py | Python | gpl-3.0 | 9,279 | ["DIRAC"] | 9ef381741ce0c040bc3955c30b371b38678a5e1b5fd81af19c77e910c5a6fc56 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import random
import unittest
from collections import Counter
import numpy as np
from pymatgen.analysis.defects.utils import (
ChargeDensityAnalyzer,
ChargeInsertionAnalyzer,
QModel,
StructureMotifInterstitial,
TopographyAnalyzer,
calculate_vol,
closestsites,
converge,
eV_to_k,
generate_R_and_G_vecs,
generate_reciprocal_vectors_squared,
genrecip,
tune_for_gamma,
)
from pymatgen.core import PeriodicSite
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.util.testing import PymatgenTest
try:
from skimage.feature import peak_local_max
except ImportError:
peak_local_max = None
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "chgden")
class DefectsUtilsTest(PymatgenTest):
def test_qmodel(self):
qm = QModel()
modqm = QModel(beta=2.0, expnorm=0.5, gamma=0.1)
# test rho_rec
self.assertEqual(qm.rho_rec(1.0), 0.77880078307140488)
self.assertEqual(modqm.rho_rec(1.0), 0.6814583156907158)
# test rho_rec_limit0
self.assertEqual(qm.rho_rec_limit0, -0.25)
self.assertEqual(modqm.rho_rec_limit0, -0.51)
def test_eV_to_k(self):
self.assertAlmostEqual(eV_to_k(1.0), 0.9681404248678961)
def test_genrecip(self):
a = 6.0
lattconsts = [a, a / 2.0, 3.0 * a]
lattvectors = [[lattconsts[i] if i == j else 0.0 for j in range(3)] for i in range(3)]
recip_list = list(genrecip(lattvectors[0], lattvectors[1], lattvectors[2], 300))
self.assertEqual(len(recip_list), 25620)
def test_generate_reciprocal_vectors_squared(self):
# test cubic case
a = 6.0
lattvectors = [[a if i == j else 0.0 for j in range(3)] for i in range(3)]
brecip = [1.0966227112321507 for i in range(6)]
self.assertAlmostEqual(
list(generate_reciprocal_vectors_squared(lattvectors[0], lattvectors[1], lattvectors[2], 1.3)),
brecip,
)
# test orthorhombic case
lattconsts = [a, a / 2.0, 3.0 * a]
lattvectors = [[lattconsts[i] if i == j else 0.0 for j in range(3)] for i in range(3)]
brval = 0.4873878716587337
brecip = [brval, brval / 4.0, brval / 4.0, brval]
self.assertAlmostEqual(
list(generate_reciprocal_vectors_squared(lattvectors[0], lattvectors[1], lattvectors[2], 1.0)),
brecip,
)
# test triclinic case
lattvectors = [[1.5, 0.2, 0.3], [0.3, 1.2, 0.2], [0.5, 0.4, 1.3]]
brval = 24.28330561545568
brecip = [brval, brval]
self.assertAlmostEqual(
list(generate_reciprocal_vectors_squared(lattvectors[0], lattvectors[1], lattvectors[2], 30.0)),
brecip,
)
def test_closest_sites(self):
struct = PymatgenTest.get_structure("VO2")
# test O vacancy
dstruct = struct.copy()
dstruct.remove_sites([0])
pos = struct.sites[0].coords
bsite, dsite = closestsites(struct, dstruct, pos)
self.assertEqual(bsite[2], 0) # test against index
self.assertEqual(dsite[2], 4)
# test V vacancy
dstruct = struct.copy()
dstruct.remove_sites([4])
pos = struct.sites[4].coords
bsite, dsite = closestsites(struct, dstruct, pos)
self.assertEqual(bsite[2], 4) # test against index
self.assertTrue(dsite[2] in [1, 3]) # index 1 and index 3 are the same distance
def test_converges(self):
self.assertAlmostEqual(converge(np.sqrt, 0.1, 0.1, 1.0), 0.6324555320336759)
def test_tune_for_gamma(self):
lattice = Lattice([[4.692882, -8.12831, 0.0], [4.692882, 8.12831, 0.0], [0.0, 0.0, 10.03391]])
epsilon = 10.0 * np.identity(3)
gamma = tune_for_gamma(lattice, epsilon)
self.assertAlmostEqual(gamma, 0.19357221)
def test_generate_R_and_G_vecs(self):
gamma = 0.19357221
prec = 28
lattice = Lattice([[4.692882, -8.12831, 0.0], [4.692882, 8.12831, 0.0], [0.0, 0.0, 10.03391]])
epsilon = 10.0 * np.identity(3)
g_vecs, recip_summation, r_vecs, real_summation = generate_R_and_G_vecs(gamma, prec, lattice, epsilon)
self.assertEqual(len(g_vecs[0]), 16418)
self.assertAlmostEqual(recip_summation[0], 2.8946556e-15)
self.assertEqual(len(r_vecs[0]), 16299)
self.assertAlmostEqual(real_summation[0], 0.00679361)
class StructureMotifInterstitialTest(PymatgenTest):
def setUp(self):
self.silicon = Structure(
Lattice.cubic(5.47),
["Si", "Si", "Si", "Si", "Si", "Si", "Si", "Si"],
[
[0.000000, 0.000000, 0.500000],
[0.750000, 0.750000, 0.750000],
[0.000000, 0.500000, 1.000000],
[0.750000, 0.250000, 0.250000],
[0.500000, 0.000000, 1.000000],
[0.250000, 0.750000, 0.250000],
[0.500000, 0.500000, 0.500000],
[0.250000, 0.250000, 0.750000],
],
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=False,
site_properties=None,
)
self.smi = StructureMotifInterstitial(
self.silicon,
"Si",
motif_types=["tetrahedral", "octahedral"],
op_threshs=[0.3, 0.5],
dl=0.4,
doverlap=1.0,
facmaxdl=1.51,
)
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C0+", "C0+"],
[[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=True,
site_properties=None,
)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012], [0, 0, 4.025]]),
["Na1+", "Cl1-"],
[[0, 0, 0], [2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=True,
site_properties=None,
)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"],
[[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=True,
site_properties=None,
)
self.square_pyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1]],
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=True,
site_properties=None,
)
self.trigonal_bipyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
[
[0, 0, 0],
[0, 0, 2.14],
[0, 2.02, 0],
[1.74937, -1.01, 0],
[-1.74937, -1.01, 0],
[0, 0, -2.14],
],
validate_proximity=False,
to_unit_cell=False,
coords_are_cartesian=True,
site_properties=None,
)
def test_all(self):
self.assertIsInstance(self.smi, StructureMotifInterstitial)
self.assertEqual(len(self.smi.enumerate_defectsites()), 1)
self.assertIsInstance(self.smi.enumerate_defectsites()[0], PeriodicSite)
self.assertEqual("Si", self.smi.enumerate_defectsites()[0].species_string)
self.assertEqual("tetrahedral", self.smi.get_motif_type(0))
elem_cn_dict = self.smi.get_coordinating_elements_cns(0)
self.assertEqual(len(list(elem_cn_dict.keys())), 1)
self.assertEqual(list(elem_cn_dict.keys())[0], "Si")
self.assertEqual(elem_cn_dict["Si"], 4)
structs = self.smi.make_supercells_with_defects(np.array([1, 1, 1]))
self.assertEqual(len(structs), 2)
self.assertIsInstance(structs[0], Structure)
def tearDown(self):
del self.smi
del self.silicon
del self.diamond
del self.nacl
del self.cscl
class TopographyAnalyzerTest(unittest.TestCase):
def setUp(self):
feo4 = Structure.from_file(os.path.join(test_dir, "LiFePO4.cif"))
feo4.remove_species(["Li"])
feo4.remove_oxidation_states()
self.feo4 = feo4
def test_topography_analyzer(self):
# check interstitial sites for FePO4 using Voronoi Tessellation
vor_feo4 = TopographyAnalyzer(self.feo4, framework_ions=["O"], cations=["P", "Fe"], check_volume=False)
vor_feo4.cluster_nodes(tol=1.2)
vor_feo4.remove_collisions(1.2)
s_feo4 = vor_feo4.get_structure_with_nodes()
sites_feo4 = np.array([s_feo4[i].frac_coords for i in range(len(s_feo4)) if s_feo4[i].species_string == "X0+"])
# check total number of vnodes
self.assertAlmostEqual(len(vor_feo4.vnodes), 24)
# check four sites that match Li sites in LiFePO4(mp-19017)
site_predicted = [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0, 0.5], [0, 0.5, 0]]
for i in range(0, 4):
            is_site_matched = False
            for site in sites_feo4:
                distance = s_feo4.lattice.get_distance_and_image(site, site_predicted[i])
                if distance[0] < 0.01:
                    is_site_matched = True
                    break
            self.assertTrue(is_site_matched)
vor_feo4.print_stats()
def test_calculate_vol(self):
s = Structure.from_file(os.path.join(test_dir, "LiFePO4.cif"))
# a = TopographyAnalyzer(
# s, framework_ions=["O"], cations=["P", "Fe"], check_volume=False
# )
coords = [s[i].coords for i in [20, 23, 25, 17, 24, 19]]
vol = calculate_vol(coords=coords)
vol_expected = 12.8884 # LiO6 volume calculated by VESTA
self.assertAlmostEqual(vol, vol_expected, 4)
@unittest.skipIf(not peak_local_max, "skimage.feature.peak_local_max module not present.")
class ChgDenAnalyzerTest(unittest.TestCase):
def setUp(self):
# This is a CHGCAR_sum file with reduced grid size
chgcar_path = os.path.join(test_dir, "CHGCAR.FePO4")
chg_FePO4 = Chgcar.from_file(chgcar_path)
self.chgcar_path = chgcar_path
self.chg_FePO4 = chg_FePO4
self.ca_FePO4 = ChargeDensityAnalyzer(chg_FePO4)
self.s_LiFePO4 = Structure.from_file(os.path.join(test_dir, "LiFePO4.cif"))
def test_get_local_extrema(self):
ca = ChargeDensityAnalyzer.from_file(self.chgcar_path)
threshold_frac = random.random()
threshold_abs_min = random.randrange(2, 14)
        threshold_abs_max = random.randrange(2700, 280000)  # randrange needs ints
# Minima test
full_list_min = self.ca_FePO4.get_local_extrema(find_min=True, threshold_frac=1.0)
frac_list_min_frac = self.ca_FePO4.get_local_extrema(find_min=True, threshold_frac=threshold_frac)
frac_list_min_abs = self.ca_FePO4.get_local_extrema(find_min=True, threshold_abs=threshold_abs_min)
self.assertAlmostEqual(len(full_list_min) * threshold_frac, len(frac_list_min_frac), delta=1)
ca.get_local_extrema(find_min=True)
df_expected = ca.extrema_df[ca.extrema_df["Charge Density"] <= threshold_abs_min]
self.assertEqual(len(frac_list_min_abs), len(df_expected))
# Maxima test
full_list_max = self.ca_FePO4.get_local_extrema(find_min=False, threshold_frac=1.0)
frac_list_max = self.ca_FePO4.get_local_extrema(find_min=False, threshold_frac=threshold_frac)
frac_list_max_abs = self.ca_FePO4.get_local_extrema(find_min=False, threshold_abs=threshold_abs_max)
self.assertAlmostEqual(len(full_list_max) * threshold_frac, len(frac_list_max), delta=1)
        # Local maxima should find all atom centers
self.assertEqual(len(self.ca_FePO4.structure), len(full_list_max))
ca.get_local_extrema(find_min=False)
df_expected = ca.extrema_df[ca.extrema_df["Charge Density"] >= threshold_abs_max]
self.assertEqual(len(frac_list_max_abs), len(df_expected))
def test_remove_collisions(self):
ca = ChargeDensityAnalyzer(self.chg_FePO4)
ca.get_local_extrema(threshold_frac=0)
ca.remove_collisions() # should not trigger error
self.assertEqual(len(ca.extrema_df), 0)
self.ca_FePO4.get_local_extrema(find_min=False, threshold_frac=1.0)
self.ca_FePO4.remove_collisions(min_dist=0.5)
self.assertEqual(len(self.ca_FePO4.extrema_df), 0)
def test_cluster_nodes(self):
ca = ChargeDensityAnalyzer(self.chg_FePO4)
ca.get_local_extrema()
ca.cluster_nodes(tol=20)
self.assertEqual(len(ca.extrema_df), 1)
def test_get_structure_with_nodes(self):
s_FePO4 = self.ca_FePO4.get_structure_with_nodes(find_min=True)
sites_predicted = np.array(
[
self.s_LiFePO4[i].frac_coords
for i in range(len(self.s_LiFePO4))
if self.s_LiFePO4[i].species_string == "Li"
]
)
sites_guess = np.array(
[s_FePO4[i].frac_coords for i in range(len(s_FePO4)) if s_FePO4[i].species_string == "X0+"]
)
distances = s_FePO4.lattice.get_all_distances(sites_predicted, sites_guess).flatten()
distances = [d for d in distances if d < 0.1]
self.assertEqual(len(distances), len(sites_predicted))
def test_from_file(self):
ca = ChargeDensityAnalyzer.from_file(self.chgcar_path)
self.assertTrue(isinstance(ca, ChargeDensityAnalyzer))
def test_sort_sites_by_integrated_chg(self):
print(self.chgcar_path)
ca = ChargeDensityAnalyzer.from_file(self.chgcar_path)
ca.get_local_extrema()
ca.sort_sites_by_integrated_chg()
print(ca._extrema_df.iloc[0], 0.5)
print(ca._extrema_df.iloc[0]["avg_charge_den"])
self.assertAlmostEqual(ca._extrema_df.iloc[0]["a"], 0.0)
self.assertAlmostEqual(ca._extrema_df.iloc[0]["b"], 0.5)
self.assertAlmostEqual(ca._extrema_df.iloc[0]["c"], 0.0)
self.assertAlmostEqual(ca._extrema_df.iloc[0]["Charge Density"], 1.65288944124)
self.assertAlmostEqual(ca._extrema_df.iloc[0]["avg_charge_den"], 0.006831484178753711)
@unittest.skipIf(not peak_local_max, "skimage.feature.peak_local_max module not present.")
class TestChargeInsertionAnalyzer(unittest.TestCase):
def setUp(self):
# This is a CHGCAR_sum file with reduced grid size
chgcar_path = os.path.join(test_dir, "CHGCAR.FePO4")
chg_FePO4 = Chgcar.from_file(chgcar_path)
self.chgcar_path = chgcar_path
self.chg_FePO4 = chg_FePO4
self.cia_FePO4 = ChargeInsertionAnalyzer(chg_FePO4)
def test_get_label(self):
self.cia_FePO4.get_labels()
self.assertEqual(len(self.cia_FePO4._extrema_df), 12)
counts = Counter(self.cia_FePO4._extrema_df.site_label)
self.assertEqual(counts, {0: 4, 1: 4, 2: 4})
def test_filter(self):
filtered_FePO4 = ChargeInsertionAnalyzer(self.chg_FePO4, max_avg_charge=0.05)
filtered_FePO4.get_labels()
counts = Counter(filtered_FePO4._extrema_df.site_label)
self.assertEqual(counts, {0: 4, 1: 4})
if __name__ == "__main__":
unittest.main()
| gmatteo/pymatgen | pymatgen/analysis/defects/tests/test_utils.py | Python | mit | 15,780 | ["VASP", "pymatgen"] | c897fb8095004024e84f426d420ef6bdace3090d78400cc100f4dab12e05ae5a |
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import contextlib
import unittest
import astroid
from astroid import extract_node
from astroid.test_utils import require_version
from astroid import InferenceError
from astroid import nodes
from astroid import util
from astroid.node_classes import AssignName, Const, Name, Starred
@contextlib.contextmanager
def _add_transform(manager, node, transform, predicate=None):
manager.register_transform(node, transform, predicate)
try:
yield
finally:
manager.unregister_transform(node, transform, predicate)
class ProtocolTests(unittest.TestCase):
def assertConstNodesEqual(self, nodes_list_expected, nodes_list_got):
self.assertEqual(len(nodes_list_expected), len(nodes_list_got))
for node in nodes_list_got:
self.assertIsInstance(node, Const)
for node, expected_value in zip(nodes_list_got, nodes_list_expected):
self.assertEqual(expected_value, node.value)
def assertNameNodesEqual(self, nodes_list_expected, nodes_list_got):
self.assertEqual(len(nodes_list_expected), len(nodes_list_got))
for node in nodes_list_got:
self.assertIsInstance(node, Name)
for node, expected_name in zip(nodes_list_got, nodes_list_expected):
self.assertEqual(expected_name, node.name)
def test_assigned_stmts_simple_for(self):
assign_stmts = extract_node("""
for a in (1, 2, 3): #@
pass
for b in range(3): #@
pass
""")
for1_assnode = next(assign_stmts[0].nodes_of_class(AssignName))
assigned = list(for1_assnode.assigned_stmts())
self.assertConstNodesEqual([1, 2, 3], assigned)
for2_assnode = next(assign_stmts[1].nodes_of_class(AssignName))
self.assertRaises(InferenceError,
list, for2_assnode.assigned_stmts())
@require_version(minver='3.0')
def test_assigned_stmts_starred_for(self):
assign_stmts = extract_node("""
for *a, b in ((1, 2, 3), (4, 5, 6, 7)): #@
pass
""")
for1_starred = next(assign_stmts.nodes_of_class(Starred))
assigned = next(for1_starred.assigned_stmts())
self.assertEqual(assigned, util.Uninferable)
def _get_starred_stmts(self, code):
assign_stmt = extract_node("{} #@".format(code))
starred = next(assign_stmt.nodes_of_class(Starred))
return next(starred.assigned_stmts())
def _helper_starred_expected_const(self, code, expected):
stmts = self._get_starred_stmts(code)
self.assertIsInstance(stmts, nodes.List)
stmts = stmts.elts
self.assertConstNodesEqual(expected, stmts)
def _helper_starred_expected(self, code, expected):
stmts = self._get_starred_stmts(code)
self.assertEqual(expected, stmts)
def _helper_starred_inference_error(self, code):
assign_stmt = extract_node("{} #@".format(code))
starred = next(assign_stmt.nodes_of_class(Starred))
self.assertRaises(InferenceError, list, starred.assigned_stmts())
@require_version(minver='3.0')
def test_assigned_stmts_starred_assnames(self):
self._helper_starred_expected_const(
"a, *b = (1, 2, 3, 4) #@", [2, 3, 4])
self._helper_starred_expected_const(
"*a, b = (1, 2, 3) #@", [1, 2])
self._helper_starred_expected_const(
"a, *b, c = (1, 2, 3, 4, 5) #@",
[2, 3, 4])
self._helper_starred_expected_const(
"a, *b = (1, 2) #@", [2])
self._helper_starred_expected_const(
"*b, a = (1, 2) #@", [1])
self._helper_starred_expected_const(
"[*b] = (1, 2) #@", [1, 2])
@require_version(minver='3.0')
def test_assigned_stmts_starred_yes(self):
# Not something iterable and known
self._helper_starred_expected("a, *b = range(3) #@", util.Uninferable)
# Not something inferrable
self._helper_starred_expected("a, *b = balou() #@", util.Uninferable)
# In function, unknown.
self._helper_starred_expected("""
def test(arg):
head, *tail = arg #@""", util.Uninferable)
# These cases aren't worth supporting.
self._helper_starred_expected(
"a, (*b, c), d = (1, (2, 3, 4), 5) #@", util.Uninferable)
@require_version(minver='3.0')
def test_assign_stmts_starred_fails(self):
# Too many starred
self._helper_starred_inference_error("a, *b, *c = (1, 2, 3) #@")
# Too many lhs values
self._helper_starred_inference_error("a, *b, c = (1, 2) #@")
        # This could be solved properly, but it needlessly complicates the
        # code for assigned_stmts without offering real benefit.
self._helper_starred_inference_error(
"(*a, b), (c, *d) = (1, 2, 3), (4, 5, 6) #@")
def test_assigned_stmts_assignments(self):
assign_stmts = extract_node("""
c = a #@
d, e = b, c #@
""")
simple_assnode = next(assign_stmts[0].nodes_of_class(AssignName))
assigned = list(simple_assnode.assigned_stmts())
self.assertNameNodesEqual(['a'], assigned)
assnames = assign_stmts[1].nodes_of_class(AssignName)
simple_mul_assnode_1 = next(assnames)
assigned = list(simple_mul_assnode_1.assigned_stmts())
self.assertNameNodesEqual(['b'], assigned)
simple_mul_assnode_2 = next(assnames)
assigned = list(simple_mul_assnode_2.assigned_stmts())
self.assertNameNodesEqual(['c'], assigned)
@require_version(minver='3.6')
def test_assigned_stmts_annassignments(self):
annassign_stmts = extract_node("""
a: str = "abc" #@
b: str #@
""")
simple_annassign_node = next(annassign_stmts[0].nodes_of_class(AssignName))
assigned = list(simple_annassign_node.assigned_stmts())
self.assertEqual(1, len(assigned))
self.assertIsInstance(assigned[0], Const)
self.assertEqual(assigned[0].value, "abc")
empty_annassign_node = next(annassign_stmts[1].nodes_of_class(AssignName))
assigned = list(empty_annassign_node.assigned_stmts())
self.assertEqual(1, len(assigned))
self.assertIs(assigned[0], util.Uninferable)
def test_sequence_assigned_stmts_not_accepting_empty_node(self):
def transform(node):
node.root().locals['__all__'] = [node.value]
manager = astroid.MANAGER
with _add_transform(manager, astroid.Assign, transform):
module = astroid.parse('''
__all__ = ['a']
''')
module.wildcard_import_names()
def test_not_passing_uninferable_in_seq_inference(self):
class Visitor(object):
def visit(self, node):
for child in node.get_children():
child.accept(self)
visit_module = visit
visit_assign = visit
visit_binop = visit
visit_list = visit
visit_const = visit
visit_name = visit
def visit_assignname(self, node):
for _ in node.infer():
pass
parsed = extract_node("""
a = []
x = [a*2, a]*2*2
""")
parsed.accept(Visitor())
if __name__ == '__main__':
unittest.main()
| lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/astroid/tests/unittest_protocols.py | Python | mit | 7,587 | ["VisIt"] | 2d8d8ae0d3c3158a09a3bce08235396193ed67421274e7667a1080e6b0d35a7f |
########################################################################
# $HeadURL$
# File : InputDataAgent.py
########################################################################
"""
The Input Data Agent queries the file catalog for specified job input data and adds the
relevant information to the job optimizer parameters to be used during the
scheduling decision.
"""
__RCSID__ = "$Id$"
import time
import random
from DIRAC.WorkloadManagementSystem.Executor.Base.OptimizerExecutor import OptimizerExecutor
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE, getSEsForSite
from DIRAC.Core.Utilities import DictCache
from DIRAC.Core.Security import Properties
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
from DIRAC import S_OK, S_ERROR
class InputDataValidation( OptimizerExecutor ):
"""
The specific Optimizer must provide the following methods:
- initializeOptimizer() before each execution cycle
- checkJob() - the main method called for each job
"""
@classmethod
def initializeOptimizer( cls ):
""" Initialization of the Agent.
"""
random.seed()
cls.__SEStatus = DictCache.DictCache()
cls.__sitesForSE = DictCache.DictCache()
try:
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
except ImportError, excp :
return S_ERROR( "Could not import JobDB: %s" % str( excp ) )
try:
cls.__jobDB = JobDB()
except RuntimeError:
return S_ERROR( "Cannot connect to JobDB" )
cls.__siteStatus = SiteStatus()
cls.ex_setOption( "FailedStatus", "Input Data Not Available" )
return S_OK()
def optimizeJob( self, jid, jobState ):
result = self.doTheThing( jid, jobState )
if not result[ 'OK' ]:
jobState.setAppStatus( result[ 'Message' ] )
return result
return S_OK()
def doTheThing( self, jid, jobState ):
result = jobState.getInputData()
if not result[ 'OK' ]:
self.jobLog.error( "Can't retrieve input data: %s" % result[ 'Message' ] )
return result
lfnData = result[ 'Value' ]
result = jobState.getManifest()
if not result[ 'OK' ]:
return result
manifest = result[ 'Value' ]
result = self.freezeByBannedSE( manifest, lfnData )
if not result[ 'OK' ]:
return result
result = self.selectSiteStage( manifest, lfnData )
if not result[ 'OK' ]:
return result
candidates, lfn2Stage = result[ 'Value' ]
if not lfn2Stage:
jobState.setOptParameter( "DataSites", ",".join( candidates ) )
self.jobLog.notice( "No need to stage. Sending to next optimizer" )
return self.setNextOptimizer()
if self.ex_getOption( "RestrictDataStage", False ):
if not self.__checkStageAllowed( jobState ):
return S_ERROR( "Stage not allowed" )
result = jobState.getOptParameter( "StageRequestedForSites" )
if not result[ 'OK' ]:
raise RuntimeError( "Can't retrieve optimizer parameter! (%s)" % result[ 'Message' ] )
stageRequested = result[ 'Value' ]
if stageRequested:
jobState.setOptParameter( "DataSites", stageRequested )
self.jobLog.info( "Stage already requested. Sending to next optimizer" )
return self.setNextOptimizer()
result = self.requestStage( jobState, candidates, lfnData )
if not result[ 'OK' ]:
return result
stageCandidates = result[ 'Value' ]
self.jobLog.notice( "Requested stage at sites %s" % ",".join( stageCandidates ) )
result = jobState.setOptParameter( "StageRequestedForSites", ",".join( stageCandidates ) )
if not result[ 'OK' ]:
return result
#TODO: What if more than one stage site?
jobState.setAttribute( 'Site', list( stageCandidates )[0] )
return S_OK()
def __checkStageAllowed( self, jobState ):
"""Check if the job credentials allow to stage date """
result = jobState.getAttribute( "OwnerGroup" )
if not result[ 'OK' ]:
self.jobLog.error( "Cannot retrieve OwnerGroup from DB: %s" % result[ 'Message' ] )
return S_ERROR( "Cannot get OwnerGroup" )
group = result[ 'Value' ]
return Properties.STAGE_ALLOWED in Registry.getPropertiesForGroup( group )
def freezeByBannedSE( self, manifest, lfnData ):
targetSEs = manifest.getOption( "TargetSEs", [] )
if targetSEs:
self.jobLog.info( "TargetSEs defined: %s" % ", ".join( targetSEs ) )
for lfn in lfnData:
replicas = lfnData[ lfn ][ 'Replicas' ]
anyBanned = False
for seName in list( replicas.keys() ):
if targetSEs and seName not in targetSEs:
self.jobLog.info( "Ignoring replica in %s (not in TargetSEs)" % seName )
replicas.pop( seName )
continue
result = self.__getSEStatus( seName )
if not result[ 'OK' ]:
self.jobLog.error( "Can't retrieve status for SE %s" % seName )
replicas.pop( seName )
anyBanned = True
continue
seStatus = result[ 'Value' ]
if not seStatus[ 'Read' ]:
self.jobLog.info( "Ignoring replica in %s (SE is not readable)" % seName )
replicas.pop( seName )
anyBanned = True
continue
if anyBanned:
raise OptimizerExecutor.FreezeTask( "Banned SE makes access to Input Data impossible" )
if not replicas:
return S_ERROR( "%s has no replicas in any target SE" % lfn )
return S_OK()
def selectSiteStage( self, manifest, lfnData ):
lfnSite = {}
tapelfn = {}
disklfn = set()
for lfn in lfnData:
replicas = lfnData[ lfn ][ 'Replicas' ]
lfnSite[ lfn ] = set()
for seName in replicas:
result = self.__getSitesForSE( seName )
if not result[ 'OK' ]:
return result
sites = result[ 'Value' ]
lfnSite[ lfn ].update( sites )
if lfn not in disklfn:
if replicas[ seName ][ 'Disk' ]:
disklfn.add( lfn )
try:
tapelfn.pop( lfn )
except KeyError:
pass
else:
if lfn not in tapelfn:
tapelfn[ lfn ] = set()
for site in sites:
tapelfn[ lfn ].add( site )
candidates = set.intersection( *[ lfnSite[ lfn ] for lfn in lfnSite ] )
userSites = manifest.getOption( "Site", [] )
if userSites:
candidates = set.intersection( candidates, userSites )
bannedSites = manifest.getOption( "BannedSites", [] )
candidates = candidates.difference( bannedSites )
if not candidates:
return S_ERROR( "No candidate sites available" )
self.jobLog.info( "Sites with access to input data are %s" % ",".join( candidates ) )
if not tapelfn:
self.jobLog.info( "No need to stage. Candidates are %s" % ", ".join( candidates ) )
return S_OK( ( candidates, False ) )
self.jobLog.info( "Need to stage %s files at most" % len( tapelfn ) )
tapeCandidates = {}
for lfn in tapelfn:
for site in set.intersection( candidates, tapelfn[lfn] ):
if site not in tapeCandidates:
tapeCandidates[ site ] = 0
tapeCandidates[ site ] += 1
if len( tapeCandidates ) == 1:
stageSite = tapeCandidates.keys()[0]
minStage = tapeCandidates[ stageSite ]
tapeCandidates = set( [ stageSite ] )
else:
minStage = min( *[ tapeCandidates[ site ] for site in tapeCandidates ] )
tapeCandidates = set( [ site for site in tapeCandidates if tapeCandidates[ site ] == minStage ] )
self.jobLog.info( "Sites %s need to stage %d files" % ( ",".join( tapeCandidates ), minStage ) )
result = self.__siteStatus.getUnusableSites( 'ComputingAccess' )
if result[ 'OK' ]:
for site in result[ 'Value' ]:
tapeCandidates.discard( site )
if not tapeCandidates:
raise OptimizerExecutor.FreezeTask( "All stageable sites are banned" )
finalCandidates = set.intersection( tapeCandidates, candidates )
if not finalCandidates:
self.jobLog.error( "No site that can stage is allowed to run" )
return S_ERROR( "No site can fullfill requirements" )
self.jobLog.info( "Candidate sites for staging are %s" % ", ".join( finalCandidates ) )
return S_OK( ( finalCandidates, set( tapelfn ) ) )
def __getSitesForSE( self, seName ):
result = self.__sitesForSE.get( seName )
if result == False:
result = getSitesForSE( seName )
if not result['OK']:
return result
self.__sitesForSE.add( seName, 600, result )
return result
def __getSEStatus( self, seName ):
result = self.__SEStatus.get( seName )
if result == False:
seObj = StorageElement( seName )
result = seObj.getStatus()
if not result[ 'OK' ]:
return result
self.__SEStatus.add( seName, 600, result )
return result
def requestStage( self, jobState, candidates, lfnData ):
    # Any site is as good as any other, so pick one at random
stageSite = random.sample( candidates, 1 )[0]
self.jobLog.info( "Site selected %s for staging" % stageSite )
result = getSEsForSite( stageSite )
if not result['OK']:
return S_ERROR( 'Could not determine SEs for site %s' % stageSite )
siteSEs = result['Value']
tapeSEs = []
diskSEs = []
for seName in siteSEs:
result = self.__getSEStatus( seName )
if not result[ 'OK' ]:
self.jobLog.error( "Cannot retrieve SE %s status: %s" % ( seName, result[ 'Message' ] ) )
return S_ERROR( "Cannot retrieve SE status" )
seStatus = result[ 'Value' ]
if seStatus[ 'Read' ] and seStatus[ 'TapeSE' ]:
tapeSEs.append( seName )
if seStatus[ 'Read' ] and seStatus[ 'DiskSE' ]:
diskSEs.append( seName )
if not tapeSEs:
return S_ERROR( "No Local SEs for site %s" % stageSite )
self.jobLog.verbose( "Tape SEs are %s" % ( ", ".join( tapeSEs ) ) )
stageLFNs = {}
lfnToStage = []
for lfn in lfnData:
replicas = lfnData[ lfn ][ 'Replicas' ]
# Check SEs
seStage = []
for seName in replicas:
_surl = replicas[ seName ][ 'SURL' ]
if seName in diskSEs:
# This lfn is in disk. Skip it
seStage = []
break
if seName not in tapeSEs:
# This lfn is not in this tape SE. Check next SE
continue
seStage.append( seName )
for seName in seStage:
if seName not in stageLFNs:
stageLFNs[ seName ] = []
stageLFNs[ seName ].append( lfn )
if lfn not in lfnToStage:
lfnToStage.append( lfn )
if not stageLFNs:
return S_ERROR( "Cannot find tape replicas" )
    # Check if any LFN is in more than one SE
    # If so, stage from the SE that has the most LFNs to stage, to group the request
    # 1.- Get the SEs ordered by descending number of LFNs to stage
sortedSEs = reversed( sorted( [ ( len( stageLFNs[ seName ] ), seName ) for seName in stageLFNs.keys() ] ) )
for lfn in lfnToStage:
found = False
# 2.- Traverse the SEs
for _stageCount, seName in sortedSEs:
if lfn in stageLFNs[ seName ]:
# 3.- If first time found, just mark as found. Next time delete the replica from the request
if found:
stageLFNs[ seName ].remove( lfn )
else:
found = True
# 4.-If empty SE, remove
if len( stageLFNs[ seName ] ) == 0:
stageLFNs.pop( seName )
self.jobLog.info( "Stage request will be \n\t%s" % "\n\t".join( [ "%s:%s" % ( lfn, stageLFNs[ lfn ] ) for lfn in stageLFNs ] ) )
stagerClient = StorageManagerClient()
result = stagerClient.setRequest( stageLFNs, 'WorkloadManagement',
'stageCallback@WorkloadManagement/OptimizationMind',
int( jobState.jid ) )
if not result[ 'OK' ]:
self.jobLog.error( "Could not send stage request: %s" % result[ 'Message' ] )
return S_ERROR( "Problem sending staging request" )
rid = str( result[ 'Value' ] )
self.jobLog.info( "Stage request %s sent" % rid )
jobState.setParameter( "StageRequest", rid )
result = jobState.setStatus( self.ex_getOption( 'StagingStatus', 'Staging' ),
self.ex_getOption( 'StagingMinorStatus', 'Request Sent' ),
appStatus = "",
source = self.ex_optimizerName() )
if not result[ 'OK' ]:
return result
stageCandidates = []
for seName in stageLFNs:
result = self.__getSitesForSE( seName )
if result[ 'OK' ]:
stageCandidates.append( result[ 'Value' ] )
stageCandidates = candidates.intersection( *[ sC for sC in stageCandidates ] ).union( [ stageSite ] )
return S_OK( stageCandidates )
| Sbalbp/DIRAC | WorkloadManagementSystem/Executor/InputDataValidation.py | Python | gpl-3.0 | 13,219 | ["DIRAC"] | c23f4ac6877fc8cdfe27b882fc3bc3d266f7b897bfe7706e309b58c239a54ee9 |
import os
import pdb
import numpy
import netCDF4
import sys
import pandas
import logging
import plot
import constants
import pygeoutil.util as util
import pygeoutil.rgeo as rgeo
import tempfile
# TODO: Clean up output paths, currently windows specific
# TODO: Check if creating butler map is working (not high priority at all)
class ShftCult:
"""
"""
def __init__(self, use_andreas=False, file_sc=constants.ASC_BUTLER, default_rate=0.067, start_yr=850, end_yr=2015,
skiprows=0):
"""
:return:
"""
# Shifting cultivation constants
self.start_yr = start_yr
self.end_yr = end_yr
self.default_rate = default_rate
# Properties of ascii file
self.asc_prop = ['ncols', 'nrows', 'xllcorner', 'yllcorner', 'cellsize', 'NODATA_value']
# Constants
self.dict_cont = {0: 'Antartica', 1: 'North_America', 2: 'South_America', 3: 'Europe', 4: 'Asia', 5: 'Africa',
6: 'Australia'}
self.CCODES = 'country_codes'
self.CONT_CODES = 'continent_codes'
# Lats and lons
self.num_lats = constants.NUM_LATS
self.num_lons = constants.NUM_LONS
# continent and country code files
self.ccodes_file = constants.ccodes_file
self.contcodes_file = constants.contcodes_file
self.ccodes_asc = numpy.genfromtxt(constants.CNTRY_CODES, skip_header=skiprows, delimiter=' ')
self.file_raw_sc = file_sc
if use_andreas:
self.data_source = 'Andreas'
else:
self.data_source = 'Butler'
def get_ascii_properties(self, ascii_fl, name_property='ncols'):
"""
Get value for property of ascii file.
Valid property names are: ncols, nrows, xllcorner, yllcorner, cellsize, NODATA_value
:param ascii_fl:
:param name_property:
:return:
"""
# From https://en.wikipedia.org/wiki/Esri_grid, this is ESRI ascii format
# ncols and nrows are the numbers of rows and columns, respectively (represented as integers);
# xllcorner and yllcorner are the western (left) x-coordinate and southern (bottom) y-coordinates, such as
# easting and northing (represented as real numbers with an optional decimal point)
# cellsize is the length of one side of a square cell (a real number); and,
# nodata_value is the value that is regarded as "missing" or "not applicable"; this line is optional, but highly
# recommended as some programs expect this line to be declared (a real number).
asc_data = numpy.genfromtxt(ascii_fl, max_rows=6, dtype=None)
        try:
            val = [item for item in asc_data if item[0] == name_property][0][1]
        except IndexError:
            logging.info(name_property + ' is not a valid property for GIS ascii file')
            val = None
        return val
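    # Hedged example of the ESRI ASCII header this parser expects (the values
    # below are illustrative only, not taken from a real input file):
    #
    #     ncols         1440
    #     nrows         720
    #     xllcorner     -180.0
    #     yllcorner     -90.0
    #     cellsize      0.25
    #     NODATA_value  -9999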
def do_andreas(self, to_field='SC2010Cat'):
"""
Args:
to_field: can be 'SC2090Cat'
Returns:
"""
path_asc_andreas = os.path.dirname(constants.TIF_ANDREAS) + os.sep + \
os.path.basename(os.path.splitext(constants.TIF_ANDREAS)[0]) + '.asc'
# Get 2010 shifting cultivation map
path_out_lup = tempfile.mkstemp(suffix='.tif')[1]
rgeo.lookup(self.file_raw_sc, path_out_ds=path_out_lup, from_field='Value', to_field=to_field)
# Call convert_raster_to_ascii with appropriate output file name.
rgeo.convert_raster_to_ascii(path_input_raster=path_out_lup, path_ascii_output=path_asc_andreas)
asc_sc = self.create_global_sc_from_andreas(path_andreas_asc=path_asc_andreas, new_res=0.25)
        asc_binary_sc = asc_sc.copy()  # a slice would be a view; copy so asc_sc is untouched
asc_binary_sc[(asc_binary_sc > 0.0) & (asc_binary_sc < 65536.0)] = 1.0
return asc_binary_sc
def create_global_sc_from_andreas(self, path_andreas_asc, new_res=0.25):
"""
:param path_andreas_asc: input andreas ascii file that is missing rows
:param new_res:
:return:
"""
if self.data_source != 'Andreas':
logging.error('Incorrect data source. Should be Andreas')
sys.exit(0)
# Get properties of Andreas ascii file
ncols, nrows, xllcorner, yllcorner, cellsize, NODATA_value = \
[self.get_ascii_properties(path_andreas_asc, prop) for prop in self.asc_prop]
andreas_data = numpy.genfromtxt(path_andreas_asc, skip_header=6, delimiter=' ')
# We want to create a file with same resolution as Andreas's file, but global and not regional like Andreas
global_xll = -180.0 # x lower left
global_yll = -90.0 # y lower left
global_xlr = 180.0 # x lower right
global_yur = 90.0 # y upper right
andreas_end_row = numpy.ceil(global_yur - yllcorner).astype(int)
andreas_start_row = numpy.ceil(andreas_end_row - nrows).astype(int)
andreas_start_col = numpy.ceil(xllcorner - global_xll).astype(int)
andreas_end_col = numpy.ceil(andreas_start_col + ncols).astype(int)
# Create numpy 2D array,
global_sc = numpy.zeros((int(180//cellsize), int(360//cellsize)))
global_sc.fill(65536.0)
# Replace global sc array with andreas's data
global_sc[andreas_start_row:andreas_end_row, andreas_start_col:andreas_end_col] = andreas_data
# Create new global sc file
new_res_global_sc = global_sc.repeat(1.0/new_res, 0).repeat(1.0/new_res, 1)
return new_res_global_sc
def combine_country_continent(self):
"""
:return:
"""
ccodes = pandas.read_csv(self.ccodes_file, header=None)
ccodes.columns = [self.CCODES]
contcodes = pandas.read_csv(self.contcodes_file, header=None)
contcodes.columns = [self.CONT_CODES]
return pandas.concat([ccodes, contcodes], axis=1)
def replace_country_by_continent(self, arr, lup_codes):
"""
stackoverflow.com/questions/34321025/replace-values-in-numpy-2d-array-based-on-pandas-dataframe/34328891#34328891
        :param arr: 2D numpy array of country codes
        :param lup_codes: DataFrame mapping country codes to continent codes
        :return: arr with country codes replaced by continent codes (modified in place)
"""
old_val = numpy.array(lup_codes[self.CCODES])
new_val = numpy.array(lup_codes[self.CONT_CODES])
mask = numpy.in1d(arr, old_val)
idx = numpy.searchsorted(old_val, arr.ravel()[mask])
arr.ravel()[mask] = new_val[idx]
return arr
def make_shifting_cult_nc(self, out_path, ccode=0.0, new_rate=0.15, desc='netCDF'):
"""
Args:
            out_path: directory for the output netCDF file
            ccode: optional country code whose rate is overridden
            new_rate: rate used for the country override and post-1970 decline
            desc: description attribute for the netCDF file
        Returns:
            path to the output netCDF file
"""
# Compute dimensions of nc file based on # rows/cols in ascii file
fl_res = self.num_lats/self.asc_sc.shape[0]
if fl_res != self.num_lons/self.asc_sc.shape[1]:
            print('Incorrect dimensions in ascii file')
            sys.exit(1)
# Initialize nc file
out_nc = out_path+os.sep + os.path.basename(self.file_raw_sc)[:-4] + '.nc'
nc_data = netCDF4.Dataset(out_nc, 'w', format='NETCDF4')
nc_data.description = desc
# dimensions
nc_data.createDimension('lon', self.asc_sc.shape[1])
nc_data.createDimension('lat', self.asc_sc.shape[0])
tme = numpy.arange(self.start_yr, self.end_yr + 1)
nc_data.createDimension('time', numpy.shape(tme)[0])
# Populate and output nc file
longitudes = nc_data.createVariable('longitude', 'f4', ('lon',))
latitudes = nc_data.createVariable('latitude', 'f4', ('lat',))
time = nc_data.createVariable('time', 'i4', ('time',))
data = nc_data.createVariable('shift_cult', 'f4', ('time', 'lat', 'lon',), fill_value=numpy.nan)
cntr_codes = nc_data.createVariable('cntry_codes', 'f4', ('lat', 'lon',), fill_value=0.0)
data.units = 'fraction of gridcell area'
data.long_name = 'shifting cultivation fraction of gridcell area'
# Assign values to dimensions and data
latitudes[:] = numpy.arange(90.0 - fl_res/2.0, -90.0, -fl_res)
longitudes[:] = numpy.arange(-180.0 + fl_res/2.0, 180.0, fl_res)
time[:] = tme
cntr_codes[:] = self.ccodes_asc[:, :] # Read in the country codes data
# Assign default shifting cultivation rate
self.asc_sc[self.asc_sc > 0.0] = self.default_rate
if constants.shft_by_country:
# Store data into netCDF file
for idx, j in enumerate(tme):
                print(idx, j)
if j > 1970 and j <= 2015:
self.asc_sc[self.asc_sc > 0.0] = self.default_rate - \
((self.default_rate - new_rate) * (j - 1970) / 45.0)
data[idx, :, :] = self.asc_sc[:, :]
else:
# Replace country codes by respective continent codes
lup_codes = self.combine_country_continent()
if lup_codes.empty:
logging.error('Empty lookup table (country-continent)')
sys.exit(0)
cont_shftCult = self.replace_country_by_continent(self.ccodes_asc, lup_codes)
# Replace default shifting cultivation rate for some continents
            for idx, j in enumerate(tme):
                if j > 1970 and j <= 2015:
                    tmp_arr = numpy.copy(self.asc_sc)
                    # After 1970, Asia (continent code 4) declines by 90%
                    new_rate = self.default_rate * 0.10
                    tmp_arr[(self.asc_sc > 0.0) & (cont_shftCult == 4)] = \
                        self.default_rate - ((self.default_rate - new_rate) * (j - 1970) / 45.0)
                    # After 1970, S America (continent code 2) declines by 70%
                    new_rate = self.default_rate * 0.30
                    tmp_arr[(self.asc_sc > 0.0) & (cont_shftCult == 2)] = \
                        self.default_rate - ((self.default_rate - new_rate) * (j - 1970) / 45.0)
                    data[idx, :, :] = tmp_arr
# Change default rate of shifting cultivation for some countries
if ccode > 0.0:
self.asc_sc[cntr_codes[:] == ccode] = new_rate
nc_data.close()
return out_nc
def interpolate_asc(self, start_asc, end_asc, start_pt, current_pt, end_pt):
"""
        Linearly interpolate between start_asc and end_asc based on where
        current_pt lies in [start_pt, end_pt].
        Args:
            start_asc: array of values at start_pt
            end_asc: array of values at end_pt
            start_pt: starting point (e.g. year)
            current_pt: point at which to interpolate
            end_pt: ending point (e.g. year)
        Returns:
            interpolated array at current_pt
"""
        if end_pt <= start_pt:
            print('End point should be greater than starting point')
interp_asc = end_asc + (start_asc - end_asc) * ((end_pt - current_pt) / (end_pt - start_pt))
return interp_asc
def create_andreas_nc(self):
"""
        Build the Andreas-based shifting cultivation netCDF file.
        Returns:
            path to the output netCDF file
"""
path_out = constants.input_dir + os.sep + 'shift_cult'
asc_2010 = self.do_andreas(to_field='SC2010Cat')
asc_2090 = self.do_andreas(to_field='SC2090Cat')
static_map = numpy.copy(asc_2010)
sc_map_2090 = numpy.copy(asc_2090)
# Frequency of occurrence is used as a proxy for the fraction of cropland area in each grid cell that is
# associated with shifting cultivation. Assuming a 1 year cultivation period, and re-clearing from secondary
# land unless secondary is less than 10*(cropland in SC).
        # From Heinimann et al.
# Each grid cell was classified into: None, very low, low, moderate, high (shifting cultivation)
# This corresponds to ranges of area share of shifting cultivation (cultivated fields plus fallow) within an
# entire one-degree cell
# 0 (none): < 1%
# 1 (very-low): 1 - 9%
# 2 (low): 10 - 19%
# 3 (moderate): 20 - 39%
# 4. (high): >= 40%
# 5. (historic): 70%
static_map[static_map == 1.0] = 0.05
static_map[static_map == 0.0] = 0.05
static_map[static_map == 2.0] = 0.05
static_map[static_map == 3.0] = 0.15
static_map[static_map == 4.0] = 0.3
static_map[static_map == 5.0] = 0.7
static_map = static_map * 3.0 # Back in 1850, move each category 3 levels up
static_map[static_map >= 0.7] = 0.7 # cap
# static_map[static_map >= 0.0] = 0.7 # Constant rate of SC rate in 1850 approach 0
rate_map = numpy.copy(asc_2010)
rate_map[rate_map == 1.0] = 0.05
rate_map[rate_map == 0.0] = 0.05
rate_map[rate_map == 2.0] = 0.05
rate_map[rate_map == 3.0] = 0.15
rate_map[rate_map == 4.0] = 0.3
rate_map[rate_map == 5.0] = 0.7
sc_map_2090[sc_map_2090 == 1.0] = 0.05
sc_map_2090[sc_map_2090 == 0.0] = 0.05
sc_map_2090[sc_map_2090 == 2.0] = 0.05
sc_map_2090[sc_map_2090 == 3.0] = 0.15
sc_map_2090[sc_map_2090 == 4.0] = 0.3
sc_map_2090[sc_map_2090 == 5.0] = 0.7
rate_map[rate_map == -9999.0] = 0.0
sc_map_2090[sc_map_2090 <= -9999.0] = 0.0
static_map[static_map <= -9999.0] = 0.0 # approach 0
# Compute dimensions of nc file based on # rows/cols in ascii file
fl_res = self.num_lats/asc_2010.shape[0]
if fl_res != self.num_lons/asc_2010.shape[1]:
            print('Incorrect dimensions in ascii file')
            sys.exit(1)
# Initialize nc file
out_nc = path_out + os.sep + 'andreas_approach1.nc'
nc_data = netCDF4.Dataset(out_nc, 'w', format='NETCDF4')
nc_data.description = ''
# dimensions
nc_data.createDimension('lon', asc_2010.shape[1])
nc_data.createDimension('lat', asc_2010.shape[0])
tme = numpy.arange(self.start_yr, 2100 + 1)
nc_data.createDimension('time', numpy.shape(tme)[0])
# Populate and output nc file
longitudes = nc_data.createVariable('longitude', 'f4', ('lon',))
latitudes = nc_data.createVariable('latitude', 'f4', ('lat',))
time = nc_data.createVariable('time', 'i4', ('time',))
data = nc_data.createVariable('shift_cult', 'f4', ('time', 'lat', 'lon',), fill_value=numpy.nan)
cntr_codes = nc_data.createVariable('cntry_codes', 'f4', ('lat', 'lon',), fill_value=0.0)
data.units = 'fraction of gridcell area'
data.long_name = 'shifting cultivation fraction of gridcell area'
# Assign values to dimensions and data
latitudes[:] = numpy.arange(90.0 - fl_res/2.0, -90.0, -fl_res)
longitudes[:] = numpy.arange(-180.0 + fl_res/2.0, 180.0, fl_res)
time[:] = tme
cntr_codes[:] = self.ccodes_asc[:, :] # Read in the country codes data
        for idx, j in enumerate(tme):
            print(j)
if j <= 1850:
data[idx, :, :] = static_map
elif j <= 2015:
# SC rate is same as static_map in 1850, it is same as rate_map in 2015
mod_rate = self.interpolate_asc(static_map, rate_map, 1850, j, 2015)
data[idx, :, :] = mod_rate
            else:  # Up to 2100
                # SC rate is same as rate_map in 2015 and sc_map_2090 in 2100
                mod_rate = self.interpolate_asc(rate_map, sc_map_2090, 2015, j, 2100)
data[idx, :, :] = mod_rate
nc_data.close()
return out_nc
def use_butler_map():
    obj = ShftCult(use_andreas=False, file_sc=constants.ASC_BUTLER, default_rate=0.067, start_yr=850, end_yr=2015,
                   skiprows=0)
    # Read ASCII File of shifting cultivation (butler ascii map)
    asc_sc = numpy.genfromtxt(obj.file_raw_sc, skip_header=0, delimiter=' ')
    asc_binary_sc = numpy.copy(asc_sc)
if not constants.shft_by_country:
obj.combine_country_continent()
shift_nc_file = obj.make_shifting_cult_nc(ccode=0.0, new_rate=0.033, out_path=constants.out_dir)
# Shifting cultivation netCDF
# 1. Read in ascii file of continent and country codes
# 2. For our 3 key continents, combine with butler map
# 3. Change shft cult rates
# Get list of unique countries from country code 2D ascii file
# Merge continent code and country code (both 1-D) files
# Double check whether unique countries in 2D file and 1D list match
# Replace country codes in 2D ascii file by continent codes
# Apply shifting cultivation algorithm
def use_andreas_map():
obj = ShftCult(use_andreas=True, file_sc=constants.TIF_ANDREAS, skiprows=0)
path_nc = obj.create_andreas_nc()
pdb.set_trace()
# Plot maps
ds = util.open_or_die(path_nc)
lat = ds.variables['latitude'][:]
lon = ds.variables['longitude'][:]
imgs_for_movie = plot.plot_maps_ts(path_nc,
'shift_cult', lon, lat,
out_path='C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\',
save_name='shift_cult', xlabel='Shifting cultivation frequency on croplands',
title='', land_bg=False, grid=True)
plot.make_movie(imgs_for_movie, 'C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\', out_fname='shift_cult.gif')
ncc = util.open_or_die('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\shift_cult\\andreas\\andreas.nc')
crop_data = util.open_or_die('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\GLM\\Input\\LUH\\v0.3_historical\\states.nc')
tme = numpy.arange(0, 1165 + 1)
mult = numpy.zeros(len(tme))
mult_one = numpy.zeros(len(tme))
cell_area = util.open_or_die(constants.path_GLM_carea)
nc_data = ncc.variables['shift_cult'][int(1), :, :]
all_one = numpy.copy(nc_data)
all_one[all_one > 0.0] = 1.0
for idx, t in enumerate(tme):
        print(t)
nc_data = ncc.variables['shift_cult'][int(t), :, :]
all_crops = crop_data.variables['c3ann'][int(t), :, :] + crop_data.variables['c4ann'][int(t), :, :] +\
crop_data.variables['c3per'][int(t), :, :] + crop_data.variables['c4per'][int(t), :, :] +\
crop_data.variables['c3nfx'][int(t), :, :]
mult[idx] = numpy.ma.sum(all_crops * nc_data * cell_area)
mult_one[idx] = numpy.ma.sum(all_crops * all_one * cell_area)
pdb.set_trace()
import matplotlib.pyplot as plt
ax = plt.plot(mult_one, label='butler')
plt.plot(mult, label='andreas')
plt.show()
if __name__ == '__main__':
use_andreas_map()
|
ritviksahajpal/LUH2
|
LUH2/GLM/shifting_cult.py
|
Python
|
mit
| 18,333
|
[
"NetCDF"
] |
0a765a6cfab51ba544c8e73e487efb544d13e67f67cc6fef40326ff40533fb0c
|
#!/usr/bin/env python3
#
# Data manager for reference data for the 'mothur_toolsuite' Galaxy tools
import io
import json
import optparse
import os
import shutil
import sys
import tarfile
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from functools import reduce
# When extracting files from archives, skip names that
# start with the following strings
IGNORE_PATHS = ('.', '__MACOSX/', '__')
# Map file extensions to data table names
MOTHUR_FILE_TYPES = {".map": "map",
".fasta": "aligndb",
".align": "aligndb",
".pat": "lookup",
".tax": "taxonomy"}
# Reference data URLs
MOTHUR_REFERENCE_DATA = {
# Look up data
# http://www.mothur.org/wiki/Lookup_files
"lookup_titanium": {
"GS FLX Titanium": ["https://mothur.s3.us-east-2.amazonaws.com/wiki/lookup_titanium.zip", ]
},
"lookup_gsflx": {
"GSFLX": ["https://mothur.s3.us-east-2.amazonaws.com/wiki/lookup_gsflx.zip", ]
},
"lookup_gs20": {
"GS20": ["https://mothur.s3.us-east-2.amazonaws.com/wiki/lookup_gs20.zip", ]
},
# RDP reference files
# http://www.mothur.org/wiki/RDP_reference_files
"RDP_v18": {
"16S rRNA RDP training set 18":
[
"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset18_062020.rdp.tgz", ],
"16S rRNA PDS training set 18":
[
"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset18_062020.pds.tgz", ],
},
"RDP_v16": {
"16S rRNA RDP training set 16":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset16_022016.rdp.tgz", ],
"16S rRNA PDS training set 16":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset16_022016.pds.tgz", ],
},
"RDP_v14": {
"16S rRNA RDP training set 14":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset14_032015.rdp.tgz", ],
"16S rRNA PDS training set 14":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset14_032015.pds.tgz", ],
},
"RDP_v10": {
"16S rRNA RDP training set 10":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset10_082014.rdp.tgz", ],
"16S rRNA PDS training set 10":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset10_082014.pds.tgz", ],
},
"RDP_v9": {
"16S rRNA RDP training set 9":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset9_032012.rdp.zip", ],
"16S rRNA PDS training set 9":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset9_032012.pds.zip", ],
},
"RDP_v7": {
"16S rRNA RDP training set 7":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset7_112011.rdp.zip", ],
"16S rRNA PDS training set 7":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset7_112011.pds.zip", ],
"8S rRNA Fungi training set 7":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/fungilsu_train_v7.zip", ],
},
"RDP_v6": {
"RDP training set 6":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/rdptrainingset.zip", ],
},
# Silva reference files
# http://www.mothur.org/wiki/Silva_reference_files
"silva_release_138.1": {
"SILVA release 138.1":
[
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v138_1.tgz",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v138_1.tgz", ],
},
"silva_release_128": {
"SILVA release 128":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v128.tgz",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v128.tgz", ],
},
"silva_release_123": {
"SILVA release 123":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v123.tgz",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v123.tgz", ],
},
"silva_release_119": {
"SILVA release 119":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v119.tgz",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v119.tgz", ],
},
"silva_release_102": {
"SILVA release 102":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.bacteria.zip",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.archaea.zip",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.eukarya.zip", ],
},
"silva_gold_bacteria": {
"SILVA gold":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.gold.bacteria.zip", ],
},
# Greengenes
# http://www.mothur.org/wiki/Greengenes-formatted_databases
"greengenes_August2013": {
"Greengenes August 2013":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_8_99.refalign.tgz",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_8_99.taxonomy.tgz", ],
},
"greengenes_May2013": {
"Greengenes May 2013":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_5_99.refalign.tgz",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_5_99.taxonomy.tgz", ],
},
"greengenes_old": {
"Greengenes pre-May 2013":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/greengenes.alignment.zip",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/greengenes.tax.tgz", ],
},
"greengenes_gold_alignment": {
"Greengenes gold alignment":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/greengenes.gold.alignment.zip", ],
},
# Secondary structure maps
# http://www.mothur.org/wiki/Secondary_structure_map
"secondary_structure_maps_silva": {
"SILVA":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/silva_ss_map.zip", ],
},
"secondary_structure_maps_greengenes": {
"Greengenes":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_ss_map.zip", ],
},
# Lane masks: not used here?
"lane_masks": {
"Greengenes-compatible":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/Lane1241.gg.filter",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/lane1287.gg.filter",
"https://mothur.s3.us-east-2.amazonaws.com/wiki/lane1349.gg.filter", ],
"SILVA-compatible":
["https://mothur.s3.us-east-2.amazonaws.com/wiki/lane1349.silva.filter", ]
},
}
# Utility functions for interacting with Galaxy JSON
def read_input_json(jsonfile):
"""Read the JSON supplied from the data manager tool
Returns a tuple (param_dict,extra_files_path)
'param_dict' is an arbitrary dictionary of parameters
input into the tool; 'extra_files_path' is the path
to a directory where output files must be put for the
receiving data manager to pick them up.
NB the directory pointed to by 'extra_files_path'
doesn't exist initially, it is the job of the script
to create it if necessary.
"""
with open(jsonfile) as fh:
params = json.load(fh)
return (params['param_dict'],
params['output_data'][0]['extra_files_path'])
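# A minimal sketch of the input JSON this function expects (hypothetical
# values for illustration only):
#
#     {
#       "param_dict": {"data_source": "mothur_website", "datasets": "RDP_v18"},
#       "output_data": [{"extra_files_path": "/galaxy/files/extra_files_path_1"}]
#     }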
# Utility functions for creating data table dictionaries
#
# Example usage:
# >>> d = create_data_tables_dict()
# >>> add_data_table(d,'my_data')
# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))
# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))
# >>> print(json.dumps(d))
def create_data_tables_dict():
"""Return a dictionary for storing data table information
Returns a dictionary that can be used with 'add_data_table'
and 'add_data_table_entry' to store information about a
data table. It can be converted to JSON to be sent back to
the data manager.
"""
d = {}
d['data_tables'] = {}
return d
def add_data_table(d, table):
"""Add a data table to the data tables dictionary
Creates a placeholder for a data table called 'table'.
"""
d['data_tables'][table] = []
def add_data_table_entry(d, table, entry):
"""Add an entry to a data table
Appends an entry to the data table 'table'. 'entry'
should be a dictionary where the keys are the names of
columns in the data table.
Raises an exception if the named data table doesn't
exist.
"""
try:
d['data_tables'][table].append(entry)
except KeyError:
raise Exception("add_data_table_entry: no table '%s'" % table)
# Utility functions for downloading and unpacking archive files
def download_file(url, target=None, wd=None):
"""Download a file from a URL
Fetches a file from the specified URL.
If 'target' is specified then the file is saved to this
name; otherwise it's saved as the basename of the URL.
If 'wd' is specified then it is used as the 'working
directory' where the file will be save on the local
system.
Returns the name that the file is saved with.
"""
print(f"Downloading {url}")
if not target:
target = os.path.basename(url)
if wd:
target = os.path.join(wd, target)
print(f"Saving to {target}")
with open(target, 'wb') as fh:
url_h = urllib.request.urlopen(url)
while True:
buffer = url_h.read(io.DEFAULT_BUFFER_SIZE)
if buffer == b"":
break
fh.write(buffer)
return target
def unpack_zip_archive(filen, wd=None):
"""Extract files from a ZIP archive
Given a ZIP archive, extract the files it contains
and return a list of the resulting file names and
paths.
'wd' specifies the working directory to extract
the files to, otherwise they are extracted to the
current working directory.
Once all the files are extracted the ZIP archive
file is deleted from the file system.
"""
if not zipfile.is_zipfile(filen):
print(f"{filen}: not ZIP formatted file")
return [filen]
file_list = []
with zipfile.ZipFile(filen) as z:
for name in z.namelist():
if reduce(lambda x, y: x or name.startswith(y), IGNORE_PATHS, False):
print(f"Ignoring {name}")
continue
if wd:
target = os.path.join(wd, name)
else:
target = name
if name.endswith('/'):
# Make directory
print(f"Creating dir {target}")
try:
os.makedirs(target)
except OSError:
pass
else:
# Extract file
print("Extracting {target}")
try:
os.makedirs(os.path.dirname(target))
except OSError:
pass
with open(target, 'wb') as fh:
fh.write(z.read(name))
file_list.append(target)
print(f"Removing {filen}")
os.remove(filen)
return file_list
def unpack_tar_archive(filen, wd=None):
"""Extract files from a TAR archive
Given a TAR archive (which optionally can be
compressed with either gzip or bz2), extract the
files it contains and return a list of the
resulting file names and paths.
'wd' specifies the working directory to extract
the files to, otherwise they are extracted to the
current working directory.
Once all the files are extracted the TAR archive
file is deleted from the file system.
"""
file_list = []
if not tarfile.is_tarfile(filen):
print(f"{filen}: not TAR file")
return [filen]
with tarfile.open(filen) as t:
for name in t.getnames():
# Check for unwanted files
if reduce(lambda x, y: x or name.startswith(y), IGNORE_PATHS, False):
print(f"Ignoring {name}")
continue
# Extract file
print(f"Extracting {name}")
t.extract(name, wd)
if wd:
target = os.path.join(wd, name)
else:
target = name
file_list.append(target)
print(f"Removing {filen}")
os.remove(filen)
return file_list
def unpack_archive(filen, wd=None):
"""Extract files from an archive
Wrapper function that calls the appropriate
unpacking function depending on the archive
type, and returns a list of files that have
been extracted.
'wd' specifies the working directory to extract
the files to, otherwise they are extracted to the
current working directory.
"""
print(f"Unpack {filen}")
ext = os.path.splitext(filen)[1]
print(f"Extension: {ext}")
if ext == ".zip":
return unpack_zip_archive(filen, wd=wd)
elif ext == ".tgz":
return unpack_tar_archive(filen, wd=wd)
else:
return [filen]
def fetch_files(urls, wd=None, files=None):
"""Download and unpack files from a list of URLs
Given a list of URLs, download and unpack each
one, and return a list of the extracted files.
'wd' specifies the working directory to extract
the files to, otherwise they are extracted to the
current working directory.
If 'files' is given then the list of extracted
files will be appended to this list before being
returned.
"""
if files is None:
files = []
for url in urls:
filen = download_file(url, wd=wd)
files.extend(unpack_archive(filen, wd=wd))
return files
# Utility functions specific to the Mothur reference data
def identify_type(filen):
"""Return the data table name based on the file name
"""
ext = os.path.splitext(filen)[1]
try:
return MOTHUR_FILE_TYPES[ext]
except KeyError:
print(f"WARNING: unknown file type for {filen}, skipping")
return None
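# For example, identify_type('silva.bacteria.fasta') returns 'aligndb', while
# a file with an unrecognised extension prints a warning and returns None.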
def get_name(filen):
"""Generate a descriptive name based on the file name
"""
# type_ = identify_type(filen)
name = os.path.splitext(os.path.basename(filen))[0]
for delim in ('.', '_'):
name = name.replace(delim, ' ')
return name
def fetch_from_mothur_website(data_tables, target_dir, datasets):
"""Fetch reference data from the Mothur website
For each dataset in the list 'datasets', download (and if
necessary unpack) the related files from the Mothur website,
copy them to the data manager's target directory, and add
references to the files to the appropriate data table.
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
target_dir: directory to put the downloaded files
datasets: a list of dataset names corresponding to keys in
the MOTHUR_REFERENCE_DATA dictionary
"""
# Make working dir
wd = tempfile.mkdtemp(suffix=".mothur", dir=os.getcwd())
print(f"Working dir {wd}")
# Iterate over all requested reference data URLs
for dataset in datasets:
print(f"Handling dataset '{dataset}'")
for name in MOTHUR_REFERENCE_DATA[dataset]:
for f in fetch_files(MOTHUR_REFERENCE_DATA[dataset][name], wd=wd):
type_ = identify_type(f)
name_from_file = os.path.splitext(os.path.basename(f))[0]
entry_name = f"{name_from_file} ({name})"
print(f"{type_}\t\'{entry_name}'\t.../{os.path.basename(f)}")
if type_ is not None:
# Move to target dir
ref_data_file = os.path.basename(f)
f1 = os.path.join(target_dir, ref_data_file)
print(f"Moving {f} to {f1}")
shutil.move(f, f1)
# Add entry to data table
table_name = f"mothur_{type_}"
add_data_table_entry(data_tables, table_name, dict(name=entry_name, value=ref_data_file))
# Remove working dir
print(f"Removing {wd}")
shutil.rmtree(wd)
def files_from_filesystem_paths(paths):
"""Return list of file paths from arbitrary input paths
Given a list of filesystem paths, return a list of
full paths corresponding to all files found recursively
from under those paths.
"""
# Collect files to add
files = []
for path in paths:
path = os.path.abspath(path)
print(f"Examining '{path}'...")
if os.path.isfile(path):
# Store full path for file
files.append(path)
elif os.path.isdir(path):
# Descend into directory and collect the files
for f in os.listdir(path):
files.extend(files_from_filesystem_paths((os.path.join(path, f), )))
else:
print("Not a file or directory, ignored")
return files
def import_from_server(data_tables, target_dir, paths, description, link_to_data=False):
"""Import reference data from filesystem paths
Creates references to the specified file(s) on the Galaxy
server in the appropriate data table (determined from the
file extension).
The 'data_tables' dictionary should have been created using
the 'create_data_tables_dict' and 'add_data_table' functions.
Arguments:
data_tables: a dictionary containing the data table info
target_dir: directory to put copy or link to the data file
paths: list of file and/or directory paths to import
description: text to associate with the files
link_to_data: boolean, if False then copy the data file
into Galaxy (default); if True then make a symlink to
the data file
"""
# Collect list of files based on input paths
files = files_from_filesystem_paths(paths)
# Handle each file individually
for f in files:
type_ = identify_type(f)
if type_ is None:
print(f"{f}: unrecognised type, skipped")
continue
ref_data_file = os.path.basename(f)
target_file = os.path.join(target_dir, ref_data_file)
entry_name = "%s" % os.path.splitext(ref_data_file)[0]
if description:
entry_name += " (%s)" % description
print(f"{type_}\t\'{entry_name}'\t.../{ref_data_file}")
# Link to or copy the data
if link_to_data:
os.symlink(f, target_file)
else:
shutil.copyfile(f, target_file)
# Add entry to data table
table_name = f"mothur_{type_}"
add_data_table_entry(data_tables, table_name, dict(name=entry_name, value=ref_data_file))
if __name__ == "__main__":
print("Starting...")
# Read command line
parser = optparse.OptionParser()
parser.add_option('--source', action='store', dest='data_source')
parser.add_option('--datasets', action='store', dest='datasets', default='')
    parser.add_option('--paths', action='store', dest='paths', default='')
parser.add_option('--description', action='store', dest='description', default='')
parser.add_option('--link', action='store_true', dest='link_to_data')
options, args = parser.parse_args()
print(f"options: {options}")
print(f"args : {args}")
# Check for JSON file
if len(args) != 1:
sys.stderr.write("Need to supply JSON file name")
sys.exit(1)
jsonfile = args[0]
# Read the input JSON
params, target_dir = read_input_json(jsonfile)
# Make the target directory
print(f"Making {target_dir}")
os.mkdir(target_dir)
# Set up data tables dictionary
data_tables = create_data_tables_dict()
add_data_table(data_tables, 'mothur_lookup')
add_data_table(data_tables, 'mothur_aligndb')
add_data_table(data_tables, 'mothur_map')
add_data_table(data_tables, 'mothur_taxonomy')
# Fetch data from specified data sources
if options.data_source == 'mothur_website':
datasets = options.datasets.split(',')
fetch_from_mothur_website(data_tables, target_dir, datasets)
elif options.data_source == 'filesystem_paths':
# Check description text
description = options.description.strip()
# Get list of paths (need to remove any escapes for '\n' and '\r'
# that might have been inserted by Galaxy)
paths = options.paths.replace('__cn__', '\n').replace('__cr__', '\r').split()
import_from_server(data_tables, target_dir, paths, description, link_to_data=options.link_to_data)
# Write output JSON
print("Outputting JSON")
with open(jsonfile, 'w') as fh:
json.dump(data_tables, fh, sort_keys=True)
print("Done.")
|
galaxyproject/tools-iuc
|
data_managers/data_manager_mothur_toolsuite/data_manager/fetch_mothur_reference_data.py
|
Python
|
mit
| 20,605
|
[
"Galaxy"
] |
bcf69114809bdf879311492c1a905ea3dd766d9be39f0d0877d84bda18ac7208
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example shows how to use validateOnly SOAP header.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Api: AdWordsOnly
"""
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service with validate only flag enabled.
client.validate_only = True
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201502')
# Construct operations to add a text ad.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'finalUrls': {
'urls': ['http://www.example.com']
},
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
}
}
}]
ad_group_ad_service.mutate(operations)
# No error means the request is valid.
# Now let's check an invalid ad using a very long line to trigger an error.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': ad_group_id,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for all astronauts in orbit',
'headline': 'Luxury Cruise to Mars'
}
}
}]
try:
ad_group_ad_service.mutate(operations)
except suds.WebFault, e:
print 'Validation correctly failed with \'%s\'.' % str(e)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
wubr2000/googleads-python-lib
|
examples/adwords/v201502/campaign_management/validate_text_ad.py
|
Python
|
apache-2.0
| 2,766
|
[
"VisIt"
] |
b576366710a8431661f8d3252c035df462a7eccab6e1504f097ba0b1b8e4e28e
|
r"""
Homology :mod:`homology`
========================
Tools for "symmetrizing" a period matrix.
There exists a symplectic transformation on the period matrix of a real curve
such that the corresponding a- and b-cycles have certain transformation
properties under the anti-holomorphic involution on said Riemann surface.
.. note::
The algorithm described in Kalla, Klein actually operates on the transposes
of the a- and b-period matrices. All intermediate functions assume the input
    period matrices are transposed. The primary function in this module,
    :func:`symmetrize_periods`, accepts the untransposed periods and performs
    the transposition internally.
Functions
---------
.. autosummary::
symmetrize_periods
symmetric_transformation_matrix
References
----------
.. [KallaKlein] C. Kalla, C. Klein "Computation of the Topological Type of a
Real Riemann Surface"
Contents
--------
"""
import numpy
from sage.all import (
real, imag, Matrix, ZZ, QQ, RDF, CDF, GF, identity_matrix, zero_matrix)
def Re(M):
return M.apply_map(real)
def Im(M):
return M.apply_map(imag)
def involution_matrix(Pa, Pb, tol=1e-4):
r"""Returns the transformation matrix `R` corresponding to the anti-holomorphic
involution on the periods of the Riemann surface.
    Given an arbitrary `2g x g` period matrix `[Pa, Pb]^T` of a genus `g`
Riemann surface `X` the action of the anti-holomorphic involution on `X` of
these periods is given by left-multiplication by a `2g x 2g` matrix `R`.
    That is,

    .. math::

        [\tau P_a^T, \tau P_b^T]^T = R [P_a^T, P_b^T]^T
Parameters
----------
Pa : complex matrix
Pb : complex matrix
The a- and b-periods, respectively, of a genus `g` Riemann surface.
tol : double
        (Default: 1e-4) Tolerance used to verify integrality of transformation
matrix. Dependent on precision of period matrices.
Returns
-------
R : complex matrix
The anti-holomorphic involution matrix.
Todo
----
For numerical stability, replace matrix inversion with linear system
solves.
"""
g,g = Pa.dimensions()
R_RDF = Matrix(RDF, 2*g, 2*g)
Ig = identity_matrix(RDF, g)
M = Im(Pb.T)*Re(Pa) - Im(Pa.T)*Re(Pb)
Minv = M.inverse()
R_RDF[:g,:g] = (2*Re(Pb)*Minv*Im(Pa.T) + Ig).T
R_RDF[:g,g:] = -2*Re(Pa)*Minv*Im(Pa.T)
R_RDF[g:,:g] = 2*Re(Pb)*Minv*Im(Pb.T)
R_RDF[g:,g:] = -(2*Re(Pb)*Minv*Im(Pa.T) + Ig)
R = R_RDF.round().change_ring(ZZ)
# sanity check: make sure that R_RDF is close to integral. we perform this
# test here since the matrix returned should be over ZZ
error = (R_RDF.round() - R_RDF).norm()
if error > tol:
raise ValueError("The anti-holomorphic involution matrix is not "
"integral. Try increasing the precision of the input "
"period matrices.")
return R
def integer_kernel_basis(R):
r"""Returns the Z-basis `[S1 \\ S2]` of the kernel of the anti-holomorphic
involution matrix `R`.
The `2g x g` matrix `[S1 \\ S2]` represents a Z-basis of the kernel space
.. math::
K_\mathbb{Z} = \text{ker}(R^T - \mathbb{I}_{2g})
That is, the basis of the space of all vectors fixed by the
anti-holomorphic involution `R`.
Used as input in `N1_matrix`.
Parameters
----------
R : integer matrix
The anti-holomorphic involution matrix of a genus `g` Riemann surface.
Returns
-------
S : integer matrix
A `2g x g` matrix where each column is a basis element of the fixed
point space of `R`.
"""
twog, twog = R.dimensions()
g = twog/2
K = R.T - identity_matrix(ZZ, twog)
r = K.rank()
# sanity check: the rank of the kernel should be the genus of the curve
if r != g:
raise ValueError("The rank of the integer kernel of K should be "
"equal to the genus.")
# compute the integer kernel from the Smith normal form of K
D,U,V = K.smith_form()
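    # Sage's smith_form() returns (D, U, V) with D = U*K*V diagonal. Since
    # rank(K) = g, the last g invariant factors of K are zero, so the last g
    # columns of V form a Z-basis of the integer kernel.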
S = V[:,g:]
return S
def N1_matrix(Pa, Pb, S, tol=1e-4):
r"""Returns the matrix `N1` from the integer kernel of the anti-holomorphic
involution matrix.
This matrix `N1` is used directly to determine the topological type of a
Riemann surface. Used as input in `symmetric_block_diagonalize`.
    Parameters
    ----------
S : integer matrix
A `2g x g` Z-basis of the kernel of the anti-holomorphic involution.
(See `integer_kernel_basis`.)
tol : double
        (Default: 1e-4) Tolerance used to verify integrality of the matrix.
Dependent on precision of period matrices.
Returns
-------
N1 : GF(2) matrix
A `g x g` matrix from which we can compute the topological type.
"""
# compute the Smith normal form of S, itself
g = S.ncols()
S1 = S[:g,:]
S2 = S[g:,:]
ES, US, VS = S.smith_form()
# construct the matrix N1 piece by piece
Nper = zero_matrix(RDF, 2*g,g)
Nper[:g,:] = -Re(Pb)[:,:]
Nper[g:,:] = Re(Pa)[:,:]
Nhat = (S1.T*Re(Pa) + S2.T*Re(Pb)).inverse()
Ntilde = 2*US*Nper*Nhat
N1_RDF = VS*Ntilde[:g,:]
N1 = N1_RDF.round().change_ring(GF(2))
# sanity check: N1 should be integral
error = (N1_RDF.round() - N1_RDF).norm()
if error > tol:
raise ValueError("The N1 matrix is not integral. Try increasing the "
"precision of the input period matrices.")
return N1
def symmetric_block_diagonalize(N1):
r"""Returns matrices `H` and `Q` such that `N1 = Q*H*Q.T` and `H` is block
diagonal.
The algorithm used here is as follows. Whenever a row operation is
performed (via multiplication on the left by a transformation matrix `q`)
the corresponding symmetric column operation is also performed via
multiplication on the right by `q^T`.
For each column `j` of `N1`:
1. If column `j` consists only of zeros then swap with the last column with
non-zero entries.
2. If there is a `1` in position `j` of the column (i.e. a `1` lies on the
diagonal in this column) then eliminate further entries below as in
standard Gaussian elimination.
3. Otherwise, if there is a `1` in the column, but not in position `j` then
rows are swapped in a way that it appears in the position `j+1` of the
column. Eliminate further entries below as in standard Gaussian
elimination.
4. After elimination, if `1` lies on the diagonal in column `j` then
increment `j` by one. If instead the block matrix `[0 1 \\ 1 0]` lies
along the diagonal then eliminate under the `(j,j+1)` element (the upper
right element) of this `2 x 2` block and increment `j` by two.
5. Repeat until `j` passes the final column or until further columns
consists of all zeros.
6. Finally, perform the appropriate transformations such that all `2 x 2`
blocks in `H` appear first in the diagonalization. (Uses the
`diagonal_locations` helper function.)
Parameters
----------
N1 : GF(2) matrix
Returns
-------
H : GF(2) matrix
Symmetric `g x g` matrix where the diagonal elements consist of either
a "1" or a `2 x 2` block matrix `[0 1 \\ 1 0]`.
Q : GF(2) matrix
The corresponding transformation matrix.
"""
g = N1.nrows()
H = zero_matrix(GF(2), g)
Q = identity_matrix(GF(2), g)
    # if N1 is the zero matrix then H is also the zero matrix (and Q is the
    # identity transformation)
if (N1 % 2) == 0:
return H,Q
# perform the "modified gaussian elimination"
B = Matrix(GF(2),[[0,1],[1,0]])
H = N1.change_ring(GF(2))
j = 0
while (j < g) and (H[:,j:] != 0):
# if the current column is zero then swap with the last non-zero column
if H.column(j) == 0:
last_non_zero_col = max(k for k in range(j,g) if H.column(k) != 0)
Q.swap_columns(j,last_non_zero_col)
H = Q.T*N1*Q
# if the current diagonal element is 1 then gaussian eliminate as
# usual. otherwise, swap rows so that a "1" appears in H[j+1,j] and
# then eliminate from H[j+1,j]
if H[j,j] == 1:
rows_to_eliminate = (r for r in range(g) if H[r,j] == 1 and r != j)
for r in rows_to_eliminate:
Q.add_multiple_of_column(r,j,1)
H = Q.T*N1*Q
else:
# find the first non-zero element in the column after the diagonal
# element and swap rows with this element
first_non_zero = min(k for k in range(j,g) if H[k,j] != 0)
Q.swap_columns(j+1,first_non_zero)
H = Q.T*N1*Q
# eliminate *all* other ones in the column, including those above
# the element (j,j+1)
rows_to_eliminate = (r for r in range(g) if H[r,j] == 1 and r != j+1)
for r in rows_to_eliminate:
Q.add_multiple_of_column(r,j+1,1)
H = Q.T*N1*Q
# increment the column based on the diagonal element
if H[j,j] == 1:
j += 1
elif H[j:(j+2),j:(j+2)] == B:
# in the block diagonal case, need to eliminate below the j+1 term
rows_to_eliminate = (r for r in range(g) if H[r,j+1] == 1 and r != j)
for r in rows_to_eliminate:
Q.add_multiple_of_column(r,j,1)
H = Q.T*N1*Q
j += 2
# finally, check if there are blocks of "special" form. that is, shift all
# blocks such that they occur first along the diagonal of H
index_one, index_B = diagonal_locations(H)
while index_one < index_B:
j = index_B
Qtilde = zero_matrix(GF(2), g)
Qtilde[0,0] = 1
Qtilde[j,0] = 1; Qtilde[j+1,0] = 1
Qtilde[0,j] = 1; Qtilde[0,j+1] = 1
Qtilde[j:(j+2),j:(j+2)] = B
Q = Q*Qtilde
H = Q.T*N1*Q
# continue until none are left
index_one, index_B = diagonal_locations(H)
    # above, we used Q to store column operations on N1. switch to row
    # operations on H so that N1 = Q*H*Q.T
Q = Q.T.inverse()
return H,Q
def diagonal_locations(H):
r"""Returns the indices of the last `1` along the diagonal and the first block
along the diagonal of `H`.
Parameters
----------
H : symmetric GF(2) matrix
Contains either 1's along the diagonal or anti-symmetric blocks.
Returns
-------
index_one : integer
The last occurrence of a `1` along the diagonal of `H`. Equal to `g`
if there are no ones along the diagonal.
index_B : integer
The first occurrence of a block along the diagonal of `H`. Equal to
`-1` if there are no blocks along the diagonal.
"""
g = H.nrows()
B = Matrix(GF(2),[[0,1],[1,0]])
try:
index_one = min(j for j in range(g) if H[j,j] == 1)
except ValueError:
index_one = g
try:
index_B = max(j for j in range(g-1) if H[j:(j+2),j:(j+2)] == B)
except ValueError:
index_B = -1
return index_one, index_B
def symmetric_transformation_matrix(Pa, Pb, S, H, Q, tol=1e-4):
r"""Returns the symplectic matrix `\Gamma` mapping the period matrices `Pa,Pb`
to a symmetric period matrices.
A helper function to :func:`symmetrize_periods`.
Parameters
----------
Pa : complex matrix
A `g x g` a-period matrix.
Pb : complex matrix
A `g x g` b-period matrix.
S : integer matrix
Integer kernel basis matrix.
H : integer matrix
Topological type classification matrix.
Q : integer matrix
The transformation matrix from `symmetric_block_diagonalize`.
tol : double
(Default: 1e-4) Tolerance used to verify integrality of intermediate
matrices. Dependent on precision of period matrices.
Returns
-------
Gamma : integer matrix
A `2g x 2g` symplectic matrix.
"""
# compute A and B
g,g = Pa.dimensions()
rhs = S*Q.change_ring(ZZ)
A = rhs[:g,:g].T
B = rhs[g:,:g].T
H = H.change_ring(ZZ)
# compute C and D
half = QQ(1)/QQ(2)
temp = (A*Re(Pa) + B*Re(Pb)).inverse()
CT = half*A.T*H - Re(Pb)*temp
CT_ZZ = CT.round().change_ring(ZZ)
C = CT_ZZ.T
DT = half*B.T*H + Re(Pa)*temp
DT_ZZ = DT.round().change_ring(ZZ)
D = DT_ZZ.T
# sanity checks: make sure C and D are integral
C_error = (CT.round() - CT).norm()
D_error = (DT.round() - DT).norm()
if (C_error > tol) or (D_error > tol):
raise ValueError("The symmetric transformation matrix is not integral. "
"Try increasing the precision of the input period "
"matrices.")
# construct Gamma
Gamma = zero_matrix(ZZ, 2*g, 2*g)
Gamma[:g,:g] = A
Gamma[:g,g:] = B
Gamma[g:,:g] = C
Gamma[g:,g:] = D
return Gamma
def symmetrize_periods(Pa, Pb, tol=1e-4):
r"""Returns symmetric a- and b-periods `Pa_symm` and `Pb_symm`, as well as the
corresponding symplectic operator `Gamma` such that `Gamma [Pa \\ Pb] =
[Pa_symm \\ Pb_symm]`.
Parameters
----------
Pa : complex matrix
Pb : complex matrix
The a- and b-periods, respectively, of a genus `g` Riemann surface.
tol : double
(Default: 1e-4) Tolerance used to verify integrality of intermediate
matrices. Dependent on precision of period matrices.
Returns
-------
    Pa_symm : complex matrix
    Pb_symm : complex matrix
        The symmetric a- and b-periods, respectively, of a genus `g` Riemann
        surface.
Notes
-----
The algorithm described in Kalla, Klein actually operates on the transposes
of the a- and b-period matrices.
"""
# coerce from numpy, if necessary
if isinstance(Pa, numpy.ndarray):
Pa = Matrix(CDF, numpy.ascontiguousarray(Pa))
if isinstance(Pb, numpy.ndarray):
Pb = Matrix(CDF, numpy.ascontiguousarray(Pb))
# use the transposes of the period matrices and coerce to Sage matrices
Pa = Pa.T
Pb = Pb.T
# use above functions to obtain topological type matrix
g,g = Pa.dimensions()
R = involution_matrix(Pa, Pb, tol=tol)
S = integer_kernel_basis(R)
N1 = N1_matrix(Pa, Pb, S, tol=tol)
H,Q = symmetric_block_diagonalize(N1)
Gamma = symmetric_transformation_matrix(Pa, Pb, S, H, Q, tol=tol)
# compute the corresponding symmetric periods
stacked_periods = zero_matrix(CDF, 2*g, g)
stacked_periods[:g,:] = Pa
stacked_periods[g:,:] = Pb
stacked_symmetric_periods = Gamma*stacked_periods
Pa_symm = stacked_symmetric_periods[:g,:]
Pb_symm = stacked_symmetric_periods[g:,:]
# transpose results back
Pa_symm = Pa_symm.T
Pb_symm = Pb_symm.T
return Pa_symm, Pb_symm
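# Example usage (a minimal sketch; assumes Pa and Pb are g x g complex period
# matrices, e.g. numpy arrays computed elsewhere by abelfunctions):
#
#     Pa_symm, Pb_symm = symmetrize_periods(Pa, Pb)
#
# Only the symmetric periods are returned; the symplectic operator Gamma is
# constructed internally via symmetric_transformation_matrix().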
|
cswiercz/abelfunctions
|
abelfunctions/homology.py
|
Python
|
bsd-3-clause
| 14,848
|
[
"Gaussian"
] |
bc9bdfcfa80512b0fdc6585d4ff7da17e5ad3b55fc3c9123a5bbea4fdb2d14b7
|
########################################################################
# File : CPUNormalization.py
# Author : Ricardo Graciani
########################################################################
""" DIRAC Workload Management System Client module that encapsulates all the
methods necessary to handle CPU normalization
"""
__RCSID__ = "$Id$"
import os, random
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.SiteCEMapping import getQueueInfo
# TODO: This should come from some place in the configuration
NORMALIZATIONCONSTANT = 60. / 250. # from minutes to seconds and from SI00 to HS06 (ie min * SI00 -> sec * HS06 )
UNITS = { 'HS06': 1. , 'SI00': 1. / 250. }
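# Worked example (hypothetical values): a queue advertising maxCPUTime = 2880
# (wall-clock minutes) and SI00 = 2500 would yield
# normCPUTime = (60. / 250.) * 2880 * 2500 = 1728000 HS06-seconds.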
def queueNormalizedCPU( ceUniqueID ):
""" Report Normalized CPU length of queue
"""
result = getQueueInfo( ceUniqueID )
if not result['OK']:
return result
ceInfoDict = result['Value']
siteCSSEction = ceInfoDict['SiteCSSEction']
queueCSSection = ceInfoDict['QueueCSSection']
benchmarkSI00 = __getQueueNormalization( queueCSSection, siteCSSEction )
maxCPUTime = __getMaxCPUTime( queueCSSection )
if maxCPUTime and benchmarkSI00:
normCPUTime = NORMALIZATIONCONSTANT * maxCPUTime * benchmarkSI00
else:
if not benchmarkSI00:
subClusterUniqueID = ceInfoDict['SubClusterUniqueID']
return S_ERROR( 'benchmarkSI00 info not available for %s' % subClusterUniqueID )
if not maxCPUTime:
return S_ERROR( 'maxCPUTime info not available' )
return S_OK( normCPUTime )
def getQueueNormalization( ceUniqueID ):
""" Report Normalization Factor applied by Site to the given Queue
"""
result = getQueueInfo( ceUniqueID )
if not result['OK']:
return result
ceInfoDict = result['Value']
siteCSSEction = ceInfoDict['SiteCSSEction']
queueCSSection = ceInfoDict['QueueCSSection']
subClusterUniqueID = ceInfoDict['SubClusterUniqueID']
benchmarkSI00 = __getQueueNormalization( queueCSSection, siteCSSEction )
if benchmarkSI00:
return S_OK( benchmarkSI00 )
else:
return S_ERROR( 'benchmarkSI00 info not available for %s' % subClusterUniqueID )
#errorList.append( ( subClusterUniqueID , 'benchmarkSI00 info not available' ) )
#exitCode = 3
def __getQueueNormalization( queueCSSection, siteCSSEction ):
""" Query the CS and return the Normalization
"""
benchmarkSI00Option = '%s/%s' % ( queueCSSection, 'SI00' )
benchmarkSI00 = gConfig.getValue( benchmarkSI00Option, 0.0 )
if not benchmarkSI00:
benchmarkSI00Option = '%s/%s' % ( siteCSSEction, 'SI00' )
benchmarkSI00 = gConfig.getValue( benchmarkSI00Option, 0.0 )
return benchmarkSI00
def __getMaxCPUTime( queueCSSection ):
""" Query the CS and return the maxCPUTime
"""
maxCPUTimeOption = '%s/%s' % ( queueCSSection, 'maxCPUTime' )
maxCPUTime = gConfig.getValue( maxCPUTimeOption, 0.0 )
# For some sites there are crazy values in the CS
maxCPUTime = max( maxCPUTime, 0 )
maxCPUTime = min( maxCPUTime, 86400 * 12.5 )
return maxCPUTime
def getCPUNormalization( reference = 'HS06', iterations = 1 ):
""" Get Normalized Power of the current CPU in [reference] units
"""
if reference not in UNITS:
return S_ERROR( 'Unknown Normalization unit %s' % str( reference ) )
try:
    iterations = max( min( int( iterations ), 10 ), 1 )
except ( TypeError, ValueError ), x :
return S_ERROR( x )
# This number of iterations corresponds to 250 HS06 seconds
n = int( 1000 * 1000 * 12.5 )
calib = 250.0 / UNITS[reference]
m = long( 0 )
m2 = long( 0 )
p = 0
p2 = 0
# Do one iteration extra to allow CPUs with variable speed
for i in range( iterations + 1 ):
if i == 1:
start = os.times()
# Now the iterations
for _j in range( n ):
t = random.normalvariate( 10, 1 )
m += t
m2 += t * t
p += t
p2 += t * t
end = os.times()
cput = sum( end[:4] ) - sum( start[:4] )
wall = end[4] - start[4]
if not cput:
return S_ERROR( 'Can not get used CPU' )
return S_OK( {'CPU': cput, 'WALL':wall, 'NORM': calib * iterations / cput, 'UNIT': reference } )
def getCPUTime( CPUNormalizationFactor ):
""" Trying to get CPUTime (in seconds) from the CS. The default is a (low) 10000s.
This is a generic method, independent from the middleware of the resource.
"""
CPUTime = gConfig.getValue( '/LocalSite/CPUTimeLeft', 0 )
if CPUTime:
# This is in HS06sseconds
# We need to convert in real seconds
if not CPUNormalizationFactor: # if CPUNormalizationFactor passed in is 0, try get it from the local cfg
CPUNormalizationFactor = gConfig.getValue( '/LocalSite/CPUNormalizationFactor', 0.0 )
# if CPUNormalizationFactor is not even in the local cfg, it's a problem, and yes the next line will raise an exception
CPUTime = CPUTime / int( CPUNormalizationFactor )
else:
# now we know that we have to find the CPUTimeLeft by looking in the CS
gridCE = gConfig.getValue( '/LocalSite/GridCE' )
CEQueue = gConfig.getValue( '/LocalSite/CEQueue' )
if not CEQueue:
# we have to look for a CEQueue in the CS
# A bit hacky. We should better profit from something generic
gLogger.warn( "No CEQueue in local configuration, looking to find one in CS" )
siteName = gConfig.getValue( '/LocalSite/Site' )
queueSection = '/Resources/Sites/%s/%s/CEs/%s/Queues' % ( siteName.split( '.' )[0], siteName, gridCE )
res = gConfig.getSections( queueSection )
if not res['OK']:
raise RuntimeError( res['Message'] )
queues = res['Value']
CPUTimes = []
for queue in queues:
CPUTimes.append( gConfig.getValue( queueSection + '/' + queue + '/maxCPUTime', 10000 ) )
cpuTimeInMinutes = min( CPUTimes )
# These are (real, wall clock) minutes - damn BDII!
CPUTime = int( cpuTimeInMinutes ) * 60
else:
queueInfo = getQueueInfo( '%s/%s' % ( gridCE, CEQueue ) )
CPUTime = 10000
if not queueInfo['OK'] or not queueInfo['Value']:
gLogger.warn( "Can't find a CE/queue, defaulting CPUTime to %d" % CPUTime )
else:
queueCSSection = queueInfo['Value']['QueueCSSection']
# These are (real, wall clock) minutes - damn BDII!
cpuTimeInMinutes = gConfig.getValue( '%s/maxCPUTime' % queueCSSection )
if cpuTimeInMinutes:
CPUTime = int( cpuTimeInMinutes ) * 60
gLogger.info( "CPUTime for %s: %d" % ( queueCSSection, CPUTime ) )
else:
gLogger.warn( "Can't find maxCPUTime for %s, defaulting CPUTime to %d" % ( queueCSSection, CPUTime ) )
return CPUTime
|
marcelovilaca/DIRAC
|
WorkloadManagementSystem/Client/CPUNormalization.py
|
Python
|
gpl-3.0
| 6,590
|
[
"DIRAC"
] |
69b132e25cac2410d6cb6c8a14bdc6a16e72e5d039dee090bfb8ce3eb66be88e
|
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for the C++ BitVects
"""
from __future__ import print_function
import unittest,os,sys
from rdkit.six.moves import cPickle
from rdkit.DataStructs import cDataStructs
klass = cDataStructs.SparseBitVect
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<=tol
def ieq(n1,n2):
return abs(n1-n2)==0
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
sys.stdout.flush()
def testSparseIdx(self):
""" test indexing into SparseBitVects
"""
v = klass(10)
ok = 1
v[0] = 1
v[2] = 1
v[9] = 1
try:
v[10] = 1
except IndexError:
ok = 1
except:
assert 0, 'setting high bit should have failed with an IndexError'
else:
assert 0, 'setting high bit should have failed'
assert v[0] == 1, 'bad bit'
assert v[1] == 0, 'bad bit'
assert v[2] == 1, 'bad bit'
assert v[9] == 1, 'bad bit'
assert v[-1] == 1, 'bad bit'
assert v[-2] == 0, 'bad bit'
try:
foo = v[10]
except IndexError:
ok = 1
except:
assert 0, 'getting high bit should have failed with an IndexError'
else:
assert 0, 'getting high bit should have failed'
def testSparseBitGet(self):
""" test operations to get sparse bits
"""
v = klass(10)
v[0] = 1
v[2] = 1
v[6] = 1
assert len(v)==10,'len(SparseBitVect) failed'
assert v.GetNumOnBits()==3,'NumOnBits failed'
assert tuple(v.GetOnBits())==(0,2,6), 'GetOnBits failed'
def testSparseBitOps(self):
""" test bit operations on SparseBitVects
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
assert tuple((v1&v2).GetOnBits()) == (0,6),'binary & failed'
assert tuple((v1&v2).GetOnBits()) == (0,6),'binary & failed'
assert tuple((v1|v2).GetOnBits()) == (0,2,3,6),'binary | failed'
assert tuple((v1^v2).GetOnBits()) == (2,3),'binary ^ failed'
def testTanimotoSim(self):
""" test Tanimoto Similarity measure
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert feq(cDataStructs.TanimotoSimilarity(v1,v1),1.0),'bad v1,v1 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v2,v2),1.0),'bad v2,v2 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v1,v2),0.5),'bad v1,v2 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v2,v1),0.5),'bad v2,v1 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v1,v3),0.0),'bad v1,v3 TanimotoSimilarity'
assert feq(cDataStructs.TanimotoSimilarity(v2,v3),0.0),'bad v2,v3 TanimotoSimilarity'
def testOnBitSim(self):
""" test On Bit Similarity measure
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert feq(cDataStructs.OnBitSimilarity(v1,v1),1.0),'bad v1,v1 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v2,v2),1.0),'bad v2,v2 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v1,v2),0.5),'bad v1,v2 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v2,v1),0.5),'bad v2,v1 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v1,v3),0.0),'bad v1,v3 OnBitSimilarity'
assert feq(cDataStructs.OnBitSimilarity(v2,v3),0.0),'bad v2,v3 OnBitSimilarity'
def testNumBitsInCommon(self):
""" test calculation of Number of Bits in Common
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert ieq(cDataStructs.NumBitsInCommon(v1,v1),10),'bad v1,v1 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v2,v2),10),'bad v2,v2 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v1,v2),8),'bad v1,v2 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v2,v1),8),'bad v2,v1 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v1,v3),4),'bad v1,v3 NumBitsInCommon'
assert ieq(cDataStructs.NumBitsInCommon(v2,v3),4),'bad v2,v3 NumBitsInCommon'
def testAllBitSim(self):
""" test All Bit Similarity measure
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = klass(10)
v3[1] = 1
v3[4] = 1
v3[8] = 1
assert feq(cDataStructs.AllBitSimilarity(v1,v1),1.0),'bad v1,v1 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v2,v2),1.0),'bad v2,v2 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v1,v2),0.8),'bad v1,v2 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v2,v1),0.8),'bad v2,v1 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v1,v3),0.4),'bad v1,v3 AllBitSimilarity'
assert feq(cDataStructs.AllBitSimilarity(v2,v3),0.4),'bad v2,v3 AllBitSimilarity'
def testStringOps(self):
""" test serialization operations
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
s = v1.ToBinary()
v2 = klass(s)
assert tuple(v2.GetOnBits())==tuple(v1.GetOnBits()),'To/From string failed'
def testOnBitsInCommon(self):
""" test OnBitsInCommon
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = cDataStructs.OnBitsInCommon(v1,v2)
assert tuple(v3)==(0,6),'bad on bits in common'
def testOffBitsInCommon(self):
""" test OffBitsInCommon
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
v3 = cDataStructs.OffBitsInCommon(v1,v2)
assert tuple(v3)==(1,4,5,7,8,9),'bad off bits in common'
def testOnBitProjSimilarity(self):
""" test OnBitProjSimilarity
"""
v1 = klass(10)
v1[1] = 1
v1[2] = 1
v1[3] = 1
v2 = klass(10)
v2[2] = 1
v2[3] = 1
res = cDataStructs.OnBitProjSimilarity(v1,v2)
assert feq(res[0],0.666667),'bad 1st OnBitsProjSimilarity'
assert feq(res[1],1.0),'bad 2nd OnBitsProjSimilarity'
res = cDataStructs.OnBitProjSimilarity(v2,v1)
assert feq(res[1],0.666667),'bad 1st OnBitsProjSimilarity'
assert feq(res[0],1.0),'bad 2nd OnBitsProjSimilarity'
def testOffBitProjSimilarity(self):
""" test OffBitProjSimilarity
"""
v1 = klass(10)
v1[1] = 1
v1[2] = 1
v1[3] = 1
v2 = klass(10)
v2[2] = 1
v2[3] = 1
res = cDataStructs.OffBitProjSimilarity(v1,v2)
assert feq(res[0],1.0),'bad 1st OffBitsProjSimilarity'
assert feq(res[1],0.875),'bad 2nd OffBitsProjSimilarity'
res = cDataStructs.OffBitProjSimilarity(v2,v1)
assert feq(res[1],1.0),'bad 1st OffBitsProjSimilarity'
assert feq(res[0],0.875),'bad 2nd OffBitsProjSimilarity'
def testPkl(self):
""" test pickling
"""
v1 = klass(10)
v1[1] = 1
v1[2] = 1
v1[3] = 1
pklName = 'foo.pkl'
outF = open(pklName,'wb+')
cPickle.dump(v1,outF)
outF.close()
inF = open(pklName,'rb')
v2 = cPickle.load(inF)
inF.close()
os.unlink(pklName)
assert tuple(v1.GetOnBits())==tuple(v2.GetOnBits()),'pkl failed'
def testFingerprints(self):
" test the parsing of daylight fingerprints "
#actual daylight output:
rawD="""
0,Cc1n[nH]c(=O)nc1N,.b+HHa.EgU6+ibEIr89.CpX0g8FZiXH+R0+Ps.mr6tg.2
1,Cc1n[nH]c(=O)[nH]c1=O,.b7HEa..ccc+gWEIr89.8lV8gOF3aXFFR.+Ps.mZ6lg.2
2,Cc1nnc(NN)nc1O,.H+nHq2EcY09y5EIr9e.8p50h0NgiWGNx4+Hm+Gbslw.2
3,Cc1nnc(N)nc1C,.1.HHa..cUI6i5E2rO8.Op10d0NoiWGVx.+Hm.Gb6lo.2
"""
dists="""0,0,1.000000
0,1,0.788991
0,2,0.677165
0,3,0.686957
1,1,1.000000
1,2,0.578125
1,3,0.591304
2,2,1.000000
2,3,0.732759
3,3,1.000000
"""
fps = []
for line in rawD.split('\n'):
if line:
sbv = klass(256)
id,smi,fp=line.split(',')
cDataStructs.InitFromDaylightString(sbv,fp)
fps.append(sbv)
ds = dists.split('\n')
whichd=0
for i in range(len(fps)):
for j in range(i,len(fps)):
idx1,idx2,tgt = ds[whichd].split(',')
whichd += 1
tgt = float(tgt)
dist = cDataStructs.TanimotoSimilarity(fps[i],fps[j])
assert feq(tgt,dist),'tanimoto between fps %d and %d failed'%(int(idx1),int(idx2))
def testFold(self):
""" test folding fingerprints
"""
v1 = klass(16)
v1[1] = 1
v1[12] = 1
v1[9] = 1
try:
v2 = cDataStructs.FoldFingerprint(v1)
except:
assert 0,'Fold with no args failed'
assert v1.GetNumBits()/2==v2.GetNumBits(),'bad num bits post folding'
try:
v2 = cDataStructs.FoldFingerprint(v1,2)
except:
assert 0,'Fold with arg failed'
assert v1.GetNumBits()/2==v2.GetNumBits(),'bad num bits post folding'
v2 = cDataStructs.FoldFingerprint(v1,4)
assert v1.GetNumBits()/4==v2.GetNumBits(),'bad num bits post folding'
def testOtherSims(self):
""" test other similarity measures
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
assert feq(cDataStructs.CosineSimilarity(v1,v2),.6667)
assert feq(cDataStructs.KulczynskiSimilarity(v1,v2),.6667)
assert feq(cDataStructs.DiceSimilarity(v1,v2),.6667)
assert feq(cDataStructs.SokalSimilarity(v1,v2),.3333)
assert feq(cDataStructs.McConnaugheySimilarity(v1,v2),.3333)
assert feq(cDataStructs.AsymmetricSimilarity(v1,v2),.6667)
assert feq(cDataStructs.BraunBlanquetSimilarity(v1,v2),.6667)
assert feq(cDataStructs.RusselSimilarity(v1,v2),.2000)
assert feq(cDataStructs.RogotGoldbergSimilarity(v1,v2),.7619)
def testQuickSims(self):
""" the asymmetric similarity stuff (bv,pkl)
"""
v1 = klass(10)
v1[0] = 1
v1[2] = 1
v1[6] = 1
v2 = klass(10)
v2[0] = 1
v2[3] = 1
v2[6] = 1
pkl = v2.ToBinary()
v2 = pkl
assert feq(cDataStructs.CosineSimilarity(v1,v2),.6667)
assert feq(cDataStructs.KulczynskiSimilarity(v1,v2),.6667)
assert feq(cDataStructs.DiceSimilarity(v1,v2),.6667)
assert feq(cDataStructs.SokalSimilarity(v1,v2),.3333)
assert feq(cDataStructs.McConnaugheySimilarity(v1,v2),.3333)
assert feq(cDataStructs.AsymmetricSimilarity(v1,v2),.6667)
assert feq(cDataStructs.BraunBlanquetSimilarity(v1,v2),.6667)
assert feq(cDataStructs.RusselSimilarity(v1,v2),.2000)
assert feq(cDataStructs.RogotGoldbergSimilarity(v1,v2),.7619)
if __name__ == '__main__':
unittest.main()
|
soerendip42/rdkit
|
rdkit/DataStructs/UnitTestcBitVect.py
|
Python
|
bsd-3-clause
| 10,981
|
[
"RDKit"
] |
b8b98a4270349e66f2b61176eb2eb322bbed4bba1d65497bb40b5353c5fc3347
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: ponies.py
# by Arzaroth Lekva
# lekva@arzaroth.com
#
from collections import OrderedDict
PONY_LIST = OrderedDict([
('Pony_Ace', 'Ace'),
('Pony_Action_Shot', 'Action Shot'),
('Pony_AKYearling', 'AKYearling'),
('Pony_Aloe', 'Aloe'),
('Pony_Apple_Bloom', 'Apple Bloom'),
('Pony_Apple_Bottoms', 'Apple Bottoms'),
('Pony_Apple_Bumpkin', 'Apple Bumpkin'),
('Pony_Apple_Cider', 'Apple Cider'),
('Pony_Apple_Cinnamon', 'Apple Cinnamon'),
('Pony_Apple_Cobbler', 'Apple Cobbler'),
('Pony_Apple_Dumpling', 'Apple Dumpling'),
('Pony_Apple_Honey', 'Apple Honey'),
('Pony_Apple_Leaves', 'Apple Leaves'),
('Pony_Apple_Pie', 'Apple Pie'),
('Pony_Apple_Rose', 'Apple Rose'),
('Pony_Apple_Stars', 'Apple Stars'),
('Pony_Apple_Strudel', 'Apple Strudel'),
('Pony_Applefritter', 'Apple Fritter'),
('Pony_Applejack', 'Applejack'),
('Pony_Architecture_Unicorn', 'Architecture Unicorn'),
('Pony_Aunt_Applesauce', 'Aunt Applesauce'),
('Pony_Aunt_Orange', 'Aunt Orange'),
('Pony_Babs_Seed', 'Babs Seed'),
('Pony_Banana_Bliss', 'Banana Bliss'),
('Pony_Beauty_Brass', 'Beauty Brass'),
('Pony_Berry_Punch', 'Berry Punch'),
('Pony_Big_Daddy', 'Big Daddy McColt'),
('Pony_Big_Mac', 'Big Macintosh'),
('Pony_Big_Shot', 'Big Shot'),
('Pony_Blue_moon', 'Blue Moon'),
('Pony_Bon_Bon', 'Bon Bon'),
('Pony_Braeburn', 'Braeburn'),
('Pony_Bright_Unicorn', 'Bright Unicorn'),
('Pony_Bulk_Biceps', 'Bulk Biceps'),
('Pony_Candy_Apples', 'Candy Apples'),
('Pony_Caramel', 'Caramel'),
('Pony_Caramel_Apple', 'Caramel Apple'),
('Pony_Charity_Kindheart', 'Charity Kindheart'),
('Pony_Cheerilee', 'Cheerilee'),
('Pony_Cheese_Sandwich', 'Cheese Sandwich'),
('Pony_Cherry_Fizzy', 'Cherry Fizzy'),
('Pony_Cherry_Jubilee', 'Cherry Jubilee'),
('Pony_CherryBerry', 'Cherry Berry'),
('Pony_Claude_the_Puppeteer', 'Claude the Puppeteer'),
('Pony_Clear_Skies', 'Clear Skies'),
('Pony_Clumsy_Clownspony', 'Clumsy Clownspony'),
('Pony_Coco_Crusoe', 'Coco Crusoe'),
('Pony_Coco_Pommel', 'Coco Pommel'),
('Pony_Comet_Tail', 'Comet Tail'),
('Pony_Compass_Star', 'Compass Star'),
('Pony_Conductor', 'Conductor'),
('Pony_Countess_Coloratura', 'Countess Coloratura'),
('Pony_Crescent_Moon', 'Crescent Moon'),
('Pony_Curio_Shopkeeper', 'Curio Shopkeeper'),
('Pony_Daisy', 'Daisy'),
('Pony_Dancer_Pony_1', 'Coloratura\'s Rocker'),
('Pony_Dancer_Pony_2', 'Coloratura\'s Stylist'),
('Pony_Dancer_Pony_3', 'Coloratura\'s Choreographer'),
('Pony_Dancer_Pony_4', 'Coloratura\'s Hype Pony'),
('Pony_Dancer_Pony_5', 'Coloratura\'s Lyricist'),
('Pony_Dancer_Pony_6', 'Coloratura\'s Breakdancer'),
('Pony_Dancing_Clownspony', 'Dancing Clownspony'),
('Pony_Daring', 'Daring Do'),
('Pony_Diamond_Tiara', 'Diamond Tiara'),
('Pony_Discord', 'Discord'),
('Pony_Dj_Pon3', 'Dj Pon3 (Ponyville)'),
('Pony_Dj_Pon3_Canterlot', 'Dj Pon3 (Canterlot)'),
('Pony_Double_Diamond', 'Double Diamond'),
('Pony_DrWhooves', 'Dr. Hooves'),
('Pony_Eclaire_Creme', 'Eclaire Creme'),
('Pony_Eff_Stop', 'Eff Stop'),
('Pony_Elite_Male', 'Elite Pony'),
('Pony_Emerald_Gem', 'Emerald Gem'),
('Pony_Emerald_Green', 'Emerald Green'),
('Pony_Fancypants', 'Fancypants'),
('Pony_Fast_Clip', 'Fast Clip'),
('Pony_Fashion_Plate', 'Fashion Plate'),
('Pony_Fashionable_Unicorn', 'Fashionable Unicorn'),
('Pony_Featherweight', 'Featherweight'),
('Pony_Filthy_Rich', 'Filthy Rich'),
('Pony_Fine_Line', 'Fine Line'),
('Pony_FireChief', 'Dinky Doo (Fire Chief)'),
('Pony_Flam', 'Flam'),
('Pony_Flash_Sentry', 'Flash Sentry'),
('Pony_Flashy_Pony', 'Flashy Pony'),
('Pony_Fleetfoot', 'Fleetfoot'),
('Pony_Fleur_Dis_Lee', 'Fleur Dis Lee'),
('Pony_Flim', 'Flim'),
('Pony_Fluttershy', 'Fluttershy'),
('Pony_Forsythia', 'Forsythia'),
('Pony_Four_Step', 'Four Step'),
('Pony_Frederic', 'Frederic'),
('Pony_Gala_Appleby', 'Gala Appleby'),
('Pony_Gilda', 'Gilda'),
('Pony_Gleeful_Clownspony', 'Gleeful Clownspony'),
('Pony_Golden_Delicious', 'Golden Delicious'),
('Pony_Golden_Harvest', 'Golden Harvest'),
('Pony_Goldie_Delicious', 'Goldie Delicious'),
('Pony_Goth_Unicorn', 'Goth Unicorn'),
('Pony_Grampa_Gruff', 'Grampa Gruff'),
('Pony_Granny_Smith', 'Granny Smith'),
('Pony_Green_Jewel', 'Green Jewel'),
('Pony_Greta', 'Greta'),
('Pony_Griffon_Shopkeeper', 'Griffon Shopkeeper'),
('Pony_Gustave_le_Grand', 'Gustave le Grand'),
('Pony_Half_Baked_Apple', 'Half Baked Apple'),
('Pony_Hayseed_Turnip_Truck', 'Hayseed Turnip Truck'),
('Pony_Hoity_Toity', 'Hoity Toity'),
('Pony_Horticultural_Pegasus', 'Horticultural Pegasus'),
('Pony_Jeff_Letrotski', 'Jeff Letrotski'),
('Pony_Jet_Set', 'Jet Set'),
('Pony_Jigging_Clownspony', 'Jigging Clownspony'),
('Pony_Joe', 'Joe'),
('Pony_Jokester_Clownspony', 'Jokester Clownspony'),
('Pony_Junebug', 'Junebug'),
('Pony_Junior_Deputy', 'Junior Deputy'),
('Pony_King_Sombra', 'King Sombra'),
('Pony_Lassoing_Clownspony', 'Lassoing Clownspony'),
('Pony_Lemon_Hearts', 'Lemon Hearts'),
('Pony_Lemony_Gem', 'Lemony Gem'),
('Pony_Li_I_Griffon', 'Li\'l Griffon'),
('Pony_Lightning_Dust', 'Lightning Dust'),
('Pony_Lily_Valley', 'Lily Valley'),
('Pony_Limestone_Pie', 'Limestone Pie'),
('Pony_Lotus_Blossom', 'Lotus Blossom'),
('Pony_Lovestruck', 'Lovestruck'),
('Pony_Lucky_Clover', 'Lucky Clover'),
('Pony_Lucky_Dreams', 'Lucky Dreams'),
('Pony_Luna_Guard', 'Luna Guard'),
('Pony_Lyra', 'Lyra'),
('Pony_Lyrica', 'Lyrica'),
('Pony_Ma_Hooffield', 'Ma Hooffield'),
('Pony_Magnum', 'Hondo Flanks (Magnum)'),
('Pony_Mane_Goodall', 'Mane Goodall'),
('Pony_Mane_iac', 'Mane-iac'),
('Pony_Manehattan_Delegate', 'Manehattan Delegate'),
('Pony_Marble_Pie', 'Marble Pie'),
('Pony_Maud_Pie', 'Maud Pie'),
('Pony_Mayor', 'Mayor'),
('Pony_Minuette', 'Minuette'),
('Pony_Misty_Fly', 'Misty Fly'),
('Pony_Moondancer', 'Moondancer'),
('Pony_Mr_Breezy', 'Mr. Breezy'),
('Pony_Mr_Cake', 'Mr. Cake'),
('Pony_Mr_Greenhooves', 'Mr. Greenhooves'),
('Pony_Mr_Waddle', 'Mr. Waddle'),
('Pony_Mrs_Cake', 'Mrs. Cake'),
('Pony_MsHarshwhinny', 'Ms. Harshwhinny'),
('Pony_Musical_Clownspony', 'Musical Clownspony'),
('Pony_Neon_Lights', 'Neon Lights'),
('Pony_Nerdpony', 'Nerdpony'),
('Pony_Night_Glider', 'Night Glider'),
('Pony_Noteworthy', 'Noteworthy'),
('Pony_Nurse_Redheart', 'Nurse Redheart'),
('Pony_Octavia', 'Octavia'),
('Pony_Open_Skies', 'Open Skies'),
('Pony_Parish', 'Parish'),
('Pony_Party_Favor', 'Party Favor'),
('Pony_Peachy_Pie', 'Peachy Pie'),
('Pony_Peachy_Sweet', 'Peachy Sweet'),
('Pony_Pearl', 'Cookie Crumbles (Betty Bouffant)'),
('Pony_Perfect_Pace', 'Perfect Pace'),
('Pony_Pest_Control_Pony', 'Pest Control Pony'),
('Pony_Photofinish', 'Photo Finish'),
('Pony_Pinkie_Pie', 'Pinkie Pie'),
('Pony_Pinkiepies_Dad', 'Igneous Rock'),
('Pony_Pinkiepies_Mom', 'Cloudy Quartz'),
('Pony_Pipsqueak', 'Pipsqueak'),
('Pony_Posh_Unicorn', 'Posh Unicorn'),
('Pony_Press_Pass', 'Press Pass'),
('Pony_Prim_Hemline', 'Prim Hemline'),
('Pony_Prince_Blueblood', 'Prince Blueblood'),
('Pony_Princess_Cadence', 'Princess Cadence'),
('Pony_Princess_Celestia', 'Princess Celestia'),
('Pony_Princess_Luna', 'Princess Luna'),
('Pony_Professor', 'Bill Neigh (Professor)'),
('Pony_Public_Works_Pony', 'Public Works Pony'),
('Pony_Purple_Wave', 'Purple Wave'),
('Pony_Quake', 'Quake'),
('Pony_Rainbow_Dash', 'Rainbow Dash'),
('Pony_Randolph', 'Randolph the Butler'),
('Pony_Rara', 'Rara'),
('Pony_Rare_Find', 'Rare Find'),
('Pony_Rarity', 'Rarity'),
('Pony_Red_Delicious', 'Red Delicious'),
('Pony_Red_Gala', 'Red Gala'),
('Pony_Renfairpony', 'Richard (the) Hoovenheart'),
('Pony_Royal_Guard', 'Royal Guard'),
('Pony_Royal_Pin', 'Royal Pin'),
('Pony_Royal_Ribbon', 'Royal Ribbon'),
('Pony_Royal_Riff', 'Royal Riff'),
('Pony_Rumble', 'Rumble'),
('Pony_Sapphire_Shores', 'Sapphire Shores'),
('Pony_Sassy_Saddles', 'Sassy Saddles'),
('Pony_Savoir_Fare', 'Savoir Fare'),
('Pony_Scootaloo', 'Scootaloo'),
('Pony_Sea_Swirl', 'Sea Swirl'),
('Pony_Senior_Deputy', 'Senior Deputy'),
('Pony_Shadow_Surprise', 'The Shadowbolts'),
('Pony_Sheriff_Silverstar', 'Sheriff Silverstar'),
('Pony_Shining_Armour', 'Shining Armour'),
('Pony_Shooting_Star', 'Shooting Star'),
('Pony_Silver_Shill', 'Silver Shill'),
('Pony_Silver_Spanner', 'Silver Spanner'),
('Pony_Silver_Spoon', 'Silver Spoon'),
('Pony_Snails', 'Snails'),
('Pony_Snappy_Scoop', 'Reporter Pony (Snappy Scoop)'),
('Pony_Snips', 'Snips'),
('Pony_Soarin', 'Soarin'),
('Pony_Spike', 'Spike'),
('Pony_Spitfire', 'Spitfire'),
('Pony_Spoiled_Rich', 'Spoiled Rich'),
('Pony_Sprinkle_Stripe', 'Sprinkle Stripe'),
('Pony_Starlight_Glimmer', 'Starlight Glimmer'),
('Pony_Studious_Delegate', 'Studious Delegate'),
('Pony_Sugar_Belle', 'Sugar Belle'),
('Pony_Sunny_Daze', 'Sunny Daze'),
('Pony_Sunsetshimmer', 'Sunset Shimmer'),
('Pony_Sunshower', 'Sunshower'),
('Pony_Suri_Polomare', 'Suri Polomare'),
('Pony_Svengallop', 'Svengallop'),
('Pony_Swan_Song', 'Swan Song'),
('Pony_Sweetiebelle', 'Sweetie Belle'),
('Pony_Thunderlane', 'Thunderlane'),
('Pony_Toe_Tapper', 'Toe Tapper'),
('Pony_Torch_Song', 'Torch Song'),
('Pony_Tracy_Flash', 'Photographer Pony (Tracy Flash)'),
('Pony_Traveling_Gentlecolt', 'Traveling Gentlecolt'),
('Pony_Traveling_Mare', 'Traveling Mare'),
('Pony_Traveling_Pony', 'Traveling Pony'),
('Pony_Tree_Hugger', 'Tree Hugger'),
('Pony_Trenderhoof', 'Trenderhoof'),
('Pony_Trixie', 'Trixie'),
('Pony_Trouble_Shoes', 'Trouble Shoes'),
('Pony_Truffle', "Teacher's Pet"),
('Pony_Twilight_Sparkle', 'Twilight Sparkle'),
('Pony_Twilight_Velvet', 'Twilight Velvet'),
('Pony_Twilights_Dad', "Night Light (Twilight's Dad)"),
('Pony_Twinkleshine', 'Twinkleshine'),
('Pony_Twist', 'Twist'),
('Pony_Uncle_Orange', 'Uncle Orange'),
('Pony_Unicorn_Guard', 'Unicorn Guard'),
('Pony_Unicorn_Painter', 'Unicorn Painter'),
('Pony_Uppercrust', 'Upper Crust'),
('Pony_Walter', 'Walter (Bowling Pony)'),
('Pony_Wensley', 'Wensley'),
('Pony_Whinnyapolis_Delegate', 'Whinnyapolis Delegate'),
('Pony_Wild_Fire', 'Wild Fire'),
('Pony_Wind_Rider', 'Wind Rider'),
('Pony_Zecora', 'Zecora'),
('Pony_Zipporwhill', 'Zipporwhill')
])
|
Arzaroth/CelestiaSunrise
|
celestia/utility/ponies.py
|
Python
|
bsd-2-clause
| 10,899
|
[
"FLEUR"
] |
4ca211c8fa345eae3950b15c17bea42c56be6b2efa664cde5a580e0693f8e0d5
|
#!/usr/bin/env python
# encoding: utf-8
"""PGEM test configuration model.
By default, connects to configuration.db, which stores the test item settings.
"""
__version__ = "0.1"
__author__ = "@fanmuzhi, @boqiling"
__all__ = ["PGEMConfig", "TestItem"]
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Float, String, Boolean
from sqlalchemy import ForeignKey, UniqueConstraint
from sqlalchemy.orm import relationship
SQLBase = declarative_base()
class PGEMConfig(SQLBase):
__tablename__ = "configuration"
id = Column(Integer, primary_key=True)
partnumber = Column(String(20), nullable=False)
description = Column(String(50))
revision = Column(String(5), nullable=False)
testitems = relationship("TestItem", backref="configuration",
cascade="all, delete-orphan")
__table_args__ = (UniqueConstraint('partnumber',
'revision',
name='_partnumber_revision_uc_'),)
def to_dict(self):
items_list = {}
for item in self.testitems:
items_list.update(item.to_dict())
# items_list = {"ITEM": items_list}
return {"partnumber": self.partnumber,
"description": self.description,
"revision": self.revision,
"testitems": items_list}
class TestItem(SQLBase):
__tablename__ = "test_item"
id = Column(Integer, primary_key=True)
configid = Column(Integer, ForeignKey("configuration.id"))
name = Column(String(10), nullable=False)
description = Column(String(30))
enable = Column(Boolean, nullable=False)
min = Column(Float)
max = Column(Float)
stoponfail = Column(Boolean, default=True)
misc = Column(String(50))
def to_dict(self):
return {
self.name: {
"description": self.description,
"enable": int(self.enable),
"min": self.min,
"max": self.max,
"stoponfail": int(self.stoponfail),
"misc": self.misc
}
}
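    # A sketch of the dict shape produced by to_dict(); the field values
    # below are hypothetical, not taken from a real configuration.db row:
    #
    #   {"Check_Temp": {"description": "Check Temperature on chip SE97BTP",
    #                   "enable": 1, "min": 5.0, "max": 30.0,
    #                   "stoponfail": 0, "misc": None}}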
if __name__ == "__main__":
from session import SessionManager
dburi = "sqlite:///configuration.db"
sm = SessionManager()
session = sm.get_session(dburi)
sm.prepare_db(dburi, [PGEMConfig, TestItem])
# Insert Example
CrystalConfig = PGEMConfig()
CrystalConfig.partnumber = "AGIGA9601-002BCA"
CrystalConfig.description = "Crystal"
CrystalConfig.revision = "04"
CheckTemp = TestItem()
CheckTemp.name = "Check_Temp"
CheckTemp.description = "Check Temperature on chip SE97BTP, data in degree"
CheckTemp.enable = True
CheckTemp.min = 5.0
CheckTemp.max = 30.0
CheckTemp.stoponfail = False
Charge = TestItem()
Charge.name = "Charge"
Charge.description = "Charge DUT with BQ24707, limition in seconds"
Charge.enable = True
Charge.min = 30.0
Charge.max = 120.0
Charge.stoponfail = True
try:
CrystalConfig.testitems.append(CheckTemp)
CrystalConfig.testitems.append(Charge)
session.add(CrystalConfig)
session.commit()
except Exception as e:
        print(e)
session.rollback()
# Query Example
crystal = session.query(PGEMConfig).filter(
PGEMConfig.partnumber == "AGIGA9601-002BCA",
PGEMConfig.revision == "04").first()
for testitem in crystal.testitems:
if testitem.name == "Charge":
            print(testitem.name)
            print(testitem.description)
            print(testitem.max)
    print(crystal.to_dict())
|
hardanimal/UFT_UPGEM
|
src/UFT/backend/configuration.py
|
Python
|
gpl-3.0
| 3,635
|
[
"CRYSTAL"
] |
b48f8a4584146ffac3fd1c3b5f88a7b0787347ce17b8717857759817e793349e
|
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
# Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import os
import os.path
import re
import tempfile
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
            with open(self._loader.get_real_file(fragment), 'rb') as fragment_fh:
                fragment_content = fragment_fh.read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b'\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
                    if not delimiter.endswith(b'\n'):
tmp.write(b'\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b'\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
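    # Worked example of the logic above (fragment names and contents are
    # illustrative, not taken from the Ansible docs): given fragments
    # '01-base.conf' containing b'a\n' and '02-extra.conf' containing b'b',
    # assembled with delimiter='# ---', the resulting file reads:
    #
    #   a
    #   # ---
    #   b
    #
    # because a delimiter is only written between fragments, and a newline is
    # appended whenever the preceding fragment or the delimiter lacks one.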
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = "skipped, this module does not support check_mode."
return result
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
if src is None or dest is None:
result['failed'] = True
result['msg'] = "src and dest are required"
return result
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
if boolean(remote_src):
result.update(self._execute_module(tmp=tmp, task_vars=task_vars))
return result
else:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_native(e)
return result
if not tmp:
tmp = self._make_tmp_path(remote_user)
self._cleanup_remote_tmp = True
if not os.path.isdir(src):
result['failed'] = True
result['msg'] = u"Source (%s) is not a directory" % src
return result
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args.update(
dict(
dest=dest,
original_basename=os.path.basename(src),
)
)
if path_checksum != dest_stat['checksum']:
if self._play_context.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(tmp, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms2((tmp, remote_path), remote_user)
            new_module_args.update(dict(src=xfered))
res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)
if diff:
res['diff'] = diff
result.update(res)
else:
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))
self._remove_tmp_path(tmp)
return result
|
eerorika/ansible
|
lib/ansible/plugins/action/assemble.py
|
Python
|
gpl-3.0
| 6,427
|
[
"Brian"
] |
653b8615fa6d7726d5b929858d152b08327a9b5abc2775a0abae3c1774965bc0
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MessageType.clinic'
db.add_column('core_messagetype', 'clinic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Clinic'], null=True), keep_default=False)
# Changing field 'MessageType.group'
db.alter_column('core_messagetype', 'group_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True))
def backwards(self, orm):
# Deleting field 'MessageType.clinic'
db.delete_column('core_messagetype', 'clinic_id')
# User chose to not deal with backwards NULL issues for 'MessageType.group'
raise RuntimeError("Cannot reverse this migration. 'MessageType.group' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.authprofile': {
'Meta': {'object_name': 'AuthProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Patient']", 'unique': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'core.changerequest': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'ChangeRequest'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request': ('django.db.models.fields.TextField', [], {}),
'request_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Visit']"})
},
'core.clinic': {
'Meta': {'object_name': 'Clinic'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'te_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'clinic'", 'null': 'True', 'to': "orm['auth.User']"})
},
'core.event': {
'Meta': {'object_name': 'Event'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.historicalpatient': {
'Meta': {'ordering': "('-history_id',)", 'object_name': 'HistoricalPatient'},
'active_msisdn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MSISDN']", 'null': 'True', 'blank': 'True'}),
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deceased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disclosed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'last_clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Clinic']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'opted_in': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'regiment': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'risk_profile': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'te_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.historicalvisit': {
'Meta': {'ordering': "('-history_id',)", 'object_name': 'HistoricalVisit'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Clinic']"}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Patient']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'te_visit_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'attended_message': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missed_message': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tomorrow_message': ('django.db.models.fields.TextField', [], {}),
'twoweeks_message': ('django.db.models.fields.TextField', [], {})
},
'core.messagetype': {
'Meta': {'object_name': 'MessageType'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Clinic']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.msisdn': {
'Meta': {'ordering': "['-id']", 'object_name': 'MSISDN'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msisdn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'core.patient': {
'Meta': {'ordering': "['created_at']", 'object_name': 'Patient'},
'active_msisdn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MSISDN']", 'null': 'True', 'blank': 'True'}),
'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deceased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'disclosed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'last_clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Clinic']", 'null': 'True', 'blank': 'True'}),
'msisdns': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'contacts'", 'symmetrical': 'False', 'to': "orm['core.MSISDN']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'opted_in': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'regiment': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'risk_profile': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'te_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.pleasecallme': {
'Meta': {'object_name': 'PleaseCallMe'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pcms'", 'null': 'True', 'to': "orm['core.Clinic']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'msisdn': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pcms'", 'to': "orm['core.MSISDN']"}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'ot'", 'max_length': '2'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.visit': {
'Meta': {'ordering': "['date']", 'object_name': 'Visit'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Clinic']"}),
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Patient']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'te_visit_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit_type': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
|
praekelt/txtalert
|
txtalert/core/migrations/0018_auto__add_field_messagetype_clinic__chg_field_messagetype_group.py
|
Python
|
gpl-3.0
| 16,298
|
[
"VisIt"
] |
1d015b1b897de2d8cb2fc3b6e3820eaa0209da1f0fe11c0fa5d24e3c5507dc7b
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('refactor_workflow')
@click.argument("workflow_id", type=str)
@click.option("--actions", type=str, multiple=True, required=True)
@click.option(
"--dry_run",
help="When true, perform a dry run where the existing workflow is preserved. The refactored workflow is returned in the output of the method, but not saved on the Galaxy server.",
is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, workflow_id, actions, dry_run=False):
"""Refactor workflow with given actions.
Output:
Dictionary containing logged messages for the executed actions
and the refactored workflow.
"""
return ctx.gi.workflows.refactor_workflow(workflow_id, actions, dry_run=dry_run)
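# A hypothetical shell invocation sketch; the workflow id and the action
# payload below are made up for illustration, and the set of valid actions is
# defined by Galaxy's workflow refactoring API, which is not shown here:
#
#   parsec workflows refactor_workflow 0a248a1f62a0cc04 \
#       --actions '{"action_type": "update_name", "name": "My workflow"}' \
#       --dry_run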
|
galaxy-iuc/parsec
|
parsec/commands/workflows/refactor_workflow.py
|
Python
|
apache-2.0
| 859
|
[
"Galaxy"
] |
8c55e0a4de07c7e0d5c5e877366262b1cd6844d1241737d0f7728b65e83a16c0
|
import Tools.HTML
if __name__ == "__main__":
import sys,os
append_path = os.path.abspath(sys.argv[0])[:-15]
print("Append to PYTHONPATH: %s" % (append_path))
sys.path.append(append_path)
import re
import Tools.web as web
from Tools.file2 import file2
from Geometry import Scan,IRC,Geom,ListGeoms
from ElectronicStructure import ElectronicStructure
from Containers import AtomicProps
import logging
log = logging.getLogger(__name__)
class Orca(ElectronicStructure):
"""
Orca 4 parser
Analyzes a multiple-step calculation
"""
def __init__(self):
"""
Declares steps (type List)
"""
self.steps = []
def parse(self):
"""
Parses Orca file, step by step
"""
try:
FI = file2(self.file)
log.debug('%s was opened for reading' %(self.file))
except:
log.error('Cannot open %s for reading' %(self.file))
while True:
step = OrcaStep(FI)
step.parse()
step.postprocess()
if step.blanc:
break
self.steps.append(step)
FI.close()
log.debug('%s parsed successfully' % (self.file))
return
def webdata(self):
"""
Returns 2 strings with HTML code
"""
we = self.settings.Engine3D()
b1,b2,bb1,bb2,i = '','','','',1
MaxGeoms, n_Freq = 0, 0
b1s = []
for step in self.steps:
MaxGeoms = max(MaxGeoms,len(step.geoms))
if step.vector:
n_Freq = i
self.settings.subcounter += 1
step.statfile = self.settings.real_path('.stat')
b1, b2 = step.webdata(StartApplet=False)
labeltext = '%s: %s' %(step.JobType,step.lot)
b1s.append([b1,labeltext.upper()])
bb2 += b2
i += 1
if b1s:
bb1 = we.JMolApplet(ExtraScript = b1s[n_Freq-1][0])
if MaxGeoms > 1:
bb1 += Tools.HTML.brn + we.html_geom_play_controls()
if n_Freq:
bb1 += Tools.HTML.brn + we.html_vibration_switch()
if len(b1s)>1:
bb1 += Tools.HTML.brn * 2
# add buttons for each step
for b1 in b1s:
bb1 += we.html_button(*b1)
log.debug('webdata generated successfully')
return bb1, bb2
def usage(self):
for step in self.steps:
step.usage()
class OrcaStep(ElectronicStructure):
"""
Works with a single calculation step
"""
def parse(self):
"""
Actual parsing happens here
"""
t_ifreq_done = False
self.all_coords = {}
        s = 'BLANC'  # It has to be initialized!
while not self.FI.eof:
next(self.FI)
if self.FI.eof:
break
s = self.FI.s.rstrip()
#
# ---------------------------------------- Read in cartesian coordinates ----------------------------------
#
# Have we found coords?
enter_coord = False
if s.find('CARTESIAN COORDINATES (ANGSTROEM)')==0:
coord_type = 'Cartesian Coordinates (Ang)'
enter_coord = True
# If yes, then read them
if enter_coord:
try:
# Positioning
dashes1 = next(self.FI)
s = next(self.FI)
# Read in coordinates
geom = Geom()
atnames = []
while len(s)>1:
xyz = s.strip().split()
try:
atn, x,y,z = xyz[0], xyz[1],xyz[2],xyz[3]
except:
log.warning('Error reading coordinates:\n%s' % (s))
break
atnames.append(atn)
geom.coord.append('%s %s %s %s' % (atn,x,y,z))
s = next(self.FI)
# Add found coordinate to output
pc = AtomicProps(attr='atnames',data=atnames)
geom.addAtProp(pc,visible=False) # We hide it, because there is no use to show atomic names for each geometry using checkboxes
if not coord_type in self.all_coords:
self.all_coords[coord_type] = {'all':ListGeoms(),'special':ListGeoms()}
self.all_coords[coord_type]['all'].geoms.append(geom)
except StopIteration:
log.warning('EOF while reading geometry')
break
#
# ------------------------------------------- Route lines -------------------------------------------------
#
if s.find('Your calculation utilizes the basis')==0:
self.basis = s.split()[5]
if s.find(' Ab initio Hamiltonian Method')==0:
self.lot = s.split()[5]
if s.find(' Exchange Functional')==0:
self.lot = s.split()[4]
if s.find(' Correlation Functional')==0:
s_corr = s.split()[4]
if s_corr != self.lot:
                    self.lot = self.lot + s_corr
if s.find('Correlation treatment')==0:
self.lot = s.split()[3]
if s.find('Perturbative triple excitations ... ON')==0:
self.lot += '(T)'
if s.find('Calculation of F12 correction ... ON')==0:
self.lot += '-F12'
if s.find('Integral transformation ... All integrals via the RI transformation')==0:
self.lot += '-RI'
if s.find('K(C) Formation')==0:
if 'RI' in s and 'RI' in self.lot:
self.lot = self.lot.replace('RI',s.split()[3])
else:
self.lot += '+'+s.split()[3]
if s.find('Hartree-Fock type HFTyp')==0:
if s.split()[3]=='UHF':
self.openShell = True
if s.find('T1 diagnostic')==0:
self.T1_diagnostic = s.split()[3]
if s.find('E(CCSD(T)) ...')==0:
self.postHF_lot.append('CCSD(T)')
self.postHF_e.append(s.split()[2])
self.postHF["CCSD(T)"]=s.split()[2]
if s.find('E(CCSD) ...')==0:
self.postHF_lot.append('CCSD')
self.postHF_e.append(s.split()[2])
self.postHF["CCSD"]=s.split()[2]
if s.find(' * SCF CONVERGED AFTER')==0:
self.FI.skip_until('Total Energy')
self.scf_e = float(self.FI.s.split()[3])
self.scf_done = True
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addProp('e', self.scf_e) # TODO Read in something like self.best_e instead!
# S^2
if s.find('Expectation value of <S**2> :')==0:
s_splitted = s.split()
before = s_splitted[5]
self.s2 = before
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addProp('s2',self.s2)
if s.find(' * Geometry Optimization Run *')==0:
self.JobType = 'opt'
if 'opt' in self.JobType:
if s.find(' ----------------------|Geometry convergence')==0:
self.opt_iter += 1
try:
next(self.FI) # skip_n Item value
next(self.FI) # skip_n ------
for conv in ('max_force','rms_force','max_displacement','rms_displacement'):
s = next(self.FI)
x, thr = float(s.split()[2]),float(s.split()[3])
conv_param = getattr(self,conv)
conv_param.append(x-thr)
for ct in self.all_coords.values():
if ct['all']:
ct['all'][-1].addProp(conv, x-thr)
except:
log.warning('EOF in the "Converged?" block')
break
if s.find(' *** THE OPTIMIZATION HAS CONVERGED ***')==0:
self.opt_ok = True
#
# -------------------------------------------- Scan -------------------------------------------------------
#
if s.find(' * Relaxed Surface Scan *')==0:
self.JobType = 'scan'
if 'scan' in self.JobType:
"""
Order of scan-related parameters:
1. Geometry,
2. Energy calculated for that geometry
3. Optimization convergence test
If Stationary point has been found, we already have geometry with energy attached as prop, so we just pick it up
"""
# Memorize scan geometries
if s.find(' *** THE OPTIMIZATION HAS CONVERGED ***')==0:
for ct in self.all_coords.values():
if ct['all']:
ct['special'].geoms.append(ct['all'][-1])
# Record scanned parameters
# Designed to work properly only for 1D scans!
if s.find(' * RELAXED SURFACE SCAN STEP')==0:
next(self.FI)
s = next(self.FI)
param = s[12:45].strip()
# Will work properly only for bonds at this point
                mt = re.compile(r'Bond \((.*?),(.*?)\)').match(param)
param = 'Bond(' + str(1+int(mt.group(1))) + ',' + str(1+int(mt.group(2))) + ')'
param_full = float(s[46:59].strip())
#print('|'+s[46:59]+'|'+str(param_full))
for ct in self.all_coords.values():
if ct['special']:
ct['special'][-1].addProp(param,param_full)
#
# ---------------------------------------- Read simple values ---------------------------------------------
#
#Nproc
            if s.find(' * Program running with') == 0:
self.n_cores = s.split()[4]
# Read Symmetry
if s.find('POINT GROUP')==0:
self.sym = s.split()[3]
# Read charge_multmetry
            if s.find('Total Charge Charge')==0:
                self.charge = s.split()[-1]
            if s.find('Multiplicity Mult')==0:
                self.mult = s.split()[-1]
if 'ORCA TERMINATED NORMALLY' in s:
self.OK = True
next(self.FI)
break
        # We got here either by reaching EOF or after normal termination
self.blanc = (s=='BLANC')
return
def postprocess(self):
#
# ======================================= Postprocessing ======================================================
#
if self.lot_suffix:
self.lot += self.lot_suffix
"""
Choose coordinates to show in JMol
"""
if self.freqs and self.freqs[0]<0:
order = ('Standard','Input','Cartesian Coordinates (Ang)','Z-Matrix')
else:
order = ('Input','Cartesian Coordinates (Ang)','Z-Matrix','Standard')
for to in order:
if to in self.all_coords:
nst = len(self.all_coords[to]['all'].geoms)
if nst > self.n_steps:
self.n_steps = nst
# choose geometries to show
for tp in ('special','all'):
for to in order:
if to in self.all_coords and self.all_coords[to][tp]:
self.geoms = self.all_coords[to][tp]
break
if self.geoms:
log.debug('%s orientation used' % (to))
break
del self.all_coords
if 'irc' in self.JobType:
self.series = IRC(other=self.geoms)
self.series.direction = self.irc_direction
self.series.both = self.irc_both
del self.irc_direction
del self.irc_both
if 'scan' in self.JobType:
self.series = Scan(other=self.geoms)
if self.freqs and self.geoms:
if self.OK:
self.geoms.geoms = [self.geoms[-1],]
log.debug('Orca step (%s) parsed successfully' %(self.JobType))
return
def usage(self):
s = ''
s += 'Computation Node: %s\n' % (self.machine_name)
if hasattr(self,'n_cores'):
s+= '#Cores: %s\n' % (self.n_cores)
s += 'Level of Theory: %s\n' % (self.lot)
s += 'Job type: %s\n' % (self.JobType)
if self.solvent:
s += 'Solvent: %s\n' % (self.solvent)
s += 'Open Shell: %i\n' % (self.openShell)
s += '#Atoms: %i\n' % (self.n_atoms)
s += '#Electrons: %i\n' % (self.n_electrons)
s += '#Gaussian Primitives %i\n' % (self.n_primitives)
if 'opt' in self.JobType:
s += '#Opt Steps %s\n' % (self.n_steps)
if 'td' in self.JobType:
s += '#Excited States %s\n' % (self.n_states)
s += '#SU %.1f\n' % (self.time)
FS = open(self.statfile,'w')
FS.write(s)
FS.close()
#print s
#
#
#
#
#
if __name__ == "__main__":
DebugLevel = logging.DEBUG
logging.basicConfig(level=DebugLevel)
from Settings import Settings
from Top import Top
Top.settings = Settings(from_config_file= True)
Top.settings.selfPath=append_path
from Tools.HTML import HTML
WebPage = HTML()
WebPage.readTemplate()
f = Orca()
f.file = sys.argv[1]
#import profile
#profile.run('f.parse()')
f.parse()
f.postprocess()
#print(f.steps[0])
b1, b2 = f.webdata()
WebPage.addTableRow(str(f.file) + Tools.HTML.brn + b1, b2)
WebPage.write()
|
talipovm/terse
|
terse/Interface/Orca.py
|
Python
|
mit
| 14,537
|
[
"Gaussian",
"Jmol",
"ORCA"
] |
ca60c71f83ebbb8cfffbdbbd4150cf90ef6a7b3cb9b493b5bd366ce8e79c858a
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2016 - Gabriel Acosta <acostadariogabriel@gmail.com>
#
# This file is part of Pireal.
#
# Pireal is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Pireal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pireal; If not, see <http://www.gnu.org/licenses/>.
"""
<Compound> : <Query>
| <Query> <Compund>
<Query> : <Assignment> SEMI
<Assignment> : <Variable> ASSIGNMENT <Expression>
<Expression> : <Variable>
| LPAREN <Expression> RPAREN
| <SelectExpression>
| <ProjectExpression>
| <BinaryExpression>
<SelectExpression> : SELECT <BooleanExpression> LPAREN <Expression> RPAREN
<ProjectExpression> : PROJECT <AttributeList> LPAREN <Expression> RPAREN
<BinaryExpression> : <Expression> NJOIN <Expression>
| <Expression> LOUTER <Expression>
| <Expression> ROUTER <Expression>
| <Expression> FOUTER <Expression>
| <Expression> PRODUCT <Expression>
| <Expression> DIFFERENCE <Expression>
| <Expression> INTERSECT <Expression>
| <Expression> UNION <Expression>
<BooleanExpression> : <Formula>
| <Formula> AND <Formula>
| <Formula> OR <Formula>
<AttributeList> : <Variable>
| <Variable> COMMA <AttributeList>
<Formula> : <Operand> <Operator> <Operand>
<Operand> : <Variable>
| <Literal>
<Operator> : EQUAL
| NOTEQUAL
| LESS
| GREATER
| LEQUAL
| GEQUAL
<Variable> : ID
| <Literal>
<Literal> : INTEGER
| REAL
| STRING
| DATE
| TIME
"""
from pireal.interpreter.tokens import (
TokenTypes,
BINARY_OPERATORS,
)
from pireal.interpreter.exceptions import ConsumeError
from pireal.interpreter.exceptions import DuplicateRelationNameError
from pireal.interpreter.scanner import Scanner
from pireal.interpreter.lexer import Lexer
from pireal.interpreter import rast as ast
class Parser(object):
"""The Parser is the part that really understands the syntax of
the language. It calls the Lexer to get tokens and processes the tokens
per the syntax of the language.
"""
def __init__(self, lexer):
self.lexer = lexer
self.token = self.lexer.next_token()
def consume(self, token_type):
"""Consume a token of a given type and get the next token.
If the current token is not of the expected type, then
raise an error
"""
if self.token.type == token_type:
self.token = self.lexer.next_token()
else:
raise ConsumeError(token_type, self.token.type, self.lexer.sc.lineno)
def parse(self):
return self.compound()
def compound(self):
nodes = []
while self.token.type is not TokenTypes.EOF:
nodes.append(self.query())
compound = ast.Compound()
compound.children = nodes
return compound
def query(self):
node = self.assignment()
self.consume(TokenTypes.SEMI)
return node
def assignment(self):
variable = self.variable()
self.consume(TokenTypes.ASSIGNMENT)
expr = self.expression()
node = ast.Assignment(variable, expr)
return node
def expression(self):
if self.token.type is TokenTypes.PROJECT:
node = self.project_expression()
elif self.token.type is TokenTypes.SELECT:
node = self.select_expression()
elif self.token.type is TokenTypes.LPAREN:
self.consume(TokenTypes.LPAREN)
node = self.expression()
self.consume(TokenTypes.RPAREN)
elif self.token.type is TokenTypes.ID:
node = self.variable()
if self.token.value in BINARY_OPERATORS:
# Binary expression
# now, node is left node in binary expression
token_type = BINARY_OPERATORS[self.token.value]
self.consume(self.token.type)
node = ast.BinaryOp(
left=node,
op=token_type,
right=self.expression(), # to allow (<Expression>)
)
return node
def variable(self):
node = ast.Variable(self.token)
self.consume(TokenTypes.ID)
return node
def project_expression(self):
self.consume(TokenTypes.PROJECT)
attributes = self.attributes()
self.consume(TokenTypes.LPAREN)
expr = self.expression()
self.consume(TokenTypes.RPAREN)
node = ast.ProjectExpr(attributes, expr)
return node
def select_expression(self):
self.consume(TokenTypes.SELECT)
boolean_expr = self.boolean_expression()
self.consume(TokenTypes.LPAREN)
expr = self.expression()
self.consume(TokenTypes.RPAREN)
node = ast.SelectExpr(boolean_expr, expr)
return node
def boolean_expression(self):
node = self.formula()
while self.token.type is TokenTypes.AND or self.token.type is TokenTypes.OR:
if self.token.type is TokenTypes.AND:
boolean_operator = TokenTypes.AND
self.consume(TokenTypes.AND)
elif self.token.type is TokenTypes.OR:
boolean_operator = TokenTypes.OR
self.consume(TokenTypes.OR)
boolean_node = ast.BooleanExpression(
left_formula=node,
operator=boolean_operator,
right_formula=self.formula(),
)
node = boolean_node
return node
def formula(self):
left_operand = self.operand()
operator = self.operator()
right_operand = self.operand()
node = ast.Condition(left_operand, operator, right_operand)
return node
def operand(self):
if self.token.type is TokenTypes.ID:
node = self.variable()
else:
node = self.literal()
return node
def literal(self):
if self.token.type is TokenTypes.INTEGER:
node = ast.Number(self.token)
self.consume(TokenTypes.INTEGER)
elif self.token.type is TokenTypes.REAL:
node = ast.Number(self.token)
self.consume(TokenTypes.REAL)
elif self.token.type is TokenTypes.STRING:
node = ast.String(self.token)
self.consume(TokenTypes.STRING)
elif self.token.type is TokenTypes.DATE:
node = ast.Date(self.token)
self.consume(TokenTypes.DATE)
elif self.token.type is TokenTypes.TIME:
node = ast.Time(self.token)
self.consume(TokenTypes.TIME)
return node
def operator(self):
node = self.token
operators = [
TokenTypes.EQUAL,
TokenTypes.NOTEQUAL,
TokenTypes.LESS,
TokenTypes.LEQUAL,
TokenTypes.GREATER,
TokenTypes.GEQUAL,
]
index = operators.index(node.type)
op = operators[index]
self.consume(op)
return node
def attributes(self):
"""Return a list of ast.Variable nodes"""
node = self.variable()
attribute_list = [node]
while self.token.type is TokenTypes.COMMA:
self.consume(TokenTypes.COMMA)
attribute_list.append(self.variable())
return attribute_list
class Interpreter(ast.NodeVisitor):
"""Este objeto es el encargado de 'visitar' los nodos con el
método Interpreter.to_python(), que convierte a un string que luego
es evaluado como código Python
`global_memory` es un diccionario ordenado que guarda las consultas 'reales'
que serán evaluadas, hace de "symbol table".
"""
def __init__(self, tree):
self.tree = tree
self.global_memory = {}
def to_python(self):
return self.visit(self.tree)
def visit_Compound(self, node):
for child in node.children:
self.visit(child)
def visit_Assignment(self, node):
rname = self.visit(node.rname)
if rname in self.global_memory:
raise DuplicateRelationNameError(rname)
self.global_memory[rname] = self.visit(node.query)
def visit_BinaryOp(self, node):
left = self.visit(node.left)
right = self.visit(node.right)
return "{0}.{1}({2})".format(left, node.token.value, right)
def visit_Number(self, node):
return node.num
def visit_ProjectExpr(self, node):
attrs = [i.value for i in node.attrs]
expr = self.visit(node.expr)
return "{0}.project({1})".format(
expr, ", ".join("'{0}'".format(i) for i in attrs)
)
def visit_SelectExpr(self, node):
bool_expr = self.visit(node.condition)
expr = self.visit(node.expr)
return f'{expr}.select("{bool_expr}")'
def visit_BooleanExpression(self, node):
left_formula = self.visit(node.left_formula)
right_formula = self.visit(node.right_formula)
return f"{left_formula} {node.operator.value} {right_formula}"
def visit_Condition(self, node):
left_operand = self.visit(node.op1)
operator = node.operator.value
right_operand = self.visit(node.op2)
# Convert RA operator to valid Python operator
map_operators = {"=": "==", "<>": "!="}
operator = map_operators.get(operator, operator)
return f"{left_operand} {operator} {right_operand}"
def visit_Variable(self, node):
return node.value
def visit_Date(self, node):
return repr(node.date)
def visit_Time(self, node):
return repr(node.time)
def visit_String(self, node):
return repr(node.string)
def clear(self):
self.global_memory.clear()
def interpret(query: str):
return parse(query)
def parse(query: str) -> dict:
scanner = Scanner(query)
lexer = Lexer(scanner)
try:
parser = Parser(lexer)
tree = parser.parse()
except Exception as exc:
print(exc)
return {}
else:
interpreter = Interpreter(tree)
interpreter.to_python()
return interpreter.global_memory
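# Minimal usage sketch for this module; the query string is an assumption
# about the concrete keyword spelling accepted by the lexer.
if __name__ == "__main__":
    # parse() only translates queries into Python expression strings; the
    # relation 'people' would have to exist when they are finally evaluated.
    memory = parse("q1 := select age > 25 (people);")
    for name, python_expr in memory.items():
        print(name, "->", python_expr)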
|
centaurialpha/pireal
|
src/pireal/interpreter/parser.py
|
Python
|
gpl-3.0
| 11,042
|
[
"VisIt"
] |
9e30968f58becd25b2ec9e0582717bac18ff47b78cd2c173638cdcb561840737
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtCore, QtWidgets
import peacock
class MeshBlockSelectorWidget(peacock.base.MooseWidget, QtWidgets.QWidget):
"""
A generic widget for controlling visible blocks, nodesets, and sidesets of the current mesh.
Args:
        block_type[int]: The block type from vtk (see BlockControls.py).
"""
selectionChanged = QtCore.pyqtSignal()
def __init__(self, block_type, title, **kwargs):
super(MeshBlockSelectorWidget, self).__init__(**kwargs)
self._title = title
self._type = block_type
self.MainLayout = QtWidgets.QVBoxLayout()
self.MainLayout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.MainLayout)
self.Label = QtWidgets.QLabel(self)
self.MainLayout.addWidget(self.Label)
self.Label.setText(self._title)
self.Options = QtWidgets.QComboBox(self)
self.Options.setFocusPolicy(QtCore.Qt.StrongFocus)
self.MainLayout.addWidget(self.Options)
self.Options.currentTextChanged.connect(self.comboTextChanged)
self._blocks = {}
self.setup()
def updateBlocks(self, reader, force=False):
"""
Initialization function, which is called by the main tab widget.
Args:
            reader[chigger.ExodusReader]: The current reader object.
            force[bool]: When True, repopulate the options even if the blocks are unchanged.
"""
blocks = reader.getBlockInformation()[self._type]
if blocks != self._blocks or force:
self.Options.clear()
self._blocks = blocks
self.Options.addItem("")
for block in sorted(blocks.values(), key=lambda b: b.name):
self.Options.addItem(block.name)
self.Options.setCurrentIndex(0)
def getBlocks(self):
"""
Return the currently selected block name as a single-item list, or None when nothing is selected.
"""
if self.isEnabled() and self.Options.currentText() != "":
return [self.Options.currentText()]
else:
return None
def reset(self):
"""
Set the current text to "" without emitting any signals.
"""
self.Options.blockSignals(True)
self.Options.setCurrentText("")
self.Options.blockSignals(False)
def comboTextChanged(self, text):
self.selectionChanged.emit()
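# --- Usage sketch (illustrative only, not part of the original file) ---
# A hedged example of embedding the widget inside a running Qt application;
# the BLOCK_TYPE constant and the reader are assumptions for illustration:
#
#   import sys
#   app = QtWidgets.QApplication(sys.argv)
#   BLOCK_TYPE = 0  # hypothetical vtk block type id (see BlockControls.py)
#   widget = MeshBlockSelectorWidget(BLOCK_TYPE, 'Blocks:')
#   widget.selectionChanged.connect(lambda: print(widget.getBlocks()))
#   # widget.updateBlocks(reader)  # reader: a chigger.ExodusReader instance
#   widget.show()
#   sys.exit(app.exec_())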
|
nuclear-wizard/moose
|
python/peacock/Input/MeshBlockSelectorWidget.py
|
Python
|
lgpl-2.1
| 2,568
|
[
"MOOSE",
"VTK"
] |
d55b8452cd8351085b6dca23e33dfa02ff6cbc409eee52d07a0a12dd64a25cae
|
"""
Django settings for homeworks project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ADMINS = (('Brian', 'brian@koebbe.org'), )
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'qa.apps.QaConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'visit',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'homeworks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'homeworks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
LOGIN_REDIRECT_URL = '/'
#
# GMAIL
#
from . import passwd
from .passwd import SECRET_KEY
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = passwd.GMAIL_USERNAME
EMAIL_HOST_PASSWORD = passwd.GMAIL_PASSWORD
#DEFAULT_FROM_EMAIL = 'my gmail account'
#DEFAULT_TO_EMAIL = 'to email'
AUTHENTICATION_BACKENDS = (
'homeworks.backend.CaseInsensitiveModelBackend', # inherits from 'django.contrib.auth.backends.ModelBackend'
)
ALLOWED_HOSTS = ('portal.teacherhomevisit.org',)  # overrides the empty ALLOWED_HOSTS defined above
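# --- Illustrative note (not part of the original settings file) ---
# The imports above expect a sibling module homeworks/passwd.py that is kept
# out of version control. A minimal sketch of what it is assumed to contain:
#
#   # homeworks/passwd.py (hypothetical; do not commit real credentials)
#   SECRET_KEY = 'replace-with-a-long-random-string'
#   GMAIL_USERNAME = 'example@gmail.com'
#   GMAIL_PASSWORD = 'app-specific-password'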
|
koebbe/homeworks
|
homeworks/settings.py
|
Python
|
mit
| 3,306
|
[
"Brian",
"VisIt"
] |
c09a9dcda48f0b7c971ade441debd56ee90093705ddba3ece2baca00e3cb45ff
|
from model_mommy import mommy
# def test_simple(timbrowser):
# timbrowser.visit(timbrowser.url)
# assert timbrowser.is_text_present('Timtec')
def test_login(timbrowser, user):
b = timbrowser
b.visit(b.url)
b.find_by_xpath('//a[normalize-space(text())="Entrar"]').click()
assert b.is_element_present_by_css('.open .dropdown-menu')
b.fill('login', user.username)
b.fill('password', 'password')
b.find_by_css('.submit .btn-success').first.click()
assert b.is_element_present_by_css('.username')
def test_courses_home(timbrowser):
b = timbrowser
mommy.make('Course', name='FindMe', home_published=True)
b.visit(b.url)
assert len(b.find_by_css('.course')) >= 1
assert b.is_element_present_by_xpath('//h3[normalize-space(text())="FindMe"]')
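# --- Illustrative sketch (not part of the original test file) ---
# The `timbrowser` and `user` fixtures used above are defined elsewhere
# (typically a conftest.py). A minimal, assumed shape of those fixtures,
# using splinter for the browser; names and details are guesses:
#
#   # conftest.py (hypothetical)
#   import pytest
#   from splinter import Browser
#   from model_mommy import mommy
#
#   @pytest.fixture
#   def timbrowser(live_server):
#       b = Browser('firefox')
#       b.url = live_server.url
#       yield b
#       b.quit()
#
#   @pytest.fixture
#   def user(db):
#       u = mommy.make('auth.User', username='aluno')
#       u.set_password('password')
#       u.save()
#       return u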
|
mupi/tecsaladeaula
|
tests/test_fullstack.py
|
Python
|
agpl-3.0
| 802
|
[
"VisIt"
] |
1e3b32d40aaac555269af2a4450890522849807e9dfd519b21b8e652c4657df9
|
# Copyright (c) 2013, ReMake Electric ehf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Message generator implementations
"""
import string
import random
import time
def GaussianSize(cid, sequence_size, target_size):
"""
Message generator creating gaussian distributed message sizes
centered around target_size with a deviance of target_size / 20
"""
num = 1
while num <= sequence_size:
topic = "mqtt-malaria/%s/data/%d/%d" % (cid, num, sequence_size)
real_size = int(random.gauss(target_size, target_size / 20.0))
payload = ''.join(random.choice(string.hexdigits) for _ in range(real_size))
yield (num, topic, payload)
num = num + 1
def TimeTracking(generator):
"""
Wrap an existing generator by prepending time tracking information
to the start of the payload.
"""
for a, b, c in generator:
newpayload = "{:f},{:s}".format(time.time(), c)
yield (a, b, newpayload)
def RateLimited(generator, msgs_per_sec):
"""
Wrap an existing generator in a rate limit.
This will probably behave somewhat poorly at high rates per sec, as it
simply uses time.sleep(1/msg_rate)
"""
for x in generator:
yield x
time.sleep(1.0 / msgs_per_sec)
def JitteryRateLimited(generator, msgs_per_sec, jitter=0.1):
"""
Wrap an existing generator in a (jittery) rate limit.
This will probably behave somewhat poorly at high rates per sec, as it
simply uses time.sleep(1/msg_rate)
"""
for x in generator:
yield x
desired = 1.0 / msgs_per_sec
extra = random.uniform(-jitter * desired, jitter * desired)
time.sleep(desired + extra)
def createGenerator(label, options, index=None):
"""
Handle creating an appropriate message generator based on a set of options
index, if provided, will be appended to label
"""
cid = label
if index:
cid += "_" + str(index)
msg_gen = GaussianSize(cid, options.msg_count, options.msg_size)
if options.timing:
msg_gen = TimeTracking(msg_gen)
if options.msgs_per_second > 0:
if options.jitter > 0:
msg_gen = JitteryRateLimited(msg_gen,
options.msgs_per_second,
options.jitter)
else:
msg_gen = RateLimited(msg_gen, options.msgs_per_second)
return msg_gen
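# --- Usage sketch (illustrative only, not part of the original module) ---
# Consume a composed generator: Gaussian-sized payloads, time-tracked and
# limited to ~5 msgs/sec. The _Options class is a hypothetical stand-in for
# the argparse namespace the real caller passes in.
if __name__ == '__main__':
    class _Options(object):
        msg_count = 3
        msg_size = 64
        timing = True
        msgs_per_second = 5.0
        jitter = 0
    for seq, topic, payload in createGenerator('demo', _Options()):
        print(seq, topic, len(payload))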
|
remakeelectric/mqtt-malaria
|
beem/msgs.py
|
Python
|
bsd-2-clause
| 3,699
|
[
"Gaussian"
] |
ce8f66c37617a95ad1501e87b9870479f38968e27452504f3fd929297db7878b
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige import *
from xml.dom.minidom import Node
from ige.IObject import IObject
from ige.IDataHolder import IDataHolder
from Const import *
import Rules, Utils, math, random, copy
from ige import log
class ISystem(IObject):
typeID = T_SYSTEM
def init(self, obj):
IObject.init(self, obj)
#
obj.x = 0.0
obj.y = 0.0
obj.planets = []
obj.fleets = []
obj.closeFleets = []
obj.starClass = u'---' # Star classification
obj.signature = 100
# rotation
#~ obj.dist = 0.0
#~ obj.dAngle = 0.0
#~ obj.sAngle = 0.0
# renaming
obj.lastNameChng = 0
# combat
obj.combatCounter = 0
# system wide data
obj.scannerPwrs = {}
# mine field
obj.minefield = {}
def update(self, tran, obj):
# check existence of all planets
if 0:
for planetID in obj.planets:
if not tran.db.has_key(planetID):
log.debug("CONSISTENCY - planet %d from system %d does not exists" % (planetID, obj.oid))
elif tran.db[planetID].type != T_PLANET:
log.debug("CONSISTENCY - planet %d from system %d is not a T_PLANET" % (planetID, obj.oid))
if not hasattr(obj,'minefield'):
obj.minefield = {}
# check that all .fleet are in .closeFleets
for fleetID in obj.fleets:
if fleetID not in obj.closeFleets:
log.debug("CONSISTENCY - fleet %d is in .fleet but not in .closeFleets - adding" % fleetID)
obj.closeFleets.append(fleetID)
# check existence of all fleets
for fleetID in obj.closeFleets:
if not tran.db.has_key(fleetID):
log.debug("CONSISTENCY - fleet %d from system %d does not exists" % (fleetID, obj.oid))
elif tran.db[fleetID].type not in (T_FLEET, T_ASTEROID):
log.debug("CONSISTENCY - fleet %d from system %d is not a T_FLEET" % (fleetID, obj.oid))
# delete nonexistent fleets
index = 0
while index < len(obj.closeFleets):
fleet = tran.db.get(obj.closeFleets[index], None)
if fleet is None:
log.debug("CONSISTENCY - fleet %d does not exists" % obj.closeFleets[index])
fleetID = obj.closeFleets[index]
obj.closeFleets.remove(fleetID)
obj.fleets.remove(fleetID)
else:
index += 1
# check compOf
if not tran.db.has_key(obj.compOf) or tran.db[obj.compOf].type != T_GALAXY:
log.debug("CONSISTENCY invalid compOf for system", obj.oid)
# rebuild closeFleets attribute
old = obj.closeFleets
obj.closeFleets = []
for fleetID in old:
fleet = tran.db.get(fleetID, None)
if fleet and fleet.closeSystem == obj.oid and fleetID not in obj.closeFleets:
obj.closeFleets.append(fleetID)
if old != obj.closeFleets:
log.debug("System close fleets fixed", obj.oid, old, obj.closeFleets)
# TODO: remove, no need to start players on random systems
# try to find starting planets
#starting = 0
#free = 1
#for planetID in obj.planets:
# planet = tran.db[planetID]
# if planet.plStarting:
# starting = planetID
# if planet.owner != OID_NONE:
# free = 0
#if starting and free:
# # good starting position
# #@log.debug("Found starting position", obj.oid, starting)
# # get galaxy
# galaxy = tran.db[obj.compOf]
# if starting not in galaxy.startingPos:
# log.debug("Adding to starting positions of galaxy", galaxy.oid)
# galaxy.startingPos.append(starting)
# check if system has planets
hasHabitable = 0
for planetID in obj.planets:
if tran.db[planetID].plSlots > 0:
hasHabitable = 1
break
if (not obj.planets or not hasHabitable) and obj.starClass[0] != "b" and obj.starClass != "wW0":
log.debug("No planet for system", obj.oid, obj.name, obj.starClass)
# delete old planets
for planetID in obj.planets:
del tran.db[planetID]
obj.planets = []
# find matching systems
avail = []
for systemID in tran.db[obj.compOf].systems:
system = tran.db[systemID]
if system.starClass[1] == obj.starClass[1] \
or (obj.starClass[1] == "G" and system.starClass[1] == "F"):
ok = 0
for planetID in system.planets:
planet = tran.db[planetID]
if planet.plStarting:
ok = 0
break
if planet.plSlots > 0:
ok = 1
if ok and system.planets:
avail.append(systemID)
# select random system
import random
log.debug("Can copy", avail)
try:
systemID = random.choice(avail)
# copy it
log.debug("Will copy system", systemID)
nType = Utils.getPlanetNamesType()
orbit = 1
for planetID in tran.db[systemID].planets:
orig = tran.db[planetID]
planet = tran.db[self.createPlanet(tran, obj)]
planet.name = Utils.getPlanetName(obj.name, nType, orbit - 1)
planet.x = obj.x
planet.y = obj.y
planet.plDiameter = orig.plDiameter
planet.plType = orig.plType
planet.plMin = orig.plMin
planet.plBio = orig.plBio
planet.plEn = orig.plEn
planet.plEnv = orig.plEnv
planet.plSlots = orig.plSlots
planet.plMaxSlots = orig.plMaxSlots
planet.plStratRes = 0
planet.plDisease = 0
planet.plStarting = 0
planet.orbit = orbit
planet.storPop = 0
planet.slots = []
orbit += 1
except:
log.debug("Copy failed")
update.public = 0
def getReferences(self, tran, obj):
return obj.planets
getReferences.public = 0
def getScanInfos(self, tran, obj, scanPwr, player):
result = IDataHolder()
results = [result]
if scanPwr >= Rules.level1InfoScanPwr:
result._type = T_SCAN
result.scanPwr = scanPwr
result.oid = obj.oid
result.x = obj.x
result.y = obj.y
if hasattr(obj, 'destinationOid'):
result.destinationOid = obj.destinationOid
# multiply by 1000 to increase accuracy
#~ result.dist = obj.dist * 1000
#~ result.dAngle = obj.dAngle * 1000
#~ result.sAngle = obj.sAngle * 1000
result.signature = obj.signature
result.type = obj.type
result.compOf = obj.compOf
result.starClass = obj.starClass
if scanPwr >= Rules.level2InfoScanPwr:
result.name = obj.name
result.combatCounter = obj.combatCounter
if scanPwr >= Rules.level3InfoScanPwr:
result.planets = obj.planets
result.owner = obj.owner
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == player: ####### This was player.owner, which made no sense. Hope this change doesn't break something
continue
newPwr = scanPwr * planet.signature / obj.signature
results.extend(self.cmd(planet).getScanInfos(tran, planet, newPwr, player))
if scanPwr >= Rules.level4InfoScanPwr:
result.fleets = obj.fleets
for fleetID in obj.fleets:
fleet = tran.db[fleetID]
if fleet.owner == player:
continue
newPwr = scanPwr * fleet.signature / obj.signature
results.extend(self.cmd(fleet).getScanInfos(tran, fleet, newPwr, player))
result.hasmines = 0 #no
if len(obj.minefield) > 0:
result.hasmines = 1 #yes
result.minefield = self.getMines(obj,player.oid) #only shows mines you own
if len(obj.minefield) > 1 or (len(obj.minefield) == 1 and len(result.minefield) == 0):
result.hasmines = 2 #yes, and some aren't my mines
return results
def processINITPhase(self, tran, obj, data):
obj.scannerPwrs = {}
processINITPhase.public = 1
processINITPhase.accLevel = AL_ADMIN
def processPRODPhase(self, tran, obj, data):
#mine deployment
owners = []
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner not in owners:
owners.append(planet.owner)
for ownerid in owners:
tech,structtech = self.getSystemMineLauncher(tran,obj,ownerid)
if tech==0: #no control structure
continue
owner = tran.db[ownerid]
turn = tran.db[OID_UNIVERSE].turn
minerate = int(tech.minerate / Rules.techImprEff[owner.techs.get(structtech, Rules.techBaseImprovement)])
minenum = int(tech.minenum * Rules.techImprEff[owner.techs.get(structtech, Rules.techBaseImprovement)])
if (turn%minerate)==0: #it is the launch turn
self.addMine(obj,ownerid,tech.mineclass,minenum)
log.debug('ISystem', 'Mine deployed for owner %d in system %d' % (ownerid, obj.oid))
return obj.planets
processPRODPhase.public = 1
processPRODPhase.accLevel = AL_ADMIN
def processACTIONPhase(self, tran, obj, data):
# distribute resources
planets = {}
# group planets by owner
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner != OID_NONE:
tmp = planets.get(planet.owner, [])
tmp.append(planet)
planets[planet.owner] = tmp
# group planets if owners are allied
# TODO
# process each group
for owner in planets.keys():
# skip alone planets
if len(planets[owner]) < 2:
continue
# process each resource
for resName in ('Bio', 'En'):
donors = []
donees = []
minRes = 'min%s' % resName
maxRes = 'max%s' % resName
storRes = 'stor%s' % resName
donorsSum = 0
doneesSum = 0
# put planets into donors/donees
for planet in planets[owner]:
if getattr(planet, storRes) > getattr(planet, minRes):
donors.append(planet)
donorsSum += getattr(planet, storRes) - getattr(planet, minRes)
elif getattr(planet, storRes) < getattr(planet, minRes):
donees.append(planet)
doneesSum += getattr(planet, minRes) - getattr(planet, storRes)
#@log.debug('ISystem', obj.oid, 'Donors / donees for %s' % resName, donorsSum, doneesSum)
# there are requests for donation and there is somebody able to donate
if doneesSum > 0 and donorsSum > 0:
#@log.debug('ISystem', 'Redistributin %s for' % resName, owner)
# give
balance = 0
tmpRatio = min(float(doneesSum) / donorsSum, 1.0)
for planet in donees:
diff = getattr(planet, minRes) - getattr(planet, storRes)
amount = int(float(diff) / doneesSum * donorsSum * tmpRatio)
#@log.debug('ISystem', 'Give res', planet.oid, amount)
balance -= amount
setattr(planet, storRes, getattr(planet, storRes) + amount)
# take
assert donorsSum + balance >= 0
lastPlanet = None
tmpRatio = min(float(donorsSum) / doneesSum, 1.0)
for planet in donors:
diff = getattr(planet, storRes) - getattr(planet, minRes)
amount = int(float(diff) / donorsSum * doneesSum * tmpRatio)
balance += amount
#@log.debug('ISystem', 'Take res', planet.oid, amount)
setattr(planet, storRes, getattr(planet, storRes) - amount)
lastPlanet = planet
# fix rounding error
setattr(lastPlanet, storRes, getattr(lastPlanet, storRes) + balance)
#@log.debug('ISystem', 'Rounding error', balance)
# try to move additional resources to the other planets
for planet in planets[owner]:
if getattr(planet, storRes) > getattr(planet, maxRes):
excess = getattr(planet, storRes) - getattr(planet, maxRes)
#@log.debug('ISystem', 'Trying to move excess rsrcs from', planet.oid, excess)
for planet2 in planets[owner]:
if planet == planet2:
continue
if getattr(planet2, storRes) < getattr(planet2, maxRes):
space = getattr(planet2, maxRes) - getattr(planet2, storRes)
amount = min(space, excess)
#@log.debug('ISystem', 'Moved to', planet2.oid, amount)
setattr(planet2, storRes, getattr(planet2, storRes) + amount)
excess -= amount
if excess == 0:
break
#@log.debug('ISystem', 'Cannot move excess rsrcs on', planet.oid, excess)
setattr(planet, storRes, getattr(planet, maxRes) + excess)
#~ # rotate system around the galaxy core
#~ #log.debug("Rotate, old coords", obj.x, obj.y)
#~ turn = tran.db[OID_UNIVERSE].turn
#~ galaxy = tran.db[obj.compOf]
#~ angle = obj.sAngle + (turn / Rules.rotationMod) * obj.dAngle
#~ obj.x = galaxy.x + obj.dist * math.cos(angle)
#~ obj.y = galaxy.y + obj.dist * math.sin(angle)
#~ #log.debug("Rotate, new coords", obj.x, obj.y)
#~ # change positions of planets and orbitting fleets
#~ for planetID in obj.planets:
#~ planet = tran.db[planetID]
#~ planet.x = obj.x
#~ planet.y = obj.y
#~ for fleetID in obj.fleets:
#~ fleet = tran.db[fleetID]
#~ fleet.x = obj.x
#~ fleet.y = obj.y
# process planets and fleets
#@log.debug("System close fleets", obj.oid, obj.closeFleets)
return obj.planets[:] + obj.closeFleets[:]
processACTIONPhase.public = 1
processACTIONPhase.accLevel = AL_ADMIN
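# --- Illustrative sketch (not part of the original source) ---
# The Bio/En redistribution above moves resources from planets holding more
# than their minimum ("donors") to planets below it ("donees"), scaled so
# that at most min(total surplus, total deficit) is transferred, pro rata.
# A self-contained toy version of that proportional transfer (rounding
# correction omitted), using plain dicts instead of planet objects:
#
#   def redistribute(stores, minima):
#       surplus = dict((p, s - minima[p]) for p, s in stores.items() if s > minima[p])
#       deficit = dict((p, minima[p] - s) for p, s in stores.items() if s < minima[p])
#       donors_sum, donees_sum = sum(surplus.values()), sum(deficit.values())
#       if not donors_sum or not donees_sum:
#           return stores
#       moved = min(donors_sum, donees_sum)
#       for p, d in deficit.items():
#           stores[p] += int(float(d) / donees_sum * moved)
#       for p, s in surplus.items():
#           stores[p] -= int(float(s) / donors_sum * moved)
#       return stores
#
#   # redistribute({'a': 10, 'b': 0}, {'a': 4, 'b': 4}) -> {'a': 6, 'b': 4}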
def getObjectsInSpace(self, tran, obj):
inSpace = obj.closeFleets[:]
for fleetID in obj.fleets:
try:
inSpace.remove(fleetID)
except ValueError:
log.warning(obj.oid, "Cannot remove fleet from closeFleets", fleetID, obj.fleets, obj.closeFleets)
return inSpace
getObjectsInSpace.public = 1
getObjectsInSpace.accLevel = AL_ADMIN
def processBATTLEPhase(self, tran, obj, data):
system = obj
#@log.debug('ISystem', 'BATTLE - system', obj.oid)
# we are processing fleets, planets, ...
objects = obj.planets[:] + obj.fleets[:]
# shuffle them to prevent predetermined one-sided battles (temporary hack)
random.shuffle(objects)
# store owners of objects
# find enemies and allies
attack = {}
allies = {}
owners = {}
ownerIDs = {}
systemAtt = {}
systemDef = {}
hasMine = {}
isOwnedObject = 0
for objID in objects:
attack[objID] = []
allies[objID] = []
owner = tran.db[objID].owner
owners[objID] = owner
ownerIDs[owner] = owner
if owner != OID_NONE:
isOwnedObject = 1
for owner in ownerIDs:
tempAtt, tempDef = self.getSystemCombatBonuses(tran,system,owner)
systemAtt[owner] = tempAtt
systemDef[owner] = tempDef
hasMine[owner] = self.getSystemMineSource(tran,system,owner)
if not isOwnedObject:
#@log.debug('ISystem', 'No combat')
# reset combat counters
system.combatCounter = 0
return
# first - direct ones
index = 1
for obj1ID in objects:
obj1 = tran.db[obj1ID]
if obj1.owner == OID_NONE:
index += 1
continue
commander = tran.db[obj1.owner]
# relationships
#for obj2ID in objects[index:]:
for obj2ID in objects:
obj2 = tran.db[obj2ID]
if obj2.owner == OID_NONE or obj1 is obj2:
continue
if obj1.owner == obj2.owner:
allies[obj1ID].append(obj2ID)
allies[obj2ID].append(obj1ID)
continue
# planet and military object
elif obj1.type == T_PLANET and obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_MILITARY_SHIPS):
#@log.debug("ISystem pl - mil", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# planet and civilian object
elif obj1.type == T_PLANET and not obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_CIVILIAN_SHIPS):
#@log.debug("ISystem pl - civ", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# military and military object
elif obj1.isMilitary and obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_MILITARY_SHIPS):
#@log.debug("ISystem mil - mil", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# military and civilian object
elif obj1.isMilitary and not obj2.isMilitary and \
not self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_ALLOW_CIVILIAN_SHIPS):
#@log.debug("ISystem mil - civ", obj1ID, obj2ID)
if obj2ID not in attack[obj1ID]:
attack[obj1ID].append(obj2ID)
if obj1ID not in attack[obj2ID]:
attack[obj2ID].append(obj1ID)
# planet and fleet
#elif obj1.type == T_PLANET and obj2.type == T_FLEET and \
# self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_MUTUAL_DEFENCE):
# allies[obj1ID].append(obj2ID)
# allies[obj2ID].append(obj1ID)
# fleet and fleet
#elif obj1.type == T_FLEET and obj2.type == T_FLEET and \
# self.cmd(commander).isPactActive(tran, commander, obj2.owner, PACT_MUTUAL_OFFENCE):
# allies[obj1ID].append(obj2ID)
# allies[obj2ID].append(obj1ID)
# asteroid
if obj2.type == T_ASTEROID:
attack[obj1ID].append(obj2ID)
attack[obj2ID].append(obj1ID)
index += 1
#@log.debug('ISystem', 'Targets:', targets)
#@log.debug('ISystem', 'Allies:', allies)
# find indirect a/e
#for objID in objects:
# iTargets = []
# iAllies = []
# # find indirect a/e
# todo = allies[objID][:]
# while todo:
# id = todo.pop(0)
# iTargets.extend(targets[id])
# for tmpID in allies[id]:
# if tmpID not in iAllies:
# todo.append(tmpID)
# iAllies.append(tmpID)
# # remove allies from targets
# for id in iAllies:
# if id in iTargets:
# iTargets.remove(id)
# # IMPORTANT: prefer NOT to fire at possible allies
# # add my targets
# #for id in targets[objID]:
# # if id not in iTargets:
# # iTargets.append(id)
# # that's all folks
# for id in iTargets:
# if objID not in attack[id]:
# attack[id].append(objID)
# if id not in attack[objID]:
# attack[objID].append(id)
# NOT VALID: objects with action ACTION_ATTACK will attack only their targets
# check, if there are any targets
isCombat = 0
for objID in objects:
if attack[objID]:
isCombat = 1
break #end loop
if not isCombat:
#@log.debug('ISystem', 'No combat')
# reset combat counters
system.combatCounter = 0
for fleetID in system.fleets:
tran.db[fleetID].combatCounter = 0
return
# increase combat counters
system.combatCounter += 1
for fleetID in system.fleets:
tran.db[fleetID].combatCounter += 1
# debug
log.debug('ISystem', 'Final attacks in system %d:' % system.oid, attack)
# mines detonate before battle
shots = {}
targets = {}
firing = {}
damageCaused = {}
killsCaused = {}
damageTaken = {}
shipsLost = {}
minesTriggered = {}
fleetOwners = {}
isCombat = False
isMineCombat = False
for owner in ownerIDs:
if not (owner in hasMine): #no planets
continue
if hasMine[owner] == 0: #no control structure
continue
objID = hasMine[owner]
if len(self.getMines(system,owner)) == 0:
continue #no mines, something broke
#log.debug('ISystem-Mines', 'Mines Found')
if len(attack[objID]) == 0:
continue #no targets
isMineFired = True
mineTargets = copy.copy(attack[objID])
while isMineFired:
while len(mineTargets) > 0:
targetID = random.choice(mineTargets) #select random target
targetobj = tran.db.get(targetID, None)
try:
if targetobj.type == T_FLEET:
fleetOwners[targetID] = targetobj.owner
break #target found
mineTargets.remove(targetID) #remove an object type that a mine can't hit from the temporary targets list
except:
mineTargets.remove(targetID) #remove a dead fleet from the temporary targets list
if len(mineTargets) == 0:
break #no fleet targets for mines
temp, temp, firing[targetID] = self.cmd(targetobj).getPreCombatData(tran, targetobj) #fix firing for "surrender to" section
damage,att,ignoreshield, mineID = self.cmd(obj).fireMine(system, owner)
if not damage: #no more mines
isMineFired = False
break
log.debug('ISystem', 'Mine Shooting (damage, att, ignore shield):',damage,att,ignoreshield)
isMineCombat = True
minesTriggered[mineID] = minesTriggered.get(mineID, 0) + 1
#Process Combat
#for now we assume only one ship can be destroyed by one mine
dmg, destroyed = self.cmd(targetobj).applyMine(tran, targetobj, att, damage, ignoreshield)
#log.debug('ISystem-Mines', 'Actual Damage Done:',dmg)
if dmg > 0:
damageTaken[targetID] = damageTaken.get(targetID, 0) + dmg
shipsLost[targetID] = shipsLost.get(targetID, 0) + destroyed
killsCaused[mineID] = killsCaused.get(mineID, 0) + destroyed
if dmg > 0:
damageCaused[mineID] = damageCaused.get(mineID, 0) + dmg
# send messages about mine effects to the owner of the minefield
# collect hit players
players = {}
for triggerID in firing.keys():
players[owners[triggerID]] = None
controllerPlanet = tran.db.get(objID, None)
damageCausedSum = 0
killsCausedSum = 0
for mineID in damageCaused.keys():
damageCausedSum = damageCausedSum + damageCaused.get(mineID, 0)
killsCausedSum = killsCausedSum + killsCaused.get(mineID, 0)
Utils.sendMessage(tran, controllerPlanet, MSG_MINES_OWNER_RESULTS, system.oid, (players.keys(),(damageCaused, killsCaused, minesTriggered),damageCausedSum,killsCausedSum))
# send messages to the players whose fleets got hit by minefields
for targetID in damageTaken.keys():
targetFleet = tran.db.get(targetID, None)
if targetFleet:
Utils.sendMessage(tran, targetFleet, MSG_MINES_FLEET_RESULTS, system.oid, (damageTaken[targetID], shipsLost[targetID]))
else:
targetFleet = IDataHolder()
targetFleet.oid = fleetOwners[targetID]
Utils.sendMessage(tran, targetFleet, MSG_MINES_FLEET_RESULTS, system.oid, (damageTaken[targetID], shipsLost[targetID]))
Utils.sendMessage(tran, targetFleet, MSG_DESTROYED_FLEET, system.oid, ())
damageCaused = {}
killsCaused = {}
damageTaken = {}
shipsLost = {}
# now to battle
for objID in objects:
obj = tran.db.get(objID, None)
# get shots from object, should be sorted by weaponClass
# shots = [ shot, ...], shot = (combatAtt, weaponID)
# get target classes and numbers
# (class1, class2, class3, class4)
# cls0 == fighters, cls1 == midships, cls2 == capital ships, cls3 == planet installations
#@log.debug(objID, obj.name, "getting pre combat data")
if obj: # source already destroyed; ignore
shots[objID], targets[objID], firing[objID] = self.cmd(obj).getPreCombatData(tran, obj)
if firing[objID]:
isCombat = True
if not isCombat and not isMineCombat:
# no shots has been fired
#@log.debug('ISystem', 'No combat')
# reset combat counters
system.combatCounter = 0
for fleetID in system.fleets:
tran.db[fleetID].combatCounter = 0
return
#@log.debug("Shots:", shots)
#@log.debug("Targets", targets)
if isCombat:
for shotIdx in (3, 2, 1, 0):
for objID in objects:
# obj CAN be deleted at this point
obj = tran.db.get(objID, None)
if obj == None:
continue # source already destroyed; move to next source
# if object is fleet, then it's signature is max
if obj and obj.type == T_FLEET:
obj.signature = Rules.maxSignature
# target preselection
totalClass = [0, 0, 0, 0]
total = 0
for targetID in attack[objID]:
totalClass[0] += targets[targetID][0]
totalClass[1] += targets[targetID][1]
totalClass[2] += targets[targetID][2]
totalClass[3] += targets[targetID][3]
total = totalClass[0] + totalClass[1] + totalClass[2] + totalClass[3]
# process shots
for combatAtt, weaponID in shots[objID][shotIdx]:
weapon = Rules.techs[weaponID]
weaponClass = weapon.weaponClass
if total == 0:
# there are no targets
break
#@log.debug('ISystem', 'Processing shot', objID, weapon.name, weaponClass)
# process from weaponClass up
# never shoot on smaller ships than weaponClass
applied = 0
for tmpWpnClass in xrange(weaponClass, 4):
#@log.debug('ISystem', 'Trying target class', tmpWpnClass, totalClass[tmpWpnClass])
# select target
if totalClass[tmpWpnClass]:
target = Utils.rand(0, totalClass[tmpWpnClass])
#@log.debug('ISystem', 'Target rnd num', target, totalClass[tmpWpnClass])
for targetID in attack[objID]:
if target < targets[targetID][tmpWpnClass]:
#@log.debug(objID, 'attacks', targetID, tmpWpnClass)
# targetID can be deleted at this point
anObj = tran.db.get(targetID, None)
if anObj:
dmg, destroyed, destroyedClass = self.cmd(anObj).applyShot(tran, anObj, systemDef[owners[targetID]], combatAtt + systemAtt[owners[objID]], weaponID, tmpWpnClass, target)
#@log.debug("ISystem result", dmg, destroyed, destroyedClass, tmpWpnClass)
#@print objID, 'dmg, destroyed', dmg, destroyed
damageTaken[targetID] = damageTaken.get(targetID, 0) + dmg
if destroyed > 0:
shipsLost[targetID] = shipsLost.get(targetID, 0) + destroyed
total -= destroyed
totalClass[destroyedClass] -= destroyed
if dmg > 0 and obj:
obj.combatExp += dmg
damageCaused[objID] = damageCaused.get(objID, 0) + dmg
applied = 1
else:
continue # target already destroyed, move to next target
break
else:
#@log.debug('ISystem', 'Lovering target by', targets[targetID][tmpWpnClass])
target -= targets[targetID][tmpWpnClass]
if applied:
break
# send messages and modify diplomacy relations
# distribute experience pts
for objID in objects:
obj = tran.db.get(objID, None)
if obj:
self.cmd(obj).distributeExp(tran, obj)
if attack[objID]:
source = obj or tran.db[owners[objID]]
# collect players
players = {}
for attackerID in attack[objID]:
players[owners[attackerID]] = None
d1 = damageTaken.get(objID,0)
d2 = damageCaused.get(objID,0)
l = shipsLost.get(objID, 0)
if d1 or d2 or l:
# send only if damage is taken/caused
Utils.sendMessage(tran, source, MSG_COMBAT_RESULTS, system.oid, (d1, d2, l, players.keys()))
if not obj:
# report DESTROYED status
Utils.sendMessage(tran, source, MSG_DESTROYED_FLEET, system.oid, ())
# modify diplomacy relations
objOwner = tran.db[owners[objID]]
for attackerID in attack[objID]:
attOwner = tran.db.get(owners[attackerID], None)
# owner of the fleet
rel = self.cmd(objOwner).getDiplomacyWith(tran, objOwner, attOwner.oid)
rel.relChng = Rules.relLostWhenAttacked
# attacker
rel = self.cmd(attOwner).getDiplomacyWith(tran, attOwner, objOwner.oid)
rel.relChng = Rules.relLostWhenAttacked
# check if object surrenders
for objID in objects:
# object surrender IFF it and its allies had target and was not able
# to fire at it, planet is not counted as ally in this case
obj = tran.db.get(objID, None)
if firing[objID] and obj:
continue
surrenderTo = []
for attID in attack[objID]:
if firing[attID] and tran.db.has_key(attID):
surrenderTo.append(tran.db[attID].owner)
for allyID in allies[objID]:
if not tran.db.has_key(allyID):
continue
ally = tran.db[allyID]
if firing[allyID] and ally.type != T_PLANET:
surrenderTo = []
break
if surrenderTo:
index = Utils.rand(0, len(surrenderTo))
if obj:
if self.cmd(obj).surrenderTo(tran, obj, surrenderTo[index]):
winner = tran.db[surrenderTo[index]]
source = tran.db.get(owners[objID], None)
log.debug('ISystem', 'BATTLE - surrender', objID, surrenderTo[index], surrenderTo)
if source:
Utils.sendMessage(tran, source, MSG_COMBAT_LOST, system.oid, winner.oid)
Utils.sendMessage(tran, winner, MSG_COMBAT_WON, system.oid, source.oid)
else:
Utils.sendMessage(tran, winner, MSG_COMBAT_WON, system.oid, obj.oid)
else:
winner = tran.db[surrenderTo[index]]
source = tran.db[owners[objID]]
log.debug('ISystem', 'BATTLE - surrender', objID, surrenderTo[index], surrenderTo)
Utils.sendMessage(tran, source, MSG_COMBAT_LOST, system.oid, winner.oid)
Utils.sendMessage(tran, winner, MSG_COMBAT_WON, system.oid, source.oid)
return
processBATTLEPhase.public = 1
processBATTLEPhase.accLevel = AL_ADMIN
def processFINALPhase(self, tran, obj, data):
# TODO find new starting points
# clean up mines if system ownership was lost
owners = []
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner not in owners:
owners.append(planet.owner)
for ownerid in obj.minefield:
if ownerid not in owners:
self.removeMines(obj,ownerid)
return obj.planets[:] + obj.closeFleets[:]
processFINALPhase.public = 1
processFINALPhase.accLevel = AL_ADMIN
def cmpPlanetByEnergy(self, tran, planetID1, planetID2):
planet1 = tran.db[planetID1]
planet2 = tran.db[planetID2]
return cmp(planet2.plEn, planet1.plEn)
cmpPlanetByEnergy.public = 0
def sortPlanets(self, tran, obj, data):
obj.planets.sort(lambda x, y: self.cmpPlanetByEnergy(tran, x, y))
orbit = 1
for planetID in obj.planets:
planet = tran.db[planetID]
planet.orbit = orbit
orbit += 1
sortPlanets.public = 0
def rename(self, tran, obj, newName, nType):
newName = newName.strip()
# you have to own all planets
# TODO: Throw another cmdr exc AFTER you have no planet
haveOne = 0
anotherComm = 0
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner != tran.session.cid and planet.owner != OID_NONE:
anotherComm = 1
if planet.owner == tran.session.cid:
haveOne = 1
if not haveOne:
raise GameException('You cannot change name of this system - you have no planet in this system.')
if anotherComm:
raise GameException('You cannot change name of this system - another commander in system.')
# check validity of name
if not Utils.isCorrectName(newName):
raise GameException('Invalid name. Only characters, digits, space, dot and dash permitted, max. length is 30 characters.')
# check if there is other system with this name
galaxy = tran.db[obj.compOf]
for systemID in galaxy.systems:
if tran.db[systemID].name == newName and systemID != obj.oid:
raise GameException('This name is already used.')
# TODO you have to own this system longer than previous owner
# one change per 1 day allowed
turn = tran.db[OID_UNIVERSE].turn
if obj.lastNameChng + Rules.turnsPerDay <= turn:
# rename system
obj.name = newName
# rename planets
newNames = [obj.name]
for planetID in obj.planets:
planet = tran.db[planetID]
planet.name = Utils.getPlanetName(obj.name, nType, planet.orbit - 1)
newNames.append(planet.name)
obj.lastNameChng = turn
else:
raise GameException('You cannot change name of this system - name has been changed recently (try it one day later).')
return newNames
rename.public = 1
rename.accLevel = AL_NONE
def createPlanet(self, tran, obj):
planet = self.new(T_PLANET)
planet.compOf = obj.oid
oid = tran.db.create(planet)
obj.planets.append(oid)
return oid
def addMine(self,obj,ownerid,minetechid,maxnum): #add a mine for an owner
if ownerid in obj.minefield:
if len(obj.minefield[ownerid]) < maxnum:
obj.minefield[ownerid].append(minetechid)
else:
obj.minefield[ownerid]= [minetechid]
addMine.public = 1
addMine.accLevel = AL_ADMIN
def getMines(self,obj,ownerid): #get all mines of an owner
if ownerid in obj.minefield:
return obj.minefield[ownerid]
else:
return []
getMines.public = 1
getMines.accLevel = AL_ADMIN
def removeMines(self,obj,ownerid): #remove all mines of an owner
if ownerid in obj.minefield:
del obj.minefield[ownerid]
removeMines.public = 0
def fireMine(self,obj,ownerid): #shoot the mine
if ownerid in obj.minefield:
mine = obj.minefield[ownerid].pop(random.randrange(0,len(obj.minefield[ownerid]))) #select a random mine to detonate
if len(obj.minefield[ownerid]) == 0:
obj.minefield.pop(ownerid) #delete the owner if no more mines
else:
return False,False,False,False
tech = Rules.techs[mine]
damage = random.randrange(tech.weaponDmgMin,tech.weaponDmgMax)
attack = tech.weaponAtt
ignoreshield = tech.weaponIgnoreShield
return damage,attack,ignoreshield, mine
fireMine.public = 1
fireMine.accLevel = AL_ADMIN
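# --- Illustrative note (not part of the original source) ---
# fireMine pops one random mine from the owner's minefield and converts its
# tech id into combat numbers. A toy walk-through, assuming a Rules.techs
# entry with weaponDmgMin=10, weaponDmgMax=20, weaponAtt=5,
# weaponIgnoreShield=0:
#
#   damage, att, ignoreshield, mine = self.cmd(system).fireMine(system, owner_id)
#   # damage is a random value in [10, 20), att == 5, ignoreshield == 0, and
#   # mine is the tech id, later used to credit kills to that mine type.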
def getSystemMineLauncher(self,tran,obj,playerID):
launchtech = 0
mineclass = 0
structure = 0
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == playerID:
for struct in planet.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
if tech.mineclass > mineclass:
mineclass = tech.mineclass
launchtech = tech
structure = struct[STRUCT_IDX_TECHID]
return launchtech, structure
getSystemMineLauncher.public = 0
def getSystemMineSource(self,tran,obj,playerID):
source = 0
mineclass = 0
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == playerID:
for struct in planet.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
if tech.mineclass > mineclass:
mineclass = tech.mineclass
source = planetID
return source
getSystemMineSource.public = 0
def getSystemCombatBonuses(self,tran,obj,playerID):
systemAtt = 0
systemDef = 0
for planetID in obj.planets:
planet = tran.db[planetID]
if planet.owner == playerID:
for struct in planet.slots:
tech = Rules.techs[struct[STRUCT_IDX_TECHID]]
techEff = Utils.getTechEff(tran, struct[STRUCT_IDX_TECHID], planet.owner)
if tech.systemAtt > 0 or tech.systemDef > 0:
systemAtt = max(systemAtt,tech.systemAtt*techEff)
systemDef = max(systemDef,tech.systemDef*techEff)
return (systemAtt,systemDef)
getSystemCombatBonuses.public = 0
def loadDOMNode(self, tran, obj, xoff, yoff, node):
obj.x = float(node.getAttribute('x')) + xoff
obj.y = float(node.getAttribute('y')) + yoff
orbit = 1
nType = Utils.getPlanetNamesType()
for elem in node.childNodes:
if elem.nodeType == Node.ELEMENT_NODE:
name = elem.tagName
if name == 'properties':
self.loadDOMAttrs(obj, elem)
elif name == 'planet':
# create planet
planet = tran.db[self.createPlanet(tran, obj)]
self.cmd(planet).loadDOMNode(tran, planet, obj.x, obj.y, orbit, elem)
# planet.name = u'%s %s' % (obj.name, '-ABCDEFGHIJKLMNOPQRSTUVWXYZ'[orbit])
planet.name = Utils.getPlanetName(obj.name, nType, orbit - 1)
orbit += 1
else:
raise GameException('Unknown element %s' % name)
#~ # compute rotational constants
#~ galaxy = tran.db[obj.compOf]
#~ dx = obj.x - galaxy.x
#~ dy = obj.y - galaxy.y
#~ obj.dist = math.sqrt(dx * dx + dy * dy)
#~ if obj.dist > 0:
#~ obj.dAngle = math.sqrt(galaxy.centerWeight / obj.dist) / obj.dist
#~ else:
#~ obj.dAngle = 0.0
#~ if dx != 0:
#~ obj.sAngle = math.atan(dy / dx)
#~ if dx < 0: obj.sAngle += math.pi
#~ elif dy > 0:
#~ obj.sAngle = math.pi / 2
#~ elif dx < 0:
#~ obj.sAngle = math.pi * 3 / 2
#~ # this is a check only
#~ angle = obj.sAngle + (0 / 384.0) * obj.dAngle
#~ x = galaxy.x + obj.dist * math.cos(angle)
#~ y = galaxy.y + obj.dist * math.sin(angle)
#~ if x != obj.x or y != obj.y:
#~ log.warning(obj.name, obj.x, obj.y, dx, dy, obj.dist, obj.dAngle, obj.sAngle, x, y)
return SUCC
|
Lukc/ospace-lukc
|
server/lib/ige/ospace/ISystem.py
|
Python
|
gpl-2.0
| 35,859
|
[
"Galaxy"
] |
b332ac7d0eda7084e03e4bae63c2237f579026517ce65c1e09ac15b616ea1374
|
# leaves.py
# DNF plugin for listing installed packages not required by any other
# installed package.
#
# Copyright (C) 2015 Emil Renner Berthing
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
import dnf
import dnf.sack
import dnf.cli
from dnfpluginsextras import _
class Leaves(dnf.Plugin):
name = 'leaves'
def __init__(self, base, cli):
super(Leaves, self).__init__(base, cli)
if cli:
cli.register_command(LeavesCommand)
class LeavesCommand(dnf.cli.Command):
aliases = ('leaves',)
summary = _('List installed packages not required by any other package')
def buildgraph(self):
"""
Load the list of installed packages and their dependencies using
hawkey, and build the dependency graph and the graph of reverse
dependencies.
"""
sack = dnf.sack.rpmdb_sack(self.base)
pkgmap = dict()
packages = []
depends = []
rdepends = []
deps = set()
providers = set()
for i, pkg in enumerate(sack.query()):
pkgmap[pkg] = i
packages.append(pkg)
rdepends.append([])
for i, pkg in enumerate(packages):
for req in pkg.requires:
sreq = str(req)
if sreq.startswith('rpmlib(') or sreq == 'solvable:prereqmarker':
continue
for dpkg in sack.query().filter(provides=req):
providers.add(pkgmap[dpkg])
if i not in providers:
deps.update(providers)
providers.clear()
deplist = list(deps)
deps.clear()
depends.append(deplist)
for j in deplist:
rdepends[j].append(i)
return (packages, depends, rdepends)
def kosaraju(self, graph, rgraph):
"""
Run Kosaraju's algorithm to find strongly connected components
in the graph, and return the list of nodes in the components
without any incoming edges.
"""
N = len(graph)
rstack = []
stack = []
idx = []
tag = [False] * N
# do depth-first searches in the graph
# and push nodes to rstack "on the way up"
# until all nodes have been pushed.
# tag nodes so we don't visit them more than once
for u in range(N):
if tag[u]:
continue
stack.append(u)
idx.append(len(graph[u]))
tag[u] = True
while stack:
u = stack[-1]
i = idx[-1]
if i:
i -= 1
idx[-1] = i
v = graph[u][i]
if not tag[v]:
stack.append(v)
idx.append(len(graph[v]))
tag[v] = True
else:
stack.pop()
idx.pop()
rstack.append(u)
# now searches beginning at nodes popped from
# rstack in the graph with all edges reversed
# will give us the strongly connected components.
# the incoming edges to each component are the
# union of incoming edges to each node in the
# component minus the incoming edges from
# component nodes themselves.
# now all nodes are tagged, so this time let's
# remove the tags as we visit each node.
leaves = []
scc = []
sccredges = set()
while rstack:
v = rstack.pop()
if not tag[v]:
continue
stack.append(v)
tag[v] = False
while stack:
v = stack.pop()
redges = rgraph[v]
scc.append(v)
sccredges.update(redges)
for u in redges:
if tag[u]:
stack.append(u)
tag[u] = False
sccredges.difference_update(scc)
if not sccredges:
leaves.extend(scc)
del scc[:]
sccredges.clear()
return leaves
def findleaves(self):
(packages, depends, rdepends) = self.buildgraph()
return [packages[i] for i in self.kosaraju(depends, rdepends)]
def run(self, args):
for pkg in sorted(map(str, self.findleaves())):
print(pkg)
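# --- Illustrative sketch (not part of the original plugin) ---
# kosaraju() returns the nodes of strongly connected components that have no
# incoming edges; on the package graph these are the "leaves". A toy run on
# a 4-node graph where 0 -> 1 -> 2 -> 1 and 3 is isolated gives [0, 3],
# since {1, 2} form a cycle that node 0 points into:
#
#   graph = [[1], [2], [1], []]     # forward edges (requirements)
#   rgraph = [[], [0, 2], [1], []]  # reverse edges (reverse dependencies)
#   cmd = LeavesCommand.__new__(LeavesCommand)  # skip dnf plumbing for the demo
#   print(sorted(cmd.kosaraju(graph, rgraph)))  # -> [0, 3]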
|
esmil/dnf-plugins-extras
|
plugins/leaves.py
|
Python
|
gpl-2.0
| 4,887
|
[
"VisIt"
] |
35f5009b8e9da0290808812c2ed3380301f7f394b5201c354ec4e12b389cc07d
|
#!/usr/bin/env python
'''
GW calculation with exact frequency integration
'''
from pyscf import gto, dft, gw
mol = gto.M(
atom = 'H 0 0 0; F 0 0 1.1',
basis = 'ccpvdz')
mf = dft.RKS(mol)
mf.xc = 'pbe'
mf.kernel()
nocc = mol.nelectron//2
gw = gw.GW(mf, freq_int='exact')
gw.kernel()
print(gw.mo_energy)
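# --- Illustrative follow-up (not part of the original example) ---
# The nocc computed above can be used to pull out the GW quasiparticle
# HOMO/LUMO energies; this assumes gw.mo_energy is ordered by orbital index.
print('GW HOMO: %.6f Ha' % gw.mo_energy[nocc - 1])
print('GW LUMO: %.6f Ha' % gw.mo_energy[nocc])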
|
sunqm/pyscf
|
examples/gw/02-gw_exact.py
|
Python
|
apache-2.0
| 315
|
[
"PySCF"
] |
ad382b6adf7c1151619d85220d625acea1185ed929230ae1bf2b330257f9496a
|
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
general purpose utility methods.
"""
import sys, string
import os.path as osp
import shutil
import operator
import os, pickle ## for Load and Dump
import tempfile
import traceback
from inspect import getframeinfo
from time import localtime
import glob
import subprocess
import gzip
import collections
import types
from biskit.core import oldnumeric as N0
class ToolsError( Exception ):
pass
class PickleError( ToolsError ):
pass
def errWriteln(s):
"""
print s to standard error with line feed.
:param s: string
:type s: str
"""
sys.stderr.write(s+'\n')
sys.stderr.flush()
def errWrite(s):
"""
print s to standard error.
:param s: string
:type s: str
"""
sys.stderr.write(s)
sys.stderr.flush()
def flushPrint(s):
"""
print s without line break and flush standard out.
:param s: string
:type s: str
"""
sys.stdout.write(s)
sys.stdout.flush()
def lastError():
"""
Collect type and line of last exception.
:return: '<ExceptionType> in line <lineNumber>:<Exception arguments>'
:rtype: String
"""
try:
trace = sys.exc_info()[2]
why = sys.exc_info()[1]
try:
why = sys.exc_info()[1].args
except:
pass
file = getframeinfo( trace.tb_frame )[0]
result = "%s in %s line %i:\n\t%s." % ( str(sys.exc_info()[0]),
file, trace.tb_lineno, str(why) )
finally:
trace = None
return result
def lastErrorTrace( limit=None ):
tb = sys.exc_info()[2]
lines = traceback.extract_tb( tb, None )
result = ''
for l in lines:
pyFile = stripFilename( l[0] )
result += '%s: %i (%s) %s\n' % (pyFile, l[1],l[2],l[3])
return result
def dictAdd( dic, key, value, forceList=False ):
"""
Add value to dic, create list, if dic has already value in key.
:param key: dictionary key
:type key: str
:param value: value
:type value: any
"""
if key in dic:
old = dic[key]
if type( old ) != list and value != old:
dic[ key ] = [ old ] + [ value ]
else:
if type( old ) == list and value not in old:
dic[ key ] = old + [ value ]
else:
if forceList and (type( value ) != list):
dic[key] = [ value ]
else:
dic[key] = value
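# --- Usage sketch (illustrative, not part of the original module) ---
# dictAdd keeps single values plain and promotes repeated keys to lists
# (or forces a list from the start with forceList):
#
#   d = {}
#   dictAdd(d, 'chain', 'A')                # d == {'chain': 'A'}
#   dictAdd(d, 'chain', 'B')                # d == {'chain': ['A', 'B']}
#   dictAdd(d, 'model', 1, forceList=True)  # d == {'model': [1]}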
def absfile( filename, resolveLinks=1 ):
"""
Get absolute file path::
- expand ~ to user home
- expand ../../ to absolute path
- resolve links
- add working directory to unbound files ('ab.txt'->'/home/raik/ab.txt')
:param filename: name of file
:type filename: str
:param resolveLinks: eliminate any symbolic links (default: 1)
:type resolveLinks: 1|0
:return: absolute path or filename
:rtype: string
:raise ToolsError: if a ~user part does not translate to an existing path
"""
if not filename:
return filename
r = osp.abspath( osp.expanduser( filename ) )
if '~' in r:
raise ToolsError('Could not expand user home in %s' % filename)
if resolveLinks:
r = osp.realpath( r )
r = osp.normpath(r)
return r
def homefile( filename, otherUser=1, ownCopy=1 ):
"""
Relativize a file name to ~ or, if it is in another user's home,
to ~otheruser or, if it is in nobody's home, to / .
:class:`splithome()` is used to also guess home directories of other users.
:param filename: name of file
:type filename: str
:param otherUser: look also in other user's home directories (default 1)
:type otherUser: 1|0
:param ownCopy: replace alien path by path into own home directory if
possible, e.g. ~other/data/x is replaced
by ~/data/x if there is such a file. (default 1) Careful!
:type ownCopy: 1|0
:return: path or filename
:rtype: str
"""
f = absfile( filename )
my_home = osp.expanduser('~')
user_home, rest = splithome( f )
if user_home == my_home:
return f.replace( user_home, '~', 1 )
if otherUser and user_home != '':
## first try to find same path in own home directory
if ownCopy:
my_path = os.path.join( my_home, rest )
if osp.exists( my_path ):
return my_path
user = osp.split( user_home )[-1]
return f.replace( user_home+'/', '~' + user + '/', 1 )
return f
def splithome( filename ):
"""
Split path into home directory and remaining path. Valid home directories
are folders belonging to the same folder as the current user's home. I.e.
the method tries also to guess home directories of other users.
:param filename: name of file
:type filename: str
:return: home folder of some user, remaining path relative to home
:rtype: (str, str)
"""
home = osp.expanduser( '~' )
home_base = osp.split( home )[0]
if filename.find( home_base ) != 0:
return '', filename
f = filename.replace( home_base + '/', '', 1 )
user = f.split( '/' )[0]
user_home = os.path.join( home_base, user )
rest = f.replace( user + '/', '', 1 )
return user_home, rest
def __pathsplit(p, rest=[]):
"""
from ASPN Python Cookbook
"""
(h,t) = os.path.split(p)
if len(h) < 1: return [t]+rest
if len(t) < 1: return [h]+rest
return __pathsplit(h,[t]+rest)
def __commonpath(l1, l2, common=[]):
"""
from ASPN Python Cookbook
"""
if len(l1) < 1: return (common, l1, l2)
if len(l2) < 1: return (common, l1, l2)
if l1[0] != l2[0]: return (common, l1, l2)
return __commonpath(l1[1:], l2[1:], common+[l1[0]])
def relpath(p1, p2):
"""
Translate p2 into a path relative to p1.
:param p1: base path
:type p1: str
:param p2: target path
:type p2: str
:return: relative path p1 -> p2
:rtype: str
"""
if not p1 or not p2:
return p2
(__common,l1,l2) = __commonpath(__pathsplit(p1), __pathsplit(p2))
p = []
if len(l1) > 0:
p = [ '../' * len(l1) ]
p = p + l2
return os.path.join( *p )
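# --- Usage sketch (illustrative, not part of the original module) ---
# relpath strips the common prefix of both paths and climbs back up with
# one '../' per remaining component of the base path:
#
#   relpath('/home/raik/data/x', '/home/raik/py/tools.py')
#   # -> '../../py/tools.py'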
def stripSuffix( filename ):
"""
Return file name without ending.
:param filename: name of file
:type filename: str
:return: filename or path without suffix
:rtype: str
"""
try:
if filename.find('.') != -1:
filename = filename[: filename.rfind('.') ] # remove ending
except:
pass ## just in case there is no ending to start with...
return filename
def stripFilename( filename ):
"""
Return filename without path and without ending.
:param filename: name of file
:type filename: str
:return: base filename
:rtype: str
"""
name = osp.basename( filename ) # remove path
try:
if name.find('.') != -1:
name = name[: name.rfind('.') ] # remove ending
except:
pass ## just in case there is no ending to start with...
return name
def fileLength( filename ):
"""
Count number of lines in a file.
:param filename: name of file
:type filename: str
:return: number of lines
:rtype: int
"""
p1 = subprocess.Popen( ['cat',filename], stdout=subprocess.PIPE )
p2 = subprocess.Popen( ["wc", "-l"], stdin=p1.stdout,
stdout=subprocess.PIPE )
return int(p2.communicate()[0])
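# --- Illustrative note (not part of the original module) ---
# fileLength shells out to `cat | wc -l`; an equivalent pure-Python count,
# should the subprocess route be undesirable:
#
#   def file_length_py(filename):
#       with open(filename) as f:
#           return sum(1 for _ in f)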
def tempDir():
"""
Get folder for temporary files - either from environment settings
or '/tmp'
:return: directory for temporary files
:rtype: str
"""
if tempfile.tempdir is not None:
return tempfile.tempdir
return osp.dirname( tempfile.mktemp() )
def file2dic( filename ):
"""
Construct dictionary from file with key - value pairs (one per line).
:param filename: name of file
:type filename: str
:raise ToolsError: if file can't be parsed into dictionary
:raise IOError: if file can't be opened
"""
try:
line = None
result = {}
for line in open( filename ):
if '#' in line:
line = line[ : line.index('#') ]
line = line.strip()
l = line.split()[1:]
if len( l ) == 0 and len( line ) > 0:
result[ line.split()[0] ] = ''
if len( l ) == 1:
result[ line.split()[0] ] = l[0]
if len( l ) > 1:
result[ line.split()[0] ] = l
except:
s = "Error parsing option file %s." % filename
s += '\nLine: ' + str( line )
s += '\n' + lastError()
raise ToolsError( s )
return result
def get_cmdDict(lst_cmd, dic_default):
"""
Parse commandline options into dictionary of type C{ {<option> : <value>} }
Options are recognised by a leading '-'.
Error handling should be improved.
Option C{ -x |file_name| } is interpreted as file with additional options.
The key value pairs in lst_cmd replace key value pairs in the
-x file and in dic_default.
:param lst_cmd: list with the command line options::
e.g. ['-pdb', 'in1.pdb', 'in2.pdb', '-o', 'out.dat']
:type lst_cmd: [str]
:param dic_default: dictionary with default options::
e.g. {'psf':'in.psf'}
:type dic_default: {str : str}
:return: command dictionary::
ala {'pdb':['in1.pdb', 'in2.pdb'], 'psf':'in.psf', 'o':'out.dat'}
:rtype: {<option> : <value>}
"""
dic_cmd = {} # create return dictionary
try:
for cmd in lst_cmd:
if (cmd[0] == '-'): # this entry is new option
current_option = cmd[1:] # take all but leading "-"
dic_cmd[current_option] = "" # make sure key exists even
# w/o value
counter = 0 # number of values for this option
else: # this entry is value for latest option
if counter < 1:
dic_cmd[current_option] = cmd
# in case, several values follow after a "-xxx" option convert dictionary
# entry into list and add all elements (until the next "-") to this list
else:
if counter == 1: # there is already a value assigned
# convert to list
dic_cmd[current_option] = [dic_cmd[current_option]]
# add value to list
dic_cmd[current_option] = dic_cmd[current_option] + [cmd]
counter = counter + 1
except (KeyError, UnboundLocalError) as why:
errWriteln("Can't resolve command line options.\n \tError:"+str(why))
## get extra options from external file
try:
if 'x' in dic_cmd:
d = file2dic( dic_cmd['x'] )
d.update( dic_cmd )
dic_cmd = d
except IOError:
errWriteln( "Error opening %s."% dic_cmd['x'] )
except ToolsError as why:
errWriteln( str(why) )
## fill in missing default values
dic_default.update( dic_cmd )
dic_cmd = dic_default
return dic_cmd
def cmdDict( defaultDic={} ):
"""
Convenience implementation of :class:`get_cmdDict`. Take command line options
from sys.argv[1:] and convert them into dictionary.
Example::
'-o out.dat -in 1.pdb 2.pdb 3.pdb -d' will be converted to
{'o':'out.dat', 'in': ['1.pdb', '2.pdb', '3.pdb'], 'd':'' }
Option C{ -x |file_name| } is interpreted as file with additional options.
:param defaultDic: dic with default values.
:type defaultDic: dic
:return: command dictionary
:rtype: dic
"""
return get_cmdDict( sys.argv[1:], defaultDic )
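# --- Usage sketch (illustrative, not part of the original module) ---
# With sys.argv[1:] == ['-o', 'out.dat', '-in', '1.pdb', '2.pdb', '-d'],
# cmdDict({'psf': 'in.psf'}) returns:
#
#   {'o': 'out.dat', 'in': ['1.pdb', '2.pdb'], 'd': '', 'psf': 'in.psf'}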
def dump(this, filename, gzip = 0, mode = 'wb'):
"""
Dump this::
dump(this, filename, gzip = 0)
Supports also '~' or '~user'.
Note: Peter Schmidtke : gzip fixed, works now
Written by Wolfgang Rieping.
:param this: object to dump
:type this: any
:param filename: name of file
:type filename: str
:param gzip: gzip dumped object (default 0)
:type gzip: 1|0
:param mode: file handle mode (default w)
:type mode: str
"""
import biskit as B
filename = osp.expanduser(filename)
## special case: do not slim PDBModels that are pickled to override
## their own source
if isinstance( this, B.PDBModel ) and not this.forcePickle \
and osp.samefile( str(this.source), filename ):
this.saveAs( filename )
else:
if mode not in ['wb', 'w', 'a']:
raise PickleError("mode has to be 'wb'/'w' (write) or 'a' (append)")
if gzip:
f = gzopen(filename, "wb")
else:
f = open(filename, mode)
pickle.dump(this, f, 1)
f.close()
def Dump( this, filename, gzip = 0, mode = 'w'):
from biskit import EHandler ## assumed export; imported here to avoid circular imports
EHandler.warning('deprecated: tools.Dump has been renamed to tools.dump')
return dump( this, filename, gzip=gzip, mode=mode )
def load(filename, gzip=0, encoding='ASCII'):
"""
Load dumped object from file.
Note: Peter Schmidtke : gzip fixed, works now
Written by Wolfgang Rieping.
:param filename: name of file
:type filename: str
:param gzip: unzip dumped object (default 0)
:type gzip: 1|0
:param encoding: optional encoding for pickle.load ['ASCII']
:type encoding: str
:return: loaded object
:rtype: any
:raise pickle.UnpicklingError: if the pickle format is not recognized
"""
filename = osp.expanduser(filename)
try:
if gzip :
f=gzopen(filename,"rb")
else :
f = open(filename, 'rb')
objects = []
eof = 0
n = 0
while not eof:
try:
this = pickle.load(f) #, encoding=encoding)
objects.append(this)
n += 1
except EOFError:
eof = 1
f.close()
if n == 1:
return objects[0]
else:
return tuple(objects)
except ValueError as why:
raise PickleError('Python pickle %s is corrupted.' % filename)
def Load( filename, gzip=0 ):
EHandler.warning('deprecated: tools.Load has been renamed to tools.load')
return load( filename, gzip=gzip )
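## Illustrative round trip (added sketch; the file name is hypothetical):
## d = {'a': 10, 'b': 'test'}
## dump(d, '~/example.pickle')
## assert load('~/example.pickle') == d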
def packageRoot():
"""
:return: absolute folder of the biskit python package.
:rtype: str
"""
import biskit
return absfile( osp.split(biskit.__file__)[0] )
def projectRoot():
"""
Root of biskit project. That's the folder **containing** the biskit python
package (the parent folder of `packageRoot`).
:return: absolute path of the root (parent) folder of current project::
i.e. '/home/raik/py/biskitproject'
:rtype: string
"""
return absfile(osp.join( packageRoot(), '..' ))
def dataRoot():
"""
Root of Biskit data directory (formerly 'biskit/Biskit/data').
:return: absolute path
:rtype: string
"""
return osp.join( packageRoot(), 'data' )
def testRoot( subfolder='' ):
"""
Root of Biskit test directory.
:param subfolder: str, optional sub-folder of test data folder
:return: absolute path
:rtype: string
"""
if subfolder and subfolder[0] == os.sep:
subfolder = subfolder[1:]
return os.path.join( packageRoot(), 'testdata', subfolder )
def isBinary( f ):
"""
    Check if a file is executable (used here as a proxy for being a binary).
    :param f: path to existing file
    :type f: str
    :return: True if the file exists and is executable
    :rtype: bool
"""
return os.access( f, os.X_OK )
def binExists( f ):
"""
Check if binary with file name f exists.
:param f: binary file name
:type f: str
:return: True if binary file f is found in PATH and is executable
    :rtype: bool
"""
if osp.exists( f ):
return isBinary( f )
for path in os.getenv( 'PATH' ).split(':') :
full_path = osp.join( path, f )
if osp.exists( full_path ) and isBinary( full_path ):
return True
return False
def absbinary( f ):
"""
Absolute path of binary.
:param f: binary file name
:type f: str
:return: full path to existing binary
:rtype: str
:raise IOError: if an executable binary is not found in PATH
"""
if osp.exists( f ) and isBinary( f ):
return f
for path in os.getenv( 'PATH' ).split(':') :
full_path = osp.join( path, f )
if osp.exists( full_path ) and isBinary( full_path ):
return full_path
raise IOError('binary %s not found.' % f)
def platformFolder( f ):
"""
Get a platform-specific subfolder of f for platform-dependent imports.
:param f: parent folder
:type f: str
:return: path
:rtype: str
"""
import platform as P
__version = '.'.join( P.python_version().split('.')[:2] )
r = 'py%s_%s' % (__version, P.machine() ) ## , P.architecture()[0] )
r = os.path.join( f, r)
return r
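## Illustrative sketch (added; the exact value depends on the host): on 64-bit
## Linux with Python 3.9, platformFolder('/opt/lib') -> '/opt/lib/py3.9_x86_64'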
def sortString( s ):
"""
Sort the letters of a string::
sortString( str ) -> str with sorted letters
:param s: string to be sorted
:type s: str
:return: sorted string
:rtype: str
"""
l = list(s)
l.sort()
return ''.join(l)
def string2Fname( s ):
"""
    Remove forbidden characters from a string so that it can be used as a
    filename.
:param s: string
:type s: str
:return: cleaned string
:rtype: str
"""
forbidden = ['*', '?', '|', '/', ' ']
replaceme = ['-', '-', '-', '-', '_']
for i in range(0, len(forbidden)):
s = s.replace( forbidden[i], replaceme[i] )
return s
def toIntList( o ):
"""
Convert single value or list of values into list of integers.
:param o: value or list
:type o: int or [int]
:return: list of integer
:rtype: [int]
"""
if type( o ) != type( [] ):
o = [ o ]
return list(map( int, o ))
def toIntArray( o ):
"""
Convert single value or list of values to numpy array of int.
:param o: value or list
:type o: int or [int]
:return: array of integer
:rtype: N0.array('i')
"""
if type( o ) == list or type( o ) == type( N0.array([])):
return N0.array( map( int, o ) )
return N0.array( [ int( o ) ] )
def toList( o ):
"""
Make a list::
toList(o) -> [o], or o, if o is already a list
:param o: value(s)
:type o: any or [any]
:return: list
:rtype: [any]
"""
if type( o ) != type( [] ):
return [ o ]
return o
def toStr( o ):
"""
    Make a string from a list or integer, stripping any flanking whitespace
    from each item.
    :param o: value(s)
    :type o: any or [any]
    :return: concatenated string
    :rtype: str
"""
if type( o ) == type( 1 ):
return str(o)
if type( o ) == type( [] ):
s = ''
for item in o:
s += str.strip( str(item) )
return s
return o
def toInt( o, default=None ):
"""
    Convert to integer if possible::
toInt(o) -> int, int(o) or default if o is impossible to convert.
:param o: value
:type o: any
:param default: value to return if conversion is impossible (default: None)
:type default: any
:return: integer OR None
:rtype: int OR None
"""
if o is None or o == '':
return default
try:
return int( o )
except:
return default
def hex2int( shex ):
"""
    Convert a hex-code string into an integer number.
    :param shex: hex-code, e.g. 'FF0B99'
    :type shex: str
    :return: integer value
    :rtype: int
"""
shex = shex.replace('0x','')
factors = [ 16**(i) for i in range(len(shex)) ]
factors.reverse()
factors = N0.array( factors )
table = dict( list(zip('0123456789abcdef',list(range(16)))) )
components = [ table[s]*f for s,f in zip( shex.lower(), factors ) ]
return N0.sum( components )
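## Worked example (added for illustration):
## hex2int('FF') -> 15*16 + 15 = 255; hex2int('0x100') -> 256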
def colorSpectrum( nColors, firstColor='FF0000', lastColor='FF00FF' ):
"""
Creates a list of 'nColors' colors for biggles starting at
'firstColor' ending at 'lastColor'
Examples::
free spectrum red FF0000 to green 00FF00
bound spectrum cyan 00FFFF to magenta FF00FF
:param nColors: number of colors to create
:type nColors: int
:param firstColor: first color in hex format (default: FF0000)
:type firstColor: str
:param lastColor: last color in hex format (default: FF00FF)
:type lastColor: str
:return: list of colors
:rtype: [int]
"""
spec = []
cmd = dataRoot() + '/spectrum.pl ' +str(nColors) +\
' ' + str(firstColor) + ' ' + str(lastColor)
out = os.popen( cmd ).readlines()
if not out:
raise IOError('color generation command failed: %r' % cmd)
for s in out:
spec += [ hex2int( str( str.strip( s ) ) ) ]
return spec
def hexColors( nColors, firstColor='FF0000', lastColor='FF00FF' ):
"""
Creates a list of 'nColors' colors for PyMol starting at
'firstColor' ending at 'lastColor'
Examples::
free spectrum red FF0000 to green 00FF00
bound spectrum cyan 00FFFF to magenta FF00FF
:param nColors: number of colors to create
:type nColors: int
:param firstColor: first color in hex format (default: FF0000)
:type firstColor: str
:param lastColor: last color in hex format (default: FF00FF)
:type lastColor: str
:return: list of hex colors
:rtype: [ str ]
"""
spec = []
cmd = dataRoot() + '/spectrum.pl ' +str(nColors) +\
' ' + str(firstColor) + ' ' + str(lastColor)
out = os.popen( cmd ).readlines()
if not out:
raise IOError('color generation command failed: %r' % cmd)
for s in out:
spec += [ '0x' + str( str.strip( s ) ) ]
return spec
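## Illustrative note (added; compare EXPECT_hexcolors in the Test class below):
## hexColors(10) starts at '0xFF0000' and ends at '0xFF00FF'.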
def rgb2hex( rgbColor ):
"""
convert rgb color into 8 bit hex rgb color::
[ 1.0, 0.0, 1.0, ] -> 'FF00FF'
:param rgbColor: RGB-color e.g. [ 1.0, 0.0, 1.0, ]
:type rgbColor : [float]
:return: hex colors
:rtype: str
"""
hexRgb = ''
for i in range(0,3):
component = hex( int( rgbColor[i]*255 ) )[2:]
if len(component) == 1:
hexRgb += '0' + component
else:
hexRgb += component
return hexRgb
def hex2rgb( hexColor, str=0 ):
"""
convert 8 bit hex rgb color into rgb color ::
'FF00FF' -> [ 1.0, 0.0, 1.0, ]
:param hexColor: HEX-color e.g. 'FF00FF'
:type hexColor: str
    :param str: return rgb colors as a string (e.g. for PyMol)
:type str: 1|0
:return: rgb colors
:rtype: [float]
"""
rgb = []
if hexColor[:2] == '0x':
hexColor = hexColor[2:]
for i in range(0,6,2):
rgb += [ int(hexColor[i:i+2], 16)/255.0 ]
if str:
rgb_str= '[ %.3f, %.3f, %.3f ]'%(rgb[0], rgb[1], rgb[2])
return rgb_str
return rgb
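## Illustrative round trip (added comment): rgb2hex([1.0, 0.0, 1.0]) returns
## 'ff00ff' (lower case), and hex2rgb('FF00FF') returns [1.0, 0.0, 1.0].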
def dateString():
"""
:return: DD/MM/YYYY
:rtype: str
"""
t = localtime()
return '%02i/%02i/%i' % (t[2],t[1],t[0] )
def dateSortString():
"""
    :return: YYYY/MM/DD:hh.mm.ss
    :rtype: str
"""
t = localtime()
return "%i/%02i/%02i:%02i.%02i.%02i" % (t[0],t[1],t[2],t[3],t[4],t[5])
def tryRemove(f, verbose=0, tree=0, wildcard=0 ):
"""
Remove file or folder::
remove(f [,verbose=0, tree=0]), remove if possible, otherwise do nothing
:param f: file path
:type f: str
:param verbose: report failure (default 0)
:type verbose: 0|1
:param tree: remove whole folder (default 0)
:type tree: 0|1
:param wildcard: filename contains wildcards (default 0)
:type wildcard: 0|1
:return: 1 if file was removed
:rtype: 1|0
"""
try:
if osp.isdir(f):
if tree:
shutil.rmtree( f, ignore_errors=1 )
else:
errWriteln('%r is directory - not removed.' % f)
else:
if wildcard:
l = glob.glob( f )
for i in l:
os.remove( i )
else:
os.remove( f )
return 1
except:
if verbose: errWriteln( 'Warning: Cannot remove %r.' % f )
return 0
def backup( fname, suffix='~' ):
"""
Create backup of file if it already exists.
:param fname: file name
:type fname: str
:param suffix: suffix to add to backup file name ['~']
:type suffix: str
:return: True if backup was created, False otherwise
:rtype: bool
"""
fname = absfile( fname )
if os.path.exists( fname ):
os.rename( fname, fname + suffix )
return True
return False
def ensure( v, t, allowed=[], forbidden=[] ):
"""
    Check the type of a variable.
    :param v: variable to test
    :type v: variable
    :param t: required type
    :type t: type
    :param allowed: list of additional values allowed for v {default: []}
    :type allowed: [str]
    :param forbidden: list of values explicitly rejected for v {default: []}
    :type forbidden: [str]
    :raise TypeError: if invalid
"""
if allowed:
allowed = toList( allowed )
if len( allowed ) > 0 and v in allowed:
return
if not isinstance(v, t):
raise TypeError('looked for %s but found %s' % (str(t),str(v)[:20]))
if forbidden and v in forbidden:
raise TypeError('value %s is not allowed.' % (str(v)[:20]))
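## Illustrative sketch (added): ensure('x', str) passes silently,
## ensure('x', int) raises TypeError, and
## ensure(None, int, allowed=[None]) passes because of the allowed list.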
def clipStr( s, length, suffix='..', expandtabs=1 ):
"""
Shorten string from end and replace the last characters with suffix::
clipStr( str, length ) -> str, with len( str ) <= length
:param s: original string
:type s: str
:param length: desired length
:type length: int
:param suffix: suffix (default: ..)
:type suffix: str
    :return: shortened string
:rtype: str
"""
if expandtabs:
s = s.expandtabs()
if len(s) > length:
s = s[:(length - len(suffix))] + suffix
return s
def info( item, short=1 ):
"""
::
info( item, short=1) -> Print useful information about item.
:param item: query item
:type item: item
:param short: short version (default: 1)
:type short: 1|0
"""
## quick and dirty ##
if hasattr(item, '__name__'):
print("NAME: ", item.__name__)
if hasattr(item, '__class__'):
print("CLASS: ", item.__class__.__name__)
print("ID: ", id(item))
print("TYPE: ", type(item))
print("VALUE: ", repr(item))
print("CALLABLE:", end=' ')
if isinstance(item, collections.Callable):
print("Yes")
else:
print("No")
if hasattr(item, '__doc__'):
doc = getattr(item, '__doc__')
if doc:
doc = doc.strip() # Remove leading/trailing whitespace.
if short:
doc = doc.split('\n')[0]
print("DOC: ", '\n\t' * (not short), doc)
print("\nMETHODS")
methods = [ getattr( item, m ) for m in dir( item )
if isinstance( getattr( item, m ), collections.Callable) ]
for m in methods:
doc = getattr(m, '__doc__', '')
if doc:
doc = str(doc).strip()
if short:
doc = str(doc).split('\n')[0]
else:
doc = ''
s = "%-15s: " % (getattr(m,'__name__','?')) + '\n\t'*(not short) + doc
if short:
s = clipStr( s, 79 )
print(s)
if hasattr( item, '__dict__'):
print("\nFIELDS")
for k, v in list(item.__dict__.items()):
s = "%-15s: %s" % (k, str(v).strip() )
print(clipStr( s, 79 ))
class PseudoClass(object):
"""
Empty class that raises an ImportError upon creation.
"""
def __new__(cls, *args, **kw):
raise ImportError('Class %r is not available because of missing modules: %r' \
% (cls.__name__, str(cls.error)))
def tryImport( module, cls, old_as=None, namespace=None ):
"""
Try to import a class from a module. If that fails, 'import' a
default class of the same name that raises an exception when used.
:param module: name of the module
:type module: str
    :param cls: name of the class
    :type cls: str
    :param old_as: name the class receives in the target namespace [default: cls]
    :type old_as: str
    :param namespace: namespace for the import [default: globals() ]
    :type namespace: dict
:return: True if import succeeded, False otherwise
:rtype: bool
"""
old_as = old_as or cls
g = namespace or globals()
try:
exec('from %s import %s as %s' % (module, cls, old_as), g)
return True
except ImportError as e:
Cls = type( cls,(PseudoClass,),{'error':e} )
g.update( {old_as: Cls} )
return False
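## Illustrative usage sketch (added; module and class names are hypothetical):
## if tryImport('plotlib', 'Plot', namespace=globals()) returns False, the
## name Plot is still defined here, but instantiating it raises ImportError.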
def tryImportModule( module, old_as=None, namespace=None ):
"""
Try to import a module. If that fails, 'import' a dummy module
of the same name.
NOTE: as of python 3, the namespace returned by globals() cannot
reach out of the tools module -- this method thus always needs to be
called like this:
>>> tryImportModule( 'numpy', namespace=globals() )
:param module: name of the module
    :type module: str
    :param old_as: name the module receives in the target namespace [default: module]
    :type old_as: str
    :param namespace: namespace for the import [default: globals() ]
:type namespace: dict
:return: True if import succeeded, False otherwise
:rtype: bool
"""
old_as = old_as or module
g = namespace or globals()
try:
exec('import %s as %s' % (module, old_as), g)
return True
except ImportError as e:
m = types.ModuleType( old_as, doc='Pseudo module. Import of real one failed.' )
m.error = str(e)
g.update( {old_as: m} )
return False
def gzopen( fname, mode='rt' ):
"""
Open a normal or a gzipped file.
:param fname: file name (can contain ~, .. etc.)
:type fname: str
    :param mode: read/write mode ['rt']
:type mode: str
:return: file handle
:rtype: file or GZipFile
"""
fname = absfile( fname )
if fname[-2:] == 'gz':
return gzip.open( fname, mode )
return open( fname, mode )
def profile( s ):
"""
Profile the given code fragment and report time-consuming method calls.
:param s: python code fragment, example: 'm = PDBModel("3tgi")'
:type s: str
"""
try:
import cProfile as profile
fout = tempDir() + '/profiling.out'
profile.run( s, fout )
## Analyzing
import pstats
p = pstats.Stats(fout)
## long steps and methods calling them
p.sort_stats('cumulative').print_stats(20)
p.print_callers(0.0)
tryRemove( fout )
except ImportError as why:
raise ToolsError('Python profiling modules are not installed.')
#############
## TESTING
#############
import biskit.test as BT
class Test(BT.BiskitTest):
"""Test case"""
def prepare(self):
import tempfile
self.f_pickle = tempfile.mktemp('.pickle')
def cleanUp( self ):
tryRemove( self.f_pickle )
def test_error_reporting( self ):
try:
i = 1/0
except:
if self.local:
print('\nTest error trace:\n' + lastErrorTrace())
self.assertTrue( lastErrorTrace() != '' )
def test_ensure_types(self):
self.assertIsNone( ensure( 'teststr', str ) )
with self.assertRaises(TypeError):
ensure( 'teststr', int )
def test_filehandling(self):
f = absfile('~')
self.assertTrue(osp.exists(f))
self.fpdb = testRoot( '/rec/1A2P.pdb' )
if self.VERBOSITY > 2:
self.log.write('path: ' + self.fpdb)
self.assertTrue( osp.exists(self.fpdb) )
def test_isBinary(self):
        self.assertTrue( isBinary('/usr/bin/nice') )
EXPECT_colors = [16711680, 15493120, 11259136, 5570304, 458570, 63925,
44031, 3298559, 9901055, 16711935]
EXPECT_hexcolors = ['0xFF0000', '0xEC6800', '0xABCD00', '0x54FF00',
'0x06FF4A', '0x00F9B5', '0x00ABFF', '0x3254FF',
'0x9713FF', '0xFF00FF']
def test_colorSpectrum(self):
colors = colorSpectrum(10)
        self.assertEqual(colors, self.EXPECT_colors)
hexcolors = hexColors(10)
self.assertEqual(hexcolors, self.EXPECT_hexcolors)
def test_pickling(self):
d1 = {'a':10, 'b':10.0, 'c':'test'}
dump(d1, self.f_pickle )
d2 = load(self.f_pickle)
self.assertEqual(d1, d2)
if __name__ == '__main__':
BT.localTest()
|
graik/biskit
|
biskit/tools.py
|
Python
|
gpl-3.0
| 33,854
|
[
"PyMOL"
] |
e6fd57d0688304e67236b7bccffefdaacf5bfb4bf9c540176af957be732e86c1
|
""" RHEAS module for retrieving maximum and minimum
temperature from the NCEP Reanalysis stored at the IRI Data Library.
.. module:: ncep
:synopsis: Retrieve NCEP meteorological data
.. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov>
"""
import numpy as np
import datasets
from decorators import netcdf
from datetime import timedelta
def dates(dbname):
dts = datasets.dates(dbname, "wind.ncep")
return dts
@netcdf
def fetch_tmax(dbname, dt, bbox):
"""Downloads maximum temperature from NCEP Reanalysis."""
url = "http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.DAILY/.Diagnostic/.above_ground/.maximum/dods"
varname = "temp"
return url, varname, bbox, dt
@netcdf
def fetch_tmin(dbname, dt, bbox):
"""Downloads minimum temperature from NCEP Reanalysis."""
url = "http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.DAILY/.Diagnostic/.above_ground/.minimum/dods"
varname = "temp"
return url, varname, bbox, dt
@netcdf
def fetch_uwnd(dbname, dt, bbox):
"""Downloads U-component wind speed from NCEP Reanalysis."""
url = "http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.DAILY/.Diagnostic/.above_ground/.u/dods"
varname = "u"
return url, varname, bbox, dt
@netcdf
def fetch_vwnd(dbname, dt, bbox):
"""Downloads U-component wind speed from NCEP Reanalysis."""
url = "http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP-NCAR/.CDAS-1/.DAILY/.Diagnostic/.above_ground/.v/dods"
varname = "v"
return url, varname, bbox, dt
def download(dbname, dts, bbox=None):
"""Downloads NCEP Reanalysis data from IRI data library."""
res = 1.875
tmax, lat, lon, _ = fetch_tmax(dbname, dts, bbox)
tmin, _, _, _ = fetch_tmin(dbname, dts, bbox)
uwnd, _, _, _ = fetch_uwnd(dbname, dts, bbox)
vwnd, _, _, dts = fetch_vwnd(dbname, dts, bbox)
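    # wind speed magnitude from the orthogonal U and V wind components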
wnd = np.sqrt(uwnd**2 + vwnd**2)
tmax -= 273.15
tmin -= 273.15
for t, dt in enumerate([dts[0] + timedelta(tt) for tt in range((dts[-1] - dts[0]).days + 1)]):
datasets.ingest(dbname, "tmax.ncep", tmax[t, :, :], lat, lon, res, dt)
datasets.ingest(dbname, "tmin.ncep", tmin[t, :, :], lat, lon, res, dt)
datasets.ingest(dbname, "wind.ncep", wnd[t, :, :], lat, lon, res, dt)
|
nasa/RHEAS
|
src/datasets/ncep.py
|
Python
|
mit
| 2,291
|
[
"NetCDF"
] |
9420a573e5cc24dbda0b0d9b9db882acd69eb7e391b8a79dab0ccebba49f8c92
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkAVSucdReader(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkAVSucdReader(), 'Reading vtkAVSucd.',
(), ('vtkAVSucd',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkAVSucdReader.py
|
Python
|
bsd-3-clause
| 476
|
[
"VTK"
] |
6b3d3fb1f85ca25b85e9d7e4fd364fe2630490f9f05f6197d5452ad1b4b89bb9
|
# encoding: utf-8
"Module for homogeneous deformation and calculations of elastic constants."
import gtk
from ase.gui.simulation import Simulation
from ase.gui.minimize import MinimizeMixin
from ase.gui.energyforces import OutputFieldMixin
from ase.gui.widgets import oops, pack, AseGuiCancelException
import ase
import numpy as np
scaling_txt = """\
This module is intended for calculating elastic constants by homogeneously
deforming a system."""
help_txt = """
The homogeneous scaling module changes the size of a system by stretching it
along one or more directions. Small amounts of deformation can be used to
calculate elastic constants, large amounts to simulate plastic deformation.
You will have to choose along which axis/axes the deformation is done. Usually,
it only makes sense to deform along axes with periodic boundary conditions. The
<b>amount of deformation</b> is set in the Deformation frame. A scale factor of
e.g. 0.01 means that the system is changed incrementally from being 1% smaller
than the initial configuration to 1% larger. The offset alters this so it is
not symmetric around 0% deformation. A check-box can disable the negative
deformation (compression).
<b>'Atomic relaxations'</b> means that the individual atoms are allowed to move
relative to the unit cell. This is done by performing an energy minimization
for each configuration. You will have to choose the algorithm and minimization
parameters.
During the deformation, a number of steps is taken, with different system sizes.
You can choose to load all configurations into the main window as a movie, to
only load the configuration with the lowest energy, or to keep the original
configuration loaded. <b>Important:</b> If you repeat the calculation by
pressing [Run] a second time, the starting configuration will have changed
unless you keep the original configuration.
"""
class HomogeneousDeformation(Simulation, MinimizeMixin, OutputFieldMixin):
"Window for homogeneous deformation and elastic constants."
def __init__(self, gui):
Simulation.__init__(self, gui)
self.set_title("Homogeneous scaling")
vbox = gtk.VBox()
self.packtext(vbox, scaling_txt)
self.packimageselection(vbox, txt1="", txt2="")
self.start_radio_nth.set_active(True)
pack(vbox, gtk.Label(""))
# Radio buttons for choosing deformation mode.
tbl = gtk.Table(4,3)
for i, l in enumerate(('3D', '2D', '1D')):
l = l + " deformation "
lbl = gtk.Label(l)
tbl.attach(lbl, i, i+1, 0, 1)
self.radio_bulk = gtk.RadioButton(None, "Bulk")
tbl.attach(self.radio_bulk, 0, 1, 1, 2)
self.radio_xy = gtk.RadioButton(self.radio_bulk, "xy-plane")
tbl.attach(self.radio_xy, 1, 2, 1, 2)
self.radio_xz = gtk.RadioButton(self.radio_bulk, "xz-plane")
tbl.attach(self.radio_xz, 1, 2, 2, 3)
self.radio_yz = gtk.RadioButton(self.radio_bulk, "yz-plane")
tbl.attach(self.radio_yz, 1, 2, 3, 4)
self.radio_x = gtk.RadioButton(self.radio_bulk, "x-axis")
tbl.attach(self.radio_x, 2, 3, 1, 2)
self.radio_y = gtk.RadioButton(self.radio_bulk, "y-axis")
tbl.attach(self.radio_y, 2, 3, 2, 3)
self.radio_z = gtk.RadioButton(self.radio_bulk, "z-axis")
tbl.attach(self.radio_z, 2, 3, 3, 4)
tbl.show_all()
pack(vbox, [tbl])
self.deformtable = [
(self.radio_bulk, (1,1,1)),
(self.radio_xy, (1,1,0)),
(self.radio_xz, (1,0,1)),
(self.radio_yz, (0,1,1)),
(self.radio_x, (1,0,0)),
(self.radio_y, (0,1,0)),
(self.radio_z, (0,0,1))]
self.allow_non_pbc = gtk.CheckButton(
"Allow deformation along non-periodic directions.")
pack(vbox, [self.allow_non_pbc])
self.allow_non_pbc.connect('toggled', self.choose_possible_deformations)
# Parameters for the deformation
framedef = gtk.Frame("Deformation:")
vbox2 = gtk.VBox()
vbox2.show()
framedef.add(vbox2)
self.max_scale = gtk.Adjustment(0.010, 0.001, 10.0, 0.001)
max_scale_spin = gtk.SpinButton(self.max_scale, 10.0, 3)
pack(vbox2, [gtk.Label("Maximal scale factor: "), max_scale_spin])
self.scale_offset = gtk.Adjustment(0.0, -10.0, 10.0, 0.001)
self.scale_offset_spin = gtk.SpinButton(self.scale_offset, 10.0, 3)
pack(vbox2, [gtk.Label("Scale offset: "), self.scale_offset_spin])
self.nsteps = gtk.Adjustment(5, 3, 1000, 1)
nsteps_spin = gtk.SpinButton(self.nsteps, 1, 0)
pack(vbox2, [gtk.Label("Number of steps: "), nsteps_spin])
self.pull = gtk.CheckButton("Only positive deformation")
pack(vbox2, [self.pull])
self.pull.connect('toggled', self.pull_toggled)
# Atomic relaxations
framerel = gtk.Frame("Atomic relaxations:")
vbox2 = gtk.VBox()
vbox2.show()
framerel.add(vbox2)
self.radio_relax_on = gtk.RadioButton(None, "On ")
self.radio_relax_off = gtk.RadioButton(self.radio_relax_on, "Off")
self.radio_relax_off.set_active(True)
pack(vbox2, [self.radio_relax_on, self.radio_relax_off])
self.make_minimize_gui(vbox2)
for r in (self.radio_relax_on, self.radio_relax_off):
r.connect("toggled", self.relax_toggled)
self.relax_toggled()
pack(vbox, [framedef, gtk.Label(" "), framerel])
pack(vbox, gtk.Label(""))
# Results
pack(vbox, [gtk.Label("Results:")])
self.radio_results_keep = gtk.RadioButton(
None, "Keep original configuration")
self.radio_results_optimal = gtk.RadioButton(
self.radio_results_keep, "Load optimal configuration")
self.radio_results_all = gtk.RadioButton(
self.radio_results_optimal, "Load all configurations")
self.radio_results_keep.set_active(True)
pack(vbox, [self.radio_results_keep])
pack(vbox, [self.radio_results_optimal])
pack(vbox, [self.radio_results_all])
# Output field
#label = gtk.Label("Strain\t\tEnergy [eV]\n")
outframe = self.makeoutputfield(None, heading="Strain\t\tEnergy [eV]")
fitframe = gtk.Frame("Fit:")
vbox2 = gtk.VBox()
vbox2.show()
fitframe.add(vbox2)
self.radio_fit_2 = gtk.RadioButton(None, "2nd")
self.radio_fit_3 = gtk.RadioButton(self.radio_fit_2, "3rd")
self.radio_fit_2.connect("toggled", self.change_fit)
self.radio_fit_3.connect("toggled", self.change_fit)
self.radio_fit_3.set_active(True)
pack(vbox2, [gtk.Label("Order of fit: "), self.radio_fit_2,
self.radio_fit_3])
pack(vbox2, [gtk.Label("")])
scrwin = gtk.ScrolledWindow()
scrwin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.fit_output = gtk.TextBuffer()
txtview = gtk.TextView(self.fit_output)
txtview.set_editable(False)
scrwin.add(txtview)
scrwin.show_all()
self.fit_win = scrwin
vbox2.pack_start(scrwin, True, True, 0)
hbox = gtk.HBox(homogeneous=True)
for w in [outframe, fitframe]:
hbox.pack_start(w)
w.show()
pack(vbox, hbox)
pack(vbox, gtk.Label(""))
# Status field
self.status_label = gtk.Label("")
pack(vbox, [self.status_label])
# Activate the right deformation buttons
self.choose_possible_deformations(first=True)
# Run buttons etc.
self.makebutbox(vbox, helptext=help_txt)
vbox.show()
self.add(vbox)
self.show()
self.gui.register_vulnerable(self)
def choose_possible_deformations(self, widget=None, first=False):
"""Turn on sensible radio buttons.
Only radio buttons corresponding to deformations in directions
with periodic boundary conditions should be turned on.
"""
if self.setup_atoms():
pbc = self.atoms.get_pbc()
else:
pbc = np.array([False, False, False], bool)
if (pbc == [True, True, True]).all():
self.allow_non_pbc.set_active(False)
self.allow_non_pbc.set_sensitive(False)
else:
self.allow_non_pbc.set_sensitive(True)
if self.allow_non_pbc.get_active():
pbc = [True, True, True] #All is allowed
self.radio_relax_off.set_active(True)
self.radio_relax_on.set_sensitive(False)
else:
self.radio_relax_on.set_sensitive(True)
for radio, requirement in self.deformtable:
ok = True
for i in range(3):
if requirement[i] and not pbc[i]:
ok = False
radio.set_sensitive(ok)
if first and ok:
# The first acceptable choice, choose it to prevent
# inconsistent state.
radio.set_active(True)
first = False
def relax_toggled(self, *args):
"Turn minimization widgets on or off."
state = self.radio_relax_on.get_active()
for widget in (self.algo, self.fmax_spin, self.steps_spin):
widget.set_sensitive(state)
def pull_toggled(self, *args):
"When positive def. only, the scale offset is turned off."
self.scale_offset_spin.set_sensitive(not self.pull.get_active())
def notify_atoms_changed(self):
"When atoms have changed, check for the number of images."
self.setupimageselection()
self.choose_possible_deformations()
def get_deformation_axes(self):
"""Return which axes the user wants to deform along."""
for but, deform in self.deformtable:
if but.get_active():
return np.array(deform)
# No deformation chosen!
oops("No deformation chosen: Please choose a deformation mode.")
return False
def run(self, *args):
"""Make the deformation."""
self.output.set_text("")
if not self.setup_atoms():
return
deform_axes = self.get_deformation_axes()
if deform_axes is False:
return #Nothing to do!
# Prepare progress bar
if self.radio_relax_on.get_active():
fmax = self.fmax.value
mininame = self.minimizers[self.algo.get_active()]
self.begin(mode="scale/min", algo=mininame, fmax=fmax,
steps=self.steps.value, scalesteps=self.nsteps.value)
else:
self.begin(mode="scale", scalesteps=self.nsteps.value)
try:
logger_func = self.gui.simulation['progress'].get_logger_stream
except (KeyError, AttributeError):
logger = None
else:
logger = logger_func() # Don't catch errors in the function.
# Display status message
self.status_label.set_text("Running ...")
self.status_label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse('#AA0000'))
while gtk.events_pending():
gtk.main_iteration()
# Do the scaling
scale = self.max_scale.value
if self.pull.get_active():
steps = np.linspace(0, scale, self.nsteps.value)
else:
steps = np.linspace(-scale, scale, self.nsteps.value)
steps += self.scale_offset.value
undef_cell = self.atoms.get_cell()
results = []
#txt = "Strain\t\tEnergy [eV]\n"
txt = ""
# If we load all configurations, prepare it.
if self.radio_results_all.get_active():
self.prepare_store_atoms()
stored_atoms = False
try:
# Now, do the deformation
for i, d in enumerate(steps):
deformation = np.diag(1.0 + d * deform_axes)
self.atoms.set_cell(np.dot(undef_cell, deformation),
scale_atoms=True)
if self.gui.simulation.has_key('progress'):
self.gui.simulation['progress'].set_scale_progress(i)
if self.radio_relax_on.get_active():
algo = getattr(ase.optimize, mininame)
if mininame == "MDMin":
minimizer = algo(self.atoms, logfile=logger,
dt=self.mdmin_dt.value)
else:
minimizer = algo(self.atoms, logfile=logger)
minimizer.run(fmax=fmax, steps=self.steps.value)
e = self.atoms.get_potential_energy()
results.append((d, e))
txt = txt + ("%.5f\t\t%.5f\n" % (d, e))
self.output.set_text(txt)
if self.radio_results_all.get_active():
self.store_atoms()
stored_atoms = True
except AseGuiCancelException:
# Update display to reflect cancellation of simulation.
self.status_label.set_text("Calculation CANCELLED.")
self.status_label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse('#AA4000'))
except MemoryError:
self.status_label.set_text("Out of memory, consider using LBFGS instead")
self.status_label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse('#AA4000'))
else:
            # Update display to reflect successful end of simulation.
self.status_label.set_text("Calculation completed.")
self.status_label.modify_fg(gtk.STATE_NORMAL,
gtk.gdk.color_parse('#007700'))
if results:
self.do_fit(np.array(results))
if self.radio_results_optimal.get_active():
if self.minimum_ok:
deformation = np.diag(1.0 + self.x0 * deform_axes)
self.atoms.set_cell(np.dot(undef_cell, deformation),
scale_atoms=True)
if self.radio_relax_on.get_active():
if self.gui.simulation.has_key('progress'):
self.gui.simulation['progress'].set_scale_progress(
len(steps))
algo = getattr(ase.optimize, mininame)
minimizer = algo(self.atoms, logfile=logger)
minimizer.run(fmax=fmax, steps=self.steps.value)
# Store the optimal configuration.
self.prepare_store_atoms()
self.store_atoms()
stored_atoms = True
else:
oops("No trustworthy minimum: Old configuration kept.")
self.activate_output()
if stored_atoms:
self.gui.notify_vulnerable()
self.end()
# If we store all configurations: Open movie window and energy graph
if stored_atoms and self.gui.images.nimages > 1:
self.gui.movie()
assert not np.isnan(self.gui.images.E[0])
if not self.gui.plot_graphs_newatoms():
expr = 'i, e - E[-1]'
self.gui.plot_graphs(expr=expr)
# Continuations should use the best image
nbest = np.argmin(np.array(results)[:,1])
self.start_nth_adj.value = nbest
def change_fit(self, widget):
"Repeat the fitting if the order is changed."
# It may be called both for the button being turned on and the
# one being turned off. But we only want to call do_fit once.
# And only if there are already cached results (ie. if the
# order is changed AFTER the calculation is done).
if widget.get_active() and getattr(self, "results", None) is not None:
self.do_fit(None)
def do_fit(self, results):
"Fit the results to a polynomial"
if results is None:
results = self.results # Use cached results
else:
self.results = results # Keep for next time
self.minimum_ok = False
if self.radio_fit_3.get_active():
order = 3
else:
order = 2
if len(results) < 3:
txt = ("Insufficent data for a fit\n(only %i data points)\n"
% (len(results),) )
order = 0
elif len(results) == 3 and order == 3:
txt = "REVERTING TO 2ND ORDER FIT\n(only 3 data points)\n\n"
order = 2
else:
txt = ""
if order > 0:
fit0 = np.poly1d(np.polyfit(results[:,0], results[:,1], order))
fit1 = np.polyder(fit0, 1)
fit2 = np.polyder(fit1, 1)
x0 = None
for t in np.roots(fit1):
if fit2(t) > 0:
x0 = t
break
if x0 is None:
txt = txt + "No minimum found!"
else:
e0 = fit0(x0)
e2 = fit2(x0)
txt += "E = "
if order == 3:
txt += "A(x - x0)³ + "
txt += "B(x - x0)² + C\n\n"
txt += "B = %.5g eV\n" % (e2,)
txt += "C = %.5g eV\n" % (e0,)
txt += "x0 = %.5g\n" % (x0,)
lowest = self.scale_offset.value - self.max_scale.value
highest = self.scale_offset.value + self.max_scale.value
if x0 < lowest or x0 > highest:
txt += "\nWARNING: Minimum is outside interval\n"
txt += "It is UNRELIABLE!\n"
else:
self.minimum_ok = True
self.x0 = x0
self.fit_output.set_text(txt)
|
slabanja/ase
|
ase/gui/scaling.py
|
Python
|
gpl-2.0
| 18,083
|
[
"ASE"
] |
48401915d4c0487395d662c7fde084d43a3f524682cd361433d4e7b59a95aa71
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
#!/usr/bin/env python
from __future__ import division, unicode_literals
"""
This module has been moved to pymatgen.io.gaussian. This submodule will
be removed in pymatgen 4.0.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
warnings.warn("pymatgen.io.gaussianio has been moved pymatgen.io.gaussian. "
"This stub will be removed in pymatgen 4.0.")
from .gaussian import *
|
migueldiascosta/pymatgen
|
pymatgen/io/gaussianio.py
|
Python
|
mit
| 662
|
[
"Gaussian",
"pymatgen"
] |
e19b29c26f02edd41530df487af26a455f3e7f7a8e98e670e23ccf440f3a592f
|
# Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013-2015 SCoT Development Team
import unittest
import numpy as np
from numpy.testing import assert_allclose
from scot.varbase import VARBase as VAR
from scot.datatools import acm
epsilon = 1e-10
class TestVAR(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def generate_data(self, cc=((1, 0), (0, 1))):
var = VAR(2)
var.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
l = (1000, 100)
x = var.simulate(l, lambda: np.random.randn(2).dot(cc))
self.assertEqual(x.shape, (l[1], 2, l[0]))
return x, var
def test_abstract(self):
self.assertRaises(NotImplementedError, VAR(1).fit, [None])
self.assertRaises(NotImplementedError, VAR(1).optimize, [None])
def test_simulate(self):
noisefunc = lambda: [1, 1] # use deterministic function instead of noise
num_samples = 100
b = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
var = VAR(2)
var.coef = b
np.random.seed(42)
x = var.simulate(num_samples, noisefunc)
self.assertEqual(x.shape, (1, b.shape[0], num_samples))
# make sure we got expected values within reasonable accuracy
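        # i.e. each sample obeys x[n] = 1 + B1*x[n-1] + B2*x[n-2],
        # where B1 = b[:, 0::2] and B2 = b[:, 1::2] (added comment)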
for n in range(10, num_samples):
self.assertTrue(np.all(
np.abs(x[0, :, n] - 1
- np.dot(x[0, :, n - 1], b[:, 0::2].T)
- np.dot(x[0, :, n - 2], b[:, 1::2].T)) < 1e-10))
def test_predict(self):
np.random.seed(777)
x, var = self.generate_data()
z = var.predict(x)
self.assertTrue(np.abs(np.var(x[:, :, 100:] - z[:, :, 100:]) - 1) < 0.005)
def test_yulewalker(self):
np.random.seed(7353)
x, var0 = self.generate_data([[1, 2], [3, 4]])
acms = [acm(x, l) for l in range(var0.p+1)]
var = VAR(var0.p)
var.from_yw(acms)
assert_allclose(var0.coef, var.coef, rtol=1e-2, atol=1e-2)
# that limit is rather generous, but we don't want tests to fail due to random variation
self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))
self.assertTrue(np.all(np.abs(var0.rescov - var.rescov) < 0.02))
def test_whiteness(self):
np.random.seed(91)
r = np.random.randn(80, 15, 100) # gaussian white noise
r0 = r.copy()
var = VAR(0, n_jobs=-1)
var.residuals = r
p = var.test_whiteness(20, random_state=1)
self.assertTrue(np.all(r == r0)) # make sure we don't modify the input
self.assertGreater(p, 0.01) # test should be non-significant for white noise
r[:, 1, 3:] = r[:, 0, :-3] # create cross-correlation at lag 3
p = var.test_whiteness(20)
self.assertLessEqual(p, 0.01) # now test should be significant
def test_stable(self):
var = VAR(1)
# Stable AR model -- rule of thumb: sum(coefs) < 1
var.coef = np.asarray([[0.5, 0.3]])
self.assertTrue(var.is_stable())
# Unstable AR model -- rule of thumb: sum(coefs) > 1
var.coef = np.asarray([[0.5, 0.7]])
self.assertFalse(var.is_stable())
|
scot-dev/scot
|
scot/tests/test_var.py
|
Python
|
mit
| 3,293
|
[
"Gaussian"
] |
eab7901d1d5b202a7a3faea0648b4a43c9836cd925b38bd4575301f0c5d54437
|
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from astropy.io import fits
from astropy.time import Time
from PyAstronomy import pyasl
from scipy import ndimage
import pandas as pd
import gaussfitter as gf
from cycler import cycler
'''
Functions used in BF_python.py
Read the damn comments
(I'm sorry there aren't more objects)
'''
##Diana's function to make the plots look nice##
def user_rc(lw=1.5):
"""Set plotting RC parameters"""
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Change some of the default line-widths, font sizes for xticks, labels, titles,
# and the color cycle to tableau20
plt.rc('lines', linewidth=lw)
plt.rc('font', size=14, weight='normal')
plt.rc('xtick', labelsize=14)
plt.rc('xtick.major', size=6, width=1)
plt.rc('axes', prop_cycle=cycler(c=tableau20), lw=1, labelsize=18, titlesize=22)
#plt.rc('axes', color_cycle=tableau20, lw=1, labelsize=18, titlesize=22)
return tableau20
def logify_spec(isAPOGEE=False, w00=5400, n=38750, stepV=1.7, m=171):
# The new log-wavelength array will be w1. it will have equal spacing in velocity.
# Specify reasonable values when you call this function or else bad things will happen.
### GUIDELINES FOR CHOOSING GOOD INPUT VALUES ###
# good APOGEE values
# w00 = 15145 # starting wavelength of the log-wave array in Angstroms
# n = 20000 # desired length of the log-wave vector in pixels (must be EVEN)
# good ARCES values
# w00 = 5400 # starting wavelength of the log-wave array in Angstroms
# n = 38750 # desired length of the log-wave vector in pixels (must be EVEN)
# stepV = 1.7 # step in velocities in the wavelength vector w1
# m = 171 # length of BF (must be ODD)
### GUIDELINES FOR CHOOSING GOOD INPUT VALUES ###
    r = stepV/299792.458 # stepV [km/s] divided by c [km/s] -> fractional step per pixel
w1 = w00 * np.power((1+r), np.arange(float(n)))
print('The new log-wavelength scale will span %d - %d A with stepsize %f km/s.' % (w1[0], w1[-1], stepV))
print(' ')
return w1, m, r
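# Illustrative usage sketch (added; the values are the ARCES defaults
# documented above):
# w1, m, r = logify_spec(w00=5400, n=38750, stepV=1.7, m=171)
# Consecutive pixels then differ by a constant velocity step, since
# w1[k+1]/w1[k] = 1 + stepV/c for every k.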
def read_one_specfile(infile = 'myspectrum.txt', isAPOGEE = False):
'''
Read in a single FITS or txt spectrum file
(Bare-bones version of read_specfiles, below)
Requires infile, isAPOGEE
Returns wave, spec
'''
if infile[-3:] == 'txt':
try:
wave, spec = np.loadtxt(open(infile), comments='#', usecols=(0,1), unpack=True)
print('Text file {0}, isAPOGEE = {1}'.format(infile[-15:], isAPOGEE))
except:
raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
if isAPOGEE == True: # we need to sort by wavelength
spec = spec[np.argsort(wave)]
wave = wave[np.argsort(wave)]
elif infile[-4:] == 'fits' or infile[-4:] == 'FITS':
# assume it's a FITS file
# Read in the FITS file with all the data in the primary HDU
try:
hdu = fits.open(infile)
except:
print('{0} not found or cannot be opened'.format(infile))
else:
head = hdu[0].header
try: datetime = head['date-obs']
except: datetime = head['date']
print('FITS file {0}, isAPOGEE = {1}, header date {2}'.format(infile[-17:], isAPOGEE, datetime))
if isAPOGEE == True: # APOGEE: the data is in a funny place and backwards
spec = hdu[1].data
spec = spec.flatten()
spec = spec[::-1]
else: # non-APOGEE (regular) option
spec = hdu[0].data
# Define the original wavelength scale
if isAPOGEE == True: # APOGEE: read wavelength values straight from FITS file
wave = hdu[4].data
wave = wave.flatten()
wave = wave[::-1]
else: # non-APOGEE (linear): create wavelength values from header data
headerdwave = head['cdelt1']
headerwavestart = head['crval1']
headerwavestop = headerwavestart + headerdwave*len(spec)
wave = np.arange(headerwavestart, headerwavestop, headerdwave)
if len(wave) != len(spec): # The wave array is sometimes 1 longer than it should be?
minlength = min(len(wave), len(spec))
wave = wave[0:minlength]
spec = spec[0:minlength]
try: # check to see if we have a file with log angstroms
logcheck = head['dispunit']
except:
logcheck = 'linear' # assume linear if no 'dispunit' is in header
if logcheck == 'log angstroms':
wave = np.power(10,wave) # make it linear
else:
print('File does not end in \'txt\' or \'fits\', no spectrum loaded.')
wave = []; spec = []
return wave, spec
def read_specfiles(infiles = 'infiles_BF.txt', bjdinfile = 'bjds_baryvels.txt', isAPOGEE = False):
'''
Read in some FITS or TXT files that are spectra and may or may not be APOGEE
Requires infiles, bjdinfile, isAPOGEE
Returns nspec, filenamelist, datetimelist, wavelist, speclist
'''
f1 = open(infiles)
print('Reading the files listed in %s' % infiles)
#print('The first one had better be your template spectrum.')
print(' ')
speclist = []; wavelist = []
filenamelist = []; datetimelist = []
if isAPOGEE == False:
        checkAPOGEE = True  # not all infiles are APOGEE, but let's check in case *some* are
    else:
        checkAPOGEE = False  # all the infiles are APOGEE, so we don't have to search
i = 0 # keep track of which spectrum we're on
for line in f1: # This loop happens once for each spectrum
infile = os.path.expandvars(line.rstrip())
if checkAPOGEE == True: # check to see if a subset of infiles are from APOGEE or not
if 'apogee' in infile or 'APOGEE' in infile: isAPOGEE = True
else: isAPOGEE = False
if infile[-4:] == 'fits' or infile[-4:] == 'FITS':
# assume it's a FITS file
try:
hdu = fits.open(infile)
head = hdu[0].header
filenamelist.append(infile)
try: datetime = head['date-obs']
except: datetime = head['date']
datetimelist.append(Time(datetime, scale='utc', format='isot'))
print('FITS file {0}, isAPOGEE = {1}, header date {2}'.format(infile[-17:], isAPOGEE, datetime))
except:
raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
# it's time to dig out the spectral (flux) data and the wavelength scale!
if isAPOGEE == True: # APOGEE: the data is in a funny place and backwards
wave, spec = ProcessAPOGEEFITS(hdu)
else: # not APOGEE
spec = hdu[0].data # hope the info we want is in the zeroth HDU
try:
headerdwave = head['cdelt1']
headerwavestart = head['crval1']
headerwavestop = headerwavestart + headerdwave*len(spec)
wave = np.arange(headerwavestart, headerwavestop, headerdwave)
except:
raise RuntimeError('Cannot find wavelength info in FITS header')
if len(wave) != len(spec): # the wave array is sometimes 1 longer than it should be?
minlength = min(len(wave), len(spec))
wave = wave[0:minlength]
spec = spec[0:minlength]
try: # check to see if we have a file with log angstroms
logcheck = head['dispunit']
except:
logcheck = 'linear' # hopefully, at least
if logcheck == 'log angstroms':
wave = np.power(10, wave) # make it linear
#spec = spec / np.median(spec) # WARNING really basic, possibly bad normalization
else: # treat it like a text file
filenamelist.append(infile)
try:
datetime = np.loadtxt(bjdinfile, comments='#', usecols=(1,), unpack=True)[i]
except:
raise RuntimeError('Your bjdinfile {0} is not formatted correctly'.format(bjdinfile))
datetimelist.append(Time(datetime, scale='utc', format='jd'))
try:
wave, spec = np.loadtxt(open(infile), comments='#', usecols=(0,1), unpack=True)
print('Text file {0}, isAPOGEE = {1}, bjdinfile date {2}'.format(infile[-17:], isAPOGEE, datetime))
except:
raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
            if isAPOGEE == True: # we need to sort by wavelength, just in case it hasn't been
spec = spec[np.argsort(wave)]
wave = wave[np.argsort(wave)]
# if infile[0:5] == 'trans': # you have a model telluric spectrum in nm, not A
# print("Assuming this is a telluric spectrum in nm, not A, proceed with caution")
# wave = wave*10
# at the end of this mess, we have one file's WAVE and corresponding SPEC - save it!
wavelist.append(wave)
speclist.append(spec)
i = i + 1 # increment the spectrum counter
# save the total number of spectra
nspec = i
f1.close()
return nspec, filenamelist, datetimelist, wavelist, speclist
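# Illustrative usage sketch (added; the input file names are the documented
# defaults, not guaranteed to exist):
# nspec, names, times, waves, specs = read_specfiles('infiles_BF.txt',
#                                                    'bjds_baryvels.txt')
# waves[0] and specs[0] then hold the template spectrum listed first.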
def ProcessAPOGEEFITS(hdu):
'''
Turns an APOGEE FITS hdu into a pair of wavelength and spectrum ndarrays
'''
spec = hdu[1].data
spec = spec.flatten()
spec = spec[::-1]
spec = spec / np.median(spec) # WARNING really basic, possibly bad normalization
wave = hdu[4].data
wave = wave.flatten()
wave = wave[::-1]
return wave, spec
def gaussparty(gausspars, nspec, filenamelist, bfsmoothlist, bf_ind, amplimits, threshold, widlimits):
'''
Fits 2 or 3 gaussians to some data
'''
param = []
with open(gausspars) as f1:
for line in f1:
if line[0] != '#':
param.append( line.rstrip() )
#param = np.loadtxt(gausspars, comments='#')
bffitlist = []
bffitlist.append(0)
gauss1 = [[] for i in range(nspec)]
gauss2 = [[] for i in range(nspec)]
gauss3 = [[] for i in range(nspec)]
gauss1[0] = [0,0]
gauss2[0] = [0,0]
gauss3[0] = [0,0]
error_array = np.ones(len(bfsmoothlist[0]))*0.01 # dummy array with 0.01 error values
print(' ')
print('Gaussian fit results: peak amplitude, width, rvraw, rvraw_err')
print ('-------------------------------------------------------------')
for i in range(1, nspec):
# check to see if we are fitting a third gaussian, i.e., one near zero
# don't print out the result of this fit, but do return it for plotting
# handle comments in gausspars file without exploding
if '#' in param[i]:
commentbegin = param[i].find('#')
partest = param[i][0:commentbegin].split()
else:
partest = param[i].split()
if len(partest) == 6: ngauss = 2
elif len(partest) == 9: ngauss = 3
else: print('something is wrong with your gausspars file!')
minpars = [amplimits[0], float(partest[1])-threshold, widlimits[0]]
maxpars = [amplimits[1], float(partest[1])+threshold, widlimits[1]]
minpars.extend([amplimits[2], float(partest[4])-threshold, widlimits[2]])
maxpars.extend([amplimits[3], float(partest[4])+threshold, widlimits[3]])
if ngauss == 2:
bffit = gf.multigaussfit(bf_ind, bfsmoothlist[i], ngauss=ngauss,
params=partest, err=error_array,
limitedmin=[True,True,True], limitedmax=[True,True,True],
minpars=minpars, maxpars=maxpars, quiet=True, shh=True)
elif ngauss == 3:
# min and max pars for peak 3: amp, rv, width
# this will not work if len(amplimits) < 6 or len(widlimits) < 6
minpars.extend([amplimits[4], float(partest[7])-threshold, widlimits[4]])
maxpars.extend([amplimits[5], float(partest[7])+threshold, widlimits[5]])
bffit = gf.multigaussfit(bf_ind, bfsmoothlist[i], ngauss=ngauss,
params=partest, err=error_array,
limitedmin=[True,True,True], limitedmax=[True,True,True],
minpars=minpars, maxpars=maxpars, quiet=True, shh=True)
newbffit = [[] for x in range(len(bffit))]
# Sometimes bffit[2] is None, or contains None. Set it to zeros instead.
try:
if not any(bffit[2]): # this will fail if bffit[2] = None
newbffit[0] = bffit[0]
newbffit[1] = bffit[1]
newbffit[2] = [0, 0, 0, 0, 0]
else:
newbffit = bffit
except:
print('WARNING - gaussfit is acting up, fit failed for the next row, adjust gausspars file:')
if not bffit[2]: # this catches the case where bffit[2] = None
newbffit[0] = bffit[0]
newbffit[1] = bffit[1]
newbffit[2] = [0, 0, 0, 0, 0]
else:
newbffit = bffit
bffitlist.append(newbffit)
# NOTE: to get the gaussian fit corresponding to bfsmoothlist[i], use bffitlist[i][1].
# RV1 for observation i is bffitlist[i][0][1] +/- bffitlist[i][2][1].
# RV2 for observation i is bffitlist[i][0][4] +/- bffitlist[i][2][4].
# (note: need to check if bffit[2] == None before calling bffit[2][1] or bffit[2][4])
if ngauss == 2:
print('{0:s} {1:.3f} {2:.2f} {3:.4f} {4:.4f} \t {5:.3f} {6:.2f} {7:.4f} {8:.4f}'.format(
filenamelist[i][-20:], newbffit[0][0], newbffit[0][2], newbffit[0][1], newbffit[2][1],
newbffit[0][3], newbffit[0][5], newbffit[0][4], newbffit[2][4]))
elif ngauss == 3:
print('{0:s} {1:.3f} {2:.2f} {3:.4f} {4:.4f} \t {5:.3f} {6:.2f} {7:.4f} {8:.4f} \t {9:.3f} {10:.2f} {11:.4f} {12:.4f}'.format(
filenamelist[i][-20:], newbffit[0][0], newbffit[0][2], newbffit[0][1], newbffit[2][1],
newbffit[0][3], newbffit[0][5], newbffit[0][4], newbffit[2][4], newbffit[0][6], newbffit[0][8], newbffit[0][7], newbffit[2][7]))
print(' ')
print('You MUST manually guesstimate the location of each Gaussian\'s peak in %s!' % gausspars)
print('Until you do, the above values will be WRONG and the plot will look TERRIBLE.')
print(' ')
return bffitlist
def rvphasecalc(bjdinfile, bjdoffset, nspec, period, BJD0, rvrawlist, rvstd, bcvstd):
rvraw1 = rvrawlist[0]; rvraw1_err = rvrawlist[1]; rvraw2 = rvrawlist[2]; rvraw2_err = rvrawlist[3]
rvraw3 = rvrawlist[4]; rvraw3_err = rvrawlist[5]
rv1 = []; rv2 = []; rv3 = []
rv1.append(0); rv2.append(0); rv3.append(0)
rv1_err = []; rv2_err = []; rv3_err = []
rv1_err.append(0); rv2_err.append(0); rv3_err.append(0)
g1 = open(bjdinfile)
#g2 = open(outfile, 'w')
print('Calculating RVs...')
bjdmid, bcv = np.loadtxt(g1, comments='#', usecols=(1,2), unpack=True)
bjdfunny = bjdmid - bjdoffset
phase = []
phase.append(0)
for i in range(1, nspec):
fracP = (bjdmid[i] - BJD0) / period
        if fracP < 0:
            # Python's % already yields a remainder in [0, 1) for negative
            # fracP, so no extra +1 offset is needed when folding the phase.
            phase.append(fracP % 1)
            cycle = int(fracP) - 1
        else:
            phase.append(fracP % 1)
            cycle = int(fracP)
rv1.append(rvraw1[i] + bcv[i] - rvstd - bcvstd) # DON'T MESS UP THE +/- SIGNS
rv2.append(rvraw2[i] + bcv[i] - rvstd - bcvstd)
if rvraw3[i] is not None:
rv3.append(rvraw3[i] + bcv[i] - rvstd - bcvstd)
else:
rv3.append(None)
rv1_err.append(rvraw1_err[i])
rv2_err.append(rvraw2_err[i])
if rvraw3[i] is not None:
rv3_err.append(rvraw3_err[i])
else:
rv3_err.append(None)
rvfinals = [rv1, rv1_err, rv2, rv2_err, rv3, rv3_err]
#print ('%.9f %.9f %.9f %.5f %.5f %.5f %.5f' % (bjdmid[i], phase[i], bjdfunny[i],
# rv1[i], rv1_err[i], rv2[i], rv2_err[i]), file=g2)
g1.close()
#g2.close()
print(' ')
#print('BJD, phase, and RVs written to %s.' % outfile)
#print('Use rvplotmaker.py to plot the RV curve.')
return phase, bjdfunny, rvfinals
|
savvytruffle/cauldron
|
rvs/BF_functions.py
|
Python
|
mit
| 17,121
|
[
"Gaussian"
] |
e5f270e4561d995ae74ef6bce7fa1311908b10b9bb285a3610126e0ee2854d00
|
# Tests that multiple views can render and save images correctly in batch.
from paraview import smtesting
smtesting.ProcessCommandLineArguments()
from paraview.simple import *
v1 = CreateRenderView()
Sphere()
Show()
Render()
from paraview.vtk.vtkTestingRendering import vtkTesting
import os.path
prefix, ext = os.path.splitext(smtesting.BaselineImage)
baseline1 = "%s_render_view" % prefix + ext
baseline2 = "%s_chart_view" % prefix + ext
testing1 = vtkTesting()
testing1.AddArgument("-T")
testing1.AddArgument(smtesting.TempDir)
testing1.AddArgument("-V")
testing1.AddArgument(baseline1)
testing2 = vtkTesting()
testing2.AddArgument("-T")
testing2.AddArgument(smtesting.TempDir)
testing2.AddArgument("-V")
testing2.AddArgument(baseline2)
v2 = CreateXYPlotView()
for i in [0, 1, 2]:
Render(v1)
filename = "%s/view1_%d.png" % (smtesting.TempDir, i)
WriteImage(filename, v1)
if testing1.RegressionTest(filename, smtesting.Threshold) != testing1.PASSED:
        raise RuntimeError("Failed image comparison for view 1 on run #%d" % i)
Render(v2)
filename = "%s/view2_%d.png" % (smtesting.TempDir, i)
WriteImage(filename, v2)
if testing2.RegressionTest(filename, smtesting.Threshold) != testing2.PASSED:
        raise RuntimeError("Failed image comparison for view 2 on run #%d" % i)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/ParaViewCore/ServerManager/Default/Testing/Python/MultiView.py
|
Python
|
gpl-3.0
| 1,316
|
[
"ParaView",
"VTK"
] |
421e4d45ba64b948e1bb3a3a97bd33e54f839226516c6e687708b9aad28d676b
|
import os, time
from wslink import register as exportRpc
from paraview import simple, servermanager
from paraview.web import protocols as pv_protocols
from vtkmodules.vtkCommonCore import vtkUnsignedCharArray, vtkCollection
from vtkmodules.vtkCommonDataModel import vtkImageData
from vtkmodules.vtkPVClientServerCoreRendering import vtkPVRenderView
from vtkmodules.vtkPVServerManagerRendering import vtkSMPVRepresentationProxy, vtkSMTransferFunctionProxy, vtkSMTransferFunctionManager
from vtkmodules.vtkWebCore import vtkDataEncoder
class ParaViewLite(pv_protocols.ParaViewWebProtocol):
def __init__(self, **kwargs):
super(pv_protocols.ParaViewWebProtocol, self).__init__()
self.lineContext = None
@exportRpc("paraview.lite.proxy.name")
def getProxyName(self, pid):
proxy = self.mapIdToProxy(pid)
if not proxy:
return {
'id': pid,
'error': 'No proxy for id %s' % pid,
}
return {
'id': pid,
'group': proxy.GetXMLGroup(),
'name': proxy.GetXMLName(),
'label': proxy.GetXMLLabel(),
}
@exportRpc("paraview.lite.camera.get")
def getCamera(self, viewId):
view = self.getView(viewId)
bounds = [-1, 1, -1, 1, -1, 1]
if view and view.GetClientSideView().GetClassName() == 'vtkPVRenderView':
rr = view.GetClientSideView().GetRenderer()
bounds = rr.ComputeVisiblePropBounds()
return {
'id': viewId,
'bounds': bounds,
'position': tuple(view.CameraPosition),
'viewUp': tuple(view.CameraViewUp),
'focalPoint': tuple(view.CameraFocalPoint),
'centerOfRotation': tuple(view.CenterOfRotation),
}
@exportRpc("paraview.lite.lut.get")
def getLookupTableForArrayName(self, name, numSamples = 255):
lutProxy = simple.GetColorTransferFunction(name)
lut = lutProxy.GetClientSideObject()
dataRange = lut.GetRange()
delta = (dataRange[1] - dataRange[0]) / float(numSamples)
colorArray = vtkUnsignedCharArray()
colorArray.SetNumberOfComponents(3)
colorArray.SetNumberOfTuples(numSamples)
rgb = [ 0, 0, 0 ]
for i in range(numSamples):
lut.GetColor(dataRange[0] + float(i) * delta, rgb)
r = int(round(rgb[0] * 255))
g = int(round(rgb[1] * 255))
b = int(round(rgb[2] * 255))
colorArray.SetTuple3(i, r, g, b)
# Add the color array to an image data
imgData = vtkImageData()
imgData.SetDimensions(numSamples, 1, 1)
aIdx = imgData.GetPointData().SetScalars(colorArray)
        # Use the vtk data encoder to base64-encode the image as jpg (quality 100)
        encoder = vtkDataEncoder()
        # two calls in a row crash on Windows - crude timing hack to avoid the crash
        time.sleep(0.01)
b64Str = encoder.EncodeAsBase64Jpg(imgData, 100)
return { 'image': 'data:image/jpg;base64,' + b64Str, 'range': dataRange, 'name': name }
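    # Note (added): the 'image' value returned above is a data URI that a web
    # client can assign directly to an <img> src attribute.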
@exportRpc("paraview.lite.lut.range.update")
def updateLookupTableRange(self, arrayName, dataRange):
lutProxy = simple.GetColorTransferFunction(arrayName)
vtkSMTransferFunctionProxy.RescaleTransferFunction(lutProxy.SMProxy, dataRange[0], dataRange[1], False)
self.getApplication().InvokeEvent('UpdateEvent')
@exportRpc("paraview.lite.lut.preset")
def getLookupTablePreset(self, presetName, numSamples = 512):
lutProxy = simple.GetColorTransferFunction('__PRESET__')
lutProxy.ApplyPreset(presetName, True)
lut = lutProxy.GetClientSideObject()
dataRange = lut.GetRange()
delta = (dataRange[1] - dataRange[0]) / float(numSamples)
colorArray = vtkUnsignedCharArray()
colorArray.SetNumberOfComponents(3)
colorArray.SetNumberOfTuples(numSamples)
rgb = [ 0, 0, 0 ]
for i in range(numSamples):
lut.GetColor(dataRange[0] + float(i) * delta, rgb)
r = int(round(rgb[0] * 255))
g = int(round(rgb[1] * 255))
b = int(round(rgb[2] * 255))
colorArray.SetTuple3(i, r, g, b)
# Add the color array to an image data
imgData = vtkImageData()
imgData.SetDimensions(numSamples, 1, 1)
aIdx = imgData.GetPointData().SetScalars(colorArray)
        # Use the vtk data encoder to base64-encode the image as jpg (quality 100)
        encoder = vtkDataEncoder()
        # two calls in a row crash on Windows - crude timing hack to avoid the crash
        time.sleep(0.01)
b64Str = encoder.EncodeAsBase64Jpg(imgData, 100)
return { 'name': presetName, 'image': 'data:image/jpg;base64,' + b64Str }
@exportRpc("paraview.lite.lut.set.preset")
def applyPreset(self, arrayName, presetName):
lutProxy = simple.GetColorTransferFunction(arrayName)
lutProxy.ApplyPreset(presetName, True)
self.getApplication().InvokeEvent('UpdateEvent')
@exportRpc("paraview.lite.context.line.set")
def updateLineContext(self, visible = False, p1 = [0, 0, 0], p2 = [1, 1, 1]):
if not self.lineContext:
self.lineContext = servermanager.extended_sources.HighResolutionLineSource(Resolution=2, Point1=p1, Point2=p2)
self.lineRepresentation = simple.Show(self.lineContext)
self.lineRepresentation.Visibility = 1 if visible else 0
self.lineContext.Point1 = p1
self.lineContext.Point2 = p2
self.getApplication().InvokeEvent('UpdateEvent')
return self.lineContext.GetGlobalIDAsString()
|
Kitware/HPCCloud
|
pvw-dependencies/pv-lite/lite_protocols.py
|
Python
|
apache-2.0
| 5,489
|
[
"ParaView",
"VTK"
] |
19a788398fcd7388854323944451925e87d1d4822e3392f0666e9fdf376baff8
|
#!/usr/bin/env python
""" Mission 7-Detect and Deliver
1. Random walk with gaussian at center of map until station position is acquired
2. loiter around until correct face seen
3. if symbol seen, move towards symbol perpendicularly
4. if close enough, do move_base aiming
task 7:
-----------------
Created by Reinaldo@ 2016-12-07
Authors: Reinaldo
-----------------
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_waypoint import MoveTo
from move_base_loiter import Loiter
from move_base_stationkeeping import StationKeeping
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import Int8
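# Illustrative sketch (hypothetical helper, not used by the mission logic):
# step 3 reduces the perpendicular approach to 2D geometry. Given the boat
# pose (x0, y0, yaw0) and the symbol pose (sx, sy, syaw), the replanning
# target sits part-way along the symbol's facing direction; offset_ratio
# mirrors the 0.6 factor used inside DetectDeliver below.
def example_perpendicular_target(x0, y0, yaw0, sx, sy, syaw, offset_ratio=0.6):
    alpha = yaw0 - syaw
    # fold into the first quadrant: always positive, between 0 and pi/2
    theta = math.atan2(math.fabs(math.sin(alpha)), math.fabs(math.cos(alpha)))
    d = math.sqrt((x0 - sx) ** 2 + (y0 - sy) ** 2)
    perpendicular_d = offset_ratio * d * math.cos(theta)
    return [sx + perpendicular_d * math.cos(syaw),
            sy + perpendicular_d * math.sin(syaw),
            -syaw]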
class DetectDeliver(object):
MAX_DATA=3
x0, y0, yaw0= 0, 0, 0
symbol=[0 , 0]
symbols=np.zeros((MAX_DATA, 2)) #unordered list
symbols_counter=0
angle_threshold=10*math.pi/180
symbol_location=np.zeros((MAX_DATA, 2))
shape_counter=0
distance_to_box=2
def __init__(self, symbol_list):
print("starting task 7")
rospy.init_node('task_7', anonymous=True)
self.symbol=symbol_list
self.symbol_visited=0
self.symbol_seen=False
self.symbol_position=[0, 0, 0]
self.station_seen=False #station here is cluster center of any face
self.station_position=[0, 0]
self.loiter_obj = Loiter("loiter", is_newnode=False, target=None, radius=5, polygon=4, mode=1, mode_param=1, is_relative=False)
self.moveto_obj = MoveTo("moveto", is_newnode=False, target=None, is_relative=False)
self.stationkeep_obj = StationKeeping("station_keeping", is_newnode=False, target=None, radius=2, duration=30)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.symbol_callback, queue_size = 50)
#rospy.Subscriber("/shoot", MarkerArray, self.symbol_callback, queue_size = 50)
rospy.Subscriber("/finished_search_and_shoot", Int8, self.stop_shoot_callback, queue_size = 5)
self.shooting_pub= rospy.Publisher('/start_search_and_shoot', Int8, queue_size=5)
self.marker_pub= rospy.Publisher('/waypoint_markers', Marker, queue_size=5)
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
self.odom_received = False
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
print(self.symbol)
d=1
while not rospy.is_shutdown() and not self.station_seen:
target=[self.x0+d*math.cos(self.yaw0), self.y0+d*math.sin(self.yaw0), self.yaw0]
self.moveto_obj.respawn(target)  # move forward
print("station: ")
print(self.station_position)
#aiming to the box
self.shooting_complete=False
self.is_aiming=False
#loiter around station until symbol's face seen
while not rospy.is_shutdown():
theta=math.atan2(self.station_position[1]-self.y0, self.station_position[0]-self.x0)
target=[self.station_position[0], self.station_position[1], theta]
self.move_to_goal(target)
if self.distance_from_boat(target)<6:
self.shooting_pub.publish(1)
break
loiter_radius=math.sqrt((self.x0-self.station_position[0])**2+(self.y0-self.station_position[1])**2)
if loiter_radius>5:
loiter_radius=3
while not rospy.is_shutdown():
print(loiter_radius)
self.loiter_obj.respawn(self.station_position, 4, loiter_radius)
self.shooting_pub.publish(1)
if loiter_radius>3:
loiter_radius-=1
if self.symbol_seen:
print(self.symbol_position)
print("symbol's position acquired, exit loitering")
break
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
print(self.symbol_position)
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
counter=0
print(d)
#moveto an offset, replan in the way
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
alpha=self.yaw0-self.symbol_position[2]
theta=math.atan2(math.fabs(math.sin(alpha)), math.fabs(math.cos(alpha))) #always +ve and 0-pi/2
d=math.sqrt((self.x0-self.symbol_position[0])**2+(self.y0-self.symbol_position[1])**2)
perpendicular_d=0.6*d*math.cos(theta)
if counter ==0 or theta>self.angle_threshold or d>self.distance_to_box:
print("replan")
target=[self.symbol_position[0]+perpendicular_d*math.cos(self.symbol_position[2]),self.symbol_position[1]+perpendicular_d*math.sin(self.symbol_position[2]), -self.symbol_position[2]]
self.moveto_obj.respawn(target)
counter+=1
if d<self.distance_to_box:
break
time.sleep(1)
if self.shooting_complete:
print("shooting done, return to base")
break
station=[self.x0, self.y0, -self.symbol_position[2]]
radius=2
duration=30
print(self.symbol_position)
print(station)
while not rospy.is_shutdown():
self.shooting_pub.publish(1)
#duration 0 is forever
if not self.is_aiming:
self.stationkeep_obj.respawn(station, radius, duration)
#make aiming respawn
if self.shooting_complete:
print("shooting done, return to base")
break
time.sleep(1)
def distance_from_boat(self, target):
return math.sqrt((target[0]-self.x0)**2+(target[1]-self.y0)**2)
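#move_to_goal only advances one third of the way toward the goal per call,
#re-aiming at the goal each time, so the heading gets corrected as new
#station estimates arrive.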
def move_to_goal(self, goal):
print("move to point")
one_third_goal=[2*self.x0/3+goal[0]/3, 2*self.y0/3+goal[1]/3, math.atan2(goal[1]-self.y0, goal[0]-self.x0)]
print(one_third_goal)
self.moveto_obj.respawn(one_third_goal)
def stop_shoot_callback(self, msg):
if msg.data==1:
#stop aiming station
self.shooting_complete=True
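#symbol_callback accumulates marker positions in fixed-size ring buffers;
#fitting KMeans with n_clusters=1 over a full buffer is simply a way of
#averaging the last MAX_DATA observations into a stable center estimate,
#once for the station cluster and once for the target symbol.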
def symbol_callback(self, msg):
if len(msg.markers)>0:
if self.symbols_counter>self.MAX_DATA:
station_kmeans = KMeans(n_clusters=1).fit(self.symbols)
self.station_center=station_kmeans.cluster_centers_
self.station_position[0]=self.station_center[0][0]
self.station_position[1]=self.station_center[0][1]
self.station_seen=True
for i in range(len(msg.markers)):
self.symbols[self.symbols_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.symbols_counter+=1
if msg.markers[i].type==self.symbol[0] and msg.markers[i].id==self.symbol[1]:
#set position_list (not sure)
self.symbol_position[0]=msg.markers[i].pose.position.x
self.symbol_position[1]=msg.markers[i].pose.position.y
x = msg.markers[i].pose.orientation.x
y = msg.markers[i].pose.orientation.y
z = msg.markers[i].pose.orientation.z
w = msg.markers[i].pose.orientation.w
_, _, self.symbol_position[2] = euler_from_quaternion((x, y, z, w))
self.symbol_location[self.shape_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.shape_counter+=1
if self.station_seen and self.shape_counter>self.MAX_DATA:
symbol_kmeans = KMeans(n_clusters=1).fit(self.symbol_location)
self.symbol_center=symbol_kmeans.cluster_centers_
self.symbol_position[0]=self.symbol_center[0][0]
self.symbol_position[1]=self.symbol_center[0][1]
#print(self.symbol_position)
self.symbol_seen=True
#self.pool.apply(cancel_loiter)
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
#[type, id] cruciform red
DetectDeliver([1,0])
except rospy.ROSInterruptException:
rospy.loginfo("Task 7 Finished")
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/mc_deliver.py
|
Python
|
gpl-3.0
| 8,318
|
[
"Gaussian"
] |
abc7649bb9bfe8bca72d15c3b5b4a1fb596fe58be612ed608dd1255b823d8be1
|
import maya.cmds as cmds
import os
from datetime import datetime
###############################
###jsLookdevAssistant DEV #####
###############################
#Author: Joseph Szokoli
#Website: cargocollective.com/josephSzokoli
#Help:
#To create a shelf icon, move jsLookdevAssistant.py to your scripts folder and assign the following command to the shelf.
#import jsLookdevAssistant
#reload(jsLookdevAssistant)
### ChangeLog: ################################################################################
#########4.0#########
###Changed Name to jsLookdevAssistant
#UI OVERHAUL
#Introduced Menu Bar
#REWRITE SHADERPACK INCOMING
###Alphabetical listing with tabs in all exporter/importer/assigners
### Fixed bug in shader pack manager
#####LINE 277 FIX SG RENAMER WITH DISP
###3.2.0
#Changed Library Files to .mb
###3.1.0
#Introduced Add Selected to textField Button "+' in materialApplicator
#Sorted listMat
###3.0.1
#Changed Rename SG function to filter to selection or do all
###3.0.0
#Introduced Shader Pack Manager.
#Squashed Dsp removal bugs.
#Added Check all to Import Materials
#Added Auto Import Materials Mentioned in shaderPacks
#Fixed Bug in Auto Import Only to Import Missing Shaders
#Fixed Close UI Bug
###2.2.0
#Added View All File Texture Paths Function
###2.1.2
#Fixed more than one nested Shader Removal
###v2.0 + v2.0.1 + v2.0.2
#Overall UI Overhaul.
#Introduced Texture Path Manager.
#Introduced Material Batch Applicator.
#Introduced Material Applicator Assignment Backups. Backups current assignments under storage node.
#Fixed publishing materials with namespace. Still not suggested.
#Added Batch Rename Shading Groups.
#Fixed bug publishing after additional shaders are created.
#Fixed bug on import. Deleting Top nodes blindly. Now focused only to Shading Groups.
#.1
#Nested Shaders no longer show up under publish
#Typo
#.2
#Added Try to ShadingGroupChanger
#Added Try to materialAssigner
#Nested Shaders no longer show up under Material Applicator
#Added Lambert1 back to material assigner
###v1.2 + v1.2.1
#Added Shading Group support.
#Added top folder for backups.
#Added Storage Node Locks.
###v1.1
#Introduced Material Library.
###v1.0
#Initial Material Assignment System
#End ChangeLog.#################################################################################
#Begin Functions Area
#Initial Build Material List
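#The default particleCloud1 shader and any displacement shaders are filtered
#out so the list holds only assignable surface materials.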
listMat = cmds.ls( materials=True )
dsp = cmds.ls(type = 'displacementShader')
if len(dsp)>0:
for d in dsp:
listMat.remove(d)
listMat.remove(u'particleCloud1')
matNumber = len(listMat)
listMat = sorted(listMat, key=lambda s: s.lower())
#####VERSION NUMBER#########################
#versionNumber = 'v4.0'
versionNumber = 'v4.0_DEV'
#############################################
def printNewMenuItem( item ):
cmds.select(cmds.ls( sl=1 , tr=True ))
cmds.hyperShade( assign= item )
updateIndividual()
def selection(*args):
selectionName = cmds.textField('TFL_selection',q=True,text=True)
selThis = cmds.ls('*%s*' %(selectionName), tr=1,r=1,type='mesh')
if len(selThis) > 0:
cmds.select(selThis)
else:
print 'Could not find object with name that matches: (%s)' %(selectionName)
def updateIndividual(args=None):
listMat = cmds.ls( materials=True )
dsp = cmds.ls(type = 'displacementShader')
if len(dsp)>0:
for d in dsp:
listMat.remove(d)
listMat.remove(u'particleCloud1')
matNumber = len(listMat)
listMat = sorted(listMat, key=lambda s: s.lower())
menuItems = cmds.optionMenu('individual', q=True, itemListLong=True) # itemListLong returns the children
if menuItems:
cmds.deleteUI(menuItems)
for num in range(0,matNumber):
cmds.menuItem(label= listMat [num],parent= 'individual' )
def applyChanges(args=None):
listMat = cmds.ls( materials=True )
listMat.remove(u'particleCloud1')
for each in listMat:
exists = cmds.textField('%s_ApplyField' %(each), query = True, exists=True)
if exists == True:
result = cmds.textField('%s_ApplyField' %(each), query = True, text=True)
resultList = result.split()
if len(resultList) > 0:
for r in resultList:
try:
cmds.select(r)
cmds.hyperShade(assign=each)
cmds.select(d=1)
except:
pass
def applyChangesSave(args=None):
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if not checkStorageNode:
cmds.group(name = 'materialLibraryDirectory_STORAGE', em=True)
cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes',nn = 'Notes', dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', 'No Directory Currently Set', type = "string", )
listMat = cmds.ls( materials=True )
listMat.remove(u'particleCloud1')
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
exists = cmds.textField('%s_ApplyField' %(newName), query = True, exists=True)
if exists == True:
result = cmds.textField('%s_ApplyField' %(newName), query = True, text=True)
testing = cmds.attributeQuery(newName, node='materialLibraryDirectory_STORAGE', ex=True )
if testing == True:
cmds.setAttr('materialLibraryDirectory_STORAGE.%s' %(newName), result ,type = "string" )
else:
cmds.addAttr('materialLibraryDirectory_STORAGE', sn=newName,nn = newName, dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.%s' %(newName), result ,type = "string" )
resultList = result.split()
if len(resultList) > 0:
for r in resultList:
try:
cmds.select(r)
cmds.hyperShade(assign=each)
cmds.select(d=1)
except:
pass
def closeAssigner(args=None):
cmds.deleteUI('materialBatchApply')
def addSelected(name):
sel = cmds.ls(sl=1, tr=1)
for each in sel:
try:
current = cmds.textField(name+'_ApplyField', query = True, text = True)
if len(current) > 0:
new = current + ' ' +each
else:
new = each
cmds.textField(name+'_ApplyField', edit = True, text = new )
except:
pass
def materialBatchApply(args=None):
listMat = cmds.ls( materials=True )
listMatStart = cmds.ls( materials=True )
downList = []
for each in listMatStart:
lowerNode = cmds.hyperShade(lun = each)
underShader = cmds.ls(lowerNode, materials = True)
#print underShader
for x in underShader:
downList.append(x)
downList = remove_duplicates(downList)
if len(downList) > 0:
for z in downList:
#print z
listMat.remove(z)
listMat.append(u'lambert1')
listMat = sorted(listMat, key=lambda s: s.lower())
# listMat.remove(u'particleCloud1')
if cmds.window('materialBatchApply', exists=True):
cmds.deleteUI('materialBatchApply')
cmds.window('materialBatchApply', title= 'jsMaterialApplicator')
cmds.flowLayout()
cmds.frameLayout(label = "Create Material Assignments:", borderStyle = "etchedIn",h=400,w=600 )
cmds.scrollLayout(hst=16,vst=16)
last = 'a'
aExist =[]
if listMat[0][0] == last:
aExist.append('yes')
if 'yes' in aExist:
cmds.text('a'.capitalize()+':',fn = 'boldLabelFont')
for each in listMat:
if each[0] == last:
newName = cmds.ls(each)[0].rpartition(':')[2]
#cmds.rowLayout( nc=2, adjustableColumn=2, columnAlign=(1, 'left'), w= 580 )
cmds.rowLayout( nc=3, adjustableColumn=3, columnAlign=(1, 'left'), w= 980 )
cmds.button(newName,label = '+', c = 'jsLookdevAssistant.addSelected("%s")'%newName )
cmds.text(label = '%s:' %(newName), font = "boldLabelFont",rs=1 )
cmds.textField('%s_ApplyField' %(newName), w =200)
else:
cmds.text(label= each[0].capitalize()+':',fn = 'boldLabelFont')
newName = cmds.ls(each)[0].rpartition(':')[2]
#cmds.rowLayout( nc=2, adjustableColumn=2, columnAlign=(1, 'left'), w= 580 )
cmds.rowLayout( nc=3, adjustableColumn=3, columnAlign=(1, 'left'), w= 980 )
cmds.button(newName,label = '+', c = 'jsLookdevAssistant.addSelected("%s")'%newName )
cmds.text(label = '%s:' %(newName), font = "boldLabelFont",rs=1 )
cmds.textField('%s_ApplyField' %(newName), w =200)
last = each[0]
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Options:", borderStyle = "etchedIn",h=400,w=100 )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.button(label = 'Apply and Save:', c = applyChangesSave, h=125)
cmds.button(label = 'Apply', c = applyChanges, h= 125 )
cmds.button(label = 'Close:', h =125, c = closeAssigner )
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if len(checkStorageNode) > 0:
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
testing = cmds.attributeQuery(newName, node='materialLibraryDirectory_STORAGE', ex=True )
if testing == True:
newSet = cmds.getAttr('materialLibraryDirectory_STORAGE.%s' %(newName) )
cmds.textField('%s_ApplyField' %(newName),edit = True, text = newSet)
cmds.showWindow( 'materialBatchApply' )
cmds.window('materialBatchApply', edit=True, widthHeight=[700,400], s = False)
def renameShadingGroups(args=None):
listMat = cmds.ls(sl=1, materials=True )
if not listMat:
listMat = cmds.ls(materials=True)
dsp = cmds.ls(type = 'displacementShader')
if len(dsp)>0:
for d in dsp:
listMat.remove(d)
listMat.remove(u'lambert1')
listMat.remove(u'particleCloud1')
if len(listMat) > 0:
for each in listMat:
try:
upper = cmds.hyperShade(each, ldn = each)
print 'upper =' + str(upper)
SG = cmds.ls(upper, type = 'shadingEngine')
print 'SG =' +str(SG)
if len(SG) > 0:
cmds.rename(SG, '%sSG' %(each) )
print 'Shading Group of %s has been changed from %s to [u\'%sSG\']' %(each,SG,each)
except:
pass
####TEXTURE PATH CODE#####__________________________________________________________________
def changeTexPath(Args = None):
backSlash = '\\'
forwardSlash = '/'
textureNodes = cmds.ls( type = 'file')
origDirPrefix = cmds.textField('oldDir', query = True, text = True)
newDirPrefix = cmds.textField('newDir', query = True, text = True)
origDirPrefix = origDirPrefix.replace(backSlash,forwardSlash)
newDirPrefix = newDirPrefix.replace(backSlash,forwardSlash)
for each in textureNodes:
currentTexDir = cmds.getAttr('%s.ftn' %(each) )
newDirName = currentTexDir.replace(origDirPrefix,newDirPrefix)
cmds.setAttr('%s.ftn' %(each),"%s" %(newDirName), type = "string")
def changeSelTexPath(Args = None):
backSlash = '\\'
forwardSlash = '/'
textureNodes = cmds.ls( sl = 1, type = 'file')
origDirPrefix = cmds.textField('oldDir', query = True, text = True)
newDirPrefix = cmds.textField('newDir', query = True, text = True)
origDirPrefix = origDirPrefix.replace(backSlash,forwardSlash)
newDirPrefix = newDirPrefix.replace(backSlash,forwardSlash)
for each in textureNodes:
currentTexDir = cmds.getAttr('%s.ftn' %(each) )
newDirName = currentTexDir.replace(origDirPrefix,newDirPrefix)
cmds.setAttr('%s.ftn' %(each),"%s" %(newDirName), type = "string")
def switchPortions(Args=None):
oldOld= cmds.textField('oldDir', q=True, text = True)
oldNew = cmds.textField('newDir', q=True, text = True)
cmds.textField('oldDir', edit=True, text = oldNew)
cmds.textField('newDir', edit=True, text = oldOld)
def selectFtpNodes(args=None):
ftpCheckOptions = []
textureNodes = cmds.ls( type = 'file')
for each in textureNodes:
checkBoxExist = cmds.checkBox(each + '_ftpCheck', query=True, exists=True)
if checkBoxExist == True:
onOff = cmds.checkBox(each + '_ftpCheck', query=True, value=True)
if onOff == True:
ftpCheckOptions.append(each)
cmds.select(ftpCheckOptions)
def closeTexPathUI(args=None):
cmds.deleteUI('textureList')
def texturePathUI(args=None):
textureNodes = cmds.ls( type = 'file')
texturePaths = []
for each in enumerate(textureNodes):
currentTexDir = cmds.getAttr('%s.ftn' %(each[1]) )
texturePaths.append(each[1]+'$'+currentTexDir)
if cmds.window('textureList', exists=True):
cmds.deleteUI('textureList')
cmds.window('textureList', title= 'Texture File Paths')
cmds.flowLayout()
cmds.frameLayout(label = "Texture File Paths:", borderStyle = "etchedIn",h=196,w=550)
cmds.scrollLayout(hst=16,vst=16)
for each in texturePaths:
cmds.rowLayout( nc=2, adjustableColumn=2, columnAlign=(1, 'left'), w= 900 )
cmds.checkBox(each.split('$',1)[0]+'_ftpCheck', l=each.split('$',1)[0]+':')
cmds.textField(each.split('$',1)[1], tx = each.split('$',1)[1], ed = False, w=538)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Functions:", borderStyle = "etchedIn",h=196,w=98)
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.button('ftpNodeSelect', l= 'Select Nodes:', c = selectFtpNodes,h=86 )
cmds.button('ftpUIClose', l='Close', c = closeTexPathUI,h=86)
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'textureList' )
cmds.window('textureList', edit=True, widthHeight=[650,200], s = False)
###TEXTUREPATH EDITOR
def texturePath(args=None):
if cmds.window('jsTextureEditor',exists=True):
cmds.deleteUI('jsTextureEditor')
window = cmds.window('jsTextureEditor',menuBar=True, title= 'jsTexturePathEditor', w=330, h=100) #, s = False
cmds.frameLayout(label = "jsTextureManager", borderStyle = "etchedIn", w = 320, h=260 )
cmds.columnLayout(adj = 1)
cmds.flowLayout(cs = 120,h=15)
cmds.text(label = 'Old String Portion:',align='left' )
cmds.button(label = 'Switch Old and New:',h=15, c=switchPortions)
cmds.setParent('..')
cmds.columnLayout(adj = 1)
cmds.textField('oldDir', w = 315, h = 30)
cmds.text(label = 'New String Portion:',align='left')
cmds.textField('newDir', w=315, h= 30)
cmds.frameLayout(label = "Change Texture Paths:", borderStyle = "etchedIn", w = 315, h=150)
cmds.columnLayout(adj = 1)
cmds.button(label= 'View All File Texture Paths:', c = texturePathUI, w = 312, h=42)
cmds.button(label= 'Change All File Texture Paths:', c = changeTexPath, w = 312, h=42)
cmds.button(label= 'Change Selected File Texture Path:', c = changeSelTexPath, w = 312, h=42)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( window )
cmds.window('jsTextureEditor', edit=True, widthHeight=[330,268], s = True)
####TEXTURE PATH END####____________________________________________________________________
####SetLibrary Area####______________________________________________________________________________________
def saveDirectory(args = None):
#needs create notes attr
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if len(checkStorageNode) > 0:
findDir = cmds.textField('getDir',q=True,text=True)
print findDir
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', "%s" %(findDir), type = "string", )
else:
print 'Storage Node does not exist. Creating Storage Node...'
findDir = cmds.textField('getDir',q=True,text=True)
cmds.group(name = 'materialLibraryDirectory_STORAGE', em=True)
cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes',nn = 'Notes', dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', "%s" %(findDir), type = "string", )
def loadDirectory(args = None):
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if len(checkStorageNode) > 0:
dialogDir = cmds.fileDialog2(dialogStyle=2, fm = 3, okc = 'Set Directory')
if dialogDir == None:
print 'Operation Cancelled'
else:
for each in dialogDir:
dialogDirStrip = each
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', "%s" %(dialogDirStrip), type = "string", )
else:
dialogDir = cmds.fileDialog2(dialogStyle=2, fm = 3, okc = 'Set Directory')
if not dialogDir:
print 'Operation Cancelled'
else:
for each in dialogDir:
dialogDirStrip = each
cmds.group(name = 'materialLibraryDirectory_STORAGE', em=True)
cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes',nn = 'Notes', dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', "%s" %(dialogDirStrip), type = "string", )
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if len(checkStorageNode) > 0:
currentSet = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
else:
currentSet = 'No Directory Storage File Found'
cmds.textField('getDir', edit = True, text = "%s" %(currentSet) )
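#The storage-node bootstrap above is repeated in several functions below; a
#possible consolidation (hypothetical helper, not part of the original tool):
def ensureStorageNode():
    if not cmds.ls('materialLibraryDirectory_STORAGE'):
        cmds.group(name='materialLibraryDirectory_STORAGE', em=True)
        cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes', nn='Notes', dt="string")
        cmds.setAttr('materialLibraryDirectory_STORAGE.notes', 'No Directory Currently Set', type="string")
    return 'materialLibraryDirectory_STORAGE'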
####END SetLibrary Area####______________________________________________________________________________________
######EXPORT#####_________________________________________________________________________________________________
def remove_duplicates(li):
my_set = set()
res = []
for e in li:
if e not in my_set:
res.append(e)
my_set.add(e)
return res
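#remove_duplicates keeps first-seen order, e.g.
#remove_duplicates(['a', 'b', 'a']) -> ['a', 'b']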
def closeExport(args = None):
cmds.deleteUI('materialBatchExport')
def closeInport(args = None):
cmds.deleteUI('materialBatchImport')
def exportCheckerUi(args = None):
listMat = cmds.ls( materials=True )
listMatStart = cmds.ls( materials=True )
listMat = sorted(listMat, key=lambda s: s.lower())
downList = []
for each in listMatStart:
lowerNode = cmds.hyperShade(lun = each)
underShader = cmds.ls(lowerNode, materials = True)
#print underShader
for x in underShader:
downList.append(x)
downList = remove_duplicates(downList)
if len(downList) > 0:
for z in downList:
listMat.remove(z)
dsp = cmds.ls(type = 'displacementShader')
if len(dsp)>0:
for d in dsp:
try:
listMat.remove(d)
except:
pass
if cmds.window('materialBatchExport', exists=True):
cmds.deleteUI('materialBatchExport')
cmds.window('materialBatchExport', title= 'jsMaterialLibraryPublish')
cmds.flowLayout()
cmds.frameLayout(label = "Select Materials to Export:", borderStyle = "etchedIn",h=350,w=250 )
cmds.scrollLayout(hst=16,vst=16)
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
last = 'a'
aExist =[]
if listMat[0][0] == last:
cmds.text('a'.capitalize()+':',fn = 'boldLabelFont')
for each in listMat:
if each[0] == last:
cmds.checkBox(each, l=each)
else:
cmds.text(label= each[0].capitalize()+':',fn = 'boldLabelFont')
cmds.checkBox(each,l=each)
last = each[0]
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Export Options:", borderStyle = "etchedIn",h=350)
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
exportHeightButton = 108
cmds.button(label = 'Export Selected:', command = exportCheckedOper, h=exportHeightButton)
cmds.button(label = 'Export All:', command = batchExport, h=exportHeightButton)
cmds.button(label = 'Cancel:', command = closeExport, h=exportHeightButton)
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'materialBatchExport' )
cmds.window('materialBatchExport', edit=True, widthHeight=[350,350], s = False)
def exportCheckedOper(args = None):
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
listMat = cmds.ls( materials=True )
listMat.remove(u'lambert1')
listMat.remove(u'particleCloud1')
dsp = cmds.ls(type = 'displacementShader')
if len(dsp)>0:
for d in dsp:
listMat.remove(d)
checkOptions = []
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
checkBoxExist = cmds.checkBox(each, query=True, exists=True)
if checkBoxExist == True:
onOff = cmds.checkBox(each, query=True, value=True)
if onOff == True:
checkOptions.append(each)
print '%s was selected.' %(newName)
else:
print '%s was not selected.' %(newName)
now = datetime.now()
timeStamp = '%s.%s.%s_%s.%s.%s' % (now.month, now.day, now.year, now.hour, now.minute, now.second )
if (os.path.exists("%s/_backup" %(setDir) )) == False:
cmds.sysFile("%s/_backup/" %(setDir) , md=1)
else:
print "Backup Directory Exists"
for each in checkOptions:
newName = cmds.ls(each)[0].rpartition(':')[2]
if (os.path.exists("%s/_backup/%s" %(setDir,newName) )) == False:
cmds.sysFile("%s/_backup/%s" %(setDir,newName) , md=1)
else:
print "Backup Directory Exists"
#cmds.select(each)
SGHOLDER = cmds.polyPlane(n='MATERIALMANAGEREXPORTPLANEDELETETHIS_%s' %(newName), sx=1, sy=1)
cmds.hyperShade(assign = each)
cmds.select(SGHOLDER)
if (os.path.exists("%s/%s/" %(setDir,newName))) == False:
cmds.sysFile("%s/%s/" %(setDir,newName) , md=1)
else:
cmds.sysFile( "%s/%s/" %(setDir,newName) , rename= "%s/_backup/%s/%s_backup_%s/" %(setDir,newName,newName,timeStamp) )
cmds.sysFile("%s/%s/" %(setDir,newName) , md=1)
cmds.file("%s/%s/%s" %(setDir,newName,newName), force=True, exportSelected=True, type="mayaBinary", pr = False)
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
cmds.select(d=1)
cmds.deleteUI('materialBatchExport')
def batchExport(args = None):
listMat = cmds.ls( materials=True )
listMatStart = cmds.ls( materials=True )
for each in listMatStart:
lowerNode = cmds.hyperShade(lun = each)
underShader = cmds.ls(lowerNode, materials = True)
if len(underShader) > 0:
for z in underShader:
try:
listMat.remove(z)
except:
pass
dsp = cmds.ls(type = 'displacementShader')
if len(dsp)>0:
for d in dsp:
try:
listMat.remove(d)
except:
pass
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
if (os.path.exists("%s/_backup" %(setDir) )) == False:
cmds.sysFile("%s/_backup/" %(setDir) , md=1)
else:
print "Backup Directory Exists"
if (os.path.exists("%s/_backup/%s" %(setDir,newName) )) == False:
cmds.sysFile("%s/_backup/%s" %(setDir,newName) , md=1)
else:
print "Backup Directory Exists"
now = datetime.now()
timeStamp = '%s.%s.%s_%s.%s.%s' % (now.month, now.day, now.year, now.hour, now.minute, now.second )
#print listMat
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
#cmds.select(each)
SGHOLDER = cmds.polyPlane(n='MATERIALMANAGEREXPORTPLANEDELETETHIS_%s' %(newName), sx=1, sy=1)
cmds.hyperShade(assign = each)
cmds.select(SGHOLDER)
if (os.path.exists("%s/_backup/%s" %(setDir,newName) )) == False:
cmds.sysFile("%s/_backup/%s" %(setDir,newName) , md=1)
if (os.path.exists("%s/%s/" %(setDir,newName))) == False:
cmds.sysFile("%s/%s/" %(setDir,newName) , md=1)
else:
cmds.sysFile( "%s/%s/" %(setDir,newName) , rename= "%s/_backup/%s/%s_backup_%s/" %(setDir,newName,newName,timeStamp) )
cmds.sysFile("%s/%s/" %(setDir,newName) , md=1)
cmds.file("%s/%s/%s" %(setDir,newName,newName), force=True, exportSelected=True, type="mayaBinary", pr = False)
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
cmds.select(d=1)
cmds.deleteUI('materialBatchExport')
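#exportCheckedOper and batchExport share the backup-then-export steps; a
#possible per-material helper (hypothetical, not part of the original tool):
def exportOneMaterial(setDir, mat, timeStamp):
    newName = cmds.ls(mat)[0].rpartition(':')[2]
    # assign the material to a throwaway plane so exportSelected captures it
    holder = cmds.polyPlane(n='MATERIALMANAGEREXPORTPLANEDELETETHIS_%s' % newName, sx=1, sy=1)
    cmds.hyperShade(assign=mat)
    cmds.select(holder)
    if not os.path.exists("%s/%s/" % (setDir, newName)):
        cmds.sysFile("%s/%s/" % (setDir, newName), md=1)
    else:
        # move the previous publish into the timestamped backup folder
        cmds.sysFile("%s/%s/" % (setDir, newName), rename="%s/_backup/%s/%s_backup_%s/" % (setDir, newName, newName, timeStamp))
        cmds.sysFile("%s/%s/" % (setDir, newName), md=1)
    cmds.file("%s/%s/%s" % (setDir, newName, newName), force=True, exportSelected=True, type="mayaBinary", pr=False)
    cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))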
######END EXPORT#####_________________________________________________________________________________________________
###### IMPORT #####_________________________________________________________________________________________________
def importCheckAll(args=None):
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
backupDir = os.path.abspath(".")
os.chdir("%s" %(setDir) )
allLibraryMaterials = [name for name in os.listdir(".") if os.path.isdir(name)]
os.chdir("%s" %backupDir)
try:
allLibraryMaterials.remove('_backup')
except:
pass
try:
allLibraryMaterials.remove('_asset')
except:
pass
for each in allLibraryMaterials:
cmds.checkBox(each,edit=1,v=1)
def importCheckerUI(args = None):
#Batch Import
if len(cmds.ls('materialLibraryDirectory_STORAGE')) == 0:
return None
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
backupDir = os.path.abspath(".")
os.chdir("%s" %(setDir) )
allLibraryMaterials = [name for name in os.listdir(".") if os.path.isdir(name)]
os.chdir("%s" %backupDir)
try:
allLibraryMaterials.remove('_backup')
except:
pass
try:
allLibraryMaterials.remove('_asset')
except:
pass
allLibraryMaterials=sorted(allLibraryMaterials, key=lambda s: s.lower())
if cmds.window('materialBatchImport', exists=True):
cmds.deleteUI('materialBatchImport')
cmds.window('materialBatchImport', title= 'jsMaterialLibraryImport')
#cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.flowLayout()
cmds.frameLayout(label = "Select Materials to Import", borderStyle = "etchedIn", w=250,h=350)
#cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.scrollLayout(hst=16,vst=16)
#cmds.gridLayout( numberOfColumns=4, cellWidthHeight=(150, 25) )
last = 'a'
aExist =[]
if len(allLibraryMaterials) > 0:
if allLibraryMaterials[0][0] == last:
aExist.append('yes')
if 'yes' in aExist:
cmds.text('a'.capitalize()+':',fn = 'boldLabelFont')
for each in allLibraryMaterials:
if each[0] == last:
cmds.checkBox(each, l=each)
else:
cmds.text(label= each[0].capitalize()+':',fn = 'boldLabelFont')
cmds.checkBox(each,l=each)
last = each[0]
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Import Options", borderStyle = "etchedIn")
#cmds.gridLayout( numberOfColumns=4,numberOfRows = 1, cellWidthHeight=(150, 25) )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
importButtonHeight = 107
cmds.button(label = 'Import Selected:', command = importCheckedOper, h = importButtonHeight)
cmds.button(label = 'Check All', h = importButtonHeight, c=importCheckAll)
cmds.button(label = 'Cancel:', command = closeInport, h = importButtonHeight)
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'materialBatchImport' )
cmds.window('materialBatchImport', edit=True, widthHeight=[350,350], s = False)
def importCheckedOper(args = None):
#import selected Button
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
backupDir = os.path.abspath(".")
os.chdir("%s" %(setDir) )
allLibraryMaterials = [name for name in os.listdir(".") if os.path.isdir(name)]
os.chdir("%s" %backupDir)
if '_backup' in allLibraryMaterials: allLibraryMaterials.remove('_backup')
checkOptions = []
for each in allLibraryMaterials:
checkBoxExist = cmds.checkBox(each, query=True, exists=True)
if checkBoxExist == True:
onOff = cmds.checkBox(each, query=True, value=True)
if onOff == True:
checkOptions.append(each)
print '%s was selected.' %(each)
else:
print '%s was not selected.' %(each)
for each in checkOptions:
shaderExists = cmds.ls('*%s*' %(each), materials = 1)
cmds.hyperShade(o = each)
prevAssigned = cmds.ls(sl=1)
cmds.select(d=1)
lowerNode = cmds.hyperShade(lun = each)
upperNode = cmds.hyperShade(ldn = each)
if len(shaderExists) > 0:
if len(lowerNode) > 0:
cmds.delete(lowerNode)
if len(upperNode) > 0:
SG = cmds.ls(upperNode, type = 'shadingEngine')
cmds.delete(SG)
cmds.delete(each)
cmds.file("%s/%s/%s.mb" %(setDir,each,each), force=True, i=True, type="mayaBinary", dns = True)
if len(prevAssigned) > 0:
cmds.select(prevAssigned)
cmds.hyperShade(assign = each)
cmds.select(d=1)
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
cmds.deleteUI('materialBatchImport')
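#Note: when an imported material already exists in the scene, importCheckedOper
#first remembers its current geometry assignments, deletes the old upstream
#network, shading group, and material, imports the published .mb, and then
#re-applies the remembered assignments.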
######END IMPORT#####_________________________________________________________________________________________________
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
def createAssetDir(args=None):
if len(cmds.ls('materialLibraryDirectory_STORAGE')) == 1:
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
if (os.path.exists("%s/_asset" %(setDir) )) == False:
cmds.sysFile("%s/_asset/" %(setDir) , md=1)
else:
print "Asset Directory Exists"
else:
print "No Directory is Set"
##################################################################################################################
def closeImportAsset(args=None):
cmds.deleteUI('materialAssetImport')
def importPacketMaterials(assetName):
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
backupDir = os.path.abspath(".")
os.chdir("%s" %(setDir) )
allLibraryMaterials = [name for name in os.listdir(".") if os.path.isdir(name)]
os.chdir("%s" %backupDir)
try:
allLibraryMaterials.remove('_backup')
except:
pass
materials = cmds.listAttr(assetName,ud=1)
checkOptions = []
for each in materials:
#testing = cmds.attributeQuery(each,node = assetName)
testing = cmds.getAttr(assetName + '.' + each)
if testing == '':
pass
else:
checkOptions.append(each)
for each in checkOptions:
try:
if cmds.objExists(each):
print 'exists'
else:
cmds.file("%s/%s/%s.ma" %(setDir,each,each), force=True, i=True, type="mayaAscii", dns = True)
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
except:
pass
def importAssetCheckedOper(args = None):
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
setDir = setDir+'/_asset/'
backupDir = os.path.abspath(".")
os.chdir("%s" %(setDir) )
allLibraryMaterials = [name for name in os.listdir(".") if os.path.isdir(name)]
os.chdir("%s" %backupDir)
if '_backup' in allLibraryMaterials: allLibraryMaterials.remove('_backup')
checkOptions = []
for each in allLibraryMaterials:
checkBoxExist = cmds.checkBox(each, query=True, exists=True)
if checkBoxExist == True:
onOff = cmds.checkBox(each, query=True, value=True)
if onOff == True:
checkOptions.append(each)
print '%s was selected.' %(each)
else:
print '%s was not selected.' %(each)
for each in checkOptions:
try:
cmds.delete(each)
except:
pass
cmds.file("%s/%s/%s.mb" %(setDir,each,each), force=True, i=True, type="mayaBinary", dns = True)
cmds.parent( each, 'materialLibraryDirectory_STORAGE' )
#importPacketMaterials(each)
allStor = cmds.ls('materialLibraryDirectory_STORAGE*')
allStor.remove('materialLibraryDirectory_STORAGE')
cmds.delete(allStor)
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
cmds.deleteUI('materialAssetImport')
try:
cmds.textScrollList('assetList',edit =1, ra=1 )
underList = cmds.listRelatives('materialLibraryDirectory_STORAGE')
assetList = []
for each in underList:
if 'Asset_STORAGE' in each:
assetList.append(each)
cmds.textScrollList('assetList',edit = 1, append = assetList,sii=1 )
except:
pass
def importAssetUI(args = None):
createAssetDir()
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
setDir = setDir+'/_asset/'
backupDir = os.path.abspath(".")
os.chdir("%s" %(setDir) )
allLibraryMaterials = [name for name in os.listdir(".") if os.path.isdir(name)]
os.chdir("%s" %backupDir)
try:
allLibraryMaterials.remove('_backup')
except:
pass
allLibraryMaterials = sorted(allLibraryMaterials, key=lambda s: s.lower())
if cmds.window('materialAssetImport', exists=True):
cmds.deleteUI('materialAssetImport')
cmds.window('materialAssetImport', title= 'jsShaderPackImport')
#cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.flowLayout()
cmds.frameLayout(label = "Select Shader Packs to Import", borderStyle = "etchedIn", w=250,h=350)
#cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.scrollLayout(hst=16,vst=16)
#cmds.gridLayout( numberOfColumns=4, cellWidthHeight=(150, 25) )
for each in allLibraryMaterials:
cmds.checkBox(each, l=each)
#cmds.checkBox(each, l=each, onCommand = checkEach, offCommand = uncheckEach)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Import Options", borderStyle = "etchedIn")
#cmds.gridLayout( numberOfColumns=4,numberOfRows = 1, cellWidthHeight=(150, 25) )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
importButtonHeight = 160
cmds.button(label = 'Import Selected:', command = importAssetCheckedOper, h = importButtonHeight)
cmds.button(label = 'Cancel:', command = closeImportAsset, h = importButtonHeight)
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'materialAssetImport' )
cmds.window('materialAssetImport', edit=True, widthHeight=[350,350], s = False)
##################################################################################################################
def getMaterialsUsed(assetName):
materials = cmds.listAttr(assetName,ud=1)
checkOptions = []
for each in materials:
testing = cmds.getAttr(assetName+'.'+each,)
if testing == '':
pass
else:
shaderHolder = cmds.polyPlane(n='MATERIALMANAGEREXPORTPLANEDELETETHIS_%s' %(each), sx=1, sy=1)
cmds.delete(ch=1)
cmds.hyperShade(assign=each)
checkOptions.append('MATERIALMANAGEREXPORTPLANEDELETETHIS_%s' %(each))
cmds.select(checkOptions, assetName)
def publishSelected(args=None):
createAssetDir()
allAssets = cmds.listRelatives('materialLibraryDirectory_STORAGE')
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
setDir = setDir+'/_asset/'
checkOptions = []
for each in allAssets:
checkBoxExist = cmds.checkBox(each, query=True, exists=True)
if checkBoxExist == True:
onOff = cmds.checkBox(each, query=True, value=True)
if onOff == True:
checkOptions.append(each)
now = datetime.now()
timeStamp = '%s.%s.%s_%s.%s.%s' % (now.month, now.day, now.year, now.hour, now.minute, now.second )
if (os.path.exists("%s/_backup" %(setDir) )) == False:
cmds.sysFile("%s/_backup/" %(setDir) , md=1)
else:
print "Backup Directory Exists"
if (os.path.exists("%s/_backup/%s" %(setDir,each) )) == False:
cmds.sysFile("%s/_backup/%s" %(setDir,each) , md=1)
else:
print "Backup Directory Exists"
for each in checkOptions:
cmds.parent( each, world=True )
getMaterialsUsed(each)
if (os.path.exists("%s/%s/" %(setDir,each))) == False:
cmds.sysFile("%s/%s/" %(setDir,each) , md=1)
else:
cmds.sysFile( "%s/%s/" %(setDir,each) , rename= "%s/_backup/%s/%s_backup_%s/" %(setDir,each,each,timeStamp) )
cmds.sysFile("%s/%s/" %(setDir,each) , md=1)
cmds.file("%s/%s/%s" %(setDir,each,each), force=True, exportSelected=True, type="mayaBinary", pr = False)
cmds.parent(each,'materialLibraryDirectory_STORAGE')
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
cmds.deleteUI('assetsExport')
def publishAllAssets(args=None):
createAssetDir()
allAssets = cmds.listRelatives('materialLibraryDirectory_STORAGE')
setDir = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
setDir = setDir+'/_asset/'
listMat = cmds.ls( materials=True )
checkOptions = []
now = datetime.now()
timeStamp = '%s.%s.%s_%s.%s.%s' % (now.month, now.day, now.year, now.hour, now.minute, now.second )
for each in allAssets:
if (os.path.exists("%s/_backup" %(setDir) )) == False:
cmds.sysFile("%s/_backup/" %(setDir) , md=1)
else:
print "Backup Directory Exists"
if (os.path.exists("%s/_backup/%s" %(setDir,each) )) == False:
cmds.sysFile("%s/_backup/%s" %(setDir,each) , md=1)
else:
print "Backup Directory Exists"
for each in allAssets:
cmds.parent( each, world=True )
getMaterialsUsed(each)
if (os.path.exists("%s/%s/" %(setDir,each))) == False:
cmds.sysFile("%s/%s/" %(setDir,each) , md=1)
else:
cmds.sysFile( "%s/%s/" %(setDir,each) , rename= "%s/_backup/%s/%s_backup_%s/" %(setDir,each,each,timeStamp) )
cmds.sysFile("%s/%s/" %(setDir,each) , md=1)
cmds.file("%s/%s/%s" %(setDir,each,each), force=True, exportSelected=True, type="mayaBinary", pr = False)
cmds.delete(cmds.ls('*MATERIALMANAGEREXPORTPLANEDELETETHIS*'))
cmds.deleteUI('assetsExport')
def publishAssetWindow(Args=None):
if cmds.window('assetsExport', exists=True):
cmds.deleteUI('assetsExport')
cmds.window('assetsExport', title= 'jsShaderPackPublish')
#cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.flowLayout()
cmds.frameLayout(label = "Select Shader Packs to Publish:", borderStyle = "etchedIn",h=350,w=250 )
cmds.scrollLayout(hst=16,vst=16)
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
#cmds.gridLayout( numberOfColumns=4, cellWidthHeight=(150, 25) )
allAssets = cmds.listRelatives('materialLibraryDirectory_STORAGE')
if allAssets:
for each in allAssets:
cmds.checkBox(each, l=each)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Export Options:", borderStyle = "etchedIn",h=350)
#cmds.gridLayout( numberOfColumns=4,numberOfRows = 1, cellWidthHeight=(150, 25) )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
exportHeightButton = 108
cmds.button(label = 'Export Selected:', h=exportHeightButton, c = publishSelected)
cmds.button(label = 'Export All:', h=exportHeightButton, c = publishAllAssets)
cmds.button(label = 'Cancel:', h=exportHeightButton,c="cmds.deleteUI('assetsExport')")
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'assetsExport' )
cmds.window('assetsExport', edit=True, widthHeight=[350,350], s = False)
###########################################################################################################################################
def createAssetPopUp(args=None):
if cmds.window('jsAssetCreate', exists=True):
cmds.deleteUI('jsAssetCreate')
cmds.window('jsAssetCreate', title= 'jsShaderPackCreate')
cmds.frameLayout(label = "Create New Shader Pack:", borderStyle = "etchedIn" )
cmds.columnLayout( adjustableColumn=True, cal = 'center' )
#cmds.text('Asset Name:')
cmds.textField('assetNameField')
cmds.button(label='Create Shader Pack:', c=createAsset,h=35 )
cmds.showWindow( 'jsAssetCreate' )
cmds.window('jsAssetCreate', edit=True, widthHeight=[200,80], s = False)
def createAsset(args=None):
assetName = cmds.textField('assetNameField',query=1,text=1)
checkExists=cmds.ls(assetName+'_Asset_STORAGE')
if len(checkExists) >0:
print 'Asset already exists'
cmds.textField('assetNameField',edit=1,text='')
else:
if len(cmds.ls('materialLibraryDirectory_STORAGE')) > 0:
cmds.group(em=True,name = assetName+'_Asset_STORAGE',parent = 'materialLibraryDirectory_STORAGE')
cmds.textScrollList('assetList',edit =1, ra=1 )
underList = cmds.listRelatives('materialLibraryDirectory_STORAGE')
assetList = []
for each in underList:
if 'Asset_STORAGE' in each:
assetList.append(each)
cmds.textScrollList('assetList',edit = 1, ra=1 )
cmds.textScrollList('assetList',edit = 1, append = assetList,sii=1 )
else:
cmds.group(name = 'materialLibraryDirectory_STORAGE', em=True)
cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes',nn = 'Notes', dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', 'No Directory Currently Set', type = "string", )
cmds.group(em=True,name = assetName+'_Asset_STORAGE',parent = 'materialLibraryDirectory_STORAGE')
cmds.textScrollList('assetList',edit =1, ra=1 )
underList = cmds.listRelatives('materialLibraryDirectory_STORAGE')
assetList = []
for each in underList:
if 'Asset_STORAGE' in each:
assetList.append(each)
cmds.textScrollList('assetList',edit = 1, ra=1 )
cmds.textScrollList('assetList',edit = 1, append = assetList,sii=1 )
cmds.deleteUI('jsAssetCreate')
##################################################################################################################
def saveAssetApply(args=None):
listAsset = cmds.textScrollList('assetList', q = True, si = True )
if listAsset:
for each in listAsset:
assetName = each
else:
assetName = 'error'
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if not checkStorageNode:
cmds.group(name = 'materialLibraryDirectory_STORAGE', em=True)
cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes',nn = 'Notes', dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', 'No Directory Currently Set', type = "string", )
listMat = cmds.ls( materials=True )
listMat.remove(u'particleCloud1')
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
exists = cmds.textField('%s_AssetApplyField' %(newName), query = True, exists=True)
if exists == True:
result = cmds.textField('%s_AssetApplyField' %(newName), query = True, text=True)
testing = cmds.attributeQuery(newName, node=assetName, ex=True )
if testing == True:
cmds.setAttr(str(assetName)+'.%s' %(newName), result ,type = "string" )
else:
cmds.addAttr(str(assetName), sn=newName,nn = newName, dt = "string" )
cmds.setAttr(str(assetName)+'.%s' %(newName), result ,type = "string" )
resultList = result.split()
if len(resultList) > 0:
for r in resultList:
try:
cmds.select(r)
cmds.hyperShade(assign=each)
cmds.select(d=1)
except:
pass
def saveAsset(args=None):
listAsset = cmds.textScrollList('assetList', q = True, si = True )
if listAsset:
for each in listAsset:
assetName = each
else:
assetName = 'error'
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if not checkStorageNode:
cmds.group(name = 'materialLibraryDirectory_STORAGE', em=True)
cmds.addAttr('materialLibraryDirectory_STORAGE', sn='notes',nn = 'Notes', dt = "string" )
cmds.setAttr('materialLibraryDirectory_STORAGE.notes', 'No Directory Currently Set', type = "string", )
listMat = cmds.ls( materials=True )
listMat.remove(u'particleCloud1')
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
exists = cmds.textField('%s_AssetApplyField' %(newName), query = True, exists=True)
if exists == True:
result = cmds.textField('%s_AssetApplyField' %(newName), query = True, text=True)
testing = cmds.attributeQuery(newName, node=assetName, ex=True )
if testing == True:
cmds.setAttr(str(assetName)+'.%s' %(newName), result ,type = "string" )
else:
cmds.addAttr(str(assetName), sn=newName,nn = newName, dt = "string" )
cmds.setAttr(str(assetName)+'.%s' %(newName), result ,type = "string" )
resultList = result.split()
def closeAssetAssigner(args=None):
cmds.deleteUI('jsAssetAssigner')
def assignAssetTest(args=None):
listAsset = cmds.textScrollList('assetList', q = True, si = True )
if listAsset:
for each in listAsset:
assetName = each
else:
assetName = 'error'
listMat = cmds.ls( materials=True )
listMat.remove(u'particleCloud1')
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
exists = cmds.textField('%s_AssetApplyField' %(newName), query = True, exists=True)
if exists == True:
result = cmds.textField('%s_AssetApplyField' %(newName), query = True, text=True)
resultList = result.split()
if len(resultList) > 0:
for r in resultList:
try:
cmds.select(r)
cmds.hyperShade(assign=each)
cmds.select(d=1)
except:
pass
##################################################################################################################
def addSelectedOther(name):
sel = cmds.ls(sl=1, tr=1)
for each in sel:
current = cmds.textField(name+'_AssetApplyField', query = True, text = True)
if len(current) > 0:
new = current + ' ' +each
else:
new = each
cmds.textField(name+'_AssetApplyField', edit = True, text = new )
def assignAssetWindow(args=None):
if len(cmds.ls('*_Asset_STORAGE')) >0:
assignAssetWindowFunc()
else:
print 'No Asset Storage Nodes Exist'
def assignAssetWindowFunc(args=None):
listAsset = cmds.textScrollList('assetList', q = True, si = True )
if listAsset:
for each in listAsset:
currentAsset = each
else:
currentAsset = 'error'
listMat = cmds.ls( materials=True )
listMatStart = cmds.ls( materials=True )
downList = []
for each in listMatStart:
lowerNode = cmds.hyperShade(lun = each)
underShader = cmds.ls(lowerNode, materials = True)
for x in underShader:
downList.append(x)
downList = remove_duplicates(downList)
if len(downList) > 0:
for z in downList:
listMat.remove(z)
listMat.append(u'lambert1')
listMat=sorted(listMat, key=lambda s: s.lower())
if cmds.window('jsAssetAssigner', exists=True):
cmds.deleteUI('jsAssetAssigner')
cmds.window('jsAssetAssigner', title= currentAsset+' Assignments')
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.flowLayout()
cmds.frameLayout(label = "Create Material Assignments for " +str(currentAsset)+":", borderStyle = "etchedIn",h=400,w=600 )
cmds.scrollLayout(hst=16,vst=16)
last = 'a'
aExist =[]
if listMat[0][0] == last:
aExist.append('yes')
if 'yes' in aExist:
cmds.text('a'.capitalize()+':',fn = 'boldLabelFont')
for each in listMat:
if each[0] == last:
newName = cmds.ls(each)[0].rpartition(':')[2]
#cmds.rowLayout( nc=2, adjustableColumn=2, columnAlign=(1, 'left'), w= 580 )
cmds.rowLayout( nc=3, adjustableColumn=3, columnAlign=(1, 'left'), w= 980 )
cmds.button(newName,label = '+', c = 'jsLookdevAssistant.addSelectedOther("%s")'%newName )
cmds.text(label = '%s:' %(newName), font = "boldLabelFont",rs=1 )
cmds.textField('%s_AssetApplyField' %(newName), w =200)
cmds.setParent('..')
else:
cmds.text(label= each[0].capitalize()+':',fn = 'boldLabelFont')
newName = cmds.ls(each)[0].rpartition(':')[2]
#cmds.rowLayout( nc=2, adjustableColumn=2, columnAlign=(1, 'left'), w= 580 )
cmds.rowLayout( nc=3, adjustableColumn=3, columnAlign=(1, 'left'), w= 980 )
cmds.button(newName,label = '+', c = 'jsLookdevAssistant.addSelectedOther("%s")'%newName )
cmds.text(label = '%s:' %(newName), font = "boldLabelFont",rs=1 )
cmds.textField('%s_AssetApplyField' %(newName), w =200)
last = each[0]
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Options:", borderStyle = "etchedIn",h=400,w=100 )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
evenHeight = 93
cmds.button(label = 'Assign and Save:', c = saveAssetApply, h = evenHeight)
cmds.button(label = 'Test Assignments:', c = assignAssetTest, h = evenHeight)
cmds.button(label = 'Save Current:', c = saveAsset, h = evenHeight )
cmds.button(label = 'Close', c= closeAssetAssigner, h = evenHeight)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'jsAssetAssigner' )
cmds.window('jsAssetAssigner', edit=True, widthHeight=[705,400], s = False)
for each in listMat:
newName = cmds.ls(each)[0].rpartition(':')[2]
testing = cmds.attributeQuery(newName, node=currentAsset, ex=True )
if testing == True:
newSet = cmds.getAttr('%s.%s' %(currentAsset, newName) )
cmds.textField('%s_AssetApplyField' %(newName),edit = True, text = newSet)
def applyAsset(args=None):
assignAssetWindow()
assignAssetTest()
closeAssetAssigner()
################################################################################################################################
def createAssetWindow(args=None):
if cmds.window('jsAssetManager', exists=True):
cmds.deleteUI('jsAssetManager')
cmds.window('jsAssetManager', title= 'jsShaderPackManager')
cmds.frameLayout(label = "jsShaderPackManager:", borderStyle = "etchedIn" )
cmds.flowLayout()
cmds.frameLayout(label = "Shader Packs in Scene:", borderStyle = "etchedIn",w=200 )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
if len(cmds.ls('materialLibraryDirectory_STORAGE')) == 0:
cmds.textScrollList('assetList', numberOfRows=8, allowMultiSelection=False, h = 250,append = '',sii =1 )
else:
underList = cmds.listRelatives('materialLibraryDirectory_STORAGE')
if underList:
assetList = []
for each in underList:
if 'Asset_STORAGE' in each:
assetList.append(each)
cmds.textScrollList('assetList', numberOfRows=8, allowMultiSelection=False, h = 250, append = assetList,sii = 1 )
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Asset Functions:", borderStyle = "etchedIn",h=275 )
cmds.button(label='Create:',c = createAssetPopUp)
cmds.button(label='Edit:', c= assignAssetWindow)
cmds.button(label='Import:',c=importAssetUI)
cmds.button(label='Publish:',c=publishAssetWindow)
cmds.button(label='Apply Asset:',c=applyAsset)
cmds.setParent('..')
cmds.setParent('..')
cmds.showWindow( 'jsAssetManager' )
cmds.window('jsAssetManager', edit=True, widthHeight=[305,300], s = 0)
######################################################################################
######Arnold Quick Settings#################################################################################
############################################################################################################
############################################################################################################
############################################################################################################
def lowSamples(args=None):
cmds.setAttr("defaultArnoldRenderOptions.AA_samples", 2)
cmds.setAttr("defaultArnoldRenderOptions.GIDiffuseSamples",2)
cmds.setAttr("defaultArnoldRenderOptions.GIGlossySamples",1)
cmds.setAttr("defaultArnoldRenderOptions.GI_refraction_samples",1)
cmds.setAttr("defaultArnoldRenderOptions.sss_bssrdf_samples",1)
cmds.setAttr("defaultArnoldRenderOptions.volume_indirect_samples",1)
cmds.setAttr("defaultArnoldDriver.tiled", True)
cmds.setAttr("defaultArnoldDriver.preserveLayerName", True)
cmds.setAttr("defaultArnoldDriver.halfPrecision", True)
def midSamples(args=None):
cmds.setAttr("defaultArnoldRenderOptions.AA_samples", 3)
cmds.setAttr("defaultArnoldRenderOptions.GIDiffuseSamples",2)
cmds.setAttr("defaultArnoldRenderOptions.GIGlossySamples",2)
cmds.setAttr("defaultArnoldRenderOptions.GI_refraction_samples",2)
cmds.setAttr("defaultArnoldRenderOptions.sss_bssrdf_samples",2)
cmds.setAttr("defaultArnoldRenderOptions.volume_indirect_samples",2)
cmds.setAttr("defaultArnoldDriver.tiled", True)
cmds.setAttr("defaultArnoldDriver.preserveLayerName", True)
cmds.setAttr("defaultArnoldDriver.halfPrecision", True)
def highSamples(args=None):
cmds.setAttr("defaultArnoldRenderOptions.AA_samples", 5)
cmds.setAttr("defaultArnoldRenderOptions.GIDiffuseSamples",4)
cmds.setAttr("defaultArnoldRenderOptions.GIGlossySamples",3)
cmds.setAttr("defaultArnoldRenderOptions.GI_refraction_samples",2)
cmds.setAttr("defaultArnoldRenderOptions.sss_bssrdf_samples",2)
cmds.setAttr("defaultArnoldRenderOptions.volume_indirect_samples",2)
cmds.setAttr("defaultArnoldDriver.tiled", True)
cmds.setAttr("defaultArnoldDriver.preserveLayerName", True)
cmds.setAttr("defaultArnoldDriver.halfPrecision", True)
def mBlurOn(args=None):
cmds.setAttr("defaultArnoldRenderOptions.mb_en",1)
def mBlurOff(args=None):
cmds.setAttr("defaultArnoldRenderOptions.mb_en",0)
#quickSettings End ########################################################################################
############################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
def helpWindow(args=None):
if cmds.window('jsLookDevAssistantReadMe',exists=True):
cmds.deleteUI('jsLookDevAssistantReadMe')
window = cmds.window('jsLookDevAssistantReadMe',menuBar=True, title= 'ReadMe',bgc=[.25,.25,.25]) #, s = False
cmds.scrollLayout(hst=16,vst=16, cr=1)
cmds.text('jsLookDevAssistant\'s Super Exciting Read Me:', fn = "boldLabelFont")
cmds.separator( height=10, style='in' )
cmds.text('General Overview:', fn = "boldLabelFont", al="left")
    cmds.text('jsLookDevAssistant is a tool designed to speed up the management and application of shaders. This General Overview covers only the two most important points.', al="left", ww=1)
    cmds.text('First - all material assignments in every material applicator in the script are made via geometry naming and wildcards. Material applications can be made to groups and/or individual geo in any combination. Second - the "STORAGE_NODE", which stores all of your saved data: library paths, material assignments, etc. To use any "Library" function successfully you need to set a directory using "Load Directory". With these two points understood you can basically jump right in; it\'s very straightforward. If you need a more in-depth explanation of individual tools, you can find it below. ', al="left", ww=1)
cmds.separator( height=20, style='in' )
cmds.text('Primary Tools:', fn = "boldLabelFont")
cmds.separator( height=20, style='in' )
cmds.text('Material Applicator:', fn = "boldLabelFont", al="left")
    cmds.text('Material Applicator batch-assigns materials to geometry via name matching and wildcards, to groups and/or individual geo in any combination.', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.text('Shader Pack Manager:', fn = "boldLabelFont", al="left")
    cmds.text('Shader Pack Manager creates, edits, imports, publishes, and applies asset shader packs stored in the library directory.', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.text('Load Directory:', fn = "boldLabelFont", al="left")
cmds.text('Used to set the directory where all exported materials, and asset shaderPacks are exported to.', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.text('Import Material from Library:', fn = "boldLabelFont", al="left")
cmds.text('Opens a UI to choose which materials to import from the library directory.', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.text('Publish Material to Library:', fn = "boldLabelFont", al="left")
cmds.text('Opens a UI to choose which materials to publish into the library directory.', al="left", ww=1)
cmds.separator( height=20, style='in' )
cmds.text('Bonus Tools:', fn = "boldLabelFont")
cmds.separator( height=20, style='in' )
cmds.text('Arnold Quick Setting:', fn = "boldLabelFont", al="left")
    cmds.text('Arnold render setting presets that allow quick changes between predefined low, mid, and high sample settings, as well as toggling motion blur on and off.', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.text('Rename Shading Groups:', fn = "boldLabelFont", al="left")
    cmds.text('A one-click script. Depending on your selection it will either rename the shading groups of the selected materials or, if no materials are selected, rename ALL shading groups to match the material names + "SG".', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.text('Texture Path Editor:', fn = "boldLabelFont", al="left")
    cmds.text('A simple tool to replace parts of file path strings. Use "View All File Texture Paths" to view and select the nodes you wish to edit. ', al="left", ww=1)
cmds.separator( height=15, style='in' )
cmds.setParent('..')
cmds.showWindow( window )
cmds.window('jsLookDevAssistantReadMe', edit=True, widthHeight=[525,400], s = False)
####Help Ends
############################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
####UI Section Begins####_____________________________________________________________________
if cmds.window('jsLookDevAssistant',exists=True):
cmds.deleteUI('jsLookDevAssistant')
window = cmds.window('jsLookDevAssistant',menuBar=True, title= 'jsLookdevAssistant %s' %(versionNumber) , w=330, h=100) #, s = False
cmds.columnLayout()
form = cmds.formLayout()
tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)
cmds.formLayout( form, edit=True, attachForm=((tabs, 'top', 0), (tabs, 'left', 0), (tabs, 'bottom', 0), (tabs, 'right', 0)) )
cmds.menu( label='Primary Tools', tearOff=True )
cmds.menuItem( label='Load Directory' , c = loadDirectory)
cmds.menuItem( divider=True )
cmds.menuItem( label='Material Manager',c= materialBatchApply)
cmds.menuItem( divider=True )
cmds.menuItem( label='Import Materials', c=importCheckerUI )
cmds.menuItem( label='Publish Materials', c =exportCheckerUi )
cmds.menuItem( divider=True )
cmds.menuItem( label='Shader Pack Manager', c=createAssetWindow )
cmds.menu( label='Bonus Tools', tearOff=True )
cmds.menuItem( subMenu=True, label='Arnold Quick Settings', tearOff=True )
cmds.menuItem( label='Low Samples',c=lowSamples )
cmds.menuItem( label='Mid Samples',c=midSamples )
cmds.menuItem( label='High Samples', c =highSamples)
cmds.menuItem( divider=True )
cmds.menuItem( label='Motion Blur On', c=mBlurOn )
cmds.menuItem( label='Motion Blur Off', c=mBlurOff )
cmds.setParent( '..', menu=True )
cmds.menuItem( label='Rename Shading Groups', c = renameShadingGroups )
cmds.menuItem( label='Texture Path Editor', c=texturePath )
cmds.menu( label='Help', helpMenu=True )
cmds.menuItem( label='Documentation', c = helpWindow )
############ MATERIAL MANAGER UI:############################################_________________________________________________________________________________
child1 = cmds.rowColumnLayout(numberOfColumns=3)
cmds.frameLayout(label = "jsMaterialManager", borderStyle = "etchedIn", w = 320 )
cmds.columnLayout(adj = 1)
cmds.flowLayout(h=32, wr=1)
cmds.textField('TFL_selection', enterCommand= selection, tx= 'Geometry Picker',h=30,w=263,aie = 1)
cmds.button(label='Select:', command= selection,w=50,h=30)
cmds.setParent('..')
cmds.flowLayout(h=32)
cmds.optionMenu('individual', label='Individual Assignment:', changeCommand=printNewMenuItem , h=30,w=263)#optionMenuCommand
for num in range(0,matNumber):
cmds.menuItem(label= listMat [num] )
cmds.button(label='Update:', command = updateIndividual, h=30)
cmds.setParent('..')
cmds.setParent('..')
cmds.frameLayout(label = "Material Assignment Tools:", borderStyle = "etchedIn", w = 310)
cmds.columnLayout(adj = 1, rs = 2)
cmds.button(label='Material Applicator:', c = materialBatchApply, h=44)
cmds.button(label = 'Shader Pack Manager:', c = createAssetWindow, h=43)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
############MATERIAL LIBRARY UI##########_______________________________________________________________________________________________________
child2 = cmds.rowColumnLayout(numberOfColumns=3)
checkStorageNode = cmds.ls('materialLibraryDirectory_STORAGE')
if len(checkStorageNode) > 0:
testing = cmds.attributeQuery('notes', node='materialLibraryDirectory_STORAGE', ex=True )
    if testing:
currentSet = cmds.getAttr('materialLibraryDirectory_STORAGE.notes')
else:
currentSet = 'No Directory Storage File Found'
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
cmds.frameLayout(label = "jsMaterialLibrary", borderStyle = "etchedIn", w = 320 )
cmds.columnLayout( adjustableColumn=True, cal = 'left' )
typeDir = cmds.textField('getDir', text = currentSet, h=30)
cmds.button(label = 'Load Directory:', command = loadDirectory, h=35)
cmds.setParent('..')
cmds.frameLayout(label = "jsPublishMaterial", borderStyle = "etchedIn", w = 300)
cmds.columnLayout(adj = 1, rs = 2)
cmds.button(label = 'Import Material from Library:', command = importCheckerUI, h=44)
cmds.button(label = 'Publish Material to Library:', command = exportCheckerUi, h=44)
cmds.setParent( '..' )
cmds.tabLayout( tabs, edit=True, tabLabel=((child1, 'Material Manager'), (child2, 'Material Library')) )
cmds.showWindow( window )
cmds.window('jsLookDevAssistant', edit=True, widthHeight=[330,259], s = False)
|
jszokoli/jsTK
|
Misc/jsLookdevAssistant.py
|
Python
|
gpl-3.0
| 67,262
|
[
"exciting"
] |
c81e04e19f7048473121fb695ff616eecae6738a09f2b7ada9ac561356406ad0
|
# Gaussian Process Classifier demo
# Author: Drishtii@
# Based on
# https://github.com/probml/pmtk3/blob/master/demos/gpcDemo2d.m
# See also gpc_demo_2d_pytorch for a GPyTorch version of this demo.
import superimport
import pyprobml_utils as pml
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# make synthetic data
np.random.seed(9)
n1=80
n2=40
S1 = np.eye(2)
S2 = np.array([[1, 0.95], [0.95, 1]])
m1 = np.array([0.75, 0]).reshape(-1, 1)
m2 = np.array([-0.75, 0])
xx = np.repeat(m1, n1).reshape(2, n1)
yy = np.repeat(m2, n2).reshape(2, n2)
x1 = np.linalg.cholesky(S1).T @ np.random.randn(2,n1) + xx
x2 = np.linalg.cholesky(S2).T @ np.random.randn(2,n2) + yy
x = np.concatenate([x1.T, x2.T])
y1 = -np.ones(n1).reshape(-1, 1)
y2 = np.ones(n2).reshape(-1, 1)
y = np.concatenate([y1, y2])
q = np.linspace(-4, 4, 81)
r = np.linspace(-4, 4, 81)
t1, t2 = np.meshgrid(q, r)
t = np.hstack([t1.reshape(-1, 1), t2.reshape(-1, 1)])
def g(x):
return 5. - x[:, 1] - .5 * x[:, 0] ** 2
y_true = g(t)
y_true = y_true.reshape(81, 81)
def make_plot(gp):
plt.figure()
y_prob = gp.predict_proba(t)[:, 1]
y_prob = y_prob.reshape(81, 81)
plt.scatter(x1[0, :], x1[1, :], marker='o')
plt.scatter(x2[0, :], x2[1, :], marker='+')
plt.contour(t1, t2, y_prob, levels = np.linspace(0.1, 0.9, 9))
plt.contour(t1, t2, y_prob, [0.5], colors=['red'])
plt.title(gp.kernel_)
# GP without fitting the kernel hyper-parameters
# Note that 10.0 ~= 3.16**2
kernel = 10.0 * RBF(length_scale=0.5)
gp1 = GaussianProcessClassifier(kernel=kernel, optimizer=None)
gp1.fit(x, y)
make_plot(gp1)
pml.savefig('gpc2d_init_params.pdf')
# GP where we optimize the kernel parameters
gp2 = GaussianProcessClassifier(kernel=kernel)
gp2.fit(x, y)
make_plot(gp2)
pml.savefig('gpc2d_learned_params.pdf')
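# Hedged extra (not in the original demo): both classifiers expose the
# fitted kernel via kernel_ and an (approximate) log marginal likelihood
# via log_marginal_likelihood(); printing them quantifies what optimizing
# the hyper-parameters buys over the fixed kernel.
for name, gp in [('fixed', gp1), ('optimized', gp2)]:
    print(name, gp.kernel_, 'LML:',
          gp.log_marginal_likelihood(gp.kernel_.theta))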
|
probml/pyprobml
|
scripts/gpc_demo_2d_sklearn.py
|
Python
|
mit
| 1,888
|
[
"Gaussian"
] |
db43135819c3a333112e9ec9711e3280831d9dbbdbd440671ab6fa52b754ae20
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Extract information from alignment objects.
In order to try and avoid huge alignment objects with tons of functions,
functions which return summary type information about alignments should
be put into classes in this module.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
import math
import sys
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio.SubsMat import FreqTable
__docformat__ = "restructuredtext en"
# Expected random distributions for 20-letter protein, and
# for 4-letter nucleotide alphabets
Protein20Random = 0.05
Nucleotide4Random = 0.25
class SummaryInfo(object):
"""Calculate summary info about the alignment.
    This class should be used to calculate information summarizing the
results of an alignment. This may either be straight consensus info
or more complicated things.
"""
def __init__(self, alignment):
"""Initialize with the alignment to calculate information on.
ic_vector attribute. A dictionary. Keys: column numbers. Values:
"""
self.alignment = alignment
self.ic_vector = {}
def dumb_consensus(self, threshold=.7, ambiguous="X",
consensus_alpha=None, require_multiple=0):
"""Output a fast consensus sequence of the alignment.
This doesn't do anything fancy at all. It will just go through the
sequence residue by residue and count up the number of each type
of residue (ie. A or G or T or C for DNA) in all sequences in the
alignment. If the percentage of the most common residue type is
        greater than the passed threshold, then we will add that residue type,
otherwise an ambiguous character will be added.
This could be made a lot fancier (ie. to take a substitution matrix
        into account), but it is just meant for a quick and dirty consensus.
Arguments:
- threshold - The threshold value that is required to add a particular
atom.
- ambiguous - The ambiguous character to be added when the threshold is
not reached.
- consensus_alpha - The alphabet to return for the consensus sequence.
If this is None, then we will try to guess the alphabet.
- require_multiple - If set as 1, this will require that more than
1 sequence be part of an alignment to put it in the consensus (ie.
not just 1 sequence and gaps).
"""
# Iddo Friedberg, 1-JUL-2004: changed ambiguous default to "X"
consensus = ''
# find the length of the consensus we are creating
con_len = self.alignment.get_alignment_length()
# go through each seq item
for n in range(con_len):
# keep track of the counts of the different atoms we get
atom_dict = {}
num_atoms = 0
for record in self.alignment._records:
# make sure we haven't run past the end of any sequences
# if they are of different lengths
if n < len(record.seq):
if record.seq[n] != '-' and record.seq[n] != '.':
if record.seq[n] not in atom_dict:
atom_dict[record.seq[n]] = 1
else:
atom_dict[record.seq[n]] += 1
num_atoms = num_atoms + 1
max_atoms = []
max_size = 0
for atom in atom_dict:
if atom_dict[atom] > max_size:
max_atoms = [atom]
max_size = atom_dict[atom]
elif atom_dict[atom] == max_size:
max_atoms.append(atom)
if require_multiple and num_atoms == 1:
consensus += ambiguous
elif (len(max_atoms) == 1) and ((float(max_size) / float(num_atoms))
>= threshold):
consensus += max_atoms[0]
else:
consensus += ambiguous
# we need to guess a consensus alphabet if one isn't specified
if consensus_alpha is None:
consensus_alpha = self._guess_consensus_alphabet(ambiguous)
return Seq(consensus, consensus_alpha)
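    # Worked example (illustrative, using the alignment shown in the PSSM
    # docstring below): for rows "GTATC", "AT--C", "CTGTC" with the default
    # threshold of .7, column 0 has no residue above 1/3 -> 'X'; column 1 is
    # 3/3 'T' -> 'T'; column 2 ties 'A'/'G' after skipping gaps -> 'X';
    # column 3 is 2/2 'T' -> 'T'; column 4 is 3/3 'C' -> 'C'. The consensus
    # is therefore "XTXTC".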
def gap_consensus(self, threshold=.7, ambiguous="X",
consensus_alpha=None, require_multiple=0):
"""Same as dumb_consensus(), but allows gap on the output.
Things to do:
         - Let the user define whether a single gap in a column makes the
           result character in the consensus a gap.
         - Let the user select the gap character; for now it uses the same
           one as the input.
"""
# Iddo Friedberg, 1-JUL-2004: changed ambiguous default to "X"
consensus = ''
# find the length of the consensus we are creating
con_len = self.alignment.get_alignment_length()
# go through each seq item
for n in range(con_len):
# keep track of the counts of the different atoms we get
atom_dict = {}
num_atoms = 0
for record in self.alignment._records:
# make sure we haven't run past the end of any sequences
# if they are of different lengths
if n < len(record.seq):
if record.seq[n] not in atom_dict:
atom_dict[record.seq[n]] = 1
else:
atom_dict[record.seq[n]] += 1
num_atoms += 1
max_atoms = []
max_size = 0
for atom in atom_dict:
if atom_dict[atom] > max_size:
max_atoms = [atom]
max_size = atom_dict[atom]
elif atom_dict[atom] == max_size:
max_atoms.append(atom)
if require_multiple and num_atoms == 1:
consensus += ambiguous
elif (len(max_atoms) == 1) and ((float(max_size) / float(num_atoms))
>= threshold):
consensus += max_atoms[0]
else:
consensus += ambiguous
# we need to guess a consensus alphabet if one isn't specified
if consensus_alpha is None:
# TODO - Should we make this into a Gapped alphabet?
consensus_alpha = self._guess_consensus_alphabet(ambiguous)
return Seq(consensus, consensus_alpha)
def _guess_consensus_alphabet(self, ambiguous):
"""Pick an (ungapped) alphabet for an alignment consesus sequence.
This just looks at the sequences we have, checks their type, and
        returns an appropriate type which seems to make sense with the
sequences we've got.
"""
# Start with the (un-gapped version of) the alignment alphabet
a = Alphabet._get_base_alphabet(self.alignment._alphabet)
# Now check its compatible with all the rest of the sequences
for record in self.alignment:
# Get the (un-gapped version of) the sequence's alphabet
alt = Alphabet._get_base_alphabet(record.seq.alphabet)
if not isinstance(alt, a.__class__):
raise ValueError("Alignment contains a sequence with \
an incompatible alphabet.")
# Check the ambiguous character we are going to use in the consensus
# is in the alphabet's list of valid letters (if defined).
if hasattr(a, "letters") and a.letters is not None \
and ambiguous not in a.letters:
# We'll need to pick a more generic alphabet...
if isinstance(a, IUPAC.IUPACUnambiguousDNA):
if ambiguous in IUPAC.IUPACUnambiguousDNA().letters:
a = IUPAC.IUPACUnambiguousDNA()
else:
a = Alphabet.generic_dna
elif isinstance(a, IUPAC.IUPACUnambiguousRNA):
if ambiguous in IUPAC.IUPACUnambiguousRNA().letters:
a = IUPAC.IUPACUnambiguousRNA()
else:
a = Alphabet.generic_rna
elif isinstance(a, IUPAC.IUPACProtein):
if ambiguous in IUPAC.ExtendedIUPACProtein().letters:
a = IUPAC.ExtendedIUPACProtein()
else:
a = Alphabet.generic_protein
else:
a = Alphabet.single_letter_alphabet
return a
    def replacement_dictionary(self, skip_chars=None):
        """Generate a replacement dictionary to plug into a substitution matrix.
This should look at an alignment, and be able to generate the number
of substitutions of different residues for each other in the
aligned object.
Will then return a dictionary with this information::
{('A', 'C') : 10, ('C', 'A') : 12, ('G', 'C') : 15 ....}
This also treats weighted sequences. The following example shows how
we calculate the replacement dictionary. Given the following
multiple sequence alignment::
GTATC 0.5
AT--C 0.8
CTGTC 1.0
For the first column we have::
('A', 'G') : 0.5 * 0.8 = 0.4
('C', 'G') : 0.5 * 1.0 = 0.5
('A', 'C') : 0.8 * 1.0 = 0.8
We then continue this for all of the columns in the alignment, summing
the information for each substitution in each column, until we end
up with the replacement dictionary.
Arguments:
- skip_chars - A list of characters to skip when creating the dictionary.
For instance, you might have Xs (screened stuff) or Ns, and not want
to include the ambiguity characters in the dictionary.
"""
# get a starting dictionary based on the alphabet of the alignment
rep_dict, skip_items = self._get_base_replacements(skip_chars)
# iterate through each record
for rec_num1 in range(len(self.alignment._records)):
# iterate through each record from one beyond the current record
# to the end of the list of records
for rec_num2 in range(rec_num1 + 1, len(self.alignment._records)):
# for each pair of records, compare the sequences and add
# the pertinent info to the dictionary
rep_dict = self._pair_replacement(
self.alignment._records[rec_num1].seq,
self.alignment._records[rec_num2].seq,
self.alignment._records[rec_num1].annotations.get('weight', 1.0),
self.alignment._records[rec_num2].annotations.get('weight', 1.0),
rep_dict, skip_items)
return rep_dict
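    # Usage sketch (hedged; `aln` stands for any alignment whose records may
    # carry a 'weight' annotation):
    #     summary = SummaryInfo(aln)
    #     rep = summary.replacement_dictionary(skip_chars=['-'])
    #     rep[('A', 'G')]   # weighted count of A (earlier record) vs G
    # Keys are ordered pairs, so ('A', 'G') and ('G', 'A') are accumulated
    # separately, as in the docstring example above.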
def _pair_replacement(self, seq1, seq2, weight1, weight2,
start_dict, ignore_chars):
"""Compare two sequences and generate info on the replacements seen.
Arguments:
- seq1, seq2 - The two sequences to compare.
- weight1, weight2 - The relative weights of seq1 and seq2.
- start_dict - The dictionary containing the starting replacement
info that we will modify.
- ignore_chars - A list of characters to ignore when calculating
replacements (ie. '-').
Returns:
        - A replacement dictionary which is modified from start_dict with
the information from the sequence comparison.
"""
# loop through each residue in the sequences
for residue_num in range(len(seq1)):
residue1 = seq1[residue_num]
try:
residue2 = seq2[residue_num]
# if seq2 is shorter, then we just stop looking at replacements
# and return the information
except IndexError:
return start_dict
# if the two residues are characters we want to count
if (residue1 not in ignore_chars) and (residue2 not in ignore_chars):
try:
# add info about the replacement to the dictionary,
# modified by the sequence weights
start_dict[(residue1, residue2)] += weight1 * weight2
# if we get a key error, then we've got a problem with alphabets
except KeyError:
raise ValueError("Residues %s, %s not found in alphabet %s"
% (residue1, residue2,
self.alignment._alphabet))
return start_dict
def _get_all_letters(self):
"""Returns a string containing the expected letters in the alignment."""
all_letters = self.alignment._alphabet.letters
if all_letters is None \
or (isinstance(self.alignment._alphabet, Alphabet.Gapped)
and all_letters == self.alignment._alphabet.gap_char):
# We are dealing with a generic alphabet class where the
# letters are not defined! We must build a list of the
# letters used...
set_letters = set()
for record in self.alignment:
# Note the built in set does not have a union_update
# which was provided by the sets module's Set
set_letters = set_letters.union(record.seq)
list_letters = sorted(set_letters)
all_letters = "".join(list_letters)
return all_letters
    def _get_base_replacements(self, skip_items=None):
"""Get a zeroed dictionary of all possible letter combinations.
This looks at the type of alphabet and gets the letters for it.
It then creates a dictionary with all possible combinations of these
letters as keys (ie. ('A', 'G')) and sets the values as zero.
Returns:
- The base dictionary created
- A list of alphabet items to skip when filling the dictionary.
(Right now the only thing I can imagine in this list is gap
characters, but maybe X's or something else might be useful later.
This will also include any characters that are specified to be
skipped.)
"""
        skip_items = [] if skip_items is None else list(skip_items)
        base_dictionary = {}
all_letters = self._get_all_letters()
# if we have a gapped alphabet we need to find the gap character
# and drop it out
if isinstance(self.alignment._alphabet, Alphabet.Gapped):
skip_items.append(self.alignment._alphabet.gap_char)
all_letters = all_letters.replace(self.alignment._alphabet.gap_char, '')
# now create the dictionary
for first_letter in all_letters:
for second_letter in all_letters:
if first_letter not in skip_items and \
second_letter not in skip_items:
base_dictionary[(first_letter, second_letter)] = 0
return base_dictionary, skip_items
    def pos_specific_score_matrix(self, axis_seq=None,
                                  chars_to_ignore=None):
"""Create a position specific score matrix object for the alignment.
This creates a position specific score matrix (pssm) which is an
alternative method to look at a consensus sequence.
Arguments:
- chars_to_ignore - A listing of all characters not to include in
the pssm. If the alignment alphabet declares a gap character,
then it will be excluded automatically.
- axis_seq - An optional argument specifying the sequence to
put on the axis of the PSSM. This should be a Seq object. If nothing
is specified, the consensus sequence, calculated with default
parameters, will be used.
Returns:
- A PSSM (position specific score matrix) object.
"""
# determine all of the letters we have to deal with
all_letters = self._get_all_letters()
assert all_letters
        if chars_to_ignore is None:
            chars_to_ignore = []
        if not isinstance(chars_to_ignore, list):
            raise TypeError("chars_to_ignore should be a list.")
        chars_to_ignore = list(chars_to_ignore)  # copy: the gap char is appended below
# if we have a gap char, add it to stuff to ignore
if isinstance(self.alignment._alphabet, Alphabet.Gapped):
chars_to_ignore.append(self.alignment._alphabet.gap_char)
for char in chars_to_ignore:
all_letters = all_letters.replace(char, '')
if axis_seq:
left_seq = axis_seq
assert len(axis_seq) == self.alignment.get_alignment_length()
else:
left_seq = self.dumb_consensus()
pssm_info = []
# now start looping through all of the sequences and getting info
for residue_num in range(len(left_seq)):
score_dict = self._get_base_letters(all_letters)
for record in self.alignment._records:
try:
this_residue = record.seq[residue_num]
# if we hit an index error we've run out of sequence and
# should not add new residues
except IndexError:
this_residue = None
if this_residue and this_residue not in chars_to_ignore:
weight = record.annotations.get('weight', 1.0)
try:
score_dict[this_residue] += weight
# if we get a KeyError then we have an alphabet problem
except KeyError:
raise ValueError("Residue %s not found in alphabet %s"
% (this_residue,
self.alignment._alphabet))
pssm_info.append((left_seq[residue_num],
score_dict))
return PSSM(pssm_info)
def _get_base_letters(self, letters):
"""Create a zeroed dictionary with all of the specified letters.
"""
base_info = {}
for letter in letters:
base_info[letter] = 0
return base_info
def information_content(self, start=0,
end=None,
e_freq_table=None, log_base=2,
chars_to_ignore=[]):
"""Calculate the information content for each residue along an alignment.
Arguments:
        - start, end - The starting and ending points to calculate the
information content. These points should be relative to the first
sequence in the alignment, starting at zero (ie. even if the 'real'
first position in the seq is 203 in the initial sequence, for
the info content, we need to use zero). This defaults to the entire
length of the first sequence.
- e_freq_table - A FreqTable object specifying the expected frequencies
for each letter in the alphabet we are using (e.g. {'G' : 0.4,
'C' : 0.4, 'T' : 0.1, 'A' : 0.1}). Gap characters should not be
included, since these should not have expected frequencies.
        - log_base - The base of the logarithm to use in calculating the
information content. This defaults to 2 so the info is in bits.
        - chars_to_ignore - A listing of characters which should be ignored
in calculating the info content.
Returns:
- A number representing the info content for the specified region.
Please see the Biopython manual for more information on how information
content is calculated.
"""
# if no end was specified, then we default to the end of the sequence
if end is None:
end = len(self.alignment._records[0].seq)
if start < 0 or end > len(self.alignment._records[0].seq):
raise ValueError("Start (%s) and end (%s) are not in the \
range %s to %s"
% (start, end, 0, len(self.alignment._records[0].seq)))
# determine random expected frequencies, if necessary
random_expected = None
if not e_freq_table:
# TODO - What about ambiguous alphabets?
base_alpha = Alphabet._get_base_alphabet(self.alignment._alphabet)
if isinstance(base_alpha, Alphabet.ProteinAlphabet):
random_expected = Protein20Random
elif isinstance(base_alpha, Alphabet.NucleotideAlphabet):
random_expected = Nucleotide4Random
else:
errstr = "Error in alphabet: not Nucleotide or Protein, "
errstr += "supply expected frequencies"
raise ValueError(errstr)
del base_alpha
elif not isinstance(e_freq_table, FreqTable.FreqTable):
raise ValueError("e_freq_table should be a FreqTable object")
# determine all of the letters we have to deal with
all_letters = self._get_all_letters()
for char in chars_to_ignore:
all_letters = all_letters.replace(char, '')
info_content = {}
for residue_num in range(start, end):
freq_dict = self._get_letter_freqs(residue_num,
self.alignment._records,
all_letters, chars_to_ignore)
# print freq_dict,
column_score = self._get_column_info_content(freq_dict,
e_freq_table,
log_base,
random_expected)
info_content[residue_num] = column_score
# sum up the score
total_info = sum(info_content.values())
# fill in the ic_vector member: holds IC for each column
for i in info_content:
self.ic_vector[i] = info_content[i]
return total_info
def _get_letter_freqs(self, residue_num, all_records, letters, to_ignore):
"""Determine the frequency of specific letters in the alignment.
Arguments:
- residue_num - The number of the column we are getting frequencies
from.
- all_records - All of the SeqRecords in the alignment.
- letters - The letters we are interested in getting the frequency
for.
- to_ignore - Letters we are specifically supposed to ignore.
This will calculate the frequencies of each of the specified letters
        in the alignment at the given column, and return this as a
dictionary where the keys are the letters and the values are the
frequencies.
"""
freq_info = self._get_base_letters(letters)
total_count = 0
# collect the count info into the dictionary for all the records
for record in all_records:
try:
if record.seq[residue_num] not in to_ignore:
weight = record.annotations.get('weight', 1.0)
freq_info[record.seq[residue_num]] += weight
total_count += weight
# getting a key error means we've got a problem with the alphabet
except KeyError:
raise ValueError("Residue %s not found in alphabet %s"
% (record.seq[residue_num],
self.alignment._alphabet))
if total_count == 0:
# This column must be entirely ignored characters
for letter in freq_info:
assert freq_info[letter] == 0
# TODO - Map this to NA or NaN?
else:
# now convert the counts into frequencies
for letter in freq_info:
freq_info[letter] = freq_info[letter] / total_count
return freq_info
def _get_column_info_content(self, obs_freq, e_freq_table, log_base,
random_expected):
"""Calculate the information content for a column.
Arguments:
- obs_freq - The frequencies observed for each letter in the column.
- e_freq_table - An optional argument specifying the expected
frequencies for each letter. This is a SubsMat.FreqTable instance.
        - log_base - The base of the logarithm to use in calculating the
info content.
"""
try:
gap_char = self.alignment._alphabet.gap_char
except AttributeError:
# The alphabet doesn't declare a gap - there could be none
# in the sequence... or just a vague alphabet.
gap_char = "-" # Safe?
if e_freq_table:
if not isinstance(e_freq_table, FreqTable.FreqTable):
raise ValueError("e_freq_table should be a FreqTable object")
# check the expected freq information to make sure it is good
for key in obs_freq:
if (key != gap_char and key not in e_freq_table):
raise ValueError("Expected frequency letters %s "
"do not match observed %s"
% (list(e_freq_table),
                                        [c for c in obs_freq if c != gap_char]))
total_info = 0.0
for letter in obs_freq:
inner_log = 0.0
# if we have expected frequencies, modify the log value by them
# gap characters do not have expected frequencies, so they
# should just be the observed frequency.
if letter != gap_char:
if e_freq_table:
inner_log = obs_freq[letter] / e_freq_table[letter]
else:
inner_log = obs_freq[letter] / random_expected
# if the observed frequency is zero, we don't add any info to the
# total information content
if inner_log > 0:
letter_info = (obs_freq[letter] *
math.log(inner_log) / math.log(log_base))
total_info += letter_info
return total_info
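    # For reference, the loop above computes the standard relative-entropy
    # form of information content:
    #     IC(column) = sum over letters of obs_freq[letter] *
    #                  log(obs_freq[letter] / expected[letter]) / log(log_base)
    # where expected[letter] is e_freq_table[letter] when a table is given,
    # else the flat random_expected value; the gap character (inner_log left
    # at 0.0) contributes nothing to the total.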
def get_column(self, col):
# TODO - Deprecate this and implement slicing?
return self.alignment[:, col]
class PSSM(object):
"""Represent a position specific score matrix.
This class is meant to make it easy to access the info within a PSSM
and also make it easy to print out the information in a nice table.
Let's say you had an alignment like this::
GTATC
AT--C
CTGTC
The position specific score matrix (when printed) looks like::
G A T C
G 1 1 0 1
T 0 0 3 0
A 1 1 0 0
T 0 0 2 0
C 0 0 0 3
You can access a single element of the PSSM using the following::
your_pssm[sequence_number][residue_count_name]
For instance, to get the 'T' residue for the second element in the
above alignment you would need to do:
your_pssm[1]['T']
"""
def __init__(self, pssm):
"""Initialize with pssm data to represent.
The pssm passed should be a list with the following structure:
list[0] - The letter of the residue being represented (for instance,
from the example above, the first few list[0]s would be GTAT...
list[1] - A dictionary with the letter substitutions and counts.
"""
self.pssm = pssm
def __getitem__(self, pos):
return self.pssm[pos][1]
def __str__(self):
out = " "
all_residues = sorted(self.pssm[0][1])
# first print out the top header
for res in all_residues:
out += " %s" % res
out += "\n"
# for each item, write out the substitutions
for item in self.pssm:
out += "%s " % item[0]
for res in all_residues:
out += " %.1f" % item[1][res]
out += "\n"
return out
def get_residue(self, pos):
"""Return the residue letter at the specified position.
"""
return self.pssm[pos][0]
def print_info_content(summary_info, fout=None, rep_record=0):
""" Three column output: position, aa in representative sequence,
ic_vector value"""
fout = fout or sys.stdout
if not summary_info.ic_vector:
summary_info.information_content()
rep_sequence = summary_info.alignment._records[rep_record].seq
for pos in sorted(summary_info.ic_vector):
fout.write("%d %s %.3f\n" % (pos, rep_sequence[pos],
summary_info.ic_vector[pos]))
if __name__ == "__main__":
print("Quick test")
from Bio import AlignIO
from Bio.Align.Generic import Alignment
filename = "../../Tests/GFF/multi.fna"
format = "fasta"
expected = FreqTable.FreqTable({"A": 0.25, "G": 0.25, "T": 0.25, "C": 0.25},
FreqTable.FREQ,
IUPAC.unambiguous_dna)
alignment = AlignIO.read(open(filename), format)
for record in alignment:
print(record.seq)
print("=" * alignment.get_alignment_length())
summary = SummaryInfo(alignment)
consensus = summary.dumb_consensus(ambiguous="N")
print(consensus)
consensus = summary.gap_consensus(ambiguous="N")
print(consensus)
print("")
print(summary.pos_specific_score_matrix(chars_to_ignore=['-'],
axis_seq=consensus))
print("")
    # Have a generic alphabet, without a declared gap char, so we must
    # provide the frequencies and chars to ignore explicitly.
print(summary.information_content(e_freq_table=expected,
chars_to_ignore=['-']))
print("")
print("Trying a protein sequence with gaps and stops")
alpha = Alphabet.HasStopCodon(Alphabet.Gapped(Alphabet.generic_protein, "-"), "*")
a = Alignment(alpha)
a.add_sequence("ID001", "MHQAIFIYQIGYP*LKSGYIQSIRSPEYDNW-")
a.add_sequence("ID002", "MH--IFIYQIGYAYLKSGYIQSIRSPEY-NW*")
a.add_sequence("ID003", "MHQAIFIYQIGYPYLKSGYIQSIRSPEYDNW*")
print(a)
print("=" * a.get_alignment_length())
s = SummaryInfo(a)
c = s.dumb_consensus(ambiguous="X")
print(c)
c = s.gap_consensus(ambiguous="X")
print(c)
print("")
print(s.pos_specific_score_matrix(chars_to_ignore=['-', '*'], axis_seq=c))
print(s.information_content(chars_to_ignore=['-', '*']))
print("Done")
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Align/AlignInfo.py
|
Python
|
gpl-2.0
| 30,803
|
[
"Biopython"
] |
1fff7cae2b158b124714c0252538fe9b05326990671149c40d57f7c744c65b30
|
#!/usr/bin/python -O
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import sys
import shutil
import support
from site import addsitedir
from distutils import dir_util
def prepare_config_corr(filename, cfg, ext_python_modules_home):
addsitedir(ext_python_modules_home)
if sys.version.startswith('2.'):
import pyyaml2 as pyyaml
elif sys.version.startswith('3.'):
import pyyaml3 as pyyaml
data = pyyaml.load(open(filename, 'r'))
data["dataset"] = cfg.dataset
data["output_dir"] = cfg.output_dir
data["work_dir"] = os.path.join(cfg.output_dir, 'tmp')
#data["hard_memory_limit"] = cfg.max_memory
data["max_nthreads"] = cfg.max_threads
data["bwa"] = cfg.bwa
file_c = open(filename, 'w')
pyyaml.dump(data, file_c, default_flow_style = False, default_style='"', width=100500)
file_c.close()
def run_corrector(configs_dir, execution_home, cfg,
ext_python_modules_home, log, to_correct, result):
addsitedir(ext_python_modules_home)
if sys.version.startswith('2.'):
import pyyaml2 as pyyaml
elif sys.version.startswith('3.'):
import pyyaml3 as pyyaml
dst_configs = os.path.join(cfg.output_dir, "configs")
if os.path.exists(dst_configs):
shutil.rmtree(dst_configs)
dir_util.copy_tree(os.path.join(configs_dir, "corrector"), dst_configs, preserve_times=False)
cfg_file_name = os.path.join(dst_configs, "corrector.info")
cfg.tmp_dir = support.get_tmp_dir(prefix="corrector_")
prepare_config_corr(cfg_file_name, cfg, ext_python_modules_home)
binary_name = "corrector"
command = [os.path.join(execution_home, binary_name),
os.path.abspath(cfg_file_name), os.path.abspath(to_correct)]
log.info("\n== Running contig polishing tool: " + ' '.join(command) + "\n")
log.info("\n== Dataset description file was created: " + cfg_file_name + "\n")
support.sys_call(command, log)
if not os.path.isfile(result):
support.error("Mismatch correction finished abnormally: " + result + " not found!")
|
INNUENDOWEB/INNUca
|
src/SPAdes-3.9.0-Linux/share/spades/spades_pipeline/corrector_logic.py
|
Python
|
gpl-3.0
| 2,370
|
[
"BWA"
] |
4916cdfd035068aa0dd2175bc49e6070376ec4c5ffab1f702885f5619e63120e
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright (C) 2008 Evan Martin <martine@danga.com>
"""A git-command for integrating reviews on Rietveld and Gerrit."""
from __future__ import print_function
from distutils.version import LooseVersion
from multiprocessing.pool import ThreadPool
import base64
import collections
import contextlib
import datetime
import fnmatch
import httplib
import itertools
import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import stat
import sys
import tempfile
import textwrap
import urllib
import urllib2
import urlparse
import uuid
import webbrowser
import zlib
try:
import readline # pylint: disable=import-error,W0611
except ImportError:
pass
from third_party import colorama
from third_party import httplib2
from third_party import upload
import auth
import checkout
import clang_format
import dart_format
import setup_color
import fix_encoding
import gclient_utils
import gerrit_util
import git_cache
import git_common
import git_footers
import owners
import owners_finder
import presubmit_support
import rietveld
import scm
import split_cl
import subcommand
import subprocess2
import watchlists
__version__ = '2.0'
COMMIT_BOT_EMAIL = 'commit-bot@chromium.org'
DEFAULT_SERVER = 'https://codereview.chromium.org'
POSTUPSTREAM_HOOK = '.git/hooks/post-cl-land'
DESCRIPTION_BACKUP_FILE = '~/.git_cl_description_backup'
REFS_THAT_ALIAS_TO_OTHER_REFS = {
'refs/remotes/origin/lkgr': 'refs/remotes/origin/master',
'refs/remotes/origin/lkcr': 'refs/remotes/origin/master',
}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"
# Buildbucket master name prefix.
MASTER_PREFIX = 'master.'
# Shortcut since it quickly becomes redundant.
Fore = colorama.Fore
# Initialized in main()
settings = None
# Used by tests/git_cl_test.py to add extra logging.
# Inside the weirdly failing test, add this:
# >>> self.mock(git_cl, '_IS_BEING_TESTED', True)
# And scroll up to see the stack trace printed.
_IS_BEING_TESTED = False
def DieWithError(message, change_desc=None):
if change_desc:
SaveDescriptionBackup(change_desc)
print(message, file=sys.stderr)
sys.exit(1)
def SaveDescriptionBackup(change_desc):
backup_path = os.path.expanduser(DESCRIPTION_BACKUP_FILE)
print('\nError after CL description prompt -- saving description to %s\n' %
backup_path)
backup_file = open(backup_path, 'w')
backup_file.write(change_desc.description)
backup_file.close()
def GetNoGitPagerEnv():
env = os.environ.copy()
# 'cat' is a magical git string that disables pagers on all platforms.
env['GIT_PAGER'] = 'cat'
return env
def RunCommand(args, error_ok=False, error_message=None, shell=False, **kwargs):
try:
return subprocess2.check_output(args, shell=shell, **kwargs)
except subprocess2.CalledProcessError as e:
logging.debug('Failed running %s', args)
if not error_ok:
DieWithError(
'Command "%s" failed.\n%s' % (
' '.join(args), error_message or e.stdout or ''))
return e.stdout
def RunGit(args, **kwargs):
"""Returns stdout."""
return RunCommand(['git'] + args, **kwargs)
def RunGitWithCode(args, suppress_stderr=False):
"""Returns return code and stdout."""
if suppress_stderr:
stderr = subprocess2.VOID
else:
stderr = sys.stderr
try:
(out, _), code = subprocess2.communicate(['git'] + args,
env=GetNoGitPagerEnv(),
stdout=subprocess2.PIPE,
stderr=stderr)
return code, out
except subprocess2.CalledProcessError as e:
logging.debug('Failed running %s', ['git'] + args)
return e.returncode, e.stdout
def RunGitSilent(args):
"""Returns stdout, suppresses stderr and ignores the return code."""
return RunGitWithCode(args, suppress_stderr=True)[1]
def IsGitVersionAtLeast(min_version):
prefix = 'git version '
version = RunGit(['--version']).strip()
return (version.startswith(prefix) and
LooseVersion(version[len(prefix):]) >= LooseVersion(min_version))
def BranchExists(branch):
"""Return True if specified branch exists."""
code, _ = RunGitWithCode(['rev-parse', '--verify', branch],
suppress_stderr=True)
return not code
def time_sleep(seconds):
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
import time # Local import to discourage others from importing time globally.
return time.sleep(seconds)
def ask_for_data(prompt):
try:
return raw_input(prompt)
except KeyboardInterrupt:
# Hide the exception.
sys.exit(1)
def confirm_or_exit(prefix='', action='confirm'):
"""Asks user to press enter to continue or press Ctrl+C to abort."""
if not prefix or prefix.endswith('\n'):
mid = 'Press'
elif prefix.endswith('.') or prefix.endswith('?'):
mid = ' Press'
elif prefix.endswith(' '):
mid = 'press'
else:
mid = ' press'
ask_for_data('%s%s Enter to %s, or Ctrl+C to abort' % (prefix, mid, action))
def ask_for_explicit_yes(prompt):
"""Returns whether user typed 'y' or 'yes' to confirm the given prompt"""
result = ask_for_data(prompt + ' [Yes/No]: ').lower()
while True:
if 'yes'.startswith(result):
return True
if 'no'.startswith(result):
return False
result = ask_for_data('Please, type yes or no: ').lower()
def _git_branch_config_key(branch, key):
"""Helper method to return Git config key for a branch."""
assert branch, 'branch name is required to set git config for it'
return 'branch.%s.%s' % (branch, key)
def _git_get_branch_config_value(key, default=None, value_type=str,
branch=False):
"""Returns git config value of given or current branch if any.
Returns default in all other cases.
"""
assert value_type in (int, str, bool)
if branch is False: # Distinguishing default arg value from None.
branch = GetCurrentBranch()
if not branch:
return default
args = ['config']
if value_type == bool:
args.append('--bool')
# git config also has --int, but apparently git config suffers from integer
# overflows (http://crbug.com/640115), so don't use it.
args.append(_git_branch_config_key(branch, key))
code, out = RunGitWithCode(args)
if code == 0:
value = out.strip()
if value_type == int:
return int(value)
if value_type == bool:
return bool(value.lower() == 'true')
return value
return default
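# Usage sketch (hedged; these key names are illustrative, not a claim about
# which keys git cl actually stores under branch.<name>.*):
#   _git_get_branch_config_value('base-url')
#   _git_get_branch_config_value('squash', default=True, value_type=bool)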
def _git_set_branch_config_value(key, value, branch=None, **kwargs):
"""Sets the value or unsets if it's None of a git branch config.
Valid, though not necessarily existing, branch must be provided,
otherwise currently checked out branch is used.
"""
if not branch:
branch = GetCurrentBranch()
assert branch, 'a branch name OR currently checked out branch is required'
args = ['config']
# Check for boolean first, because bool is int, but int is not bool.
if value is None:
args.append('--unset')
elif isinstance(value, bool):
args.append('--bool')
value = str(value).lower()
else:
# git config also has --int, but apparently git config suffers from integer
# overflows (http://crbug.com/640115), so don't use it.
value = str(value)
args.append(_git_branch_config_key(branch, key))
if value is not None:
args.append(value)
RunGit(args, **kwargs)
def _get_committer_timestamp(commit):
"""Returns Unix timestamp as integer of a committer in a commit.
Commit can be whatever git show would recognize, such as HEAD, sha1 or ref.
"""
# Git also stores timezone offset, but it only affects visual display,
# actual point in time is defined by this timestamp only.
return int(RunGit(['show', '-s', '--format=%ct', commit]).strip())
def _git_amend_head(message, committer_timestamp):
"""Amends commit with new message and desired committer_timestamp.
Sets committer timezone to UTC.
"""
env = os.environ.copy()
env['GIT_COMMITTER_DATE'] = '%d+0000' % committer_timestamp
return RunGit(['commit', '--amend', '-m', message], env=env)
def _get_properties_from_options(options):
properties = dict(x.split('=', 1) for x in options.properties)
for key, val in properties.iteritems():
try:
properties[key] = json.loads(val)
except ValueError:
pass # If a value couldn't be evaluated, treat it as a string.
return properties
def _prefix_master(master):
"""Convert user-specified master name to full master name.
Buildbucket uses full master name(master.tryserver.chromium.linux) as bucket
name, while the developers always use shortened master name
(tryserver.chromium.linux) by stripping off the prefix 'master.'. This
function does the conversion for buildbucket migration.
"""
if master.startswith(MASTER_PREFIX):
return master
return '%s%s' % (MASTER_PREFIX, master)
def _unprefix_master(bucket):
"""Convert bucket name to shortened master name.
Buildbucket uses full master name(master.tryserver.chromium.linux) as bucket
name, while the developers always use shortened master name
(tryserver.chromium.linux) by stripping off the prefix 'master.'. This
function does the conversion for buildbucket migration.
"""
if bucket.startswith(MASTER_PREFIX):
return bucket[len(MASTER_PREFIX):]
return bucket
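# Round-trip note: _prefix_master('tryserver.chromium.linux') yields
# 'master.tryserver.chromium.linux', and _unprefix_master() of that yields
# the short form again; both functions are no-ops when their input is
# already in the target form.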
def _buildbucket_retry(operation_name, http, *args, **kwargs):
"""Retries requests to buildbucket service and returns parsed json content."""
try_count = 0
while True:
response, content = http.request(*args, **kwargs)
try:
content_json = json.loads(content)
except ValueError:
content_json = None
# Buildbucket could return an error even if status==200.
if content_json and content_json.get('error'):
error = content_json.get('error')
if error.get('code') == 403:
raise BuildbucketResponseException(
'Access denied: %s' % error.get('message', ''))
msg = 'Error in response. Reason: %s. Message: %s.' % (
error.get('reason', ''), error.get('message', ''))
raise BuildbucketResponseException(msg)
if response.status == 200:
if not content_json:
raise BuildbucketResponseException(
'Buildbucket returns invalid json content: %s.\n'
'Please file bugs at http://crbug.com, label "Infra-BuildBucket".' %
content)
return content_json
if response.status < 500 or try_count >= 2:
raise httplib2.HttpLib2Error(content)
# status >= 500 means transient failures.
logging.debug('Transient errors when %s. Will retry.', operation_name)
time_sleep(0.5 + 1.5*try_count)
try_count += 1
assert False, 'unreachable'
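# Retry schedule note: with the `try_count >= 2` cutoff and
# time_sleep(0.5 + 1.5*try_count) above, a persistently 5xx-ing request is
# attempted three times in total, sleeping 0.5s and then 2.0s between
# attempts before httplib2.HttpLib2Error is raised.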
def _get_bucket_map(changelist, options, option_parser):
"""Returns a dict mapping bucket names to builders and tests,
for triggering try jobs.
"""
# If no bots are listed, we try to get a set of builders and tests based
# on GetPreferredTryMasters functions in PRESUBMIT.py files.
if not options.bot:
change = changelist.GetChange(
changelist.GetCommonAncestorWithUpstream(), None)
# Get try masters from PRESUBMIT.py files.
masters = presubmit_support.DoGetTryMasters(
change=change,
changed_files=change.LocalPaths(),
repository_root=settings.GetRoot(),
default_presubmit=None,
project=None,
verbose=options.verbose,
output_stream=sys.stdout)
if masters is None:
return None
return {_prefix_master(m): b for m, b in masters.iteritems()}
if options.bucket:
return {options.bucket: {b: [] for b in options.bot}}
if options.master:
return {_prefix_master(options.master): {b: [] for b in options.bot}}
# If bots are listed but no master or bucket, then we need to find out
# the corresponding master for each bot.
bucket_map, error_message = _get_bucket_map_for_builders(options.bot)
if error_message:
option_parser.error(
'Tryserver master cannot be found because: %s\n'
'Please manually specify the tryserver master, e.g. '
'"-m tryserver.chromium.linux".' % error_message)
return bucket_map
def _get_bucket_map_for_builders(builders):
"""Returns a map of buckets to builders for the given builders."""
map_url = 'https://builders-map.appspot.com/'
try:
builders_map = json.load(urllib2.urlopen(map_url))
except urllib2.URLError as e:
return None, ('Failed to fetch builder-to-master map from %s. Error: %s.' %
(map_url, e))
except ValueError as e:
return None, ('Invalid json string from %s. Error: %s.' % (map_url, e))
if not builders_map:
return None, 'Failed to build master map.'
bucket_map = {}
for builder in builders:
bucket = builders_map.get(builder, {}).get('bucket')
if bucket:
bucket_map.setdefault(bucket, {})[builder] = []
return bucket_map, None
def _trigger_try_jobs(auth_config, changelist, buckets, options, patchset):
"""Sends a request to Buildbucket to trigger try jobs for a changelist.
Args:
auth_config: AuthConfig for Rietveld.
changelist: Changelist that the try jobs are associated with.
buckets: A nested dict mapping bucket names to builders to tests.
    options: Command-line options.
    patchset: Patchset number to trigger try jobs against; defaults to the
        most recent patchset of the CL.
  """
assert changelist.GetIssue(), 'CL must be uploaded first'
codereview_url = changelist.GetCodereviewServer()
assert codereview_url, 'CL must be uploaded first'
patchset = patchset or changelist.GetMostRecentPatchset()
assert patchset, 'CL must be uploaded first'
codereview_host = urlparse.urlparse(codereview_url).hostname
authenticator = auth.get_authenticator_for_host(codereview_host, auth_config)
http = authenticator.authorize(httplib2.Http())
http.force_exception_to_status_code = True
buildbucket_put_url = (
'https://{hostname}/_ah/api/buildbucket/v1/builds/batch'.format(
hostname=options.buildbucket_host))
buildset = 'patch/{codereview}/{hostname}/{issue}/{patch}'.format(
codereview='gerrit' if changelist.IsGerrit() else 'rietveld',
hostname=codereview_host,
issue=changelist.GetIssue(),
patch=patchset)
shared_parameters_properties = changelist.GetTryJobProperties(patchset)
shared_parameters_properties['category'] = options.category
if options.clobber:
shared_parameters_properties['clobber'] = True
extra_properties = _get_properties_from_options(options)
if extra_properties:
shared_parameters_properties.update(extra_properties)
batch_req_body = {'builds': []}
print_text = []
print_text.append('Tried jobs on:')
for bucket, builders_and_tests in sorted(buckets.iteritems()):
print_text.append('Bucket: %s' % bucket)
master = None
if bucket.startswith(MASTER_PREFIX):
master = _unprefix_master(bucket)
for builder, tests in sorted(builders_and_tests.iteritems()):
print_text.append(' %s: %s' % (builder, tests))
parameters = {
'builder_name': builder,
'changes': [{
'author': {'email': changelist.GetIssueOwner()},
'revision': options.revision,
}],
'properties': shared_parameters_properties.copy(),
}
if 'presubmit' in builder.lower():
parameters['properties']['dry_run'] = 'true'
if tests:
parameters['properties']['testfilter'] = tests
tags = [
'builder:%s' % builder,
'buildset:%s' % buildset,
'user_agent:git_cl_try',
]
if master:
parameters['properties']['master'] = master
tags.append('master:%s' % master)
batch_req_body['builds'].append(
{
'bucket': bucket,
'parameters_json': json.dumps(parameters),
'client_operation_id': str(uuid.uuid4()),
'tags': tags,
}
)
_buildbucket_retry(
'triggering try jobs',
http,
buildbucket_put_url,
'PUT',
body=json.dumps(batch_req_body),
headers={'Content-Type': 'application/json'}
)
print_text.append('To see results here, run: git cl try-results')
print_text.append('To see results in browser, run: git cl web')
print('\n'.join(print_text))
def fetch_try_jobs(auth_config, changelist, buildbucket_host,
patchset=None):
"""Fetches try jobs from buildbucket.
Returns a map from build id to build info as a dictionary.
"""
assert buildbucket_host
assert changelist.GetIssue(), 'CL must be uploaded first'
assert changelist.GetCodereviewServer(), 'CL must be uploaded first'
patchset = patchset or changelist.GetMostRecentPatchset()
assert patchset, 'CL must be uploaded first'
codereview_url = changelist.GetCodereviewServer()
codereview_host = urlparse.urlparse(codereview_url).hostname
authenticator = auth.get_authenticator_for_host(codereview_host, auth_config)
if authenticator.has_cached_credentials():
http = authenticator.authorize(httplib2.Http())
else:
print('Warning: Some results might be missing because %s' %
# Get the message on how to login.
(auth.LoginRequiredError(codereview_host).message,))
http = httplib2.Http()
http.force_exception_to_status_code = True
buildset = 'patch/{codereview}/{hostname}/{issue}/{patch}'.format(
codereview='gerrit' if changelist.IsGerrit() else 'rietveld',
hostname=codereview_host,
issue=changelist.GetIssue(),
patch=patchset)
params = {'tag': 'buildset:%s' % buildset}
builds = {}
while True:
url = 'https://{hostname}/_ah/api/buildbucket/v1/search?{params}'.format(
hostname=buildbucket_host,
params=urllib.urlencode(params))
content = _buildbucket_retry('fetching try jobs', http, url, 'GET')
for build in content.get('builds', []):
builds[build['id']] = build
if 'next_cursor' in content:
params['start_cursor'] = content['next_cursor']
else:
break
return builds
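# Pagination note: the search endpoint returns builds in pages; the loop
# above follows 'next_cursor' until a response omits it, accumulating every
# build keyed by its id.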
def print_try_jobs(options, builds):
"""Prints nicely result of fetch_try_jobs."""
if not builds:
print('No try jobs scheduled.')
return
# Make a copy, because we'll be modifying builds dictionary.
builds = builds.copy()
builder_names_cache = {}
def get_builder(b):
try:
return builder_names_cache[b['id']]
except KeyError:
try:
parameters = json.loads(b['parameters_json'])
name = parameters['builder_name']
except (ValueError, KeyError) as error:
print('WARNING: Failed to get builder name for build %s: %s' % (
b['id'], error))
name = None
builder_names_cache[b['id']] = name
return name
def get_bucket(b):
bucket = b['bucket']
if bucket.startswith('master.'):
return bucket[len('master.'):]
return bucket
if options.print_master:
name_fmt = '%%-%ds %%-%ds' % (
max(len(str(get_bucket(b))) for b in builds.itervalues()),
max(len(str(get_builder(b))) for b in builds.itervalues()))
def get_name(b):
return name_fmt % (get_bucket(b), get_builder(b))
else:
name_fmt = '%%-%ds' % (
max(len(str(get_builder(b))) for b in builds.itervalues()))
def get_name(b):
return name_fmt % get_builder(b)
def sort_key(b):
return b['status'], b.get('result'), get_name(b), b.get('url')
def pop(title, f, color=None, **kwargs):
"""Pop matching builds from `builds` dict and print them."""
if not options.color or color is None:
colorize = str
else:
colorize = lambda x: '%s%s%s' % (color, x, Fore.RESET)
result = []
for b in builds.values():
if all(b.get(k) == v for k, v in kwargs.iteritems()):
builds.pop(b['id'])
result.append(b)
if result:
print(colorize(title))
for b in sorted(result, key=sort_key):
print(' ', colorize('\t'.join(map(str, f(b)))))
total = len(builds)
pop(status='COMPLETED', result='SUCCESS',
title='Successes:', color=Fore.GREEN,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='FAILURE', failure_reason='INFRA_FAILURE',
title='Infra Failures:', color=Fore.MAGENTA,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='FAILURE', failure_reason='BUILD_FAILURE',
title='Failures:', color=Fore.RED,
f=lambda b: (get_name(b), b.get('url')))
pop(status='COMPLETED', result='CANCELED',
title='Canceled:', color=Fore.MAGENTA,
f=lambda b: (get_name(b),))
pop(status='COMPLETED', result='FAILURE',
failure_reason='INVALID_BUILD_DEFINITION',
title='Wrong master/builder name:', color=Fore.MAGENTA,
f=lambda b: (get_name(b),))
pop(status='COMPLETED', result='FAILURE',
title='Other failures:',
f=lambda b: (get_name(b), b.get('failure_reason'), b.get('url')))
pop(status='COMPLETED',
title='Other finished:',
f=lambda b: (get_name(b), b.get('result'), b.get('url')))
pop(status='STARTED',
title='Started:', color=Fore.YELLOW,
f=lambda b: (get_name(b), b.get('url')))
pop(status='SCHEDULED',
title='Scheduled:',
f=lambda b: (get_name(b), 'id=%s' % b['id']))
# The last section is just in case buildbucket API changes OR there is a bug.
pop(title='Other:',
f=lambda b: (get_name(b), 'id=%s' % b['id']))
assert len(builds) == 0
print('Total: %d try jobs' % total)
def write_try_results_json(output_file, builds):
"""Writes a subset of the data from fetch_try_jobs to a file as JSON.
The input |builds| dict is assumed to be generated by Buildbucket.
Buildbucket documentation: http://goo.gl/G0s101
"""
def convert_build_dict(build):
"""Extracts some of the information from one build dict."""
parameters = json.loads(build.get('parameters_json', '{}')) or {}
return {
'buildbucket_id': build.get('id'),
'bucket': build.get('bucket'),
'builder_name': parameters.get('builder_name'),
'created_ts': build.get('created_ts'),
'experimental': build.get('experimental'),
'failure_reason': build.get('failure_reason'),
'result': build.get('result'),
'status': build.get('status'),
'tags': build.get('tags'),
'url': build.get('url'),
}
converted = []
for _, build in sorted(builds.items()):
converted.append(convert_build_dict(build))
write_json(output_file, converted)
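# Illustrative sketch: one converted entry, as written by
# write_try_results_json above. Every field value below is hypothetical.
_EXAMPLE_CONVERTED_BUILD = {
    'buildbucket_id': '8981234567890',
    'bucket': 'master.tryserver.chromium.linux',
    'builder_name': 'linux_chromium_rel_ng',
    'created_ts': '1514764800000000',
    'experimental': False,
    'failure_reason': None,
    'result': 'SUCCESS',
    'status': 'COMPLETED',
    'tags': ['user_agent:git_cl_try'],
    'url': 'https://example.com/build/8981234567890',
}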
def print_stats(args):
"""Prints statistics about the change to the user."""
# --no-ext-diff is broken in some versions of Git, so try to work around
# this by overriding the environment (but there is still a problem if the
# git config key "diff.external" is used).
env = GetNoGitPagerEnv()
if 'GIT_EXTERNAL_DIFF' in env:
del env['GIT_EXTERNAL_DIFF']
try:
stdout = sys.stdout.fileno()
except AttributeError:
stdout = None
return subprocess2.call(
['git', 'diff', '--no-ext-diff', '--stat', '-l100000', '-C50'] + args,
stdout=stdout, env=env)
class BuildbucketResponseException(Exception):
pass
class Settings(object):
def __init__(self):
self.default_server = None
self.cc = None
self.root = None
self.tree_status_url = None
self.viewvc_url = None
self.updated = False
self.is_gerrit = None
self.squash_gerrit_uploads = None
self.gerrit_skip_ensure_authenticated = None
self.git_editor = None
self.project = None
self.force_https_commit_url = None
def LazyUpdateIfNeeded(self):
"""Updates the settings from a codereview.settings file, if available."""
if not self.updated:
# The only value that actually changes the behavior is
# autoupdate = "false". Everything else means "true".
autoupdate = RunGit(['config', 'rietveld.autoupdate'],
error_ok=True
).strip().lower()
cr_settings_file = FindCodereviewSettingsFile()
if autoupdate != 'false' and cr_settings_file:
LoadCodereviewSettingsFromFile(cr_settings_file)
self.updated = True
def GetDefaultServerUrl(self, error_ok=False):
if not self.default_server:
self.LazyUpdateIfNeeded()
self.default_server = gclient_utils.UpgradeToHttps(
self._GetRietveldConfig('server', error_ok=True))
if error_ok:
return self.default_server
if not self.default_server:
error_message = ('Could not find settings file. You must configure '
'your review setup by running "git cl config".')
self.default_server = gclient_utils.UpgradeToHttps(
self._GetRietveldConfig('server', error_message=error_message))
return self.default_server
@staticmethod
def GetRelativeRoot():
return RunGit(['rev-parse', '--show-cdup']).strip()
def GetRoot(self):
if self.root is None:
self.root = os.path.abspath(self.GetRelativeRoot())
return self.root
def GetGitMirror(self, remote='origin'):
"""If this checkout is from a local git mirror, return a Mirror object."""
local_url = RunGit(['config', '--get', 'remote.%s.url' % remote]).strip()
if not os.path.isdir(local_url):
return None
git_cache.Mirror.SetCachePath(os.path.dirname(local_url))
remote_url = git_cache.Mirror.CacheDirToUrl(local_url)
# Use the /dev/null print_func to avoid terminal spew.
mirror = git_cache.Mirror(remote_url, print_func=lambda *args: None)
if mirror.exists():
return mirror
return None
def GetTreeStatusUrl(self, error_ok=False):
if not self.tree_status_url:
error_message = ('You must configure your tree status URL by running '
'"git cl config".')
self.tree_status_url = self._GetRietveldConfig(
'tree-status-url', error_ok=error_ok, error_message=error_message)
return self.tree_status_url
def GetViewVCUrl(self):
if not self.viewvc_url:
self.viewvc_url = self._GetRietveldConfig('viewvc-url', error_ok=True)
return self.viewvc_url
def GetBugPrefix(self):
return self._GetRietveldConfig('bug-prefix', error_ok=True)
def GetIsSkipDependencyUpload(self, branch_name):
"""Returns true if specified branch should skip dep uploads."""
return self._GetBranchConfig(branch_name, 'skip-deps-uploads',
error_ok=True)
def GetRunPostUploadHook(self):
run_post_upload_hook = self._GetRietveldConfig(
'run-post-upload-hook', error_ok=True)
return run_post_upload_hook == "True"
def GetDefaultCCList(self):
return self._GetRietveldConfig('cc', error_ok=True)
def GetDefaultPrivateFlag(self):
return self._GetRietveldConfig('private', error_ok=True)
def GetIsGerrit(self):
"""Return true if this repo is associated with gerrit code review system."""
if self.is_gerrit is None:
self.is_gerrit = (
self._GetConfig('gerrit.host', error_ok=True).lower() == 'true')
return self.is_gerrit
def GetSquashGerritUploads(self):
"""Return true if uploads to Gerrit should be squashed by default."""
if self.squash_gerrit_uploads is None:
self.squash_gerrit_uploads = self.GetSquashGerritUploadsOverride()
if self.squash_gerrit_uploads is None:
# Default is squash now (http://crbug.com/611892#c23).
self.squash_gerrit_uploads = not (
RunGit(['config', '--bool', 'gerrit.squash-uploads'],
error_ok=True).strip() == 'false')
return self.squash_gerrit_uploads
def GetSquashGerritUploadsOverride(self):
"""Return True or False if codereview.settings should be overridden.
Returns None if no override has been defined.
"""
# See also http://crbug.com/611892#c23
result = RunGit(['config', '--bool', 'gerrit.override-squash-uploads'],
error_ok=True).strip()
if result == 'true':
return True
if result == 'false':
return False
return None
def GetGerritSkipEnsureAuthenticated(self):
"""Return True if EnsureAuthenticated should not be done for Gerrit
uploads."""
if self.gerrit_skip_ensure_authenticated is None:
self.gerrit_skip_ensure_authenticated = (
RunGit(['config', '--bool', 'gerrit.skip-ensure-authenticated'],
error_ok=True).strip() == 'true')
return self.gerrit_skip_ensure_authenticated
def GetGitEditor(self):
"""Return the editor specified in the git config, or None if none is."""
if self.git_editor is None:
self.git_editor = self._GetConfig('core.editor', error_ok=True)
return self.git_editor or None
def GetLintRegex(self):
return (self._GetRietveldConfig('cpplint-regex', error_ok=True) or
DEFAULT_LINT_REGEX)
def GetLintIgnoreRegex(self):
return (self._GetRietveldConfig('cpplint-ignore-regex', error_ok=True) or
DEFAULT_LINT_IGNORE_REGEX)
def GetProject(self):
if not self.project:
self.project = self._GetRietveldConfig('project', error_ok=True)
return self.project
def _GetRietveldConfig(self, param, **kwargs):
return self._GetConfig('rietveld.' + param, **kwargs)
def _GetBranchConfig(self, branch_name, param, **kwargs):
return self._GetConfig('branch.' + branch_name + '.' + param, **kwargs)
def _GetConfig(self, param, **kwargs):
self.LazyUpdateIfNeeded()
return RunGit(['config', param], **kwargs).strip()
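# Self-contained sketch of the tri-state parse used by
# Settings.GetSquashGerritUploadsOverride above: `git config --bool` emits
# 'true', 'false', or nothing, which maps to True, False, or None.
def _example_tristate_bool(raw_output):
  raw_output = raw_output.strip()
  if raw_output == 'true':
    return True
  if raw_output == 'false':
    return False
  return None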
@contextlib.contextmanager
def _get_gerrit_project_config_file(remote_url):
"""Context manager to fetch and store Gerrit's project.config from
refs/meta/config branch and store it in temp file.
Provides a temporary filename or None if there was error.
"""
error, _ = RunGitWithCode([
'fetch', remote_url,
'+refs/meta/config:refs/git_cl/meta/config'])
if error:
# Ref doesn't exist or isn't accessible to current user.
print('WARNING: Failed to fetch project config for %s: %s' %
(remote_url, error))
yield None
return
error, project_config_data = RunGitWithCode(
['show', 'refs/git_cl/meta/config:project.config'])
if error:
print('WARNING: project.config file not found')
yield None
return
with gclient_utils.temporary_directory() as tempdir:
project_config_file = os.path.join(tempdir, 'project.config')
gclient_utils.FileWrite(project_config_file, project_config_data)
yield project_config_file
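# Usage sketch for the context manager above; the remote URL is hypothetical
# and the yielded path is only valid inside the `with` block.
def _example_read_project_config():
  remote_url = 'https://chromium.googlesource.com/some/repo'
  with _get_gerrit_project_config_file(remote_url) as config_path:
    if config_path is None:
      return None
    with open(config_path) as f:
      return f.read()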
def _is_git_numberer_enabled(remote_url, remote_ref):
"""Returns True if Git Numberer is enabled on this ref."""
# TODO(tandrii): this should be deleted once repos below are 100% on Gerrit.
KNOWN_PROJECTS_WHITELIST = [
'chromium/src',
'external/webrtc',
'v8/v8',
'infra/experimental',
# For webrtc.googlesource.com/src.
'src',
]
assert remote_ref and remote_ref.startswith('refs/'), remote_ref
url_parts = urlparse.urlparse(remote_url)
  # Note: rstrip() takes a set of characters, so this strips any trailing
  # run of 'g', 'i', 't', '.' and '/', which covers both '.git' and '/'.
  project_name = url_parts.path.lstrip('/').rstrip('git./')
for known in KNOWN_PROJECTS_WHITELIST:
if project_name.endswith(known):
break
else:
# Early exit to avoid extra fetches for repos that aren't using Git
# Numberer.
return False
with _get_gerrit_project_config_file(remote_url) as project_config_file:
if project_config_file is None:
# Failed to fetch project.config, which shouldn't happen on open source
# repos KNOWN_PROJECTS_WHITELIST.
return False
def get_opts(x):
code, out = RunGitWithCode(
['config', '-f', project_config_file, '--get-all',
'plugin.git-numberer.validate-%s-refglob' % x])
if code == 0:
return out.strip().splitlines()
return []
enabled, disabled = map(get_opts, ['enabled', 'disabled'])
logging.info('validator config enabled %s disabled %s refglobs for '
'(this ref: %s)', enabled, disabled, remote_ref)
def match_refglobs(refglobs):
for refglob in refglobs:
if remote_ref == refglob or fnmatch.fnmatch(remote_ref, refglob):
return True
return False
if match_refglobs(disabled):
return False
return match_refglobs(enabled)
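# Self-contained sketch of the refglob semantics above: a ref matches a glob
# either exactly or via fnmatch, and 'disabled' globs take precedence over
# 'enabled' ones. The refs and globs here are hypothetical.
def _example_refglob_precedence(remote_ref):
  enabled = ['refs/heads/*']
  disabled = ['refs/heads/experimental/*']
  def matches(globs):
    return any(remote_ref == g or fnmatch.fnmatch(remote_ref, g)
               for g in globs)
  if matches(disabled):
    return False
  return matches(enabled)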
def ShortBranchName(branch):
"""Convert a name like 'refs/heads/foo' to just 'foo'."""
return branch.replace('refs/heads/', '', 1)
def GetCurrentBranchRef():
"""Returns branch ref (e.g., refs/heads/master) or None."""
return RunGit(['symbolic-ref', 'HEAD'],
stderr=subprocess2.VOID, error_ok=True).strip() or None
def GetCurrentBranch():
"""Returns current branch or None.
For refs/heads/* branches, returns just last part. For others, full ref.
"""
branchref = GetCurrentBranchRef()
if branchref:
return ShortBranchName(branchref)
return None
class _CQState(object):
"""Enum for states of CL with respect to Commit Queue."""
NONE = 'none'
DRY_RUN = 'dry_run'
COMMIT = 'commit'
ALL_STATES = [NONE, DRY_RUN, COMMIT]
class _ParsedIssueNumberArgument(object):
def __init__(self, issue=None, patchset=None, hostname=None, codereview=None):
self.issue = issue
self.patchset = patchset
self.hostname = hostname
assert codereview in (None, 'rietveld', 'gerrit')
self.codereview = codereview
@property
def valid(self):
return self.issue is not None
def ParseIssueNumberArgument(arg, codereview=None):
"""Parses the issue argument and returns _ParsedIssueNumberArgument."""
fail_result = _ParsedIssueNumberArgument()
if arg.isdigit():
return _ParsedIssueNumberArgument(issue=int(arg), codereview=codereview)
if not arg.startswith('http'):
return fail_result
url = gclient_utils.UpgradeToHttps(arg)
try:
parsed_url = urlparse.urlparse(url)
except ValueError:
return fail_result
if codereview is not None:
parsed = _CODEREVIEW_IMPLEMENTATIONS[codereview].ParseIssueURL(parsed_url)
return parsed or fail_result
results = {}
for name, cls in _CODEREVIEW_IMPLEMENTATIONS.iteritems():
parsed = cls.ParseIssueURL(parsed_url)
if parsed is not None:
results[name] = parsed
if not results:
return fail_result
if len(results) == 1:
return results.values()[0]
if parsed_url.netloc and parsed_url.netloc.split('.')[0].endswith('-review'):
# This is likely Gerrit.
return results['gerrit']
  # Choose Rietveld as before if the URL can be parsed by either.
return results['rietveld']
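# Sketch of the disambiguation heuristic above: a host whose first subdomain
# ends in '-review' is assumed to be Gerrit. Both example hosts below are
# hypothetical.
def _example_looks_like_gerrit(hostname):
  return hostname.split('.')[0].endswith('-review')
# e.g. _example_looks_like_gerrit('chromium-review.googlesource.com') -> True
#      _example_looks_like_gerrit('codereview.example.org') -> False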
class GerritChangeNotExists(Exception):
def __init__(self, issue, url):
self.issue = issue
self.url = url
super(GerritChangeNotExists, self).__init__()
def __str__(self):
return 'change %s at %s does not exist or you have no access to it' % (
self.issue, self.url)
_CommentSummary = collections.namedtuple(
'_CommentSummary', ['date', 'message', 'sender',
# TODO(tandrii): these two aren't known in Gerrit.
'approval', 'disapproval'])
class Changelist(object):
"""Changelist works with one changelist in local branch.
Supports two codereview backends: Rietveld or Gerrit, selected at object
creation.
Notes:
* Not safe for concurrent multi-{thread,process} use.
* Caches values from current branch. Therefore, re-use after branch change
with great care.
"""
def __init__(self, branchref=None, issue=None, codereview=None, **kwargs):
"""Create a new ChangeList instance.
If issue is given, the codereview must be given too.
If `codereview` is given, it must be 'rietveld' or 'gerrit'.
Otherwise, it's decided based on current configuration of the local branch,
with default being 'rietveld' for backwards compatibility.
See _load_codereview_impl for more details.
**kwargs will be passed directly to codereview implementation.
"""
# Poke settings so we get the "configure your server" message if necessary.
global settings
if not settings:
# Happens when git_cl.py is used as a utility library.
settings = Settings()
if issue:
assert codereview, 'codereview must be known, if issue is known'
self.branchref = branchref
if self.branchref:
assert branchref.startswith('refs/heads/')
self.branch = ShortBranchName(self.branchref)
else:
self.branch = None
self.upstream_branch = None
self.lookedup_issue = False
self.issue = issue or None
self.has_description = False
self.description = None
self.lookedup_patchset = False
self.patchset = None
self.cc = None
self.more_cc = []
self._remote = None
self._codereview_impl = None
self._codereview = None
self._load_codereview_impl(codereview, **kwargs)
assert self._codereview_impl
assert self._codereview in _CODEREVIEW_IMPLEMENTATIONS
def _load_codereview_impl(self, codereview=None, **kwargs):
if codereview:
assert codereview in _CODEREVIEW_IMPLEMENTATIONS
cls = _CODEREVIEW_IMPLEMENTATIONS[codereview]
self._codereview = codereview
self._codereview_impl = cls(self, **kwargs)
return
    # Automatic selection based on the issue number set for the current
    # branch. Rietveld takes precedence over Gerrit.
assert not self.issue
# Whether we find issue or not, we are doing the lookup.
self.lookedup_issue = True
if self.GetBranch():
for codereview, cls in _CODEREVIEW_IMPLEMENTATIONS.iteritems():
issue = _git_get_branch_config_value(
cls.IssueConfigKey(), value_type=int, branch=self.GetBranch())
if issue:
self._codereview = codereview
self._codereview_impl = cls(self, **kwargs)
self.issue = int(issue)
return
# No issue is set for this branch, so decide based on repo-wide settings.
return self._load_codereview_impl(
codereview='gerrit' if settings.GetIsGerrit() else 'rietveld',
**kwargs)
def IsGerrit(self):
return self._codereview == 'gerrit'
def GetCCList(self):
"""Returns the users cc'd on this CL.
The return value is a string suitable for passing to git cl with the --cc
flag.
"""
if self.cc is None:
base_cc = settings.GetDefaultCCList()
more_cc = ','.join(self.more_cc)
self.cc = ','.join(filter(None, (base_cc, more_cc))) or ''
return self.cc
def GetCCListWithoutDefault(self):
"""Return the users cc'd on this CL excluding default ones."""
if self.cc is None:
self.cc = ','.join(self.more_cc)
return self.cc
def ExtendCC(self, more_cc):
"""Extends the list of users to cc on this CL based on the changed files."""
self.more_cc.extend(more_cc)
def GetBranch(self):
"""Returns the short branch name, e.g. 'master'."""
if not self.branch:
branchref = GetCurrentBranchRef()
if not branchref:
return None
self.branchref = branchref
self.branch = ShortBranchName(self.branchref)
return self.branch
def GetBranchRef(self):
"""Returns the full branch name, e.g. 'refs/heads/master'."""
self.GetBranch() # Poke the lazy loader.
return self.branchref
def ClearBranch(self):
"""Clears cached branch data of this object."""
self.branch = self.branchref = None
def _GitGetBranchConfigValue(self, key, default=None, **kwargs):
assert 'branch' not in kwargs, 'this CL branch is used automatically'
kwargs['branch'] = self.GetBranch()
return _git_get_branch_config_value(key, default, **kwargs)
def _GitSetBranchConfigValue(self, key, value, **kwargs):
assert 'branch' not in kwargs, 'this CL branch is used automatically'
assert self.GetBranch(), (
'this CL must have an associated branch to %sset %s%s' %
('un' if value is None else '',
key,
'' if value is None else ' to %r' % value))
kwargs['branch'] = self.GetBranch()
return _git_set_branch_config_value(key, value, **kwargs)
@staticmethod
def FetchUpstreamTuple(branch):
"""Returns a tuple containing remote and remote ref,
e.g. 'origin', 'refs/heads/master'
"""
remote = '.'
upstream_branch = _git_get_branch_config_value('merge', branch=branch)
if upstream_branch:
remote = _git_get_branch_config_value('remote', branch=branch)
else:
upstream_branch = RunGit(['config', 'rietveld.upstream-branch'],
error_ok=True).strip()
if upstream_branch:
remote = RunGit(['config', 'rietveld.upstream-remote']).strip()
else:
# Else, try to guess the origin remote.
remote_branches = RunGit(['branch', '-r']).split()
if 'origin/master' in remote_branches:
          # Fall back on origin/master if it exists.
remote = 'origin'
upstream_branch = 'refs/heads/master'
else:
DieWithError(
'Unable to determine default branch to diff against.\n'
'Either pass complete "git diff"-style arguments, like\n'
' git cl upload origin/master\n'
'or verify this branch is set up to track another \n'
'(via the --track argument to "git checkout -b ...").')
return remote, upstream_branch
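  @staticmethod
  def _example_upstream_lookup(config, branch, remote_branches):
    """Illustrative sketch (not called anywhere) of the lookup order used by
    FetchUpstreamTuple above: branch.<name>.merge/remote first, then the
    rietveld.upstream-* settings, then a guess of origin/master. The config
    dict stands in for `git config`.
    """
    merge = config.get('branch.%s.merge' % branch)
    if merge:
      return config.get('branch.%s.remote' % branch, '.'), merge
    if config.get('rietveld.upstream-branch'):
      return (config.get('rietveld.upstream-remote'),
              config.get('rietveld.upstream-branch'))
    if 'origin/master' in remote_branches:
      return 'origin', 'refs/heads/master'
    return None, None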
def GetCommonAncestorWithUpstream(self):
upstream_branch = self.GetUpstreamBranch()
if not BranchExists(upstream_branch):
DieWithError('The upstream for the current branch (%s) does not exist '
'anymore.\nPlease fix it and try again.' % self.GetBranch())
return git_common.get_or_create_merge_base(self.GetBranch(),
upstream_branch)
def GetUpstreamBranch(self):
if self.upstream_branch is None:
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
      if remote != '.':
upstream_branch = upstream_branch.replace('refs/heads/',
'refs/remotes/%s/' % remote)
upstream_branch = upstream_branch.replace('refs/branch-heads/',
'refs/remotes/branch-heads/')
self.upstream_branch = upstream_branch
return self.upstream_branch
def GetRemoteBranch(self):
if not self._remote:
remote, branch = None, self.GetBranch()
seen_branches = set()
while branch not in seen_branches:
seen_branches.add(branch)
remote, branch = self.FetchUpstreamTuple(branch)
branch = ShortBranchName(branch)
if remote != '.' or branch.startswith('refs/remotes'):
break
else:
remotes = RunGit(['remote'], error_ok=True).split()
if len(remotes) == 1:
remote, = remotes
elif 'origin' in remotes:
remote = 'origin'
          logging.warn('Could not determine which remote this change is '
                       'associated with, so defaulting to "%s".' % remote)
else:
logging.warn('Could not determine which remote this change is '
'associated with.')
branch = 'HEAD'
if branch.startswith('refs/remotes'):
self._remote = (remote, branch)
elif branch.startswith('refs/branch-heads/'):
self._remote = (remote, branch.replace('refs/', 'refs/remotes/'))
else:
self._remote = (remote, 'refs/remotes/%s/%s' % (remote, branch))
return self._remote
def GitSanityChecks(self, upstream_git_obj):
"""Checks git repo status and ensures diff is from local commits."""
if upstream_git_obj is None:
if self.GetBranch() is None:
print('ERROR: Unable to determine current branch (detached HEAD?)',
file=sys.stderr)
else:
print('ERROR: No upstream branch.', file=sys.stderr)
return False
# Verify the commit we're diffing against is in our current branch.
upstream_sha = RunGit(['rev-parse', '--verify', upstream_git_obj]).strip()
common_ancestor = RunGit(['merge-base', upstream_sha, 'HEAD']).strip()
if upstream_sha != common_ancestor:
print('ERROR: %s is not in the current branch. You may need to rebase '
'your tracking branch' % upstream_sha, file=sys.stderr)
return False
# List the commits inside the diff, and verify they are all local.
commits_in_diff = RunGit(
['rev-list', '^%s' % upstream_sha, 'HEAD']).splitlines()
code, remote_branch = RunGitWithCode(['config', 'gitcl.remotebranch'])
remote_branch = remote_branch.strip()
if code != 0:
_, remote_branch = self.GetRemoteBranch()
commits_in_remote = RunGit(
['rev-list', '^%s' % upstream_sha, remote_branch]).splitlines()
common_commits = set(commits_in_diff) & set(commits_in_remote)
if common_commits:
print('ERROR: Your diff contains %d commits already in %s.\n'
'Run "git log --oneline %s..HEAD" to get a list of commits in '
'the diff. If you are using a custom git flow, you can override'
' the reference used for this check with "git config '
'gitcl.remotebranch <git-ref>".' % (
len(common_commits), remote_branch, upstream_git_obj),
file=sys.stderr)
return False
return True
def GetGitBaseUrlFromConfig(self):
"""Return the configured base URL from branch.<branchname>.baseurl.
Returns None if it is not set.
"""
return self._GitGetBranchConfigValue('base-url')
def GetRemoteUrl(self):
"""Return the configured remote URL, e.g. 'git://example.org/foo.git/'.
Returns None if there is no remote.
"""
remote, _ = self.GetRemoteBranch()
url = RunGit(['config', 'remote.%s.url' % remote], error_ok=True).strip()
# If URL is pointing to a local directory, it is probably a git cache.
if os.path.isdir(url):
url = RunGit(['config', 'remote.%s.url' % remote],
error_ok=True,
cwd=url).strip()
return url
def GetIssue(self):
"""Returns the issue number as a int or None if not set."""
if self.issue is None and not self.lookedup_issue:
self.issue = self._GitGetBranchConfigValue(
self._codereview_impl.IssueConfigKey(), value_type=int)
self.lookedup_issue = True
return self.issue
def GetIssueURL(self):
"""Get the URL for a particular issue."""
issue = self.GetIssue()
if not issue:
return None
return '%s/%s' % (self._codereview_impl.GetCodereviewServer(), issue)
def GetDescription(self, pretty=False, force=False):
if not self.has_description or force:
if self.GetIssue():
self.description = self._codereview_impl.FetchDescription(force=force)
self.has_description = True
if pretty:
# Set width to 72 columns + 2 space indent.
wrapper = textwrap.TextWrapper(width=74, replace_whitespace=True)
wrapper.initial_indent = wrapper.subsequent_indent = ' '
lines = self.description.splitlines()
return '\n'.join([wrapper.fill(line) for line in lines])
return self.description
def GetDescriptionFooters(self):
"""Returns (non_footer_lines, footers) for the commit message.
Returns:
non_footer_lines (list(str)) - Simple list of description lines without
any footer. The lines do not contain newlines, nor does the list contain
the empty line between the message and the footers.
footers (list(tuple(KEY, VALUE))) - List of parsed footers, e.g.
[("Change-Id", "Ideadbeef...."), ...]
"""
raw_description = self.GetDescription()
msg_lines, _, footers = git_footers.split_footers(raw_description)
    if footers:
      # Drop the blank line separating the message from the footers.
      msg_lines = msg_lines[:-1]
return msg_lines, footers
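  @staticmethod
  def _example_split_footers(description):
    """Simplified, self-contained sketch of footer splitting; the real logic
    used by GetDescriptionFooters lives in git_footers.split_footers and
    handles more edge cases.
    """
    lines = description.splitlines()
    footer_re = re.compile(r'^([A-Za-z][A-Za-z0-9-]*): (.*)$')
    footers = []
    while lines and footer_re.match(lines[-1]):
      footers.append(footer_re.match(lines.pop()).groups())
    if footers and lines and not lines[-1].strip():
      lines.pop()  # Drop the blank separator line.
    footers.reverse()
    return lines, footers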
def GetPatchset(self):
"""Returns the patchset number as a int or None if not set."""
if self.patchset is None and not self.lookedup_patchset:
self.patchset = self._GitGetBranchConfigValue(
self._codereview_impl.PatchsetConfigKey(), value_type=int)
self.lookedup_patchset = True
return self.patchset
def SetPatchset(self, patchset):
"""Set this branch's patchset. If patchset=0, clears the patchset."""
assert self.GetBranch()
if not patchset:
self.patchset = None
else:
self.patchset = int(patchset)
self._GitSetBranchConfigValue(
self._codereview_impl.PatchsetConfigKey(), self.patchset)
def SetIssue(self, issue=None):
"""Set this branch's issue. If issue isn't given, clears the issue."""
assert self.GetBranch()
if issue:
issue = int(issue)
self._GitSetBranchConfigValue(
self._codereview_impl.IssueConfigKey(), issue)
self.issue = issue
codereview_server = self._codereview_impl.GetCodereviewServer()
if codereview_server:
self._GitSetBranchConfigValue(
self._codereview_impl.CodereviewServerConfigKey(),
codereview_server)
else:
# Reset all of these just to be clean.
reset_suffixes = [
'last-upload-hash',
self._codereview_impl.IssueConfigKey(),
self._codereview_impl.PatchsetConfigKey(),
self._codereview_impl.CodereviewServerConfigKey(),
] + self._PostUnsetIssueProperties()
for prop in reset_suffixes:
self._GitSetBranchConfigValue(prop, None, error_ok=True)
msg = RunGit(['log', '-1', '--format=%B']).strip()
if msg and git_footers.get_footer_change_id(msg):
print('WARNING: The change patched into this branch has a Change-Id. '
'Removing it.')
RunGit(['commit', '--amend', '-m',
git_footers.remove_footer(msg, 'Change-Id')])
self.issue = None
self.patchset = None
def GetChange(self, upstream_branch, author, local_description=False):
if not self.GitSanityChecks(upstream_branch):
DieWithError('\nGit sanity check failure')
root = settings.GetRelativeRoot()
if not root:
root = '.'
absroot = os.path.abspath(root)
# We use the sha1 of HEAD as a name of this change.
name = RunGitWithCode(['rev-parse', 'HEAD'])[1].strip()
# Need to pass a relative path for msysgit.
try:
files = scm.GIT.CaptureStatus([root], '.', upstream_branch)
except subprocess2.CalledProcessError:
DieWithError(
('\nFailed to diff against upstream branch %s\n\n'
'This branch probably doesn\'t exist anymore. To reset the\n'
'tracking branch, please run\n'
' git branch --set-upstream-to origin/master %s\n'
'or replace origin/master with the relevant branch') %
(upstream_branch, self.GetBranch()))
issue = self.GetIssue()
patchset = self.GetPatchset()
if issue and not local_description:
description = self.GetDescription()
else:
# If the change was never uploaded, use the log messages of all commits
# up to the branch point, as git cl upload will prefill the description
# with these log messages.
args = ['log', '--pretty=format:%s%n%n%b', '%s...' % (upstream_branch)]
description = RunGitWithCode(args)[1].strip()
if not author:
author = RunGit(['config', 'user.email']).strip() or None
return presubmit_support.GitChange(
name,
description,
absroot,
files,
issue,
patchset,
author,
upstream=upstream_branch)
def UpdateDescription(self, description, force=False):
self._codereview_impl.UpdateDescriptionRemote(description, force=force)
self.description = description
self.has_description = True
def UpdateDescriptionFooters(self, description_lines, footers, force=False):
"""Sets the description for this CL remotely.
You can get description_lines and footers with GetDescriptionFooters.
Args:
description_lines (list(str)) - List of CL description lines without
newline characters.
footers (list(tuple(KEY, VALUE))) - List of footers, as returned by
GetDescriptionFooters. Key must conform to the git footers format (i.e.
`List-Of-Tokens`). It will be case-normalized so that each token is
title-cased.
"""
new_description = '\n'.join(description_lines)
if footers:
new_description += '\n'
for k, v in footers:
foot = '%s: %s' % (git_footers.normalize_name(k), v)
if not git_footers.FOOTER_PATTERN.match(foot):
raise ValueError('Invalid footer %r' % foot)
new_description += foot + '\n'
self.UpdateDescription(new_description, force)
def RunHook(self, committing, may_prompt, verbose, change):
"""Calls sys.exit() if the hook fails; returns a HookResults otherwise."""
try:
return presubmit_support.DoPresubmitChecks(change, committing,
verbose=verbose, output_stream=sys.stdout, input_stream=sys.stdin,
default_presubmit=None, may_prompt=may_prompt,
rietveld_obj=self._codereview_impl.GetRietveldObjForPresubmit(),
gerrit_obj=self._codereview_impl.GetGerritObjForPresubmit())
except presubmit_support.PresubmitFailure as e:
DieWithError('%s\nMaybe your depot_tools is out of date?' % e)
def CMDPatchIssue(self, issue_arg, reject, nocommit, directory):
"""Fetches and applies the issue patch from codereview to local branch."""
if isinstance(issue_arg, (int, long)) or issue_arg.isdigit():
parsed_issue_arg = _ParsedIssueNumberArgument(int(issue_arg))
else:
# Assume url.
parsed_issue_arg = self._codereview_impl.ParseIssueURL(
urlparse.urlparse(issue_arg))
if not parsed_issue_arg or not parsed_issue_arg.valid:
DieWithError('Failed to parse issue argument "%s". '
'Must be an issue number or a valid URL.' % issue_arg)
return self._codereview_impl.CMDPatchWithParsedIssue(
parsed_issue_arg, reject, nocommit, directory, False)
def CMDUpload(self, options, git_diff_args, orig_args):
"""Uploads a change to codereview."""
custom_cl_base = None
if git_diff_args:
custom_cl_base = base_branch = git_diff_args[0]
else:
if self.GetBranch() is None:
DieWithError('Can\'t upload from detached HEAD state. Get on a branch!')
# Default to diffing against common ancestor of upstream branch
base_branch = self.GetCommonAncestorWithUpstream()
git_diff_args = [base_branch, 'HEAD']
# Warn about Rietveld deprecation for initial uploads to Rietveld.
if not self.IsGerrit() and not self.GetIssue():
print('=====================================')
print('NOTICE: Rietveld is being deprecated. '
'You can upload changes to Gerrit with')
print(' git cl upload --gerrit')
print('or set Gerrit to be your default code review tool with')
print(' git config gerrit.host true')
print('=====================================')
# Fast best-effort checks to abort before running potentially
# expensive hooks if uploading is likely to fail anyway. Passing these
# checks does not guarantee that uploading will not fail.
self._codereview_impl.EnsureAuthenticated(force=options.force)
self._codereview_impl.EnsureCanUploadPatchset(force=options.force)
# Apply watchlists on upload.
change = self.GetChange(base_branch, None)
watchlist = watchlists.Watchlists(change.RepositoryRoot())
files = [f.LocalPath() for f in change.AffectedFiles()]
if not options.bypass_watchlists:
self.ExtendCC(watchlist.GetWatchersForPaths(files))
if not options.bypass_hooks:
if options.reviewers or options.tbrs or options.add_owners_to:
# Set the reviewer list now so that presubmit checks can access it.
change_description = ChangeDescription(change.FullDescriptionText())
change_description.update_reviewers(options.reviewers,
options.tbrs,
options.add_owners_to,
change)
change.SetDescriptionText(change_description.description)
hook_results = self.RunHook(committing=False,
may_prompt=not options.force,
verbose=options.verbose,
change=change)
if not hook_results.should_continue():
return 1
if not options.reviewers and hook_results.reviewers:
options.reviewers = hook_results.reviewers.split(',')
self.ExtendCC(hook_results.more_cc)
# TODO(tandrii): Checking local patchset against remote patchset is only
# supported for Rietveld. Extend it to Gerrit or remove it completely.
if self.GetIssue() and not self.IsGerrit():
latest_patchset = self.GetMostRecentPatchset()
local_patchset = self.GetPatchset()
if (latest_patchset and local_patchset and
local_patchset != latest_patchset):
print('The last upload made from this repository was patchset #%d but '
'the most recent patchset on the server is #%d.'
% (local_patchset, latest_patchset))
print('Uploading will still work, but if you\'ve uploaded to this '
'issue from another machine or branch the patch you\'re '
'uploading now might not include those changes.')
confirm_or_exit(action='upload')
print_stats(git_diff_args)
ret = self.CMDUploadChange(options, git_diff_args, custom_cl_base, change)
if not ret:
if options.use_commit_queue:
self.SetCQState(_CQState.COMMIT)
elif options.cq_dry_run:
self.SetCQState(_CQState.DRY_RUN)
_git_set_branch_config_value('last-upload-hash',
RunGit(['rev-parse', 'HEAD']).strip())
# Run post upload hooks, if specified.
if settings.GetRunPostUploadHook():
presubmit_support.DoPostUploadExecuter(
change,
self,
settings.GetRoot(),
options.verbose,
sys.stdout)
# Upload all dependencies if specified.
if options.dependencies:
print()
print('--dependencies has been specified.')
print('All dependent local branches will be re-uploaded.')
print()
# Remove the dependencies flag from args so that we do not end up in a
# loop.
orig_args.remove('--dependencies')
ret = upload_branch_deps(self, orig_args)
return ret
def SetCQState(self, new_state):
"""Updates the CQ state for the latest patchset.
Issue must have been already uploaded and known.
"""
assert new_state in _CQState.ALL_STATES
assert self.GetIssue()
try:
self._codereview_impl.SetCQState(new_state)
return 0
except KeyboardInterrupt:
raise
except:
print('WARNING: Failed to %s.\n'
'Either:\n'
' * Your project has no CQ,\n'
' * You don\'t have permission to change the CQ state,\n'
' * There\'s a bug in this code (see stack trace below).\n'
'Consider specifying which bots to trigger manually or asking your '
'project owners for permissions or contacting Chrome Infra at:\n'
'https://www.chromium.org/infra\n\n' %
('cancel CQ' if new_state == _CQState.NONE else 'trigger CQ'))
# Still raise exception so that stack trace is printed.
raise
# Forward methods to codereview specific implementation.
def AddComment(self, message, publish=None):
return self._codereview_impl.AddComment(message, publish=publish)
def GetCommentsSummary(self, readable=True):
"""Returns list of _CommentSummary for each comment.
args:
readable: determines whether the output is designed for a human or a machine
"""
return self._codereview_impl.GetCommentsSummary(readable)
def CloseIssue(self):
return self._codereview_impl.CloseIssue()
def GetStatus(self):
return self._codereview_impl.GetStatus()
def GetCodereviewServer(self):
return self._codereview_impl.GetCodereviewServer()
def GetIssueOwner(self):
"""Get owner from codereview, which may differ from this checkout."""
return self._codereview_impl.GetIssueOwner()
def GetReviewers(self):
return self._codereview_impl.GetReviewers()
def GetMostRecentPatchset(self):
return self._codereview_impl.GetMostRecentPatchset()
def CannotTriggerTryJobReason(self):
"""Returns reason (str) if unable trigger try jobs on this CL or None."""
return self._codereview_impl.CannotTriggerTryJobReason()
def GetTryJobProperties(self, patchset=None):
"""Returns dictionary of properties to launch try job."""
return self._codereview_impl.GetTryJobProperties(patchset=patchset)
def __getattr__(self, attr):
# This is because lots of untested code accesses Rietveld-specific stuff
# directly, and it's hard to fix for sure. So, just let it work, and fix
# on a case by case basis.
    # Note that the codereview implementation class defines __getattr__ as
    # well, and forwards it here, because _RietveldChangelistImpl is not
    # cleaned up yet, and given deprecation of Rietveld, it should probably
    # be just removed.
# Until that time, avoid infinite recursion by bypassing __getattr__
# of implementation class.
return self._codereview_impl.__getattribute__(attr)
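# Minimal sketch of why Changelist.__getattr__ above calls __getattribute__
# directly: two objects that forward missing attributes to each other via
# __getattr__ would recurse forever, while an explicit __getattribute__ call
# never falls back to the other side's __getattr__ hook.
class _ExampleForwarder(object):
  def __init__(self, impl):
    self._impl = impl
  def __getattr__(self, attr):
    # getattr(self._impl, attr) could bounce back here; this cannot.
    return self._impl.__getattribute__(attr)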
class _ChangelistCodereviewBase(object):
"""Abstract base class encapsulating codereview specifics of a changelist."""
def __init__(self, changelist):
self._changelist = changelist # instance of Changelist
def __getattr__(self, attr):
# Forward methods to changelist.
# TODO(tandrii): maybe clean up _GerritChangelistImpl and
# _RietveldChangelistImpl to avoid this hack?
return getattr(self._changelist, attr)
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or specific string keywords.
"""
raise NotImplementedError()
def GetCodereviewServer(self):
"""Returns server URL without end slash, like "https://codereview.com"."""
raise NotImplementedError()
def FetchDescription(self, force=False):
"""Fetches and returns description from the codereview server."""
raise NotImplementedError()
@classmethod
def IssueConfigKey(cls):
"""Returns branch setting storing issue number."""
raise NotImplementedError()
@classmethod
def PatchsetConfigKey(cls):
"""Returns branch setting storing patchset number."""
raise NotImplementedError()
@classmethod
def CodereviewServerConfigKey(cls):
"""Returns branch setting storing codereview server."""
raise NotImplementedError()
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsetting issue."""
return []
def GetRietveldObjForPresubmit(self):
# This is an unfortunate Rietveld-embeddedness in presubmit.
# For non-Rietveld code reviews, this probably should return a dummy object.
raise NotImplementedError()
def GetGerritObjForPresubmit(self):
# None is valid return value, otherwise presubmit_support.GerritAccessor.
return None
def UpdateDescriptionRemote(self, description, force=False):
"""Update the description on codereview site."""
raise NotImplementedError()
def AddComment(self, message, publish=None):
"""Posts a comment to the codereview site."""
raise NotImplementedError()
def GetCommentsSummary(self, readable=True):
raise NotImplementedError()
def CloseIssue(self):
"""Closes the issue."""
raise NotImplementedError()
def GetMostRecentPatchset(self):
"""Returns the most recent patchset number from the codereview site."""
raise NotImplementedError()
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory, force):
"""Fetches and applies the issue.
Arguments:
parsed_issue_arg: instance of _ParsedIssueNumberArgument.
reject: if True, reject the failed patch instead of switching to 3-way
merge. Rietveld only.
nocommit: do not commit the patch, thus leave the tree dirty. Rietveld
only.
directory: switch to directory before applying the patch. Rietveld only.
force: if true, overwrites existing local state.
"""
raise NotImplementedError()
@staticmethod
def ParseIssueURL(parsed_url):
"""Parses url and returns instance of _ParsedIssueNumberArgument or None if
failed."""
raise NotImplementedError()
def EnsureAuthenticated(self, force, refresh=False):
"""Best effort check that user is authenticated with codereview server.
Arguments:
force: whether to skip confirmation questions.
refresh: whether to attempt to refresh credentials. Ignored if not
applicable.
"""
raise NotImplementedError()
def EnsureCanUploadPatchset(self, force):
"""Best effort check that uploading isn't supposed to fail for predictable
reasons.
This method should raise informative exception if uploading shouldn't
proceed.
Arguments:
force: whether to skip confirmation questions.
"""
raise NotImplementedError()
def CMDUploadChange(self, options, git_diff_args, custom_cl_base, change):
"""Uploads a change to codereview."""
raise NotImplementedError()
def SetCQState(self, new_state):
"""Updates the CQ state for the latest patchset.
Issue must have been already uploaded and known.
"""
raise NotImplementedError()
def CannotTriggerTryJobReason(self):
"""Returns reason (str) if unable trigger try jobs on this CL or None."""
raise NotImplementedError()
def GetIssueOwner(self):
raise NotImplementedError()
def GetReviewers(self):
raise NotImplementedError()
def GetTryJobProperties(self, patchset=None):
raise NotImplementedError()
class _RietveldChangelistImpl(_ChangelistCodereviewBase):
def __init__(self, changelist, auth_config=None, codereview_host=None):
super(_RietveldChangelistImpl, self).__init__(changelist)
assert settings, 'must be initialized in _ChangelistCodereviewBase'
    if not codereview_host:
      # Called for its side effect: loads and validates the server settings.
      settings.GetDefaultServerUrl()
self._rietveld_server = codereview_host
self._auth_config = auth_config or auth.make_auth_config()
self._props = None
self._rpc_server = None
def GetCodereviewServer(self):
if not self._rietveld_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
self._rietveld_server = gclient_utils.UpgradeToHttps(
self._GitGetBranchConfigValue(self.CodereviewServerConfigKey()))
if not self._rietveld_server:
self._rietveld_server = settings.GetDefaultServerUrl()
return self._rietveld_server
def EnsureAuthenticated(self, force, refresh=False):
"""Best effort check that user is authenticated with Rietveld server."""
if self._auth_config.use_oauth2:
authenticator = auth.get_authenticator_for_host(
self.GetCodereviewServer(), self._auth_config)
if not authenticator.has_cached_credentials():
raise auth.LoginRequiredError(self.GetCodereviewServer())
if refresh:
authenticator.get_access_token()
def EnsureCanUploadPatchset(self, force):
# No checks for Rietveld because we are deprecating Rietveld.
pass
def FetchDescription(self, force=False):
issue = self.GetIssue()
assert issue
try:
return self.RpcServer().get_description(issue, force=force).strip()
except urllib2.HTTPError as e:
if e.code == 404:
DieWithError(
('\nWhile fetching the description for issue %d, received a '
'404 (not found)\n'
'error. It is likely that you deleted this '
'issue on the server. If this is the\n'
'case, please run\n\n'
' git cl issue 0\n\n'
'to clear the association with the deleted issue. Then run '
'this command again.') % issue)
else:
DieWithError(
'\nFailed to fetch issue description. HTTP error %d' % e.code)
except urllib2.URLError as e:
print('Warning: Failed to retrieve CL description due to network '
'failure.', file=sys.stderr)
return ''
def GetMostRecentPatchset(self):
return self.GetIssueProperties()['patchsets'][-1]
def GetIssueProperties(self):
if self._props is None:
issue = self.GetIssue()
if not issue:
self._props = {}
else:
self._props = self.RpcServer().get_issue_properties(issue, True)
return self._props
def CannotTriggerTryJobReason(self):
props = self.GetIssueProperties()
if not props:
return 'Rietveld doesn\'t know about your issue %s' % self.GetIssue()
if props.get('closed'):
return 'CL %s is closed' % self.GetIssue()
if props.get('private'):
return 'CL %s is private' % self.GetIssue()
return None
def GetTryJobProperties(self, patchset=None):
"""Returns dictionary of properties to launch try job."""
project = (self.GetIssueProperties() or {}).get('project')
return {
'issue': self.GetIssue(),
'patch_project': project,
'patch_storage': 'rietveld',
'patchset': patchset or self.GetPatchset(),
'rietveld': self.GetCodereviewServer(),
}
def GetIssueOwner(self):
return (self.GetIssueProperties() or {}).get('owner_email')
def GetReviewers(self):
return (self.GetIssueProperties() or {}).get('reviewers')
def AddComment(self, message, publish=None):
return self.RpcServer().add_comment(self.GetIssue(), message)
def GetCommentsSummary(self, _readable=True):
summary = []
for message in self.GetIssueProperties().get('messages', []):
date = datetime.datetime.strptime(message['date'], '%Y-%m-%d %H:%M:%S.%f')
summary.append(_CommentSummary(
date=date,
disapproval=bool(message['disapproval']),
approval=bool(message['approval']),
sender=message['sender'],
message=message['text'],
))
return summary
def GetStatus(self):
"""Applies a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or one of the following keywords:
* 'error' - error from review tool (including deleted issues)
* 'unsent' - not sent for review
* 'waiting' - waiting for review
* 'reply' - waiting for owner to reply to review
* 'not lgtm' - Code-Review label has been set negatively
* 'lgtm' - LGTM from at least one approved reviewer
* 'commit' - in the commit queue
* 'closed' - closed
"""
if not self.GetIssue():
return None
try:
props = self.GetIssueProperties()
except urllib2.HTTPError:
return 'error'
if props.get('closed'):
# Issue is closed.
return 'closed'
if props.get('commit') and not props.get('cq_dry_run', False):
# Issue is in the commit queue.
return 'commit'
messages = props.get('messages') or []
if not messages:
# No message was sent.
return 'unsent'
if get_approving_reviewers(props):
return 'lgtm'
elif get_approving_reviewers(props, disapproval=True):
return 'not lgtm'
# Skip CQ messages that don't require owner's action.
while messages and messages[-1]['sender'] == COMMIT_BOT_EMAIL:
if 'Dry run:' in messages[-1]['text']:
messages.pop()
elif 'The CQ bit was unchecked' in messages[-1]['text']:
# This message always follows prior messages from CQ,
# so skip this too.
messages.pop()
else:
        # This is probably a CQ message warranting user attention.
break
if messages[-1]['sender'] != props.get('owner_email'):
# Non-LGTM reply from non-owner and not CQ bot.
return 'reply'
return 'waiting'
def UpdateDescriptionRemote(self, description, force=False):
self.RpcServer().update_description(self.GetIssue(), description)
def CloseIssue(self):
return self.RpcServer().close_issue(self.GetIssue())
def SetFlag(self, flag, value):
return self.SetFlags({flag: value})
def SetFlags(self, flags):
"""Sets flags on this CL/patchset in Rietveld.
"""
patchset = self.GetPatchset() or self.GetMostRecentPatchset()
try:
return self.RpcServer().set_flags(
self.GetIssue(), patchset, flags)
except urllib2.HTTPError as e:
if e.code == 404:
DieWithError('The issue %s doesn\'t exist.' % self.GetIssue())
if e.code == 403:
DieWithError(
('Access denied to issue %s. Maybe the patchset %s doesn\'t '
'match?') % (self.GetIssue(), patchset))
raise
def RpcServer(self):
"""Returns an upload.RpcServer() to access this review's rietveld instance.
"""
if not self._rpc_server:
self._rpc_server = rietveld.CachingRietveld(
self.GetCodereviewServer(),
self._auth_config)
return self._rpc_server
@classmethod
def IssueConfigKey(cls):
return 'rietveldissue'
@classmethod
def PatchsetConfigKey(cls):
return 'rietveldpatchset'
@classmethod
def CodereviewServerConfigKey(cls):
return 'rietveldserver'
def GetRietveldObjForPresubmit(self):
return self.RpcServer()
def SetCQState(self, new_state):
props = self.GetIssueProperties()
if props.get('private'):
DieWithError('Cannot set-commit on private issue')
if new_state == _CQState.COMMIT:
self.SetFlags({'commit': '1', 'cq_dry_run': '0'})
elif new_state == _CQState.NONE:
self.SetFlags({'commit': '0', 'cq_dry_run': '0'})
else:
assert new_state == _CQState.DRY_RUN
self.SetFlags({'commit': '1', 'cq_dry_run': '1'})
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory, force):
# PatchIssue should never be called with a dirty tree. It is up to the
# caller to check this, but just in case we assert here since the
# consequences of the caller not checking this could be dire.
    assert not git_common.is_dirty_git_tree('apply')
    assert parsed_issue_arg.valid
self._changelist.issue = parsed_issue_arg.issue
if parsed_issue_arg.hostname:
self._rietveld_server = 'https://%s' % parsed_issue_arg.hostname
patchset = parsed_issue_arg.patchset or self.GetMostRecentPatchset()
patchset_object = self.RpcServer().get_patch(self.GetIssue(), patchset)
scm_obj = checkout.GitCheckout(settings.GetRoot(), None, None, None, None)
try:
scm_obj.apply_patch(patchset_object)
except Exception as e:
print(str(e))
return 1
# If we had an issue, commit the current state and register the issue.
if not nocommit:
self.SetIssue(self.GetIssue())
self.SetPatchset(patchset)
RunGit(['commit', '-m', (self.GetDescription() + '\n\n' +
'patch from issue %(i)s at patchset '
'%(p)s (http://crrev.com/%(i)s#ps%(p)s)'
% {'i': self.GetIssue(), 'p': patchset})])
print('Committed patch locally.')
else:
print('Patch applied to index.')
return 0
@staticmethod
def ParseIssueURL(parsed_url):
if not parsed_url.scheme or not parsed_url.scheme.startswith('http'):
return None
# Rietveld patch: https://domain/<number>/#ps<patchset>
match = re.match(r'/(\d+)/$', parsed_url.path)
match2 = re.match(r'ps(\d+)$', parsed_url.fragment)
if match and match2:
return _ParsedIssueNumberArgument(
issue=int(match.group(1)),
patchset=int(match2.group(1)),
hostname=parsed_url.netloc,
codereview='rietveld')
# Typical url: https://domain/<issue_number>[/[other]]
    match = re.match(r'/(\d+)(/.*)?$', parsed_url.path)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(1)),
hostname=parsed_url.netloc,
codereview='rietveld')
# Rietveld patch: https://domain/download/issue<number>_<patchset>.diff
match = re.match(r'/download/issue(\d+)_(\d+).diff$', parsed_url.path)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(1)),
patchset=int(match.group(2)),
hostname=parsed_url.netloc,
codereview='rietveld')
return None
def CMDUploadChange(self, options, args, custom_cl_base, change):
"""Upload the patch to Rietveld."""
upload_args = ['--assume_yes'] # Don't ask about untracked files.
upload_args.extend(['--server', self.GetCodereviewServer()])
upload_args.extend(auth.auth_config_to_command_options(self._auth_config))
if options.emulate_svn_auto_props:
upload_args.append('--emulate_svn_auto_props')
change_desc = None
if options.email is not None:
upload_args.extend(['--email', options.email])
if self.GetIssue():
if options.title is not None:
upload_args.extend(['--title', options.title])
if options.message:
upload_args.extend(['--message', options.message])
upload_args.extend(['--issue', str(self.GetIssue())])
print('This branch is associated with issue %s. '
'Adding patch to that issue.' % self.GetIssue())
else:
if options.title is not None:
upload_args.extend(['--title', options.title])
if options.message:
message = options.message
else:
message = CreateDescriptionFromLog(args)
if options.title:
message = options.title + '\n\n' + message
change_desc = ChangeDescription(message)
if options.reviewers or options.add_owners_to:
change_desc.update_reviewers(options.reviewers, options.tbrs,
options.add_owners_to, change)
if not options.force:
change_desc.prompt(bug=options.bug, git_footer=False)
if not change_desc.description:
print('Description is empty; aborting.')
return 1
upload_args.extend(['--message', change_desc.description])
if change_desc.get_reviewers():
upload_args.append('--reviewers=%s' % ','.join(
change_desc.get_reviewers()))
if options.send_mail:
if not change_desc.get_reviewers():
DieWithError("Must specify reviewers to send email.", change_desc)
upload_args.append('--send_mail')
# We check this before applying rietveld.private assuming that in
# rietveld.cc only addresses which we can send private CLs to are listed
# if rietveld.private is set, and so we should ignore rietveld.cc only
# when --private is specified explicitly on the command line.
if options.private:
logging.warn('rietveld.cc is ignored since private flag is specified. '
'You need to review and add them manually if necessary.')
cc = self.GetCCListWithoutDefault()
else:
cc = self.GetCCList()
cc = ','.join(filter(None, (cc, ','.join(options.cc))))
if change_desc.get_cced():
cc = ','.join(filter(None, (cc, ','.join(change_desc.get_cced()))))
if cc:
upload_args.extend(['--cc', cc])
if options.private or settings.GetDefaultPrivateFlag() == "True":
upload_args.append('--private')
# Include the upstream repo's URL in the change -- this is useful for
# projects that have their source spread across multiple repos.
remote_url = self.GetGitBaseUrlFromConfig()
if not remote_url:
if self.GetRemoteUrl() and '/' in self.GetUpstreamBranch():
remote_url = '%s@%s' % (self.GetRemoteUrl(),
self.GetUpstreamBranch().split('/')[-1])
if remote_url:
remote, remote_branch = self.GetRemoteBranch()
target_ref = GetTargetRef(remote, remote_branch, options.target_branch)
if target_ref:
upload_args.extend(['--target_ref', target_ref])
# Look for dependent patchsets. See crbug.com/480453 for more details.
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
upstream_branch = ShortBranchName(upstream_branch)
    if remote == '.':
# A local branch is being tracked.
local_branch = upstream_branch
if settings.GetIsSkipDependencyUpload(local_branch):
print()
print('Skipping dependency patchset upload because git config '
'branch.%s.skip-deps-uploads is set to True.' % local_branch)
print()
else:
auth_config = auth.extract_auth_config_from_options(options)
branch_cl = Changelist(branchref='refs/heads/'+local_branch,
auth_config=auth_config)
branch_cl_issue_url = branch_cl.GetIssueURL()
branch_cl_issue = branch_cl.GetIssue()
branch_cl_patchset = branch_cl.GetPatchset()
if branch_cl_issue_url and branch_cl_issue and branch_cl_patchset:
upload_args.extend(
['--depends_on_patchset', '%s:%s' % (
branch_cl_issue, branch_cl_patchset)])
print(
'\n'
'The current branch (%s) is tracking a local branch (%s) with '
'an associated CL.\n'
'Adding %s/#ps%s as a dependency patchset.\n'
'\n' % (self.GetBranch(), local_branch, branch_cl_issue_url,
branch_cl_patchset))
project = settings.GetProject()
if project:
upload_args.extend(['--project', project])
else:
print()
print('WARNING: Uploading without a project specified. Please ensure '
'your repo\'s codereview.settings has a "PROJECT: foo" line.')
print()
try:
upload_args = ['upload'] + upload_args + args
logging.info('upload.RealMain(%s)', upload_args)
issue, patchset = upload.RealMain(upload_args)
issue = int(issue)
patchset = int(patchset)
except KeyboardInterrupt:
sys.exit(1)
except:
# If we got an exception after the user typed a description for their
# change, back up the description before re-raising.
if change_desc:
SaveDescriptionBackup(change_desc)
raise
if not self.GetIssue():
self.SetIssue(issue)
self.SetPatchset(patchset)
return 0
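# Usage sketch for _RietveldChangelistImpl.ParseIssueURL above; the host is
# hypothetical. Each URL form maps to issue 123 (and patchset 4 where given).
def _example_parse_rietveld_urls():
  urls = [
      'https://codereview.example.com/123/#ps4',
      'https://codereview.example.com/123',
      'https://codereview.example.com/download/issue123_4.diff',
  ]
  return [_RietveldChangelistImpl.ParseIssueURL(urlparse.urlparse(u))
          for u in urls]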
class _GerritChangelistImpl(_ChangelistCodereviewBase):
def __init__(self, changelist, auth_config=None, codereview_host=None):
# auth_config is Rietveld thing, kept here to preserve interface only.
super(_GerritChangelistImpl, self).__init__(changelist)
self._change_id = None
# Lazily cached values.
self._gerrit_host = None # e.g. chromium-review.googlesource.com
self._gerrit_server = None # e.g. https://chromium-review.googlesource.com
# Map from change number (issue) to its detail cache.
self._detail_cache = {}
if codereview_host is not None:
assert not codereview_host.startswith('https://'), codereview_host
self._gerrit_host = codereview_host
self._gerrit_server = 'https://%s' % codereview_host
def _GetGerritHost(self):
# Lazy load of configs.
self.GetCodereviewServer()
if self._gerrit_host and '.' not in self._gerrit_host:
# Abbreviated domain like "chromium" instead of chromium.googlesource.com.
# This happens for internal stuff http://crbug.com/614312.
parsed = urlparse.urlparse(self.GetRemoteUrl())
if parsed.scheme == 'sso':
print('WARNING: using non-https URLs for remote is likely broken\n'
' Your current remote is: %s' % self.GetRemoteUrl())
self._gerrit_host = '%s.googlesource.com' % self._gerrit_host
self._gerrit_server = 'https://%s' % self._gerrit_host
return self._gerrit_host
def _GetGitHost(self):
"""Returns git host to be used when uploading change to Gerrit."""
return urlparse.urlparse(self.GetRemoteUrl()).netloc
def GetCodereviewServer(self):
if not self._gerrit_server:
# If we're on a branch then get the server potentially associated
# with that branch.
if self.GetIssue():
self._gerrit_server = self._GitGetBranchConfigValue(
self.CodereviewServerConfigKey())
if self._gerrit_server:
self._gerrit_host = urlparse.urlparse(self._gerrit_server).netloc
if not self._gerrit_server:
      # We assume the repo is hosted on Gerrit, and hence the Gerrit server
      # has a "-review" suffix on its lowest-level subdomain.
parts = self._GetGitHost().split('.')
parts[0] = parts[0] + '-review'
self._gerrit_host = '.'.join(parts)
self._gerrit_server = 'https://%s' % self._gerrit_host
return self._gerrit_server
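  @staticmethod
  def _example_derive_gerrit_host(git_host):
    """Illustrative sketch of the '-review' derivation in GetCodereviewServer
    above, e.g. 'chromium.googlesource.com' ->
    'chromium-review.googlesource.com'.
    """
    parts = git_host.split('.')
    parts[0] += '-review'
    return '.'.join(parts)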
@classmethod
def IssueConfigKey(cls):
return 'gerritissue'
@classmethod
def PatchsetConfigKey(cls):
return 'gerritpatchset'
@classmethod
def CodereviewServerConfigKey(cls):
return 'gerritserver'
def EnsureAuthenticated(self, force, refresh=None):
"""Best effort check that user is authenticated with Gerrit server."""
if settings.GetGerritSkipEnsureAuthenticated():
# For projects with unusual authentication schemes.
# See http://crbug.com/603378.
return
# Lazy-loader to identify Gerrit and Git hosts.
if gerrit_util.GceAuthenticator.is_gce():
return
self.GetCodereviewServer()
git_host = self._GetGitHost()
assert self._gerrit_server and self._gerrit_host
cookie_auth = gerrit_util.CookiesAuthenticator()
gerrit_auth = cookie_auth.get_auth_header(self._gerrit_host)
git_auth = cookie_auth.get_auth_header(git_host)
if gerrit_auth and git_auth:
if gerrit_auth == git_auth:
return
all_gsrc = cookie_auth.get_auth_header('d0esN0tEx1st.googlesource.com')
print((
'WARNING: You have different credentials for Gerrit and git hosts:\n'
' %s\n'
' %s\n'
' Consider running the following command:\n'
' git cl creds-check\n'
' %s\n'
' %s') %
(git_host, self._gerrit_host,
('Hint: delete creds for .googlesource.com' if all_gsrc else ''),
cookie_auth.get_new_password_message(git_host)))
if not force:
confirm_or_exit('If you know what you are doing', action='continue')
return
else:
missing = (
([] if gerrit_auth else [self._gerrit_host]) +
([] if git_auth else [git_host]))
DieWithError('Credentials for the following hosts are required:\n'
' %s\n'
'These are read from %s (or legacy %s)\n'
'%s' % (
'\n '.join(missing),
cookie_auth.get_gitcookies_path(),
cookie_auth.get_netrc_path(),
cookie_auth.get_new_password_message(git_host)))
def EnsureCanUploadPatchset(self, force):
if not self.GetIssue():
return
# Warm change details cache now to avoid RPCs later, reducing latency for
# developers.
self._GetChangeDetail(
['DETAILED_ACCOUNTS', 'CURRENT_REVISION', 'CURRENT_COMMIT'])
status = self._GetChangeDetail()['status']
if status in ('MERGED', 'ABANDONED'):
DieWithError('Change %s has been %s, new uploads are not allowed' %
(self.GetIssueURL(),
'submitted' if status == 'MERGED' else 'abandoned'))
if gerrit_util.GceAuthenticator.is_gce():
return
cookies_user = gerrit_util.CookiesAuthenticator().get_auth_email(
self._GetGerritHost())
if self.GetIssueOwner() == cookies_user:
return
logging.debug('change %s owner is %s, cookies user is %s',
self.GetIssue(), self.GetIssueOwner(), cookies_user)
# Maybe user has linked accounts or something like that,
# so ask what Gerrit thinks of this user.
details = gerrit_util.GetAccountDetails(self._GetGerritHost(), 'self')
if details['email'] == self.GetIssueOwner():
return
if not force:
print('WARNING: Change %s is owned by %s, but you authenticate to Gerrit '
'as %s.\n'
'Uploading may fail due to lack of permissions.' %
(self.GetIssue(), self.GetIssueOwner(), details['email']))
confirm_or_exit(action='upload')
def _PostUnsetIssueProperties(self):
"""Which branch-specific properties to erase when unsetting issue."""
return ['gerritsquashhash']
def GetRietveldObjForPresubmit(self):
class ThisIsNotRietveldIssue(object):
def __nonzero__(self):
# This is a hack to make presubmit_support think that rietveld is not
# defined, while still ensuring that any attribute access results in a
# decent error message below.
return False
def __getattr__(self, attr):
print(
'You aren\'t using Rietveld at the moment, but Gerrit.\n'
'Using Rietveld in your PRESUBMIT scripts won\'t work.\n'
'Please, either change your PRESUBMIT to not use rietveld_obj.%s,\n'
'or use Rietveld for codereview.\n'
'See also http://crbug.com/579160.' % attr)
raise NotImplementedError()
return ThisIsNotRietveldIssue()
def GetGerritObjForPresubmit(self):
return presubmit_support.GerritAccessor(self._GetGerritHost())
def GetStatus(self):
"""Apply a rough heuristic to give a simple summary of an issue's review
or CQ status, assuming adherence to a common workflow.
Returns None if no issue for this branch, or one of the following keywords:
* 'error' - error from review tool (including deleted issues)
* 'unsent' - no reviewers added
* 'waiting' - waiting for review
* 'reply' - waiting for uploader to reply to review
* 'lgtm' - Code-Review label has been set
* 'commit' - in the commit queue
* 'closed' - successfully submitted or abandoned
"""
if not self.GetIssue():
return None
try:
data = self._GetChangeDetail([
'DETAILED_LABELS', 'CURRENT_REVISION', 'SUBMITTABLE'])
except (httplib.HTTPException, GerritChangeNotExists):
return 'error'
if data['status'] in ('ABANDONED', 'MERGED'):
return 'closed'
if data['labels'].get('Commit-Queue', {}).get('approved'):
# The section will have an "approved" subsection if anyone has voted
# the maximum value on the label.
return 'commit'
if data['labels'].get('Code-Review', {}).get('approved'):
return 'lgtm'
if not data.get('reviewers', {}).get('REVIEWER', []):
return 'unsent'
owner = data['owner'].get('_account_id')
messages = sorted(data.get('messages', []), key=lambda m: m.get('updated'))
last_message_author = messages.pop().get('author', {}) if messages else {}
while last_message_author:
if last_message_author.get('email') == COMMIT_BOT_EMAIL:
# Ignore replies from CQ.
last_message_author = messages.pop().get('author', {}) if messages else {}
continue
if last_message_author.get('_account_id') == owner:
# Most recent message was by owner.
return 'waiting'
else:
# Some reply from non-owner.
return 'reply'
# Somehow there are no messages even though there are reviewers.
return 'unsent'
def GetMostRecentPatchset(self):
data = self._GetChangeDetail(['CURRENT_REVISION'])
patchset = data['revisions'][data['current_revision']]['_number']
self.SetPatchset(patchset)
return patchset
def FetchDescription(self, force=False):
data = self._GetChangeDetail(['CURRENT_REVISION', 'CURRENT_COMMIT'],
no_cache=force)
current_rev = data['current_revision']
return data['revisions'][current_rev]['commit']['message']
def UpdateDescriptionRemote(self, description, force=False):
if gerrit_util.HasPendingChangeEdit(self._GetGerritHost(), self.GetIssue()):
if not force:
confirm_or_exit(
'The description cannot be modified while the issue has a pending '
'unpublished edit. Either publish the edit in the Gerrit web UI '
'or delete it.\n\n', action='delete the unpublished edit')
gerrit_util.DeletePendingChangeEdit(self._GetGerritHost(),
self.GetIssue())
gerrit_util.SetCommitMessage(self._GetGerritHost(), self.GetIssue(),
description, notify='NONE')
def AddComment(self, message, publish=None):
gerrit_util.SetReview(self._GetGerritHost(), self.GetIssue(),
msg=message, ready=publish)
def GetCommentsSummary(self, readable=True):
# DETAILED_ACCOUNTS is to get emails in accounts.
messages = self._GetChangeDetail(
options=['MESSAGES', 'DETAILED_ACCOUNTS']).get('messages', [])
file_comments = gerrit_util.GetChangeComments(
self._GetGerritHost(), self.GetIssue())
# Build dictionary of file comments for easy access and sorting later.
# {author+date: {path: {patchset: {line: url+message}}}}
comments = collections.defaultdict(
lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
for path, line_comments in file_comments.iteritems():
for comment in line_comments:
if comment.get('tag', '').startswith('autogenerated'):
continue
key = (comment['author']['email'], comment['updated'])
if comment.get('side', 'REVISION') == 'PARENT':
patchset = 'Base'
else:
patchset = 'PS%d' % comment['patch_set']
line = comment.get('line', 0)
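# The resulting url looks like, e.g. (illustrative):
#   https://host-review.googlesource.com/c/1234/2/foo.cc#45
# with a 'b' before the line number when the comment is on the parent side.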
url = ('https://%s/c/%s/%s/%s#%s%s' %
(self._GetGerritHost(), self.GetIssue(), comment['patch_set'], path,
'b' if comment.get('side') == 'PARENT' else '',
str(line) if line else ''))
comments[key][path][patchset][line] = (url, comment['message'])
summary = []
for msg in messages:
# Don't bother showing autogenerated messages.
if msg.get('tag') and msg.get('tag').startswith('autogenerated'):
continue
# Gerrit spits out nanoseconds.
assert len(msg['date'].split('.')[-1]) == 9
date = datetime.datetime.strptime(msg['date'][:-3],
'%Y-%m-%d %H:%M:%S.%f')
message = msg['message']
key = (msg['author']['email'], msg['date'])
if key in comments:
message += '\n'
for path, patchsets in sorted(comments.get(key, {}).items()):
if readable:
message += '\n%s' % path
for patchset, lines in sorted(patchsets.items()):
for line, (url, content) in sorted(lines.items()):
if line:
line_str = 'Line %d' % line
path_str = '%s:%d:' % (path, line)
else:
line_str = 'File comment'
path_str = '%s:0:' % path
if readable:
message += '\n %s, %s: %s' % (patchset, line_str, url)
message += '\n %s\n' % content
else:
message += '\n%s ' % path_str
message += '\n%s\n' % content
summary.append(_CommentSummary(
date=date,
message=message,
sender=msg['author']['email'],
# These could be inferred from the text messages and correlated with
# Code-Review label maximum, however this is not reliable.
# Leaving as is until the need arises.
approval=False,
disapproval=False,
))
return summary
def CloseIssue(self):
gerrit_util.AbandonChange(self._GetGerritHost(), self.GetIssue(), msg='')
def SubmitIssue(self, wait_for_merge=True):
gerrit_util.SubmitChange(self._GetGerritHost(), self.GetIssue(),
wait_for_merge=wait_for_merge)
def _GetChangeDetail(self, options=None, issue=None,
no_cache=False):
"""Returns details of the issue by querying Gerrit and caching results.
If fresh data is needed, set no_cache=True which will clear cache and
thus new data will be fetched from Gerrit.
"""
options = options or []
issue = issue or self.GetIssue()
assert issue, 'issue is required to query Gerrit'
# Optimization to avoid multiple RPCs:
if (('CURRENT_REVISION' in options or 'ALL_REVISIONS' in options) and
'CURRENT_COMMIT' not in options):
options.append('CURRENT_COMMIT')
# Normalize issue and options for consistent keys in cache.
issue = str(issue)
options = [o.upper() for o in options]
# Check in cache first unless no_cache is True.
if no_cache:
self._detail_cache.pop(issue, None)
else:
options_set = frozenset(options)
for cached_options_set, data in self._detail_cache.get(issue, []):
# Assumption: data fetched before with extra options is suitable
# for return for a smaller set of options.
# For example, if we cached data for
# options=[CURRENT_REVISION, DETAILED_FOOTERS]
# and request is for options=[CURRENT_REVISION],
# THEN we can return prior cached data.
if options_set.issubset(cached_options_set):
return data
try:
data = gerrit_util.GetChangeDetail(
self._GetGerritHost(), str(issue), options)
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise GerritChangeNotExists(issue, self.GetCodereviewServer())
raise
self._detail_cache.setdefault(issue, []).append((frozenset(options), data))
return data
def _GetChangeCommit(self, issue=None):
issue = issue or self.GetIssue()
assert issue, 'issue is required to query Gerrit'
try:
data = gerrit_util.GetChangeCommit(self._GetGerritHost(), str(issue))
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise GerritChangeNotExists(issue, self.GetCodereviewServer())
raise
return data
def CMDLand(self, force, bypass_hooks, verbose):
if git_common.is_dirty_git_tree('land'):
return 1
detail = self._GetChangeDetail(['CURRENT_REVISION', 'LABELS'])
if u'Commit-Queue' in detail.get('labels', {}):
if not force:
confirm_or_exit('\nIt seems this repository has a Commit Queue, '
'which can test and land changes for you. '
'Are you sure you wish to bypass it?\n',
action='bypass CQ')
differs = True
last_upload = self._GitGetBranchConfigValue('gerritsquashhash')
# Note: git diff outputs nothing if there is no diff.
if not last_upload or RunGit(['diff', last_upload]).strip():
print('WARNING: Some changes from local branch haven\'t been uploaded.')
else:
if detail['current_revision'] == last_upload:
differs = False
else:
print('WARNING: Local branch contents differ from latest uploaded '
'patchset.')
if differs:
if not force:
confirm_or_exit(
'Do you want to submit latest Gerrit patchset and bypass hooks?\n',
action='submit')
print('WARNING: Bypassing hooks and submitting latest uploaded patchset.')
elif not bypass_hooks:
hook_results = self.RunHook(
committing=True,
may_prompt=not force,
verbose=verbose,
change=self.GetChange(self.GetCommonAncestorWithUpstream(), None))
if not hook_results.should_continue():
return 1
self.SubmitIssue(wait_for_merge=True)
print('Issue %s has been submitted.' % self.GetIssueURL())
links = self._GetChangeCommit().get('web_links', [])
for link in links:
if link.get('name') == 'gitiles' and link.get('url'):
print('Landed as: %s' % link.get('url'))
break
return 0
def CMDPatchWithParsedIssue(self, parsed_issue_arg, reject, nocommit,
directory, force):
assert not reject
assert not directory
assert parsed_issue_arg.valid
self._changelist.issue = parsed_issue_arg.issue
if parsed_issue_arg.hostname:
self._gerrit_host = parsed_issue_arg.hostname
self._gerrit_server = 'https://%s' % self._gerrit_host
try:
detail = self._GetChangeDetail(['ALL_REVISIONS'])
except GerritChangeNotExists as e:
DieWithError(str(e))
if not parsed_issue_arg.patchset:
# Use current revision by default.
revision_info = detail['revisions'][detail['current_revision']]
patchset = int(revision_info['_number'])
else:
patchset = parsed_issue_arg.patchset
for revision_info in detail['revisions'].itervalues():
if int(revision_info['_number']) == parsed_issue_arg.patchset:
break
else:
DieWithError('Couldn\'t find patchset %i in change %i' %
(parsed_issue_arg.patchset, self.GetIssue()))
remote_url = self._changelist.GetRemoteUrl()
if remote_url.endswith('.git'):
remote_url = remote_url[:-len('.git')]
fetch_info = revision_info['fetch']['http']
if remote_url != fetch_info['url']:
DieWithError('Trying to patch a change from %s but this repo appears '
'to be %s.' % (fetch_info['url'], remote_url))
RunGit(['fetch', fetch_info['url'], fetch_info['ref']])
if force:
RunGit(['reset', '--hard', 'FETCH_HEAD'])
print('Checked out commit for change %i patchset %i locally' %
(parsed_issue_arg.issue, patchset))
elif nocommit:
RunGit(['cherry-pick', '--no-commit', 'FETCH_HEAD'])
print('Patch applied to index.')
else:
RunGit(['cherry-pick', 'FETCH_HEAD'])
print('Committed patch for change %i patchset %i locally.' %
(parsed_issue_arg.issue, patchset))
print('Note: this created a local commit which does not have '
'the same hash as the one uploaded for review. This will make '
'uploading changes based on top of this branch difficult.\n'
'If you want to do that, use "git cl patch --force" instead.')
if self.GetBranch():
self.SetIssue(parsed_issue_arg.issue)
self.SetPatchset(patchset)
fetched_hash = RunGit(['rev-parse', 'FETCH_HEAD']).strip()
self._GitSetBranchConfigValue('last-upload-hash', fetched_hash)
self._GitSetBranchConfigValue('gerritsquashhash', fetched_hash)
else:
print('WARNING: You are in detached HEAD state.\n'
'The patch has been applied to your checkout, but you will not be '
'able to upload a new patch set to the gerrit issue.\n'
'Try using the \'-b\' option if you would like to work on a '
'branch and/or upload a new patch set.')
return 0
@staticmethod
def ParseIssueURL(parsed_url):
if not parsed_url.scheme or not parsed_url.scheme.startswith('http'):
return None
# Gerrit's new UI is https://domain/c/project/+/<issue_number>[/[patchset]]
# But old GWT UI is https://domain/#/c/project/+/<issue_number>[/[patchset]]
# Short urls like https://domain/<issue_number> can be used, but don't allow
# specifying the patchset (you'd 404); we still accept them here.
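# e.g. (illustrative): /c/project/+/1234/5 parses to issue=1234, patchset=5;
# /1234 parses to issue=1234 with no patchset.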
if parsed_url.path == '/':
part = parsed_url.fragment
else:
part = parsed_url.path
match = re.match(r'(/c(/.*/\+)?)?/(\d+)(/(\d+)?/?)?$', part)
if match:
return _ParsedIssueNumberArgument(
issue=int(match.group(3)),
patchset=int(match.group(5)) if match.group(5) else None,
hostname=parsed_url.netloc,
codereview='gerrit')
return None
def _GerritCommitMsgHookCheck(self, offer_removal):
hook = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
if not os.path.exists(hook):
return
# Crude attempt to distinguish Gerrit Codereview hook from potentially
# custom developer made one.
data = gclient_utils.FileRead(hook)
if not ('From Gerrit Code Review' in data and 'add_ChangeId()' in data):
return
print('WARNING: You have Gerrit commit-msg hook installed.\n'
'It is not necessary for uploading with git cl in squash mode, '
'and may interfere with it in subtle ways.\n'
'We recommend you remove the commit-msg hook.')
if offer_removal:
if ask_for_explicit_yes('Do you want to remove it now?'):
gclient_utils.rm_file_or_tree(hook)
print('Gerrit commit-msg hook removed.')
else:
print('OK, will keep Gerrit commit-msg hook in place.')
def CMDUploadChange(self, options, git_diff_args, custom_cl_base, change):
"""Upload the current branch to Gerrit."""
if options.squash and options.no_squash:
DieWithError('Can only use one of --squash or --no-squash')
if not options.squash and not options.no_squash:
# Load default for user, repo, squash=true, in this order.
options.squash = settings.GetSquashGerritUploads()
elif options.no_squash:
options.squash = False
remote, remote_branch = self.GetRemoteBranch()
branch = GetTargetRef(remote, remote_branch, options.target_branch)
# This may be None; default fallback value is determined in logic below.
title = options.title
# Extract bug number from branch name.
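# e.g. (illustrative): branches named 'bug-123456' or 'fix_9876' yield
# bug numbers '123456' and '9876' respectively.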
bug = options.bug
match = re.match(r'(?:bug|fix)[_-]?(\d+)', self.GetBranch())
if not bug and match:
bug = match.group(1)
if options.squash:
self._GerritCommitMsgHookCheck(offer_removal=not options.force)
if self.GetIssue():
# Try to get the message from a previous upload.
message = self.GetDescription()
if not message:
DieWithError(
'failed to fetch description from current Gerrit change %d\n'
'%s' % (self.GetIssue(), self.GetIssueURL()))
if not title:
if options.message:
# When uploading a subsequent patchset, -m|--message is taken
# as the patchset title if --title was not provided.
title = options.message.strip()
else:
default_title = RunGit(
['show', '-s', '--format=%s', 'HEAD']).strip()
if options.force:
title = default_title
else:
title = ask_for_data(
'Title for patchset [%s]: ' % default_title) or default_title
change_id = self._GetChangeDetail()['change_id']
while True:
footer_change_ids = git_footers.get_footer_change_id(message)
if footer_change_ids == [change_id]:
break
if not footer_change_ids:
message = git_footers.add_footer_change_id(message, change_id)
print('WARNING: appended missing Change-Id to change description.')
continue
# There is already a valid footer but with different or several ids.
# Doing this automatically is non-trivial as we don't want to lose
# existing other footers, yet we want to append just 1 desired
# Change-Id. Thus, just create a new footer, but let user verify the
# new description.
message = '%s\n\nChange-Id: %s' % (message, change_id)
print(
'WARNING: change %s has Change-Id footer(s):\n'
' %s\n'
'but change has Change-Id %s, according to Gerrit.\n'
'Please, check the proposed correction to the description, '
'and edit it if necessary but keep the "Change-Id: %s" footer\n'
% (self.GetIssue(), '\n '.join(footer_change_ids), change_id,
change_id))
confirm_or_exit(action='edit')
if not options.force:
change_desc = ChangeDescription(message)
change_desc.prompt(bug=bug)
message = change_desc.description
if not message:
DieWithError("Description is empty. Aborting...")
# Continue the while loop.
# Sanity check of this code - we should end up with proper message
# footer.
assert [change_id] == git_footers.get_footer_change_id(message)
change_desc = ChangeDescription(message)
else: # if not self.GetIssue()
if options.message:
message = options.message
else:
message = CreateDescriptionFromLog(git_diff_args)
if options.title:
message = options.title + '\n\n' + message
change_desc = ChangeDescription(message)
if not options.force:
change_desc.prompt(bug=bug)
# On first upload, patchset title is always this string, while
# --title flag gets converted to first line of message.
title = 'Initial upload'
if not change_desc.description:
DieWithError("Description is empty. Aborting...")
change_ids = git_footers.get_footer_change_id(change_desc.description)
if len(change_ids) > 1:
DieWithError('too many Change-Id footers, at most 1 allowed.')
if not change_ids:
# Generate the Change-Id automatically.
change_desc.set_description(git_footers.add_footer_change_id(
change_desc.description,
GenerateGerritChangeId(change_desc.description)))
change_ids = git_footers.get_footer_change_id(change_desc.description)
assert len(change_ids) == 1
change_id = change_ids[0]
if options.reviewers or options.tbrs or options.add_owners_to:
change_desc.update_reviewers(options.reviewers, options.tbrs,
options.add_owners_to, change)
remote, upstream_branch = self.FetchUpstreamTuple(self.GetBranch())
parent = self._ComputeParent(remote, upstream_branch, custom_cl_base,
options.force, change_desc)
tree = RunGit(['rev-parse', 'HEAD:']).strip()
with tempfile.NamedTemporaryFile(delete=False) as desc_tempfile:
desc_tempfile.write(change_desc.description)
desc_tempfile.close()
ref_to_push = RunGit(['commit-tree', tree, '-p', parent,
'-F', desc_tempfile.name]).strip()
os.remove(desc_tempfile.name)
else:
change_desc = ChangeDescription(
options.message or CreateDescriptionFromLog(git_diff_args))
if not change_desc.description:
DieWithError("Description is empty. Aborting...")
if not git_footers.get_footer_change_id(change_desc.description):
DownloadGerritHook(False)
change_desc.set_description(
self._AddChangeIdToCommitMessage(options, git_diff_args))
if options.reviewers or options.tbrs or options.add_owners_to:
change_desc.update_reviewers(options.reviewers, options.tbrs,
options.add_owners_to, change)
ref_to_push = 'HEAD'
# For no-squash mode, we assume the remote called "origin" is the one we
# want. It is not worthwhile to support different workflows for
# no-squash mode.
parent = 'origin/%s' % branch
change_id = git_footers.get_footer_change_id(change_desc.description)[0]
assert change_desc
commits = RunGitSilent(['rev-list', '%s..%s' % (parent,
ref_to_push)]).splitlines()
if len(commits) > 1:
print('WARNING: This will upload %d commits. Run the following command '
'to see which commits will be uploaded: ' % len(commits))
print('git log %s..%s' % (parent, ref_to_push))
print('You can also use `git squash-branch` to squash these into a '
'single commit.')
confirm_or_exit(action='upload')
if options.reviewers or options.tbrs or options.add_owners_to:
change_desc.update_reviewers(options.reviewers, options.tbrs,
options.add_owners_to, change)
# Extra options that can be specified at push time. Doc:
# https://gerrit-review.googlesource.com/Documentation/user-upload.html
refspec_opts = []
# By default, new changes are started in WIP mode, and subsequent patchsets
# don't send email. At any time, passing --send-mail will mark the change
# ready and send email for that particular patch.
if options.send_mail:
refspec_opts.append('ready')
refspec_opts.append('notify=ALL')
elif not self.GetIssue():
refspec_opts.append('wip')
else:
refspec_opts.append('notify=NONE')
# TODO(tandrii): options.message should be posted as a comment
# if --send-mail is set on non-initial upload as Rietveld used to do it.
if title:
# Punctuation and whitespace in |title| must be percent-encoded.
refspec_opts.append('m=' + gerrit_util.PercentEncodeForGitRef(title))
if options.private:
refspec_opts.append('private')
if options.topic:
# Documentation on Gerrit topics is here:
# https://gerrit-review.googlesource.com/Documentation/user-upload.html#topic
refspec_opts.append('topic=%s' % options.topic)
# Gerrit sorts hashtags, so order is not important.
hashtags = {change_desc.sanitize_hash_tag(t) for t in options.hashtags}
if not self.GetIssue():
hashtags.update(change_desc.get_hash_tags())
refspec_opts += ['hashtag=%s' % t for t in sorted(hashtags)]
refspec_suffix = ''
if refspec_opts:
refspec_suffix = '%' + ','.join(refspec_opts)
assert ' ' not in refspec_suffix, (
'spaces not allowed in refspec: "%s"' % refspec_suffix)
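# The final refspec looks like, e.g. (illustrative):
#   deadbeef:refs/for/refs/heads/master%wip,m=Initial_upload,hashtag=foo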
refspec = '%s:refs/for/%s%s' % (ref_to_push, branch, refspec_suffix)
try:
push_stdout = gclient_utils.CheckCallAndFilter(
['git', 'push', self.GetRemoteUrl(), refspec],
print_stdout=True,
# Flush after every line: useful for seeing progress when running as
# recipe.
filter_fn=lambda _: sys.stdout.flush())
except subprocess2.CalledProcessError:
DieWithError('Failed to create a change. Please examine output above '
'for the reason of the failure.\n'
'Hint: run command below to diagnose common Git/Gerrit '
'credential problems:\n'
' git cl creds-check\n',
change_desc)
if options.squash:
regex = re.compile(r'remote:\s+https?://[\w\-\.\+\/#]*/(\d+)\s.*')
change_numbers = [m.group(1)
for m in map(regex.match, push_stdout.splitlines())
if m]
if len(change_numbers) != 1:
DieWithError(
('Created|Updated %d issues on Gerrit, but only 1 expected.\n'
'Change-Id: %s') % (len(change_numbers), change_id), change_desc)
self.SetIssue(change_numbers[0])
self._GitSetBranchConfigValue('gerritsquashhash', ref_to_push)
reviewers = sorted(change_desc.get_reviewers())
# Add cc's from the CC_LIST and --cc flag (if any).
if not options.private:
cc = self.GetCCList().split(',')
else:
cc = []
if options.cc:
cc.extend(options.cc)
cc = filter(None, [email.strip() for email in cc])
if change_desc.get_cced():
cc.extend(change_desc.get_cced())
gerrit_util.AddReviewers(
self._GetGerritHost(), self.GetIssue(), reviewers, cc,
notify=bool(options.send_mail))
if change_desc.get_reviewers(tbr_only=True):
labels = self._GetChangeDetail(['LABELS']).get('labels', {})
score = 1
if 'Code-Review' in labels and 'values' in labels['Code-Review']:
score = max([int(x) for x in labels['Code-Review']['values'].keys()])
print('Adding self-LGTM (Code-Review +%d) because of TBRs.' % score)
gerrit_util.SetReview(
self._GetGerritHost(), self.GetIssue(),
msg='Self-approving for TBR',
labels={'Code-Review': score})
return 0
def _ComputeParent(self, remote, upstream_branch, custom_cl_base, force,
change_desc):
"""Computes parent of the generated commit to be uploaded to Gerrit.
Returns revision or a ref name.
"""
if custom_cl_base:
# Try to avoid creating additional unintended CLs when uploading, unless
# user wants to take this risk.
local_ref_of_target_remote = self.GetRemoteBranch()[1]
code, _ = RunGitWithCode(['merge-base', '--is-ancestor', custom_cl_base,
local_ref_of_target_remote])
if code == 1:
print('\nWARNING: Manually specified base of this CL `%s` '
'doesn\'t seem to belong to target remote branch `%s`.\n\n'
'If you proceed with upload, more than 1 CL may be created by '
'Gerrit as a result, in turn confusing or crashing git cl.\n\n'
'If you are certain that specified base `%s` has already been '
'uploaded to Gerrit as another CL, you may proceed.\n' %
(custom_cl_base, local_ref_of_target_remote, custom_cl_base))
if not force:
confirm_or_exit(
'Do you take responsibility for cleaning up potential mess '
'resulting from proceeding with upload?',
action='upload')
return custom_cl_base
if remote != '.':
return self.GetCommonAncestorWithUpstream()
# If our upstream branch is local, we base our squashed commit on its
# squashed version.
upstream_branch_name = scm.GIT.ShortBranchName(upstream_branch)
if upstream_branch_name == 'master':
return self.GetCommonAncestorWithUpstream()
# Check the squashed hash of the parent.
# TODO(tandrii): consider checking parent change in Gerrit and using its
# hash if tree hash of latest parent revision (patchset) in Gerrit matches
# the tree hash of the parent branch. The upside is less likely bogus
# requests to reupload parent change just because its uploadhash is
# missing, yet the downside likely exists, too (albeit unknown to me yet).
parent = RunGit(['config',
'branch.%s.gerritsquashhash' % upstream_branch_name],
error_ok=True).strip()
# Verify that the upstream branch has been uploaded too, otherwise
# Gerrit will create additional CLs when uploading.
if not parent or (RunGitSilent(['rev-parse', upstream_branch + ':']) !=
RunGitSilent(['rev-parse', parent + ':'])):
DieWithError(
'\nUpload upstream branch %s first.\n'
'It is likely that this branch has been rebased since its last '
'upload, so you just need to upload it again.\n'
'(If you uploaded it with --no-squash, then branch dependencies '
'are not supported, and you should reupload with --squash.)'
% upstream_branch_name,
change_desc)
return parent
def _AddChangeIdToCommitMessage(self, options, args):
"""Re-commits using the current message, assumes the commit hook is in
place.
"""
log_desc = options.message or CreateDescriptionFromLog(args)
git_command = ['commit', '--amend', '-m', log_desc]
RunGit(git_command)
new_log_desc = CreateDescriptionFromLog(args)
if git_footers.get_footer_change_id(new_log_desc):
print('git-cl: Added Change-Id to commit message.')
return new_log_desc
else:
DieWithError('ERROR: Gerrit commit-msg hook not installed.')
def SetCQState(self, new_state):
"""Sets the Commit-Queue label assuming canonical CQ config for Gerrit."""
vote_map = {
_CQState.NONE: 0,
_CQState.DRY_RUN: 1,
_CQState.COMMIT: 2,
}
labels = {'Commit-Queue': vote_map[new_state]}
notify = False if new_state == _CQState.DRY_RUN else None
gerrit_util.SetReview(self._GetGerritHost(), self.GetIssue(),
labels=labels, notify=notify)
def CannotTriggerTryJobReason(self):
try:
data = self._GetChangeDetail()
except GerritChangeNotExists:
return 'Gerrit doesn\'t know about your change %s' % self.GetIssue()
if data['status'] in ('ABANDONED', 'MERGED'):
return 'CL %s is closed' % self.GetIssue()
def GetTryJobProperties(self, patchset=None):
"""Returns dictionary of properties to launch try job."""
data = self._GetChangeDetail(['ALL_REVISIONS'])
patchset = int(patchset or self.GetPatchset())
assert patchset
revision_data = None # Pylint wants it to be defined.
for revision_data in data['revisions'].itervalues():
if int(revision_data['_number']) == patchset:
break
else:
raise Exception('Patchset %d is not known in Gerrit change %d' %
(patchset, self.GetIssue()))
return {
'patch_issue': self.GetIssue(),
'patch_set': patchset,
'patch_project': data['project'],
'patch_storage': 'gerrit',
'patch_ref': revision_data['fetch']['http']['ref'],
'patch_repository_url': revision_data['fetch']['http']['url'],
'patch_gerrit_url': self.GetCodereviewServer(),
}
def GetIssueOwner(self):
return self._GetChangeDetail(['DETAILED_ACCOUNTS'])['owner']['email']
def GetReviewers(self):
details = self._GetChangeDetail(['DETAILED_ACCOUNTS'])
return [reviewer['email'] for reviewer in details['reviewers']['REVIEWER']]
_CODEREVIEW_IMPLEMENTATIONS = {
'rietveld': _RietveldChangelistImpl,
'gerrit': _GerritChangelistImpl,
}
def _add_codereview_issue_select_options(parser, extra=""):
_add_codereview_select_options(parser)
text = ('Operate on this issue number instead of the current branch\'s '
'implicit issue.')
if extra:
text += ' '+extra
parser.add_option('-i', '--issue', type=int, help=text)
def _process_codereview_issue_select_options(parser, options):
_process_codereview_select_options(parser, options)
if options.issue is not None and not options.forced_codereview:
parser.error('--issue must be specified with either --rietveld or --gerrit')
def _add_codereview_select_options(parser):
"""Appends --gerrit and --rietveld options to force specific codereview."""
parser.codereview_group = optparse.OptionGroup(
parser, 'EXPERIMENTAL! Codereview override options')
parser.add_option_group(parser.codereview_group)
parser.codereview_group.add_option(
'--gerrit', action='store_true',
help='Force the use of Gerrit for codereview')
parser.codereview_group.add_option(
'--rietveld', action='store_true',
help='Force the use of Rietveld for codereview')
def _process_codereview_select_options(parser, options):
if options.gerrit and options.rietveld:
parser.error('Options --gerrit and --rietveld are mutually exclusive')
options.forced_codereview = None
if options.gerrit:
options.forced_codereview = 'gerrit'
elif options.rietveld:
options.forced_codereview = 'rietveld'
def _get_bug_line_values(default_project, bugs):
"""Given default_project and comma separated list of bugs, yields bug line
values.
Each bug can be either:
* a number, which is combined with default_project
* a string, which is left as-is.
This function may produce more than one line, because bugdroid expects one
project per line.
>>> list(_get_bug_line_values('v8', '123,chromium:789'))
['v8:123', 'chromium:789']
"""
default_bugs = []
others = []
for bug in bugs.split(','):
bug = bug.strip()
if bug:
try:
default_bugs.append(int(bug))
except ValueError:
others.append(bug)
if default_bugs:
default_bugs = ','.join(map(str, default_bugs))
if default_project:
yield '%s:%s' % (default_project, default_bugs)
else:
yield default_bugs
for other in sorted(others):
# Don't bother finding common prefixes; CLs with >2 bugs are very rare.
yield other
class ChangeDescription(object):
"""Contains a parsed form of the change description."""
R_LINE = r'^[ \t]*(TBR|R)[ \t]*=[ \t]*(.*?)[ \t]*$'
CC_LINE = r'^[ \t]*(CC)[ \t]*=[ \t]*(.*?)[ \t]*$'
BUG_LINE = r'^[ \t]*(?:(BUG)[ \t]*=|Bug:)[ \t]*(.*?)[ \t]*$'
CHERRY_PICK_LINE = r'^\(cherry picked from commit [a-fA-F0-9]{40}\)$'
STRIP_HASH_TAG_PREFIX = r'^(\s*(revert|reland)( "|:)?\s*)*'
BRACKET_HASH_TAG = r'\s*\[([^\[\]]+)\]'
COLON_SEPARATED_HASH_TAG = r'^([a-zA-Z0-9_\- ]+):'
BAD_HASH_TAG_CHUNK = r'[^a-zA-Z0-9]+'
def __init__(self, description):
self._description_lines = (description or '').strip().splitlines()
@property # www.logilab.org/ticket/89786
def description(self): # pylint: disable=method-hidden
return '\n'.join(self._description_lines)
def set_description(self, desc):
if isinstance(desc, basestring):
lines = desc.splitlines()
else:
lines = [line.rstrip() for line in desc]
while lines and not lines[0]:
lines.pop(0)
while lines and not lines[-1]:
lines.pop(-1)
self._description_lines = lines
def update_reviewers(self, reviewers, tbrs, add_owners_to=None, change=None):
"""Rewrites the R=/TBR= line(s) as a single line each.
Args:
reviewers (list(str)) - list of additional emails to use for reviewers.
tbrs (list(str)) - list of additional emails to use for TBRs.
add_owners_to (None|'R'|'TBR') - Pass to do an OWNERS lookup for files in
the change that are missing OWNER coverage. If this is not None, you
must also pass a value for `change`.
change (Change) - The Change that should be used for OWNERS lookups.
"""
assert isinstance(reviewers, list), reviewers
assert isinstance(tbrs, list), tbrs
assert add_owners_to in (None, 'TBR', 'R'), add_owners_to
assert not add_owners_to or change, add_owners_to
if not reviewers and not tbrs and not add_owners_to:
return
reviewers = set(reviewers)
tbrs = set(tbrs)
LOOKUP = {
'TBR': tbrs,
'R': reviewers,
}
# Get the set of R= and TBR= lines and remove them from the description.
regexp = re.compile(self.R_LINE)
matches = [regexp.match(line) for line in self._description_lines]
new_desc = [l for i, l in enumerate(self._description_lines)
if not matches[i]]
self.set_description(new_desc)
# Construct new unified R= and TBR= lines.
# First, update tbrs/reviewers with names from the R=/TBR= lines (if any).
for match in matches:
if not match:
continue
LOOKUP[match.group(1)].update(cleanup_list([match.group(2).strip()]))
# Next, maybe fill in OWNERS coverage gaps to either tbrs/reviewers.
if add_owners_to:
owners_db = owners.Database(change.RepositoryRoot(),
fopen=file, os_path=os.path)
missing_files = owners_db.files_not_covered_by(change.LocalPaths(),
(tbrs | reviewers))
LOOKUP[add_owners_to].update(
owners_db.reviewers_for(missing_files, change.author_email))
# If any folks ended up in both groups, remove them from tbrs.
tbrs -= reviewers
new_r_line = 'R=' + ', '.join(sorted(reviewers)) if reviewers else None
new_tbr_line = 'TBR=' + ', '.join(sorted(tbrs)) if tbrs else None
# Put the new lines in the description where the old first R= line was.
line_loc = next((i for i, match in enumerate(matches) if match), -1)
if 0 <= line_loc < len(self._description_lines):
if new_tbr_line:
self._description_lines.insert(line_loc, new_tbr_line)
if new_r_line:
self._description_lines.insert(line_loc, new_r_line)
else:
if new_r_line:
self.append_footer(new_r_line)
if new_tbr_line:
self.append_footer(new_tbr_line)
def prompt(self, bug=None, git_footer=True):
"""Asks the user to update the description."""
self.set_description([
'# Enter a description of the change.',
'# This will be displayed on the codereview site.',
'# The first line will also be used as the subject of the review.',
'#--------------------This line is 72 characters long'
'--------------------',
] + self._description_lines)
regexp = re.compile(self.BUG_LINE)
if not any((regexp.match(line) for line in self._description_lines)):
prefix = settings.GetBugPrefix()
values = list(_get_bug_line_values(prefix, bug or '')) or [prefix]
if git_footer:
self.append_footer('Bug: %s' % ', '.join(values))
else:
for value in values:
self.append_footer('BUG=%s' % value)
content = gclient_utils.RunEditor(self.description, True,
git_editor=settings.GetGitEditor())
if not content:
DieWithError('Running editor failed')
lines = content.splitlines()
# Strip off comments and default inserted "Bug:" line.
clean_lines = [line.rstrip() for line in lines if not
(line.startswith('#') or line.rstrip() == "Bug:")]
if not clean_lines:
DieWithError('No CL description, aborting')
self.set_description(clean_lines)
def append_footer(self, line):
"""Adds a footer line to the description.
Differentiates legacy "KEY=xxx" footers (used to be called tags) and
Gerrit's footers in the form of "Footer-Key: footer any value" and ensures
that Gerrit footers are always at the end.
"""
parsed_footer_line = git_footers.parse_footer(line)
if parsed_footer_line:
# Line is a gerrit footer in the form: Footer-Key: any value.
# Thus, must be appended observing Gerrit footer rules.
self.set_description(
git_footers.add_footer(self.description,
key=parsed_footer_line[0],
value=parsed_footer_line[1]))
return
if not self._description_lines:
self._description_lines.append(line)
return
top_lines, gerrit_footers, _ = git_footers.split_footers(self.description)
if gerrit_footers:
# git_footers.split_footers ensures that there is an empty line before
# actual (gerrit) footers, if any. We have to keep it that way.
assert top_lines and top_lines[-1] == ''
top_lines, separator = top_lines[:-1], top_lines[-1:]
else:
separator = [] # No need for separator if there are no gerrit_footers.
prev_line = top_lines[-1] if top_lines else ''
if (not presubmit_support.Change.TAG_LINE_RE.match(prev_line) or
not presubmit_support.Change.TAG_LINE_RE.match(line)):
top_lines.append('')
top_lines.append(line)
self._description_lines = top_lines + separator + gerrit_footers
def get_reviewers(self, tbr_only=False):
"""Retrieves the list of reviewers."""
matches = [re.match(self.R_LINE, line) for line in self._description_lines]
reviewers = [match.group(2).strip()
for match in matches
if match and (not tbr_only or match.group(1).upper() == 'TBR')]
return cleanup_list(reviewers)
def get_cced(self):
"""Retrieves the list of reviewers."""
matches = [re.match(self.CC_LINE, line) for line in self._description_lines]
cced = [match.group(2).strip() for match in matches if match]
return cleanup_list(cced)
def get_hash_tags(self):
"""Extracts and sanitizes a list of Gerrit hashtags."""
subject = (self._description_lines or ('',))[0]
subject = re.sub(
self.STRIP_HASH_TAG_PREFIX, '', subject, flags=re.IGNORECASE)
tags = []
start = 0
bracket_exp = re.compile(self.BRACKET_HASH_TAG)
while True:
m = bracket_exp.match(subject, start)
if not m:
break
tags.append(self.sanitize_hash_tag(m.group(1)))
start = m.end()
if not tags:
# Try "Tag: " prefix.
m = re.match(self.COLON_SEPARATED_HASH_TAG, subject)
if m:
tags.append(self.sanitize_hash_tag(m.group(1)))
return tags
@classmethod
def sanitize_hash_tag(cls, tag):
"""Returns a sanitized Gerrit hash tag.
A sanitized hashtag can be used as a git push refspec parameter value.
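For example (illustrative):
>>> ChangeDescription.sanitize_hash_tag('Fix: Foo/Bar!')
'fix-foo-bar'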
"""
return re.sub(cls.BAD_HASH_TAG_CHUNK, '-', tag).strip('-').lower()
def update_with_git_number_footers(self, parent_hash, parent_msg, dest_ref):
"""Updates this commit description given the parent.
This is essentially what Gnumbd used to do.
Consult https://goo.gl/WMmpDe for more details.
"""
"""
assert parent_msg # No, orphan branch creation isn't supported.
assert parent_hash
assert dest_ref
parent_footer_map = git_footers.parse_footers(parent_msg)
# This will also happily parse svn-position, which Gnumbd is no longer
# supporting. While we'd generate correct footers, the verifier plugin
# installed in Gerrit will block such a commit (i.e. git push below will
# fail).
parent_position = git_footers.get_position(parent_footer_map)
# Cherry-picks may have last line obscuring their prior footers,
# from git_footers perspective. This is also what Gnumbd did.
cp_line = None
if (self._description_lines and
re.match(self.CHERRY_PICK_LINE, self._description_lines[-1])):
cp_line = self._description_lines.pop()
top_lines, footer_lines, _ = git_footers.split_footers(self.description)
# Original-ify all Cr- footers, to avoid re-lands, cherry-picks, or just
# user interference with actual footers we'd insert below.
for i, line in enumerate(footer_lines):
k, v = git_footers.parse_footer(line) or (None, None)
if k and k.startswith('Cr-'):
footer_lines[i] = '%s: %s' % ('Cr-Original-' + k[len('Cr-'):], v)
# Add Position and Lineage footers based on the parent.
lineage = list(reversed(parent_footer_map.get('Cr-Branched-From', [])))
if parent_position[0] == dest_ref:
# Same branch as parent.
number = int(parent_position[1]) + 1
else:
number = 1 # New branch, and extra lineage.
lineage.insert(0, '%s-%s@{#%d}' % (parent_hash, parent_position[0],
int(parent_position[1])))
footer_lines.append('Cr-Commit-Position: %s@{#%d}' % (dest_ref, number))
footer_lines.extend('Cr-Branched-From: %s' % v for v in lineage)
self._description_lines = top_lines
if cp_line:
self._description_lines.append(cp_line)
if self._description_lines[-1] != '':
self._description_lines.append('') # Ensure footer separator.
self._description_lines.extend(footer_lines)
def get_approving_reviewers(props, disapproval=False):
"""Retrieves the reviewers that approved a CL from the issue properties with
messages.
Note that the list may contain reviewers that are not committers, and thus
are not considered by the CQ.
If disapproval is True, instead returns reviewers who 'not lgtm'd the CL.
"""
approval_type = 'disapproval' if disapproval else 'approval'
return sorted(
set(
message['sender']
for message in props['messages']
if message[approval_type] and message['sender'] in props['reviewers']
)
)
def FindCodereviewSettingsFile(filename='codereview.settings'):
"""Finds the given file starting in the cwd and going up.
Only looks up to the top of the repository unless an
'inherit-review-settings-ok' file exists in the root of the repository.
"""
inherit_ok_file = 'inherit-review-settings-ok'
cwd = os.getcwd()
root = settings.GetRoot()
if os.path.isfile(os.path.join(root, inherit_ok_file)):
root = '/'
while True:
if filename in os.listdir(cwd):
if os.path.isfile(os.path.join(cwd, filename)):
return open(os.path.join(cwd, filename))
if cwd == root:
break
cwd = os.path.dirname(cwd)
def LoadCodereviewSettingsFromFile(fileobj):
"""Parse a codereview.settings file and updates hooks."""
keyvals = gclient_utils.ParseCodereviewSettingsContent(fileobj.read())
def SetProperty(name, setting, unset_error_ok=False):
fullname = 'rietveld.' + name
if setting in keyvals:
RunGit(['config', fullname, keyvals[setting]])
else:
RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)
if not keyvals.get('GERRIT_HOST', False):
SetProperty('server', 'CODE_REVIEW_SERVER')
# Only server setting is required. Other settings can be absent.
# In that case, we ignore errors raised during option deletion attempt.
SetProperty('cc', 'CC_LIST', unset_error_ok=True)
SetProperty('private', 'PRIVATE', unset_error_ok=True)
SetProperty('tree-status-url', 'STATUS', unset_error_ok=True)
SetProperty('viewvc-url', 'VIEW_VC', unset_error_ok=True)
SetProperty('bug-prefix', 'BUG_PREFIX', unset_error_ok=True)
SetProperty('cpplint-regex', 'LINT_REGEX', unset_error_ok=True)
SetProperty('cpplint-ignore-regex', 'LINT_IGNORE_REGEX', unset_error_ok=True)
SetProperty('project', 'PROJECT', unset_error_ok=True)
SetProperty('run-post-upload-hook', 'RUN_POST_UPLOAD_HOOK',
unset_error_ok=True)
if 'GERRIT_HOST' in keyvals:
RunGit(['config', 'gerrit.host', keyvals['GERRIT_HOST']])
if 'GERRIT_SQUASH_UPLOADS' in keyvals:
RunGit(['config', 'gerrit.squash-uploads',
keyvals['GERRIT_SQUASH_UPLOADS']])
if 'GERRIT_SKIP_ENSURE_AUTHENTICATED' in keyvals:
RunGit(['config', 'gerrit.skip-ensure-authenticated',
keyvals['GERRIT_SKIP_ENSURE_AUTHENTICATED']])
if 'PUSH_URL_CONFIG' in keyvals and 'ORIGIN_URL_CONFIG' in keyvals:
# should be of the form
# PUSH_URL_CONFIG: url.ssh://gitrw.chromium.org.pushinsteadof
# ORIGIN_URL_CONFIG: http://src.chromium.org/git
RunGit(['config', keyvals['PUSH_URL_CONFIG'],
keyvals['ORIGIN_URL_CONFIG']])
def urlretrieve(source, destination):
"""urllib is broken for SSL connections via a proxy therefore we
can't use urllib.urlretrieve()."""
with open(destination, 'w') as f:
f.write(urllib2.urlopen(source).read())
def hasSheBang(fname):
"""Checks fname is a #! script."""
with open(fname) as f:
return f.read(2).startswith('#!')
# TODO(bpastene) Remove once a cleaner fix to crbug.com/600473 presents itself.
def DownloadHooks(*args, **kwargs):
pass
def DownloadGerritHook(force):
"""Download and install Gerrit commit-msg hook.
Args:
force: True to update hooks. False to install hooks if not present.
"""
if not settings.GetIsGerrit():
return
src = 'https://gerrit-review.googlesource.com/tools/hooks/commit-msg'
dst = os.path.join(settings.GetRoot(), '.git', 'hooks', 'commit-msg')
if not os.access(dst, os.X_OK):
if os.path.exists(dst):
if not force:
return
try:
urlretrieve(src, dst)
if not hasSheBang(dst):
DieWithError('Not a script: %s\n'
'You need to download from\n%s\n'
'into .git/hooks/commit-msg and '
'chmod +x .git/hooks/commit-msg' % (dst, src))
os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except Exception:
if os.path.exists(dst):
os.remove(dst)
DieWithError('\nFailed to download hooks.\n'
'You need to download from\n%s\n'
'into .git/hooks/commit-msg and '
'chmod +x .git/hooks/commit-msg' % src)
def GetRietveldCodereviewSettingsInteractively():
"""Prompt the user for settings."""
server = settings.GetDefaultServerUrl(error_ok=True)
prompt = 'Rietveld server (host[:port])'
prompt += ' [%s]' % (server or DEFAULT_SERVER)
newserver = ask_for_data(prompt + ':')
if not server and not newserver:
newserver = DEFAULT_SERVER
if newserver:
newserver = gclient_utils.UpgradeToHttps(newserver)
if newserver != server:
RunGit(['config', 'rietveld.server', newserver])
def SetProperty(initial, caption, name, is_url):
prompt = caption
if initial:
prompt += ' ("x" to clear) [%s]' % initial
new_val = ask_for_data(prompt + ':')
if new_val == 'x':
RunGit(['config', '--unset-all', 'rietveld.' + name], error_ok=True)
elif new_val:
if is_url:
new_val = gclient_utils.UpgradeToHttps(new_val)
if new_val != initial:
RunGit(['config', 'rietveld.' + name, new_val])
SetProperty(settings.GetDefaultCCList(), 'CC list', 'cc', False)
SetProperty(settings.GetDefaultPrivateFlag(),
'Private flag (rietveld only)', 'private', False)
SetProperty(settings.GetTreeStatusUrl(error_ok=True), 'Tree status URL',
'tree-status-url', False)
SetProperty(settings.GetViewVCUrl(), 'ViewVC URL', 'viewvc-url', True)
SetProperty(settings.GetBugPrefix(), 'Bug Prefix', 'bug-prefix', False)
SetProperty(settings.GetRunPostUploadHook(), 'Run Post Upload Hook',
'run-post-upload-hook', False)
class _GitCookiesChecker(object):
"""Provides facilities for validating and suggesting fixes to .gitcookies."""
_GOOGLESOURCE = 'googlesource.com'
def __init__(self):
# Cached list of [host, identity, source], where source is either
# .gitcookies or .netrc.
self._all_hosts = None
def ensure_configured_gitcookies(self):
"""Runs checks and suggests fixes to make git use .gitcookies from default
path."""
default = gerrit_util.CookiesAuthenticator.get_gitcookies_path()
configured_path = RunGitSilent(
['config', '--global', 'http.cookiefile']).strip()
configured_path = os.path.expanduser(configured_path)
if configured_path:
self._ensure_default_gitcookies_path(configured_path, default)
else:
self._configure_gitcookies_path(default)
@staticmethod
def _ensure_default_gitcookies_path(configured_path, default_path):
assert configured_path
if configured_path == default_path:
print('git is already configured to use your .gitcookies from %s' %
configured_path)
return
print('WARNING: You have configured custom path to .gitcookies: %s\n'
'Gerrit and other depot_tools expect .gitcookies at %s\n' %
(configured_path, default_path))
if not os.path.exists(configured_path):
print('However, your configured .gitcookies file is missing.')
confirm_or_exit('Reconfigure git to use default .gitcookies?',
action='reconfigure')
RunGit(['config', '--global', 'http.cookiefile', default_path])
return
if os.path.exists(default_path):
print('WARNING: default .gitcookies file already exists %s' %
default_path)
DieWithError('Please delete %s manually and re-run git cl creds-check' %
default_path)
confirm_or_exit('Move existing .gitcookies to default location?',
action='move')
shutil.move(configured_path, default_path)
RunGit(['config', '--global', 'http.cookiefile', default_path])
print('Moved and reconfigured git to use .gitcookies from %s' %
default_path)
@staticmethod
def _configure_gitcookies_path(default_path):
netrc_path = gerrit_util.CookiesAuthenticator.get_netrc_path()
if os.path.exists(netrc_path):
print('You seem to be using outdated .netrc for git credentials: %s' %
netrc_path)
print('This tool will guide you through setting up recommended '
'.gitcookies store for git credentials.\n'
'\n'
'IMPORTANT: If something goes wrong and you decide to go back, do:\n'
' git config --global --unset http.cookiefile\n'
' mv %s %s.backup\n\n' % (default_path, default_path))
confirm_or_exit(action='setup .gitcookies')
RunGit(['config', '--global', 'http.cookiefile', default_path])
print('Configured git to use .gitcookies from %s' % default_path)
def get_hosts_with_creds(self, include_netrc=False):
if self._all_hosts is None:
a = gerrit_util.CookiesAuthenticator()
self._all_hosts = [
(h, u, s)
for h, u, s in itertools.chain(
((h, u, '.netrc') for h, (u, _, _) in a.netrc.hosts.iteritems()),
((h, u, '.gitcookies') for h, (u, _) in a.gitcookies.iteritems())
)
if h.endswith(self._GOOGLESOURCE)
]
if include_netrc:
return self._all_hosts
return [(h, u, s) for h, u, s in self._all_hosts if s != '.netrc']
def print_current_creds(self, include_netrc=False):
hosts = sorted(self.get_hosts_with_creds(include_netrc=include_netrc))
if not hosts:
print('No Git/Gerrit credentials found')
return
lengths = [max(map(len, (row[i] for row in hosts))) for i in xrange(3)]
header = [('Host', 'User', 'Which file'),
['=' * l for l in lengths]]
for row in (header + hosts):
print('\t'.join((('%%+%ds' % l) % s)
for l, s in zip(lengths, row)))
@staticmethod
def _parse_identity(identity):
"""Parses identity "git-<username>.domain" into <username> and domain."""
# Special case: usernames that contain ".", which are generally not
# distinguishable from sub-domains. But we do know typical domains:
if identity.endswith('.chromium.org'):
domain = 'chromium.org'
username = identity[:-len('.chromium.org')]
else:
username, domain = identity.split('.', 1)
if username.startswith('git-'):
username = username[len('git-'):]
return username, domain
def _get_usernames_of_domain(self, domain):
"""Returns list of usernames referenced by .gitcookies in a given domain."""
identities_by_domain = {}
for _, identity, _ in self.get_hosts_with_creds():
username, domain = self._parse_identity(identity)
identities_by_domain.setdefault(domain, []).append(username)
return identities_by_domain.get(domain)
def _canonical_git_googlesource_host(self, host):
"""Normalizes Gerrit hosts (with '-review') to Git host."""
assert host.endswith(self._GOOGLESOURCE)
# Prefix doesn't include '.' at the end.
prefix = host[:-(1 + len(self._GOOGLESOURCE))]
if prefix.endswith('-review'):
prefix = prefix[:-len('-review')]
return prefix + '.' + self._GOOGLESOURCE
def _canonical_gerrit_googlesource_host(self, host):
git_host = self._canonical_git_googlesource_host(host)
prefix = git_host.split('.', 1)[0]
return prefix + '-review.' + self._GOOGLESOURCE
def _get_counterpart_host(self, host):
assert host.endswith(self._GOOGLESOURCE)
git = self._canonical_git_googlesource_host(host)
gerrit = self._canonical_gerrit_googlesource_host(git)
return git if gerrit == host else gerrit
def has_generic_host(self):
"""Returns whether generic .googlesource.com has been configured.
Chrome Infra recommends using explicit ${host}.googlesource.com instead.
"""
for host, _, _ in self.get_hosts_with_creds(include_netrc=False):
if host == '.' + self._GOOGLESOURCE:
return True
return False
def _get_git_gerrit_identity_pairs(self):
"""Returns map from canonic host to pair of identities (Git, Gerrit).
One of identities might be None, meaning not configured.
"""
host_to_identity_pairs = {}
for host, identity, _ in self.get_hosts_with_creds():
canonical = self._canonical_git_googlesource_host(host)
pair = host_to_identity_pairs.setdefault(canonical, [None, None])
idx = 0 if canonical == host else 1
pair[idx] = identity
return host_to_identity_pairs
def get_partially_configured_hosts(self):
return set(
(host if i1 else self._canonical_gerrit_googlesource_host(host))
for host, (i1, i2) in self._get_git_gerrit_identity_pairs().iteritems()
if None in (i1, i2) and host != '.' + self._GOOGLESOURCE)
def get_conflicting_hosts(self):
return set(
host
for host, (i1, i2) in self._get_git_gerrit_identity_pairs().iteritems()
if None not in (i1, i2) and i1 != i2)
def get_duplicated_hosts(self):
counters = collections.Counter(h for h, _, _ in self.get_hosts_with_creds())
return set(host for host, count in counters.iteritems() if count > 1)
_EXPECTED_HOST_IDENTITY_DOMAINS = {
'chromium.googlesource.com': 'chromium.org',
'chrome-internal.googlesource.com': 'google.com',
}
def get_hosts_with_wrong_identities(self):
"""Finds hosts which **likely** reference wrong identities.
Note: skips hosts which have conflicting identities for Git and Gerrit.
"""
hosts = set()
for host, expected in self._EXPECTED_HOST_IDENTITY_DOMAINS.iteritems():
pair = self._get_git_gerrit_identity_pairs().get(host)
if pair and pair[0] == pair[1]:
_, domain = self._parse_identity(pair[0])
if domain != expected:
hosts.add(host)
return hosts
@staticmethod
def _format_hosts(hosts, extra_column_func=None):
hosts = sorted(hosts)
assert hosts
if extra_column_func is None:
extras = [''] * len(hosts)
else:
extras = [extra_column_func(host) for host in hosts]
tmpl = '%%-%ds %%-%ds' % (max(map(len, hosts)), max(map(len, extras)))
lines = []
for he in zip(hosts, extras):
lines.append(tmpl % he)
return lines
def _find_problems(self):
if self.has_generic_host():
yield ('.googlesource.com wildcard record detected',
['Chrome Infrastructure team recommends listing full host names '
'explicitly.'],
None)
dups = self.get_duplicated_hosts()
if dups:
yield ('The following hosts were defined twice',
self._format_hosts(dups),
None)
partial = self.get_partially_configured_hosts()
if partial:
yield ('Credentials should come in pairs for Git and Gerrit hosts. '
'These hosts are missing',
self._format_hosts(partial, lambda host: 'but %s defined' %
self._get_counterpart_host(host)),
partial)
conflicting = self.get_conflicting_hosts()
if conflicting:
yield ('The following Git hosts have differing credentials from their '
'Gerrit counterparts',
self._format_hosts(conflicting, lambda host: '%s vs %s' %
tuple(self._get_git_gerrit_identity_pairs()[host])),
conflicting)
wrong = self.get_hosts_with_wrong_identities()
if wrong:
yield ('These hosts likely use the wrong identity',
self._format_hosts(wrong, lambda host: '%s but %s recommended' %
(self._get_git_gerrit_identity_pairs()[host][0],
self._EXPECTED_HOST_IDENTITY_DOMAINS[host])),
wrong)
def find_and_report_problems(self):
"""Returns True if there was at least one problem, else False."""
found = False
bad_hosts = set()
for title, sublines, hosts in self._find_problems():
if not found:
found = True
print('\n\n.gitcookies problem report:\n')
bad_hosts.update(hosts or [])
print(' %s%s' % (title, (':' if sublines else '')))
if sublines:
print()
print(' %s' % '\n '.join(sublines))
print()
if bad_hosts:
assert found
print(' You can manually remove corresponding lines in your %s file and '
'visit the following URLs with the correct account to generate '
'correct credential lines:\n' %
gerrit_util.CookiesAuthenticator.get_gitcookies_path())
print(' %s' % '\n '.join(sorted(set(
gerrit_util.CookiesAuthenticator().get_new_password_url(
self._canonical_git_googlesource_host(host))
for host in bad_hosts
))))
return found
def CMDcreds_check(parser, args):
"""Checks credentials and suggests changes."""
_, _ = parser.parse_args(args)
if gerrit_util.GceAuthenticator.is_gce():
DieWithError(
'This command is not designed for GCE, are you on a bot?\n'
'If you need to run this, export SKIP_GCE_AUTH_FOR_GIT=1 in your env.')
checker = _GitCookiesChecker()
checker.ensure_configured_gitcookies()
print('Your .netrc and .gitcookies have credentials for these hosts:')
checker.print_current_creds(include_netrc=True)
if not checker.find_and_report_problems():
print('\nNo problems detected in your .gitcookies file.')
return 0
return 1
@subcommand.usage('[repo root containing codereview.settings]')
def CMDconfig(parser, args):
"""Edits configuration for this tree."""
print('WARNING: git cl config works for Rietveld only.')
# TODO(tandrii): remove this once we switch to Gerrit.
# See bugs http://crbug.com/637561 and http://crbug.com/600469.
parser.add_option('--activate-update', action='store_true',
help='activate auto-updating [rietveld] section in '
'.git/config')
parser.add_option('--deactivate-update', action='store_true',
help='deactivate auto-updating [rietveld] section in '
'.git/config')
options, args = parser.parse_args(args)
if options.deactivate_update:
RunGit(['config', 'rietveld.autoupdate', 'false'])
return
if options.activate_update:
RunGit(['config', '--unset', 'rietveld.autoupdate'])
return
if len(args) == 0:
GetRietveldCodereviewSettingsInteractively()
return 0
url = args[0]
if not url.endswith('codereview.settings'):
url = os.path.join(url, 'codereview.settings')
# Load code review settings and download hooks (if available).
LoadCodereviewSettingsFromFile(urllib2.urlopen(url))
return 0
def CMDbaseurl(parser, args):
"""Gets or sets base-url for this branch."""
branchref = RunGit(['symbolic-ref', 'HEAD']).strip()
branch = ShortBranchName(branchref)
_, args = parser.parse_args(args)
if not args:
print('Current base-url:')
return RunGit(['config', 'branch.%s.base-url' % branch],
error_ok=False).strip()
else:
print('Setting base-url to %s' % args[0])
return RunGit(['config', 'branch.%s.base-url' % branch, args[0]],
error_ok=False).strip()
def color_for_status(status):
"""Maps a Changelist status to color, for CMDstatus and other tools."""
return {
'unsent': Fore.YELLOW,
'waiting': Fore.BLUE,
'reply': Fore.YELLOW,
'not lgtm': Fore.RED,
'lgtm': Fore.GREEN,
'commit': Fore.MAGENTA,
'closed': Fore.CYAN,
'error': Fore.WHITE,
}.get(status, Fore.WHITE)
def get_cl_statuses(changes, fine_grained, max_processes=None):
"""Returns a blocking iterable of (cl, status) for given branches.
If fine_grained is true, this will fetch CL statuses from the server.
Otherwise, simply indicate if there's a matching url for the given branches.
If max_processes is specified, it is used as the maximum number of processes
to spawn to fetch CL status from the server. Otherwise 1 process per branch is
spawned.
See GetStatus() for a list of possible statuses.
"""
# Silence upload.py otherwise it becomes unwieldy.
upload.verbosity = 0
if not changes:
    # A bare return ends the generator cleanly; raising StopIteration inside
    # a generator is deprecated (PEP 479) and becomes a RuntimeError on newer
    # Python versions.
    return
if not fine_grained:
# Fast path which doesn't involve querying codereview servers.
# Do not use get_approving_reviewers(), since it requires an HTTP request.
for cl in changes:
yield (cl, 'waiting' if cl.GetIssueURL() else 'error')
return
# First, sort out authentication issues.
logging.debug('ensuring credentials exist')
for cl in changes:
cl.EnsureAuthenticated(force=False, refresh=True)
def fetch(cl):
try:
return (cl, cl.GetStatus())
except:
# See http://crbug.com/629863.
logging.exception('failed to fetch status for %s:', cl)
raise
threads_count = len(changes)
if max_processes:
threads_count = max(1, min(threads_count, max_processes))
logging.debug('querying %d CLs using %d threads', len(changes), threads_count)
pool = ThreadPool(threads_count)
fetched_cls = set()
try:
it = pool.imap_unordered(fetch, changes).__iter__()
while True:
try:
cl, status = it.next(timeout=5)
except multiprocessing.TimeoutError:
break
fetched_cls.add(cl)
yield cl, status
finally:
pool.close()
# Add any branches that failed to fetch.
for cl in set(changes) - fetched_cls:
yield (cl, 'error')
def upload_branch_deps(cl, args):
"""Uploads CLs of local branches that are dependents of the current branch.
If the local branch dependency tree looks like:
test1 -> test2.1 -> test3.1
-> test3.2
-> test2.2 -> test3.3
and you run "git cl upload --dependencies" from test1 then "git cl upload" is
run on the dependent branches in this order:
test2.1, test3.1, test3.2, test2.2, test3.3
Note: This function does not rebase your local dependent branches. Use it when
you make a change to the parent branch that will not conflict with its
dependent branches, and you would like their dependencies updated in
Rietveld.
"""
if git_common.is_dirty_git_tree('upload-branch-deps'):
return 1
root_branch = cl.GetBranch()
if root_branch is None:
DieWithError('Can\'t find dependent branches from detached HEAD state. '
'Get on a branch!')
if not cl.GetIssue() or (not cl.IsGerrit() and not cl.GetPatchset()):
DieWithError('Current branch does not have an uploaded CL. We cannot set '
'patchset dependencies without an uploaded CL.')
branches = RunGit(['for-each-ref',
'--format=%(refname:short) %(upstream:short)',
'refs/heads'])
if not branches:
print('No local branches found.')
return 0
  # Create a dictionary mapping each local branch to the branches that depend
  # on it.
tracked_to_dependents = collections.defaultdict(list)
for b in branches.splitlines():
tokens = b.split()
if len(tokens) == 2:
branch_name, tracked = tokens
tracked_to_dependents[tracked].append(branch_name)
print()
print('The dependent local branches of %s are:' % root_branch)
dependents = []
def traverse_dependents_preorder(branch, padding=''):
dependents_to_process = tracked_to_dependents.get(branch, [])
padding += ' '
for dependent in dependents_to_process:
print('%s%s' % (padding, dependent))
dependents.append(dependent)
traverse_dependents_preorder(dependent, padding)
traverse_dependents_preorder(root_branch)
print()
if not dependents:
print('There are no dependent local branches for %s' % root_branch)
return 0
confirm_or_exit('This command will checkout all dependent branches and run '
'"git cl upload".', action='continue')
# Add a default patchset title to all upload calls in Rietveld.
if not cl.IsGerrit():
args.extend(['-t', 'Updated patchset dependency'])
# Record all dependents that failed to upload.
failures = {}
# Go through all dependents, checkout the branch and upload.
try:
for dependent_branch in dependents:
print()
print('--------------------------------------')
print('Running "git cl upload" from %s:' % dependent_branch)
RunGit(['checkout', '-q', dependent_branch])
print()
try:
if CMDupload(OptionParser(), args) != 0:
print('Upload failed for %s!' % dependent_branch)
failures[dependent_branch] = 1
except: # pylint: disable=bare-except
failures[dependent_branch] = 1
print()
finally:
# Swap back to the original root branch.
RunGit(['checkout', '-q', root_branch])
print()
print('Upload complete for dependent branches!')
for dependent_branch in dependents:
upload_status = 'failed' if failures.get(dependent_branch) else 'succeeded'
print(' %s : %s' % (dependent_branch, upload_status))
print()
return 0
def CMDarchive(parser, args):
"""Archives and deletes branches associated with closed changelists."""
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status.')
parser.add_option(
'-f', '--force', action='store_true',
help='Bypasses the confirmation prompt.')
parser.add_option(
'-d', '--dry-run', action='store_true',
help='Skip the branch tagging and removal steps.')
parser.add_option(
'-t', '--notags', action='store_true',
help='Do not tag archived branches. '
'Note: local commit history may be lost.')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported args: %s' % ' '.join(args))
auth_config = auth.extract_auth_config_from_options(options)
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
return 0
print('Finding all branches associated with closed issues...')
changes = [Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines()]
alignment = max(5, max(len(c.GetBranch()) for c in changes))
statuses = get_cl_statuses(changes,
fine_grained=True,
max_processes=options.maxjobs)
proposal = [(cl.GetBranch(),
'git-cl-archived-%s-%s' % (cl.GetIssue(), cl.GetBranch()))
for cl, status in statuses
if status == 'closed']
proposal.sort()
if not proposal:
print('No branches with closed codereview issues found.')
return 0
current_branch = GetCurrentBranch()
print('\nBranches with closed issues that will be archived:\n')
if options.notags:
for next_item in proposal:
print(' ' + next_item[0])
else:
print('%*s | %s' % (alignment, 'Branch name', 'Archival tag name'))
for next_item in proposal:
print('%*s %s' % (alignment, next_item[0], next_item[1]))
# Quit now on precondition failure or if instructed by the user, either
# via an interactive prompt or by command line flags.
if options.dry_run:
print('\nNo changes were made (dry run).\n')
return 0
elif any(branch == current_branch for branch, _ in proposal):
print('You are currently on a branch \'%s\' which is associated with a '
'closed codereview issue, so archive cannot proceed. Please '
'checkout another branch and run this command again.' %
current_branch)
return 1
elif not options.force:
answer = ask_for_data('\nProceed with deletion (Y/n)? ').lower()
if answer not in ('y', ''):
print('Aborted.')
return 1
for branch, tagname in proposal:
if not options.notags:
RunGit(['tag', tagname, branch])
RunGit(['branch', '-D', branch])
print('\nJob\'s done!')
return 0
def CMDstatus(parser, args):
"""Show status of changelists.
Colors are used to tell the state of the CL unless --fast is used:
- Blue waiting for review
- Yellow waiting for you to reply to review, or not yet sent
- Green LGTM'ed
- Red 'not LGTM'ed
- Magenta in the commit queue
- Cyan was committed, branch can be deleted
- White error, or unknown status
Also see 'git cl comments'.
"""
parser.add_option('--field',
help='print only specific field (desc|id|patch|status|url)')
parser.add_option('-f', '--fast', action='store_true',
help='Do not retrieve review status')
parser.add_option(
'-j', '--maxjobs', action='store', type=int,
help='The maximum number of jobs to use when retrieving review status')
auth.add_auth_options(parser)
_add_codereview_issue_select_options(
parser, 'Must be in conjunction with --field.')
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
if args:
parser.error('Unsupported args: %s' % args)
auth_config = auth.extract_auth_config_from_options(options)
if options.issue is not None and not options.field:
parser.error('--field must be specified with --issue')
if options.field:
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
if options.field.startswith('desc'):
print(cl.GetDescription())
elif options.field == 'id':
issueid = cl.GetIssue()
if issueid:
print(issueid)
elif options.field == 'patch':
patchset = cl.GetMostRecentPatchset()
if patchset:
print(patchset)
elif options.field == 'status':
print(cl.GetStatus())
elif options.field == 'url':
url = cl.GetIssueURL()
if url:
print(url)
return 0
branches = RunGit(['for-each-ref', '--format=%(refname)', 'refs/heads'])
if not branches:
print('No local branch found.')
return 0
changes = [
Changelist(branchref=b, auth_config=auth_config)
for b in branches.splitlines()]
print('Branches associated with reviews:')
output = get_cl_statuses(changes,
fine_grained=not options.fast,
max_processes=options.maxjobs)
branch_statuses = {}
alignment = max(5, max(len(ShortBranchName(c.GetBranch())) for c in changes))
for cl in sorted(changes, key=lambda c: c.GetBranch()):
branch = cl.GetBranch()
while branch not in branch_statuses:
c, status = output.next()
branch_statuses[c.GetBranch()] = status
status = branch_statuses.pop(branch)
url = cl.GetIssueURL()
if url and (not status or status == 'error'):
# The issue probably doesn't exist anymore.
url += ' (broken)'
color = color_for_status(status)
reset = Fore.RESET
if not setup_color.IS_TTY:
color = ''
reset = ''
status_str = '(%s)' % status if status else ''
print(' %*s : %s%s %s%s' % (
alignment, ShortBranchName(branch), color, url,
status_str, reset))
branch = GetCurrentBranch()
print()
print('Current branch: %s' % branch)
for cl in changes:
if cl.GetBranch() == branch:
break
if not cl.GetIssue():
print('No issue assigned.')
return 0
print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
if not options.fast:
print('Issue description:')
print(cl.GetDescription(pretty=True))
return 0
def colorize_CMDstatus_doc():
"""To be called once in main() to add colors to git cl status help."""
colors = [i for i in dir(Fore) if i[0].isupper()]
def colorize_line(line):
for color in colors:
if color in line.upper():
# Extract whitespace first and the leading '-'.
indent = len(line) - len(line.lstrip(' ')) + 1
return line[:indent] + getattr(Fore, color) + line[indent:] + Fore.RESET
return line
lines = CMDstatus.__doc__.splitlines()
CMDstatus.__doc__ = '\n'.join(colorize_line(l) for l in lines)
def write_json(path, contents):
if path == '-':
json.dump(contents, sys.stdout)
else:
with open(path, 'w') as f:
json.dump(contents, f)
@subcommand.usage('[issue_number]')
def CMDissue(parser, args):
"""Sets or displays the current code review issue number.
Pass issue number 0 to clear the current issue.
"""
parser.add_option('-r', '--reverse', action='store_true',
help='Lookup the branch(es) for the specified issues. If '
'no issues are specified, all branches with mapped '
'issues will be listed.')
parser.add_option('--json',
help='Path to JSON output file, or "-" for stdout.')
_add_codereview_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
if options.reverse:
branches = RunGit(['for-each-ref', 'refs/heads',
'--format=%(refname)']).splitlines()
# Reverse issue lookup.
issue_branch_map = {}
for branch in branches:
cl = Changelist(branchref=branch)
issue_branch_map.setdefault(cl.GetIssue(), []).append(branch)
if not args:
args = sorted(issue_branch_map.iterkeys())
result = {}
for issue in args:
if not issue:
continue
result[int(issue)] = issue_branch_map.get(int(issue))
print('Branch for issue number %s: %s' % (
issue, ', '.join(issue_branch_map.get(int(issue)) or ('None',))))
if options.json:
write_json(options.json, result)
return 0
if len(args) > 0:
issue = ParseIssueNumberArgument(args[0], options.forced_codereview)
if not issue.valid:
DieWithError('Pass a url or number to set the issue, 0 to unset it, '
'or no argument to list it.\n'
'Maybe you want to run git cl status?')
cl = Changelist(codereview=issue.codereview)
cl.SetIssue(issue.issue)
else:
cl = Changelist(codereview=options.forced_codereview)
print('Issue number: %s (%s)' % (cl.GetIssue(), cl.GetIssueURL()))
if options.json:
write_json(options.json, {
'issue': cl.GetIssue(),
'issue_url': cl.GetIssueURL(),
})
return 0
def CMDcomments(parser, args):
"""Shows or posts review comments for any changelist."""
parser.add_option('-a', '--add-comment', dest='comment',
help='comment to add to an issue')
parser.add_option('-i', '--issue', dest='issue',
help='review issue id (defaults to current issue). '
'If given, requires --rietveld or --gerrit')
parser.add_option('-m', '--machine-readable', dest='readable',
action='store_false', default=True,
help='output comments in a format compatible with '
'editor parsing')
parser.add_option('-j', '--json-file',
help='File to write JSON summary to, or "-" for stdout')
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
issue = None
if options.issue:
try:
issue = int(options.issue)
except ValueError:
DieWithError('A review issue id is expected to be a number')
if not options.forced_codereview:
parser.error('--gerrit or --rietveld is required if --issue is specified')
cl = Changelist(issue=issue,
codereview=options.forced_codereview,
auth_config=auth_config)
if options.comment:
cl.AddComment(options.comment)
return 0
summary = sorted(cl.GetCommentsSummary(readable=options.readable),
key=lambda c: c.date)
for comment in summary:
if comment.disapproval:
color = Fore.RED
elif comment.approval:
color = Fore.GREEN
elif comment.sender == cl.GetIssueOwner():
color = Fore.MAGENTA
else:
color = Fore.BLUE
print('\n%s%s %s%s\n%s' % (
color,
comment.date.strftime('%Y-%m-%d %H:%M:%S UTC'),
comment.sender,
Fore.RESET,
'\n'.join(' ' + l for l in comment.message.strip().splitlines())))
if options.json_file:
def pre_serialize(c):
dct = c.__dict__.copy()
dct['date'] = dct['date'].strftime('%Y-%m-%d %H:%M:%S.%f')
return dct
with open(options.json_file, 'wb') as f:
json.dump(map(pre_serialize, summary), f)
return 0
@subcommand.usage('[codereview url or issue id]')
def CMDdescription(parser, args):
"""Brings up the editor for the current CL's description."""
parser.add_option('-d', '--display', action='store_true',
help='Display the description instead of opening an editor')
parser.add_option('-n', '--new-description',
help='New description to set for this issue (- for stdin, '
'+ to load from local commit HEAD)')
parser.add_option('-f', '--force', action='store_true',
help='Delete any unpublished Gerrit edits for this issue '
'without prompting')
_add_codereview_select_options(parser)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
_process_codereview_select_options(parser, options)
target_issue_arg = None
if len(args) > 0:
target_issue_arg = ParseIssueNumberArgument(args[0],
options.forced_codereview)
if not target_issue_arg.valid:
parser.error('invalid codereview url or CL id')
auth_config = auth.extract_auth_config_from_options(options)
kwargs = {
'auth_config': auth_config,
'codereview': options.forced_codereview,
}
detected_codereview_from_url = False
if target_issue_arg:
kwargs['issue'] = target_issue_arg.issue
kwargs['codereview_host'] = target_issue_arg.hostname
if target_issue_arg.codereview and not options.forced_codereview:
detected_codereview_from_url = True
kwargs['codereview'] = target_issue_arg.codereview
cl = Changelist(**kwargs)
if not cl.GetIssue():
assert not detected_codereview_from_url
DieWithError('This branch has no associated changelist.')
if detected_codereview_from_url:
logging.info('canonical issue/change URL: %s (type: %s)\n',
cl.GetIssueURL(), target_issue_arg.codereview)
description = ChangeDescription(cl.GetDescription())
if options.display:
print(description.description)
return 0
if options.new_description:
text = options.new_description
if text == '-':
text = '\n'.join(l.rstrip() for l in sys.stdin)
elif text == '+':
base_branch = cl.GetCommonAncestorWithUpstream()
change = cl.GetChange(base_branch, None, local_description=True)
text = change.FullDescriptionText()
description.set_description(text)
else:
description.prompt(git_footer=cl.IsGerrit())
if cl.GetDescription().strip() != description.description:
cl.UpdateDescription(description.description, force=options.force)
return 0
def CreateDescriptionFromLog(args):
"""Pulls out the commit log to use as a base for the CL description."""
log_args = []
if len(args) == 1 and not args[0].endswith('.'):
log_args = [args[0] + '..']
elif len(args) == 1 and args[0].endswith('...'):
log_args = [args[0][:-1]]
elif len(args) == 2:
log_args = [args[0] + '..' + args[1]]
else:
log_args = args[:] # Hope for the best!
return RunGit(['log', '--pretty=format:%s\n\n%b'] + log_args)
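# Examples of the argument handling above (hypothetical refs):
#   CreateDescriptionFromLog(['origin/master'])  -> git log origin/master..
#   CreateDescriptionFromLog(['foo...'])         -> git log foo..
#   CreateDescriptionFromLog(['rev1', 'rev2'])   -> git log rev1..rev2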
def CMDlint(parser, args):
"""Runs cpplint on the current changelist."""
parser.add_option('--filter', action='append', metavar='-x,+y',
help='Comma-separated list of cpplint\'s category-filters')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
try:
import cpplint
import cpplint_chromium
except ImportError:
print('Your depot_tools is missing cpplint.py and/or cpplint_chromium.py.')
return 1
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(settings.GetRoot())
try:
cl = Changelist(auth_config=auth_config)
change = cl.GetChange(cl.GetCommonAncestorWithUpstream(), None)
files = [f.LocalPath() for f in change.AffectedFiles()]
if not files:
print('Cannot lint an empty CL')
return 1
# Process cpplints arguments if any.
command = args + files
if options.filter:
command = ['--filter=' + ','.join(options.filter)] + command
filenames = cpplint.ParseArguments(command)
white_regex = re.compile(settings.GetLintRegex())
black_regex = re.compile(settings.GetLintIgnoreRegex())
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print('Ignoring file %s' % filename)
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print('Skipping file %s' % filename)
finally:
os.chdir(previous_cwd)
print('Total errors found: %d\n' % cpplint._cpplint_state.error_count)
if cpplint._cpplint_state.error_count != 0:
return 1
return 0
def CMDpresubmit(parser, args):
"""Runs presubmit tests on the current changelist."""
parser.add_option('-u', '--upload', action='store_true',
help='Run upload hook instead of the push hook')
parser.add_option('-f', '--force', action='store_true',
help='Run checks even if tree is dirty')
parser.add_option('--all', action='store_true',
help='Run checks against all files, not just modified ones')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if not options.force and git_common.is_dirty_git_tree('presubmit'):
print('use --force to check even if tree is dirty.')
return 1
cl = Changelist(auth_config=auth_config)
if args:
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
if options.all:
base_change = cl.GetChange(base_branch, None)
files = [('M', f) for f in base_change.AllFiles()]
change = presubmit_support.GitChange(
base_change.Name(),
base_change.FullDescriptionText(),
base_change.RepositoryRoot(),
files,
base_change.issue,
base_change.patchset,
base_change.author_email,
base_change._upstream)
else:
change = cl.GetChange(base_branch, None)
cl.RunHook(
committing=not options.upload,
may_prompt=False,
verbose=options.verbose,
change=change)
return 0
def GenerateGerritChangeId(message):
"""Returns Ixxxxxx...xxx change id.
Works the same way as
https://gerrit-review.googlesource.com/tools/hooks/commit-msg
but can be called on demand on all platforms.
  The basic idea is to generate a git hash of the state of the tree, the
  original commit message, author/committer info and timestamps.
"""
lines = []
tree_hash = RunGitSilent(['write-tree'])
lines.append('tree %s' % tree_hash.strip())
code, parent = RunGitWithCode(['rev-parse', 'HEAD~0'], suppress_stderr=False)
if code == 0:
lines.append('parent %s' % parent.strip())
author = RunGitSilent(['var', 'GIT_AUTHOR_IDENT'])
lines.append('author %s' % author.strip())
committer = RunGitSilent(['var', 'GIT_COMMITTER_IDENT'])
lines.append('committer %s' % committer.strip())
lines.append('')
  # Note: Gerrit's commit-msg hook actually cleans the message of some lines
  # and whitespace. This code does not do that, but the extra content clearly
  # won't decrease entropy.
lines.append(message)
change_hash = RunCommand(['git', 'hash-object', '-t', 'commit', '--stdin'],
stdin='\n'.join(lines))
return 'I%s' % change_hash.strip()
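# The result is shaped like a Gerrit Change-Id footer value, e.g. the
# (hypothetical) 'I0123456789abcdef0123456789abcdef01234567': an 'I' followed
# by the 40 hex digits of the hashed synthetic commit object built above.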
def GetTargetRef(remote, remote_branch, target_branch):
"""Computes the remote branch ref to use for the CL.
Args:
remote (str): The git remote for the CL.
remote_branch (str): The git remote branch for the CL.
target_branch (str): The target branch specified by the user.
"""
if not (remote and remote_branch):
return None
if target_branch:
# Canonicalize branch references to the equivalent local full symbolic
# refs, which are then translated into the remote full symbolic refs
# below.
if '/' not in target_branch:
remote_branch = 'refs/remotes/%s/%s' % (remote, target_branch)
else:
prefix_replacements = (
('^((refs/)?remotes/)?branch-heads/', 'refs/remotes/branch-heads/'),
('^((refs/)?remotes/)?%s/' % remote, 'refs/remotes/%s/' % remote),
('^(refs/)?heads/', 'refs/remotes/%s/' % remote),
)
match = None
for regex, replacement in prefix_replacements:
match = re.search(regex, target_branch)
if match:
remote_branch = target_branch.replace(match.group(0), replacement)
break
if not match:
# This is a branch path but not one we recognize; use as-is.
remote_branch = target_branch
elif remote_branch in REFS_THAT_ALIAS_TO_OTHER_REFS:
# Handle the refs that need to land in different refs.
remote_branch = REFS_THAT_ALIAS_TO_OTHER_REFS[remote_branch]
# Create the true path to the remote branch.
# Does the following translation:
# * refs/remotes/origin/refs/diff/test -> refs/diff/test
# * refs/remotes/origin/master -> refs/heads/master
# * refs/remotes/branch-heads/test -> refs/branch-heads/test
if remote_branch.startswith('refs/remotes/%s/refs/' % remote):
remote_branch = remote_branch.replace('refs/remotes/%s/' % remote, '')
elif remote_branch.startswith('refs/remotes/%s/' % remote):
remote_branch = remote_branch.replace('refs/remotes/%s/' % remote,
'refs/heads/')
elif remote_branch.startswith('refs/remotes/branch-heads'):
remote_branch = remote_branch.replace('refs/remotes/', 'refs/')
return remote_branch
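# Worked examples of the translation above (hypothetical remote 'origin',
# assuming no alias in REFS_THAT_ALIAS_TO_OTHER_REFS applies):
#   GetTargetRef('origin', 'refs/remotes/origin/master', None)
#       -> 'refs/heads/master'
#   GetTargetRef('origin', 'refs/remotes/origin/master', 'branch-heads/123')
#       -> 'refs/branch-heads/123'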
def cleanup_list(l):
"""Fixes a list so that comma separated items are put as individual items.
So that "--reviewers joe@c,john@c --reviewers joa@c" results in
options.reviewers == sorted(['joe@c', 'john@c', 'joa@c']).
"""
items = sum((i.split(',') for i in l), [])
stripped_items = (i.strip() for i in items)
return sorted(filter(None, stripped_items))
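# e.g. cleanup_list(['joe@c,john@c', ' joa@c ']) == ['joa@c', 'joe@c', 'john@c']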
@subcommand.usage('[flags]')
def CMDupload(parser, args):
"""Uploads the current changelist to codereview.
Can skip dependency patchset uploads for a branch by running:
git config branch.branch_name.skip-deps-uploads True
To unset run:
git config --unset branch.branch_name.skip-deps-uploads
Can also set the above globally by using the --global flag.
If the name of the checked out branch starts with "bug-" or "fix-" followed by
a bug number, this bug number is automatically populated in the CL
description.
  If the subject contains text in square brackets or has a "<text>: " prefix,
  that text is treated as a Gerrit hashtag. For example, CLs with subjects
[git-cl] add support for hashtags
Foo bar: implement foo
will be hashtagged with "git-cl" and "foo-bar" respectively.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('--bypass-watchlists', action='store_true',
dest='bypass_watchlists',
help='bypass watchlists auto CC-ing reviewers')
parser.add_option('-f', '--force', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('--message', '-m', dest='message',
help='message for patchset')
parser.add_option('-b', '--bug',
help='pre-populate the bug number(s) for this issue. '
'If several, separate with commas')
parser.add_option('--message-file', dest='message_file',
help='file which contains message for patchset')
parser.add_option('--title', '-t', dest='title',
help='title for patchset')
parser.add_option('-r', '--reviewers',
action='append', default=[],
help='reviewer email addresses')
parser.add_option('--tbrs',
action='append', default=[],
help='TBR email addresses')
parser.add_option('--cc',
action='append', default=[],
help='cc email addresses')
parser.add_option('--hashtag', dest='hashtags',
action='append', default=[],
help=('Gerrit hashtag for new CL; '
'can be applied multiple times'))
parser.add_option('-s', '--send-mail', action='store_true',
help='send email to reviewer(s) and cc(s) immediately')
parser.add_option('--emulate_svn_auto_props',
'--emulate-svn-auto-props',
action="store_true",
dest="emulate_svn_auto_props",
help="Emulate Subversion's auto properties feature.")
parser.add_option('-c', '--use-commit-queue', action='store_true',
help='tell the commit queue to commit this patchset; '
'implies --send-mail')
parser.add_option('--target_branch',
'--target-branch',
metavar='TARGET',
help='Apply CL to remote ref TARGET. ' +
'Default: remote branch head, or master')
parser.add_option('--squash', action='store_true',
help='Squash multiple commits into one')
parser.add_option('--no-squash', action='store_true',
help='Don\'t squash multiple commits into one')
parser.add_option('--topic', default=None,
help='Topic to specify when uploading')
parser.add_option('--tbr-owners', dest='add_owners_to', action='store_const',
const='TBR', help='add a set of OWNERS to TBR')
parser.add_option('--r-owners', dest='add_owners_to', action='store_const',
const='R', help='add a set of OWNERS to R')
parser.add_option('-d', '--cq-dry-run', dest='cq_dry_run',
action='store_true',
help='Send the patchset to do a CQ dry run right after '
'upload.')
parser.add_option('--dependencies', action='store_true',
help='Uploads CLs of all the local branches that depend on '
'the current branch')
# TODO: remove Rietveld flags
parser.add_option('--private', action='store_true',
help='set the review private (rietveld only)')
parser.add_option('--email', default=None,
help='email address to use to connect to Rietveld')
orig_args = args
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
(options, args) = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if git_common.is_dirty_git_tree('upload'):
return 1
options.reviewers = cleanup_list(options.reviewers)
options.tbrs = cleanup_list(options.tbrs)
options.cc = cleanup_list(options.cc)
if options.message_file:
if options.message:
parser.error('only one of --message and --message-file allowed.')
options.message = gclient_utils.FileRead(options.message_file)
options.message_file = None
if options.cq_dry_run and options.use_commit_queue:
parser.error('only one of --use-commit-queue and --cq-dry-run allowed.')
if options.use_commit_queue:
options.send_mail = True
  # For predictable test expectations, force this otherwise lazily-loaded
  # setting to load *now*.
settings.GetIsGerrit()
cl = Changelist(auth_config=auth_config, codereview=options.forced_codereview)
return cl.CMDUpload(options, args, orig_args)
@subcommand.usage('--description=<description file>')
def CMDsplit(parser, args):
"""Splits a branch into smaller branches and uploads CLs.
Creates a branch and uploads a CL for each group of files modified in the
current branch that share a common OWNERS file. In the CL description and
  comment, the string '$directory' is replaced with the directory containing
the shared OWNERS file.
"""
parser.add_option("-d", "--description", dest="description_file",
help="A text file containing a CL description in which "
"$directory will be replaced by each CL's directory.")
parser.add_option("-c", "--comment", dest="comment_file",
help="A text file containing a CL comment.")
parser.add_option("-n", "--dry-run", dest="dry_run", action='store_true',
default=False,
help="List the files and reviewers for each CL that would "
"be created, but don't create branches or CLs.")
options, _ = parser.parse_args(args)
if not options.description_file:
parser.error('No --description flag specified.')
def WrappedCMDupload(args):
return CMDupload(OptionParser(), args)
return split_cl.SplitCl(options.description_file, options.comment_file,
Changelist, WrappedCMDupload, options.dry_run)
@subcommand.usage('DEPRECATED')
def CMDdcommit(parser, args):
"""DEPRECATED: Used to commit the current changelist via git-svn."""
message = ('git-cl no longer supports committing to SVN repositories via '
'git-svn. You probably want to use `git cl land` instead.')
print(message)
return 1
# Two special branches used by git cl land.
MERGE_BRANCH = 'git-cl-commit'
CHERRY_PICK_BRANCH = 'git-cl-cherry-pick'
@subcommand.usage('[upstream branch to apply against]')
def CMDland(parser, args):
"""Commits the current changelist via git.
In case of Gerrit, uses Gerrit REST api to "submit" the issue, which pushes
upstream and closes the issue automatically and atomically.
Otherwise (in case of Rietveld):
Squashes branch into a single commit.
Updates commit message with metadata (e.g. pointer to review).
Pushes the code upstream.
Updates review and closes.
"""
parser.add_option('--bypass-hooks', action='store_true', dest='bypass_hooks',
help='bypass upload presubmit hook')
parser.add_option('-m', dest='message',
help="override review description")
parser.add_option('-f', '--force', action='store_true', dest='force',
help="force yes to questions (don't prompt)")
parser.add_option('-c', dest='contributor',
help="external contributor for patch (appended to " +
"description and used as author for git). Should be " +
"formatted as 'First Last <email@example.com>'")
auth.add_auth_options(parser)
(options, args) = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(auth_config=auth_config)
if not cl.IsGerrit():
parser.error('rietveld is not supported')
if options.message:
# This could be implemented, but it requires sending a new patch to
# Gerrit, as Gerrit unlike Rietveld versions messages with patchsets.
# Besides, Gerrit has the ability to change the commit message on submit
# automatically, thus there is no need to support this option (so far?).
parser.error('-m MESSAGE option is not supported for Gerrit.')
if options.contributor:
parser.error(
'-c CONTRIBUTOR option is not supported for Gerrit.\n'
        'Before uploading a commit to Gerrit, ensure its author field is '
        'the contributor\'s "name <email>". If you can\'t upload such a '
        'commit for review, contact your repository admin and request '
'"Forge-Author" permission.')
if not cl.GetIssue():
DieWithError('You must upload the change first to Gerrit.\n'
' If you would rather have `git cl land` upload '
'automatically for you, see http://crbug.com/642759')
return cl._codereview_impl.CMDLand(options.force, options.bypass_hooks,
options.verbose)
def PushToGitWithAutoRebase(remote, branch, original_description,
git_numberer_enabled, max_attempts=3):
"""Pushes current HEAD commit on top of remote's branch.
Attempts to fetch and autorebase on push failures.
Adds git number footers on the fly.
Returns integer code from last command.
"""
cherry = RunGit(['rev-parse', 'HEAD']).strip()
code = 0
attempts_left = max_attempts
while attempts_left:
attempts_left -= 1
print('Attempt %d of %d' % (max_attempts - attempts_left, max_attempts))
# Fetch remote/branch into local cherry_pick_branch, overriding the latter.
# If fetch fails, retry.
print('Fetching %s/%s...' % (remote, branch))
code, out = RunGitWithCode(
['retry', 'fetch', remote,
'+%s:refs/heads/%s' % (branch, CHERRY_PICK_BRANCH)])
if code:
print('Fetch failed with exit code %d.' % code)
print(out.strip())
continue
print('Cherry-picking commit on top of latest %s' % branch)
RunGitWithCode(['checkout', 'refs/heads/%s' % CHERRY_PICK_BRANCH],
suppress_stderr=True)
parent_hash = RunGit(['rev-parse', 'HEAD']).strip()
code, out = RunGitWithCode(['cherry-pick', cherry])
if code:
print('Your patch doesn\'t apply cleanly to \'%s\' HEAD @ %s, '
'the following files have merge conflicts:' %
(branch, parent_hash))
print(RunGit(['-c', 'core.quotePath=false', 'diff',
'--name-status', '--diff-filter=U']).strip())
print('Please rebase your patch and try again.')
RunGitWithCode(['cherry-pick', '--abort'])
break
commit_desc = ChangeDescription(original_description)
if git_numberer_enabled:
logging.debug('Adding git number footers')
parent_msg = RunGit(['show', '-s', '--format=%B', parent_hash]).strip()
commit_desc.update_with_git_number_footers(parent_hash, parent_msg,
branch)
# Ensure timestamps are monotonically increasing.
timestamp = max(1 + _get_committer_timestamp(parent_hash),
_get_committer_timestamp('HEAD'))
_git_amend_head(commit_desc.description, timestamp)
code, out = RunGitWithCode(
['push', '--porcelain', remote, 'HEAD:%s' % branch])
print(out)
if code == 0:
break
if IsFatalPushFailure(out):
print('Fatal push error. Make sure your .netrc credentials and git '
'user.email are correct and you have push access to the repo.\n'
            'Hint: run the command below to diagnose common Git/Gerrit '
            'credential problems:\n'
' git cl creds-check\n')
break
return code
def IsFatalPushFailure(push_stdout):
"""True if retrying push won't help."""
return '(prohibited by Gerrit)' in push_stdout
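# For example, push output containing '... (prohibited by Gerrit)' signals a
# permissions problem that the fetch-and-rebase retry loop above cannot fix.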
@subcommand.usage('<patch url or issue id or issue url>')
def CMDpatch(parser, args):
"""Patches in a code review."""
parser.add_option('-b', dest='newbranch',
help='create a new branch off trunk for the patch')
parser.add_option('-f', '--force', action='store_true',
help='overwrite state on the current or chosen branch')
parser.add_option('-d', '--directory', action='store', metavar='DIR',
help='change to the directory DIR immediately, '
'before doing anything else. Rietveld only.')
parser.add_option('--reject', action='store_true',
help='failed patches spew .rej files rather than '
'attempting a 3-way merge. Rietveld only.')
parser.add_option('-n', '--no-commit', action='store_true', dest='nocommit',
help='don\'t commit after patch applies. Rietveld only.')
group = optparse.OptionGroup(
parser,
'Options for continuing work on the current issue uploaded from a '
      'different clone (e.g. a different machine). Must be used independently '
      'of the other options. No issue number should be specified, and the '
'branch must have an issue number associated with it')
group.add_option('--reapply', action='store_true', dest='reapply',
help='Reset the branch and reapply the issue.\n'
'CAUTION: This will undo any local changes in this '
'branch')
group.add_option('--pull', action='store_true', dest='pull',
help='Performs a pull before reapplying.')
parser.add_option_group(group)
auth.add_auth_options(parser)
_add_codereview_select_options(parser)
(options, args) = parser.parse_args(args)
_process_codereview_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if options.reapply:
if options.newbranch:
parser.error('--reapply works on the current branch only')
if len(args) > 0:
parser.error('--reapply implies no additional arguments')
cl = Changelist(auth_config=auth_config,
codereview=options.forced_codereview)
if not cl.GetIssue():
parser.error('current branch must have an associated issue')
upstream = cl.GetUpstreamBranch()
if upstream is None:
parser.error('No upstream branch specified. Cannot reset branch')
RunGit(['reset', '--hard', upstream])
if options.pull:
RunGit(['pull'])
return cl.CMDPatchIssue(cl.GetIssue(), options.reject, options.nocommit,
options.directory)
if len(args) != 1 or not args[0]:
parser.error('Must specify issue number or url')
target_issue_arg = ParseIssueNumberArgument(args[0],
options.forced_codereview)
if not target_issue_arg.valid:
parser.error('invalid codereview url or CL id')
cl_kwargs = {
'auth_config': auth_config,
'codereview_host': target_issue_arg.hostname,
'codereview': options.forced_codereview,
}
detected_codereview_from_url = False
if target_issue_arg.codereview and not options.forced_codereview:
detected_codereview_from_url = True
cl_kwargs['codereview'] = target_issue_arg.codereview
cl_kwargs['issue'] = target_issue_arg.issue
# We don't want uncommitted changes mixed up with the patch.
if git_common.is_dirty_git_tree('patch'):
return 1
if options.newbranch:
if options.force:
RunGit(['branch', '-D', options.newbranch],
stderr=subprocess2.PIPE, error_ok=True)
RunGit(['new-branch', options.newbranch])
cl = Changelist(**cl_kwargs)
if cl.IsGerrit():
if options.reject:
parser.error('--reject is not supported with Gerrit codereview.')
if options.directory:
parser.error('--directory is not supported with Gerrit codereview.')
if detected_codereview_from_url:
print('canonical issue/change URL: %s (type: %s)\n' %
(cl.GetIssueURL(), target_issue_arg.codereview))
return cl.CMDPatchWithParsedIssue(target_issue_arg, options.reject,
options.nocommit, options.directory,
options.force)
def GetTreeStatus(url=None):
"""Fetches the tree status and returns either 'open', 'closed',
'unknown' or 'unset'."""
url = url or settings.GetTreeStatusUrl(error_ok=True)
if url:
status = urllib2.urlopen(url).read().lower()
if status.find('closed') != -1 or status == '0':
return 'closed'
elif status.find('open') != -1 or status == '1':
return 'open'
return 'unknown'
return 'unset'
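# For example, a status body of 'Tree is closed (maintenance)' maps to
# 'closed', a body of '1' maps to 'open', and a missing status URL maps to
# 'unset'.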
def GetTreeStatusReason():
"""Fetches the tree status from a json url and returns the message
with the reason for the tree to be opened or closed."""
url = settings.GetTreeStatusUrl()
json_url = urlparse.urljoin(url, '/current?format=json')
connection = urllib2.urlopen(json_url)
status = json.loads(connection.read())
connection.close()
return status['message']
def CMDtree(parser, args):
"""Shows the status of the tree."""
_, args = parser.parse_args(args)
status = GetTreeStatus()
if 'unset' == status:
print('You must configure your tree status URL by running "git cl config".')
return 2
print('The tree is %s' % status)
print()
print(GetTreeStatusReason())
if status != 'open':
return 1
return 0
def CMDtry(parser, args):
"""Triggers try jobs using either BuildBucket or CQ dry run."""
group = optparse.OptionGroup(parser, 'Try job options')
group.add_option(
'-b', '--bot', action='append',
help=('IMPORTANT: specify ONE builder per --bot flag. Use it multiple '
'times to specify multiple builders. ex: '
'"-b win_rel -b win_layout". See '
            'the try server waterfall for the builder names and the tests '
'available.'))
group.add_option(
'-B', '--bucket', default='',
      help=('Buildbucket bucket to send the try requests to.'))
group.add_option(
'-m', '--master', default='',
help=('DEPRECATED, use -B. The try master where to run the builds.'))
group.add_option(
'-r', '--revision',
help='Revision to use for the try job; default: the revision will '
           'be determined by the try recipe that the builder runs, which '
'defaults to HEAD of origin/master')
group.add_option(
'-c', '--clobber', action='store_true', default=False,
help='Force a clobber before building; that is don\'t do an '
'incremental build')
group.add_option(
'--category', default='git_cl_try', help='Specify custom build category.')
group.add_option(
'--project',
help='Override which project to use. Projects are defined '
'in recipe to determine to which repository or directory to '
'apply the patch')
group.add_option(
'-p', '--property', dest='properties', action='append', default=[],
help='Specify generic properties in the form -p key1=value1 -p '
'key2=value2 etc. The value will be treated as '
'json if decodable, or as string otherwise. '
'NOTE: using this may make your try job not usable for CQ, '
'which will then schedule another try job with default properties')
group.add_option(
'--buildbucket-host', default='cr-buildbucket.appspot.com',
help='Host of buildbucket. The default host is %default.')
parser.add_option_group(group)
auth.add_auth_options(parser)
_add_codereview_issue_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if options.master and options.master.startswith('luci.'):
parser.error(
'-m option does not support LUCI. Please pass -B %s' % options.master)
# Make sure that all properties are prop=value pairs.
bad_params = [x for x in options.properties if '=' not in x]
if bad_params:
parser.error('Got properties with missing "=": %s' % bad_params)
if args:
parser.error('Unknown arguments: %s' % args)
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
if not cl.GetIssue():
parser.error('Need to upload first')
if cl.IsGerrit():
# HACK: warm up Gerrit change detail cache to save on RPCs.
cl._codereview_impl._GetChangeDetail(['DETAILED_ACCOUNTS', 'ALL_REVISIONS'])
error_message = cl.CannotTriggerTryJobReason()
if error_message:
parser.error('Can\'t trigger try jobs: %s' % error_message)
if options.bucket and options.master:
parser.error('Only one of --bucket and --master may be used.')
buckets = _get_bucket_map(cl, options, parser)
# If no bots are listed and we couldn't get a list based on PRESUBMIT files,
# then we default to triggering a CQ dry run (see http://crbug.com/625697).
if not buckets:
if options.verbose:
print('git cl try with no bots now defaults to CQ dry run.')
print('Scheduling CQ dry run on: %s' % cl.GetIssueURL())
return cl.SetCQState(_CQState.DRY_RUN)
for builders in buckets.itervalues():
if any('triggered' in b for b in builders):
print('ERROR You are trying to send a job to a triggered bot. This type '
'of bot requires an initial job from a parent (usually a builder). '
'Instead send your job to the parent.\n'
'Bot list: %s' % builders, file=sys.stderr)
return 1
patchset = cl.GetMostRecentPatchset()
# TODO(tandrii): Checking local patchset against remote patchset is only
# supported for Rietveld. Extend it to Gerrit or remove it completely.
if not cl.IsGerrit() and patchset != cl.GetPatchset():
print('Warning: Codereview server has newer patchsets (%s) than most '
'recent upload from local checkout (%s). Did a previous upload '
'fail?\n'
'By default, git cl try uses the latest patchset from '
'codereview, continuing to use patchset %s.\n' %
(patchset, cl.GetPatchset(), patchset))
try:
_trigger_try_jobs(auth_config, cl, buckets, options, patchset)
except BuildbucketResponseException as ex:
print('ERROR: %s' % ex)
return 1
return 0
def CMDtry_results(parser, args):
"""Prints info about try jobs associated with current CL."""
group = optparse.OptionGroup(parser, 'Try job results options')
group.add_option(
'-p', '--patchset', type=int, help='patchset number if not current.')
group.add_option(
'--print-master', action='store_true', help='print master name as well.')
group.add_option(
'--color', action='store_true', default=setup_color.IS_TTY,
help='force color output, useful when piping output.')
group.add_option(
'--buildbucket-host', default='cr-buildbucket.appspot.com',
help='Host of buildbucket. The default host is %default.')
group.add_option(
      '--json', help=('Path of JSON output file to write try job results to, '
'or "-" for stdout.'))
parser.add_option_group(group)
auth.add_auth_options(parser)
_add_codereview_issue_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
auth_config = auth.extract_auth_config_from_options(options)
cl = Changelist(
issue=options.issue, codereview=options.forced_codereview,
auth_config=auth_config)
if not cl.GetIssue():
parser.error('Need to upload first')
patchset = options.patchset
if not patchset:
patchset = cl.GetMostRecentPatchset()
if not patchset:
parser.error('Codereview doesn\'t know about issue %s. '
'No access to issue or wrong issue number?\n'
'Either upload first, or pass --patchset explicitly' %
cl.GetIssue())
# TODO(tandrii): Checking local patchset against remote patchset is only
# supported for Rietveld. Extend it to Gerrit or remove it completely.
if not cl.IsGerrit() and patchset != cl.GetPatchset():
print('Warning: Codereview server has newer patchsets (%s) than most '
'recent upload from local checkout (%s). Did a previous upload '
'fail?\n'
'By default, git cl try-results uses the latest patchset from '
'codereview, continuing to use patchset %s.\n' %
(patchset, cl.GetPatchset(), patchset))
try:
jobs = fetch_try_jobs(auth_config, cl, options.buildbucket_host, patchset)
except BuildbucketResponseException as ex:
print('Buildbucket error: %s' % ex)
return 1
if options.json:
write_try_results_json(options.json, jobs)
else:
print_try_jobs(options, jobs)
return 0
@subcommand.usage('[new upstream branch]')
def CMDupstream(parser, args):
"""Prints or sets the name of the upstream branch, if any."""
_, args = parser.parse_args(args)
if len(args) > 1:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist()
if args:
# One arg means set upstream branch.
branch = cl.GetBranch()
RunGit(['branch', '--set-upstream-to', args[0], branch])
cl = Changelist()
print('Upstream branch set to %s' % (cl.GetUpstreamBranch(),))
# Clear configured merge-base, if there is one.
git_common.remove_merge_base(branch)
else:
print(cl.GetUpstreamBranch())
return 0
def CMDweb(parser, args):
"""Opens the current CL in the web browser."""
_, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
issue_url = Changelist().GetIssueURL()
if not issue_url:
print('ERROR No issue to open', file=sys.stderr)
return 1
webbrowser.open(issue_url)
return 0
def CMDset_commit(parser, args):
"""Sets the commit bit to trigger the Commit Queue."""
parser.add_option('-d', '--dry-run', action='store_true',
help='trigger in dry run mode')
parser.add_option('-c', '--clear', action='store_true',
help='stop CQ run, if any')
auth.add_auth_options(parser)
_add_codereview_issue_select_options(parser)
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
if options.dry_run and options.clear:
parser.error('Make up your mind: both --dry-run and --clear not allowed')
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
if options.clear:
state = _CQState.NONE
elif options.dry_run:
state = _CQState.DRY_RUN
else:
state = _CQState.COMMIT
if not cl.GetIssue():
parser.error('Must upload the issue first')
cl.SetCQState(state)
return 0
def CMDset_close(parser, args):
"""Closes the issue."""
_add_codereview_issue_select_options(parser)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
_process_codereview_issue_select_options(parser, options)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist(auth_config=auth_config, issue=options.issue,
codereview=options.forced_codereview)
# Ensure there actually is an issue to close.
if not cl.GetIssue():
DieWithError('ERROR No issue to close')
cl.CloseIssue()
return 0
def CMDdiff(parser, args):
"""Shows differences between local tree and last upload."""
parser.add_option(
'--stat',
action='store_true',
dest='stat',
help='Generate a diffstat')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist(auth_config=auth_config)
issue = cl.GetIssue()
branch = cl.GetBranch()
if not issue:
DieWithError('No issue found for current branch (%s)' % branch)
base = cl._GitGetBranchConfigValue('last-upload-hash')
if not base:
base = cl._GitGetBranchConfigValue('gerritsquashhash')
if not base:
detail = cl._GetChangeDetail(['CURRENT_REVISION', 'CURRENT_COMMIT'])
revision_info = detail['revisions'][detail['current_revision']]
fetch_info = revision_info['fetch']['http']
RunGit(['fetch', fetch_info['url'], fetch_info['ref']])
base = 'FETCH_HEAD'
cmd = ['git', 'diff']
if options.stat:
cmd.append('--stat')
cmd.append(base)
subprocess2.check_call(cmd)
return 0
def CMDowners(parser, args):
"""Finds potential owners for reviewing."""
parser.add_option(
'--no-color',
action='store_true',
help='Use this option to disable color output')
parser.add_option(
'--batch',
action='store_true',
help='Do not run interactively, just suggest some')
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
author = RunGit(['config', 'user.email']).strip() or None
cl = Changelist(auth_config=auth_config)
if args:
if len(args) > 1:
parser.error('Unknown args')
base_branch = args[0]
else:
# Default to diffing against the common ancestor of the upstream branch.
base_branch = cl.GetCommonAncestorWithUpstream()
change = cl.GetChange(base_branch, None)
affected_files = [f.LocalPath() for f in change.AffectedFiles()]
if options.batch:
db = owners.Database(change.RepositoryRoot(), file, os.path)
print('\n'.join(db.reviewers_for(affected_files, author)))
return 0
return owners_finder.OwnersFinder(
affected_files,
change.RepositoryRoot(),
author,
cl.GetReviewers(),
fopen=file, os_path=os.path,
disable_color=options.no_color,
override_files=change.OriginalOwnersFiles()).run()
def BuildGitDiffCmd(diff_type, upstream_commit, args):
"""Generates a diff command."""
# Generate diff for the current branch's changes.
diff_cmd = ['-c', 'core.quotePath=false', 'diff',
'--no-ext-diff', '--no-prefix', diff_type,
upstream_commit, '--']
if args:
for arg in args:
if os.path.isdir(arg) or os.path.isfile(arg):
diff_cmd.append(arg)
else:
DieWithError('Argument "%s" is not a file or a directory' % arg)
return diff_cmd
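# Example: BuildGitDiffCmd('--name-only', 'abc123', []) returns
#   ['-c', 'core.quotePath=false', 'diff', '--no-ext-diff', '--no-prefix',
#    '--name-only', 'abc123', '--']
# which RunGit() turns into the corresponding 'git diff' invocation.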
def MatchingFileType(file_name, extensions):
"""Returns true if the file name ends with one of the given extensions."""
return bool([ext for ext in extensions if file_name.lower().endswith(ext)])
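# e.g. MatchingFileType('Foo.CC', ['.cc', '.h']) is True; the comparison is
# case-insensitive on the file name.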
@subcommand.usage('[files or directories to diff]')
def CMDformat(parser, args):
"""Runs auto-formatting tools (clang-format etc.) on the diff."""
CLANG_EXTS = ['.cc', '.cpp', '.h', '.m', '.mm', '.proto', '.java']
GN_EXTS = ['.gn', '.gni', '.typemap']
parser.add_option('--full', action='store_true',
help='Reformat the full content of all touched files')
parser.add_option('--dry-run', action='store_true',
help='Don\'t modify any file on disk.')
parser.add_option('--python', action='store_true',
help='Format python code with yapf (experimental).')
parser.add_option('--js', action='store_true',
help='Format javascript code with clang-format.')
parser.add_option('--diff', action='store_true',
help='Print diff to stdout rather than modifying files.')
parser.add_option('--presubmit', action='store_true',
help='Used when running the script from a presubmit.')
opts, args = parser.parse_args(args)
# Normalize any remaining args against the current path, so paths relative to
# the current directory are still resolved as expected.
args = [os.path.join(os.getcwd(), arg) for arg in args]
# git diff generates paths against the root of the repository. Change
# to that directory so clang-format can find files even within subdirs.
rel_base_path = settings.GetRelativeRoot()
if rel_base_path:
os.chdir(rel_base_path)
# Grab the merge-base commit, i.e. the upstream commit of the current
# branch when it was created or the last time it was rebased. This is
# to cover the case where the user may have called "git fetch origin",
# moving the origin branch to a newer commit, but hasn't rebased yet.
upstream_commit = None
cl = Changelist()
upstream_branch = cl.GetUpstreamBranch()
if upstream_branch:
upstream_commit = RunGit(['merge-base', 'HEAD', upstream_branch])
upstream_commit = upstream_commit.strip()
if not upstream_commit:
DieWithError('Could not find base commit for this branch. '
'Are you in detached state?')
changed_files_cmd = BuildGitDiffCmd('--name-only', upstream_commit, args)
diff_output = RunGit(changed_files_cmd)
diff_files = diff_output.splitlines()
# Filter out files deleted by this CL
diff_files = [x for x in diff_files if os.path.isfile(x)]
if opts.js:
CLANG_EXTS.append('.js')
clang_diff_files = [x for x in diff_files if MatchingFileType(x, CLANG_EXTS)]
python_diff_files = [x for x in diff_files if MatchingFileType(x, ['.py'])]
dart_diff_files = [x for x in diff_files if MatchingFileType(x, ['.dart'])]
gn_diff_files = [x for x in diff_files if MatchingFileType(x, GN_EXTS)]
top_dir = os.path.normpath(
RunGit(["rev-parse", "--show-toplevel"]).rstrip('\n'))
# Set to 2 to signal to CheckPatchFormatted() that this patch isn't
# formatted. This is used to block during the presubmit.
return_value = 0
if clang_diff_files:
# Locate the clang-format binary in the checkout
try:
clang_format_tool = clang_format.FindClangFormatToolInChromiumTree()
except clang_format.NotFoundError as e:
DieWithError(e)
if opts.full:
cmd = [clang_format_tool]
if not opts.dry_run and not opts.diff:
cmd.append('-i')
stdout = RunCommand(cmd + clang_diff_files, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
else:
env = os.environ.copy()
env['PATH'] = str(os.path.dirname(clang_format_tool))
try:
script = clang_format.FindClangFormatScriptInChromiumTree(
'clang-format-diff.py')
except clang_format.NotFoundError as e:
DieWithError(e)
cmd = [sys.executable, script, '-p0']
if not opts.dry_run and not opts.diff:
cmd.append('-i')
diff_cmd = BuildGitDiffCmd('-U0', upstream_commit, clang_diff_files)
diff_output = RunGit(diff_cmd)
stdout = RunCommand(cmd, stdin=diff_output, cwd=top_dir, env=env)
if opts.diff:
sys.stdout.write(stdout)
if opts.dry_run and len(stdout) > 0:
return_value = 2
# Similar code to above, but using yapf on .py files rather than clang-format
# on C/C++ files
if opts.python:
yapf_tool = gclient_utils.FindExecutable('yapf')
if yapf_tool is None:
DieWithError('yapf not found in PATH')
if opts.full:
if python_diff_files:
cmd = [yapf_tool]
if not opts.dry_run and not opts.diff:
cmd.append('-i')
stdout = RunCommand(cmd + python_diff_files, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
else:
# TODO(sbc): yapf --lines mode still has some issues.
# https://github.com/google/yapf/issues/154
DieWithError('--python currently only works with --full')
  # Dart's formatter does not have the nice property of only operating on
  # modified chunks, so hard-code full-file formatting.
if dart_diff_files:
try:
command = [dart_format.FindDartFmtToolInChromiumTree()]
if not opts.dry_run and not opts.diff:
command.append('-w')
command.extend(dart_diff_files)
stdout = RunCommand(command, cwd=top_dir)
if opts.dry_run and stdout:
return_value = 2
except dart_format.NotFoundError as e:
print('Warning: Unable to check Dart code formatting. Dart SDK not '
'found in this checkout. Files in other languages are still '
'formatted.')
# Format GN build files. Always run on full build files for canonical form.
if gn_diff_files:
cmd = ['gn', 'format']
if opts.dry_run or opts.diff:
cmd.append('--dry-run')
for gn_diff_file in gn_diff_files:
gn_ret = subprocess2.call(cmd + [gn_diff_file],
shell=sys.platform == 'win32',
cwd=top_dir)
if opts.dry_run and gn_ret == 2:
return_value = 2 # Not formatted.
elif opts.diff and gn_ret == 2:
        # TODO: this should compute and print the actual diff.
print("This change has GN build file diff for " + gn_diff_file)
elif gn_ret != 0:
# For non-dry run cases (and non-2 return values for dry-run), a
# nonzero error code indicates a failure, probably because the file
# doesn't parse.
DieWithError("gn format failed on " + gn_diff_file +
"\nTry running 'gn format' on this file manually.")
# Skip the metrics formatting from the global presubmit hook. These files have
# a separate presubmit hook that issues an error if the files need formatting,
# whereas the top-level presubmit script merely issues a warning. Formatting
# these files is somewhat slow, so it's important not to duplicate the work.
if not opts.presubmit:
for xml_dir in GetDirtyMetricsDirs(diff_files):
tool_dir = os.path.join(top_dir, xml_dir)
cmd = [os.path.join(tool_dir, 'pretty_print.py'), '--non-interactive']
if opts.dry_run or opts.diff:
cmd.append('--diff')
stdout = RunCommand(cmd, cwd=top_dir)
if opts.diff:
sys.stdout.write(stdout)
if opts.dry_run and stdout:
return_value = 2 # Not formatted.
return return_value
def GetDirtyMetricsDirs(diff_files):
xml_diff_files = [x for x in diff_files if MatchingFileType(x, ['.xml'])]
metrics_xml_dirs = [
os.path.join('tools', 'metrics', 'actions'),
os.path.join('tools', 'metrics', 'histograms'),
os.path.join('tools', 'metrics', 'rappor'),
os.path.join('tools', 'metrics', 'ukm')]
for xml_dir in metrics_xml_dirs:
if any(file.startswith(xml_dir) for file in xml_diff_files):
yield xml_dir
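# Hedged illustration (hypothetical file names, not from a real checkout):
# if diff_files contains 'tools/metrics/histograms/histograms.xml' and
# 'chrome/foo.cc', GetDirtyMetricsDirs yields only
# os.path.join('tools', 'metrics', 'histograms').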
@subcommand.usage('<codereview url or issue id>')
def CMDcheckout(parser, args):
"""Checks out a branch associated with a given Rietveld or Gerrit issue."""
_, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
return 1
issue_arg = ParseIssueNumberArgument(args[0])
if not issue_arg.valid:
parser.error('invalid codereview url or CL id')
target_issue = str(issue_arg.issue)
def find_issues(issueprefix):
output = RunGit(['config', '--local', '--get-regexp',
r'branch\..*\.%s' % issueprefix],
error_ok=True)
for key, issue in [x.split() for x in output.splitlines()]:
if issue == target_issue:
yield re.sub(r'branch\.(.*)\.%s' % issueprefix, r'\1', key)
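# Hedged example (illustrative output, not from a real repo): a config
# line such as 'branch.feature-x.gerritissue 12345', with target_issue
# '12345', makes find_issues yield the branch name 'feature-x'.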
branches = []
for cls in _CODEREVIEW_IMPLEMENTATIONS.values():
branches.extend(find_issues(cls.IssueConfigKey()))
if len(branches) == 0:
print('No branch found for issue %s.' % target_issue)
return 1
if len(branches) == 1:
RunGit(['checkout', branches[0]])
else:
print('Multiple branches match issue %s:' % target_issue)
for i in range(len(branches)):
print('%d: %s' % (i, branches[i]))
which = raw_input('Choose by index: ')
try:
RunGit(['checkout', branches[int(which)]])
except (IndexError, ValueError):
print('Invalid selection, not checking out any branch.')
return 1
return 0
def CMDlol(parser, args):
# This command is intentionally undocumented.
print(zlib.decompress(base64.b64decode(
'eNptkLEOwyAMRHe+wupCIqW57v0Vq84WqWtXyrcXnCBsmgMJ+/SSAxMZgRB6NzE'
'E2ObgCKJooYdu4uAQVffUEoE1sRQLxAcqzd7uK2gmStrll1ucV3uZyaY5sXyDd9'
'JAnN+lAXsOMJ90GANAi43mq5/VeeacylKVgi8o6F1SC63FxnagHfJUTfUYdCR/W'
'Ofe+0dHL7PicpytKP750Fh1q2qnLVof4w8OZWNY')))
return 0
class OptionParser(optparse.OptionParser):
"""Creates the option parse and add --verbose support."""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(
self, *args, prog='git cl', version=__version__, **kwargs)
self.add_option(
'-v', '--verbose', action='count', default=0,
help='Use 2 times for more debugging info')
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=levels[min(options.verbose, len(levels) - 1)],
format='[%(levelname).1s%(asctime)s %(process)d %(thread)d '
'%(filename)s] %(message)s')
return options, args
def main(argv):
if sys.hexversion < 0x02060000:
print('\nYour python version %s is unsupported, please upgrade.\n' %
(sys.version.split(' ', 1)[0],), file=sys.stderr)
return 2
# Reload settings.
global settings
settings = Settings()
colorize_CMDstatus_doc()
dispatcher = subcommand.CommandDispatcher(__name__)
try:
return dispatcher.execute(OptionParser(), argv)
except auth.AuthenticationError as e:
DieWithError(str(e))
except urllib2.HTTPError as e:
if e.code != 500:
raise
DieWithError(
('AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
'and retry or visit go/isgaeup.\n%s') % (e.code, str(e)))
return 0
if __name__ == '__main__':
# These affect sys.stdout so do it outside of main() to simplify mocks in
# unit testing.
fix_encoding.fix_encoding()
setup_color.init()
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
Shouqun/node-gn
|
tools/depot_tools/git_cl.py
|
Python
|
mit
| 225,299
|
[
"VisIt"
] |
e230671946e96c329091bbccca0c02f921c448741572726dbe54aa7b5461f576
|
#._cv_part guppy.heapy.View
class Horizon:
def __init__(self, mod):
self.mod = mod
self._hiding_tag_ = mod._hiding_tag_
# Make preallocations of things that will be needed for news()
self.retset = self.mod.retset
self.hv = mod.hv
self.exc_info = self.mod._root.sys.exc_info
self.iso = self.mod.iso
str(self.retset(self.iso(1,[],(),{}, self.__dict__)) -
self.iso(()))
mod.hv.heap
mod.enter
mod.gc.collect()
self.hv_horizon = mod.heapyc.Horizon(self.hv)
def news(self):
r = self.retset(self.hv_horizon.news(self.mod.enter(self.hv.heap)))
return r
class ClearCallback(object):
__slots__ = 'callback',
def __init__(self, callback):
self.callback = callback
def __call__(self, wr):
if self.callback is not None:
self.callback(wr)
else:
print 'No callback'
class Gchook_type(object):
__slots__ = 'x', '__weakref__', 'cb'
def __init__(g):
g.x = g
class ObservationList(list):
__slots__ = '_hiding_tag_',
def __init__(self, iterable, hiding_tag):
list.__init__(self, iterable)
self._hiding_tag_ = hiding_tag
class _GLUECLAMP_:
_imports_ = (
'_parent.ImpSet:immnodeset',
'_parent.ImpSet:immnodeset_union',
'_parent.ImpSet:mutnodeset',
'_parent.ImpSet:NodeSet',
'_parent.UniSet:nodeset_adapt',
'_parent.UniSet:retset',
'_parent.Use:idset',
'_parent.Use:iso',
'_parent.Use:Type',
'_root:gc',
'_root:types',
)
_chgable_ = ('is_rg_update_all', 'referrers_lock', '_is_clear_drg_enabled')
_setable_ = ('_hiding_tag_','target', 'is_hiding_calling_interpreter',
)
is_hiding_calling_interpreter = False
is_rg_update_all = False
_is_clear_drg_enabled = 1 # Flag mainly for test, Note Apr 19 2005
_hiding_tag_ = []
#opt_rg_update_all = True
_uniset_exports = (
# 'dominos',
# 'domisize',
'imdom',
# 'indisize',
# 'referents',
# 'referrers',
'referrers_gc',
)
def _get__clear_hook(self):
return self.mutnodeset()
def clear_check(self):
ch = self._clear_hook
try:
wr = list(ch)[0]
except IndexError:
self.clear_setup()
else:
c = wr()
if c is None:
self.clear_setup()
elif self._root.sys.getrefcount(c) > 3:
print 'GC hook object was referred to by something else!'
self.clear_callback(wr)
c.cb.callback = None
def clear_callback(self, wr):
# print 'clear callback'
self._clear_hook.clear()
for m in self.clear_methods:
m()
self.clear_setup()
def clear_setup(self):
ch = self._clear_hook
ch.clear()
c=self.gchook_type()
cb = self.ClearCallback(self.clear_callback)
c.cb = cb
ch.add(self._root.weakref.ref(c, cb))
def _get_clear_methods(self):
return []
def clear_register_method(self, m):
self.clear_methods.append(m)
self.clear_check()
def _get_dict_ownership(self):
drg = self.nodegraph()
def clear_drg():
# print 'clear_drg?'
if drg.is_sorted and self._is_clear_drg_enabled:
# print 'yes'
drg.clear()
else:
# print 'no, enabled = ', self._is_clear_drg_enabled
pass
self.clear_register_method(clear_drg)
return drg
def _get_gchook_type(self):
return Gchook_type
def _get_heapdef_modules(self):
# We touch self.heapyc to import it & its dependent guppy.sets;
# this is kinda specialcase-hacky but see Notes Apr 8 2005.
self.heapyc
return self.target.sys.modules.items()
def _get_heapdefs(self):
heapdefs = []
for n, m in self.heapdef_modules:
try:
hd = getattr(m, '_NyHeapDefs_')
except:
continue
heapdefs.append(hd)
return tuple(heapdefs)
def _get_heapyc(self): return self._parent.heapyc
def _get_hv(self):
hv = self.new_hv(_hiding_tag_=self._hiding_tag_,
is_hiding_calling_interpreter = self.is_hiding_calling_interpreter)
return hv
def _get_norefer(self): return self.mutnodeset()
def _get_referrers_targets(self): return []
def _get_rg(self):
rg = self.nodegraph()
self.clear_register_method(self._clear_rg)
return rg
def _clear_rg(self):
if self.referrers_lock:
return
rg = self.rg
if rg.is_sorted:
#print 'clearing', rg
rg.clear()
self.norefer.clear()
else:
#print 'no clear', rg, len(rg), len(self.norefer)
pass
def _get_referrers_lock(self) : return 0
def _get_root(self): return self.heapyc.RootState
def _get_target(self): return self._parent.Target.Target()
def _set_root(self, root):
self.clear_retainers()
self.hv.root = root
def call_with_referrers(self, X, f):
self.referrers_lock += 1
try:
self.update_referrers(X)
return f(X)
finally:
self.referrers_lock -= 1
def clear_retainers(self):
"""G.clear_retainers()
Clear the retainer graph V.rg.
"""
self.rg.clear()
self.norefer.clear()
def dominos(self, X):
"""dominos(X) -> idset
Return the dominos of a set of objects X. The dominos of X is the set
of objects that are dominated by X; that is, the objects that will be
deallocated, directly or indirectly, when the objects in X are deallocated."""
return self.dominos_tuple((X,))[0]
def dominos_tuple(self, X):
"""V.dominos_tuple(X) -> tuple of idsets
Return a tuple of dominos for the tuple of sets of objects X."""
D_ = [self.nodeset_adapt(x) for x in X] # Convert to naming like in the appendix
T = self.hv.reachable
S = self.immnodeset([self.root])
D = self.immnodeset_union(D_)
W = T(S, D)
return tuple([self.retset(T(Di, W) - T(D, W | Di)) for Di in D_])
def domisize(self, X):
"""domisize(X) -> int
Return the dominated size of a set of objects X. The dominated size of X
is the total size of memory that will become deallocated, directly or
indirectly, when the objects in X are deallocated. See also: indisize."""
return self.domisize_tuple((X,))[0]
def domisize_tuple(self, X):
""""V.domisize_tuple(X) -> tuple of ints
Return a tuple of dominated sizes for the tuple of sets of objects X."""
return tuple([self.indisize(dominos_i)
for dominos_i in self.dominos_tuple(X)])
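# Hedged usage sketch (hypothetical session; V denotes this glue instance):
# >>> x = [[], []]
# >>> d = V.dominos(V.iso(x))  # x plus the inner lists only x keeps alive
# >>> V.domisize(V.iso(x)) == V.indisize(d)
# True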
def enter(self, func):
if self.hv.is_hiding_calling_interpreter:
self.hv.limitframe = None
elif self.hv.limitframe is not None:
return func()
else:
import sys
try:
1/0
except:
type, value, traceback = sys.exc_info()
limitframe = traceback.tb_frame.f_back.f_back
sys.last_traceback=None
sys.exc_clear()
del type,value,traceback
self.hv.limitframe = limitframe
try:
retval = func()
finally:
self.hv.limitframe = None
return retval
def gchook(self, func):
c=self.gchook_type()
ho = self.mutnodeset()
def cb(wr):
func()
ho.clear()
c=self.gchook_type()
ho.add(self._root.weakref.ref(c, cb))
ho.add(self._root.weakref.ref(c, cb))
return self.mutnodeset([ho])
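# Hedged note on gchook: func is invoked via a weakref callback each time
# the current hook object is garbage-collected (the self-referencing
# Gchook_type instance only becomes collectable during a gc pass), and the
# callback re-arms itself by creating a fresh hook object. The caller must
# keep the returned set alive or the chain stops. (Sketch of intent, not
# an API guarantee.)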
def heapg(self, rma=1):
# Almost the same as gc.get_objects(),
# except:
# 1. calls gc.collect() first (twice)
# 2. removes objects of type gchook
# 3. removes objects of type ClearCallback
# 4. removes all objects of type types.FrameType
# 5. removes all objects of weakref type
# 6. If rma = 1,
# removes all that is in the reachable heap
# except what is in the set itself.
# . wraps the result in an IdSet
self.gc.collect()
self.gc.collect()
objs = self.gc.get_objects()
cli = self.hv.cli_type()
objs = cli.select(objs, self.gchook_type, '!=')
objs = cli.select(objs, ClearCallback, '!=')
objs = cli.select(objs, self._root.types.FrameType, '!=')
objs = cli.select(objs, self._root.weakref.ReferenceType, '!=')
r = self.retset(objs)
del cli, objs
if rma:
r = (r - self.idset(self.heapyc.HeapView(
self.heapyc.RootState,
self.heapdefs
).reachable_x(
self.immnodeset([self.heapyc.RootState]),
self.observation_containers()
))
)
return r
def heapu(self, rma=1):
self.gc.collect()
self.gc.collect()
r = self.gc.get_objects()
exclude = (self.Type(self.gchook_type) |
self.Type(ClearCallback)
)
if rma:
exclude |= self.idset(self.heapyc.HeapView(
self.heapyc.RootState,
self.heapdefs
).reachable_x(
self.immnodeset([self.heapyc.RootState]),
self.immnodeset([r])
))
r = self.retset(r) - exclude
ref = r.referents - exclude
while not ref <= r:
r |= ref
ref = ref.referents - exclude
del ref, exclude
r = r.byclass # Avoid memoizing for complicated classification
return r
def heap(self):
"""V.heap() -> idset
Return the set of objects in the visible heap.
"""
global heap_one_time_initialized
# This is to make sure that, the first time heap() is called,
# the returned heap already contains things that would otherwise
# be loaded later by common operations.
if not heap_one_time_initialized:
heap_one_time_initialized = 1
repr(self.idset(self.hv.heap()))
x=[]
repr(self.iso(x).shpaths)
repr(self.iso(x).rp)
self.gc.collect() # Sealing a leak at particular usage ; Notes Apr 13 2005
# Exclude the current frame by encapsulating in enter(). Note Apr 20 2005
return self.enter(lambda:
self.idset(self.hv.heap()))
def horizon(self):
return self.Horizon(self)
def imdom(self, X):
"""imdom(X) -> idset
Return the immediate dominators of a set of objects X. The immediate
dominators are a subset of the referrers, including only those
referrers that can be reached directly, avoiding any other referrer."""
pred = self.nodeset_adapt(self.referrers(X))
visit = self.hv.reachable_x(self.immnodeset([self.root]), pred)
return self.retset(pred & visit)
def indisize(self, X):
"""indisize(X) -> int
Return the sum of the individual sizes of the set of objects X.
The individual size of an object is the size of memory that is
allocated directly in the object, not including any externally
visible subobjects. See also: domisize."""
return self.hv.indisize_sum(self.nodeset_adapt(X))
def new_hv(self, _hiding_tag_=None, is_hiding_calling_interpreter=False,
heapdefs=None, root=None, gchook_type=None):
if heapdefs is None:
heapdefs = self.heapdefs
if root is None:
root = self.root
if gchook_type is None:
gchook_type = self.gchook_type
hv = self.heapyc.HeapView(root, heapdefs)
hv._hiding_tag_ = _hiding_tag_
hv.is_hiding_calling_interpreter = is_hiding_calling_interpreter
hv.register_hidden_exact_type(gchook_type)
#hv.register__hiding_tag__type(self._parent.UniSet.UniSet)
hv.register__hiding_tag__type(self._parent.UniSet.Kind)
hv.register__hiding_tag__type(self._parent.UniSet.IdentitySetMulti)
hv.register__hiding_tag__type(self._parent.UniSet.IdentitySetSingleton)
return hv
def nodegraph(self, iterable = None, is_mapping = False):
ng = self.heapyc.NodeGraph(iterable, is_mapping)
ng._hiding_tag_ = self._hiding_tag_
return ng
def obj_at(self, addr):
try:
return self.immnodeset(self.hv.static_types).obj_at(addr)
except ValueError:
pass
try:
return self.immnodeset(self.gc.get_objects()).obj_at(addr)
except ValueError:
pass
try:
return self.immnodeset(self.hv.heap()).obj_at(addr)
except ValueError:
raise ValueError, 'No object found at address %s'%hex(addr)
def observation_containers(self):
# Return the current set of 'observation containers'
# as discussed in Notes Oct 27 2005.
# returns a nodeset, not an idset, to avoid recursive references
objs = self.gc.get_objects()
cli = self.hv.cli_type()
objs = (cli.select(objs, self.NodeSet, '<=') +
cli.select(objs, ObservationList, '<=') +
cli.select(objs, self._parent.UniSet.IdentitySetSingleton, '<=')
)
r = self.immnodeset([x for x in objs if getattr(x, '_hiding_tag_', None) is self._hiding_tag_])
del x, cli, objs
return r
def observation_list(self, iterable=()):
# Return an ObservationList object with our _hiding_tag_
return ObservationList(iterable, self._hiding_tag_)
def referents(self, X):
"""V.referents(X) -> idset
Return the set of objects that are directly referred to by
any of the objects in the set X."""
return self.retset(self.hv.relimg(self.nodeset_adapt(X)))
def referrers(self, X):
"""V.referrers(X) -> idset
Return the set of objects that directly refer to
any of the objects in the set X."""
X = self.nodeset_adapt(X)
if self.is_rg_update_all and self.root is self.heapyc.RootState:
if not (self.rg.domain_covers(X) or
self.rg.domain_covers(X - self.norefer)):
# print 'new update old len = %d'%len(self.rg)
# print self.idset(X-self.rg.get_domain())
self.rg.clear()
import gc
gc.collect()
self.hv.update_referrers_completely(self.rg)
addnoref = X - self.rg.get_domain()
#print 'done 1', len(X), len(addnoref)
self.norefer |= addnoref
#print 'done 1a', len(self.rg)
else:
# print 'X', X, len(X)
# print self.idset(X)
Y = self.mutnodeset(X)
Y -= self.norefer
if not self.rg.domain_covers(Y):
for wt in self.referrers_targets:
t = wt()
if t is not None:
Y |= t.set.nodes
if 0:
print 'old update'
print self.idset(Y - self.rg.get_domain())
Y |= self.rg.get_domain()
self.rg.clear()
self.hv.update_referrers(self.rg, Y)
self.norefer.clear()
self.norefer |= (X | Y | self.rg.get_range())
self.norefer -= self.rg.get_domain()
Y = self.mutnodeset(X) - self.norefer
if not self.rg.domain_covers(Y):
print 'update_referrers failed'
print 'Y - domain of rg:'
print self.idset(Y - self.rg.get_domain())
from pdb import pm, set_trace
set_trace()
Y = None
X = self.rg.relimg(X)
X = self.immnodeset(X) - [None]
X = self.retset(X)
return X
def referrers_gc(self, X):
"""V.referrers_gc(X) -> idset
Return the set of objects that directly refer to
any of the objects in the set X.
This differs from referrers in that it uses the
gc module's view of the referrers. This is more or less
valid depending on viewpoint.
"""
X = tuple(self.nodeset_adapt(X))
return self.idset(self.gc.get_referrers(*X)) - self.iso(X)
def referrers_add_target(self, t):
def remove(wr):
self.referrers_targets.remove(wr)
wr = self._root.weakref.ref(t, remove)
self.referrers_targets.append(wr)
def update_referrers(self, X):
"""V.update_referrers(X)
Update the view V from the set X. X must be adaptable to NodeSet. V.rg is
updated so that in addition to its previous mapping, it will also contain
mappings for the elements of X to their referrers, from them to their
referrers and so on.
"""
self.referrers(X)
def prime_builtin_types():
# Make sure builtin types have been completely allocated
# with all method descriptors etc.
# so subsequent events will not report spurious, confusing allocations.
# This should need to be done only once.
# (Or whenever a new (extension) module is imported??)
# The problem & solution is further discussed in Notes Nov 9 2005.
import types
import guppy.heapy.heapyc
import guppy.sets.setsc
import sys
import weakref
for mod in sys.modules.values():
if mod is None:
continue
for t in mod.__dict__.values():
if isinstance(t, type):
dir(t)
# Other type(s)
for t in [type(iter([])), type(iter(())),
]:
dir(t)
prime_builtin_types()
# The following global variable is used by heap()
# to do extra initializations the first time it is called.
# We want to defer those imports and initializations
# until heap() is actually called.
heap_one_time_initialized = 0
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/desktop/core/ext-py/guppy-0.1.10/guppy/heapy/View.py
|
Python
|
gpl-2.0
| 15,803
|
[
"VisIt"
] |
bcfdce2c8280a7ea6da030ba6fb1b2bf2ab17a85776cfc36c91716b12741ee4f
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
import os.path
from collections import OrderedDict
from commoncode.testcase import FileBasedTesting
from packagedcode.models import AssertedLicense
from packagedcode import models
from packagedcode.models import Package
from packagedcode.models import Party
class TestModels(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_model_creation_and_dump(self):
package = models.AndroidApp(name='someAndroidPackage')
expected = [
('type', u'Android app'),
('name', u'someAndroidPackage'),
('version', None),
('primary_language', u'Java'),
('packaging', u'archive'),
('summary', None),
('description', None),
('payload_type', None),
('size', None),
('release_date', None),
('authors', []),
('maintainers', []),
('contributors', []),
('owners', []),
('packagers', []),
('distributors', []),
('vendors', []),
('keywords', []),
('keywords_doc_url', None),
('metafile_locations', []),
('metafile_urls', []),
('homepage_url', None),
('notes', None),
('download_urls', []),
('download_sha1', None),
('download_sha256', None),
('download_md5', None),
('bug_tracking_url', None),
('support_contacts', []),
('code_view_url', None),
('vcs_tool', None),
('vcs_repository', None),
('vcs_revision', None),
('copyright_top_level', None),
('copyrights', []),
('asserted_licenses', []),
('legal_file_locations', []),
('license_expression', None),
('license_texts', []),
('notice_texts', []),
('dependencies', {}),
('related_packages', [])
]
assert expected == package.to_dict().items()
package.validate()
def test_validate_package(self):
package = Package(
type='RPM',
name='Sample',
summary='Some package',
payload_type='source',
authors=[Party(
name='Some Author',
email='some@email.com'
)
],
keywords=['some', 'keyword'],
vcs_tool='git',
asserted_licenses=[
AssertedLicense(
license='apache-2.0'
)
],
)
expected = [
('type', 'RPM'),
('name', u'Sample'),
('version', None),
('primary_language', None),
('packaging', None),
('summary', u'Some package'),
('description', None),
('payload_type', u'source'),
('size', None),
('release_date', None),
('authors', [OrderedDict([('type', None), ('name', u'Some Author'), ('email', u'some@email.com'), ('url', None)])]),
('maintainers', []),
('contributors', []),
('owners', []),
('packagers', []),
('distributors', []),
('vendors', []),
('keywords', [u'some', u'keyword']),
('keywords_doc_url', None),
('metafile_locations', []),
('metafile_urls', []),
('homepage_url', None),
('notes', None),
('download_urls', []),
('download_sha1', None),
('download_sha256', None),
('download_md5', None),
('bug_tracking_url', None),
('support_contacts', []),
('code_view_url', None),
('vcs_tool', u'git'),
('vcs_repository', None),
('vcs_revision', None),
('copyright_top_level', None),
('copyrights', []),
('asserted_licenses', [OrderedDict([('license', u'apache-2.0'), ('url', None), ('text', None), ('notice', None)])]),
('legal_file_locations', []),
('license_expression', None),
('license_texts', []),
('notice_texts', []),
('dependencies', {}),
('related_packages', [])
]
assert expected == package.to_dict().items()
package.validate()
|
yashdsaraf/scancode-toolkit
|
tests/packagedcode/test_package_models.py
|
Python
|
apache-2.0
| 5,875
|
[
"VisIt"
] |
9fd4da740547b96187cda572bf4f95e3b6cef55175b18b8979eac6b0c67b512c
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import unittest
import os
import numpy as np
from pymatgen.analysis.elasticity.elastic import *
from pymatgen.analysis.elasticity.strain import Strain, Deformation
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.util.testing import PymatgenTest
from pymatgen import Structure, Lattice
from scipy.misc import central_diff_weights
import warnings
import json
import random
from six.moves import zip
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class ElasticTensorTest(PymatgenTest):
def setUp(self):
self.voigt_1 = [[59.33, 28.08, 28.08, 0, 0, 0],
[28.08, 59.31, 28.07, 0, 0, 0],
[28.08, 28.07, 59.32, 0, 0, 0],
[0, 0, 0, 26.35, 0, 0],
[0, 0, 0, 0, 26.35, 0],
[0, 0, 0, 0, 0, 26.35]]
mat = np.random.randn(6, 6)
mat = mat + np.transpose(mat)
self.rand_elastic_tensor = ElasticTensor.from_voigt(mat)
self.ft = np.array([[[[59.33, 0, 0],
[0, 28.08, 0],
[0, 0, 28.08]],
[[0, 26.35, 0],
[26.35, 0, 0],
[0, 0, 0]],
[[0, 0, 26.35],
[0, 0, 0],
[26.35, 0, 0]]],
[[[0, 26.35, 0],
[26.35, 0, 0],
[0, 0, 0]],
[[28.08, 0, 0],
[0, 59.31, 0],
[0, 0, 28.07]],
[[0, 0, 0],
[0, 0, 26.35],
[0, 26.35, 0]]],
[[[0, 0, 26.35],
[0, 0, 0],
[26.35, 0, 0]],
[[0, 0, 0],
[0, 0, 26.35],
[0, 26.35, 0]],
[[28.08, 0, 0],
[0, 28.07, 0],
[0, 0, 59.32]]]])
self.elastic_tensor_1 = ElasticTensor(self.ft)
filepath = os.path.join(test_dir, 'Sn_def_stress.json')
with open(filepath) as f:
self.def_stress_dict = json.load(f)
with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
self.toec_dict = json.load(f)
self.structure = self.get_structure("Sn")
warnings.simplefilter("always")
def test_properties(self):
# compliance tensor
ct = ComplianceTensor.from_voigt(np.linalg.inv(self.elastic_tensor_1.voigt))
self.assertArrayAlmostEqual(ct, self.elastic_tensor_1.compliance_tensor)
# KG average properties
self.assertAlmostEqual(38.49111111111, self.elastic_tensor_1.k_voigt)
self.assertAlmostEqual(22.05866666666, self.elastic_tensor_1.g_voigt)
self.assertAlmostEqual(38.49110945133, self.elastic_tensor_1.k_reuss)
self.assertAlmostEqual(20.67146635306, self.elastic_tensor_1.g_reuss)
self.assertAlmostEqual(38.49111028122, self.elastic_tensor_1.k_vrh)
self.assertAlmostEqual(21.36506650986, self.elastic_tensor_1.g_vrh)
# universal anisotropy
self.assertAlmostEqual(0.33553509658699,
self.elastic_tensor_1.universal_anisotropy)
# homogeneous poisson
self.assertAlmostEqual(0.26579965576472,
self.elastic_tensor_1.homogeneous_poisson)
# voigt notation tensor
self.assertArrayAlmostEqual(self.elastic_tensor_1.voigt,
self.voigt_1)
# young's modulus
self.assertAlmostEqual(54087787667.160583,
self.elastic_tensor_1.y_mod)
# prop dict
prop_dict = self.elastic_tensor_1.property_dict
self.assertAlmostEqual(prop_dict["homogeneous_poisson"], 0.26579965576)
for k, v in prop_dict.items():
self.assertAlmostEqual(getattr(self.elastic_tensor_1, k), v)
def test_directional_elastic_mod(self):
self.assertAlmostEqual(self.elastic_tensor_1.directional_elastic_mod([1, 0, 0]),
self.elastic_tensor_1.voigt[0, 0])
self.assertAlmostEqual(self.elastic_tensor_1.directional_elastic_mod([1, 1, 1]),
73.624444444)
def test_compliance_tensor(self):
stress = self.elastic_tensor_1.calculate_stress([0.01] + [0]*5)
comp = self.elastic_tensor_1.compliance_tensor
strain = Strain(comp.einsum_sequence([stress]))
self.assertArrayAlmostEqual(strain.voigt, [0.01] + [0]*5)
def test_directional_poisson_ratio(self):
v_12 = self.elastic_tensor_1.directional_poisson_ratio([1, 0, 0], [0, 1, 0])
self.assertAlmostEqual(v_12, 0.321, places=3)
def test_structure_based_methods(self):
# trans_velocity
self.assertAlmostEqual(1996.35019877,
self.elastic_tensor_1.trans_v(self.structure))
# long_velocity
self.assertAlmostEqual(3534.68123832,
self.elastic_tensor_1.long_v(self.structure))
# Snyder properties
self.assertAlmostEqual(18.06127074,
self.elastic_tensor_1.snyder_ac(self.structure))
self.assertAlmostEqual(0.18937465,
self.elastic_tensor_1.snyder_opt(self.structure))
self.assertAlmostEqual(18.25064540,
self.elastic_tensor_1.snyder_total(self.structure))
# Clarke
self.assertAlmostEqual(0.3450307,
self.elastic_tensor_1.clarke_thermalcond(self.structure))
# Cahill
self.assertAlmostEqual(0.37896275,
self.elastic_tensor_1.cahill_thermalcond(self.structure))
# Debye
self.assertAlmostEqual(247.3058931,
self.elastic_tensor_1.debye_temperature(self.structure))
self.assertAlmostEqual(189.05670205,
self.elastic_tensor_1.debye_temperature_gibbs(self.structure))
# structure-property dict
sprop_dict = self.elastic_tensor_1.get_structure_property_dict(self.structure)
self.assertAlmostEqual(sprop_dict["long_v"], 3534.68123832)
for k, v in sprop_dict.items():
if k=="structure":
self.assertEqual(v, self.structure)
else:
f = getattr(self.elastic_tensor_1, k)
if callable(f):
self.assertAlmostEqual(getattr(self.elastic_tensor_1, k)(self.structure), v)
else:
self.assertAlmostEqual(getattr(self.elastic_tensor_1, k), v)
def test_new(self):
self.assertArrayAlmostEqual(self.elastic_tensor_1,
ElasticTensor(self.ft))
nonsymm = self.ft
nonsymm[0, 1, 2, 2] += 1.0
with warnings.catch_warnings(record=True) as w:
ElasticTensor(nonsymm)
self.assertEqual(len(w), 1)
badtensor1 = np.zeros((3, 3, 3))
badtensor2 = np.zeros((3, 3, 3, 2))
self.assertRaises(ValueError, ElasticTensor, badtensor1)
self.assertRaises(ValueError, ElasticTensor, badtensor2)
def test_from_pseudoinverse(self):
strain_list = [Strain.from_deformation(def_matrix)
for def_matrix in self.def_stress_dict['deformations']]
stress_list = [stress for stress in self.def_stress_dict['stresses']]
with warnings.catch_warnings(record=True):
et_fl = -0.1*ElasticTensor.from_pseudoinverse(strain_list,
stress_list).voigt
self.assertArrayAlmostEqual(et_fl.round(2),
[[59.29, 24.36, 22.46, 0, 0, 0],
[28.06, 56.91, 22.46, 0, 0, 0],
[28.06, 25.98, 54.67, 0, 0, 0],
[0, 0, 0, 26.35, 0, 0],
[0, 0, 0, 0, 26.35, 0],
[0, 0, 0, 0, 0, 26.35]])
def test_from_independent_strains(self):
strains = self.toec_dict["strains"]
stresses = self.toec_dict["stresses"]
with warnings.catch_warnings(record=True) as w:
et = ElasticTensor.from_independent_strains(strains, stresses)
self.assertArrayAlmostEqual(et.voigt, self.toec_dict["C2_raw"], decimal=-1)
def test_energy_density(self):
film_elac = ElasticTensor.from_voigt([
[324.32, 187.3, 170.92, 0., 0., 0.],
[187.3, 324.32, 170.92, 0., 0., 0.],
[170.92, 170.92, 408.41, 0., 0., 0.],
[0., 0., 0., 150.73, 0., 0.],
[0., 0., 0., 0., 150.73, 0.],
[0., 0., 0., 0., 0., 238.74]])
dfm = Deformation([[-9.86004855e-01, 2.27539582e-01, -4.64426035e-17],
[-2.47802121e-01, -9.91208483e-01, -7.58675185e-17],
[-6.12323400e-17, -6.12323400e-17, 1.00000000e+00]])
self.assertAlmostEqual(film_elac.energy_density(dfm.green_lagrange_strain),
0.00125664672793)
film_elac.energy_density(Strain.from_deformation([[ 0.99774738, 0.11520994, -0. ],
[-0.11520994, 0.99774738, 0. ],
[-0., -0., 1., ]]))
class ElasticTensorExpansionTest(PymatgenTest):
def setUp(self):
with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
self.data_dict = json.load(f)
self.strains = [Strain(sm) for sm in self.data_dict['strains']]
self.pk_stresses = [Stress(d) for d in self.data_dict['pk_stresses']]
self.c2 = self.data_dict["C2_raw"]
self.c3 = self.data_dict["C3_raw"]
self.exp = ElasticTensorExpansion.from_voigt([self.c2, self.c3])
self.cu = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.623),
["Cu"], [[0]*3])
indices = [(0, 0), (0, 1), (3, 3)]
values = [167.8, 113.5, 74.5]
cu_c2 = ElasticTensor.from_values_indices(values, indices, structure=self.cu,
populate=True)
indices = [(0, 0, 0), (0, 0, 1), (0, 1, 2),
(0, 3, 3), (0, 5, 5), (3, 4, 5)]
values = [-1507., -965., -71., -7., -901., 45.]
cu_c3 = Tensor.from_values_indices(values, indices, structure=self.cu,
populate=True)
self.exp_cu = ElasticTensorExpansion([cu_c2, cu_c3])
cu_c4 = Tensor.from_voigt(self.data_dict["Cu_fourth_order"])
self.exp_cu_4 = ElasticTensorExpansion([cu_c2, cu_c3, cu_c4])
def test_init(self):
cijkl = Tensor.from_voigt(self.c2)
cijklmn = Tensor.from_voigt(self.c3)
exp = ElasticTensorExpansion([cijkl, cijklmn])
from_voigt = ElasticTensorExpansion.from_voigt([self.c2, self.c3])
self.assertEqual(exp.order, 3)
def test_from_diff_fit(self):
exp = ElasticTensorExpansion.from_diff_fit(self.strains, self.pk_stresses)
def test_calculate_stress(self):
calc_stress = self.exp.calculate_stress(self.strains[0])
self.assertArrayAlmostEqual(self.pk_stresses[0], calc_stress, decimal=2)
def test_energy_density(self):
edensity = self.exp.energy_density(self.strains[0])
self.assertAlmostEqual(edensity, 1.36363099e-4)
def test_gruneisen(self):
# Get GGT
ggt = self.exp_cu.get_ggt([1, 0, 0], [0, 1, 0])
self.assertArrayAlmostEqual(
np.eye(3)*np.array([4.92080537, 4.2852349, -0.7147651]), ggt)
# Get TGT
tgt = self.exp_cu.get_tgt()
self.assertArrayAlmostEqual(tgt, np.eye(3)*2.59631832)
# Get heat capacity
c0 = self.exp_cu.get_heat_capacity(0, self.cu, [1, 0, 0], [0, 1, 0])
self.assertEqual(c0, 0.0)
c = self.exp_cu.get_heat_capacity(300, self.cu, [1, 0, 0], [0, 1, 0])
self.assertAlmostEqual(c, 8.285611958)
# Get Gruneisen parameter
gp = self.exp_cu.get_gruneisen_parameter()
self.assertAlmostEqual(gp, 2.59631832)
gpt = self.exp_cu.get_gruneisen_parameter(temperature=200, structure=self.cu)
def test_thermal_expansion_coeff(self):
#TODO get rid of duplicates
alpha_dp = self.exp_cu.thermal_expansion_coeff(self.cu, 300,
mode="dulong-petit")
alpha_debye = self.exp_cu.thermal_expansion_coeff(self.cu, 300,
mode="debye")
self.assertArrayAlmostEqual(21.4533472e-06 * np.eye(3), alpha_debye)
def test_get_compliance_expansion(self):
ce_exp = self.exp_cu.get_compliance_expansion()
et_comp = ElasticTensorExpansion(ce_exp)
strain_orig = Strain.from_voigt([0.01, 0, 0, 0, 0, 0])
stress = self.exp_cu.calculate_stress(strain_orig)
strain_revert = et_comp.calculate_stress(stress)
self.assertArrayAlmostEqual(strain_orig, strain_revert, decimal=4)
def test_get_effective_ecs(self):
# Ensure zero strain is same as SOEC
test_zero = self.exp_cu.get_effective_ecs(np.zeros((3, 3)))
self.assertArrayAlmostEqual(test_zero, self.exp_cu[0])
s = np.zeros((3, 3))
s[0, 0] = 0.02
test_2percent = self.exp_cu.get_effective_ecs(s)
diff = test_2percent - test_zero
self.assertArrayAlmostEqual(self.exp_cu[1].einsum_sequence([s]), diff)
def test_get_strain_from_stress(self):
strain = Strain.from_voigt([0.05, 0, 0, 0, 0, 0])
stress3 = self.exp_cu.calculate_stress(strain)
strain_revert3 = self.exp_cu.get_strain_from_stress(stress3)
self.assertArrayAlmostEqual(strain, strain_revert3, decimal=2)
# fourth order
stress4 = self.exp_cu_4.calculate_stress(strain)
strain_revert4 = self.exp_cu_4.get_strain_from_stress(stress4)
self.assertArrayAlmostEqual(strain, strain_revert4, decimal=2)
def test_get_yield_stress(self):
ys = self.exp_cu_4.get_yield_stress([1, 0, 0])
class NthOrderElasticTensorTest(PymatgenTest):
def setUp(self):
with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
self.data_dict = json.load(f)
self.strains = [Strain(sm) for sm in self.data_dict['strains']]
self.pk_stresses = [Stress(d) for d in self.data_dict['pk_stresses']]
self.c2 = NthOrderElasticTensor.from_voigt(self.data_dict["C2_raw"])
self.c3 = NthOrderElasticTensor.from_voigt(self.data_dict["C3_raw"])
def test_init(self):
c2 = NthOrderElasticTensor(self.c2.tolist())
c3 = NthOrderElasticTensor(self.c3.tolist())
c4 = NthOrderElasticTensor(np.zeros([3]*8))
for n, c in enumerate([c2, c3, c4]):
self.assertEqual(c.order, n+2)
self.assertRaises(ValueError, NthOrderElasticTensor, np.zeros([3]*5))
def test_from_diff_fit(self):
c3 = NthOrderElasticTensor.from_diff_fit(self.strains, self.pk_stresses,
eq_stress = self.data_dict["eq_stress"],
order=3)
self.assertArrayAlmostEqual(c3.voigt, self.data_dict["C3_raw"], decimal=2)
def test_calculate_stress(self):
calc_stress = self.c2.calculate_stress(self.strains[0])
self.assertArrayAlmostEqual(self.pk_stresses[0], calc_stress, decimal=0)
# Test calculation from voigt strain
calc_stress_voigt = self.c2.calculate_stress(self.strains[0].voigt)
def test_energy_density(self):
self.c3.energy_density(self.strains[0])
class DiffFitTest(PymatgenTest):
"""
Tests various functions related to diff fitting
"""
def setUp(self):
with open(os.path.join(test_dir, 'test_toec_data.json')) as f:
self.data_dict = json.load(f)
self.strains = [Strain(sm) for sm in self.data_dict['strains']]
self.pk_stresses = [Stress(d) for d in self.data_dict['pk_stresses']]
def test_get_strain_state_dict(self):
strain_inds = [(0,), (1,), (2,), (1, 3), (1, 2, 3)]
vecs = {}
strain_states = []
for strain_ind in strain_inds:
ss = np.zeros(6)
np.put(ss, strain_ind, 1)
strain_states.append(tuple(ss))
vec = np.zeros((4, 6))
rand_values = np.random.uniform(0.1, 1, 4)
for i in strain_ind:
vec[:, i] = rand_values
vecs[strain_ind] = vec
all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values()
for v in vec]
random.shuffle(all_strains)
all_stresses = [Stress.from_voigt(np.random.random(6)).zeroed()
for s in all_strains]
strain_dict = {k.tostring():v for k,v in zip(all_strains, all_stresses)}
ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False)
# Check length of ss_dict
self.assertEqual(len(strain_inds), len(ss_dict))
# Check sets of strain states are correct
self.assertEqual(set(strain_states), set(ss_dict.keys()))
for strain_state, data in ss_dict.items():
# Check correspondence of strains/stresses
for strain, stress in zip(data["strains"], data["stresses"]):
self.assertArrayAlmostEqual(Stress.from_voigt(stress),
strain_dict[Strain.from_voigt(strain).tostring()])
def test_find_eq_stress(self):
random_strains = [Strain.from_voigt(s) for s in np.random.uniform(0.1, 1, (20, 6))]
random_stresses = [Stress.from_voigt(s) for s in np.random.uniform(0.1, 1, (20, 6))]
with warnings.catch_warnings(record=True):
no_eq = find_eq_stress(random_strains, random_stresses)
self.assertArrayAlmostEqual(no_eq, np.zeros((3,3)))
random_strains[12] = Strain.from_voigt(np.zeros(6))
eq_stress = find_eq_stress(random_strains, random_stresses)
self.assertArrayAlmostEqual(random_stresses[12], eq_stress)
def test_get_diff_coeff(self):
forward_11 = get_diff_coeff([0, 1], 1)
forward_13 = get_diff_coeff([0, 1, 2, 3], 1)
backward_26 = get_diff_coeff(np.arange(-6, 1), 2)
central_29 = get_diff_coeff(np.arange(-4, 5), 2)
self.assertArrayAlmostEqual(forward_11, [-1, 1])
self.assertArrayAlmostEqual(forward_13, [-11./6, 3, -3./2, 1./3])
self.assertArrayAlmostEqual(backward_26, [137./180, -27./5, 33./2, -254./9,
117./4, -87./5, 203./45])
self.assertArrayAlmostEqual(central_29, central_diff_weights(9, 2))
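# Hedged note: get_diff_coeff returns finite-difference weights for the
# given stencil offsets, e.g. the first-order forward stencil [0, 1]
# encodes f'(x) ~ (1*f(x+h) - 1*f(x)) / h, matching the [-1, 1] weights
# asserted above.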
def test_generate_pseudo(self):
strain_states = np.eye(6).tolist()
m2, absolute = generate_pseudo(strain_states, order=2)
m3, absolute = generate_pseudo(strain_states, order=3)
m4, absolute = generate_pseudo(strain_states, order=4)
def test_fit(self):
cdf = diff_fit(self.strains, self.pk_stresses,
self.data_dict["eq_stress"])
reduced = [(e, pk) for e, pk in zip(self.strains, self.pk_stresses)
if not (abs(abs(e)-0.05)<1e-10).any()]
# Get reduced dataset
r_strains, r_pk_stresses = zip(*reduced)
with warnings.catch_warnings(record=True):
c2 = diff_fit(r_strains, r_pk_stresses,
self.data_dict["eq_stress"], order=2)
c2, c3, c4 = diff_fit(r_strains, r_pk_stresses,
self.data_dict["eq_stress"],
order=4)
c2, c3 = diff_fit(self.strains, self.pk_stresses,
self.data_dict["eq_stress"], order=3)
c2_red, c3_red = diff_fit(r_strains, r_pk_stresses,
self.data_dict["eq_stress"],
order=3)
self.assertArrayAlmostEqual(c2.voigt, self.data_dict["C2_raw"])
self.assertArrayAlmostEqual(c3.voigt, self.data_dict["C3_raw"], decimal=5)
self.assertArrayAlmostEqual(c2, c2_red, decimal=0)
self.assertArrayAlmostEqual(c3, c3_red, decimal=-1)
if __name__ == '__main__':
unittest.main()
|
setten/pymatgen
|
pymatgen/analysis/elasticity/tests/test_elastic.py
|
Python
|
mit
| 21,160
|
[
"pymatgen"
] |
82fb49fa82f646997a8501489e4031c45fff426b2ddbb6e10a3826c2efbf33a7
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the GlyphAzznProblem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
from magenta.models.svg_vae import svg_utils
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
# Raw dataset paths (from datagen_beam.py)
# (Run t2t datagen on GlyphAzznProblem to convert these into a t2t dataset)
RAW_STAT_FILE = '/path/to/glyphazzn-internal-stats-00000-of-00001'
RAW_DATA_FILES = '/path/to/glyphazzn-internal-train*'
URL_SPLITS = 'third_party/py/magenta/models/svg_vae/glyphazzn_urls_split.txt'
class IdentityEncoder(object):
def encode(self, inputs):
return inputs
def decode(self, inputs):
return inputs
@registry.register_problem
class GlyphAzznProblem(problem.Problem):
"""Defines the GlyphAzznProblem class."""
@property
def dataset_splits(self):
"""Data splits to produce and number of shards for each."""
# 10% evaluation data
return [{
'split': problem.DatasetSplit.TRAIN,
'shards': 90,
}, {
'split': problem.DatasetSplit.TEST,
'shards': 10,
}]
@property
def is_generate_per_split(self):
# The data comes pre-split, so we should not shuffle and split it again.
# This also means generate_samples will be called twice (once per split).
return True
@property
def has_inputs(self):
return True
def feature_encoders(self, data_dir):
del data_dir
return {
'inputs': IdentityEncoder(),
'targets': IdentityEncoder()
}
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
# ignore any encoding since we don't need that
return self.generate_samples(data_dir, tmp_dir, dataset_split)
def generate_data(self, data_dir, tmp_dir, task_id=-1):
del tmp_dir # unused argument
filepath_fns = {
problem.DatasetSplit.TRAIN: self.training_filepaths,
problem.DatasetSplit.EVAL: self.dev_filepaths,
problem.DatasetSplit.TEST: self.test_filepaths,
}
split_paths = [(split['split'], filepath_fns[split['split']](
data_dir, split['shards'], shuffled=False))
for split in self.dataset_splits]
all_paths = []
for _, paths in split_paths:
all_paths.extend(paths)
if self.is_generate_per_split:
for split, paths in split_paths:
generator_utils.generate_files(
self.generate_encoded_samples(data_dir, tmp_dir, split), paths)
else:
generator_utils.generate_files(
self.generate_encoded_samples(
data_dir, tmp_dir, problem.DatasetSplit.TRAIN), all_paths)
generator_utils.shuffle_dataset(all_paths)
@property
def categorical(self):
# indicates we're using one-hot categories for command type.
return True
@property
def feature_dim(self):
return 10
@property
def num_classes(self):
return 30
def generate_samples(self, data_dir, tmp_dir, dataset_split):
"""Generate samples of target svg commands."""
del tmp_dir # unused argument
if not hasattr(self, 'splits'):
tf.logging.info(
'Loading binary_fp: train/test from {}'.format(URL_SPLITS))
self.splits = {}
for line in tf.gfile.Open(URL_SPLITS, 'r').read().split('\n'):
if line:
line = line.split(', ')
self.splits[line[0]] = line[1]
if not tf.gfile.Exists(data_dir):
tf.gfile.MakeDirs(data_dir)
if not tf.gfile.Exists(os.path.join(data_dir, 'mean.npz')):
# FIRST, COPY THE MEAN/STDEV INTO DATA_DIR, in npz format
for serialized_stats in tf.python_io.tf_record_iterator(RAW_STAT_FILE):
stats = tf.train.Example()
stats.ParseFromString(serialized_stats)
mean = np.array(stats.features.feature['mean'].float_list.value)
stdev = np.array(stats.features.feature['stddev'].float_list.value)
# also want to set mean[:4] to zeros and stdev[:4] to ones, because
# these are the class labels
mean = np.concatenate((np.zeros([4]), mean[4:]), axis=0)
stdev = np.concatenate((np.ones([4]), stdev[4:]), axis=0)
# finally, save
np.save(tf.gfile.Open(os.path.join(data_dir, 'mean.npz'), 'w'), mean)
np.save(tf.gfile.Open(os.path.join(data_dir, 'stdev.npz'), 'w'), stdev)
logging.info('Generated mean and stdev npzs')
for raw_data_file in tf.gfile.Glob(RAW_DATA_FILES):
for serialized_example in tf.python_io.tf_record_iterator(raw_data_file):
example = tf.train.Example()
example.ParseFromString(serialized_example)
# determine whether this example belongs to a fontset in train or test
this_bfp = str(
example.features.feature['binary_fp'].bytes_list.value[0])
if this_bfp not in self.splits:
# randomly sample 10% to be test, the rest is train
should_be_test = np.random.random() < 0.1
self.splits[this_bfp] = 'test' if should_be_test else 'train'
if self.splits[this_bfp] != dataset_split:
continue
yield {
'targets_sln': np.array(
example.features.feature['seq_len'].int64_list.value).astype(
np.int64).tolist(),
'targets_cls': np.array(
example.features.feature['class'].int64_list.value).astype(
np.int64).tolist(),
'targets_rel': np.array(
example.features.feature['sequence'].float_list.value).astype(
np.float32).tolist(),
'targets_rnd': np.array(
example.features.feature['rendered'].float_list.value).astype(
np.float32).tolist()
}
def example_reading_spec(self):
data_fields = {'targets_rel': tf.FixedLenFeature([51*10], tf.float32),
'targets_rnd': tf.FixedLenFeature([64*64], tf.float32),
'targets_sln': tf.FixedLenFeature([1], tf.int64),
'targets_cls': tf.FixedLenFeature([1], tf.int64)}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def preprocess_example(self, example, unused_mode, hparams):
"""Time series are flat on disk, we un-flatten them back here."""
if not hasattr(self, 'mean_npz'):
mean_filename = os.path.join(hparams.data_dir, 'mean.npz')
stdev_filename = os.path.join(hparams.data_dir, 'stdev.npz')
with tf.gfile.Open(mean_filename, 'r') as f:
self.mean_npz = np.load(f)
with tf.gfile.Open(stdev_filename, 'r') as f:
self.stdev_npz = np.load(f)
example['targets_cls'] = tf.reshape(example['targets_cls'], [1])
example['targets_sln'] = tf.reshape(example['targets_sln'], [1])
example['targets_rel'] = tf.reshape(example['targets_rel'], [51, 1, 10])
# normalize (via gaussian)
example['targets_rel'] = (example['targets_rel'] -
self.mean_npz) / self.stdev_npz
# redefine shape inside model!
example['targets_psr'] = tf.reshape(example['targets_rnd'],
[1, 64 * 64]) / 255.
del example['targets_rnd']
if hparams.just_render:
# training vae mode, use the last image (rendered icon) as input & output
example['inputs'] = example['targets_psr'][-1, :]
example['targets'] = example['targets_psr'][-1, :]
else:
example['inputs'] = tf.identity(example['targets_rel'])
example['targets'] = tf.identity(example['targets_rel'])
return example
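# Hedged note: the normalization above is per-dimension standardization,
# targets_rel = (targets_rel - mean) / stdev; the decode hooks below
# invert it as outputs * stdev + mean before converting back to SVG text.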
def hparams(self, defaults, model_hparams):
p = defaults
p.stop_at_eos = int(False)
p.vocab_size = {'inputs': self.feature_dim, 'targets': self.feature_dim}
p.modality = {'inputs': modalities.ModalityType.IDENTITY,
'targets': modalities.ModalityType.IDENTITY}
@property
def decode_hooks(self):
to_img = svg_utils.create_image_conversion_fn(
1, categorical=self.categorical)
def sample_image(decode_hook_args):
"""Converts decoded predictions into summaries."""
hparams = decode_hook_args.hparams
if not hasattr(self, 'mean_npz'):
mean_filename = os.path.join(hparams.data_dir, 'mean.npz')
stdev_filename = os.path.join(hparams.data_dir, 'stdev.npz')
with tf.gfile.Open(mean_filename, 'r') as f:
self.mean_npz = np.load(f)
with tf.gfile.Open(stdev_filename, 'r') as f:
self.stdev_npz = np.load(f)
values = []
for pred_dict in decode_hook_args.predictions[0]:
if hparams.just_render:
# vae mode, outputs is image, just do image summary and continue
values.append(svg_utils.make_image_summary(
pred_dict['outputs'], 'rendered_outputs'))
values.append(svg_utils.make_image_summary(
pred_dict['targets'], 'rendered_targets'))
continue
if common_layers.shape_list(pred_dict['targets'])[0] == 1:
continue
# undo normalize (via gaussian)
denorm_outputs = (pred_dict['outputs'] * self.stdev_npz) + self.mean_npz
denorm_targets = (pred_dict['targets'] * self.stdev_npz) + self.mean_npz
# simple cmds are 10 dim (4 one-hot, 6 args).
# Convert to full SVG spec dimensionality so we can convert it to text.
denorm_outputs = svg_utils.make_simple_cmds_long(denorm_outputs)
denorm_targets = svg_utils.make_simple_cmds_long(denorm_targets)
# sampled text summary
output_svg = to_img([np.reshape(denorm_outputs, [-1, 30])])
values.append(svg_utils.make_text_summary_value(output_svg,
'img/sampled'))
# original text summary
target_svg = to_img([np.reshape(denorm_targets, [-1, 30])])
values.append(svg_utils.make_text_summary_value(target_svg, 'img/og'))
return values
return [sample_image]
def eval_metrics(self):
return []
|
magenta/magenta
|
magenta/models/svg_vae/glyphazzn.py
|
Python
|
apache-2.0
| 10,793
|
[
"Gaussian"
] |
293de5f41151d004b6de58df9335e095247ac92efb724b0000b3873c7d5c33fc
|
#!/usr/bin/env python
"""
write_spec_files.py:
This script generates spec file for various distributions.
Last modified: Mon Aug 10 22:28:30 2015
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import re
import os
import sys
moogliBuildReq = [
"cmake"
, "gcc-c++"
, "PyQt4-devel"
, "PyQt"
, "qt-devel"
, "sip-devel"
]
moogliReq = [ "OpenSceneGraph", "python-qt4" ]
repos_ = { "CentOS" : [6, 7]
, "Fedora" : [20, 21, 22, 23 ]
, "RHEL" : [ 7 ]
, "Arch" : ["Core", "Extra"]
, "openSUSE" : [ "12.3", "13.1", "13.2", "Tumbleweed", "Factory_ARM", 'Leap_42.1' ]
, "CentOS" : [ "6", "7" ]
, "SLE" : [ "11_SP3", "11_SP4", "12" ]
}
_alternative = {
'openSUSE' : {
'PyQt' : 'python-qt4'
, 'qt-devel' : 'libqt4-devel'
, 'sip-devel' : 'python-sip'
, 'qt4-devel' : 'libqt4'
, 'PyQt4-devel' : 'python-qt4-devel'
, 'PyQt4' : 'python-qt4'
},
'CentOS' : {
'PyQt' : 'PyQt4'
},
}
def get_alternative_name(repoName, name):
global _alternative
rep = _alternative.get(repoName, None)
if rep:
alt = rep.get(name, None)
if alt:
return alt
else:
return name
else:
return name
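# Hedged usage sketch:
# >>> get_alternative_name('openSUSE', 'PyQt')
# 'python-qt4'
# >>> get_alternative_name('Fedora', 'PyQt')  # no mapping: name unchanged
# 'PyQt'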
class SpecFile():
def __init__(self, repository, version):
self.repository = repository
self.version = version
self.architecture = "i586"
self.url = None
self.specfileName = "moogli-{}_{}.spec".format(self.repository, version)
self.templateText = None
with open("moogli.spec.template", "r") as f:
self.templateText = f.read()
def writeSpecFile(self, **kwargs):
print("++ Writing spec file for %s" % self.repository)
# moogli - build
moogliBuildReqText = "\n".join(
[ "BuildRequires: %s" % get_alternative_name(self.repository, x) for x in moogliBuildReq]
)
self.templateText = self.templateText.replace("<<MoogliBuildRequires>>"
, moogliBuildReqText
)
moogliReqText = "\n".join(
[ "Requires: %s" % get_alternative_name(self.repository, x) for x in moogliReq ]
)
self.templateText = self.templateText.replace("<<moogliRequires>>"
, moogliReqText
)
# Just get the moose-core and moose-python build requirements.
print("Writing specfile: {}".format(self.specfileName))
with open(self.specfileName, "w") as specFile:
specFile.write(self.templateText)
def main():
global repos_
for r in repos_:
repo, versions = r, repos_[r]
for version in versions:
sl = SpecFile(repo, version)
sl.writeSpecFile()
if __name__ == '__main__':
main()
|
BhallaLab/moose-full
|
OBS/home:moose/moogli/write_spec_files.py
|
Python
|
gpl-2.0
| 3,279
|
[
"MOOSE"
] |
dba3ba5caa6dc423e07b3a71c24aaaa650fec6cb4800d0e05e3acbd13bee9cdd
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
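# Hedged usage sketch (shapes only; values are illustrative):
# >>> X = np.zeros((4, 2))          # 4 samples, 2 features
# >>> means = np.zeros((3, 2))      # 3 components
# >>> covars = np.ones((3, 2))      # 'diag' covariances
# >>> log_multivariate_normal_density(X, means, covars, 'diag').shape
# (4, 3)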
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
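# Hedged usage sketch: five draws from a 2-D diagonal Gaussian come back
# with shape (n_features, n_samples):
# >>> sample_gaussian(np.zeros(2), np.ones(2), 'diag', n_samples=5).shape
# (2, 5)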
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated and "
"will be removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
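# Illustrative model selection (a hedged sketch added for exposition, not part
# of the original module): the bic()/aic() methods support choosing
# n_components by fitting one model per candidate count and keeping the lowest
# criterion value. `X` is assumed to be an (n_samples, n_features) array.
def _example_select_n_components(X, max_components=5):
    """Hedged sketch: return the fitted GMM with the lowest BIC on X."""
    candidates = [GMM(n_components=k).fit(X)
                  for k in range(1, max_components + 1)]
    return min(candidates, key=lambda g: g.bic(X))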
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
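# A short derivation of the expression above (for clarity): it is the
# vectorized expansion of
#   log N(x | mu, diag(v)) = -0.5 * sum_d [log(2*pi*v_d) + (x_d - mu_d)**2 / v_d]
# where the quadratic term is unrolled as
#   (x_d - mu_d)**2 / v_d = mu_d**2 / v_d - 2*x_d*mu_d / v_d + x_d**2 / v_d,
# matching the four summands of `lpr` term by term.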
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component.
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
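# A short derivation of the update above (for clarity): it is the
# responsibility-weighted second moment expanded as
#   Sigma_{c,d} = E_c[x_d**2] - 2 * mu_{c,d} * E_c[x_d] + mu_{c,d}**2 + min_covar
# where E_c[.] averages over samples weighted by the responsibilities of
# component c (`norm` supplies the 1/N_c normalization) and min_covar keeps
# the diagonal bounded away from zero.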
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
|
olologin/scikit-learn
|
sklearn/mixture/gmm.py
|
Python
|
bsd-3-clause
| 31,376
|
[
"Gaussian"
] |
06e2fa8270c76130feba2d862937e39bb5291ef586f52a1255f31ab642f75699
|
from libs.graph.DLinkedList import Queue, DoubledLinkedList as List
from libs.graph.PriorityQueue import PriorityQueueBinary as PriorityQueue
from libs.graph.Tree import *
#a DoubledLinkedList is used so that the lists in the graph representation
#can be operated on with great efficiency
class Node:
def __init__(self, elem, index, weight = None):
"""
this class represents a graph node
:param elem: an object stored into the node
:param index: int, the index by which the node may be identified
        :param weight: int, the weight of the node and of its object - may be unused
"""
self._elem = elem
self._index = index
self._weight = weight
self._token = None #used to mark each node during a generic visit
        self._distance = 0 #used to set and retrieve the distance of the node during a visit
        self._knights = 0 #used to keep track of the knights in the node
self._knights_arrived = []
def get_elem(self):
"""
:return: object stored in the node
"""
return self._elem
def get_index(self):
"""
:return: int, the index of the node
"""
return self._index
def get_weight(self):
"""
:return: int, the weight of the node
"""
return self._weight
def get_token(self):
"""
:return: int, the token of the node
"""
return self._token
def set_token(self, token):
"""
:param token: int, the validation token
        :return: None
"""
self._token = token
def get_node(self):
"""
        :return: tuple, (elem, weight)
"""
return self.get_elem(), self.get_weight()
def set_distance(self, dist):
"""
        this function accumulates a distance contribution (counting one more arriving
        knight) to provide a common interface for the BFS and Dijkstra shortest-path algorithms
:param dist: int, distance
:return: None
"""
self._distance += dist
self._knights += 1
def get_distance(self):
"""
:return: int, the distance calculated for the node
"""
return self._distance
def get_count(self):
"""
:return: int, the number of knights
"""
return self._knights
#I'll use an adjacency-list graph (GraphAdjacenceList) because all the arcs have unit weight
class GraphAdjacenceList:
def __init__(self):
"""
this class represents a graph using an adjacency list style
"""
self._nodes = dict() #to store the nodes
self._adjacency = dict() #to link the nodes to their adjacence list
        self._nextId = 0 #it will be used to store the nodes - ids start at 0
self._nodes_elems = dict() #it will be used to store the elems inserted
def getNodes(self):
"""
        this function is used as an interface to retrieve the graph's nodes
:return: (dictionary, dictionary) the nodes and their adjacency lists
"""
return self._nodes, self._adjacency
def insertNode(self, elem, weight = None):
"""
this function allows the user to insert a node into the graph
:param elem: the elem to be stored into the node
:param weight: the weight of the node
:return: Node, the node already inserted or just inserted
"""
if elem in self._nodes_elems:
            #if a node with this elem has already been inserted it is returned
            #the computational cost of this check is a dict lookup,
            #i.e. O(1) on average
return self._nodes_elems[elem]
newNode = Node(elem, self._nextId, weight)
self._nodes[newNode.get_index()] = newNode
self._adjacency[newNode.get_index()] = List()
self._nextId += 1
#storing the elem just inserted
self._nodes_elems[elem] = newNode
return newNode
def linkNode(self, tail, head):
"""
this function links two nodes in a direct connection
:param tail: Node, the tail node
:param head: Node, the head node
:return: None
"""
adj = self._adjacency[tail.get_index()]
if head not in adj.getLastAddedList():
            #membership test on the adjacency list (see note in insertNode)
adj.addAsLast(head)
def printGraph(self):
"""
        this function prints a well formatted visualization of the nodes
        :return: None
"""
print("Adjacency Lists:")
for identifier in self._nodes:
print("node", self._nodes[identifier].get_elem(), self._nodes[identifier].get_weight())
self._adjacency[identifier].printList()
print("")
    #The chessboard graph has unit-weight arcs, so a Breadth First Search returns the list of all the
    #minimum-path trees, each starting from a knight
def validateNodes(self, token):
"""
        this function validates all nodes with a token value in order to prepare the visit
:param token: int, the token value to validate the node. 0 if not visited, 21 if explored and 42 (for Douglas) if closed
:return: None
"""
nodes = self.getNodes()[0]
        for node in nodes.values():
node.set_token(token)
def visitBFS(self, node):
"""
        this is a Breadth First Search starting from a vertex. Please note that all the operations are done on the leaves
        to keep the algorithm modular (this does not seem to affect the computational time, which remains proportional
        to the size of the graph)
:param node: Node, the starting vertex
:return: Tree, representing the visit path
"""
#initializing some useful constants (funny constants too)
unexplored = 0
explored = 21
closed = 42 #So long and thanks for all the fish!
#validating all the nodes as unexplored and starting from the vertex
self.validateNodes(unexplored)
node.set_token(explored)
#initializing the tree containing the only vertex
T_root = Leaf(node)
T_root.setDistance(0.0) #using the float - it is not a counter value
T = Tree(T_root)
#initializing the fringe of the visit
F = Queue()
F.enqueue(T_root)
while not F.isEmpty():
u = F.dequeue()
n = u.getElem()
n.set_token(closed)
for v in self._adjacency[n.get_index()].getLastAddedList():
if v.get_token() == unexplored:
v.set_token(explored)
l = Leaf(v)
F.enqueue(l)
T.insertLeaf(l, u)
return T
def visitNodesBFS(self, Nodes):
"""
this is a simple implementation of a Breadth First Search algorithm to visit the graph
starting from a selected group of nodes
:param Nodes: Node list containing the nodes from which start the visit
:return: list of Trees, the list of all the visits
"""
T_list = []
for node in Nodes:
tree = self.visitBFS(node)
T_list.append(tree)
return T_list
    #the same result can also be achieved using Dijkstra's minimum path algorithm
def Dijkstra(self, node):
"""
        this is a Dijkstra shortest-path algorithm implementation starting from a vertex
:param node: Node, the starting vertex
:return: Tree, the shortest paths tree
"""
INF = float('inf')
self.validateNodes(INF)
#we will use the nodes' tokens to store the distance info!
node.set_token(0.0) #0-distance from itself!
#initializing the tree
T_root = Leaf(node)
T_root.setDistance(node.get_token())
T = Tree(T_root)
#initializing a dictionary to keep trace of the leaves
leaves = dict()
leaves[node] = T_root
        #initializing the priority queue to maintain the fringe
PQ = PriorityQueue()
PQ.insert(T_root, node.get_token())
while not PQ.isEmpty():
            u = PQ.deleteMin() #retrieving the leaf with the minimum distance
n = u.getElem()
for v in self._adjacency[n.get_index()].getLastAddedList():
if v.get_token() == INF:
l = Leaf(v)
leaves[v] = l #updating the leaves' dictionary
PQ.insert(l, n.get_token() + 1.0) #each edge will be unitary-cost
v.set_token(n.get_token() + 1.0)
T.insertLeaf(l, u)
elif n.get_token() + 1.0 < v.get_token():
relaxed = n.get_token() + 1.0
leaves[v].setDistance(relaxed)
#updating the tree... (we are now saving in the priority queue the leaves)
leaves[v].setFather(u)
leaves[n].addSon(leaves[v])
#updating the priority queue
PQ.decreaseKey(leaves[v], relaxed)
v.set_token(relaxed)
return T
def visitDijkstra(self, Nodes):
"""
this is an implementation of the Dijkstra algorithm to visit the graph
starting from a selected group of nodes
:param Nodes: Node list containing the nodes from which start the visit
:return: list of Trees, the list of all the visits
"""
T_list = []
for node in Nodes:
tree = self.Dijkstra(node)
T_list.append(tree)
return T_list
    #Pay attention!
    # -Bellman condition to decide a shortest path -> for each node it is O(k*n) where k is the node's degree
    # -save all the available paths in a tree instead of a list of lists -> O(n) (when possible...)
    # -the chessboard graph is a directed graph with all arcs costing a single unit
    #  (note that each knight's own k-value must be considered in order to calculate
    #  the number of moves!!)
    # -general purpose: in python2.7 infinity is... INF = float('inf') -> comparisons using floats
def FloydWarshall(self):
"""
        this is a simple implementation of the Floyd-Warshall algorithm, using O(n^2) space
        but O(n^3) computational time. Please note that in our case the chessboard graph
        has unit-weight arcs
:return: list of lists, matrix of the distances between two vertices
"""
INF = float('inf')
nodes, adjacency = self.getNodes() #getting the dictionaries
        indexes = list(nodes.keys()) #the same keys index both dictionaries
dim = len(indexes)
#initializing the matrix
dist = [[INF for m in range(dim)] for n in range(dim)]
for i in range(dim):
ind = indexes[i]
dist[ind][ind] = 0.0
adj_nodes = adjacency[ind].getLastAddedList()
for adj in adj_nodes:
to_ind = adj.get_index()
dist[ind][to_ind] = 1.0
        #executing the dynamic programming algorithm
for k in range(dim):
for i in range(dim):
for j in range(dim):
if dist[i][k] != INF and dist[k][j] != INF and dist[i][k] + dist[k][j] < dist[i][j]:
dist[i][j] = dist[i][k] + dist[k][j]
return dist
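#Illustrative usage (a hedged sketch added for exposition, not part of the
#original module); assumes the libs.graph imports at the top of this file
#resolve. Builds the 3-node path a->b->c and runs the traversals above.
def _example_graph_usage():
    """hedged sketch: BFS, Dijkstra and Floyd-Warshall on a tiny graph"""
    g = GraphAdjacenceList()
    a = g.insertNode('a')
    b = g.insertNode('b')
    c = g.insertNode('c')
    g.linkNode(a, b)
    g.linkNode(b, c)
    bfs_tree = g.visitBFS(a) #visit tree rooted at 'a'
    sp_tree = g.Dijkstra(a) #shortest-path tree, unit arc costs
    dist = g.FloydWarshall() #all-pairs matrix: dist[0][2] == 2.0 via a->b->c
    return bfs_tree, sp_tree, dist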
|
IA-MP/KnightTour
|
libs/graph/Graph.py
|
Python
|
mit
| 11,556
|
[
"VisIt"
] |
453423fcfc3e6495cb6fab9f76df887ec5d5392e5f4e15f20f0f67e79d75fd14
|
import SloppyCell.Collections as Collections
expt = Collections.Experiment('RasGreen1NGF')
expt.longname = 'NGF Stimulation 50 ng/ml - Qiu and Green 1994'
expt.comments = """REF: M-S. Qiu and S. H. Green, Neuron (1991) 7, 937-946
CELLTYPE: PC12
MEAS: GTP binding by Ras in response to NGF at 50 ng/ml
UNITS: Ras.GTP/(Ras.GTP + Ras.GDP) X 10 (percent total / 10)
NOTES: Multiplication by 10 is just to put data on a better scale for
NOTES: plotting
NOTES: Error bars come from the original data set"""
expt.SetFixedScaleFactors({'RasActive':1.0/(0.2*600000*0.1)})
expt.SetData({'NGFstim50': {
'RasActive': {
3.0:(1.7, 0.5),
10.0:(1.1, 0.2),
30.0:(0.6, 0.1)
}
}
}
)
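# Reading the data structure above: the outer SetData key identifies the
# calculation/network ('NGFstim50'), the inner key the observable
# ('RasActive'), and each entry maps time -> (measured value, error bar),
# e.g. scaled Ras activity 1.7 +/- 0.5 at t = 3.0.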
|
GutenkunstLab/SloppyCell
|
Example/PC12/Experiments/RasGreen1NGF.py
|
Python
|
bsd-3-clause
| 946
|
[
"NEURON"
] |
643bbc19041b2b9ecdd5a3ae3b5f0eee7757a4dfe803f614d8790c74d585f9f6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Modified parameters file for the Hybrid LFP scheme, applying the methodology to
the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
'''
import numpy as np
import os
import json
from mpi4py import MPI # this is needed to initialize other classes correctly
import multiprocessing as mp # to facilitate OpenMP parallelization w. NEST
# if MPI.SIZE == 1
###################################
# Initialization of MPI stuff #
###################################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
'''
TODO: rename to simulation_and_model_params.py
'''
####################################
# HELPER FUNCTIONS #
####################################
def flattenlist(lst): return sum(sum(lst, []), [])
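# flattenlist removes two levels of nesting, e.g.
#   flattenlist([[[1, 2], [3]], [[4]]]) -> [1, 2, 3, 4]
# since each sum(..., []) concatenates one level of sub-lists.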
####################################
# SPATIAL CONNECTIVITY EXTRACTION #
####################################
'''
Include functions that extract information from binzegger.json here
'''
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']):
'''
Extract frequency of occurrences of those cell types that are modeled.
The data set contains cell types that are not modeled (TCs etc.)
The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1
'''
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
occurr = []
for cell_type in y:
occurr += [data['data'][cell_type]['occurrence']]
return list(np.array(occurr) / np.sum(occurr))
def get_L_yXL(fname, y, x_in_X, L):
'''
compute the layer specificity, defined as:
::
L_yXL = k_yXL / k_yX
'''
def _get_L_yXL_per_yXL(fname, x_in_X, X_index,
y, layer):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# Get number of synapses
if layer in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
# init variables
k_yXL = 0
k_yX = 0
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][layer][x] / 100.
k_yL = data['data'][y]['syn_dict'][layer]['number of synapses per neuron']
k_yXL += p_yxL * k_yL
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
if k_yXL != 0.:
return k_yXL / k_yX
else:
return 0.
else:
return 0.
# init dict
L_yXL = {}
# iterate over postsynaptic cell types
for y_value in y:
# container
data = np.zeros((len(L), len(x_in_X)))
# iterate over lamina
for i, Li in enumerate(L):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
data[i][j] = _get_L_yXL_per_yXL(fname, x_in_X,
X_index=j,
y=y_value,
layer=Li)
L_yXL[y_value] = data
return L_yXL
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y):
'''
compute the cell type specificity, defined as:
::
T_yX = K_yX / K_YX
= F_y * k_yX / sum_y(F_y*k_yX)
'''
def _get_k_yX_mul_F_y(y, y_index, X_index):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# init variables
k_yX = 0.
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
return k_yX * F_y[y_index]
# container
T_yX = np.zeros((len(y), len(x_in_X)))
# iterate over postsynaptic cell types
for i, y_value in enumerate(y):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
k_yX_mul_F_y = 0
for k, yy in enumerate(sum(y_in_Y, [])):
if y_value in yy:
for yy_value in yy:
ii = np.where(np.array(y) == yy_value)[0][0]
k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j)
if k_yX_mul_F_y != 0:
T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y
return T_yX
class general_params(object):
def __init__(self):
'''class collecting general model parameters'''
####################################
# REASON FOR THIS SIMULATION #
####################################
self.reason = 'Modified Potjans model with AC modulated TC activity'
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
####################################
# MAIN SIMULATION CONTROL #
####################################
# simulation step size
self.dt = 0.1
# simulation start
self.tstart = 0
# simulation stop
self.tstop = 5200
####################################
# OUTPUT LOCATIONS #
####################################
# TODO: try except does not work with hambach
# folder for all simulation output and scripts
# using the cluster's dedicated SCRATCH area
if 'SCRATCH' in os.environ and os.path.isdir(
os.path.join(os.environ['SCRATCH'], os.environ['USER'])):
self.savefolder = os.path.join(
os.environ['SCRATCH'],
os.environ['USER'],
'hybrid_model',
'simulation_output_modified_ac_exc')
# LOCALLY
else:
self.savefolder = 'simulation_output_modified_ac_exc'
# folder for simulation scripts
self.sim_scripts_path = os.path.join(self.savefolder, 'sim_scripts')
# folder for each individual cell's output
self.cells_path = os.path.join(self.savefolder, 'cells')
# folder for figures
self.figures_path = os.path.join(self.savefolder, 'figures')
# folder for population resolved output signals
self.populations_path = os.path.join(self.savefolder, 'populations')
# folder for raw nest output files
self.raw_nest_output_path = os.path.join(self.savefolder,
'raw_nest_output')
# folder for processed nest output files
self.spike_output_path = os.path.join(self.savefolder,
'processed_nest_output')
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# POPULATIONS #
####################################
# Number of populations
self.Npops = 9
# number of neurons in each population (unscaled)
self.full_scale_num_neurons = [[20683, # layer 23 e
5834], # layer 23 i
[21915, # layer 4 e
5479], # layer 4 i
[4850, # layer 5 e
1065], # layer 5 i
[14395, # layer 6 e
2948]] # layer 6 i
# Number of thalamic neurons/ point processes
self.n_thal = 902
# population names TODO: rename
self.X = [
'TC',
'L23E',
'L23I',
'L4E',
'L4I',
'L5E',
'L5I',
'L6E',
'L6I']
self.Y = self.X[1:]
# TC and cortical population sizes in one list TODO: rename
self.N_X = np.array([self.n_thal] +
flattenlist([self.full_scale_num_neurons]))
####################################
# CONNECTIVITY #
####################################
# intra-cortical connection probabilities between populations
# 23e 23i 4e 4i 5e 5i 6e 6i
self.conn_probs = np.array([[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.], # 23e
[0.1346, 0.1371, 0.0316, 0.0515,
0.0755, 0., 0.0042, 0.], # 23i
[0.0077, 0.0059, 0.0497, 0.135,
0.0067, 0.0003, 0.0453, 0.], # 4e
[0.0691, 0.0029, 0.0794, 0.1597,
0.0033, 0., 0.1057, 0.], # 4i
[0.1004, 0.0622, 0.0505, 0.0057,
0.0831, 0.3726, 0.0204, 0.], # 5e
[0.0548, 0.0269, 0.0257, 0.0022,
0.06, 0.3158, 0.0086, 0.], # 5i
[0.0156, 0.0066, 0.0211, 0.0166, 0.0572,
0.0197, 0.0396, 0.2252], # 6e
[0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]) # 6i
# connection probabilities for thalamic input
self.C_th = [[0.0, # layer 23 e
0.0], # layer 23 i
[0.0983, # layer 4 e
0.0619], # layer 4 i
[0.0, # layer 5 e
0.0], # layer 5 i
[0.0512, # layer 6 e
0.0196]] # layer 6 i
# full connection probabilities including TC connections
self.C_YX = np.c_[flattenlist([self.C_th]), self.conn_probs]
####################################
# CONNECTION PROPERTIES #
####################################
# mean EPSP amplitude (mV) for all connections except L4e->L23e
self.PSP_e = 0.15
# mean EPSP amplitude (mv) for L4e->L23e connections
# FIX POLISH NOTATION !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
self.PSP_23e_4e = self.PSP_e * 2
# standard deviation of PSC amplitudes relative to mean PSC amplitudes
# this is sigma/mu in probability distribution
# Gaussian (lognormal_weights = False): mu is mean, sigma is standard deviation
        # Lognormal (lognormal_weights = True): mean and stdev can be
# calculated from mu and sigma
self.PSC_rel_sd = 3.0
# IPSP amplitude relative to EPSP amplitude
self.g = -4.
# L4i ->L4e stronger in order to get rid of 84 Hz peak
self.g_4e_4i = self.g * 1.125
# Whether to use lognormal weights or not
self.lognormal_weights = True
# mean dendritic delays for excitatory and inhibitory transmission (ms)
self.delays = [1.5, 0.75]
# standard deviation relative to mean delays; former delay_rel
self.delay_rel_sd = 0.5
####################################
# CELL-TYPE PARAMETERS #
####################################
# Note that these parameters are only relevant for the point-neuron network in case
# one wants to calculate depth-resolved cell-type specific input
# currents
# point to .json connectivity table file
self.connectivity_table = 'binzegger_connectivity_table.json'
# list of cell type names used in this script
# names of every post-syn pop layer
self.y_in_Y = [
[['p23'], ['b23', 'nb23']],
[['p4', 'ss4(L23)', 'ss4(L4)'], ['b4', 'nb4']],
[['p5(L23)', 'p5(L56)'], ['b5', 'nb5']],
[['p6(L4)', 'p6(L56)'], ['b6', 'nb6']]]
self.y = flattenlist(self.y_in_Y)
# need presynaptic cell type to population mapping
self.x_in_X = [['TCs', 'TCn']] + sum(self.y_in_Y, [])
# map the pre-synaptic populations to the post-syn populations
self.mapping_Yy = list(zip(
['L23E', 'L23I', 'L23I',
'L4E', 'L4E', 'L4E', 'L4I', 'L4I',
'L5E', 'L5E', 'L5I', 'L5I',
'L6E', 'L6E', 'L6I', 'L6I'],
self.y))
# Frequency of occurrence of each cell type (F_y); 1-d array
self.F_y = get_F_y(fname=self.connectivity_table, y=self.y)
# Relative frequency of occurrence of each cell type within its
# population (F_{y,Y})
self.F_yY = [[get_F_y(fname=self.connectivity_table, y=y)
for y in Y] for Y in self.y_in_Y]
# Number of neurons of each cell type (N_y); 1-d array
self.N_y = np.array([self.full_scale_num_neurons[layer][pop] * self.F_yY[layer][pop][k]
for layer, array in enumerate(self.y_in_Y)
for pop, cell_types in enumerate(array)
for k, _ in enumerate(cell_types)]).astype(int)
# compute the number of synapses as in Potjans&Diesmann 2012
K_YX = np.zeros(self.C_YX.shape)
for i in range(K_YX.shape[1]):
K_YX[:, i] = (np.log(1. - self.C_YX[:, i]) /
np.log(1. - 1. / (self.N_X[1:] *
self.N_X[i])))
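        # This inverts the random-convergent-connectivity relation
        #   C = 1 - (1 - 1/(N_pre*N_post))**K
        #   =>  K = log(1 - C) / log(1 - 1/(N_pre*N_post)),
        # the same forward formula used to compute self.C_yXL below.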
        # spatial connection probabilities on each subpopulation
        # Each key must correspond to a subpopulation like 'L23E' used everywhere else,
        # each array maps thalamic and intracortical connections.
        # First column is thalamic connections, and the rest intracortical,
        # ordered like 'L23E', 'L23I' etc., first row is the normalised probability of
        # connection within L1, L2, etc.;
self.L_yXL = get_L_yXL(fname=self.connectivity_table,
y=self.y,
x_in_X=self.x_in_X,
L=['1', '23', '4', '5', '6'])
# compute the cell type specificity
self.T_yX = get_T_yX(fname=self.connectivity_table, y=self.y,
y_in_Y=self.y_in_Y, x_in_X=self.x_in_X,
F_y=self.F_y)
Y, y = list(zip(*self.mapping_Yy))
# assess relative distribution of synapses for a given celltype
self.K_yXL = {}
#self.T_yX = {}
for i, (Y, y) in enumerate(self.mapping_Yy):
# fill in K_yXL (layer specific connectivity)
self.K_yXL[y] = (self.T_yX[i, ] *
K_YX[np.array(self.Y) == Y, ] *
self.L_yXL[y]).astype(int)
# number of incoming connections per cell type per layer per cell
self.k_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.k_yXL.update({y: (1. * self.K_yXL[y]).astype(int) // N_y})
# calculate corresponding connectivity to K_yXL
self.C_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.C_yXL.update(
{y: 1. - (1. - 1. / (N_y * self.N_X))**self.K_yXL[y]})
##########################################################################
class point_neuron_network_params(general_params):
def __init__(self):
'''class point-neuron network parameters'''
# inherit general params
general_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
# use same number of threads as MPI COMM.size() for parallel jobs
# else the number of processors for serial jobs
if SIZE > 1:
self.total_num_virtual_procs = SIZE
else:
self.total_num_virtual_procs = mp.cpu_count()
####################################
# RNG PROPERTIES #
####################################
# offset for RNGs
self.seed_offset = 45
####################################
# RECORDING PARAMETERS #
####################################
self.overwrite_existing_files = True
# recording can either be done from a fraction of neurons in each
# population or from a fixed number
# whether to record spikes from a fixed fraction of neurons in each
# population.
self.record_fraction_neurons_spikes = True
if self.record_fraction_neurons_spikes:
self.frac_rec_spikes = 1.
else:
self.n_rec_spikes = 100
# whether to record membrane potentials from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_voltage = False
if self.record_fraction_neurons_voltage:
self.frac_rec_voltage = 0.1
else:
self.n_rec_voltage = 50 # 100
# whether to record weighted input spikes from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_input_spikes = False
if self.record_fraction_neurons_input_spikes:
self.frac_rec_input_spikes = 0.1
else:
self.n_rec_input_spikes = 20 # 100
# number of recorded neurons for depth resolved input currents
self.n_rec_depth_resolved_input = 0
# NESTio recording format
self.record_to = 'ascii'
# whether to record thalamic spikes
self.record_thalamic_spikes = True
# global ID file name
self.GID_filename = 'population_GIDs.dat'
# readout global ID file name
self.readout_GID_filename = 'readout_GIDs.dat'
# stem for spike detector file labels
self.spike_recorder_label = 'spikes_'
# stem for voltmeter file labels
self.voltmeter_label = 'voltages_'
# stem for thalamic spike detector file labels
self.th_spike_recorder_label = 'spikes_0'
# stem for in-degree file labels
self.in_degree_label = 'in_degrees_'
# stem for file labels for in-degree from thalamus
self.th_in_degree_label = 'in_degrees_th_'
# stem for weighted input spikes labels
self.weighted_input_spikes_label = 'weighted_input_spikes_'
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING #
####################################
# scaling parameter for population sizes
self.area = 1.0
# preserve indegrees when downscaling
self.preserve_K = False
####################################
# SINGLE NEURON PARAMS #
####################################
# neuron model
self.neuron_model = '/iaf_psc_exp'
# mean of initial membrane potential (mV)
self.Vm0_mean = -58.0
# std of initial membrane potential (mV)
self.Vm0_std = 10.0
# mean of threshold potential (mV)
self.V_th_mean = -50.
# std of threshold potential (mV)
self.V_th_std = 1E-8 # nest::NormalParameter: std > 0 required.
self.model_params = {'tau_m': 10., # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
# absolute refractory period (ms)
't_ref': 2.,
# resting membrane potential (mV)
'E_L': -65.,
'V_th': self.V_th_mean, # spike threshold (mV)
'C_m': 250., # membrane capacitance (pF)
'V_reset': -65. # reset potential (mV)
}
####################################
# EXTERNAL INPUTS #
####################################
# number of external inputs (Potjans-Diesmann model 2012)
self.K_bg = [[1600, # layer 23 e
1500], # layer 23 i
[2100, # layer 4 e
1900], # layer 4 i
[2000, # layer 5 e
1900], # layer 5 i
[2900, # layer 6 e
2100]] # layer 6 i
        # rate of Poisson input at each external input synapse (spikes/s)
self.bg_rate = 0.
# rate of equivalent input used for DC amplitude calculation,
# set to zero if self.bg_rate > 0.
self.bg_rate_dc = 8.
# DC amplitude at each external input synapse (pA)
# to each neuron via 'dc_amplitude = tau_syn_ex/1000*bg_rate*PSC_ext'
self.dc_amplitude = self.model_params["tau_syn_ex"] * \
self.bg_rate_dc * self._compute_J()
# mean EPSP amplitude (mV) for thalamic and non-thalamic external input
# spikes
self.PSP_ext = 0.15
# mean delay of thalamic input (ms)
self.delay_th = 1.5
# standard deviation relative to mean delay of thalamic input
self.delay_th_rel_sd = 0.5
####################################
# THALAMIC INPUT VERSIONS #
####################################
# off-option for start of thalamic input versions
self.off = 100. * self.tstop
# poisson_generator (pure Poisson input)
self.th_poisson_start = self.off # onset (ms)
self.th_poisson_duration = 10. # duration (ms)
        self.th_poisson_rate = 120.  # rate (spikes/s)
# spike_generator
# Note: This can be used with a large Gaussian delay distribution in order to mimic a
# Gaussian pulse packet which is different for each thalamic neuron
self.th_spike_times = [self.off] # time of the thalamic pulses (ms)
# create n_thal spikegenerator nodes connected to each respective
# postsynaptic parrot_neuron. Expected format is a len(self.n_thal) list
# of lists of activation times.
# Turn activation off by setting it as [[] for i in range(self.n_thal)]
self.th_spike_generator_times = [[] for i in range(self.n_thal)]
# sinusoidal_poisson_generator (oscillatory Poisson input)
self.th_sin_start = 0. # onset (ms)
self.th_sin_duration = 5000. # duration (ms)
        self.th_sin_mean_rate = 30.  # mean rate (spikes/s)
        # rate modulation amplitude (spikes/s)
self.th_sin_fluc_rate = 30.
# frequency of the rate modulation (Hz)
self.th_sin_freq = 15.
# phase of rate modulation (deg)
self.th_sin_phase = 0.
# Gaussian_pulse_packages
self.th_gauss_times = [self.off] # package center times
self.th_gauss_num_spikes_per_packet = 1 # number of spikes per packet
        self.th_gauss_sd = 5.  # std of Gaussian pulse packet (ms)
####################################
# SPATIAL ORGANIZATION #
####################################
# needed for spatially resolved input currents
# number of layers TODO: find a better solution for that
self.num_input_layers = 5
def _compute_J(self):
        r'''
Compute the current amplitude corresponding to the exponential
synapse model PSP amplitude
Derivation using sympy:
::
from sympy import *
#define symbols
t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax')
#assume zero delay, t >= 0
#using eq. 8.10 in Sterrat et al
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print 'V = %s' % V
#find time of V == Vmax
dVdt = diff(V, t)
print 'dVdt = %s' % dVdt
[t] = solve(dVdt, t)
print 't(t@dVdT==Vmax) = %s' % t
#solve for Is at time of maxima
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print 'V(%s) = %s' % (t, V)
[Is] = solve(V-Vmax, Is)
print 'Is = %s' % Is
resulting in:
::
Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts))
- exp(ts*log(ts/tm)/(tm - ts))))
Latex source:
::
J&=-\frac{C_\text{m} V_\text{PSP} (\tau_\text{m} - \tau_\text{syn})}{\tau_\text{m} \tau_\text{syn}(
\\exp\frac{\tau_\text{m} \\ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}}
-\\exp\frac{\tau_\text{syn} \\ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}})} \\
I^\text{ext} &= \tau_\text{syn} \nu^\text{ext} J \\
&=-\frac{\nu^\text{ext}C_\text{m} V_\text{PSP} (\tau_\text{m} - \tau_\text{syn})}{\tau_\text{m}(
\\exp\frac{\tau_\text{m} \\ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}}
-\\exp\frac{\tau_\text{syn} \\ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}})}
'''
# LIF params
tm = self.model_params['tau_m']
Cm = self.model_params['C_m']
# synapse
ts = self.model_params['tau_syn_ex']
Vmax = self.PSP_e
# max current amplitude
J = Cm * Vmax * (-tm + ts) / (tm * ts * (np.exp(tm * np.log(ts / tm) /
(tm - ts)) - np.exp(ts * np.log(ts / tm) / (tm - ts))))
# unit conversion pF*mV -> nA
J *= 1E-3
return J
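# Numeric check of _compute_J (a worked example for clarity): with the defaults
# above (tau_m = 10 ms, C_m = 250 pF, tau_syn_ex = 0.5 ms, PSP_e = 0.15 mV) the
# formula gives J ~ 0.0878 nA, i.e. the ~87.8 pA reference synaptic weight of
# the Potjans-Diesmann microcircuit.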
class multicompartment_params(point_neuron_network_params):
'''
Inherited class defining additional attributes needed by e.g., the
classes population.Population and
population.DummyNetwork
    This class does not take any kwargs
'''
def __init__(self):
'''
Inherited class defining additional attributes needed by e.g., the
classes population.Population and
population.DummyNetwork
        This class does not take any kwargs
'''
# initialize parent classes
point_neuron_network_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
#######################################
# PARAMETERS FOR LOADING NEST RESULTS #
#######################################
# parameters for class population.DummyNetwork class
self.networkSimParams = {
'simtime': self.tstop - self.tstart,
'dt': self.dt,
'spike_output_path': self.spike_output_path,
'label': 'population_spikes',
'ext': 'dat',
'GIDs': self.get_GIDs(),
'X': self.X,
'skiprows': 0,
}
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING (VOLUME not density) #
####################################
self.SCALING = 1.0
####################################
# MORPHOLOGIES #
####################################
# list of morphology files with default location, testing = True
# will point to simplified morphologies
testing = False
if testing:
self.PATH_m_y = os.path.join('morphologies', 'ballnsticks')
self.m_y = [Y + '_' + y + '.hoc' for Y, y in self.mapping_Yy]
else:
self.PATH_m_y = os.path.join('morphologies', 'stretched')
self.m_y = [
'L23E_oi24rpy1.hoc',
'L23I_oi38lbc1.hoc',
'L23I_oi38lbc1.hoc',
'L4E_53rpy1.hoc',
'L4E_j7_L4stellate.hoc',
'L4E_j7_L4stellate.hoc',
'L4I_oi26rbc1.hoc',
'L4I_oi26rbc1.hoc',
'L5E_oi15rpy4.hoc',
'L5E_j4a.hoc',
'L5I_oi15rbc1.hoc',
'L5I_oi15rbc1.hoc',
'L6E_51-2a.CNG.hoc',
'L6E_oi15rpy4.hoc',
'L6I_oi15rbc1.hoc',
'L6I_oi15rbc1.hoc',
]
####################################
# CONNECTION WEIGHTS #
####################################
# compute the synapse weight from fundamentals of exp synapse LIF
# neuron
self.J = self._compute_J()
# set up matrix containing the synapse weights between any population X
# and population Y, including exceptions for certain connections
J_YX = np.zeros(self.C_YX.shape)
J_YX += self.J
J_YX[:, 2::2] *= self.g
if hasattr(self, 'PSP_23e_4e'):
J_YX[0, 3] *= self.PSP_23e_4e / self.PSP_e
if hasattr(self, 'g_4e_4i'):
J_YX[2, 4] *= self.g_4e_4i / self.g
# knockout experiment, set all inhibitory weights to zero
J_YX[J_YX <= 0] = 0.
# extrapolate weights between populations X and
# cell type y in population Y
self.J_yX = {}
for Y, y in self.mapping_Yy:
[i] = np.where(np.array(self.Y) == Y)[0]
self.J_yX.update({y: J_YX[i, ]})
####################################
# GEOMETRY OF CORTICAL COLUMN #
####################################
# set the boundaries of each layer, L1->L6,
# and mean depth of soma layers
self.layerBoundaries = np.array([[0.0, -81.6],
[-81.6, -587.1],
[-587.1, -922.2],
[-922.2, -1170.0],
[-1170.0, -1491.7]])
        # assess the depth of each of the 16 subpopulations
self.depths = self._calcDepths()
# make a nice structure with data for each subpopulation
self.y_zip_list = list(zip(self.y, self.m_y,
self.depths, self.N_y))
##############################################################
# POPULATION PARAMS (cells, population, synapses, electrode) #
##############################################################
# Global LFPy.Cell-parameters, by default shared between populations
# Some passive parameters will not be fully consistent with LIF params
self.cellParams = {
'v_init': self.model_params['E_L'],
'cm': 1.0,
'Ra': 150,
'passive': True,
'passive_parameters': dict(g_pas=1. / (self.model_params['tau_m'] * 1E3), # assume cm=1
e_pas=self.model_params['E_L']),
'nsegs_method': 'lambda_f',
'lambda_f': 100,
'dt': self.dt,
'tstart': self.tstart,
'tstop': self.tstop,
'verbose': False,
}
# layer specific LFPy.Cell-parameters as nested dictionary
self.yCellParams = self._yCellParams()
# set the axis of which each cell type y is randomly rotated,
# SS types and INs are rotated around both x- and z-axis
# in the population class, while P-types are
# only rotated around the z-axis
self.rand_rot_axis = {}
for y, _, _, _ in self.y_zip_list:
# identify pyramidal cell populations:
if y.rfind('p') >= 0:
self.rand_rot_axis.update({y: ['z']})
else:
self.rand_rot_axis.update({y: ['x', 'z']})
# additional simulation kwargs, see LFPy.Cell.simulate() docstring
self.simulationParams = {'rec_imem': True}
# a dict setting the number of cells N_y and geometry
# of cell type population y
self.populationParams = {}
for y, _, depth, N_y in self.y_zip_list:
self.populationParams.update({
y: {
'number': int(N_y * self.SCALING),
'radius': np.sqrt(1000**2 / np.pi),
'z_min': depth - 25,
'z_max': depth + 25,
'min_cell_interdist': 1.,
'min_r': [[-1E199, -1600, -1550, 1E99], [0, 0, 10, 10]]
}
})
# Set up cell type specific synapse parameters in terms of synapse model
# and synapse locations
self.synParams = {}
for y in self.y:
if y.rfind('p') >= 0:
# pyramidal types have apical dendrites
section = ['apic', 'dend']
else:
# other cell types do not
section = ['dend']
self.synParams.update({
y: {
'syntype': 'ExpSynI', # current based exponential synapse
'section': section,
# 'tau' : self.model_params["tau_syn_ex"],
},
})
# set up dictionary of synapse time constants specific to each
# postsynaptic cell type and presynaptic population
self.tau_yX = {}
for y in self.y:
self.tau_yX.update({
y: [self.model_params["tau_syn_in"] if 'I' in X else
self.model_params["tau_syn_ex"] for X in self.X]
})
# synaptic delay parameters, loc and scale is mean and std for every
# network population, negative values will be removed
self.synDelayLoc, self.synDelayScale = self._synDelayParams()
# Define electrode geometry corresponding to a laminar electrode,
# where contact points have a radius r, surface normal vectors N,
# and LFP calculated as the average LFP in n random points on
        # each contact. The recording electrode emulates a NeuroNexus array;
# contact 0 is superficial
self.electrodeParams = {
# contact locations:
'x': np.zeros(16),
'y': np.zeros(16),
'z': -np.mgrid[0:16] * 100,
# extracellular conductivity:
'sigma': 0.3,
# contact surface normals, radius, n-point averaging
'N': np.array([[1, 0, 0]] * 16),
'r': 7.5,
'n': 50,
'seedvalue': None,
            # dendrite line sources, soma sphere source (Linden2014);
            # 'root_as_point' also ensures no somas within the constraints
            # of the "electrode shank":
            'method': 'root_as_point',
}
# parameters for LFPykit.LaminarCurrentSourceDensity
self.CSDParams = dict(
z=np.array([[-(i + 1) * 100, -i * 100] for i in range(16)]) + 50.,
r=np.ones(16) * np.sqrt(1000**2 / np.pi) # same as pop radius
)
        # these variables will be saved to file for each cell and electrode
# object
self.savelist = []
#########################################
# MISC #
#########################################
# time resolution of downsampled data in ms
self.dt_output = 1.
        # set the fraction of neurons per population whose single-cell LFP contributions are stored
self.recordSingleContribFrac = 0.
def get_GIDs(self):
GIDs = {}
ind = 1
for i, (X, N_X) in enumerate(zip(self.X, self.N_X)):
GIDs[X] = [ind, N_X]
ind += N_X
return GIDs
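    # Example of the resulting layout with the unscaled defaults above:
    # GIDs == {'TC': [1, 902], 'L23E': [903, 20683], 'L23I': [21586, 5834], ...},
    # i.e. each population X maps to [first global ID, population size N_X],
    # with IDs assigned contiguously in the order of self.X.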
def _synDelayParams(self):
'''
set up the detailed synaptic delay parameters,
loc is mean delay,
scale is std with low bound cutoff,
assumes numpy.random.normal is used later
'''
delays = {}
# mean delays
loc = np.zeros((len(self.y), len(self.X)))
loc[:, 0] = self.delays[0]
loc[:, 1::2] = self.delays[0]
loc[:, 2::2] = self.delays[1]
# standard deviations
scale = loc * self.delay_rel_sd
# prepare output
delay_loc = {}
for i, y in enumerate(self.y):
delay_loc.update({y: loc[i]})
delay_scale = {}
for i, y in enumerate(self.y):
delay_scale.update({y: scale[i]})
return delay_loc, delay_scale
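    # illustrative use of the returned dicts (matching the numpy.random.normal
    # assumption noted in the docstring; hypothetical, not part of the class):
    #
    # d = np.random.normal(loc=delay_loc['p4'][0], scale=delay_scale['p4'][0], size=100)
    # d = d[d > 0]  # negative delays are discarded downstream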
def _calcDepths(self):
'''
return the cortical depth of each subpopulation
'''
depths = self.layerBoundaries.mean(axis=1)[1:]
depth_y = []
for y in self.y:
if y in ['p23', 'b23', 'nb23']:
depth_y = np.r_[depth_y, depths[0]]
elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']:
depth_y = np.r_[depth_y, depths[1]]
elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']:
depth_y = np.r_[depth_y, depths[2]]
elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']:
depth_y = np.r_[depth_y, depths[3]]
else:
                raise Exception('Unknown cell type y=%s, revise parameters' % y)
return depth_y
def _yCellParams(self):
'''
Return dict with parameters for each population.
The main operation is filling in cell type specific morphology
'''
# cell type specific parameters going into LFPy.Cell
yCellParams = {}
for layer, morpho, _, _ in self.y_zip_list:
yCellParams.update({layer: self.cellParams.copy()})
yCellParams[layer].update({
'morphology': os.path.join(self.PATH_m_y, morpho),
})
return yCellParams
if __name__ == '__main__':
params = multicompartment_params()
print(dir(params))
| espenhgn/hybridLFPy | examples/Hagen_et_al_2016_cercor/cellsim16popsParams_modified_ac_exc.py | Python | gpl-3.0 | 39,720 | ["Gaussian", "NEURON"] | c9333004f5790f864c7be1112bb83c5922d26b3f238a03bb7a415bb4f5f85394 |
# -*- coding: utf-8 -*-
"""
===================================================================
Determining and plotting the altitude/azimuth of a celestial object
===================================================================
This example demonstrates coordinate transformations and the creation of
visibility curves to assist with observing run planning.
In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33.
The altitude-azimuth coordinates are then found using
`astropy.coordinates.EarthLocation` and `astropy.time.Time` objects.
This example is meant to demonstrate the capabilities of the
`astropy.coordinates` package. For more convenient and/or complex observation
planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_
package.
*By: Erik Tollerud, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Let's suppose you are planning to visit picturesque Bear Mountain State Park
# in New York, USA. You're bringing your telescope with you (of course), and
# someone told you M33 is a great target to observe there. You happen to know
# you're free at 11:00 pm local time, and you want to know if it will be up.
# Astropy can answer that.
#
# Import numpy and matplotlib. For the latter, use a nicer set of plot
# parameters and set up support for plotting/converting quantities.
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style, quantity_support
plt.style.use(astropy_mpl_style)
quantity_support()
##############################################################################
# Import the packages necessary for finding coordinates and making
# coordinate transformations
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
##############################################################################
# `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object
# names and retrieve coordinates.
#
# Get the coordinates of M33:
m33 = SkyCoord.from_name('M33')
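##############################################################################
# `~astropy.coordinates.SkyCoord.from_name` queries Simbad over the network;
# if you are offline, the coordinates can instead be given explicitly. The
# values below are approximate J2000 coordinates for M33, shown for
# illustration only:
#
# m33 = SkyCoord(ra=23.4621*u.deg, dec=30.6599*u.deg)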
##############################################################################
# Use `astropy.coordinates.EarthLocation` to provide the location of Bear
# Mountain and set the time to 11pm EDT on 2012 July 12:
bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m)
utcoffset = -4*u.hour # Eastern Daylight Time
time = Time('2012-7-12 23:00:00') - utcoffset
##############################################################################
# `astropy.coordinates.EarthLocation.get_site_names` and
# `~astropy.coordinates.EarthLocation.of_site` can be used to get
# locations of major observatories.
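#
# For example (downloads the site registry on first use, hence shown
# commented out; any name returned by ``get_site_names()`` works):
#
# keck = EarthLocation.of_site('Keck Observatory')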
#
# Use `astropy.coordinates` to find the Alt, Az coordinates of M33 as
# observed from Bear Mountain at 11pm on 2012 July 12.
m33altaz = m33.transform_to(AltAz(obstime=time, location=bear_mountain))
print(f"M33's Altitude = {m33altaz.alt:.2}")
##############################################################################
# This is helpful since it turns out M33 is barely above the horizon at this
# time. It's more informative to find M33's airmass over the course of
# the night.
#
# Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm
# and 7am EDT:
midnight = Time('2012-7-13 00:00:00') - utcoffset
delta_midnight = np.linspace(-2, 10, 100)*u.hour
frame_July13night = AltAz(obstime=midnight+delta_midnight,
location=bear_mountain)
m33altazs_July13night = m33.transform_to(frame_July13night)
##############################################################################
# convert alt, az to airmass with `~astropy.coordinates.AltAz.secz` attribute:
m33airmasss_July13night = m33altazs_July13night.secz
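##############################################################################
# As a quick check (added for illustration; airmass is only meaningful above
# the horizon), print the best airmass M33 reaches during the night:
above_horizon = m33altazs_July13night.alt > 0*u.deg
print(f"Minimum airmass: {m33airmasss_July13night[above_horizon].min():.3}")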
##############################################################################
# Plot the airmass as a function of time:
plt.plot(delta_midnight, m33airmasss_July13night)
plt.xlim(-2, 10)
plt.ylim(1, 4)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Airmass [Sec(z)]')
plt.show()
##############################################################################
# Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000
# evenly spaced times between noon on July 12 and noon on July 13:
from astropy.coordinates import get_sun
delta_midnight = np.linspace(-12, 12, 1000)*u.hour
times_July12_to_13 = midnight + delta_midnight
frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain)
sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13)
##############################################################################
# Do the same with `~astropy.coordinates.get_moon` to find when the moon is
# up. Be aware that this will need to download a 10MB file from the internet
# to get a precise location of the moon.
from astropy.coordinates import get_moon
moon_July12_to_13 = get_moon(times_July12_to_13)
moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13)
##############################################################################
# Find the alt,az coordinates of M33 at those same times:
m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13)
##############################################################################
# Make a beautiful figure illustrating nighttime and the altitudes of M33 and
# the Sun over that time:
plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun')
plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon')
plt.scatter(delta_midnight, m33altazs_July12_to_13.alt,
c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8,
cmap='viridis')
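# Shade the background to mark (for illustration) when the Sun is below the
# horizon (grey) and more than 18 degrees below it, i.e. astronomical
# darkness (black):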
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0)
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0)
plt.colorbar().set_label('Azimuth [deg]')
plt.legend(loc='upper left')
plt.xlim(-12*u.hour, 12*u.hour)
plt.xticks((np.arange(13)*2-12)*u.hour)
plt.ylim(0*u.deg, 90*u.deg)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Altitude [deg]')
plt.show()
| pllim/astropy | examples/coordinates/plot_obs-planning.py | Python | bsd-3-clause | 6,289 | ["VisIt"] | 12bef7dc219d41692d2cbc9fe75a49900ea1f0df6e831e5d70e413d075a0fb12 |
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
from http import cookies
import hashlib
import time
import pymysql
import dbSession
import dbShared
import urllib.parse
import datetime
sys.path.append("../")
import dbInfo
C = cookies.SimpleCookie()
useCookies = 1
result = ''
linkappend = ''
exactUser = ''
newhashDate = datetime.datetime(2016, 5, 16, 20, 30)
try:
C.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
form = cgi.FieldStorage()
src_url = form.getfirst('src_url')
sid = form.getfirst('gh_sid')
loginp = form.getfirst('loginu')
passp = form.getfirst('passu')
passc = form.getfirst('passc')
persist = form.getfirst('persist')
push_key = form.getfirst('push_key')
#sessions persist up to 180 days (180 * 24 * 3600 = 15552000 seconds)
duration = 15552000
#escape input to prevent sql injection
loginp = dbShared.dbInsertSafe(loginp)
sid = dbShared.dbInsertSafe(sid)
if (loginp == None or (passp == None and passc == None)):
result = 'no login data'
else:
conn = dbShared.ghConn()
cursor = conn.cursor()
cursor.execute('SELECT userID, userPassword, userState, created, lastReset FROM tUsers WHERE userID=%s', (loginp,))
row = cursor.fetchone()
if row == None:
result = 'bad user'
elif not row[2] > 0:
result = 'unverified account'
elif row[2] == 3:
result = 'account deleted'
else:
exactUser = row[0]
        # New hash date is when the salt that goes with the password to create
        # the hash was changed from loginp to DB_KEY3, since loginp did not
        # always exactly match the username
        if passc != None:
            # already encrypted password was sent
            crypt_pass = passc
        else:
            newHash = dbInfo.DB_KEY3 + passp
            oldHash = loginp + passp
            if row[3] > newhashDate or (row[4] != None and row[4] > newhashDate):
                crypt_pass = hashlib.sha1(newHash.encode()).hexdigest()
            else:
                crypt_pass = hashlib.sha1(oldHash.encode()).hexdigest()
if row[1] == crypt_pass:
updatestr = 'UPDATE tUsers SET lastLogin=NOW() WHERE userID=%s'
cursor.execute(updatestr, (loginp,))
dbSession.verifySessionDB()
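            # derive an opaque session id by hashing the current timestamp plus the user id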
sidHash = str(time.time()) + exactUser
sid = hashlib.sha1(sidHash.encode()).hexdigest()
updatestr = 'INSERT INTO tSessions (sid, userID, expires, pushKey) VALUES (%s, %s, %s, %s)'
cursor.execute(updatestr, (sid, exactUser, time.time() + duration, push_key))
result = 'success'
else:
result = 'bad password or user name'
cursor.close()
conn.close()
if sid == None:
sid = ""
if useCookies:
C['loginAttempt'] = result
if result == "success":
# session id cookie expires when browser closes unless we are told to persist
expiration = datetime.datetime.utcnow() + datetime.timedelta(days=180)
C['gh_sid'] = sid
if persist != None:
C['gh_sid']['expires'] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
# userid and theme stay for up to 7 days
expiration = datetime.datetime.now() + datetime.timedelta(days=7)
C['userID'] = exactUser
C['userID']['expires'] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
C['uiTheme'] = dbShared.getUserAttr(loginp, 'themeName')
C['uiTheme']['expires'] = expiration.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
print(C)
else:
# add results to url if not using cookies
linkappend = 'loginAttempt=' + urllib.parse.quote(result) + '&gh_sid=' + sid
if src_url != None:
if src_url.find('?') > -1:
queryChar = '&'
else:
queryChar = '?'
# go back where they came from
print('Status: 303 See Other')
print('Location: ' + src_url + queryChar + linkappend)
print('')
else:
print('Content-Type: text/html\n')
print(result + '-' + sid)
| pwillworth/galaxyharvester | html/authUser.py | Python | gpl-3.0 | 4,311 | ["Galaxy"] | c752c6b6074c7a9d00def465275ac21226bc5a2314f6413f49aee0ecbe1c8d5a |
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Convert a qual (qv) file to several BinnedArray files for fast seek.
This script takes approximately 4 seconds per 1 million base pairs.
The input format is fasta style quality -- fasta headers followed by
whitespace separated integers.
usage: %prog qual_file output_file
"""
import string
import psyco_full
import sys
from binned_array import *
from bx.cookbook import *
import fileinput
def main():
args = sys.argv[1:]
try:
qual_file = args[ 0 ]
output_file = args[ 1 ]
    except IndexError:
print "usage: qual_file output_file"
sys.exit()
qual = fileinput.FileInput( qual_file )
outfile = None
outbin = None
base_count = 0
mega_count = 0
for line in qual:
line = line.rstrip("\r\n")
if line.startswith(">"):
# close old
if outbin and outfile:
print "\nFinished region " + region + " at " + str(base_count) + " base pairs."
outbin.finish()
outfile.close()
# start new file
region = line.lstrip(">")
outfname = output_file + "." + region + ".bqv"
print "Writing region " + region + " to file " + outfname
outfile = open( outfname , "wb")
outbin = BinnedArrayWriter(outfile, typecode='b', default=0)
base_count = 0
mega_count = 0
else:
if outfile and outbin:
nums = line.split()
for val in nums:
outval = int(val)
assert outval <= 255 and outval >= 0
outbin.write(outval)
base_count += 1
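                    # emit a progress marker roughly once per million bases written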
if (mega_count * 1000000) <= base_count:
sys.stdout.write(str(mega_count)+" ")
sys.stdout.flush()
mega_count = base_count // 1000000 + 1
if outbin and outfile:
print "\nFinished region " + region + " at " + str(base_count) + " base pairs."
outbin.finish()
outfile.close()
if __name__ == "__main__":
main()
| bxlab/HiFive_Paper | Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/qv_to_bqv.py | Python | bsd-3-clause | 2,126 | ["Galaxy"] | 36ee50fb5a8586892adcef27fc8629b61688509f97924c1eae45ec43c118df7d |
#!/usr/bin/env python
import copy
import json
import os
import os.path
import re
import sys
from collections import OrderedDict
from CTDopts.CTDopts import (
_Choices,
_FileFormat,
_InFile,
_Null,
_NumericRange,
_OutFile,
_OutPrefix,
ModelError,
ParameterGroup
)
from lxml import etree
from lxml.etree import (
CDATA,
Element,
ElementTree,
parse,
ParseError,
strip_elements,
SubElement
)
from ..common import (
logger,
utils
)
from ..common.exceptions import (
ApplicationException,
InvalidModelException
)
# mapping to CTD types to Galaxy types
TYPE_TO_GALAXY_TYPE = {int: 'integer', float: 'float', str: 'text', bool: 'boolean', _InFile: 'txt',
_OutFile: 'txt', _Choices: 'select', _OutPrefix: 'output-prefix'}
GALAXY_TYPE_TO_TYPE = dict()
for k in TYPE_TO_GALAXY_TYPE:
GALAXY_TYPE_TO_TYPE[TYPE_TO_GALAXY_TYPE[k]] = k
STDIO_MACRO_NAME = "stdio"
REQUIREMENTS_MACRO_NAME = "requirements"
ADVANCED_OPTIONS_NAME = "adv_opts_"
REQUIRED_MACROS = [REQUIREMENTS_MACRO_NAME, STDIO_MACRO_NAME, ADVANCED_OPTIONS_NAME + "macro"]
class ExitCode:
def __init__(self, code_range="", level="", description=None):
self.range = code_range
self.level = level
self.description = description
class DataType:
def __init__(self, extension, galaxy_extension, composite=None):
self.extension = extension
self.galaxy_extension = galaxy_extension
self.composite = composite
def add_specific_args(parser):
"""
add command line arguments specific for galaxy tool generation
@param parser an instance of ArgumentParser
"""
parser.add_argument("-f", "--formats-file", dest="formats_file",
help="File containing the supported file formats. Run with '-h' or '--help' to see a "
"brief example on the layout of this file.", default=None, required=False)
parser.add_argument("-a", "--add-to-command-line", dest="add_to_command_line",
help="Adds content to the command line", default="", required=False)
parser.add_argument("-d", "--datatypes-destination", dest="data_types_destination",
help="Specify the location of a datatypes_conf.xml to modify and add the registered "
"data types. If the provided destination does not exist, a new file will be created.",
default=None, required=False)
parser.add_argument("-c", "--default-category", dest="default_category", default="DEFAULT", required=False,
help="Default category to use for tools lacking a category when generating tool_conf.xml")
parser.add_argument("-t", "--tool-conf-destination", dest="tool_conf_destination", default=None, required=False,
help="Specify the location of an existing tool_conf.xml that will be modified to include "
"the converted tools. If the provided destination does not exist, a new file will"
"be created.")
parser.add_argument("-g", "--galaxy-tool-path", dest="galaxy_tool_path", default=None, required=False,
help="The path that will be prepended to the file names when generating tool_conf.xml")
parser.add_argument("-r", "--required-tools", dest="required_tools_file", default=None, required=False,
help="Each line of the file will be interpreted as a tool name that needs translation. "
"Run with '-h' or '--help' to see a brief example on the format of this file.")
parser.add_argument("-s", "--skip-tools", dest="skip_tools_file", default=None, required=False,
help="File containing a list of tools for which a Galaxy stub will not be generated. "
"Run with '-h' or '--help' to see a brief example on the format of this file.")
parser.add_argument("-m", "--macros", dest="macros_files", default=[], nargs="*",
action="append", required=None, help="Import the additional given file(s) as macros. "
"The macros stdio, requirements and advanced_options are "
"required. Please see galaxy/macros.xml for an example of a "
"valid macros file. All defined macros will be imported.")
parser.add_argument("--test-macros", dest="test_macros_files", default=[], nargs="*",
action="append", required=None,
help="Import tests from the files given file(s) as macros. "
"The macro names must end with the id of the tools")
parser.add_argument("--test-macros-prefix", dest="test_macros_prefix", default=[], nargs="*",
action="append", required=None, help="The prefix of the macro name in the corresponding trest macros file")
parser.add_argument("--test-test", dest="test_test", action='store_true', default=False, required=False,
help="Generate a simple test for the internal unit tests.")
parser.add_argument("--test-only", dest="test_only", action='store_true', default=False, required=False,
help="Generate only the test section.")
parser.add_argument("--test-unsniffable", dest="test_unsniffable", nargs="+", default=[], required=False,
help="File extensions that can't be sniffed in Galaxy."
"Needs to be the OpenMS extensions (1st column in --formats-file)."
"For testdata with such extensions ftype will be set in the tes according to the file extension")
parser.add_argument("--tool-version", dest="tool_version", required=False, default=None,
help="Tool version to use (if not given its extracted from the CTD)")
parser.add_argument("--tool-profile", dest="tool_profile", required=False, default=None,
help="Tool profile version to use (if not given its not set)")
parser.add_argument("--bump-file", dest="bump_file", required=False,
default=None, help="json file defining tool versions."
"tools not listed in the file default to 0."
"if not given @GALAXY_VERSION@ is used")
def modify_param_for_galaxy(param):
"""
some parameters need galaxy specific modifications
"""
if param.type is _InFile:
        # if a file default is given (happens for external applications and
        # files for which the default is taken from share/OpenMS) set the
        # param to not required and remove the default (external applications
        # need to be taken care of by hardcoded values and the other cases
        # are chosen automatically if not specified on the command line)
if param.required and not (param.default is None or type(param.default) is _Null):
logger.warning(f"Data parameter {param.name} with default ({param.default})", 1)
param.required = False
param.default = _Null()
return param
def convert_models(args, parsed_ctds):
"""
main conversion function
@param args command line arguments
@param parsed_ctds the ctds
"""
# validate and prepare the passed arguments
validate_and_prepare_args(args, parsed_ctds[0].ctd_model)
# parse the given supported file-formats file
supported_file_formats = parse_file_formats(args.formats_file)
# extract the names of the macros and check that we have found the ones we need
macros_to_expand = parse_macros_files(args.macros_files,
tool_version=args.tool_version,
supported_file_types=supported_file_formats,
required_macros=REQUIRED_MACROS,
dont_expand=[ADVANCED_OPTIONS_NAME + "macro", "references",
"list_string_val", "list_string_san",
"list_float_valsan", "list_integer_valsan"])
bump = parse_bump_file(args.bump_file)
check_test_macros(args.test_macros_files, args.test_macros_prefix, parsed_ctds)
# parse the skip/required tools files
skip_tools = parse_tools_list_file(args.skip_tools_file)
required_tools = parse_tools_list_file(args.required_tools_file)
_convert_internal(parsed_ctds,
supported_file_formats=supported_file_formats,
default_executable_path=args.default_executable_path,
add_to_command_line=args.add_to_command_line,
required_tools=required_tools,
skip_tools=skip_tools,
macros_file_names=args.macros_files,
macros_to_expand=macros_to_expand,
parameter_hardcoder=args.parameter_hardcoder,
test_test=args.test_test,
test_only=args.test_only,
test_unsniffable=args.test_unsniffable,
test_macros_file_names=args.test_macros_files,
test_macros_prefix=args.test_macros_prefix,
tool_version=args.tool_version,
tool_profile=args.tool_profile,
bump=bump)
def parse_bump_file(bump_file):
if bump_file is None:
return None
with open(bump_file) as fp:
return json.load(fp)
def parse_tools_list_file(tools_list_file):
"""
"""
tools_list = None
if tools_list_file is not None:
tools_list = []
with open(tools_list_file) as f:
for line in f:
if line is None or not line.strip() or line.strip().startswith("#"):
continue
else:
tools_list.append(line.strip())
return tools_list
def parse_macros_files(macros_file_names, tool_version, supported_file_types, required_macros=[], dont_expand=[]):
"""
"""
macros_to_expand = []
for macros_file_name in macros_file_names:
try:
macros_file = open(macros_file_name)
logger.info("Loading macros from %s" % macros_file_name, 0)
root = parse(macros_file).getroot()
for xml_element in root.findall("xml"):
name = xml_element.attrib["name"]
if name in macros_to_expand:
logger.warning("Macro %s has already been found. Duplicate found in file %s." %
(name, macros_file_name), 0)
continue
logger.info("Macro %s found" % name, 1)
macros_to_expand.append(name)
except ParseError as e:
raise ApplicationException("The macros file " + macros_file_name + " could not be parsed. Cause: " + str(e))
except OSError as e:
raise ApplicationException("The macros file " + macros_file_name + " could not be opened. Cause: " + str(e))
else:
macros_file.close()
tool_ver_tk = root.find("token[@name='@TOOL_VERSION@']")
galaxy_ver_tk = root.find("token[@name='@GALAXY_VERSION@']")
if tool_ver_tk is None:
tool_ver_tk = add_child_node(root, "token", OrderedDict([("name", "@TOOL_VERSION@")]))
tool_ver_tk.text = tool_version
if galaxy_ver_tk is not None:
if tool_version == tool_ver_tk.text:
galaxy_ver_tk.text = str(int(galaxy_ver_tk.text))
else:
tool_ver_tk.text = tool_version
galaxy_ver_tk.text = "0"
ext_foo = root.find("token[@name='@EXT_FOO@']")
if ext_foo is None:
ext_foo = add_child_node(root, "token", OrderedDict([("name", "@EXT_FOO@")]))
g2o, o2g = get_fileformat_maps(supported_file_types)
# make sure that the backup data type is in the map
if 'txt' not in g2o:
g2o['txt'] = 'txt'
ext_foo.text = CDATA("""#def oms2gxyext(o)
#set m={}
#return m[o]
#end def
#def gxy2omsext(g)
#set m={}
#return m[g]
#end def
""".format(str(o2g), str(g2o)))
tree = ElementTree(root)
tree.write(macros_file_name, encoding="UTF-8", xml_declaration=True, pretty_print=True)
# with open(macros_file_name, "w") as macros_file:
# tree = ElementTree(root)
# tree.write(macros_file, encoding="UTF-8", xml_declaration=True, pretty_print=True)
# we depend on "stdio", "requirements" and "advanced_options" to exist on all the given macros files
missing_needed_macros = []
for required_macro in required_macros:
if required_macro not in macros_to_expand:
missing_needed_macros.append(required_macro)
if missing_needed_macros:
raise ApplicationException(
"The following required macro(s) were not found in any of the given macros files: %s, "
"see galaxy/macros.xml for an example of a valid macros file."
% ", ".join(missing_needed_macros))
# remove macros that should not be expanded
for m in dont_expand:
try:
idx = macros_to_expand.index(m)
del macros_to_expand[idx]
except ValueError:
pass
return macros_to_expand
def check_test_macros(test_macros_files, test_macros_prefix, parsed_ctds):
tool_ids = set()
for parsed_ctd in parsed_ctds:
model = parsed_ctd.ctd_model
tool_ids.add(model.name.replace(" ", "_"))
for mf, mp in zip(test_macros_files, test_macros_prefix):
macro_ids = set()
try:
with open(mf) as macros_file:
root = parse(macros_file).getroot()
for xml_element in root.findall("xml"):
name = xml_element.attrib["name"]
if not name.startswith(mp):
logger.warning("Testmacro with invalid prefix %s." % (mp), 0)
continue
name = name[len(mp):]
macro_ids.add(name)
except ParseError as e:
raise ApplicationException("The macros file " + mf + " could not be parsed. Cause: " + str(e))
except OSError as e:
raise ApplicationException("The macros file " + mf + " could not be opened. Cause: " + str(e))
for t in tool_ids - macro_ids:
logger.error("missing %s" % t)
add_child_node(root, "xml", OrderedDict([("name", mp + t)]))
if len(macro_ids - tool_ids):
logger.warning("Unnecessary macros in {}: {}".format(mf, macro_ids - tool_ids))
tree = ElementTree(root)
tree.write(mf, encoding="UTF-8", xml_declaration=True, pretty_print=True)
def parse_file_formats(formats_file):
"""
"""
supported_formats = []
if formats_file is not None:
line_number = 0
with open(formats_file) as f:
for line in f:
line_number += 1
if line is None or not line.strip() or line.strip().startswith("#"):
# ignore (it'd be weird to have something like:
# if line is not None and not (not line.strip()) ...
continue
parsed_formats = line.strip().split()
# valid lines contain either one or two columns
if len(parsed_formats) == 1:
supported_formats.append(DataType(parsed_formats[0], parsed_formats[0]))
elif len(parsed_formats) == 2:
supported_formats.append(DataType(parsed_formats[0], parsed_formats[1]))
elif len(parsed_formats) == 3:
composite = [tuple(x.split(":")) for x in parsed_formats[2].split(",")]
supported_formats.append(DataType(parsed_formats[0],
parsed_formats[1],
composite))
else:
logger.warning("Invalid line at line number %d of the given formats file. Line will be ignored:\n%s" % (line_number, line), 0)
return supported_formats
def get_fileformat_maps(supported_formats):
"""
convenience functions to compute dictionaries mapping
Galaxy data types <-> CTD formats
"""
o2g = {}
g2o = {}
for s in supported_formats:
if s.extension not in o2g:
o2g[s.extension] = s.galaxy_extension
if s.galaxy_extension not in g2o:
g2o[s.galaxy_extension] = s.extension
return g2o, o2g
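# illustrative use of get_fileformat_maps with a hypothetical data type (real
# entries come from the --formats-file):
#
# g2o, o2g = get_fileformat_maps([DataType('mzML', 'mzml')])
# assert o2g == {'mzML': 'mzml'} and g2o == {'mzml': 'mzML'}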
def validate_and_prepare_args(args, model):
"""
check command line arguments
@param args command line arguments
@return None
"""
# check that only one of skip_tools_file and required_tools_file has been provided
if args.skip_tools_file is not None and args.required_tools_file is not None:
raise ApplicationException(
"You have provided both a file with tools to ignore and a file with required tools.\n"
"Only one of -s/--skip-tools, -r/--required-tools can be provided.")
# flatten macros_files to make sure that we have a list containing file names and not a list of lists
utils.flatten_list_of_lists(args, "macros_files")
utils.flatten_list_of_lists(args, "test_macros_files")
utils.flatten_list_of_lists(args, "test_macros_prefix")
# check that the arguments point to a valid, existing path
input_variables_to_check = ["skip_tools_file", "required_tools_file", "macros_files", "formats_file"]
for variable_name in input_variables_to_check:
utils.validate_argument_is_valid_path(args, variable_name)
# check that the provided output files, if provided, contain a valid file path (i.e., not a folder)
output_variables_to_check = ["data_types_destination", "tool_conf_destination"]
for variable_name in output_variables_to_check:
file_name = getattr(args, variable_name)
if file_name is not None and os.path.isdir(file_name):
raise ApplicationException("The provided output file name (%s) points to a directory." % file_name)
if not args.macros_files:
# list is empty, provide the default value
logger.warning("Using default macros from galaxy/macros.xml", 0)
args.macros_files = [os.path.dirname(os.path.abspath(__file__)) + "/macros.xml"]
if args.tool_version is None:
args.tool_version = model.version
def get_preferred_file_extension():
"""
get the file extension for the output files
@return "xml"
"""
return "xml"
def _convert_internal(parsed_ctds, **kwargs):
"""
parse all input files into models using CTDopts (via utils)
@param parsed_ctds the ctds
@param kwargs skip_tools, required_tools, and additional parameters for
expand_macros, create_command, create_inputs, create_outputs
@return a tuple containing the model, output destination, origin file
"""
parameter_hardcoder = kwargs["parameter_hardcoder"]
for parsed_ctd in parsed_ctds:
model = parsed_ctd.ctd_model
if kwargs["skip_tools"] is not None and model.name in kwargs["skip_tools"]:
logger.info("Skipping tool %s" % model.name, 0)
continue
elif kwargs["required_tools"] is not None and model.name not in kwargs["required_tools"]:
logger.info("Tool %s is not required, skipping it" % model.name, 0)
continue
origin_file = parsed_ctd.input_file
output_file = parsed_ctd.suggested_output_file
        # overwrite attributes of the parsed ctd parameters as specified in the hardcoded parameters json
for param in utils.extract_and_flatten_parameters(model):
hardcoded_attributes = parameter_hardcoder.get_hardcoded_attributes(utils.extract_param_name(param), model.name, 'CTD')
if hardcoded_attributes is not None:
for a in hardcoded_attributes:
if not hasattr(param, a):
continue
if a == "type":
try:
t = GALAXY_TYPE_TO_TYPE[hardcoded_attributes[a]]
except KeyError:
logger.error("Could not set hardcoded attribute {}={} for {}".format(a, hardcoded_attributes[a], param.name))
sys.exit(1)
setattr(param, a, t)
elif type(getattr(param, a)) is _FileFormat or (param.type in [_InFile, _OutFile, _OutPrefix] and a == "restrictions"):
setattr(param, a, _FileFormat(str(hardcoded_attributes[a])))
elif type(getattr(param, a)) is _Choices:
setattr(param, a, _Choices(str(hardcoded_attributes[a])))
elif type(getattr(param, a)) is _NumericRange:
raise Exception("Overwriting of Numeric Range not implemented")
else:
setattr(param, a, hardcoded_attributes[a])
if "test_only" in kwargs and kwargs["test_only"]:
test = create_test_only(parsed_ctd.ctd_model, **kwargs)
tree = ElementTree(test)
output_file = parsed_ctd.suggested_output_file
logger.info("Writing to %s" % utils.get_filename(output_file), 1)
tree.write(output_file, encoding="UTF-8", xml_declaration=False, pretty_print=True)
continue
logger.info("Converting {} (source {})".format(model.name, utils.get_filename(origin_file)), 0)
tool = create_tool(model,
kwargs.get("tool_profile", None),
kwargs.get("bump", None))
write_header(tool, model)
create_description(tool, model)
import_macros(tool, model, **kwargs)
expand_macros(tool, kwargs["macros_to_expand"])
# command, inputs, outputs = create_cio(tool, model, **kwargs)
create_command(tool, model, **kwargs)
create_configfiles(tool, model, **kwargs)
inputs = create_inputs(tool, model, **kwargs)
outputs = create_outputs(tool, model, **kwargs)
if kwargs["test_test"]:
create_tests(tool, inputs=copy.deepcopy(inputs), outputs=copy.deepcopy(outputs))
if kwargs["test_macros_prefix"]:
create_tests(tool, test_macros_prefix=kwargs['test_macros_prefix'], name=model.name)
create_help(tool, model)
# citations are required to be at the end
expand_macro(tool, "references")
# wrap our tool element into a tree to be able to serialize it
tree = ElementTree(tool)
logger.info("Writing to %s" % utils.get_filename(output_file), 1)
tree.write(output_file, encoding="UTF-8", xml_declaration=True, pretty_print=True)
def write_header(tool, model):
"""
add comments to the tool header
@param tool the tool xml
@param model the ctd model
"""
tool.addprevious(etree.Comment(
"This is a configuration file for the integration of a tools into Galaxy (https://galaxyproject.org/). "
"This file was automatically generated using CTDConverter."))
tool.addprevious(etree.Comment('Proposed Tool Section: [%s]' % model.opt_attribs.get("category", "")))
def create_tool(model, profile, bump):
"""
initialize the tool
@param model the ctd model
"""
tool_id = model.name.replace(" ", "_")
if bump is None:
gxy_version = "@GALAXY_VERSION@"
elif model.name in bump:
gxy_version = str(bump[model.name])
elif tool_id in bump:
gxy_version = str(bump[tool_id])
else:
gxy_version = "@GALAXY_VERSION@"
attrib = OrderedDict([("id", tool_id),
("name", model.name),
("version", "@TOOL_VERSION@+galaxy" + gxy_version)])
if profile is not None:
attrib["profile"] = profile
return Element("tool", attrib)
def create_description(tool, model):
"""
add description to the tool
@param tool the Galaxy tool
@param model the ctd model
"""
if "description" in model.opt_attribs.keys() and model.opt_attribs["description"] is not None:
description = SubElement(tool, "description")
description.text = model.opt_attribs["description"]
def create_configfiles(tool, model, **kwargs):
"""
create
- <configfiles><inputs>
- <configfiles><configfile>
The former will create a json file containing the tool parameter values
that can be accessed in cheetah with $args_json. Note that
data_style="paths" (i.e. input data sets are included in the json) is set
    even if input files are given on the CLI. The reason is that this way
    default values in the CTD can be restored for optional input files.
The latter will contain hardcoded parameters.
"""
configfiles_node = add_child_node(tool, "configfiles")
add_child_node(configfiles_node, "inputs",
OrderedDict([("name", "args_json"), ("data_style", "paths")]))
parameter_hardcoder = kwargs.get("parameter_hardcoder")
hc_dict = dict()
for param in utils.extract_and_flatten_parameters(model):
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if hardcoded_value is None:
continue
path = utils.extract_param_path(param)
for i, v in enumerate(path[:-1]):
try:
utils.getFromDict(hc_dict, path[:i + 1])
except KeyError:
utils.setInDict(hc_dict, path[:i + 1], {})
utils.setInDict(hc_dict, path, hardcoded_value)
hc_node = add_child_node(configfiles_node, "configfile",
OrderedDict([("name", "hardcoded_json")]))
hc_node.text = CDATA(json.dumps(hc_dict).replace('$', r'\$'))
# print(json.dumps(hc_dict))
def create_command(tool, model, **kwargs):
"""
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
"""
# main command
final_cmd = OrderedDict([('preprocessing', []), ('command', []), ('postprocessing', [])])
advanced_cmd = {'preprocessing': [], 'command': [], 'postprocessing': []}
final_cmd['preprocessing'].extend(["@QUOTE_FOO@", "@EXT_FOO@", "#import re", "", "## Preprocessing"])
# - call the executable with -write_ctd to write the ctd file (with defaults)
# - use fill_ctd.py to overwrite the defaults in the ctd file with the
# Galaxy parameters in the JSON file (from inputs config file)
# - feed the ctd file to the executable (with -ini)
# note: input and output file parameters are still given on the command line
# - output file parameters are not included in the JSON file
# - input and output files are accessed through links / files that have the correct extension
final_cmd['command'].extend(["", "## Main program call"])
final_cmd['command'].append("""
set -o pipefail &&
@EXECUTABLE@ -write_ctd ./ &&
python3 '$__tool_directory__/fill_ctd.py' '@EXECUTABLE@.ctd' '$args_json' '$hardcoded_json' &&
@EXECUTABLE@ -ini @EXECUTABLE@.ctd""")
final_cmd['command'].extend(kwargs["add_to_command_line"])
final_cmd['postprocessing'].extend(["", "## Postprocessing"])
advanced_command_start = "#if ${aon}cond.{aon}selector=='advanced':".format(aon=ADVANCED_OPTIONS_NAME)
advanced_command_end = "#end if"
parameter_hardcoder = kwargs["parameter_hardcoder"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
for param in utils.extract_and_flatten_parameters(model):
param = modify_param_for_galaxy(param)
param_cmd = {'preprocessing': [], 'command': [], 'postprocessing': []}
command_line_prefix = utils.extract_command_line_prefix(param, model)
# TODO use utils.extract_param_name(param).replace(":", "_")? Then hardcoding ctd variables (with :) and tool variables (with _) can be distinguished
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name):
continue
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if hardcoded_value is not None:
pass # TODO hardcoded values should go to <inputs>
# param_cmd['command'].append("%s %s" % (command_line_prefix, hardcoded_value))
else:
# in the else branch the parameter is neither blacklisted nor hardcoded...
_actual_parameter = get_galaxy_parameter_path(param)
actual_parameter = get_galaxy_parameter_path(param, fix_underscore=True)
# all but bool params need the command line argument (bools have it already in the true/false value)
if param.type is _OutFile or param.type is _OutPrefix or param.type is _InFile:
param_cmd['command'].append(command_line_prefix)
# preprocessing for file inputs:
# - create a dir with name param.name
# - create a link to id.ext in this directory
# rationale: in the autogenerated tests the same file was used as input to multiple parameters
# this leads to conflicts while linking... might also be better in general
if param.type is _InFile:
param_cmd['preprocessing'].append("mkdir %s &&" % actual_parameter)
if param.is_list:
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + _actual_parameter + ") if f])} && ")
param_cmd['preprocessing'].append("${' '.join([\"ln -s '%s' '" + actual_parameter + "/%s/%s.%s' && \" % (f, i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($" + _actual_parameter + ") if f])}")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $gxy2omsext(f.ext)) for i, f in enumerate($" + _actual_parameter + ") if f])}")
else:
param_cmd['preprocessing'].append("ln -s '$" + _actual_parameter + "' '" + actual_parameter + "/${re.sub(\"[^\\w\\-_]\", \"_\", $" + _actual_parameter + ".element_identifier)}.$gxy2omsext($" + _actual_parameter + ".ext)' &&")
param_cmd['command'].append("'" + actual_parameter + "/${re.sub(\"[^\\w\\-_]\", \"_\", $" + _actual_parameter + ".element_identifier)}.$gxy2omsext($" + _actual_parameter + ".ext)'")
elif param.type is _OutPrefix:
param_cmd['preprocessing'].append("mkdir %s &&" % actual_parameter)
param_cmd['command'].append(actual_parameter + "/")
elif param.type is _OutFile:
_actual_parameter = get_galaxy_parameter_path(param, separator="_")
actual_parameter = get_galaxy_parameter_path(param, separator="_", fix_underscore=True)
# check if there is a parameter that sets the format
# if so we add an extension to the generated files which will be used to
# determine the format in the output tag
# in all other cases (corresponding input / there is only one allowed format)
# the format will be set in the output tag
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[param.type])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
# print("ci %s ffc %s" % (corresponding_input.name, fmt_from_corresponding))
# print("formats %s" % (formats))
if corresponding_input is not None:
actual_input_parameter = get_galaxy_parameter_path(corresponding_input)
else:
actual_input_parameter = None
# print(len(formats) > 1, (corresponding_input is None or not
# fmt_from_corresponding))
if type_param is not None:
type_param_name = get_galaxy_parameter_path(type_param)
elif len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
type_param_name = get_galaxy_parameter_path(param, suffix="type")
else:
type_param_name = None
# print("tp %s" % type_param_name)
param_cmd['preprocessing'].append("mkdir " + actual_parameter + " &&")
                # if there is only one format (the output node sets the format using the format attribute of the data/discover node)
# - single file: write to temp file with oms extension and move this to the actual result file
# - lists: write to files with the oms extension and remove the extension afterwards (discovery with __name__)
if len(formats) == 1:
fmt = formats.pop()
if param.is_list:
logger.info(f"1 fmt + list {param.name} -> {actual_input_parameter}", 1)
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $gxy2omsext(\"" + fmt + "\")) for i, f in enumerate($" + actual_input_parameter + ") if f])}")
param_cmd['postprocessing'].append("${' '.join([\"&& mv -n '" + actual_parameter + "/%(bn)s/%(id)s.%(gext)s' '" + _actual_parameter + "/%(bn)s/%(id)s'\"%{\"bn\": i, \"id\": re.sub('[^\\w\\-_]', '_', f.element_identifier), \"gext\": $gxy2omsext(\"" + fmt + "\")} for i, f in enumerate($" + actual_input_parameter + ") if f])}")
else:
logger.info("1 fmt + dataset %s" % param.name, 1)
param_cmd['command'].append("'" + actual_parameter + "/output.${gxy2omsext(\"" + fmt + "\")}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${gxy2omsext(\"" + fmt + "\")}' '$" + _actual_parameter + "'")
# if there is a type parameter then we use the type selected by the user
# - single: write to temp file with the oms extension and mv it to the actual file output which is treated via change_format
# - list: let the command create output files with the oms extensions, postprocessing renames them to the galaxy extensions, output is then discover + __name_and_ext__
elif type_param_name is not None:
if param.is_list:
logger.info("type + list %s" % param.name, 1)
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), $" + type_param_name + ") for i, f in enumerate($" + actual_input_parameter + ") if f])}")
param_cmd['postprocessing'].append("${' '.join([\"&& mv -n '" + actual_parameter + "/%(bn)s/%(id)s.%(omsext)s' '" + actual_parameter + "/%(bn)s/%(id)s.%(gext)s'\"%{\"bn\": i, \"id\": re.sub('[^\\w\\-_]', '_', f.element_identifier), \"omsext\":$" + type_param_name + ", \"gext\": $oms2gxyext(str($" + type_param_name + "))} for i, f in enumerate($" + actual_input_parameter + ") if f])}")
else:
logger.info("type + dataset %s" % param.name, 1)
# 1st create file with openms extension (often required by openms)
# then move it to the actual place specified by the parameter
# the format is then set by the <data> tag using <change_format>
param_cmd['command'].append("'" + actual_parameter + "/output.${" + type_param_name + "}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + type_param_name + "}' '$" + actual_parameter + "'")
elif actual_input_parameter is not None:
if param.is_list:
logger.info("actual + list %s" % param.name, 1)
param_cmd['preprocessing'].append("mkdir ${' '.join([\"'" + actual_parameter + "/%s'\" % (i) for i, f in enumerate($" + actual_input_parameter + ") if f])} && ")
param_cmd['command'].append("${' '.join([\"'" + actual_parameter + "/%s/%s.%s'\"%(i, re.sub('[^\\w\\-_]', '_', f.element_identifier), f.ext) for i, f in enumerate($" + actual_input_parameter + ") if f])}")
else:
logger.info(f"actual + dataset {param.name} {actual_input_parameter} {corresponding_input.is_list}", 1)
if corresponding_input.is_list:
param_cmd['command'].append("'" + actual_parameter + "/output.${" + actual_input_parameter + "[0].ext}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + actual_input_parameter + "[0].ext}' '$" + _actual_parameter + "'")
else:
param_cmd['command'].append("'" + actual_parameter + "/output.${" + actual_input_parameter + ".ext}'")
param_cmd['postprocessing'].append("&& mv '" + actual_parameter + "/output.${" + actual_input_parameter + ".ext}' '$" + _actual_parameter + "'")
else:
if param.is_list:
raise Exception("output parameter itemlist %s without corresponding input")
else:
logger.info("else + dataset %s" % param.name, 1)
param_cmd['command'].append("'$" + _actual_parameter + "'")
# # select with multiple = true
# elif is_selection_parameter(param) and param.is_list:
# param_cmd['command'].append("${' '.join(['\"%s\"'%str(_) for _ in str($" + actual_parameter + ").split(',')])}")
# elif param.is_list:
# param_cmd['command'].append("$quote($%s" % actual_parameter + ")")
# #command += "${' '.join([\"'%s'\"%str(_) for _ in $" + actual_parameter + "])}\n"
# elif is_boolean_parameter(param):
# param_cmd['command'].append("$%s" % actual_parameter + "")
# else:
# param_cmd['command'].append('"$' + actual_parameter + '"')
# add if statement for optional parameters and preprocessing
# - for optional outputs (param_out_x) the presence of the parameter
# depends on the additional input (param_x) -> need no if
# - real string parameters (i.e. ctd type string wo restrictions) also
# need no if (otherwise the empty string could not be provided)
if not (param.required or is_boolean_parameter(param) or (param.type is str and param.restrictions is None)):
# and not(param.type is _InFile and param.is_list):
actual_parameter = get_galaxy_parameter_path(param, suffix="FLAG", fix_underscore=True)
_actual_parameter = get_galaxy_parameter_path(param, suffix="FLAG")
for stage in param_cmd:
if len(param_cmd[stage]) == 0:
continue
                # special case for optional itemlists: the first line is always
                # emitted; the remainder only if at least one option is selected
if is_selection_parameter(param) and param.is_list and param.required is False:
param_cmd[stage] = [param_cmd[stage][0]] + ["#if $" + _actual_parameter + ":"] + utils.indent(param_cmd[stage][1:]) + ["#end if"]
elif is_selection_parameter(param) or param.type is _InFile:
param_cmd[stage] = ["#if $" + _actual_parameter + ":"] + utils.indent(param_cmd[stage]) + ["#end if"]
elif param.type is _OutFile or param.type is _OutPrefix:
param_cmd[stage] = ["#if \"" + param.name + "_FLAG\" in str($OPTIONAL_OUTPUTS).split(',')"] + utils.indent(param_cmd[stage]) + ["#end if"]
else:
param_cmd[stage] = ["#if str($" + _actual_parameter + "):"] + utils.indent(param_cmd[stage]) + ["#end if"]
for stage in param_cmd:
if len(param_cmd[stage]) == 0:
continue
if param.advanced and hardcoded_value is None and not (param.type is _OutFile or param.type is _OutPrefix):
advanced_cmd[stage].extend(param_cmd[stage])
else:
final_cmd[stage].extend(param_cmd[stage])
for stage in advanced_cmd:
if len(advanced_cmd[stage]) == 0:
continue
advanced_cmd[stage] = [advanced_command_start] + utils.indent(advanced_cmd[stage]) + [advanced_command_end]
final_cmd[stage].extend(advanced_cmd[stage])
out, optout = all_outputs(model, parameter_hardcoder)
if len(optout) > 0 or len(out) + len(optout) == 0:
stdout = ["| tee '$stdout'"]
if len(optout) > 0:
stdout = ["#if len(str($OPTIONAL_OUTPUTS).split(',')) == 0"] + utils.indent(stdout) + ["#end if"]
final_cmd['command'].extend(stdout)
ctd_out = ["#if \"ctd_out_FLAG\" in $OPTIONAL_OUTPUTS"] + utils.indent(["&& mv '@EXECUTABLE@.ctd' '$ctd_out'"]) + ["#end if"]
final_cmd['postprocessing'].extend(ctd_out)
command_node = add_child_node(tool, "command")
command_node.attrib["detect_errors"] = "exit_code"
command_node.text = CDATA("\n".join(sum(final_cmd.values(), [])))
def import_macros(tool, model, **kwargs):
"""
creates the xml elements needed to import the needed macros files
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
"""
macros_node = add_child_node(tool, "macros")
token_node = add_child_node(macros_node, "token")
token_node.attrib["name"] = "@EXECUTABLE@"
token_node.text = utils.extract_tool_executable_path(model, kwargs["default_executable_path"])
# add <import> nodes
for macro_file_name in kwargs["macros_file_names"] + kwargs["test_macros_file_names"]:
macro_file = open(macro_file_name)
import_node = add_child_node(macros_node, "import")
# do not add the path of the file, rather, just its basename
import_node.text = os.path.basename(macro_file.name)
def expand_macro(node, macro, attribs=None):
"""Add <expand macro="..." ... /> to node."""
expand_node = add_child_node(node, "expand")
expand_node.attrib["macro"] = macro
if attribs:
for a in attribs:
expand_node.attrib[a] = attribs[a]
return expand_node
# and to "expand" the macros in a node
def expand_macros(node, macros_to_expand):
# add <expand> nodes
for expand_macro in macros_to_expand:
expand_node = add_child_node(node, "expand")
expand_node.attrib["macro"] = expand_macro
def get_galaxy_parameter_path(param, separator=".", suffix=None, fix_underscore=False):
"""
Get the complete path for a parameter as a string where the path
components are joined by the given separator. A given suffix can
be appended.
"""
p = get_galaxy_parameter_name(param, suffix, fix_underscore)
path = utils.extract_param_path(param, fix_underscore)
if len(path) > 1:
return (separator.join(path[:-1]) + separator + p).replace("-", "_")
elif param.advanced and (param.type is not _OutFile or suffix):
return ADVANCED_OPTIONS_NAME + "cond." + p
else:
return p
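# e.g. (illustration, assuming utils.extract_param_path returns the nested CTD
# path) a parameter "min-score" inside group "algorithm" is addressed as
# "algorithm.min_score" with the default separator and as
# "algorithm_min_score" with separator="_"; "-" is always replaced by "_"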
def get_galaxy_parameter_name(param, suffix=None, fix_underscore=False):
"""
get the name of the parameter used in the galaxy tool
- replace : and - by _
- add suffix for output parameters if not None
the idea of suffix is to be used for optional outputs (out_x) for
which an additional boolean input (out_x_FLAG) exists
@param param the parameter
@param suffix suffix to append
@return the name used for the parameter in the tool form
"""
p = param.name.replace("-", "_")
if fix_underscore and p.startswith("_"):
p = p[1:]
if param.type is _OutFile and suffix is not None:
return f"{p}_{suffix}"
else:
return "%s" % p
def get_out_type_param(out_param, model, parameter_hardcoder):
"""
check if there is a parameter that has the same name with appended _type
and return it if present, otherwise return None
"""
if parameter_hardcoder.get_blacklist(out_param.name + "_type", model.name):
return None
for param in utils.extract_and_flatten_parameters(model):
if param.name == out_param.name + "_type":
return param
return None
def is_in_type_param(param, model):
return is_type_param(param, model, [_InFile])
def is_out_type_param(param, model):
"""
check if the parameter is output_type parameter
- the name ends with _type and there is an output parameter without this suffix
and return True iff this is the case
"""
return is_type_param(param, model, [_OutFile, _OutPrefix])
def is_type_param(param, model, tpe):
"""
check if the parameter is _type parameter of an in/output
- the name ends with _type and there is an output parameter without this suffix
and return True iff this is the case
"""
if not param.name.endswith("_type"):
return False
for out_param in utils.extract_and_flatten_parameters(model):
if out_param.type not in tpe:
continue
if param.name == out_param.name + "_type":
return True
return False
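# e.g. a parameter named "out_type" is a type parameter iff the model also
# contains an in-/output parameter named "out"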
def get_corresponding_input(out_param, model):
"""
get the input parameter corresponding to the given output
1st try to get the input with the type (single file/list) and same format restrictions
if this fails get the input that has the same type
in both cases there must be only one such input
return the found input parameter and True iff the 1st case applied
"""
c = get_input_with_same_restrictions(out_param, model, True)
if c is None:
return (get_input_with_same_restrictions(out_param, model, False), False)
else:
return (c, True)
def get_input_with_same_restrictions(out_param, model, check_formats):
"""
get the input parameter that has the same restrictions (ctd file_formats)
    - input and output must both be lists or both be simple parameters
"""
matching = []
for allow_different_type in [False, True]:
for param in utils.extract_and_flatten_parameters(model):
if param.type is not _InFile:
continue
# logger.error("%s %s %s %s %s %s" %(out_param.name, param.name, param.is_list, out_param.is_list, param.restrictions, out_param.restrictions))
if allow_different_type or param.is_list == out_param.is_list:
if check_formats:
if param.restrictions is None and out_param.restrictions is None:
matching.append(param)
elif param.restrictions is not None and out_param.restrictions is not None and param.restrictions.formats == out_param.restrictions.formats:
matching.append(param)
else:
matching.append(param)
# logger.error("match %s "%([_.name for _ in matching]))
if len(matching) > 0:
break
if len(matching) == 1:
return matching[0]
else:
return None
def create_inputs(tool, model, **kwargs):
"""
create input section of the Galaxy tool
@param tool the Galaxy tool
@param model the ctd model
@param kwargs
@return inputs node
"""
inputs_node = SubElement(tool, "inputs")
section_nodes = dict()
section_params = dict()
# some suites (such as OpenMS) need some advanced options when handling inputs
advanced_node = Element("expand", OrderedDict([("macro", ADVANCED_OPTIONS_NAME + "macro")]))
parameter_hardcoder = kwargs["parameter_hardcoder"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
# treat all non output-file/advanced/blacklisted/hardcoded parameters as inputs
for param in utils.extract_and_flatten_parameters(model, True):
if type(param) is ParameterGroup:
title, help_text = generate_label_and_help(param.description)
section_params[utils.extract_param_name(param)] = param
section_nodes[utils.extract_param_name(param)] = Element("section", OrderedDict([("name", param.name), ("title", title), ("help", help_text), ("expanded", "false")]))
continue
param = modify_param_for_galaxy(param)
# no need to show hardcoded parameters
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if hardcoded_value is not None:
continue
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name):
continue
# do not output file type parameters for inputs since file types are
# known by Galaxy and set automatically by extension (which comes from
# the Galaxy data type which is translated to OpenMS datatype as defined
        # in filetypes.txt)
if is_in_type_param(param, model):
continue
if utils.extract_param_name(param.parent) in section_nodes:
parent_node = section_nodes[utils.extract_param_name(param.parent)]
elif param.advanced:
parent_node = advanced_node
else:
parent_node = inputs_node
# sometimes special inputs are needed for outfiles:
if param.type is _OutFile or param.type is _OutPrefix:
# if there are multiple possible output formats, but no parameter to choose the type or a
# corresponding input then add a selection parameter
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if len(formats) > 1 and type_param is None and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
fmt_select = add_child_node(parent_node, "param", OrderedDict([("name", param.name + "_type"), ("type", "select"), ("optional", "false"), ("label", f"File type of output {param.name} ({param.description})")]))
g2o, o2g = get_fileformat_maps(kwargs["supported_file_formats"])
# for f in formats:
# option_node = add_child_node(fmt_select, "option", OrderedDict([("value", g2o[f])]), f)
for choice in param.restrictions.formats:
option_node = add_child_node(fmt_select, "option", OrderedDict([("value", str(choice))]))
option_node.text = o2g[str(choice)]
if choice.lower() != o2g[str(choice)]:
option_node.text += " (%s)" % choice
continue
# create the actual param node and fill the attributes
param_node = add_child_node(parent_node, "param")
create_param_attribute_list(param_node, param, model, kwargs["supported_file_formats"])
hardcoded_attributes = parameter_hardcoder.get_hardcoded_attributes(param.name, model.name, 'XML')
if hardcoded_attributes is not None:
for a in hardcoded_attributes:
param_node.attrib[a] = str(hardcoded_attributes[a])
section_parents = [utils.extract_param_name(section_params[sn].parent) for sn in section_nodes]
for sn in section_nodes:
if len(section_nodes[sn]) == 0 and sn not in section_parents:
continue
if utils.extract_param_name(section_params[sn].parent) in section_nodes:
section_nodes[utils.extract_param_name(section_params[sn].parent)].append(section_nodes[sn])
else:
inputs_node.append(section_nodes[sn])
# if there is an advanced section then append it at the end of the inputs
inputs_node.append(advanced_node)
# Add select for optional outputs
out, optout = all_outputs(model, parameter_hardcoder)
attrib = OrderedDict([("name", "OPTIONAL_OUTPUTS"),
("type", "select"),
("optional", "true"),
("multiple", "true"),
("label", "Optional outputs")])
# if len(out) == 0 and len(out) + len(optout) > 0:
# attrib["optional"] = "false"
# else:
# attrib["optional"] = "true"
param_node = add_child_node(inputs_node, "param", attrib)
for o in optout:
title, help_text = generate_label_and_help(o.description)
option_node = add_child_node(param_node, "option",
OrderedDict([("value", o.name + "_FLAG")]),
text=f"{o.name} ({title})")
option_node = add_child_node(param_node, "option",
OrderedDict([("value", "ctd_out_FLAG")]),
text="Output used ctd (ini) configuration file")
return inputs_node
def is_default(value, param):
"""
check if the value is the default of the param or if the value is in the defaults of param
"""
return param.default == value or (type(param.default) is list and value in param.default)
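# A minimal sketch of the expected behavior (the parameter object here is
# hypothetical, standing in for a CTD parameter with a ``default`` attribute):
#
#   class _FakeParam: default = ["He", "Ar"]
#   is_default("He", _FakeParam())  # -> True (value contained in a list default)
#   is_default("Xe", _FakeParam())  # -> False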
def get_formats(param, model, o2g):
"""
determine format attribute from the CTD restrictions (i.e. the OpenMS extensions)
- also check if all listed possible formats are supported in Galaxy and warn if necessary
"""
if param.restrictions is None:
return []
elif type(param.restrictions) is _FileFormat:
choices = param.restrictions.formats
elif is_out_type_param(param, model):
choices = param.restrictions.choices
else:
raise InvalidModelException("Unrecognized restriction type [%(type)s] "
"for [%(name)s]" % {"type": type(param.restrictions),
"name": param.name})
# check if there are formats that have not been registered yet...
formats = set()
for format_name in choices:
if format_name not in o2g:
logger.warning(f"Ignoring unknown format {format_name} for parameter {param.name}", 1)
else:
formats.add(format_name)
return sorted(formats)
def get_galaxy_formats(param, model, o2g, default=None):
"""
determine galaxy formats for a param (i.e. list of allowed Galaxy extensions)
from the CTD restrictions (i.e. the OpenMS extensions)
- if there is a single one, then take this
- if there is none, then use the given default
"""
formats = get_formats(param, model, o2g)
gxy_formats = {o2g[_] for _ in formats if _ in o2g}
if len(gxy_formats) == 0:
if default is not None:
gxy_formats.add(default)
else:
raise InvalidModelException("No supported formats [%(type)s] "
"for [%(name)s]" % {"type": type(param.restrictions),
"name": param.name})
return sorted(gxy_formats)
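# Illustration with assumed mappings (not taken from a real filetypes.txt):
# for a parameter restricted to the OpenMS types ["mzML", "featureXML"] and
# o2g = {"mzML": "mzml", "featureXML": "featurexml"}, this returns the sorted
# Galaxy extensions ["featurexml", "mzml"]; if no restriction maps to a known
# Galaxy type the given default is used, or InvalidModelException is raised.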
def create_param_attribute_list(param_node, param, model, supported_file_formats):
"""
get the attributes of input parameters
@param param_node the galaxy tool param node
@param param the ctd parameter
@param supported_file_formats
"""
g2o, o2g = get_fileformat_maps(supported_file_formats)
# set the name, argument and a first guess for the type (which will be overwritten
# in some cases .. see below)
# even if the conversion relies on the fact that the param names are identical
# to the ctd ITEM names we replace dashes by underscores because input and output
# parameters need to be treated in cheetah. variable names are currently fixed back
# to dashes in fill_ctd.py. currently there seems to be only a single tool
# requiring this https://github.com/OpenMS/OpenMS/pull/4529
param_node.attrib["name"] = get_galaxy_parameter_name(param)
param_node.attrib["argument"] = "-%s" % utils.extract_param_name(param)
param_type = TYPE_TO_GALAXY_TYPE[param.type]
if param_type is None:
raise ModelError("Unrecognized parameter type %(type)s for parameter %(name)s"
% {"type": param.type, "name": param.name})
# ITEMLIST is rendered as a text field (even if it contains integers or floats);
# files are an exception and are treated a bit below
if param.is_list:
param_type = "text"
if is_selection_parameter(param):
param_type = "select"
if len(param.restrictions.choices) < 5:
param_node.attrib["display"] = "checkboxes"
if param.is_list:
param_node.attrib["multiple"] = "true"
if is_boolean_parameter(param):
param_type = "boolean"
if param.type is _InFile:
# assume it's just text unless restrictions are provided
param_node.attrib["type"] = "data"
param_node.attrib["format"] = ",".join(get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_InFile]))
# in the case of multiple input set multiple flag
if param.is_list:
param_node.attrib["multiple"] = "true"
else:
param_node.attrib["type"] = param_type
# set the optional attribute of parameters
#
# OpenMS sets text, int, select, and bool parameters that have a default
# as optional (required=False); the default value is set implicitly if no
# value is given.
# This is reasonable for the CLI because one certainly does not want the
# user to specify the default manually for all parameters.
# For Galaxy tools, setting these parameters as required leads to the
# equivalent behavior. Treating them as required is better because it makes
# the implicit setting of parameters more transparent to the user
# (in Galaxy the default would be prefilled in the form and at least
# one option needs to be selected).
if not (param.default is None or type(param.default) is _Null) and param_node.attrib["type"] in ["integer", "float", "text", "boolean", "select"]:
logger.error("%s %s %s %s %s" % (param.name, param.default is None, type(param.default) is _Null, param_type, param.type))
param_node.attrib["optional"] = "false"
else:
param_node.attrib["optional"] = str(not param.required).lower()
# check for parameters with restricted values (which will correspond to a "select" in galaxy)
if param.restrictions is not None or param_type == "boolean":
# it could be either _Choices or _NumericRange, with special case for boolean types
if param_type == "boolean":
create_boolean_parameter(param_node, param)
elif type(param.restrictions) is _Choices:
# TODO if the parameter is used to select the output file type the
# options need to be replaced with the Galaxy data types
# if is_out_type_param(param, model):
# param.restrictions.choices = get_supported_file_types(param.restrictions.choices, supported_file_formats)
# create as many <option> elements as restriction values
if is_out_type_param(param, model):
logger.warning(f"{param.name} {param.type}")
formats = get_formats(param, model, o2g)
for fmt in formats:
option_node = add_child_node(param_node, "option",
OrderedDict([("value", str(fmt))]))
option_node.text = o2g[str(fmt)]
if fmt.lower() != o2g[str(fmt)]:
option_node.text += " (%s)" % fmt
if is_default(fmt, param):
option_node.attrib["selected"] = "true"
else:
for choice in param.restrictions.choices:
option_node = add_child_node(param_node, "option",
OrderedDict([("value", str(choice))]),
text=str(choice))
if is_default(choice, param):
option_node.attrib["selected"] = "true"
# add validator to check that "nothing selected" is not selected for mandatory options w/o default
if param_node.attrib["optional"] == "false" and (param.default is None or type(param.default) is _Null):
validator_node = add_child_node(param_node, "validator", OrderedDict([("type", "expression"), ("message", "A value needs to be selected")]))
validator_node.text = 'value != "select a value"'
# numeric ranges (which appear for int and float ITEMS and ITEMLISTS)
# are reflected by min and max attributes;
# since item lists become text parameters + validator they don't need these attributes
elif type(param.restrictions) is _NumericRange and param_type == "text":
pass
elif type(param.restrictions) is _NumericRange and param_type != "text":
if param.type is not int and param.type is not float:
raise InvalidModelException("Expected either 'int' or 'float' in the numeric range restriction for "
"parameter [%(name)s], but instead got [%(type)s]" %
{"name": param.name, "type": type(param.restrictions)})
# extract the min and max values and add them as attributes
# validate the provided min and max values
if param.restrictions.n_min is not None:
param_node.attrib["min"] = str(param.restrictions.n_min)
if param.restrictions.n_max is not None:
param_node.attrib["max"] = str(param.restrictions.n_max)
elif type(param.restrictions) is _FileFormat:
# has already been handled
pass
else:
raise InvalidModelException("Unrecognized restriction type [%(type)s] for parameter [%(name)s]"
% {"type": type(param.restrictions), "name": param.name})
if param_type == "text":
# for repeats (which are rendered as text field in the tool form) that are actually
# integer/floats special validation is necessary (try to convert them and check if
# in the min max range if a range is given)
if TYPE_TO_GALAXY_TYPE[param.type] in ["integer", "float"]:
valsan = expand_macro(param_node,
"list_%s_valsan" % TYPE_TO_GALAXY_TYPE[param.type],
dict([("name", get_galaxy_parameter_name(param))]))
if type(param.restrictions) is _NumericRange and not (param.restrictions.n_min is None and param.restrictions.n_max is None):
expression = "len(value.split(' ')) == len([_ for _ in value.split(' ') if "
message = "a space separated list of %s values " % TYPE_TO_GALAXY_TYPE[param.type]
if param.restrictions.n_min is not None and param.restrictions.n_max is not None:
expression += f" {param.restrictions.n_min} <= {param.type.__name__}(_) <= {param.restrictions.n_max}"
message += f"in the range {param.restrictions.n_min}:{param.restrictions.n_max} "
elif param.restrictions.n_min is not None:
expression += f" {param.restrictions.n_min} <= {param.type.__name__}(_)"
message += "in the range %s: " % (param.restrictions.n_min)
elif param.restrictions.n_max is not None:
expression += f" {param.type.__name__}(_) <= {param.restrictions.n_max}"
message += "in the range :%s " % (param.restrictions.n_min)
expression += "])\n"
message += "is required"
validator_node = SubElement(valsan, "validator", OrderedDict([("type", "expression"), ("message", message)]))
validator_node.text = CDATA(expression)
else:
# add quotes to the default values (only if they include spaces .. then the UI looks nicer)
if not (param.default is None or type(param.default) is _Null) and param.type is not _InFile:
if type(param.default) is list:
for i, d in enumerate(param.default):
if " " in d:
param.default[i] = '"%s"' % d
# elif " " in param.default:
# param.default = '"%s"' %param.default
# add sanitizer nodes to
# - text params (only those that are not actually integer selects, which are treated above) and
# - select params,
# this is needed for special characters like "[" which are used for example by FeatureFinderMultiplex
if ((param_type == "text" and not TYPE_TO_GALAXY_TYPE[param.type] in ["integer", "float"]) or is_selection_parameter(param)) and param.type is not _InFile:
if param.is_list and not is_selection_parameter(param):
valsan = expand_macro(param_node, "list_string_val",
dict([("name", get_galaxy_parameter_name(param))]))
valsan = expand_macro(param_node, "list_string_san",
dict([("name", get_galaxy_parameter_name(param))]))
# check for default value
if not (param.default is None or type(param.default) is _Null):
# defaults of selects are set via the selected attribute of the options (happens above)
if param_type == "select":
pass
elif type(param.default) is list:
# we ASSUME that a list of parameters looks like:
# $ tool -ignore He Ar Xe
# meaning, that, for example, Helium, Argon and Xenon will be ignored
param_node.attrib["value"] = ' '.join(map(str, param.default))
elif param_type != "boolean":
param_node.attrib["value"] = str(param.default)
else:
# simple boolean with a default
if param.default is True:
param_node.attrib["checked"] = "true"
elif param.type is int or param.type is float or param.type is str:
if param_type == "select":
pass
else:
param_node.attrib["value"] = ""
# add label, help, and argument
label = "%s parameter" % param.name
help_text = ""
if param.description is not None:
label, help_text = generate_label_and_help(param.description)
if param.is_list and not is_selection_parameter(param) and param.type is not _InFile:
help_text += " (space separated list, in order to allow for spaces in list items surround them by single quotes)"
if param.type is _InFile:
help_text += " select %s data sets(s)" % (",".join(get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_InFile])))
param_node.attrib["label"] = label
param_node.attrib["help"] = help_text
def generate_label_and_help(desc):
help_text = ""
# This tag is found in some descriptions
if not isinstance(desc, str):
desc = str(desc)
# desc = desc.encode("utf8")
desc = desc.replace("#br#", ". ")
# Get rid of dots at the end
if desc.endswith("."):
desc = desc.rstrip(".")
# Check if first word is a normal word and make it uppercase
if str(desc).find(" ") > -1:
first_word, rest = str(desc).split(" ", 1)
if str(first_word).islower():
# skip capitalization if the first word is a quotient of the form a/b
if first_word.find("/") == -1:
first_word = first_word.capitalize()
desc = first_word + " " + rest
# label = desc.decode("utf8")
label = desc
# split delimiters ".,?!;("
if len(desc) > 50:
m = re.search(r"([.?!] |e\.g\.|\(e\.g\.|i\.e\.|\(i\.e\.)", desc)
if m is not None:
label = desc[:m.start()].rstrip(".?!, ")
help_text = desc[m.start():].lstrip(".?!, ")
# # Try to split the label if it is too long
# if len(desc) > 50:
# # find an example and put everything before in the label and the e.g. in the help
# if desc.find("e.g.") > 1 :
# label, help_text = desc.split("e.g.",1)
# help_text = "e.g." + help_text
# else:
# # find the end of the first sentence
# # look for ". " because some labels contain .file or something similar
# delimiter = ""
# if desc.find(". ") > 1 and desc.find("? ") > 1:
# if desc.find(". ") < desc.find("? "):
# delimiter = ". "
# else:
# delimiter = "? "
# elif desc.find(". ") > 1:
# delimiter = ". "
# elif desc.find("? ") > 1:
# delimiter = "? "
# if delimiter != "":
# label, help_text = desc.split(delimiter, 1)
#
# # add the question mark back
# if delimiter == "? ":
# label += "? "
# remove a trailing "<br>" linebreak (note: rstrip('<br>') would strip characters, not the substring)
label = label.rstrip()
if label.endswith("<br>"):
label = label[:-len("<br>")].rstrip()
return label, help_text
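# Example of the splitting behavior (hypothetical description string):
#
#   generate_label_and_help("fraction of the peaks to keep. e.g. 0.5 keeps the better half")
#   # -> label: "Fraction of the peaks to keep"
#   #    help:  "e.g. 0.5 keeps the better half"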
def is_boolean_parameter(param):
"""
determines if the given choices are boolean (basically, if the possible values are true/false)
@param param the ctd parameter
@return True iff a boolean parameter
"""
# detect boolean selects of OpenMS
if type(param.restrictions) is _Choices:
return set(param.restrictions.choices) == {"true", "false"}
else:
return param.type is bool
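# Sketch of the decision (restriction objects stand in for ctd _Choices):
# choices {"true", "false"} -> True; choices {"yes", "no"} -> False;
# no _Choices restriction -> True iff param.type is bool.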
def is_selection_parameter(param):
"""
determines if there are choices for the parameter and it's not bool
@param param the ctd parameter
@return True iff a selection parameter
"""
if type(param.restrictions) is _Choices:
return set(param.restrictions.choices) != {"true", "false"}
else:
return False
def get_lowercase_list(some_list):
return [str(_).lower().strip() for _ in some_list]
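# Doctest-style example:
#
#   >>> get_lowercase_list(["True", " FALSE ", 1])
#   ['true', 'false', '1']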
def create_boolean_parameter(param_node, param):
"""
creates a galaxy boolean parameter type
this method assumes that param has restrictions, and that only two restrictions are present
(either yes/no or true/false)
TODO: true and false values can be way more than 'true' and 'false'
but for that we need CTD support
"""
# in ctd (1.6.2) bools are strings with restriction true,false
# - if the default is false then they are flags
# - otherwise the true or false value needs to be added (where the true case is unnecessary)
# A special case are restrictions false,true which are not treated as flags
if param.type == str:
choices = get_lowercase_list(param.restrictions.choices)
if set(choices) == {"true", "false"}:
param_node.attrib["truevalue"] = "true"
param_node.attrib["falsevalue"] = "false"
else:
param_node.attrib["truevalue"] = choices[0]
param_node.attrib["falsevalue"] = choices[1]
# set the checked attribute
if param.default is not None:
checked_value = "false"
default = param.default.lower().strip()
if default == "yes" or default == "true":
checked_value = "true"
param_node.attrib["checked"] = checked_value
else:
param_node.attrib["truevalue"] = "true"
param_node.attrib["falsevalue"] = "false"
param_node.attrib["checked"] = str(param.default).lower()
if "optional" in param_node.attrib:
del param_node.attrib["optional"]
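# Illustrative result for a string parameter restricted to "true,false" with
# default "false" (attribute order may differ in real output):
#
#   <param ... type="boolean" truevalue="true" falsevalue="false" checked="false"/>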
def all_outputs(model, parameter_hardcoder):
"""
return lists of required and optional output parameters
"""
out = []
optout = []
for param in utils.extract_and_flatten_parameters(model):
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value:
# let's not use an extra level of indentation and use NOP
continue
if not (param.type is _OutFile or param.type is _OutPrefix):
continue
if not param.required:
optout.append(param)
else:
out.append(param)
return out, optout
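# Usage sketch: split the outputs to decide, e.g., whether a stdout fallback
# output is needed (as done in create_outputs below):
#
#   out, optout = all_outputs(model, parameter_hardcoder)
#   need_stdout_fallback = (len(out) == 0)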
def output_filter_text(param):
"""
get the text or the filter for optional outputs
"""
return '"%s_FLAG" in OPTIONAL_OUTPUTS' % param.name
def create_outputs(parent, model, **kwargs):
"""
create outputs section of the Galaxy tool
@param parent the parent node to which the outputs node is added
@param model the ctd model
@param kwargs
- parameter_hardcoder and
- supported_file_formats
"""
outputs_node = add_child_node(parent, "outputs")
parameter_hardcoder = kwargs["parameter_hardcoder"]
for param in utils.extract_and_flatten_parameters(model):
param = modify_param_for_galaxy(param)
# no need to show hardcoded parameters
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value:
# let's not use an extra level of indentation and use NOP
continue
if param.type is not _OutFile and param.type is not _OutPrefix:
continue
create_output_node(outputs_node, param, model, kwargs["supported_file_formats"], parameter_hardcoder)
# If there are no outputs defined in the ctd the node will have no children
# and the stdout will be used as output
out, optout = all_outputs(model, parameter_hardcoder)
if len(out) == 0:
stdout = add_child_node(outputs_node, "data",
OrderedDict([("name", "stdout"), ("format", "txt"),
("label", "${tool.name} on ${on_string}: stdout")]))
add_child_node(stdout, "filter", text="OPTIONAL_OUTPUTS is None")
# manually add output for the ctd file
ctd_out = add_child_node(outputs_node, "data", OrderedDict([("name", "ctd_out"), ("format", "xml"), ("label", "${tool.name} on ${on_string}: ctd")]))
add_child_node(ctd_out, "filter", text='OPTIONAL_OUTPUTS is not None and "ctd_out_FLAG" in OPTIONAL_OUTPUTS')
return outputs_node
def create_output_node(parent, param, model, supported_file_formats, parameter_hardcoder):
g2o, o2g = get_fileformat_maps(supported_file_formats)
# add a data node / collection + discover_datasets
# in the former case we just set the discover_node equal to the data node
# then we can just use this to set the common format attribute
if not param.is_list and param.type is not _OutPrefix:
data_node = add_child_node(parent, "data")
discover_node = data_node
else:
data_node = add_child_node(parent, "collection")
data_node.attrib["type"] = "list"
discover_node = add_child_node(data_node, "discover_datasets",
OrderedDict([("directory", get_galaxy_parameter_path(param, separator="_")),
("recurse", "true")]))
data_node.attrib["name"] = get_galaxy_parameter_path(param, separator="_")
data_node.attrib["label"] = "${tool.name} on ${on_string}: %s" % utils.extract_param_name(param)
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if type_param is not None:
type_param_name = get_galaxy_parameter_path(type_param)
type_param_choices = get_formats(param, model, o2g) # [_ for _ in type_param.restrictions.choices]
elif len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
type_param_name = get_galaxy_parameter_path(param, suffix="type")
type_param_choices = get_formats(param, model, o2g)
else:
type_param_name = None
# if there is only a single possible output format we set this
# logger.error("%s %s %s %s %s" %(param.name, formats, type_param, fmt_from_corresponding, corresponding_input))
if len(formats) == 1:
logger.info(f"OUTPUT {param.name} 1 fmt {formats}", 1)
discover_node.attrib["format"] = formats.pop()
if param.is_list:
discover_node.attrib["pattern"] = "__name__"
elif param.type is _OutPrefix:
discover_node.attrib["pattern"] = r"_?(?P<designation>.*)\.[^.]*"
# if there is another parameter where the user selects the format
# then this format was added as file extension on the CLI, now we can discover this
elif type_param_name is not None:
logger.info("OUTPUT %s type" % param.name, 1)
if not param.is_list:
if len(type_param_choices) > 1:
change_node = add_child_node(data_node, "change_format")
for i, r in enumerate(type_param_choices):
f = o2g.get(r, None)
# TODO this should not happen for fully specified fileformats file
if f is None:
f = r
if i == 0:
data_node.attrib["format"] = f
else:
add_child_node(change_node, "when", OrderedDict([("input", type_param_name), ("value", r), ("format", f)]))
else:
discover_node.attrib["pattern"] = "__name_and_ext__"
elif corresponding_input is not None:
logger.info(f"OUTPUT {param.name} input {corresponding_input.name}", 1)
if param.is_list:
discover_node.attrib["pattern"] = "__name_and_ext__"
# data_node.attrib["structured_like"] = get_galaxy_parameter_name(corresponding_input)
# data_node.attrib["inherit_format"] = "true"
else:
data_node.attrib["format_source"] = get_galaxy_parameter_path(corresponding_input)
data_node.attrib["metadata_source"] = get_galaxy_parameter_path(corresponding_input)
else:
logger.info("OUTPUT %s else" % (param.name), 1)
if not param.is_list:
data_node.attrib["auto_format"] = "true"
else:
raise InvalidModelException("No way to know the format for"
"for output [%(name)s]" % {"name": param.name})
# # data output has format (except if format_source has been added already)
# # note .. collection output has no format
# if not param.is_list and not "format_source" in data_node.attrib:
# data_node.attrib["format"] = data_format
# add filter for optional parameters
if not param.required:
filter_node = add_child_node(data_node, "filter")
filter_node.text = "OPTIONAL_OUTPUTS is not None and " + output_filter_text(param)
return data_node
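# Illustrative result for a non-list output with a single possible format
# (names and formats here are made up):
#
#   <data name="out" format="mzml" label="${tool.name} on ${on_string}: out"/>
#
# whereas list and prefix outputs become a <collection type="list"> holding a
# <discover_datasets> node.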
def get_supported_file_types(formats, supported_file_formats):
r = set()
for f in formats:
if f in supported_file_formats:
r.add(supported_file_formats[f].galaxy_extension)
return r
# print f, f in supported_file_formats, supported_file_formats[f].galaxy_extension
# return set([supported_file_formats[_].galaxy_extension
# for _ in formats if _ in supported_file_formats])
def create_change_format_node(parent, data_formats, input_ref):
# <change_format>
# <when input="secondary_structure" value="true" format="txt"/>
# </change_format>
change_format_node = add_child_node(parent, "change_format")
for data_format in data_formats:
add_child_node(change_format_node, "when",
OrderedDict([("input", input_ref), ("value", data_format), ("format", data_format)]))
def create_tests(parent, inputs=None, outputs=None, test_macros_prefix=None, name=None):
"""
create tests section of the Galaxy tool
@param parent the parent node of the tests section
@param inputs a copy of the inputs
"""
tests_node = add_child_node(parent, "tests")
if not (inputs is None or outputs is None):
fidx = 0
test_node = add_child_node(tests_node, "test")
strip_elements(inputs, "validator", "sanitizer")
for node in inputs.iter():
if node.tag == "expand" and node.attrib["macro"] == ADVANCED_OPTIONS_NAME + "macro":
node.tag = "conditional"
node.attrib["name"] = ADVANCED_OPTIONS_NAME + "cond"
add_child_node(node, "param", OrderedDict([("name", ADVANCED_OPTIONS_NAME + "selector"), ("value", "advanced")]))
if "type" not in node.attrib:
continue
if (node.attrib["type"] == "select" and "true" in {_.attrib.get("selected", "false") for _ in node}) or\
(node.attrib["type"] == "select" and node.attrib.get("value", "") != ""):
node.tag = "delete_node"
continue
# TODO make this optional (i.e. add a parameter)
if node.attrib.get("optional", None) == "true" and node.attrib["type"] != "boolean":
node.tag = "delete_node"
continue
if node.attrib["type"] == "boolean":
if node.attrib["checked"] == "true":
node.attrib["value"] = "true" # node.attrib["truevalue"]
else:
node.attrib["value"] = "false" # node.attrib["falsevalue"]
elif node.attrib["type"] == "text" and node.attrib["value"] == "":
node.attrib["value"] = "1 2" # use a space separated list here to cover the repeat (int/float) case
elif node.attrib["type"] == "integer" and node.attrib["value"] == "":
node.attrib["value"] = "1"
elif node.attrib["type"] == "float" and node.attrib["value"] == "":
node.attrib["value"] = "1.0"
elif node.attrib["type"] == "select":
if node.attrib.get("display", None) == "radio" or node.attrib.get("multiple", "false") == "false":
node.attrib["value"] = node[0].attrib["value"]
elif node.attrib.get("multiple", None) == "true":
node.attrib["value"] = ",".join([_.attrib["value"] for _ in node if "value" in _.attrib])
elif node.attrib["type"] == "data":
node.attrib["ftype"] = node.attrib["format"].split(',')[0]
if node.attrib.get("multiple", "false") == "true":
node.attrib["value"] = "{fidx}test.ext,{fidx}test2.ext".format(fidx=fidx)
else:
node.attrib["value"] = f"{fidx}test.ext"
fidx += 1
for node in inputs.iter():
for a in set(node.attrib) - {"name", "value", "ftype"}:
del node.attrib[a]
strip_elements(inputs, "delete_node", "option", "expand")
for node in inputs:
test_node.append(node)
outputs_cnt = 0
for node in outputs.iter():
if node.tag == "data" or node.tag == "collection":
# assuming that all filters evaluate to false
has_filter = False
for c in node:
if c.tag == "filter":
has_filter = True
break
if not has_filter:
outputs_cnt += 1
else:
node.tag = "delete_node"
if node.tag == "data":
node.tag = "output"
try:
node.attrib["ftype"] = node.attrib["format"]
except KeyError:
pass
node.attrib["value"] = "outfile.txt"
if node.tag == "collection":
node.tag = "output_collection"
if node.attrib.get("name", None) == "stdout":
node.attrib["lines_diff"] = "2"
for a in set(node.attrib) - {"name", "value", "ftype", "lines_diff"}:
del node.attrib[a]
strip_elements(outputs, "delete_node", "discover_datasets", "filter", "change_format")
for node in outputs:
test_node.append(node)
# if no optional output is selected the stdout is added as output
if outputs_cnt == 0:
outputs_cnt = 1
test_node.attrib["expect_num_outputs"] = str(outputs_cnt)
elif not (test_macros_prefix is None or name is None):
expand_macros(tests_node, [p + name for p in test_macros_prefix])
def create_test_only(model, **kwargs):
parameter_hardcoder = kwargs["parameter_hardcoder"]
unsniffable = kwargs["test_unsniffable"]
supported_file_formats = kwargs["supported_file_formats"]
g2o, o2g = get_fileformat_maps(supported_file_formats)
section_nodes = dict()
section_params = dict()
test = Element("test")
advanced = add_child_node(test, "conditional", OrderedDict([("name", "adv_opts_cond")]))
add_child_node(advanced, "param", OrderedDict([("name", "adv_opts_selector"), ("value", "advanced")]))
optout = ["ctd_out_FLAG"]
outcnt = 1
for param in utils.extract_and_flatten_parameters(model, True):
ext = None
# no need to show hardcoded parameters
# except for the test parameter
hardcoded_value = parameter_hardcoder.get_hardcoded_value(utils.extract_param_name(param), model.name)
if parameter_hardcoder.get_blacklist(utils.extract_param_name(param), model.name) or hardcoded_value is not None:
if param.name != "test":
continue
if utils.extract_param_name(param.parent) in section_nodes:
parent = section_nodes[utils.extract_param_name(param.parent)]
elif type(param) is not ParameterGroup and param.advanced:
parent = advanced
else:
parent = test
if type(param) is ParameterGroup:
section_params[utils.extract_param_name(param)] = param
section_nodes[utils.extract_param_name(param)] = add_child_node(parent, "section", OrderedDict([("name", param.name)]))
continue
if param.type is _OutFile:
given = type(param.default) is _OutFile or (type(param.default) is list) and len(param.default) > 0
if not param.required and given:
optout.append("%s_FLAG" % param.name)
if given:
formats = get_galaxy_formats(param, model, o2g, TYPE_TO_GALAXY_TYPE[_OutFile])
type_param = get_out_type_param(param, model, parameter_hardcoder)
corresponding_input, fmt_from_corresponding = get_corresponding_input(param, model)
if type(param.default) is _OutFile:
f = param.default
elif type(param.default) is list:
f = param.default[0]
else:
raise Exception("Outfile with non str or list default {}[{}]".format(param, type(param.default)))
# get the file type from the longest possible extension that
# matches the known extensions
# longest: because e.g. pep.xml should be preferred over xml
if f.endswith(".tmp"):
f = f[:-4]
splitted = f.split(".")
ext = None
for i in range(len(splitted)):
check_ext = ".".join(splitted[i:])
if check_ext in o2g:
ext = o2g[check_ext]
break
if ext not in formats:
if ext == "txt" and "csv" in formats:
ext = "csv"
elif ext == "txt" and "tsv" in formats:
ext = "tsv"
elif len(formats) == 1:
ext = formats[0]
if len(formats) > 1 and (corresponding_input is None or not
fmt_from_corresponding): # and not param.is_list:
if type_param is None:
try:
print("{} -> {}".format(ext, g2o[ext]))
attrib = OrderedDict([("name", param.name + "_type"), ("value", g2o[ext])])
add_child_node(parent, "param", attrib)
except KeyError:
raise Exception(f"parent {parent} name {param.name} ext {ext}")
if type_param is not None and type(type_param.default) is _Null:
if ext is not None:
type_param.default = ext
if param.required or given:
outcnt += 1
# don't output empty values for bool and data parameters
if type(param.default) is _Null and not param.required:
if is_boolean_parameter(param):
continue
elif param.type is _OutFile:
continue
elif param.type is _InFile:
continue
elif type(param.restrictions) is _Choices and (param.default is None or type(param.default) is _Null):
continue
# lists need to be joined appropriately
# - special care for outfile lists (i.e. collections): since we do not know (easily) the names of the collection elements we just use the count
# exception: list parameters that are hardcoded to non-lists (but the default is still a list)
if not param.is_list and type(param.default) is list:
logger.info("Found non-list parameter %s with list default (hardcoded?). Using only first value/" % param.name, 0)
try:
param.default = param.default[0]
except IndexError:
param.default = _Null()
if param.is_list and type(param.default) is not _Null:
if param.type is _InFile:
value = ','.join(map(str, param.default))
elif param.type is _OutFile:
value = str(len(param.default))
elif param.type is str:
if type(param.restrictions) is _Choices:
value = ','.join(map(str, param.default))
else:
value = '"' + '" "'.join(map(str, param.default)) + '"'
else:
value = ' '.join(map(str, param.default))
else:
if type(param.default) is bool:
value = str(param.default).lower()
else:
value = str(param.default)
# use name where dashes are replaced by underscores
# see also create_inputs
if param.type is _OutFile:
name = get_galaxy_parameter_path(param, separator="_")
if param.is_list:
nd = add_child_node(test, "output_collection", OrderedDict([("name", name), ("count", value)]))
else:
# TODO use delta_frac https://github.com/galaxyproject/galaxy/pull/9425
nd = add_child_node(test, "output", OrderedDict([("name", name), ("file", value), ("compare", "sim_size"), ("delta", "5700")]))
if ext:
nd.attrib["ftype"] = ext
elif param.type is _OutPrefix:
# for outprefix outputs the elements / count need to be added manually
name = get_galaxy_parameter_path(param, separator="_")
nd = add_child_node(test, "output_collection", OrderedDict([("name", name), ("count", "")]))
else:
name = get_galaxy_parameter_name(param)
nd = add_child_node(parent, "param", OrderedDict([("name", name), ("value", value)]))
# add format attribute for unsniffable extensions
if param.type is _InFile:
ext = os.path.splitext(value)[1][1:]
if ext in unsniffable and ext in o2g:
nd.attrib["ftype"] = o2g[ext]
add_child_node(test, "param", OrderedDict([("name", "OPTIONAL_OUTPUTS"),
("value", ",".join(optout))]))
ctd_out = add_child_node(test, "output", OrderedDict([("name", "ctd_out"), ("ftype", "xml")]))
ctd_assert = add_child_node(ctd_out, "assert_contents")
add_child_node(ctd_assert, "is_valid_xml")
if outcnt == 0:
outcnt += 1
nd = add_child_node(test, "output", OrderedDict([("name", "stdout"),
("value", "stdout.txt"),
("compare", "sim_size")]))
test.attrib["expect_num_outputs"] = str(outcnt)
# if all_optional_outputs(model, parameter_hardcoder):
return test
def create_help(tool, model):
"""
create help section of the Galaxy tool
@param tool the Galaxy tool
@param model the ctd model
"""
help_node = add_child_node(tool, "help")
help_node.text = CDATA(utils.extract_tool_help_text(model))
def add_child_node(parent_node, child_node_name, attributes=OrderedDict([]), text=None):
"""
helper function to add a child node using the given name to the given parent node
@param parent_node the parent
@param child_node_name the desired name of the child
@param attributes desired attributes of the child
@return the created child node
"""
child_node = SubElement(parent_node, child_node_name, attributes)
if text is not None:
child_node.text = text
return child_node
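# Usage example:
#
#   tool = Element("tool")
#   help_node = add_child_node(tool, "help", text="some help")
#   # appends <help>some help</help> under <tool>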
|
WorkflowConversion/CTDConverter
|
ctdconverter/galaxy/converter.py
|
Python
|
gpl-3.0
| 95,121
|
[
"Galaxy",
"OpenMS"
] |
57d482a4b020ff44676360befdb75122dc04bad8c02fc0b517d05ee720e26617
|
__author__ = 'sebastians'
# The function provided here can be used if you get no image output with cmd.png (either no picture or a black picture).
# It can also be used if you experience segmentation faults with cmd.ray.
from pymol import cmd
import os
def png_workaround(filepath, width=1024, height=768):
"""Workaround for (a) severe bug(s) in PyMOL preventing ray-traced images to be produced in command-line mode.
Use this function in case neither cmd.ray() or cmd.png() work.
"""
cmd.set('ray_trace_frames', 1) # Frames are raytraced before saving an image.
cmd.viewport(width, height) # Set resolution
### Workaround for raytracing in command-line mode
cmd.mpng(filepath, 1, 1) # Use batch png mode with 1 frame only
cmd.mplay() # cmd.mpng needs the animation to 'run'
os.rename("".join([filepath[:-4], '0001.png']), "".join([filepath[:-4], '.png'])) # Remove frame number in filename
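# Usage sketch (assumes a structure was already loaded; the file name is hypothetical):
#
#   cmd.load('structure.pdb')
#   png_workaround('/tmp/snapshot.png')  # writes /tmp/snapshot.png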
|
ssalentin/pymol-animations
|
special-topics/workaround-png.py
|
Python
|
mit
| 923
|
[
"PyMOL"
] |
1b3428834feeebc8fd6c20c5049c8624a95296d8df8ed602c7428b21ccecb11b
|
import numpy
from VirtualObservatoryCatalog import VirtualObservatoryCatalog
class GBMBurstCatalog(VirtualObservatoryCatalog):
def __init__(self):
super(GBMBurstCatalog, self).__init__('fermigbrst',
'http://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermigbrst&',
'Fermi/GBM burst catalog')
def applyFormat(self, votable):
table = votable.to_table()
table['ra'].format = '5.3f'
table['dec'].format = '5.3f'
return table
#########
threefgl_types = {
'agn' : 'other non-blazar active galaxy',
'bcu' : 'active galaxy of uncertain type',
'bin' : 'binary',
'bll' : 'BL Lac type of blazar',
'css' : 'compact steep spectrum quasar',
'fsrq' : 'FSRQ type of blazar',
'gal' : 'normal galaxy (or part)',
'glc' : 'globular cluster',
'hmb' : 'high-mass binary',
'nlsy1' : 'narrow line Seyfert 1',
'nov' : 'nova',
'PSR' : 'pulsar, identified by pulsations',
'psr' : 'pulsar, no pulsations seen in LAT yet',
'pwn' : 'pulsar wind nebula',
'rdg' : 'radio galaxy',
'sbg' : 'starburst galaxy',
'sey' : 'Seyfert galaxy',
'sfr' : 'star-forming region',
'snr' : 'supernova remnant',
'spp' : 'special case - potential association with SNR or PWN',
'ssrq' : 'soft spectrum radio quasar',
'' : 'unknown'
}
class LATSourceCatalog(VirtualObservatoryCatalog):
def __init__(self):
super(LATSourceCatalog, self).__init__('fermilpsc',
'http://heasarc.gsfc.nasa.gov/cgi-bin/vo/cone/coneGet.pl?table=fermilpsc&',
'Fermi/LAT source catalog')
def applyFormat(self, votable):
table = votable.to_table()
table['ra'].format = '5.3f'
table['dec'].format = '5.3f'
table['Search_Offset'].format = '5.3f'
def translate(key):
# preserve the distinction between 'PSR' and 'psr' (see dictionary above)
if key.lower() == 'psr':
return threefgl_types[key]
else:
return threefgl_types[key.lower()]
# Translate the 3-letter code to a more informative category, according
# to the dictionary above (list(map(...)) keeps this working on Python 3)
table['source_type'] = numpy.array(list(map(translate, table['source_type'])))
new_table = table['name',
'source_type',
'ra','dec',
'assoc_name_1',
'tevcat_assoc',
'Search_Offset']
return new_table.group_by('Search_Offset')
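# Usage sketch: instantiating the catalog and shaping a query result. The
# cone-search entry point is assumed to be provided by the
# VirtualObservatoryCatalog base class; names here are illustrative.
#
#   lat = LATSourceCatalog()
#   # table = lat.cone_search(ra, dec, radius)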
|
sybenzvi/3ML
|
threeML/catalogs/Fermi.py
|
Python
|
bsd-3-clause
| 2,590
|
[
"Galaxy"
] |
0c61ff7ff1153a92e727f7f3484c2ac911b7675b45222cfe95653f6f0c882ca9
|
import pytest
import re
from time import sleep
import capybara
from capybara.exceptions import ReadOnlyElementError
from capybara.node.element import Element
from capybara.tests.helpers import extract_results, isfirefox, ismarionette, ismarionettelt
class NodeTestCase:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
class TestNode(NodeTestCase):
def test_acts_like_a_session_object(self, session):
session.visit("/form")
form = session.find("css", "#get-form")
assert form.has_field("Middle Name")
assert form.has_no_field("Languages")
form.fill_in("Middle Name", value="Monkey")
form.click_button("med")
assert extract_results(session)["form[middle_name]"] == "Monkey"
def test_scopes_css_selectors(self, session):
assert not session.find("css", "#second").has_css("h1")
class TestNodeQueryScope(NodeTestCase):
def test_returns_a_reference_to_the_element_the_query_was_evaluated_on(self, session):
node = session.find("css", "#first")
assert node.query_scope == node.session.document
assert node.find("css", "#foo").query_scope == node
class TestNodeText(NodeTestCase):
def test_extracts_node_text(self, session):
assert session.find_all("//a")[0].text == "labore"
assert session.find_all("//a")[1].text == "ullamco"
def test_returns_document_text_on_html_selector(self, session):
session.visit("/with_simple_html")
assert session.find("/html").text == "Bar"
class TestNodeAttribute(NodeTestCase):
def test_extracts_node_attributes(self, session):
assert session.find_all("//a")[0]["class"] == "simple"
assert session.find_all("//a")[1]["id"] == "foo"
assert session.find_all("//input")[0]["type"] == "text"
def test_extracts_boolean_node_attributes(self, session):
assert session.find("//input[@id='checked_field']")["checked"]
@pytest.mark.requires("css")
class TestNodeStyle(NodeTestCase):
def test_returns_the_computed_style_value(self, session):
assert session.find("css", "#first").style("display") == {"display": "block"}
assert session.find("css", "#second").style("display") == {"display": "inline"}
def test_returns_multiple_style_values(self, session):
assert session.find("css", "#first").style("display", "line-height") == {
"display": "block",
"line-height": "25px"}
class TestNodeValue(NodeTestCase):
def test_allows_retrieval_of_the_value(self, session):
assert session.find("//textarea[@id='normal']").value == "banana"
def test_does_not_swallow_extra_newlines_in_textarea(self, session):
assert session.find("//textarea[@id='additional_newline']").value == "\nbanana"
def test_does_not_swallow_newlines_for_set_content_in_textarea(self, session):
session.find("//textarea[@id='normal']").set("\nbanana")
assert session.find("//textarea[@id='normal']").value == "\nbanana"
def test_returns_any_html_content_in_textarea(self, session):
session.find("//textarea[1]").set("some <em>html</em>here")
assert session.find("//textarea[1]").value == "some <em>html</em>here"
def test_defaults_to_on_for_checkboxes(self, session):
session.visit("/form")
assert session.find("//input[@id='valueless_checkbox']").value == "on"
def test_defaults_to_on_for_radio_buttons(self, session):
session.visit("/form")
assert session.find("//input[@id='valueless_radio']").value == "on"
class TestNodeSet(NodeTestCase):
def test_allows_assignment_of_field_value(self, session):
assert session.find_first("//input").value == "monkey"
session.find_first("//input").set("gorilla")
assert session.find_first("//input").value == "gorilla"
@pytest.mark.requires("js")
def test_fills_the_field_even_if_the_caret_was_not_at_the_end(self, session):
session.execute_script(
"var el = document.getElementById('test_field');"
"el.focus();"
"el.setSelectionRange(0, 0);")
session.find_first("//input").set("")
assert session.find_first("//input").value == ""
def test_raises_if_the_text_field_is_readonly(self, session):
assert session.find_first("//input[@readonly]").value == "should not change"
with pytest.raises(ReadOnlyElementError):
session.find_first("//input[@readonly]").set("changed")
assert session.find_first("//input[@readonly]").value == "should not change"
def test_raises_if_the_textarea_is_readonly(self, session):
assert session.find_first("//textarea[@readonly]").value == "textarea should not change"
with pytest.raises(ReadOnlyElementError):
session.find_first("//textarea[@readonly]").set("changed")
assert session.find_first("//textarea[@readonly]").value == "textarea should not change"
@pytest.mark.requires("js")
def test_allows_me_to_change_the_contents_of_a_contenteditable_element(self, session):
session.visit("/with_js")
session.find("css", "#existing_content_editable").set("WYSIWYG")
assert session.find("css", "#existing_content_editable").text == "WYSIWYG"
@pytest.mark.requires("js")
def test_allows_me_to_set_the_contents_of_a_contenteditable_element(self, session):
session.visit("/with_js")
session.find("css", "#blank_content_editable").set("WYSIWYG")
assert session.find("css", "#blank_content_editable").text == "WYSIWYG"
class TestNodeTagName(NodeTestCase):
def test_extracts_node_tag_name(self, session):
assert session.find_all("//a")[0].tag_name == "a"
assert session.find_all("//a")[1].tag_name == "a"
assert session.find_all("//p")[0].tag_name == "p"
class TestNodeDisabled(NodeTestCase):
def test_extracts_disabled_node(self, session):
session.visit("/form")
assert session.find("//input[@id='customer_name']").disabled
assert not session.find("//input[@id='customer_email']").disabled
def test_sees_disabled_options_as_disabled(self, session):
session.visit("/form")
assert not session.find("//select[@id='form_title']/option[1]").disabled
assert session.find("//select[@id='form_title']/option[@disabled]").disabled
def test_sees_enabled_options_in_disabled_select_as_disabled(self, session):
session.visit("/form")
assert session.find("//select[@id='form_disabled_select']/option").disabled
assert not session.find("//select[@id='form_title']/option[1]").disabled
def test_sees_enabled_options_in_disabled_optgroup_as_disabled(self, session):
session.visit("/form")
assert session.find("//option", text="A.B.1").disabled
assert not session.find("//option", text="A.2").disabled
def test_sees_a_disabled_fieldset_as_disabled(self, session):
session.visit("/form")
assert session.find("css", "#form_disabled_fieldset").disabled
def test_sees_elements_not_in_first_legend_in_a_disabled_fieldset_as_disabled(self, session):
session.visit("/form")
assert session.find("//input[@id='form_disabled_fieldset_child']").disabled
assert session.find("//input[@id='form_disabled_fieldset_second_legend_child']").disabled
assert not session.find("//input[@id='form_enabled_fieldset_child']").disabled
def test_sees_elements_in_first_legend_in_a_disabled_fieldset_as_enabled(self, session):
session.visit("/form")
assert not session.find("//input[@id='form_disabled_fieldset_legend_child']").disabled
def test_sees_options_not_in_first_legend_in_a_disabled_fieldset_as_disabled(self, session):
session.visit("/form")
assert session.find("//option", text="Disabled Child Option").disabled
def test_is_boolean(self, session):
session.visit("/form")
assert session.find("//select[@id='form_disabled_select']/option").disabled is True
assert session.find("//select[@id='form_disabled_select2']/option").disabled is True
assert session.find("//select[@id='form_title']/option[1]").disabled is False
class TestNodeVisible(NodeTestCase):
def test_extracts_node_visibility(self, session):
capybara.ignore_hidden_elements = False
assert session.find_first("//a").visible
assert not session.find("//div[@id='hidden']").visible
assert not session.find("//div[@id='hidden_via_ancestor']").visible
assert not session.find("//div[@id='hidden_attr']").visible
assert not session.find("//a[@id='hidden_attr_via_ancestor']").visible
assert not session.find("//input[@id='hidden_input']").visible
def test_is_boolean(self, session):
capybara.ignore_hidden_elements = False
assert session.find_first("//a").visible is True
assert session.find("//div[@id='hidden']").visible is False
class TestNodeChecked(NodeTestCase):
def test_extracts_node_checked_state(self, session):
session.visit("/form")
assert session.find("//input[@id='gender_female']").checked is True
assert session.find("//input[@id='gender_male']").checked is False
assert session.find_first("//h1").checked is False
class TestNodeSelected(NodeTestCase):
def test_extracts_node_selected_state(self, session):
session.visit("/form")
assert session.find("//option[@value='en']").selected is True
assert session.find("//option[@value='sv']").selected is False
assert session.find_first("//h1").checked is False
class TestNodeEquals(NodeTestCase):
def test_is_true_for_the_same_element(self, session):
assert session.find("//h1") == session.find("//h1")
def test_is_false_for_different_elements(self, session):
assert session.find("//h1") != session.find_first("//p")
def test_is_false_for_unrelated_objects(self, session):
assert session.find("//h1") != "Not a node"
class TestNodePath(NodeTestCase):
def test_returns_xpath_which_points_to_itself(self, session):
session.visit("/path")
element = session.find("link", "Second Link")
assert session.find("xpath", element.path) == element
@pytest.mark.requires("js", "drag")
class TestNodeDragTo(NodeTestCase):
def test_drags_and_drops_an_object(self, session):
session.visit("/with_js")
element = session.find("//div[@id='drag']")
target = session.find("//div[@id='drop']")
element.drag_to(target)
assert session.find("//div[contains(., 'Dropped!')]")
@pytest.mark.requires("hover")
class TestNodeHover(NodeTestCase):
def test_allows_hovering_on_an_element(self, session):
session.visit("/with_hover")
assert not session.find("css", ".hidden_until_hover", visible=False).visible
session.find("css", ".wrapper").hover()
assert session.find("css", ".hidden_until_hover", visible=False).visible
class TestNodeClick(NodeTestCase):
def test_does_not_follow_a_link_if_no_href(self, session):
session.find("css", "#link_placeholder").click()
assert session.current_url.endswith("/with_html")
def test_is_able_to_check_a_checkbox(self, session):
session.visit("/form")
checkbox = session.find("checkbox", "form_terms_of_use")
assert not checkbox.checked
checkbox.click()
assert checkbox.checked
def test_is_able_to_uncheck_a_checkbox(self, session):
session.visit("/form")
checkbox = session.find("checkbox", "form_pets_dog")
assert checkbox.checked
checkbox.click()
assert not checkbox.checked
def test_is_able_to_select_a_radio_button(self, session):
session.visit("/form")
radio = session.find("radio_button", "gender_male")
assert not radio.checked
radio.click()
assert radio.checked
@pytest.mark.requires("js")
def test_allows_modifiers(self, session):
Keys = pytest.importorskip("selenium.webdriver.common.keys").Keys
session.visit("/with_js")
session.find("css", "#click-test").click(Keys.SHIFT)
assert session.has_link("Has been shift clicked")
@pytest.mark.requires("js")
def test_allows_multiple_modifiers(self, session):
Keys = pytest.importorskip("selenium.webdriver.common.keys").Keys
session.visit("/with_js")
session.find("css", "#click-test").click(Keys.ALT, Keys.META, Keys.SHIFT)
assert session.has_link("alt meta shift")
@pytest.mark.requires("js")
def test_allows_adjusting_the_click_offset(self, session):
session.visit("/with_js")
session.find("css", "#click-test").click(x=5, y=5)
link = session.find("link", "has-been-clicked")
regex = re.compile(r"^Has been clicked at (?P<x>[\d\.-]+),(?P<y>[\d\.-]+)$")
locations = regex.search(link.text).groupdict()
assert float(locations['x']) == pytest.approx(5, 1)
assert float(locations['y']) == pytest.approx(5, 1)
def test_handles_fixed_headers_and_footers(self, session):
session.visit("/with_fixed_header_footer")
# session.click_link("Go to root")
session.find("link", "Go to root").click()
assert session.has_current_path("/")
@pytest.mark.requires("js")
class TestNodeDoubleClick(NodeTestCase):
def test_double_clicks_an_element(self, session):
if ismarionettelt(59, session):
pytest.skip("selenium/geckodriver doesn't support double-click")
session.visit("/with_js")
session.find("css", "#click-test").double_click()
assert session.find("css", "#has-been-double-clicked")
def test_allows_modifiers(self, session):
if ismarionettelt(59, session):
pytest.skip("selenium/geckodriver doesn't support double-click")
Keys = pytest.importorskip("selenium.webdriver.common.keys").Keys
session.visit("/with_js")
session.find("css", "#click-test").double_click(Keys.ALT)
assert session.has_link("Has been alt double clicked")
def test_allows_adjusting_the_click_offset(self, session):
if ismarionettelt(59, session):
pytest.skip("selenium/geckodriver doesn't support double-click")
session.visit("/with_js")
session.find("css", "#click-test").double_click(x=10, y=5)
link = session.find("link", "has-been-double-clicked")
regex = re.compile(r"^Has been double clicked at (?P<x>[\d\.-]+),(?P<y>[\d\.-]+)$")
locations = regex.search(link.text).groupdict()
assert float(locations['x']) == pytest.approx(10, 1)
assert float(locations['y']) == pytest.approx(5, 1)
@pytest.mark.requires("js")
class TestNodeRightClick(NodeTestCase):
def test_right_clicks_an_element(self, session):
session.visit("/with_js")
session.find("css", "#click-test").right_click()
assert session.find("css", "#has-been-right-clicked")
def test_allows_modifiers(self, session):
if isfirefox(session) and not ismarionette(session):
pytest.skip("Firefox without Marionette/geckodriver doesn't support modified right-click")
Keys = pytest.importorskip("selenium.webdriver.common.keys").Keys
session.visit("/with_js")
session.find("css", "#click-test").right_click(Keys.META)
assert session.has_link("Has been meta right clicked")
def test_allows_adjusting_the_click_offset(self, session):
session.visit("/with_js")
session.find("css", "#click-test").right_click(x=10, y=10)
link = session.find("link", "has-been-right-clicked")
regex = re.compile(r"^Has been right clicked at (?P<x>[\d\.-]+),(?P<y>[\d\.-]+)$")
locations = regex.search(link.text).groupdict()
assert float(locations['x']) == pytest.approx(10, 1)
assert float(locations['y']) == pytest.approx(10, 1)
@pytest.mark.requires("send_keys")
class TestNodeSendKeys(NodeTestCase):
def test_sends_a_string_of_keys_to_an_element(self, session):
session.visit("/form")
session.find("css", "#address1_city").send_keys("Oceanside")
assert session.find("css", "#address1_city").value == "Oceanside"
def test_sends_special_characters(self, session):
Keys = pytest.importorskip("selenium.webdriver.common.keys").Keys
if ismarionette(session):
pytest.skip("selenium/geckodriver doesn't support some special characters")
session.visit("/form")
session.find("css", "#address1_city").send_keys(
"Ocean", Keys.SPACE, "sie", Keys.LEFT, "d")
assert session.find("css", "#address1_city").value == "Ocean side"
@pytest.mark.requires("js")
class TestNodeExecuteScript(NodeTestCase):
def test_executes_the_given_script_in_the_context_of_the_element_and_returns_nothing(self, session):
session.visit("/with_js")
assert session.find("css", "#change").execute_script("this.textContent = 'Funky Doodle'") is None
assert session.has_css("#change", text="Funky Doodle")
def test_passes_arguments_to_the_script(self, session):
session.visit("/with_js")
session.find("css", "#change").execute_script("this.textContent = arguments[0]", "Doodle Funk")
assert session.has_css("#change", text="Doodle Funk")
@pytest.mark.requires("js")
class TestNodeEvaluateScript(NodeTestCase):
def test_evaluates_the_given_script_in_the_context_of_the_element_and_returns_whatever_it_produces(self, session):
session.visit("/with_js")
el = session.find("css", "#with_change_event")
assert el.evaluate_script("this.value") == "default value"
def test_ignores_leading_whitespace(self, session):
session.visit("/with_js")
assert session.find("css", "#change").evaluate_script("""
2 + 3
""") == 5
def test_passes_arguments_to_the_script(self, session):
session.visit("/with_js")
session.find("css", "#change").evaluate_script("this.textContent = arguments[0]", "Doodle Funk")
assert session.has_css("#change", text="Doodle Funk")
def test_passes_multiple_arguments(self, session):
session.visit("/with_js")
change = session.find("css", "#change")
assert change.evaluate_script("arguments[0] + arguments[1]", 2, 3) == 5
def test_supports_returning_elements(self, session):
session.visit("/with_js")
change = session.find("css", "#change") # ensure page has loaded and element is available
el = change.evaluate_script("this")
assert isinstance(el, Element)
assert el == change
@pytest.mark.requires("js")
class TestNodeEvaluateAsyncScript(NodeTestCase):
def test_evaluates_the_given_script_in_the_context_of_the_element(self, session):
session.visit("/with_js")
el = session.find("css", "#with_change_event")
assert el.evaluate_async_script("arguments[0](this.value)") == "default value"
def test_supports_returning_elements_after_asynchronous_operation(self, session):
session.visit("/with_js")
change = session.find("css", "#change") # ensure page has loaded and element is available
el = change.evaluate_async_script("var cb = arguments[0]; setTimeout(function(el) { cb(el); }, 100, this);")
assert el == change
@pytest.mark.requires("js")
class TestNodeReloadWithoutAutomaticReload(NodeTestCase):
@pytest.fixture(autouse=True)
def setup_capybara(self):
capybara.automatic_reload = False
def test_reloads_the_current_context_of_the_node(self, session):
session.visit("/with_js")
node = session.find("css", "#reload-me")
session.click_link("Reload!")
sleep(0.3)
assert node.reload().text == "has been reloaded"
assert node.text == "has been reloaded"
def test_reloads_a_parent_node(self, session):
session.visit("/with_js")
node = session.find("css", "#reload-me").find("css", "em")
session.click_link("Reload!")
sleep(0.3)
assert node.reload().text == "has been reloaded"
assert node.text == "has been reloaded"
def test_does_not_automatically_reload(self, session):
session.visit("/with_js")
node = session.find("css", "#reload-me")
session.click_link("Reload!")
sleep(0.3)
with pytest.raises(Exception) as excinfo:
assert node.has_text("has been reloaded")
assert isinstance(excinfo.value, session.driver.invalid_element_errors)
@pytest.mark.requires("js")
class TestNodeReloadWithAutomaticReload(NodeTestCase):
@pytest.fixture(autouse=True)
def setup_capybara(self):
capybara.automatic_reload = True
def test_reloads_the_current_context_of_the_node_automatically(self, session):
session.visit("/with_js")
node = session.find("css", "#reload-me")
session.click_link("Reload!")
sleep(0.3)
assert node.text == "has been reloaded"
def test_reloads_a_parent_node_automatically(self, session):
session.visit("/with_js")
node = session.find("css", "#reload-me").find("css", "em")
session.click_link("Reload!")
sleep(0.3)
assert node.text == "has been reloaded"
def test_reloads_a_node_automatically_when_using_find(self, session):
session.visit("/with_js")
node = session.find("css", "#reload-me")
session.click_link("Reload!")
sleep(0.3)
assert node.find("css", "a").text == "has been reloaded"
def test_does_not_reload_nodes_which_have_not_been_found_with_reevaluatable_queries(self, session):
session.visit("/with_js")
node = session.find_all("css", "#the-list li")[1]
session.click_link("Fetch new list!")
sleep(0.3)
with pytest.raises(Exception) as excinfo:
assert node.has_text("Foo")
assert isinstance(excinfo.value, session.driver.invalid_element_errors)
with pytest.raises(Exception) as excinfo:
assert node.has_text("Bar")
assert isinstance(excinfo.value, session.driver.invalid_element_errors)
def test_reloads_nodes_with_options(self, session):
session.visit("/with_js")
node = session.find("css", "em", text="reloaded")
session.click_link("Reload!")
sleep(1)
assert node.text == "has been reloaded"
| elliterate/capybara.py | capybara/tests/session/test_node.py | Python | mit | 22,594 | ["VisIt"] | adeb4a7e889aeb6cba4a5605da2913f89e362e8350def7cc4f05fadc3b55057a |
#
# @file TestValidASTNode.py
# @brief Test the isWellFormedASTNode function
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestValidASTNode.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestValidASTNode(unittest.TestCase):
def test_ValidASTNode_Name(self):
n = libsbml.parseFormula("c")
self.assertEqual( True, n.isWellFormedASTNode() )
#d = libsbml.parseFormula("d")
#i = n.addChild(d)
#self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
n = None
pass
def test_ValidASTNode_Number(self):
n = libsbml.parseFormula("1.2")
self.assertEqual( True, n.isWellFormedASTNode() )
#d = libsbml.parseFormula("d")
#i = n.addChild(d)
#self.assert_( i == libsbml.LIBSBML_INVALID_OBJECT )
n = None
pass
def test_ValidASTNode_binary(self):
n = libsbml.ASTNode(libsbml.AST_DIVIDE)
self.assertEqual( False, (n.isWellFormedASTNode()) )
c = libsbml.parseFormula("c")
n.addChild(c)
self.assertEqual( False, (n.isWellFormedASTNode()) )
d = libsbml.parseFormula("d")
n.addChild(d)
self.assertEqual( True, n.isWellFormedASTNode() )
n = None
pass
def test_ValidASTNode_infix_nary_plus0(self):
n = libsbml.readMathMLFromString("<math xmlns='http://www.w3.org/1998/Math/MathML'>" +
" <apply>" +
" <plus/>" +
" </apply>" +
"</math>")
self.assert_( n != None )
formula = libsbml.formulaToString(n)
node = libsbml.parseFormula(formula)
self.assert_( node != None )
n = None
node = None
pass
def test_ValidASTNode_infix_nary_plus1(self):
n = libsbml.readMathMLFromString("<math xmlns='http://www.w3.org/1998/Math/MathML'>" +
" <apply>" +
" <plus/>" +
" <cn> 0 </cn>" +
" </apply>" +
"</math>")
self.assert_( n != None )
formula = libsbml.formulaToString(n)
node = libsbml.parseFormula(formula)
self.assert_( node != None )
n = None
node = None
pass
def test_ValidASTNode_infix_nary_times0(self):
n = libsbml.readMathMLFromString("<math xmlns='http://www.w3.org/1998/Math/MathML'>" +
" <apply>" +
" <times/>" +
" </apply>" +
"</math>")
self.assert_( n != None )
formula = libsbml.formulaToString(n)
node = libsbml.parseFormula(formula)
self.assert_( node != None )
n = None
node = None
pass
def test_ValidASTNode_infix_nary_times1(self):
n = libsbml.readMathMLFromString("<math xmlns='http://www.w3.org/1998/Math/MathML'>" +
" <apply>" +
" <times/>" +
" <cn> 0 </cn>" +
" </apply>" +
"</math>")
self.assert_( n != None )
formula = libsbml.formulaToString(n)
node = libsbml.parseFormula(formula)
self.assert_( node != None )
n = None
node = None
pass
def test_ValidASTNode_lambda(self):
n = libsbml.ASTNode(libsbml.AST_LAMBDA)
self.assertEqual( False, (n.isWellFormedASTNode()) )
c = libsbml.parseFormula("c")
n.addChild(c)
self.assertEqual( True, n.isWellFormedASTNode() )
d = libsbml.parseFormula("d")
n.addChild(d)
self.assertEqual( True, n.isWellFormedASTNode() )
e = libsbml.parseFormula("e")
n.addChild(e)
self.assertEqual( True, n.isWellFormedASTNode() )
n = None
pass
def test_ValidASTNode_nary(self):
n = libsbml.ASTNode(libsbml.AST_DIVIDE)
self.assertEqual( False, (n.isWellFormedASTNode()) )
c = libsbml.parseFormula("c")
n.addChild(c.deepCopy())
self.assertEqual( False, (n.isWellFormedASTNode()) )
n.addChild(c.deepCopy())
self.assertEqual( True, (n.isWellFormedASTNode()) )
n.addChild(c.deepCopy())
self.assertEqual( False, (n.isWellFormedASTNode()) )
n = libsbml.ASTNode(libsbml.AST_TIMES)
self.assertEqual( True, (n.isWellFormedASTNode()) )
n.addChild(c)
self.assertEqual( True, (n.isWellFormedASTNode()) )
d = libsbml.parseFormula("d")
n.addChild(d)
self.assertEqual( True, n.isWellFormedASTNode() )
e = libsbml.parseFormula("e")
n.addChild(e)
self.assertEqual( True, n.isWellFormedASTNode() )
n = None
pass
def test_ValidASTNode_returnsBoolean(self):
node = libsbml.ASTNode( libsbml.AST_LOGICAL_AND )
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_LOGICAL_NOT)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_LOGICAL_OR)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_LOGICAL_XOR)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_FUNCTION_PIECEWISE)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_RELATIONAL_EQ)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_RELATIONAL_GEQ)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_RELATIONAL_GT)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_RELATIONAL_LEQ)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_RELATIONAL_LT)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_RELATIONAL_NEQ)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_CONSTANT_TRUE)
self.assertEqual( True, node.returnsBoolean() )
node.setType(libsbml.AST_CONSTANT_FALSE)
self.assertEqual( True, node.returnsBoolean() )
pass
def test_ValidASTNode_root(self):
n = libsbml.ASTNode(libsbml.AST_FUNCTION_ROOT)
self.assertEqual( False, (n.isWellFormedASTNode()) )
c = libsbml.parseFormula("c")
n.addChild(c)
self.assertEqual( True, n.isWellFormedASTNode() )
d = libsbml.parseFormula("3")
n.addChild(d)
self.assertEqual( True, n.isWellFormedASTNode() )
e = libsbml.parseFormula("3")
n.addChild(e)
self.assertEqual( False, (n.isWellFormedASTNode()) )
n = None
pass
def test_ValidASTNode_setType(self):
n = libsbml.ASTNode()
i = n.setType(libsbml.AST_REAL)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( n.getType() == libsbml.AST_REAL )
i = n.setType(libsbml.AST_PLUS)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( n.getType() == libsbml.AST_PLUS )
self.assert_( n.getCharacter() == '+' )
i = n.setType(libsbml.AST_FUNCTION_ARCCOSH)
self.assert_( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assert_( n.getType() == libsbml.AST_FUNCTION_ARCCOSH )
i = n.setType(libsbml.AST_UNKNOWN)
self.assert_( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
self.assert_( n.getType() == libsbml.AST_UNKNOWN )
n = None
pass
def test_ValidASTNode_unary(self):
n = libsbml.ASTNode(libsbml.AST_FUNCTION_ABS)
self.assertEqual( False, (n.isWellFormedASTNode()) )
c = libsbml.parseFormula("c")
n.addChild(c)
self.assertEqual( True, n.isWellFormedASTNode() )
d = libsbml.parseFormula("d")
n.addChild(d)
self.assertEqual( False, (n.isWellFormedASTNode()) )
n = None
pass
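# Taken together, the tests above exercise the arity rules enforced by
# isWellFormedASTNode() (as observed from these assertions, not from the
# libSBML documentation): binary operators such as AST_DIVIDE require exactly
# two children; n-ary operators such as AST_TIMES accept any number,
# including zero; AST_LAMBDA requires at least one child; AST_FUNCTION_ROOT
# accepts one or two; and unary functions such as AST_FUNCTION_ABS require
# exactly one.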
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestValidASTNode))
return suite
if __name__ == "__main__":
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
| TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/src/bindings/python/test/math/TestValidASTNode.py | Python | bsd-3-clause | 8,581 | ["VisIt"] | de6d9218c72aeb3c2924aa761889a8d3cefcb9430c530cd8ae428c5a1009a09c |
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Murphy Lab
# Carnegie Mellon University
#
# Written by Luis Pedro Coelho <luis@luispedro.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# For additional information visit http://murphylab.web.cmu.edu or
# send email to murphy@cmu.edu
import subprocess
from numpy.distutils.core import setup, Extension
def popen3(cmd):
p = subprocess.Popen(cmd, shell=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
return p.stdout, p.stdin, p.stderr
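# Illustrative use of popen3, mirroring the pkg-config probe below (assumes
# 'pkg-config' is available on PATH):
#   out, inp, err = popen3('pkg-config ImageMagick++ --cflags')
#   cflags = out.readline().split()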
def readmagick_args(verbose=True):
def _execute(opts):
output,input,error = popen3('pkg-config ImageMagick++ %s' % opts)
errors = error.read()
if errors:
output,input,error = popen3('ImageMagick++-config %s' % opts)
errors += error.read()
if errors:
if verbose:
                    print('''
Could not find ImageMagick++ headers using
pkg-config or ImageMagick++-config.
Error was: %s
readmagick will not be built.
''' % errors)
raise ValueError
tokens = output.readline().split()
input.close()
output.close()
return tokens
try:
libstokens = _execute('--libs')
cflagstokens = _execute('--cflags')
return {
'libraries' : [t[2:] for t in libstokens if t.startswith('-l')],
'library_dirs' : [t[2:] for t in libstokens if t.startswith('-L')],
'include_dirs' : [t[2:] for t in cflagstokens if t.startswith('-I')],
}
    except ValueError:
        # Raised by _execute when neither configuration tool succeeds.
        return None
long_description = '''ReadMagick
Read and write images using ImageMagick++.
Supports modern image formats such as JPEG2000.
'''
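# A purely hypothetical usage sketch of the built extension (the function
# names below are illustrative only; the real API lives in
# readmagick/readmagick.cpp, which is not part of this file):
#   import readmagick
#   img = readmagick.readimg('photo.jp2')    # hypothetical reader
#   readmagick.writeimg(img, 'photo.png')    # hypothetical writer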
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: C++',
'Topic :: Scientific/Engineering',
]
# readmagick_args() returns None when the ImageMagick++ configuration cannot
# be found; guard against that so setup degrades gracefully ("readmagick will
# not be built") instead of crashing on Extension(..., **None).
magick_args = readmagick_args()
ext_modules = []
if magick_args is not None:
    ext_modules.append(Extension('readmagick',
                                 sources = ['readmagick/readmagick.cpp'],
                                 **magick_args))
setup(name = 'readmagick',
      version = '1.0.5',
      description = 'Read and write images using ImageMagick',
      long_description = long_description,
      classifiers = classifiers,
      author = 'Luis Pedro Coelho',
      author_email = 'luis@luispedro.org',
      license = 'GPL',
      ext_modules = ext_modules
      )
| luispedro/readmagick | setup.py | Python | gpl-3.0 | 3,112 | ["VisIt"] | 4f291b849b2a7fb6adff1e5647370f1dc1eb95aff0e45fc7bfe9169bf96c19df |