text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# coding: utf-8
# In[5]:
from __future__ import division
from sys import stdout
import numpy as np
import sys
import networkx as nx
import numpy as np
import networkx as nx
import sklearn.metrics as met
import sklearn.manifold as man
import matplotlib.pyplot as plt
MIN_EXPANSION_SIZE = 10
#function to generate real valued som for graph input
#function to generate real valued som for graph input
def initialise_network(X, num_neurons):
    """Create the initial SOM lattice for the data set X.

    Builds a networkx graph of num_neurons neurons arranged in a roughly
    square regular lattice.  Each neuron carries four attributes:
      'v'  -- weight vector, seeded with a randomly chosen data point
      'ls' -- list of graph nodes currently assigned to this neuron
      'e'  -- accumulated quantization error of this neuron
      'n'  -- child SOM grown from this neuron (empty until expansion)

    Parameters
    ----------
    X : sequence of d-dimensional embedding vectors
    num_neurons : int, number of neurons in the initial map

    Returns
    -------
    networkx.Graph with integer neuron ids 0..num_neurons-1
    """
    #network will be a one dimensional list
    network = nx.Graph()
    #number of data points
    num_nodes = len(X)
    #side length of the regular lattice; int() keeps the neighbour ids
    #computed below integral
    lattice_size = int(np.floor(np.sqrt(num_neurons)))
    for i in range(num_neurons):
        ##position
        network.add_node(i)
        ##weight -- seeded from a randomly sampled data point
        r = np.random.randint(num_nodes)
        network.node[i]['v'] = X[r]
        ##list of closest nodes
        network.node[i]['ls'] = []
        ##error of neuron
        network.node[i]['e'] = 0
        ##som for neuron
        network.node[i]['n'] = []
        #connections
        if i % lattice_size > 0:
            #horizontal connection
            network.add_edge(i, i - 1)
        if i >= lattice_size:
            #vertical connection
            network.add_edge(i, i - lattice_size)
            #diagonal connection
            #BUGFIX: this edge was previously added without the
            #i >= lattice_size guard, which created phantom neurons with
            #negative ids (no 'v'/'ls'/'e' attributes) for every neuron
            #in the first lattice row
            if i % lattice_size < lattice_size - 1:
                network.add_edge(i, i - lattice_size + 1)
    #return network
    return network
# function to train SOM on given graph
# function to train SOM on given graph
def train_network(X, network, num_epochs, eta_0, sigma_0, N, layer, MQE, target, num_deleted_neurons):
    """Run num_epochs epochs of SOM training on the data X.

    Every epoch visits the first N patterns of a freshly shuffled order,
    finds the best matching neuron for each and pulls the map weights
    towards the pattern.  The neighbourhood width sigma decays
    exponentially with the epoch index while the learning rate stays at
    eta_0.  layer/MQE/target/num_deleted_neurons only feed the progress
    read-out.
    """
    #learning rate stays fixed; neighbourhood starts wide
    eta = eta_0
    sigma = sigma_0
    #indices of all patterns that may be visited
    training_patterns = list(range(len(X)))
    for epoch in range(num_epochs):
        #fresh random visiting order each epoch
        np.random.shuffle(training_patterns)
        #visit the first N patterns of the shuffled order
        for pattern in training_patterns[:N]:
            x = X[pattern]
            #best matching unit, then pull the weights towards x
            update_weights(x, network, winning_neuron(x, network), eta, sigma)
        #shrink the neighbourhood for the next epoch
        sigma = sigma_0 * np.exp(-2 * sigma_0 * epoch / num_epochs)
        stdout.write("\rLayer: {}, training epoch: {}/{}, size of map: {}, network size: {}, MQE: {}, target: {}, number of deleted neurons: {}".format(layer,
            epoch, num_epochs, len(network), len(X), MQE, target, num_deleted_neurons) + " " * 20)
# winning neuron
# winning neuron
def winning_neuron(x, network):
    """Return the id of the neuron whose weight vector is nearest to x
    (Euclidean distance); returns [] when the map has no neurons."""
    best = []
    best_dist = np.inf
    for neuron in network.nodes():
        #distance from x to this neuron's reference vector
        dist = np.linalg.norm(x - network.node[neuron]['v'])
        #strict comparison keeps the earliest neuron on ties
        if dist < best_dist:
            best_dist = dist
            best = neuron
    return best
# function to update weights
# function to update weights
def update_weights(x, network, win_neuron, eta, sigma):
    """Move every neuron's weight vector towards the input vector x.

    The step is eta scaled by the Gaussian lattice-distance
    neighbourhood factor centred on the winning neuron.
    """
    # iterate through all neurons in network
    for i in network.nodes():
        v = network.node[i]['v']
        #BUGFIX: use out-of-place addition.  The previous in-place
        #'v += ...' mutated the stored array, which aliases the original
        #data point assigned in initialise_network (and any row of the
        #embedding matrix X), silently corrupting the training data.
        network.node[i]['v'] = v + eta * neighbourhood(network, i, win_neuron, sigma) * (x - v)
# neighbourhood function
# neighbourhood function
def neighbourhood(network, r, win_neuron, sigma):
    """Gaussian neighbourhood factor from the lattice (shortest-path)
    distance between neuron r and the winning neuron."""
    hops = nx.shortest_path_length(network, r, win_neuron)
    return np.exp(-hops ** 2 / (2 * sigma ** 2))
# assign nodes into clusters
# assign nodes into clusters
def assign_nodes(G, X, network, layer):
    """Assign every node of G to the neuron whose reference vector is
    nearest to the node's embedding, accumulating each neuron's
    quantization error 'e' as the summed distances."""
    #reset assignments and errors from any previous pass
    for neuron in network.nodes():
        network.node[neuron]['ls'] = []
        network.node[neuron]['e'] = 0
    #node ids in the same order as the rows of X
    graph_nodes = G.nodes()
    for n in range(nx.number_of_nodes(G)):
        #embedding of this graph node
        x = X[n]
        #search for the nearest reference vector
        closest_ref = []
        min_distance = np.inf
        for neuron in network.nodes():
            d = np.linalg.norm(x - network.node[neuron]['v'])
            if d < min_distance:
                min_distance = d
                closest_ref = neuron
        #empty map -- nothing to assign to
        if closest_ref == []:
            continue
        #record the assignment and grow the neuron's error
        network.node[closest_ref]['ls'].append(graph_nodes[n])
        network.node[closest_ref]['e'] += min_distance
##function to return lattice grid of errors
##function to return lattice grid of errors
def update_errors(network, num_deleted_neurons):
    """Normalise per-neuron errors and compute the map's MQE.

    Neurons with no assigned nodes are removed from the map (delete_node
    re-connects the lattice) and counted in num_deleted_neurons.  Each
    remaining neuron's 'e' is rewritten as its mean error per assigned
    node.

    Returns (mqe, num_deleted_neurons).
    """
    total_error = 0
    populated = 0
    #nodes() returns a list in networkx 1.x, so removing neurons while
    #iterating is safe here
    for neuron in network.nodes():
        if len(network.node[neuron]['ls']) == 0:
            #empty neuron -- drop it from the map
            delete_node(network, neuron)
            num_deleted_neurons += 1
            continue
        populated += 1
        #mean error per assigned node, written back to the neuron
        mean_e = network.node[neuron]['e'] / len(network.node[neuron]['ls'])
        network.node[neuron]['e'] = mean_e
        total_error += mean_e
    #mean over the populated neurons
    return total_error / populated, num_deleted_neurons
def connect_closest_neurons(network, s1, s2):
    """Add an edge between the pair of neurons -- one drawn from s1, one
    from s2 -- whose weight vectors lie closest in the input space."""
    best_a, best_b = [], []
    best_dist = np.inf
    for a in s1:
        va = network.node[a]['v']
        for b in s2:
            d = np.linalg.norm(va - network.node[b]['v'])
            if d < best_dist:
                best_dist = d
                best_a, best_b = a, b
    ##connect
    network.add_edge(best_a, best_b)
def intersection(a, b):
    """Return the elements common to a and b as a list (set order)."""
    common = set(a)
    common &= set(b)
    return list(common)
def delete_node(network, n):
    """Remove neuron n from the map, keeping the lattice connected.

    If the removal splits the graph into several connected components,
    each pair of components is re-joined by an edge between the closest
    (in input space) pair of n's former neighbours.
    """
    #neighbors() returns a list in networkx 1.x, so it survives the removal
    neighbours = network.neighbors(n)
    network.remove_node(n)
    #isolated neuron -- nothing to reconnect
    if not neighbours:
        return
    components = [c for c in nx.connected_components(network)]
    for i in range(len(components)):
        #former neighbours of n that ended up in component i
        conn_neigh_1 = intersection(components[i], neighbours)
        for j in range(i + 1, len(components)):
            conn_neigh_2 = intersection(components[j], neighbours)
            ##make connection between closest neurons in input space
            connect_closest_neurons(network, conn_neigh_1, conn_neigh_2)
##function to identify neuron with greatest error
##function to identify neuron with greatest error
def identify_error_unit(network):
    """Return the id of the neuron with the largest stored error 'e'
    (or [] when every error is <= 0)."""
    error_node = []
    max_e = 0
    for neuron in network.nodes():
        e = network.node[neuron]['e']
        #strictly greater, so ties keep the earlier neuron
        if e > max_e:
            max_e = e
            error_node = neuron
    return error_node
# def get_vector(node):
# d = 0
# while 'embedding'+str(d) in node:
# d += 1
# v = np.zeros(d)
# for i in range(d):
# v[i] = node['embedding{}'.format(i)]
# return v
def expand_network(G, network, error_unit):
    """Grow the map by one neuron next to the unit with greatest error.

    The new neuron's weight is seeded with the embedding of a randomly
    chosen graph node assigned to the error unit; it is wired to the
    error unit and, when one exists, to the error unit's closest
    neighbour in input space.
    """
    #id of the new neuron -- one past the current largest id
    new_id = max(network) + 1
    network.add_node(new_id)
    #seed the weight with the embedding of a random assigned node
    assigned = network.node[error_unit]['ls']
    pick = np.random.randint(len(assigned))
    network.node[new_id]['v'] = G.node[assigned[pick]]["embedding"]
    ##fresh bookkeeping for the new neuron
    network.node[new_id]['ls'] = []
    network.node[new_id]['e'] = 0
    network.node[new_id]['n'] = []
    #wire the new neuron into the lattice
    neighbours = network.neighbors(error_unit)
    if len(neighbours) == 0:
        ##only the error unit itself to connect to
        network.add_edge(error_unit, new_id)
    else:
        ##also connect to the error unit's closest neighbour in input space
        network.add_edge(closest_neuron(network, error_unit, neighbours), new_id)
        network.add_edge(error_unit, new_id)
##function to find neuron pointing furthest away in list
##function to find neuron pointing furthest away in list
def furthest_neuron(network, error_unit, ls):
    """Return the id of the neuron in ls whose weight vector lies
    furthest (Euclidean) from that of error_unit; [] if ls is empty."""
    reference = network.node[error_unit]['v']
    furthest_node = []
    max_dist = -np.inf
    for candidate in ls:
        #distance in input space to the candidate's weight
        d = np.linalg.norm(network.node[candidate]['v'] - reference)
        if d > max_dist:
            max_dist = d
            furthest_node = candidate
    return furthest_node
def closest_neuron(network, error_unit, ls):
    """Return the id of the neuron in ls whose weight vector lies
    closest (Euclidean) to that of error_unit; [] if ls is empty."""
    reference = network.node[error_unit]['v']
    closest_node = []
    min_dist = np.inf
    for candidate in ls:
        #distance in input space to the candidate's weight
        d = np.linalg.norm(network.node[candidate]['v'] - reference)
        if d < min_dist:
            min_dist = d
            closest_node = candidate
    return closest_node
##GHSOM algorithm
##GHSOM algorithm
def ghsom(G, lam, eta, sigma, e_0, e_sg, e_en, init, layer):
    """Recursively grow a hierarchy of SOMs over the graph G.

    Parameters
    ----------
    G     : graph whose nodes carry an "embedding" attribute
    lam   : number of training epochs per growth step
    eta   : learning rate
    sigma : initial neighbourhood width
    e_0   : quantization error of the parent unit (scales both thresholds)
    e_sg  : growth threshold -- the map grows while MQE > e_sg * e_0
    e_en  : expansion threshold -- a neuron spawns a child map while its
            error exceeds e_en * e_0
    init  : neuron count of a freshly initialised map
    layer : recursion depth (used only for progress output)

    Returns
    -------
    (network, MQE) -- the trained map and its mean quantization error.
    """
    #embedding matrix, row n corresponds to node n of G
    X = get_embedding(G)
    #number of nodes in G
    num_nodes = nx.number_of_nodes(G)
    ##number of training patterns visited per epoch (all of them)
    N = num_nodes
    #create som for this neuron
    network = initialise_network(X, init)
    ##initial training phase
    #mean quantization error -- unknown until the first assignment
    MQE = np.inf
    #number of neurons deleted from the map so far
    num_deleted_neurons = 0
    #train for lam epochs
    train_network(X, network, lam, eta, sigma, N, layer, MQE, e_sg * e_0, num_deleted_neurons)
    #classify nodes
    assign_nodes(G, X, network, layer)
    #calculate mean network error
    MQE, num_deleted_neurons = update_errors(network, num_deleted_neurons)
    ##som growth phase
    #repeat until the error is low enough; bail out once 3 neurons have
    #been deleted (the map is treated as degenerate beyond that)
    while MQE > e_sg * e_0 and num_deleted_neurons < 3:
        #find neuron with greatest error
        error_unit = identify_error_unit(network)
        #expand network by one neuron next to it
        expand_network(G, network, error_unit)
        #re-train for lam epochs
        train_network(X, network, lam, eta, sigma, N, layer, MQE, e_sg * e_0, num_deleted_neurons)
        #re-classify nodes
        assign_nodes(G, X, network, layer)
        #recalculate mean network error after the expansion
        MQE, num_deleted_neurons = update_errors(network, num_deleted_neurons)
    print "growth terminated, MQE: {}, target: {}, number of deleted neurons: {}".format(MQE, e_0 * e_sg, num_deleted_neurons)
    ##neuron expansion phase
    #neurons with a large enough error and enough assigned nodes each
    #recursively receive a child map over their induced subgraph
    for i in range(len(network)):
        #unpack this neuron's assignments and error
        ls = network.node[network.nodes()[i]]['ls']
        e = network.node[network.nodes()[i]]['e']
        #check error against the expansion threshold
        if (e > e_en * e_0 and len(ls) > MIN_EXPANSION_SIZE and num_deleted_neurons < 3):
            #subgraph induced by this neuron's assigned nodes
            H = G.subgraph(ls)
            #recursively run the algorithm one layer down
            n, e = ghsom(H, lam, eta, sigma, e, e_sg, e_en, init, layer + 1)
            #repack the child map and its error
            network.node[network.nodes()[i]]['e'] = e
            network.node[network.nodes()[i]]['n'] = n
    #return network
    return network, MQE
def unassign_all_nodes(G, labels):
    """Mark every node of G as 'unassigned' for each community layer.

    One "assigned_community_layer_{l}" attribute is written per entry
    in labels (networkx 1.x set_node_attributes argument order).
    """
    for layer in range(len(labels)):
        nx.set_node_attributes(G, "assigned_community_layer_{}".format(layer), 'unassigned')
##function to recursively label nodes in graph
##function to recursively label nodes in graph
def label_graph(G, network, layer, neuron_count):
    """Write community labels onto the nodes of G, descending into the
    child SOM of every neuron that has one.

    neuron_count[layer] is the running community id for each layer and
    is advanced once per neuron visited at that layer.
    """
    for neuron in network.nodes():
        assigned = network.node[neuron]['ls']
        #stamp this layer's community id on every assigned node
        for node in assigned:
            G.node[node]["assigned_community_layer_{}".format(layer)] = neuron_count[layer]
        child = network.node[neuron]['n']
        if len(child) > 0:
            #recurse into the child map over the induced subgraph
            label_graph(G.subgraph(assigned), child, layer + 1, neuron_count)
        neuron_count[layer] += 1
##function to calculate community detection error given a generated benchmark graph
##function to calculate community detection error given a generated benchmark graph
def mutual_information(G, labels):
    """Score detected communities against the ground truth.

    For each layer i the predicted attribute
    "assigned_community_layer_{i}" is compared via normalized mutual
    information against the ground-truth attribute
    labels[num_layers - i - 1], i.e. ground-truth layers are consumed in
    reverse order relative to the predictions.

    Returns a numpy array with one NMI score per layer.
    """
    #number of layers of communities
    num_layers = len(labels)
    #initialise scores
    scores = np.zeros(num_layers)
    #iterate over all levels of labels
    for i in range(num_layers):
        #ground-truth communities for this layer (reversed label order)
        actual_community = nx.get_node_attributes(G, labels[num_layers - i - 1])
        #predicted communities for this layer
        predicted_community = nx.get_node_attributes(G, "assigned_community_layer_{}".format(i))
        #only retrieve labels for assigned nodes
        #NOTE(review): the two attribute dicts are iterated independently;
        #this assumes both produce values in the same node order -- verify
        labels_true = [v for k, v in actual_community.items()]
        labels_pred = [v for k, v in predicted_community.items()]
        print labels_true
        print labels_pred
        #mutual information to score the classification
        score = met.normalized_mutual_info_score(labels_true, labels_pred)
        scores[i] = score
    #return
    return scores
## get embedding
def get_embedding(G):
# #get number of niodes in the graph
# num_nodes = nx.number_of_nodes(G)
# #dimension of embedding
# d = 0
# while 'embedding'+str(d) in G.node[G.nodes()[0]]:
# d += 1
# #initialise embedding
# X = np.zeros((num_nodes, d))
# for i in range(num_nodes):
# for j in range(d):
# X[i,j] = G.node[G.nodes()[i]]['embedding'+str(j)]
# return X
return np.array([v for k, v in nx.get_node_attributes(G, "embedding").items()])
def modularity(G, H):
    """Modularity contribution of community H inside graph G:
    Q = l_s / L - (d_s / (2 L))^2, with l_s the links within H, d_s the
    total degree of H and L the number of links in G."""
    #links inside the community and its total degree
    internal_links = nx.number_of_edges(H)
    community_degree = np.sum(list(H.degree().values()))
    #links in the whole graph
    total_links = nx.number_of_edges(G)
    return internal_links / total_links - (community_degree / (2 * total_links)) ** 2
##function to visualise graph
def visualise_graph(G, colours, layer):
## create new figure for graph plot
fig, ax = plt.subplots()
# graph layout
pos = nx.spring_layout(G)
#attributes in this graph
attributes = np.unique([v for k,v in nx.get_node_attributes(G, "assigned_community_layer_{}".format(layer)).items()])
# draw nodes -- colouring by cluster
for i in range(min(len(colours), len(attributes))):
node_list = [n for n in G.nodes() if G.node[n]["assigned_community_layer_{}".format(layer)] == attributes[i]]
colour = [colours[i] for n in range(len(node_list))]
nx.draw_networkx_nodes(G, pos, nodelist=node_list, node_color=colour)
#draw edges
nx.draw_networkx_edges(G, pos)
# draw labels
nx.draw_networkx_labels(G, pos, )
#title of plot
plt.title('Nodes coloured by cluster, layer: '+str(layer))
#show plot
plt.show()
## visualise graph based on network clusters
## visualise graph based on network clusters
def visualise_network(network, colours, layer):
    """Draw the neuron lattice with one colour per neuron and show it."""
    ## fresh figure for the lattice plot
    fig, ax = plt.subplots()
    # spring layout for neuron positions
    pos = nx.spring_layout(network)
    #draw each neuron individually so it gets its own colour
    for i in range(len(colours)):
        nx.draw_networkx_nodes(network, pos, nodelist=[network.nodes()[i]], node_color=colours[i])
    #draw edges and labels on top
    nx.draw_networkx_edges(network, pos)
    nx.draw_networkx_labels(network, pos)
    #label axes
    plt.title('Neurons in lattice, layer: '+str(layer))
    #show lattice plot
    plt.show()
###evaluate fitness
def fitness(eta, sigma, e_sg, e_en, filename, labels, init, lam):
G = nx.read_gpickle(filename)
labels = labels.split(',')
X = get_embedding(G)
m = np.mean(X, axis=0)
MQE_0 = np.mean([np.linalg.norm(x - m) for x in X])
#start layer
layer = 0
#run ghsom algorithm
network, MQE = ghsom(G, lam, eta, sigma, MQE_0, e_sg, e_en, init, layer)
#label graph
neurons = np.zeros(10, dtype=np.int)
unassign_all_nodes(G, labels)
label_graph(G, network, layer, neurons)
##calculate error
mi_score = mutual_information(G, labels)
num_communities_detected = len(network)
return G, mi_score, num_communities_detected
def main(params, filename, labels, init=1, lam=10000):
    """Convenience wrapper: unpack the parameter dict and call fitness."""
    eta, sigma = params['eta'], params['sigma']
    e_sg, e_en = params['e_sg'], params['e_en']
    return fitness(eta, sigma, e_sg, e_en, filename, labels, init, lam)
def main_no_labels(params, filename, init=1, lam=10000):
    """Run GHSOM on a pickled graph without scoring against labels.

    Returns (G, network)."""
    G = nx.read_gpickle(filename)
    #baseline error: mean distance of the embeddings to their centroid
    X = get_embedding(G)
    centroid = np.mean(X, axis=0)
    MQE_0 = np.mean([np.linalg.norm(x - centroid) for x in X])
    #run ghsom algorithm from the top layer
    layer = 0
    network, MQE = ghsom(G, lam, params['eta'], params['sigma'],
                         MQE_0, params['e_sg'], params['e_en'], init, layer)
    return G, network
# In[1]:
# params = {'eta': 0.001,
# 'sigma': 1,
# 'e_sg': 0.4,
# 'e_en': 1.0}
# In[2]:
# %%time
# G, mi_score, num_communities_detected = main(params=params,
# filename="embedded_benchmark.gpickle", labels="firstlevelcommunity", lam=1000)
# In[3]:
# mi_score
# In[4]:
# colours = np.random.rand(num_communities_detected, 3)
# visualise_graph(G, colours, 0)
# In[7]:
#notebook-style scratch cells: load the yeast reactome graph and
#compute the baseline quantization error of its embedding
G = nx.read_gpickle("embedded_yeast_reactome.gpickle")
# In[11]:
X = get_embedding(G)
# In[12]:
#mean embedding and mean distance of the data to it (MQE of layer 0)
m = np.mean(X, axis=0)
MQE_0 = np.mean([np.linalg.norm(x - m) for x in X])
# In[14]:
#example growth target for e_sg = 0.3 (bare expression; notebook cell)
MQE_0 * 0.3
# In[ ]:
|
DavidMcDonald1993/ghsom
|
ghsom.py
|
Python
|
gpl-2.0
| 19,300
|
[
"NEURON",
"VisIt"
] |
77b259794440bf3c463b1cd47cc43228c2b6f70102fa6134f76f6576d57a0d69
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import chigger
import mock
import subprocess
import os.path
class Test(unittest.TestCase):
    """Check that chigger.utils.animate builds the expected ImageMagick
    'convert' command line."""
    @mock.patch('subprocess.call')
    def testAnimate(self, subproc):
        #helper building paths into the adapt test's gold directory
        gold = lambda name: os.path.join('..', 'adapt', 'gold', name)
        #run animate with subprocess.call mocked out
        chigger.utils.animate(gold('adapt_*.png'), 'out.gif')
        #animate should emit per-frame delays with a longer hold on the
        #final frame, looping forever
        subproc.assert_called_with(['convert',
                                    '-delay', '20', gold('adapt_0.png'),
                                    '-delay', '20', gold('adapt_4.png'),
                                    '-delay', '500', gold('adapt_9.png'),
                                    '-loop', '0', 'out.gif'])
#allow running this test module directly with verbose output
if __name__ == '__main__':
    unittest.main(module=__name__, verbosity=2)
|
harterj/moose
|
python/chigger/tests/utils/test_animate.py
|
Python
|
lgpl-2.1
| 1,087
|
[
"MOOSE"
] |
b2fc44931d68c0b6e268f7fe336b9f2f6507f6dbde30d811980ffb6ac20be269
|
# encoding: utf-8
# Class which creates "Calculator" objects that uses the EMT() model to calculate the quantities used in the ErrorFunction script in order to optimize the value of the seven parameters used in the EMT() model.
# Written by: Rasmus E. Christiansen
# Imported Packages
import numpy
from asap3.EMT2013 import EMT
from ase.lattice.cubic import FaceCenteredCubic, BodyCenteredCubic
from ase.lattice.hexagonal import HexagonalClosedPacked
from ase.lattice.compounds import L1_0,L1_2
from ase.data import atomic_numbers, chemical_symbols
from ase.units import Bohr
import numpy.lib.polynomial as numpyfit
from ase.utils.eos import EquationOfState
from ase.all import view
from ase.optimize import BFGS
from ase.io import read
# Global parameters
# EMT2013 parameter set per element, as a tuple:
parameters = {
    # E0 s0 V0 eta2 kappa lambda n0 Lattice type
    # eV bohr eV bohr^-1 bohr^-1 bohr^-1 bohr^-3
    'H': (-2.21, 0.71, 2.132, 1.652, 2.790, 1.892, 0.00547, 'dimer'),
    'Al': (-3.28, 3.00, 1.493, 1.240, 2.000, 1.169, 0.00700, 'fcc'),
    'Cu': (-3.51, 2.67, 2.476, 1.652, 2.740, 1.906, 0.00910, 'fcc'),
    'Ag': (-2.96, 3.01, 2.132, 1.652, 2.790, 1.892, 0.00547, 'fcc'),
    'Au': (-3.80, 3.00, 2.321, 1.674, 2.873, 2.182, 0.00703, 'fcc'),
    'Ni': (-4.44, 2.60, 3.673, 1.669, 2.757, 1.948, 0.01030, 'fcc'),
    'Pd': (-3.90, 2.87, 2.773, 1.818, 3.107, 2.155, 0.00688, 'fcc'),
    'Pt': (-5.85, 2.90, 4.067, 1.812, 3.145, 2.192, 0.00802, 'fcc'),
    'C': (-1.97, 1.18, 0.132, 3.652, 5.790, 2.892, 0.01322, 'dimer'),
    'N': (-4.97, 1.18, 0.132, 2.652, 3.790, 2.892, 0.01222, 'dimer'),
    'O': (-2.97, 1.25, 2.132, 3.652, 5.790, 4.892, 0.00850, 'dimer')}
#packing constant relating s0 to the fcc nearest-neighbour distance
beta = 1.809 # Calculated from the following equation
# beta = ((16 * Pi) / 3)^(1/3) / 2^(1/2)
class Calculator:
""" The class creates calculator objects which can be used to calculate the following 16 different quantities for
a crystal in the FCC structure using the new version of the EMT calculator. For pure crystals: Bulk modulus,
Lattice Constant, Vacancy formation energy, Energy difference between the element in question in an FCC
configuration and an HCP or BCC configuration, The elastic constants C44 and C11 along with the cohesive energy
and the surface energy for a 001 and 111 surface.
For the L10 and L12 alloys of two elements: The heat of formation energy, the bulk modulus and the lattice
constant for the L10 and L11 alloys. """
    def __init__(self,Element,Size=3,Tol=0.002):
        """Create a calculator for the given element.

        Element : chemical symbol the quantities are computed for
        Size    : edge length, in unit cells, of the cubic test crystals
        Tol     : tolerance used when bisecting for the lattice constant
        """
        # The element chosen for the calculator is set
        self.Element = Element
        self.Size = Size
        self.Tol = Tol
    def BM_LC_Calculator(self,EMT,PARAMETERS):
        """ The method BM_LC_Calculator uses the EMT calculator to find the lattice constant which gives
        the lowest possible energy for a system of atoms and from a polynomial fit of the energy as a function of
        the lattice constant it calculates and returns the Bulk modulus, the volume of the system at the lowest
        possible energy and this energy.

        Returns (Vol0, E0, B, a0): equilibrium volume, energy, bulk
        modulus and the lattice constant derived from Vol0. """
        # Three values for the lattice constant, a0, are chosen. The highest and lowest value is chosen so that it
        # is certain that these are to high/low respectively compared to the "correct" value. This is done by using the
        # experimental value of the lattice constant, a0_exp, as the middle value and to high/low values are then:
        # a0_exp +- a0_mod, a0_mod = a0_exp/10
        a0_exp = numpy.sqrt(2) * beta * PARAMETERS[self.Element][1] * Bohr
        a0_mod = a0_exp * 0.10
        a0_guesses = numpy.array([a0_exp - a0_mod,a0_exp,a0_exp + a0_mod])
        # An atoms object consisting of atoms of the chosen element is initialized
        atoms = FaceCenteredCubic(size=(self.Size,self.Size,self.Size), symbol=self.Element)
        atoms.set_calculator(EMT)
        # An identity matrix for the system is saved to a variable
        IdentityMatrix = numpy.array([[1,0,0],[0,1,0],[0,0,1]])
        # An array for the energy for the chosen guesses for a0 is initialized
        # (length 5 so the same array can be reused for the final 5-point fit)
        E_guesses = numpy.zeros(5)
        # The energies are calculated
        for i in range(3):
            # Changes the lattice constant for the atoms object
            atoms.set_cell(a0_guesses[i] * self.Size * IdentityMatrix,scale_atoms=True)
            # Calculates the energy of the system for the new lattice constant
            E_guesses[i] = atoms.get_potential_energy()
        # Bisection is used in order to find a small interval of the lattice constant for the minimum of the energy (an
        # abitrary interval length is chosen), This is possible because we are certian from theory that there is only
        # one minimum of the energy function in the interval of interest and thus we wont "fall" into a local minimum
        # and stay there by accident.
        while (a0_guesses[2]-a0_guesses[0]) >= self.Tol:
            if min([E_guesses[0],E_guesses[2]]) == E_guesses[0]:
                # The left end is lower -- probe between the middle and right guess
                a0_new_guess = 0.67 * a0_guesses[1] + 0.33 * a0_guesses[2]
                # The energy for this new guess is calculated
                atoms.set_cell(a0_new_guess * self.Size * IdentityMatrix,scale_atoms=True)
                E_new_guess = atoms.get_potential_energy()
                # A check for changes in the energy minimum is made and the guesses for a0 and their corrosponding
                # energies are ajusted.
                if min(E_new_guess,min(E_guesses[0:3])) != E_new_guess:
                    a0_guesses[2] = a0_new_guess
                    E_guesses[2] = E_new_guess
                else:
                    a0_guesses[0] = a0_guesses[1]
                    a0_guesses[1] = a0_new_guess
                    E_guesses[0] = E_guesses[1]
                    E_guesses[1] = E_new_guess
            elif min([E_guesses[0],E_guesses[2]]) == E_guesses[2]:
                # The right end is lower -- probe between the left and middle guess
                a0_new_guess = 0.33 * a0_guesses[0] + 0.67 * a0_guesses[1]
                # The energy for this new guess is calculated
                atoms.set_cell(a0_new_guess * self.Size * IdentityMatrix,scale_atoms=True)
                E_new_guess = atoms.get_potential_energy()
                # A check for changes in the energy minimum is made and the guesses for a0 and their corrosponding
                # energies are ajusted.
                if min(E_new_guess,min(E_guesses[0:3])) != E_new_guess:
                    a0_guesses[0] = a0_new_guess
                    E_guesses[0] = E_new_guess
                else:
                    a0_guesses[2] = a0_guesses[1]
                    a0_guesses[1] = a0_new_guess
                    E_guesses[2] = E_guesses[1]
                    E_guesses[1] = E_new_guess
        # An estimate of the minimum energy can now be found from a second degree polynomial fit through the three
        # current guesses for a0 and the corresponding values of the energy.
        Poly = numpyfit.polyfit(a0_guesses,E_guesses[0:3],2)
        # The lattice constant corresponding to the lowest energy from the Polynomiel fit is found
        a0 = - Poly[1] / (2 * Poly[0])
        # Now five guesses for a0 and the corresponding energy are evaluated from and around the current a0.
        a0_guesses = a0 * numpy.array([1 - 2 * self.Tol / 5,1 - self.Tol / 5,1,1 + self.Tol / 5,1 + 2 * self.Tol / 5])
        for i in range(5):
            # Changes the lattice constant for the atoms object
            atoms.set_cell(a0_guesses[i] * self.Size * IdentityMatrix,scale_atoms=True)
            # Calculates the energy of the system for the new lattice constant
            E_guesses[i] = atoms.get_potential_energy()
        # The method EquationOfState is now used to find the Bulk modulus and the minimum energy for the system.
        # The volume of the sample for the given a0_guesses
        Vol = (self.Size * a0_guesses )**3
        # The equilibrium volume, energy and bulk modulus are calculated
        (Vol0 , E0 , B ) = EquationOfState(Vol.tolist(),E_guesses.tolist()).fit()
        return (Vol0,E0,B,Vol0**(1./3) / self.Size)
def BulkModulus(self,EMT,PARAMETERS):
return self.BM_LC_Calculator(EMT,PARAMETERS)[2]
def LatticeConstant(self,EMT,PARAMETERS):
return self.BM_LC_Calculator(EMT,PARAMETERS)[3]
def EvocCalculator(self,EMT,PARAMETERS):
# Return 0 is used when the method is not desired to be used
# return 0
""" This method calculates the vacancy formation energy for the system of atoms. That is, this method calculates
the potential energy for a complete system of atoms, then removes an atom and calculates the potential energy
again. Then the energy for the full system (scaled with the number of atoms in the reduced system) are
subtracted from that of the reduced system and the vacancy formation energy, Evoc, is returned. """
# The atoms object is initialized for the chosen size and type of system
atoms = FaceCenteredCubic(size=(self.Size,self.Size,self.Size), symbol=self.Element)
# The EMT calculator given is attached to the atoms object
atoms.set_calculator(EMT)
# The energy of the full system is calculated
E_FullSystem = atoms.get_potential_energy()
# an atom is removed from the system
atoms.pop()
# The energy of the reduced system is calculated
E_ReducedSystem = atoms.get_potential_energy()
# The energy of a full system compared to the energy pr atom of the reduced system is calculated and returned
return E_ReducedSystem - E_FullSystem * len(atoms) / (len(atoms) + 1)
def EfccEhcpCalculator(self,EMT,PARAMETERS):
# Return 0 is used when the method is not desired to be used
# return 0
""" This method uses the EMT calculator to calculate and return the difference in energy between a system of
atoms placed in the HCP and FCC structure. """
# The atoms objects are created using the input size and element and the energy calculator
# is set to the EMT calculator
# The Lattice Constants, a,c, for the HCP lattice is here given by the nearest neighbor distance of
# the system in the FCC crystal structure, a = dnn, and the ideal relation between a and
# c: a/c = sqrt(8/3) => c = dnn / sqrt(8/3)
a = beta * PARAMETERS[self.Element][1] * Bohr
c = a * numpy.sqrt(8./3.)
# The HCP crystal is created, the size of the crystal is defined as 5,5,5, any smaller crystal will result in
# Neighborlist errors.
atoms1 = HexagonalClosedPacked(size=(5,5,5),directions=[[2,-1,-1,0],[0,1,-1,0],[0,0,0,1]],
symbol=self.Element,latticeconstant={'a':a,'c':c})
atoms1.set_calculator(EMT)
# The FCC crystal is created
atoms2 = FaceCenteredCubic(size=(self.Size,self.Size,self.Size),symbol=self.Element)
atoms2.set_calculator(EMT)
# The energy difference pr atom is calculated and returned
return atoms1.get_potential_energy() / len(atoms1) - atoms2.get_potential_energy() / len(atoms2)
def EfccEbccCalculator(self,EMT,PARAMETERS):
# Return 0 is used when the method is not desired to be used
# return 0
""" This method uses the given EMT calculator to calculate and return the difference in energy between a system
of atoms placed in the BCC and FCC structure. """
# The atoms objects are created using the input size and element and the energy calculator
# is set to the EMT calculator
# The Lattice Constant for the BCC lattice is here given so that the volume pr atom of the system is
# held fixed for the FCC and BCC lattice, that is Vol_pr_atom_BCC = Vol_pr_atom_FCC.
atoms1 = FaceCenteredCubic(size=(self.Size,self.Size,self.Size),symbol=self.Element)
atoms1.set_calculator(EMT)
# The Lattice constant which produces the same volume pr atom for an BCC crystal is calculated
LCBcc = 1./(2.**(1./3.)) * beta * PARAMETERS[self.Element][1] * Bohr * numpy.sqrt(2)
atoms2 = BodyCenteredCubic(size=(self.Size,self.Size,self.Size),symbol=self.Element,latticeconstant=LCBcc)
atoms2.set_calculator(EMT)
# The energy difference pr atom is calculated and returned
return atoms2.get_potential_energy() / len(atoms2) - atoms1.get_potential_energy() / len(atoms1)
def C44_Calculator(self,EMT,PARAMETERS):
# Return 0 is used when the method is not desired to be used
# return 0
""" This method uses the given EMT calculator to calculate and return the value of the matrix element C44 for
a system of atoms of a given element type. The calculation is done by using that:
C44 = 1 / Volume * d^2/depsilon^2 (E_system) where epsilon is the displacement in one direction of the
system along one axis diveded by the highth of the system. """
# An atom object is created and the calculator attached
atoms = FaceCenteredCubic(size=(self.Size,self.Size,self.Size),symbol=self.Element)
atoms.set_calculator(EMT)
# The volume of the sample is calculated
Vol = atoms.get_volume()
# The value of the relative displacement, epsilon, is set
epsilon = 1. / 1000
# The matrix used to change the unitcell by n*epsilon is initialized
LMM = numpy.array([[1,0,-10*epsilon],[0,1,0],[0,0,1]])
# The original unit cell is conserved
OCell = atoms.get_cell()
# The array for storing the energies is initialized
E_calc = numpy.zeros(20)
# The numerical value of C44 is calculated
for i in range(20):
# The new system cell based on the pertubation epsilon is set
atoms.set_cell(numpy.dot(OCell, LMM), scale_atoms=True)
# The energy of the system is calculated
E_calc[i] = atoms.get_potential_energy()
# The value of LMM is updated
LMM[0,2] += epsilon
# A polynomial fit is made for the energy as a function of epsilon
# The displaced axis is defined
da = numpy.arange(-10, 10) * epsilon
# The fit is made
Poly = numpyfit.polyfit(da,E_calc,2)
# Now C44 can be estimated from this fit by the second derivative of Poly = a * x^2 + b * x + c , multiplied
# with 1 / (2 * Volume) of system
C44 = 2. / Vol * Poly[0]
return C44
def C11_Calculator(self,EMT,PARAMETERS):
# Return 0 is used when the method is not desired to be used
# return 0
""" This method uses the given EMT calculator to calculate and return the value of the matrix element C11 for
a system of atoms of a given element type. The calculation is done by using that:
C11 = 1 / Volume * d^2/depsilon^2 (E_system). """
# An atom object is created and the calculator attached
atoms = FaceCenteredCubic(size=(self.Size,self.Size,self.Size),symbol=self.Element)
atoms.set_calculator(EMT)
# The volume of the sample is calculated
Vol = atoms.get_volume()
# The value of the relative displacement, epsilon, is set
epsilon = 1. / 1000
# The matrix used to change the unitcell by n*epsilon is initialized
LMM = numpy.array([[1-10*epsilon,0,0],[0,1,0],[0,0,1]])
# The original unit cell is conserved
OCell = atoms.get_cell()
# The array for storing the energies is initialized
E_calc = numpy.zeros(20)
# The numerical value of C11 is calculated
for i in range(20):
# The new system cell based on the pertubation epsilon is set
atoms.set_cell(numpy.dot(OCell, LMM), scale_atoms=True)
# The energy of the system is calculated
E_calc[i] = atoms.get_potential_energy()
# The value of LMM is updated
LMM[0,0] += epsilon
# A second degree polynomial fit is made for the energy as a function of epsilon
# The displaced axis is defined
da = numpy.arange(-10, 10) * epsilon
# The fit is made
Poly = numpyfit.polyfit(da,E_calc,2)
# Now C11 can be estimated from this fit by the second derivative of Poly = a * x^2 + b * x + c, multiplied
# with 1 / (2 * Volume) of system
C11 = 2. / Vol * Poly[0]
return C11
def E_cohCalculator(self,EMT,PARAMETERS):
# Return 0 is used when the method is not desired to be used
# return 0
""" Calculates the Cohesive energy of a system of atoms using the EMT calculator specified. """
# As the EMT calculator calculates the energy of the system such that the energy of the individual atoms in
# their equilibrium distance in the crystal is zero it is only needed to calculate the energy of a single atom
# in an empty system.
# The crystal is created
atoms = FaceCenteredCubic(size=(self.Size,self.Size,self.Size),symbol=self.Element)
# a single atom is taken out of the crystal
atoms2 = atoms[[0,]]
# The calculator is attached to the atoms objects
atoms.set_calculator(EMT)
atoms2.set_calculator(EMT)
# The energy difference between the atom alone in vacuum and in the crystal structure is calculated and returned
return atoms2.get_potential_energy() - atoms.get_potential_energy() / len(atoms)
def SurfaceEnergy_Calculator(self,EMT,PARAMETERS):
    """ The Method calculates and returns the surface energy for the given element along the [0,0,1] and [1,1,1]
    directions in the FCC crystal structure. """
    # The size of the crystals are set " NEED TO THINK ABOUT THESE LATER! "
    # (unit cells along x, y, z; the last axis is the one that ends in free surfaces)
    S001 = 3,3,5
    S111 = 5,5,5
    # The surfaces (slabs) are created (pbc=(1,1,0) creates periodic boudry conditions
    # in two of three directions and thus leaves the last direction as two surfaces.
    Surface001 = FaceCenteredCubic(size=S001,symbol=self.Element,pbc=(1,1,0))
    Surface111 = FaceCenteredCubic(size=S111,directions=[[1,-1,0],[1,1,-2],[1,1,1]],
                                   symbol=self.Element,pbc=(1,1,0))
    Surface001.set_calculator(EMT)
    Surface111.set_calculator(EMT)
    # A structural relaxsation is run for the surface crystal in order to secure
    # the correct structure of the crystal.
    # NOTE(review): unlike the sibling Surface001/Surface110/Surface111 methods
    # (which pass logfile=None), these BFGS runs write .traj files to the working
    # directory as a side effect -- confirm that is intended.
    dyn001 = BFGS(Surface001, trajectory='relaxedsurface001.traj')
    dyn111 = BFGS(Surface111, trajectory='relaxedsurface111.traj')
    dyn001.run(fmax=0.01)
    dyn111.run(fmax=0.01)
    # The referance bulk crystals are created
    Bulk001 = FaceCenteredCubic(size=S001,symbol=self.Element)
    Bulk111 = FaceCenteredCubic(size=S111,directions=[[1,-1,0],[1,1,-2],[1,1,1]],symbol=self.Element)
    # The calculator is assigned
    Bulk001.set_calculator(EMT)
    Bulk111.set_calculator(EMT)
    # The surface area is calculated
    # The cross product between the x and y axis in the crystal is determined
    # NOTE(review): [:,0] / [:,1] index *columns* of the cell matrix; these equal
    # the lattice (row) vectors only for a diagonal cell -- presumably true for
    # the cells built here, but worth confirming.
    Cross001 = numpy.cross(Bulk001.get_cell()[:,0],Bulk001.get_cell()[:,1])
    Cross111 = numpy.cross(Bulk111.get_cell()[:,0],Bulk111.get_cell()[:,1])
    # The area of the surface is determined from the formular A = |X x Y|.
    area001 = numpy.sqrt(numpy.dot(Cross001,Cross001))
    area111 = numpy.sqrt(numpy.dot(Cross111,Cross111))
    # The surface energy is calculated and returned (two surfaces are present in
    # SurfaceRelaxed)
    return ( (Surface001.get_potential_energy() - Bulk001.get_potential_energy()) / 2 / area001,
             (Surface111.get_potential_energy() - Bulk111.get_potential_energy()) / 2 / area111)
def Surface001(self,EMT,PARAMETERS):
    """ The Method calculates and returns the surface energy for the given element along the [0,0,1] direction in
    the FCC crystal structure. """
    # Crystal dimensions in unit cells; the z axis carries the two free surfaces.
    slab_size = 3, 3, 5
    # pbc=(1,1,0): periodic in-plane only, so the slab terminates in two surfaces.
    slab = FaceCenteredCubic(size=slab_size, symbol=self.Element, pbc=(1,1,0))
    slab.set_calculator(EMT)
    # Relax the slab so the surface layers reach their equilibrium positions.
    relaxation = BFGS(slab, logfile=None)
    relaxation.run(fmax=0.01)
    # Fully periodic bulk reference crystal of identical size.
    bulk = FaceCenteredCubic(size=slab_size, symbol=self.Element)
    bulk.set_calculator(EMT)
    # Surface area A = |X x Y| from the two in-plane cell vectors.
    in_plane_cross = numpy.cross(bulk.get_cell()[:,0], bulk.get_cell()[:,1])
    area = numpy.sqrt(numpy.dot(in_plane_cross, in_plane_cross))
    # The slab exposes two surfaces, hence the division by 2.
    return (slab.get_potential_energy() - bulk.get_potential_energy()) / 2 / area
def Surface110(self,EMT,PARAMETERS):
    """ The Method calculates and returns the surface energy for the given element along the [1,1,0] direction in
    the FCC crystal structure. """
    # Crystal dimensions in unit cells; the last axis carries the two free surfaces.
    slab_size = 3, 5, 5
    # pbc=(1,1,0): periodic in-plane only, leaving two free [1,1,0] surfaces.
    slab = FaceCenteredCubic(size=slab_size, directions=[[1,-1,0],[0,0,1],[1,1,0]],
                             symbol=self.Element, pbc=(1,1,0))
    slab.set_calculator(EMT)
    # Relax the slab so the surface layers reach their equilibrium positions.
    relaxation = BFGS(slab, logfile=None)
    relaxation.run(fmax=0.01)
    # Fully periodic bulk reference with identical size and orientation.
    bulk = FaceCenteredCubic(size=slab_size, directions=[[1,-1,0],[0,0,1],[1,1,0]],
                             symbol=self.Element)
    bulk.set_calculator(EMT)
    # Surface area A = |X x Y| from the two in-plane cell vectors.
    in_plane_cross = numpy.cross(bulk.get_cell()[:,0], bulk.get_cell()[:,1])
    area = numpy.sqrt(numpy.dot(in_plane_cross, in_plane_cross))
    # The slab exposes two surfaces, hence the division by 2.
    return (slab.get_potential_energy() - bulk.get_potential_energy()) / 2 / area
def Surface111(self,EMT,PARAMETERS):
    """ The Method calculates and returns the surface energy for the given element along the [1,1,1]
    direction in the FCC crystal structure. """
    # Crystal dimensions in unit cells " NEED TO THINK ABOUT THESE LATER! "
    slab_size = 4, 4, 4
    # pbc=(1,1,0): periodic in-plane only, leaving two free [1,1,1] surfaces.
    slab = FaceCenteredCubic(size=slab_size, directions=[[1,-1,0],[1,1,-2],[1,1,1]],
                             symbol=self.Element, pbc=(1,1,0))
    slab.set_calculator(EMT)
    # Relax the slab so the surface layers reach their equilibrium positions.
    relaxation = BFGS(slab, logfile=None)
    relaxation.run(fmax=0.01)
    # Fully periodic bulk reference with identical size and orientation.
    bulk = FaceCenteredCubic(size=slab_size, directions=[[1,-1,0],[1,1,-2],[1,1,1]],
                             symbol=self.Element)
    bulk.set_calculator(EMT)
    # Surface area A = |X x Y| from the two in-plane cell vectors.
    in_plane_cross = numpy.cross(bulk.get_cell()[:,0], bulk.get_cell()[:,1])
    area = numpy.sqrt(numpy.dot(in_plane_cross, in_plane_cross))
    # The slab exposes two surfaces, hence the division by 2.
    return (slab.get_potential_energy() - bulk.get_potential_energy()) / 2 / area
def L12_BM_LC_Calculator(self,EMT,PARAMETERS):
    # Return 0 is used when the method is not desired to be used
    # return 0
    """ The method L12_BM_LC_Calculator uses the EMT calculator to find the lattice constant which gives
    the lowest possible energy for an alloy of two elements and from a polynomial fit of the energy as a
    function of the lattice constant it calculates and returns the Bulk modulus, the volume of the system at
    the lowest possible energy and this energy """
    # Three values for the lattice constant, a0, are chosen. The highest and lowest value is chosen so that it
    # is certain that these are to high/low respectively compared to the "correct" value. This is done by using the
    # experimental value of the lattice constant, a0_exp, as the middle value and to high/low values are then:
    # a0_exp +- a0_mod, a0_mod = a0_exp/10
    # NOTE(review): 'beta' and 'Bohr' are module-level names defined elsewhere in this file.
    # The 1:3 weighting of the two elements' radii matches the L1_2 stoichiometry.
    a0_exp = ( ( PARAMETERS[self.Element[0]][1] + 3 * PARAMETERS[self.Element[1]][1] )
               * numpy.sqrt(2) * beta * Bohr / 4 )
    # NOTE(review): the comment above says a0_exp/10, but the code uses 0.20 (a0_exp/5).
    a0_mod = a0_exp * 0.20
    a0_guesses = numpy.array([a0_exp - a0_mod,a0_exp,a0_exp + a0_mod])
    # An atoms object consisting of atoms of the chosen element is initialized
    atoms = L1_2(size=(self.Size,self.Size,self.Size),symbol=self.Element,latticeconstant=a0_exp)
    atoms.set_calculator(EMT)
    # An identity matrix for the system is saved to a variable
    IdentityMatrix = numpy.array([[1,0,0],[0,1,0],[0,0,1]])
    # An array for the energy for the chosen guesses for a0 is initialized:
    # (five slots: only the first three are used during the bisection below,
    # all five in the final EquationOfState fit)
    E_guesses = numpy.zeros(5)
    # The energies are calculated
    for i in range(3):
        # Changes the lattice constant for the atoms object
        atoms.set_cell(a0_guesses[i] * self.Size * IdentityMatrix,scale_atoms=True)
        # Calculates the energy of the system for the new lattice constant
        E_guesses[i] = atoms.get_potential_energy()
    # Bisection is used in order to find a small interval of the lattice constant for the minimum of the energy (an
    # abitrary interval length is chosen), This is possible because we are certian from theory that there is only
    # one minimum of the energy function in the interval of interest and thus we wont "fall" into a local minimum
    # and stay there by accident.
    while (a0_guesses[2]-a0_guesses[0]) >= self.Tol:
        # Left edge has the lower energy: probe between the middle and right guesses.
        if min([E_guesses[0],E_guesses[2]]) == E_guesses[0]:
            # A new guess for the lattice constant is introduced
            a0_new_guess = 0.67 * a0_guesses[1] + 0.33 * a0_guesses[2]
            # The energy for this new guess is calculated
            atoms.set_cell(a0_new_guess * self.Size * IdentityMatrix,scale_atoms=True)
            E_new_guess = atoms.get_potential_energy()
            # A check for changes in the energy minimum is made and the guesses for a0 and their corrosponding
            # energies are ajusted.
            if min(E_new_guess,min(E_guesses[0:3])) != E_new_guess:
                # New point is not the minimum: discard the right edge.
                a0_guesses[2] = a0_new_guess
                E_guesses[2] = E_new_guess
            else:
                # New point is the minimum: shift the bracket to the right.
                a0_guesses[0] = a0_guesses[1]
                a0_guesses[1] = a0_new_guess
                E_guesses[0] = E_guesses[1]
                E_guesses[1] = E_new_guess
        # Right edge has the lower energy: probe between the left and middle guesses.
        elif min([E_guesses[0],E_guesses[2]]) == E_guesses[2]:
            # A new guess for the lattice constant is introduced
            a0_new_guess = 0.33 * a0_guesses[0] + 0.67 * a0_guesses[1]
            # The energy for this new guess is calculated
            atoms.set_cell(a0_new_guess * self.Size * IdentityMatrix,scale_atoms=True)
            E_new_guess = atoms.get_potential_energy()
            # A check for changes in the energy minimum is made and the guesses for a0 and their corrosponding
            # energies are ajusted.
            if min(E_new_guess,min(E_guesses[0:3])) != E_new_guess:
                # New point is not the minimum: discard the left edge.
                a0_guesses[0] = a0_new_guess
                E_guesses[0] = E_new_guess
            else:
                # New point is the minimum: shift the bracket to the left.
                a0_guesses[2] = a0_guesses[1]
                a0_guesses[1] = a0_new_guess
                E_guesses[2] = E_guesses[1]
                E_guesses[1] = E_new_guess
    # An estimate of the minimum energy can now be found from a second degree polynomial fit through the three
    # current guesses for a0 and the corresponding values of the energy.
    # NOTE(review): 'numpyfit' is presumably an alias for numpy imported near the top of the file.
    Poly = numpyfit.polyfit(a0_guesses,E_guesses[0:3],2)
    # The lattice constant corresponding to the lowest energy from the Polynomiel fit is found
    a0 = - Poly[1] / (2 * Poly[0])
    # Now five guesses for a0 and the corresponding energy are evaluated from and around the current a0.
    a0_guesses = a0 * numpy.array([1 - 2 * self.Tol / 5,1 - self.Tol / 5,1,1 + self.Tol / 5,1 + 2 * self.Tol / 5])
    for i in range(5):
        # Changes the lattice constant for the atoms object
        atoms.set_cell(a0_guesses[i] * self.Size * IdentityMatrix,scale_atoms=True)
        # Calculates the energy of the system for the new lattice constant
        E_guesses[i] = atoms.get_potential_energy()
    # The method EquationOfState is now used to find the Bulk modulus and the minimum energy for the system.
    # The volume of the sample for the given a0_guesses
    Vol = (self.Size * a0_guesses )**3
    # The equilibrium volume, energy and bulk modulus are calculated
    (Vol0 , E0 , B ) = EquationOfState(Vol.tolist(),E_guesses.tolist()).fit()
    # Returned as (V0, E0, B, a0): the lattice constant is recovered from the cube root of V0.
    return (Vol0,E0,B,Vol0**(1./3) / self.Size)
def L12_BulkModulus(self,EMT,PARAMETERS):
    """ Convenience wrapper: return only the bulk modulus B from the
    (V0, E0, B, a0) tuple produced by L12_BM_LC_Calculator. """
    _vol0, _e0, bulk_modulus, _a0 = self.L12_BM_LC_Calculator(EMT, PARAMETERS)
    return bulk_modulus
def L12_LatticeConstant(self,EMT,PARAMETERS):
    """ Convenience wrapper: return only the equilibrium lattice constant a0
    (last element of the tuple produced by L12_BM_LC_Calculator). """
    results = self.L12_BM_LC_Calculator(EMT, PARAMETERS)
    return results[-1]
|
auag92/n2dm
|
Asap-3.8.4/Python/asap3/Tools/ParameterOptimization/Rasmus2011/QuantityCalculatorEMT.py
|
Python
|
mit
| 31,380
|
[
"ASE",
"CRYSTAL"
] |
f07383e7fd04575aaa459e8f9d2095f53b960b33ba9fc5f3c054d85eb59adc2c
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDnacopy(RPackage):
    """Implements the circular binary segmentation (CBS) algorithm
    to segment DNA copy number data and identify genomic regions
    with abnormal copy number."""

    # Bioconductor landing page for the DNAcopy package.
    homepage = "https://www.bioconductor.org/packages/DNAcopy/"
    # Sources are fetched from the Bioconductor git repository (not a tarball).
    url = "https://git.bioconductor.org/packages/DNAcopy"

    # Pinned to the exact commit of the 1.50.1 release for reproducibility.
    version('1.50.1', git='https://git.bioconductor.org/packages/DNAcopy', commit='a20153029e28c009df813dbaf13d9f519fafa4e8')
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-dnacopy/package.py
|
Python
|
lgpl-2.1
| 1,704
|
[
"Bioconductor"
] |
793c1b0aae90fdb271e702b2afe97be40b844dc3a8b7585a23c38ea6b24c6072
|
import numpy as np
from ase.units import Hartree
from weakref import proxy
# Planck constant / 2*pi in the internal unit system used below
# (LinearlyPolarizedLaser.__init__ divides an energy in Hartree by this to get
# an angular frequency) -- presumably Hartree * internal time unit; TODO confirm.
hbar=0.02342178
class LinearlyPolarizedLaser:
    """ Time-dependent electrostatic potential of a linearly polarized laser field. """

    def __init__(self,energy,flux,polarization,phase=0.0):
        """ Laser parameters.

        electrostatic potential = -E0*sin(omega*t+phase)*dot(polarization,r)

        Parameters:
        -----------
        energy: laser energy (in eV); sets omega via hbar*omega = energy
        flux: electric field flux [ E_0=5.3E-9*sqrt(flux) ]
              flux ~ 10E8...10E16, ~10E12='normal' laser?
        polarization: direction for static polarization (3-array)
        phase: phase for laser pulse (do not start from zero)
        """
        # Convert the eV energy to Hartree, then to an angular frequency.
        self.omega = (energy / Hartree) / hbar
        # Field amplitude from the flux.
        self.E0 = np.sqrt(flux) * 5.33802445585E-09
        # Store the polarization as a unit vector.
        self.pol = polarization / np.linalg.norm(polarization)
        self.phase = phase

    def __call__(self,r,t):
        """ Return the electrostatic potential.

        Parameters:
        -----------
        r: position in Bohrs
        t: time in atomic units (~fs)
        """
        amplitude = -self.E0 * np.sin(self.omega * t + self.phase)
        return amplitude * np.dot(r, self.pol)
class Environment:
    """ Holds the simulation clock and any external electrostatic potentials
    registered against a calculator. """

    def __init__(self,calc):
        # Internal time (atomic units); advanced via propagate_time.
        self.t = 0.0
        # Registered external potential functions phi(r, t).
        self.phis = []
        # Weak proxy so the environment does not keep the calculator alive.
        self.calc = proxy(calc)

    def __del__(self):
        pass

    def propagate_time(self,dt):
        """ Increase time by dt (dt in atomic units) """
        self.t = self.t + dt

    def add_phi(self,phi):
        """ Add external electrostatic potential function in atomic units.

        phi=phi(r,t) is any function, where r is position (Bohrs)
        and t time (atomic units, ~fs)
        """
        self.phis.append(phi)

    def phi(self,r):
        """ Return external electrostatic potential.

        Current internal environment time is used.

        Parameters:
        -----------
        r: position (Bohrs) or atom index;
           index if r is an integer
        """
        # An integer argument is interpreted as an atom index and resolved
        # to a position through the calculator.
        if isinstance(r,int):
            r = self.calc.el.nvector(r)
        # Sum all registered potentials at (r, t); 0.0 keeps the empty case a float.
        return sum((f(r, self.t) for f in self.phis), 0.0)
|
pekkosk/hotbit
|
hotbit/environment.py
|
Python
|
gpl-2.0
| 2,077
|
[
"ASE"
] |
1704d31aef6f7c3d10bbb164127648f3e37340f3abb1032e53818e4dfa45df3a
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Standard library
import os.path
import shutil
import tempfile
from unittest import TestCase

# Third-party
import numpy as np
import pytest

# Project
import bigdl.orca.data
import bigdl.orca.data.pandas
from bigdl.orca import OrcaContext
from bigdl.orca.data import SharedValue
from bigdl.dllib.nncontext import *
from bigdl.orca.data import SparkXShards
class TestSparkXShards(TestCase):
    """Tests for SparkXShards and the pandas read backend of bigdl.orca.data.

    NOTE(review): this unittest.TestCase subclass mixes pytest-style
    `setup_method` with unittest-style `tearDown`; under the unittest run
    protocol `setup_method` may not be invoked -- worth confirming.
    """
    def setup_method(self, method):
        # Fixture files live in ../resources relative to this test module.
        self.resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
        OrcaContext.pandas_read_backend = "pandas"
    def tearDown(self):
        # Restore the default backend so other test modules are unaffected.
        OrcaContext.pandas_read_backend = "spark"
    def test_read_local_csv(self):
        # Reading a directory of CSV files yields one shard per file.
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        data = data_shard.collect()
        assert len(data) == 2, "number of shard should be 2"
        df = data[0]
        assert "location" in df.columns, "location is not in columns"
        # A non-existent path must raise.
        file_path = os.path.join(self.resource_path, "abc")
        with self.assertRaises(Exception) as context:
            xshards = bigdl.orca.data.pandas.read_csv(file_path)
        self.assertTrue('No such file or directory' in str(context.exception))
        # Non-CSV content must fail to parse.
        file_path = os.path.join(self.resource_path, "image3d")
        with self.assertRaises(Exception) as context:
            xshards = bigdl.orca.data.pandas.read_csv(file_path)
        # This error is raised by pandas.errors.ParserError
        self.assertTrue('Error tokenizing data' in str(context.exception), str(context.exception))
    def test_read_local_json(self):
        # Line-delimited JSON directory also yields one shard per file.
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path, orient='columns', lines=True)
        data = data_shard.collect()
        assert len(data) == 2, "number of shard should be 2"
        df = data[0]
        assert "value" in df.columns, "value is not in columns"
    def test_read_s3(self):
        # Only runs when AWS credentials are available in the environment.
        access_key_id = os.getenv("AWS_ACCESS_KEY_ID")
        secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
        if access_key_id and secret_access_key:
            file_path = "s3://analytics-zoo-data/nyc_taxi.csv"
            data_shard = bigdl.orca.data.pandas.read_csv(file_path)
            data = data_shard.collect()
            df = data[0]
            assert "value" in df.columns, "value is not in columns"
    def test_repartition(self):
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path, orient='columns', lines=True)
        partitions_num_1 = data_shard.rdd.getNumPartitions()
        assert partitions_num_1 == 2, "number of partition should be 2"
        data_shard.cache()
        # Repartitioning must preserve the cached state of both shards.
        partitioned_shard = data_shard.repartition(1)
        assert data_shard.is_cached(), "data_shard should be cached"
        assert partitioned_shard.is_cached(), "partitioned_shard should be cached"
        data_shard.uncache()
        assert not data_shard.is_cached(), "data_shard should be uncached"
        partitions_num_2 = partitioned_shard.rdd.getNumPartitions()
        assert partitions_num_2 == 1, "number of partition should be 1"
    def test_apply(self):
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path, orient='columns', lines=True)
        data = data_shard.collect()
        assert data[0]["value"].values[0] > 0, "value should be positive"
        # Per-shard transform: negate one column of each DataFrame.
        def negative(df, column_name):
            df[column_name] = df[column_name] * (-1)
            return df
        trans_data_shard = data_shard.transform_shard(negative, "value")
        data2 = trans_data_shard.collect()
        assert data2[0]["value"].values[0] < 0, "value should be negative"
    def test_read_csv_with_args(self):
        # Extra keyword arguments are forwarded to pandas.read_csv.
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path, sep=',', header=0)
        data = data_shard.collect()
        assert len(data) == 2, "number of shard should be 2"
        df = data[0]
        assert "location" in df.columns, "location is not in columns"
    def test_partition_by_single_column(self):
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        partitioned_shard = data_shard.partition_by(cols="location", num_partitions=4)
        partitions = partitioned_shard.rdd.glom().collect()
        assert len(partitions) == 4
        # partition_by uncaches the source shard and caches the result.
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        partitioned_shard = data_shard.partition_by(cols="location", num_partitions=3)
        assert not data_shard.is_cached(), "data_shard should be uncached"
        assert partitioned_shard.is_cached(), "partitioned_shard should be cached"
        partitions = partitioned_shard.rdd.glom().collect()
        assert len(partitions) == 3
    def test_unique(self):
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        # Column selection followed by unique() spans all shards.
        location_list = data_shard["location"].unique()
        assert len(location_list) == 6
    def test_split(self):
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        # Each shard is transformed into a (head, tail) tuple, then split
        # into two separate SparkXShards.
        trans_data_shard = data_shard.transform_shard(lambda df: (df[0:-1], df[-1:]))
        assert trans_data_shard.is_cached(), "trans_data_shard should be cached"
        shards_splits = trans_data_shard.split()
        assert not trans_data_shard.is_cached(), "shards_splits should be uncached"
        trans_data_shard.uncache()
        del trans_data_shard
        assert len(shards_splits) == 2
        assert shards_splits[0].is_cached(), "shards in shards_splits should be cached"
        data1 = shards_splits[0].collect()
        data2 = shards_splits[1].collect()
        assert len(data1[0].index) > 1
        assert len(data2[0].index) == 1
    def test_len(self):
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        # For DataFrame shards, len() counts rows across all shards.
        assert len(data_shard) == 14
        assert len(data_shard['ID']) == 14
        with self.assertRaises(Exception) as context:
            len(data_shard['abc'])
        self.assertTrue('Invalid key for this XShards' in str(context.exception))
        # For dict shards, len() counts top-level keys per shard.
        def to_dict(df):
            return {'ID': df['ID'].to_numpy(), 'location': df['location'].to_numpy()}
        data_shard = data_shard.transform_shard(to_dict)
        assert len(data_shard['ID']) == 14
        assert len(data_shard) == 4
        with self.assertRaises(Exception) as context:
            len(data_shard['abc'])
        self.assertTrue('Invalid key for this XShards' in str(context.exception))
        # For scalar shards, len() counts the shards themselves and
        # key selection is unsupported.
        def to_number(d):
            return 4
        data_shard = data_shard.transform_shard(to_number)
        assert len(data_shard) == 2
        with self.assertRaises(Exception) as context:
            len(data_shard['abc'])
        self.assertTrue('No selection operation available for this XShards' in
                        str(context.exception))
    def test_save(self):
        # Round-trip the shards through pickle files in a temporary directory.
        temp = tempfile.mkdtemp()
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        path = os.path.join(temp, "data.pkl")
        data_shard.save_pickle(path)
        shards = bigdl.orca.data.XShards.load_pickle(path)
        assert isinstance(shards, bigdl.orca.data.SparkXShards)
        shutil.rmtree(temp)
    def test_transform(self):
        # Transform each DataFrame shard into a feature/label dict.
        def trans_func(df):
            data1 = {'ID': df['ID'].values, 'price': df['sale_price'].values}
            data2 = {'location': df['location'].values}
            return {'x': data1, 'y': data2}
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        assert data_shard.is_cached(), "data_shard should be cached"
        # transform_shard uncaches its input and caches its output.
        transformed_data_shard = data_shard.transform_shard(trans_func)
        assert not data_shard.is_cached(), "data_shard should be uncached"
        assert transformed_data_shard.is_cached(), "transformed_data_shard should be cached"
        data = data_shard.collect()
        assert len(data) == 2, "number of shard should be 2"
        df = data[0]
        assert "location" in df.columns, "location is not in columns"
        trans_data = transformed_data_shard.collect()
        assert len(trans_data) == 2, "number of shard should be 2"
        trans_dict = trans_data[0]
        assert "x" in trans_dict, "x is not in the dictionary"
    def test_transform_broadcast(self):
        # Extra transform arguments may be SharedValue broadcast wrappers;
        # the function receives them and reads .value.
        def negative(df, column_name, minus_val):
            df[column_name] = df[column_name] * (-1)
            df[column_name] = df[column_name] - minus_val.value
            return df
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path, orient='columns', lines=True)
        data = data_shard.collect()
        assert data[0]["value"].values[0] > 0, "value should be positive"
        col_name = "value"
        minus_val = 2
        minus_val_shared_value = SharedValue(minus_val)
        trans_shard = data_shard.transform_shard(negative, col_name,
                                                 minus_val_shared_value)
        data2 = trans_shard.collect()
        assert data2[0]["value"].values[0] < 0, "value should be negative"
        assert data[0]["value"].values[0] + data2[0]["value"].values[0] == -2, "value should be -2"
    def test_get_item(self):
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path, orient='columns', lines=True)
        # Column selection keeps the source cached and does not cache the result.
        selected_shard = data_shard["value"]
        assert data_shard.is_cached(), "data_shard should be cached"
        assert not selected_shard.is_cached(), "selected_shard should not be cached"
        data1 = data_shard.collect()
        data2 = selected_shard.collect()
        assert data1[0]["value"].values[0] == data2[0][0], "value should be same"
        assert data1[1]["value"].values[0] == data2[1][0], "value should be same"
        with self.assertRaises(Exception) as context:
            len(data_shard['abc'])
        self.assertTrue('Invalid key for this XShards' in str(context.exception))
    def test_for_each(self):
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        shards = bigdl.orca.data.pandas.read_csv(file_path)
        def get_item(data, key):
            return data[key]
        # _for_each returns the per-shard results; exceptions raised inside the
        # function are returned as values rather than propagated.
        result1 = shards._for_each(get_item, 'location')
        import pandas as pd
        assert isinstance(result1.first(), pd.Series)
        result2 = shards._for_each(get_item, 'abc')
        assert isinstance(result2.first(), KeyError)
    def test_zip(self):
        def negative(df, column_name, minus_val):
            df[column_name] = df[column_name] * (-1)
            df[column_name] = df[column_name] - minus_val
            return df
        file_path = os.path.join(self.resource_path, "orca/data/json")
        data_shard = bigdl.orca.data.pandas.read_json(file_path, orient='columns', lines=True)
        data_shard = data_shard.repartition(2)
        data_shard.cache()
        transformed_shard = data_shard.transform_shard(negative, "value", 2)
        # zip pairs elements of the two shard sets; it uncaches its inputs' results.
        zipped_shard = data_shard.zip(transformed_shard)
        assert not transformed_shard.is_cached(), "transformed_shard should be uncached."
        data = zipped_shard.collect()
        assert data[0][0]["value"].values[0] + data[0][1]["value"].values[0] == -2, \
            "value should be -2"
        # Zipping with a non-XShards object must raise.
        list1 = list([1, 2, 3])
        with self.assertRaises(Exception) as context:
            data_shard.zip(list1)
        self.assertTrue('other should be a SparkXShards' in str(context.exception))
        # Mismatched partition counts must raise.
        transformed_shard = transformed_shard.repartition(data_shard.num_partitions() - 1)
        with self.assertRaises(Exception) as context:
            data_shard.zip(transformed_shard)
        self.assertTrue('The two SparkXShards should have the same number of partitions' in
                        str(context.exception))
        # Matching partition counts but different element counts must raise.
        dict_data = [{"x": 1, "y": 2}, {"x": 2, "y": 3}]
        sc = init_nncontext()
        rdd = sc.parallelize(dict_data)
        dict_shard = SparkXShards(rdd)
        dict_shard = dict_shard.repartition(1)
        with self.assertRaises(Exception) as context:
            transformed_shard.zip(dict_shard)
        self.assertTrue('The two SparkXShards should have the same number of elements in '
                        'each partition' in str(context.exception))
    def test_transform_with_repartition(self):
        # shards of pandas dataframe
        file_path = os.path.join(self.resource_path, "orca/data/csv")
        data_shard = bigdl.orca.data.pandas.read_csv(file_path)
        partitions = data_shard.rdd.glom().collect()
        # Invariant checked throughout: at most one element per partition.
        for par in partitions:
            assert len(par) <= 1
        def negative(df, column_name):
            df[column_name] = df[column_name] * (-1)
            return df
        shard2 = data_shard.transform_shard(negative, "sale_price")
        shard3 = shard2.repartition(4)
        partitions3 = shard3.rdd.glom().collect()
        for par in partitions3:
            assert len(par) <= 1
        shard4 = shard2.repartition(1)
        partitions4 = shard4.rdd.glom().collect()
        for par in partitions4:
            assert len(par) <= 1
        shard5 = shard4.transform_shard(negative, "sale_price")
        partitions5 = shard5.rdd.glom().collect()
        for par in partitions5:
            assert len(par) <= 1
        # shards of list
        data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
        sc = init_nncontext()
        rdd = sc.parallelize(data)
        data_shard = SparkXShards(rdd)
        shard2 = data_shard.repartition(6)
        partitions2 = shard2.rdd.glom().collect()
        for par in partitions2:
            assert len(par) <= 1
        shard3 = data_shard.repartition(1)
        partitions2 = shard3.rdd.glom().collect()
        for par in partitions2:
            assert len(par) <= 1
        # shards of numpy array
        data = [np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                np.array([9, 10, 11, 12]), np.array([13, 14, 15, 16])]
        sc = init_nncontext()
        rdd = sc.parallelize(data)
        data_shard = SparkXShards(rdd)
        shard2 = data_shard.repartition(6)
        partitions2 = shard2.rdd.glom().collect()
        for par in partitions2:
            assert len(par) <= 1
        shard3 = data_shard.repartition(1)
        partitions2 = shard3.rdd.glom().collect()
        for par in partitions2:
            assert len(par) <= 1
# Allow running this test module directly (python test_pandas_backend.py).
if __name__ == "__main__":
    pytest.main([__file__])
|
intel-analytics/BigDL
|
python/orca/test/bigdl/orca/data/test_pandas_backend.py
|
Python
|
apache-2.0
| 15,558
|
[
"ORCA"
] |
f444e48c2a66f3616a8769c2c113b724881d1e63bb2e14c6df162d5df47ee859
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Autocompletion config for YouCompleteMe in Chromium.
#
# USAGE:
#
# 1. Install YCM [https://github.com/Valloric/YouCompleteMe]
# (Googlers should check out [go/ycm])
#
# 2. Create a symbolic link to this file called .ycm_extra_conf.py in the
# directory above your Chromium checkout (i.e. next to your .gclient file).
#
# cd src
# ln -rs tools/vim/chromium.ycm_extra_conf.py ../.ycm_extra_conf.py
#
# 3. (optional) Whitelist the .ycm_extra_conf.py from step #2 by adding the
# following to your .vimrc:
#
# let g:ycm_extra_conf_globlist=['<path to .ycm_extra_conf.py>']
#
# You can also add other .ycm_extra_conf.py files you want to use to this
# list to prevent excessive prompting each time you visit a directory
# covered by a config file.
#
# 4. Profit
#
#
# Usage notes:
#
# * You must use ninja & clang to build Chromium.
#
# * You must have run gyp_chromium and built Chromium recently.
#
#
# Hacking notes:
#
# * The purpose of this script is to construct an accurate enough command line
# for YCM to pass to clang so it can build and extract the symbols.
#
# * Right now, we only pull the -I and -D flags. That seems to be sufficient
# for everything I've used it for.
#
# * That whole ninja & clang thing? We could support other configs if someone
# were willing to write the correct commands and a parser.
#
# * This has only been tested on gPrecise.
import os
import os.path
import re
import shlex
import subprocess
import sys
# Flags from YCM's default config.
_default_flags = [
    '-DUSE_CLANG_COMPLETER',
    '-std=c++11',
    '-x',
    'c++',
]
# Source-file extensions -- presumably tried as companions when a header file
# is being completed; confirm against usage later in this file.
_header_alternates = ('.cc', '.cpp', '.c', '.mm', '.m')
# Per-extension extra flags: Objective-C(++) needs an explicit language flag.
_extension_flags = {
    '.m': ['-x', 'objective-c'],
    '.mm': ['-x', 'objective-c++'],
}
def PathExists(*args):
  """Returns True if the path assembled from *args exists on disk."""
  candidate = os.path.join(*args)
  return os.path.exists(candidate)
def FindChromeSrcFromFilename(filename):
  """Searches for the root of the Chromium checkout.

  Walks parent directories until it finds a 'src' directory containing DEPS
  with either ../.gclient or a .git directory next to it.

  Args:
    filename: (String) Path to source file being edited.

  Returns:
    (String) Path of 'src/', or None if unable to find.
  """
  current = os.path.normpath(os.path.dirname(filename))
  while True:
    is_src_root = (os.path.basename(current) == 'src'
                   and PathExists(current, 'DEPS')
                   and (PathExists(current, '..', '.gclient')
                        or PathExists(current, '.git')))
    if is_src_root:
      return current
    parent = os.path.normpath(os.path.join(current, '..'))
    if parent == current:
      # Reached the filesystem root without finding a checkout.
      return None
    current = parent
def GetDefaultSourceFile(chrome_root, filename):
  """Returns the default source file to use as an alternative to |filename|.

  Compile flags used to build the default source file is assumed to be a
  close-enough approximation for building |filename|.

  Args:
    chrome_root: (String) Absolute path to the root of Chromium checkout.
    filename: (String) Absolute path to the source file.

  Returns:
    (String) Absolute path to substitute source file.
  """
  blink_root = os.path.join(chrome_root, 'third_party', 'WebKit')
  # Blink files get a Blink-specific representative translation unit.
  if filename.startswith(blink_root):
    return os.path.join(blink_root, 'Source', 'core', 'Init.cpp')
  # Test files approximate better to a unittest target.
  if 'test.' in filename:
    return os.path.join(chrome_root, 'base', 'logging_unittest.cc')
  return os.path.join(chrome_root, 'base', 'logging.cc')
def GetNinjaBuildOutputsForSourceFile(out_dir, filename):
  """Returns a list of build outputs for filename.

  The list is generated by invoking the 'ninja -t query' tool to retrieve
  the inputs and outputs of |filename|, filtered down to .o and .obj
  outputs.

  Args:
    out_dir: (String) Absolute path to ninja build output directory.
    filename: (String) Absolute path to source file.

  Returns:
    (List of Strings) List of target names. Will return [] if |filename|
    doesn't yield any .o or .obj outputs.
  """
  # Ninja expects the source path relative to the build output directory.
  rel_filename = os.path.relpath(filename, out_dir)
  proc = subprocess.Popen(['ninja', '-C', out_dir, '-t', 'query', rel_filename],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                          universal_newlines=True)
  query_text, _ = proc.communicate()
  if proc.returncode != 0:
    return []
  # Everything after the 'outputs:' header lists the build outputs, one
  # indented target per line; keep only object files.
  outputs_section = query_text.partition('\n outputs:\n')[2]
  stripped_lines = (line.strip() for line in outputs_section.split('\n'))
  return [target for target in stripped_lines
          if target.endswith(('.o', '.obj'))]
def GetClangCommandLineForNinjaOutput(out_dir, build_target):
  """Returns the Clang command line for building |build_target|

  Asks ninja for the list of commands used to build |filename| and returns
  the final Clang invocation.

  Args:
    out_dir: (String) Absolute path to ninja build output directory.
    build_target: (String) A build target understood by ninja

  Returns:
    (String or None) Clang command line or None if a Clang command line
    couldn't be determined.
  """
  proc = subprocess.Popen(['ninja', '-v', '-C', out_dir,
                           '-t', 'commands', build_target],
                          stdout=subprocess.PIPE, universal_newlines=True)
  commands_text, _ = proc.communicate()
  if proc.returncode != 0:
    return None
  # Ninja prints one command per build step, dependencies first. The last
  # Clang invocation is the one expected to output |build_target| itself.
  clang_lines = [line for line in commands_text.split('\n') if 'clang' in line]
  return clang_lines[-1] if clang_lines else None
def GetClangCommandLineFromNinjaForSource(out_dir, filename):
  """Returns a Clang command line used to build |filename|.

  The same source file could be built multiple times using different tool
  chains. In such cases, this command returns the first Clang invocation.
  We currently don't prefer one toolchain over another. Hopefully the tool
  chain corresponding to the Clang command line is compatible with the
  Clang build used by YCM.

  Args:
    out_dir: (String) Absolute path to Chromium checkout.
    filename: (String) Absolute path to source file.

  Returns:
    (String or None): Command line for Clang invocation using |filename| as
    a source. Returns None if no such command line could be found.
  """
  for target in GetNinjaBuildOutputsForSourceFile(out_dir, filename):
    command = GetClangCommandLineForNinjaOutput(out_dir, target)
    if command:
      return command
  return None
def GetClangOptionsFromCommandLine(clang_commandline, out_dir,
                                   additional_flags):
  """Extracts relevant command line options from |clang_commandline|

  Args:
    clang_commandline: (String) Full Clang invocation.
    out_dir: (String) Absolute path to ninja build directory. Relative
        paths in the command line are relative to |out_dir|.
    additional_flags: (List of String) Additional flags to return.

  Returns:
    (List of Strings) The list of command line flags for this source file.
    Can be empty.
  """
  clang_flags = [] + additional_flags
  # Parse flags that are important for YCM's purposes.
  clang_tokens = shlex.split(clang_commandline)
  for flag_index, flag in enumerate(clang_tokens):
    if flag.startswith('-I'):
      # Relative paths need to be resolved, because they're relative to the
      # output dir, not the source.
      include_path = flag[2:]
      if include_path.startswith('/'):
        clang_flags.append(flag)
      else:
        abs_path = os.path.normpath(os.path.join(out_dir, include_path))
        clang_flags.append('-I' + abs_path)
    elif flag.startswith('-std'):
      clang_flags.append(flag)
    elif flag.startswith('-') and len(flag) > 1 and flag[1] in 'DWFfmO':
      if flag == '-Wno-deprecated-register' or flag == '-Wno-header-guard':
        # These flags causes libclang (3.3) to crash. Remove it until things
        # are fixed.
        continue
      clang_flags.append(flag)
    elif flag == '-isysroot':
      # On Mac -isysroot <path> is used to find the system headers.
      # Copy over both flags.
      if flag_index + 1 < len(clang_tokens):
        clang_flags.append(flag)
        clang_flags.append(clang_tokens[flag_index + 1])
    elif flag.startswith('--sysroot='):
      # On Linux we use a sysroot image.
      # BUG FIX: str.lstrip('--sysroot=') strips a *character set*, not a
      # prefix, so a relative path starting with any of '-sysroot=e' would
      # lose its leading characters (e.g. 'rel/...' -> 'el/...').  Slice
      # the prefix off instead.
      sysroot_path = flag[len('--sysroot='):]
      if sysroot_path.startswith('/'):
        clang_flags.append(flag)
      else:
        abs_path = os.path.normpath(os.path.join(out_dir, sysroot_path))
        clang_flags.append('--sysroot=' + abs_path)
  return clang_flags
def GetClangOptionsFromNinjaForFilename(chrome_root, filename):
  """Returns the Clang command line options needed for building |filename|.

  Command line options are based on the command used by ninja for building
  |filename|. If |filename| is a .h file, uses its companion .cc or .cpp
  file. If a suitable companion file can't be located or if ninja doesn't
  know about |filename|, then uses default source files in Blink and
  Chromium for determining the commandline.

  Args:
    chrome_root: (String) Path to src/.
    filename: (String) Absolute path to source file being edited.

  Returns:
    (List of Strings) The list of command line flags for this source file.
    Can be empty.
  """
  if not chrome_root:
    return []

  # Generally, everyone benefits from including Chromium's src/, because
  # all of Chromium's includes are relative to that.
  additional_flags = ['-I' + os.path.join(chrome_root)]
  # The Clang that builds Chromium can be newer than YCM's libclang, so
  # unknown warning options must not become errors (via '-Werror').
  additional_flags.append('-Wno-unknown-warning-option')

  sys.path.append(os.path.join(chrome_root, 'tools', 'vim'))
  from ninja_output import GetNinjaOutputDirectory
  out_dir = GetNinjaOutputDirectory(chrome_root)

  basename, extension = os.path.splitext(filename)
  if extension == '.h':
    # Headers aren't built directly; try companion source files instead.
    candidates = [basename + ext for ext in _header_alternates]
  else:
    candidates = [filename]

  clang_line = None
  buildable_extension = extension
  for candidate in candidates:
    clang_line = GetClangCommandLineFromNinjaForSource(out_dir, candidate)
    if clang_line:
      buildable_extension = os.path.splitext(candidate)[1]
      break
  additional_flags += _extension_flags.get(buildable_extension, [])

  if not clang_line:
    # Ninja didn't know about |filename| or its companions: the file may be
    # new, or build.ninja stale. Fall back to a default build target.
    clang_line = GetClangCommandLineFromNinjaForSource(
        out_dir, GetDefaultSourceFile(chrome_root, filename))
    if not clang_line:
      return additional_flags

  return GetClangOptionsFromCommandLine(clang_line, out_dir, additional_flags)
def FlagsForFile(filename):
  """This is the main entry point for YCM. Its interface is fixed.

  Args:
    filename: (String) Path to source file being edited.

  Returns:
    (Dictionary)
      'flags': (List of Strings) Command line flags.
      'do_cache': (Boolean) True if the result should be cached.
  """
  abs_filename = os.path.abspath(filename)
  chrome_root = FindChromeSrcFromFilename(abs_filename)
  clang_flags = GetClangOptionsFromNinjaForFilename(chrome_root, abs_filename)
  # An empty flag list is assumed to be a transient failure; skipping the
  # cache lets YCM try to determine the flags again next time.
  return {
      'flags': _default_flags + clang_flags,
      'do_cache': bool(clang_flags),
  }
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/tools/vim/chromium.ycm_extra_conf.py
|
Python
|
gpl-3.0
| 12,264
|
[
"VisIt"
] |
7c25b72657e9c4a0301e3edd0cf0b02260b55ee3b8f3cc1a271698ecc7831530
|
#!/usr/bin/python
import json,datetime,sys,os,code,random,collections
from optparse import make_option
import openpyxl as xl
#Django Imports
from django.contrib.auth.models import User
from django.db.models import Max
from django.core.management import ManagementUtility
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db import transaction
from django.contrib.auth.models import User
from constance import config
import contacts.models as cont
class Command(BaseCommand):
    """Wipe and rebuild the demo site's facility, participants, messages
    and visits from the demo_messages.xlsx fixture workbook."""

    help = 'Delete and reset messages, contacts and visits for the Demo Site'

    # -d/--dry-run: report what would happen without writing to the database.
    option_list = BaseCommand.option_list + (
        make_option('-d','--dry-run',default=False,action='store_true',
            help='Do not make changes in database. Default: False'),
        )

    def handle(self,*args,**options):
        # Announce which mode this run is in.
        if options['dry_run']:
            print 'Dry Run: No database changes'
        else:
            print 'Live Run: Changing database'

        try:
            # Make sure demo facility exists
            demo = cont.Facility.objects.get(name='demo')
            print 'Demo Facility Exists....Deleting'
            demo.delete()
            # Make sure participants exist
        except cont.Facility.DoesNotExist:
            print 'Demo Facility Does Not Exist'
            if not options['dry_run']:
                demo = create_facility()

        # NOTE(review): if the facility was found and deleted above, |demo|
        # here refers to the removed row -- confirm this is intended.
        admin_user = User.objects.get(username='admin')
        try:
            admin_practitioner = cont.Practitioner.objects.get(user=admin_user)
            print 'Admin Practitioner Exists'
        except cont.Practitioner.DoesNotExist as e:
            print 'Admin Practitioner Does Not Exist....Creating'
            cont.Practitioner.objects.create(user=User.objects.first(),facility=demo)

        # Fixture lives under ignore/ locally, OPENSHIFT_DATA_DIR when deployed.
        excel_file = 'ignore/demo_messages.xlsx'
        if settings.ON_OPENSHIFT:
            excel_file = os.path.join(os.environ['OPENSHIFT_DATA_DIR'],'demo_messages.xlsx')
        clients,messages = load_excel(excel_file)

        if not options['dry_run']:
            # Returns {nickname => contact}
            contacts = create_participants(demo,clients)
            create_messages(contacts,messages)
            config.CURRENT_DATE = '2015-07-20'
######################################################################
# Utility Functions
######################################################################

### ****** Named Tuples ****** ###

# Headers: nickname, due_date, last_msg_client, status
# Row schema for the 'Clients' worksheet: one tuple per demo participant.
Client = collections.namedtuple('Client',('nickname','due_date','last_msg_client','status','visit'))

#Headers: Dates, Time, Sender, Name Client, Message
# Row schema for the 'Messages' worksheet: one tuple per demo message.
Message = collections.namedtuple('Message',('created','is_system','is_outgoing','client','message'))
### ****** Functions to parse the Excel File ****** ###
def get_values(row):
    """Return the raw .value contents of every cell in a worksheet row."""
    return [c.value for c in row]
def make_date(date_str, year=2015):
    """Parse a 'DD-Mon' string (e.g. '20-Jul') into a datetime.date.

    Args:
        date_str: day-and-month string, or None.
        year: year to attach to the parsed day/month. Defaults to 2015,
            the year the demo fixture data was authored for, so existing
            callers keep their behavior.

    Returns:
        datetime.date, or None when date_str is None.
    """
    if date_str is None:
        return None
    return datetime.datetime.strptime(
        '{}-{}'.format(date_str, year), '%d-%b-%Y').date()
def make_time(time_str):
    """Parse a 12-hour clock string like '2:30 PM' into a datetime.time.

    None passes through unchanged.
    """
    if time_str is None:
        return None
    parsed = datetime.datetime.strptime(time_str, '%I:%M %p')
    return parsed.time()
def make_client(row):
    """Build a Client tuple from the first five cells of a worksheet row."""
    nickname, due, last_msg, status, visit = get_values(row[:5])
    return Client(nickname, make_date(due), make_date(last_msg),
                  status, make_date(visit))
def make_message(row):
    """Build a Message tuple from the first five cells of a worksheet row.

    Sender codes: a leading 'S' marks a system-generated message and a
    leading 'C' marks an incoming client message; anything else is treated
    as an outgoing staff message.
    """
    date,time,sender,client,message = get_values(row[:5])
    print date,time,sender,client
    is_system = sender.startswith('S')
    is_outgoing = not sender.startswith('C')
    # NOTE(review): |time| is combined directly rather than going through
    # make_time(); presumably openpyxl already yields a datetime.time for
    # this column -- confirm against the workbook.
    created = datetime.datetime.combine(make_date(date),time)
    return Message(created,is_system,is_outgoing,client,message)
def load_excel(file_name):
    """Read the 'Clients' and 'Messages' worksheets from the demo workbook.

    Returns:
        (clients, messages): lists of Client and Message named tuples; each
        sheet's first (header) row is skipped.
    """
    wb = xl.load_workbook(file_name)
    client_ws = wb['Clients']
    message_ws = wb['Messages']
    # NOTE(review): subscripting ws.rows only works where openpyxl exposes
    # .rows as a list; newer releases return a generator -- confirm the
    # pinned openpyxl version.
    clients = [make_client(row) for row in client_ws.rows[1:]]
    messages = [make_message(row) for row in message_ws.rows[1:]]
    return clients,messages
### ****** Functions to make database objects ****** ###
def create_facility():
    """Create and return the demo Facility row."""
    demo_facility = cont.Facility.objects.create(name="demo")
    return demo_facility
def create_participants(facility,clients):
    """Create a Contact for every parsed client.

    Returns a {nickname: contact} mapping used to attach messages later.
    """
    return dict((c.nickname, create_contact(facility, c)) for c in clients)
def create_messages(contacts,messages):
    """Persist every parsed message against its owner's contact record."""
    for msg in messages:
        create_message(contacts[msg.client], msg)
def create_message(contact,message):
    """Store one Message row for |contact|, back-dating its created stamp."""
    attrs = {
        'text': message.message,
        'is_outgoing': message.is_outgoing,
        'is_system': message.is_system,
        'contact': contact,
        'connection': contact.connection(),
        'translated_text': message.message,
        'translate_skipped': True,
    }
    # Messages from before July 2015 start out already marked as viewed.
    if message.created < datetime.datetime(2015,7,1):
        attrs['is_viewed'] = True
    msg_row = cont.Message.objects.create(**attrs)
    # Re-set created after the insert and save again (presumably the model
    # stamps it automatically on create -- confirm).
    msg_row.created = message.created
    msg_row.save()
def create_contact(facility,client):
    """Create a Contact plus its primary phone Connection and, when the
    client row has a visit date, a scheduled Visit."""
    attrs = {
        'study_id': random.randint(10000,100000),
        'anc_num': random.randint(1000,10000),
        'nickname': client.nickname,
        'birthdate': datetime.date(1980,1,1),
        'study_group': 'two-way',
        'due_date': client.due_date,
        'last_msg_client': client.last_msg_client,
        'facility': facility,
        'status': client.status,
    }
    contact = cont.Contact.objects.create(**attrs)
    # Fake phone number derived from the random ANC number.
    cont.Connection.objects.create(
        identity='+2500111{}'.format(attrs['anc_num']),
        contact=contact,
        is_primary=True)
    if client.visit:
        cont.Visit.objects.create(contact=contact,scheduled=client.visit,reminder_last_seen=client.visit)
    return contact
|
tperrier/mwachx
|
utils/management/commands/reset_demo.py
|
Python
|
apache-2.0
| 5,688
|
[
"VisIt"
] |
b21bffcc0a1d4bdab7d89c873d21f902f748d0360e0558ed06f0f9ad38deadcd
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module maturity/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Pool Avi RESTful Object
description:
- This module is used to configure Pool object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
a_pool:
description:
- Name of container cloud application that constitutes a pool in a a-b pool configuration, if different from vs app.
ab_pool:
description:
- A/b pool configuration.
ab_priority:
description:
- Priority of this pool in a a-b pool pair.
- Internally used.
apic_epg_name:
description:
- Synchronize cisco apic epg members with pool servers.
application_persistence_profile_ref:
description:
- Persistence will ensure the same user sticks to the same server for a desired duration of time.
- It is a reference to an object of type applicationpersistenceprofile.
autoscale_launch_config_ref:
description:
- Reference to the launch configuration profile.
- It is a reference to an object of type autoscalelaunchconfig.
autoscale_networks:
description:
- Network ids for the launch configuration.
autoscale_policy_ref:
description:
- Reference to server autoscale policy.
- It is a reference to an object of type serverautoscalepolicy.
capacity_estimation:
description:
- Inline estimation of capacity of servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
capacity_estimation_ttfb_thresh:
description:
- The maximum time-to-first-byte of a server.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_ramp_duration:
description:
- Duration for which new connections will be gradually ramped up to a server recently brought online.
- Useful for lb algorithms that are least connection based.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
created_by:
description:
- Creator name.
default_server_port:
description:
- Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
- The ssl checkbox enables avi to server encryption.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
description:
description:
- A description of the pool.
domain_name:
description:
- Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
- It is performed only when common name check host_check_enabled is enabled.
east_west:
description:
- Inherited config from virtualservice.
enabled:
description:
- Enable or disable the pool.
- Disabling will terminate all open connections and pause health monitors.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
fail_action:
description:
- Enable an action - close connection, http redirect, local http response, or backup pool - when a pool failure happens.
- By default, a connection will be closed, in case the pool experiences a failure.
fewest_tasks_feedback_delay:
description:
- Periodicity of feedback for fewest tasks server selection algorithm.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
graceful_disable_timeout:
description:
- Used to gracefully disable a server.
- Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
health_monitor_refs:
description:
- Verify server health by applying one or more health monitors.
- Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
- The passive monitor listens only to client to server communication.
- It raises or lowers the ratio of traffic destined to a server based on successful responses.
- It is a reference to an object of type healthmonitor.
host_check_enabled:
description:
- Enable common name check for server certificate.
- If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
inline_health_monitor:
description:
- The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
- This may alter the expected behavior of the lb method, such as round robin.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ipaddrgroup_ref:
description:
- Use list of servers from ip address group.
- It is a reference to an object of type ipaddrgroup.
lb_algorithm:
description:
- The load balancing algorithm will pick a server within the pool's list of available servers.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
lb_algorithm_consistent_hash_hdr:
description:
- Http header name to be used for the hash key.
lb_algorithm_hash:
description:
- Criteria used as a key for determining the hash between the client and server.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
max_concurrent_connections_per_server:
description:
- The maximum number of concurrent connections allowed to each server within the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_conn_rate_per_server:
description:
- Rate limit connections to each server.
name:
description:
- The name of the pool.
required: true
networks:
description:
- (internal-use) networks designated as containing servers for this pool.
- The servers may be further narrowed down by a filter.
- This field is used internally by avi, not editable by the user.
pki_profile_ref:
description:
- Avi will validate the ssl certificate present by a server against the selected pki profile.
- It is a reference to an object of type pkiprofile.
placement_networks:
description:
- Manually select the networks and subnets used to provide reachability to the pool's servers.
- Specify the subnet using the following syntax 10-1-1-0/24.
- Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
prst_hdr_name:
description:
- Header name for custom header persistence.
request_queue_depth:
description:
- Minimum number of requests to be queued when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as 128.
request_queue_enabled:
description:
- Enable request queue when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_server_name:
description:
- Rewrite incoming host header to server name of the server to which the request is proxied.
- Enabling this feature rewrites host header for requests to all servers in the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_sni:
description:
- If sni server name is specified, rewrite incoming host header to the sni server name.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_auto_scale:
description:
- Server autoscale.
- Not used anymore.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_count:
description:
- Number of server_count.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
server_name:
description:
- Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
- If no value is specified, avi will use the incoming host header instead.
server_reselect:
description:
- Server reselect configuration for http requests.
servers:
description:
- The pool directs load balanced traffic to this list of destination servers.
- The servers can be configured by ip address, name, network or via ip address group.
sni_enabled:
description:
- Enable tls sni for server connections.
- If disabled, avi will not send the sni extension as part of the handshake.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_key_and_certificate_ref:
description:
- Service engines will present a client ssl certificate to the server.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- When enabled, avi re-encrypts traffic to the backend servers.
- The specific ssl profile defines which ciphers and ssl versions will be supported.
- It is a reference to an object of type sslprofile.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_service_port:
description:
- Do not translate the client's destination port when sending the connection to the server.
- The pool or servers specified service port will still be used for health monitoring.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the pool.
vrf_ref:
description:
- Virtual routing context that the pool is bound to.
- This is used to provide the isolation of the set of networks the pool is attached to.
- The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform.
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
'''
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    # Avi SDK helpers are unavailable; main() checks this flag and reports
    # a friendly installation error instead of crashing at import time.
    HAS_AVI = False
def main():
    """Ansible entry point: declare the avi_pool argument spec and delegate
    create/update/delete handling to the shared Avi API helper."""
    # One entry per Pool attribute documented in DOCUMENTATION above; all
    # optional except 'name'.
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        a_pool=dict(type='str',),
        ab_pool=dict(type='dict',),
        ab_priority=dict(type='int',),
        apic_epg_name=dict(type='str',),
        application_persistence_profile_ref=dict(type='str',),
        autoscale_launch_config_ref=dict(type='str',),
        autoscale_networks=dict(type='list',),
        autoscale_policy_ref=dict(type='str',),
        capacity_estimation=dict(type='bool',),
        capacity_estimation_ttfb_thresh=dict(type='int',),
        cloud_config_cksum=dict(type='str',),
        cloud_ref=dict(type='str',),
        connection_ramp_duration=dict(type='int',),
        created_by=dict(type='str',),
        default_server_port=dict(type='int',),
        description=dict(type='str',),
        domain_name=dict(type='list',),
        east_west=dict(type='bool',),
        enabled=dict(type='bool',),
        fail_action=dict(type='dict',),
        fewest_tasks_feedback_delay=dict(type='int',),
        graceful_disable_timeout=dict(type='int',),
        health_monitor_refs=dict(type='list',),
        host_check_enabled=dict(type='bool',),
        inline_health_monitor=dict(type='bool',),
        ipaddrgroup_ref=dict(type='str',),
        lb_algorithm=dict(type='str',),
        lb_algorithm_consistent_hash_hdr=dict(type='str',),
        lb_algorithm_hash=dict(type='str',),
        max_concurrent_connections_per_server=dict(type='int',),
        max_conn_rate_per_server=dict(type='dict',),
        name=dict(type='str', required=True),
        networks=dict(type='list',),
        pki_profile_ref=dict(type='str',),
        placement_networks=dict(type='list',),
        prst_hdr_name=dict(type='str',),
        request_queue_depth=dict(type='int',),
        request_queue_enabled=dict(type='bool',),
        rewrite_host_header_to_server_name=dict(type='bool',),
        rewrite_host_header_to_sni=dict(type='bool',),
        server_auto_scale=dict(type='bool',),
        server_count=dict(type='int',),
        server_name=dict(type='str',),
        server_reselect=dict(type='dict',),
        servers=dict(type='list',),
        sni_enabled=dict(type='bool',),
        ssl_key_and_certificate_ref=dict(type='str',),
        ssl_profile_ref=dict(type='str',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        use_service_port=dict(type='bool',),
        uuid=dict(type='str',),
        vrf_ref=dict(type='str',),
    )
    # Merge in controller/credential options shared by all Avi modules.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # NOTE(review): the empty set is presumably the set of fields needing
    # special handling by avi_ansible_api -- confirm against its signature.
    return avi_ansible_api(module, 'pool',
                           set([]))
# Script entry point.
if __name__ == '__main__':
    main()
|
adityacs/ansible
|
lib/ansible/modules/network/avi/avi_pool.py
|
Python
|
gpl-3.0
| 16,763
|
[
"VisIt"
] |
cd533968a399085a7bb42bd0282192680762166c07617f536d0d9dd39013329d
|
from ConfigObjects import Recipe, Enchantment, ItemStack, Factory, CraftedRecipe
from ParseConfig import ParseConfig
# Tuning coefficients shared across the recipe builders, filled in later
# (keys like 'i', 'b', 'e' are populated inside createFactorieAndRecipes).
coeffs={}
# Multiplier applied to factory setup/input stockpile amounts (see the
# *gMod uses in the factory input definitions below).
gMod=1
# NOTE(review): mMod is defined but never read in this chunk -- confirm it
# is used elsewhere before removing.
mMod=0.1
def main():
    """Entry point: load material/enchantment data, then emit the config."""
    print 'Running....'
    ItemStack.importMaterials()
    Enchantment.importEnchantments()
    createConfigFile()
def createConfigFile():
    """Assemble the full config dict, resolve recipe conflicts, and save it."""
    config={}
    config['factories'],config['recipes']=createFactorieAndRecipes()
    config['disabled_recipes']=[]
    config['enabled_recipes']=createCraftingRecipes()
    checkConflicts(config['factories'])
    print 'Fixing Conflicts...'
    fixConflicts(config['factories'])
    # Second pass: verify the conflicts were actually resolved.
    checkConflicts(config['factories'])
    ParseConfig.saveConfig(config)
    ParseConfig.prettyList(config)
def createFactorieAndRecipes():
inputs={}
outputs={}
enchantments={}
recipes={}
factories={}
#Smelting
#Stone
id='Smelt_Stone'
inputs[id]=[ItemStack(name='Cobblestone',amount=640)]
outputs[id]=[ItemStack(name='Stone',amount=640*1.333)]
recipes[id]=Recipe(identifier=id,name='Smelt Stone',inputs=inputs[id],outputs=outputs[id],time=80)
id='Stone_Smelter'
inputs[id]=[ItemStack(name='Stone',amount=2048*gMod)]
factories[id]=Factory(identifier=id,name='Stone Smelter',inputs=inputs[id],outputRecipes=[recipes['Smelt_Stone']])
#Charcoal
woods=['Oak Wood','Spruce Wood','Birch Wood','Jungle Wood']
id='Charcoal_Smelter'
inputs[id]=[ItemStack(name='Charcoal',amount=600*gMod)]
factories[id]=Factory(identifier=id,name='Charcoal Burner',inputs=inputs[id])
for wood in woods:
id='Smelt_'+wood.replace(' ','_')
inputs[id]=[ItemStack(name=wood,amount=256)]
outputs[id]=[ItemStack(name='Charcoal',amount=256*2)]
recipes[id]=Recipe(identifier=id,name='Burn '+wood,inputs=inputs[id],outputs=outputs[id],time=256/8*3/4)
factories['Charcoal_Smelter'].addRecipe(recipes[id])
id='Smelt_Coal'
inputs[id]=[ItemStack(name='Coal',amount=256)]
outputs[id]=[ItemStack(name='Charcoal',amount=256*2)]
recipes[id]=Recipe(identifier=id,name='Burn Coal',inputs=inputs[id],outputs=outputs[id],time=256/8*3/4)
factories['Charcoal_Smelter'].addRecipe(recipes[id])
#Glass
id='Smelt_Glass'
inputs[id]=[ItemStack(name='Sand',amount=256)]
outputs[id]=[ItemStack(name='Glass',amount=256*3)]
recipes[id]=Recipe(identifier=id,name='Smelt Glass',inputs=inputs[id],outputs=outputs[id],time=48)
id='Glass_Smelter'
inputs[id]=[ItemStack(name='Sand',amount=2048*gMod),ItemStack(name='Charcoal',amount=256*gMod)]
factories[id]=Factory(identifier=id,name='Glass Smelter',inputs=inputs[id],outputRecipes=[recipes['Smelt_Glass']])
#Stone Brick Smelter
bricks={'Cracked':'Flint','Mossy':'Vine','Chiseled':'Gravel'}
id='Stone_Brick_Smelter'
inputs[id]=[ItemStack(name='Stone Brick',amount=512*gMod),ItemStack(name='Lapis Lazuli',amount=256*gMod)]
factories[id]=Factory(identifier=id,name='Fancy Stone Brick Smelter',inputs=inputs[id])
factoryid=id
for brick in bricks:
id='Smelt_'+brick+'_Stone_Brick'
inputs[id]=[ItemStack(name='Stone Brick',amount=64),ItemStack(name='Lapis Lazuli',amount=32),ItemStack(bricks[brick],amount=64)]
outputs[id]=[ItemStack(brick+' Stone Brick',amount=64)]
recipes[id]=Recipe(identifier=id,name='Smelt '+brick+' Stone Brick',inputs=inputs[id],outputs=outputs[id],time=64)
factories[factoryid].addRecipe(recipes[id])
#Smelter
ores={'Coal Ore':('Coal',512,3,128),'Iron Ore':('Iron Ingot',384,1.75,128),'Gold Ore':('Gold Ingot',192,7,32),'Diamond Ore':('Diamond',96,3,16)}
inputs['Smelter']=[ItemStack(name=values[0],amount=values[1]) for ore,values in ores.items()]
factories['Smelter']=Factory(identifier='Smelter',name='Ore Smelter',inputs=inputs['Smelter'])
for ore,values in ores.items():
id='Smelt_'+ore.replace(' ','_')
inputs[id]=[ItemStack(name=ore,amount=values[3])]
outputs[id]=[ItemStack(name=values[0],amount=values[3]*values[2])]
recipes[id]=Recipe(identifier=id,name='Smelt '+ore,inputs=inputs[id],outputs=outputs[id],time=values[3]/8*3/4)
factories['Smelter'].addRecipe(recipes[id])
#Equipment
enchantmentData=[]
enchantmentData.extend([('Unbreaking',[(3,1)]),('Silk Touch',[(1,0.1)]),('Efficiency',[(1,.3),(2,.2),(3,0.1),(4,0.05),(5,0.01)])])
enchantmentData.extend([('Bane of the Anthropods',[(1,.4),(2,.3),(3,.2),(4,.1),(5,0.3)]),('Smite',[(1,.4),(2,.3),(3,.2),(4,.1),(5,0.05)]),('Looting',[(1,0.5),(2,0.4),(3,0.3)])])
enchantmentData.extend([('Respiration',[(1,0.5),(2,0.4),(3,0.3),(4,0.4)]),('Blast Protection',[(1,0.5),(2,0.4),(3,0.3),(4,0.4)]),('Feather Falling',[(1,0.5),(2,0.4),(3,0.3),(4,0.4)]),('Fire Protection',[(1,0.5),(2,0.4),(3,0.3),(4,0.4)]),('Projectile Protection',[(1,0.5),(2,0.4),(3,0.3),(4,0.4)])])
enchantmentsInputs=sum([[Enchantment(name=name,level=level,probability=prob) for level,prob in pairs] for name,pairs in enchantmentData],[])
inputDict={'Iron':'Iron Ingot','Gold':'Gold Ingot','Diamond':'Diamond'}
coeffs['i']={'Helmet':5,'Chestplate':8,'Leggings':7,'Boots':4,'Sword':2,'Axe':3,'Pickaxe':3,'Spade':1,'Hoe':2}# Modifier for different branches of the tree, based on vanilla costs
coeffs['b']={'Helmet':1,'Chestplate':1,'Leggings':1,'Boots':1,'Sword':1,'Axe':1,'Pickaxe':1,'Spade':1,'Hoe':1}
for key,value in coeffs['b'].items():coeffs['b'][key]=value*5
coeffs['e']={'Helmet':3,'Chestplate':3,'Leggings':3,'Boots':3,'Sword':3,'Axe':6,'Pickaxe':3,'Spade':3,'Hoe':6}
buildCosts={'Helmet':192,'Chestplate':320,'Leggings':256,'Boots':160,'Sword':80,'Axe':64,'Pickaxe':96,'Spade':48,'Hoe':32}
for tech in inputDict.keys():
for equipment in coeffs['i'].keys():
enchantments[tech+'_'+equipment]=[]
if tech=='Gold':
enchantments[tech+'_'+equipment]=list(enchantmentsInputs)
inputs[tech+'_'+equipment]=[ItemStack(name=inputDict[tech],amount=coeffs['i'][equipment]*coeffs['b'][equipment])]
outputs[tech+'_'+equipment]=[ItemStack(name=tech+' '+equipment,amount=coeffs['b'][equipment]*coeffs['e'][equipment])]
recipes[tech+'_'+equipment]=Recipe(identifier=tech+'_'+equipment,name='Forge '+tech+' '+equipment+'.',inputs=inputs[tech+'_'+equipment],outputs=outputs[tech+'_'+equipment],enchantments=enchantments[tech+'_'+equipment],time=inputs[tech+'_'+equipment][0].amount)
inputs[tech+'_'+equipment+'_Smithy']=[ItemStack(name=inputDict[tech],amount=buildCosts[equipment])]
factories[tech+'_'+equipment+'_Smithy']=Factory(identifier=tech+'_'+equipment+'_Smithy',name=tech+' '+equipment+' Smithy',inputs=inputs[tech+'_'+equipment+'_Smithy'],outputRecipes=[recipes[tech+'_'+equipment]])
#Food output:([inputs],build cost,efficiency,bulk)
#Butchers
oi={('Cooked Chicken',1):([('Raw Chicken',1)],192,2,64),('Grilled Pork',1):([('Pork',1)],160,2,64),('Cooked Beef',1):([('Raw Beef',1)],64,2,64),('Cooked Fish',1):([('Raw Fish',1)],16,2,64)}
id='Grill'
inputs[id]=[ItemStack(name=key[0],amount=value[1]) for key,value in oi.items()]
factories[id]=Factory(identifier=id,name='Bakery',inputs=inputs[id])
for key,value in oi.items():
id=key[0].replace(' ','_')
inputs[id]=[ItemStack(name=name,amount=amount*value[3]) for name,amount in value[0]]
outputs[id]=[ItemStack(name=key[0],amount=key[1]*value[2]*value[3])]
recipes[id]=Recipe(identifier=id,name='Grill '+name,inputs=inputs[id],outputs=outputs[id],time=inputs[id][0].amount/8*3/4)
factories['Grill'].addRecipe(recipes[id])
#Bakery
oi={('Bread',1):([('Wheat',3)],256,2,128),('Baked Potato',1):([('Potato',1)],512,2,192),('Cookie',8):([('Wheat',2),('Cocoa',1)],1024,2,128)}
id='Bakery'
inputs[id]=[ItemStack(name=key[0],amount=value[1]) for key,value in oi.items()]
factories[id]=Factory(identifier=id,name='Bakery',inputs=inputs[id])
for key,value in oi.items():
id=key[0].replace(' ','_')
inputs[id]=[ItemStack(name=name,amount=amount*value[3]) for name,amount in value[0]]
outputs[id]=[ItemStack(name=key[0],amount=key[1]*value[2]*value[3])]
recipes[id]=Recipe(identifier=id,name='Bake '+name,inputs=inputs[id],outputs=outputs[id],time=256/8*3/4)
factories['Bakery'].addRecipe(recipes[id])
#Items
##Wool
inputColors=['White', 'Light Gray', 'Gray', 'Black', 'Brown', 'Pink']
dyes={'White':'Bone Meal','Light Gray':'Light Gray Dye','Gray':'Gray Dye','Black':'Ink Sack','Red':'Rose Red','Orange':'Orange Dye','Yellow':'Dandelion Yellow','Lime':'Lime Dye','Green':'Cactus Green','Cyan':'Cyan Dye','Light Blue':'Light Blue Dye','Blue':'Lapis Lazuli','Purple':'Purple Dye','Magenta':'Magenta Dye','Pink':'Pink Dye','Brown':'Cocoa'}
for inputColor in inputColors:
factoryId=inputColor.replace(' ','_')+'_Wool_Processing'
inputs[factoryId]=[ItemStack(name=dye,amount=20*gMod) for dye in dyes.values()]+[ItemStack(name=inputColor+' Wool',amount=20)]
factories[factoryId]=Factory(identifier=factoryId,name=inputColor+' Wool Processing',inputs=inputs[factoryId])
for outputColor,dye in dyes.items():
if inputColor!=outputColor:
id='Dye_'+inputColor.replace(' ','_')+'_Wool_'+outputColor.replace(' ','_')
inputs[id]=[ItemStack(name=inputColor+' Wool',amount=64),ItemStack(name=dyes[outputColor],amount=4)]
outputs[id]=[ItemStack(name=outputColor+' Wool',amount=64)]
recipes[id]=Recipe(identifier=id,name='Dye '+inputColor+' Wool '+outputColor,inputs=inputs[id],outputs=outputs[id])
factories[factoryId].addRecipe(recipes[id])
##Rail
factoryid='Rail_Factory'
inputs[factoryid]=[ItemStack(name='Iron Ingot',amount=256),ItemStack(name='Stick',amount=96),ItemStack(name='Gold Ingot',amount=192),ItemStack(name='Redstone',amount=32)]
factories[factoryid]=Factory(identifier=factoryid,name='Rail Factory',inputs=inputs[factoryid])
id='Produce_Rail'
inputs[id]=[ItemStack(name='Iron Ingot',amount=128),ItemStack(name='Stick',amount=32)]
outputs[id]=[ItemStack(name='Rail',amount=528)]
recipes[id]=Recipe(identifier=id,name='Produce Rails',inputs=inputs[id],outputs=outputs[id])
factories[factoryid].addRecipe(recipes[id])
id='Produce_Powered_Rail'
inputs[id]=[ItemStack(name='Gold Ingot',amount=64),ItemStack(name='Redstone',amount=10),ItemStack(name='Stick',amount=10)]
outputs[id]=[ItemStack(name='Powered Rail',amount=102)]
recipes[id]=Recipe(identifier=id,name='Produce Powered Rails',inputs=inputs[id],outputs=outputs[id])
factories[factoryid].addRecipe(recipes[id])
#Enchanting
inputs['Wood_Cauldron']=[ItemStack(name='Stick',amount=1024*gMod)]
inputs['Iron_Cauldron']=[ItemStack(name='Iron Ingot',amount=200*gMod)]
inputs['Diamond_Cauldron']=[ItemStack(name='Diamond',amount=50*gMod)]
factories['Wood_Cauldron']=Factory(identifier='Wood_Cauldron',name='Wood Cauldron',inputs=inputs['Wood_Cauldron'])
factories['Iron_Cauldron']=Factory(identifier='Iron_Cauldron',name='Iron Cauldron',inputs=inputs['Iron_Cauldron'])
factories['Diamond_Cauldron']=Factory(identifier='Diamond_Cauldron',name='Diamond Cauldron',inputs=inputs['Diamond_Cauldron'])
##cauldronInputs[Cauldron Type].append(([(Input Name 1,Input amount 1),(Input Name 2,Input amount 2),...],Number of XP bottles output))
cauldronInputs={}
cauldronInputs['Wood']=[]
cauldronInputs['Wood'].append(([('Glass Bottle',24),('Wheat',1280)],24))
cauldronInputs['Wood'].append(([('Glass Bottle',10),('Nether Wart',1280)],10))
cauldronInputs['Wood'].append(([('Glass Bottle',10),('Baked Potato',1280)],10))
cauldronInputs['Wood'].append(([('Glass Bottle',8),('Cookie',1280)],8))
cauldronInputs['Wood'].append(([('Glass Bottle',14),('Carrot',1280)],14))
#cauldronInputs['Wood'].append(([('Glass Bottle',64),('Melon',1280)],64))
cauldronInputs['Iron']=[]
cauldronInputs['Iron'].append(([('Glass Bottle',24),('Carrot',256),('Cactus',256),('Bread',256)],24))
cauldronInputs['Iron'].append(([('Glass Bottle',14),('Carrot',256),('Nether Wart',256),('Baked Potato',256)],14))
cauldronInputs['Iron'].append(([('Glass Bottle',42),('Carrot',128),('Cocoa',64),('Pumpkin',64),('Cactus',64),('Bread',64),('Cooked Beef',32)],42))
cauldronInputs['Iron'].append(([('Glass Bottle',42),('Nether Wart',256),('Melon Block',64),('Sugar Cane',64),('Cookie',512),('Baked Potato',64),('Grilled Pork',64)],42))
cauldronInputs['Diamond']=[]
cauldronInputs['Diamond'].append(([('Glass Bottle',128),('Carrot',96),('Melon Block',32),('Cactus',256),('Red Rose',8),('Rotten Flesh',128),('Red Mushroom',32),('Vine',32),('Bread',128),('Grilled Pork',32)],128))
cauldronInputs['Diamond'].append(([('Glass Bottle',128),('Nether Wart',64),('Melon Block',32),('Sugar Cane',128),('Yellow Flower',16),('Rotten Flesh',128),('Brown Mushroom',64),('Vine',32),('Baked Potato',256),('Cooked Chicken',16)],128))
cauldronInputs['Diamond'].append(([('Glass Bottle',128),('Wheat',128),('Cocoa',16),('Pumpkin',128),('Cactus',256),('Red Rose',8),('Spider Eye',32),('Red Mushroom',16),('Grass',32),('Cooked Fish',16)],128))
cauldronInputs['Diamond'].append(([('Glass Bottle',128),('Nether Wart',64),('Pumpkin',128),('Sugar Cane',128),('Yellow Flower',16),('Spider Eye',32),('Brown Mushroom',64),('Grass',64),('Cookie',256),('Cooked Beef',32)],128))
for cauldron in cauldronInputs.keys():
i=0
for recipeInput,bottles in cauldronInputs[cauldron]:
id=cauldron+'_XP_Bottle_'+str(i)
i+=1
inputs[id]=[ItemStack(name=name,amount=amount) for name,amount in recipeInput]
outputs[id]=[ItemStack(name='Exp Bottle',amount=bottles)]
recipes[id]=Recipe(identifier=id,name='Brew XP Bottles - '+str(i),inputs=inputs[id],outputs=outputs[id])
factories[cauldron+'_Cauldron'].addRecipe(recipes[id])
#inputs[id+'_Bulk']=[itemStack.modifyAmount(64) for itemStack in recipes[id].inputs]
#outputs[id+'_Bulk']=[itemStack.modifyAmount(64) for itemStack in recipes[id].outputs]
#recipes[id+'_Bulk']=Recipe(identifier=id+'_Bulk',name='Brew XP Bottles - '+str(i),inputs=inputs[id+'_Bulk'],outputs=outputs[id+'_Bulk'],time=128)
#factories[cauldron+'_Cauldron'].addRecipe(recipes[id+'_Bulk'])
#Add in repair
for factory in factories.values():
factory.repairMultiple=min([input.amount for input in [input.modifyAmount(mMod) for input in factory.inputs]])
factory.repairInputs=[input.modifyAmount(1.0/factory.repairMultiple) for input in [input.modifyAmount(mMod) for input in factory.inputs]]
return (factories,recipes)
def createCraftingRecipes():
    """Build the list of extra crafting-table recipes enabled by the plugin.

    Covers XP-bottle <-> emerald conversion plus the two ways of crafting
    a double stone slab (from full stone blocks or from slabs).
    """
    return [
        CraftedRecipe('XP to Emerald',
                      inputs={'a': ItemStack('Exp Bottle', amount=9)},
                      output=ItemStack('Emerald')),
        CraftedRecipe('Emerald to XP',
                      inputs={'a': ItemStack('Emerald')},
                      output=ItemStack('Exp Bottle', amount=9)),
        CraftedRecipe('Stone to Double Slab',
                      inputs={'s': ItemStack('Stone')},
                      shape=['sss', 'sss'],
                      output=ItemStack('Double Stone Slab')),
        CraftedRecipe('Slab to Double Slab',
                      inputs={'s': ItemStack('Stone Slab')},
                      shape=['s', 's'],
                      output=ItemStack('Double Stone Slab')),
    ]
def checkConflicts(factories):
    """Report pairs of factories whose build inputs are indistinguishable.

    Two factories conflict when they have the same number of input stacks
    and every input ItemStack of one has an `equals` counterpart in the
    other — such factories could not be told apart when built in-game.
    Each conflicting ordered pair is printed, so a clash is reported twice
    (A vs B and B vs A), matching the original behaviour.

    Fixes: the original used the Python-2-only `print` statement; the
    single-argument parenthesized form below behaves identically on both
    Python 2 and Python 3.  Short-circuit `break`s were added; they cannot
    change the printed output because `sameInputs` only ever ANDs to False.
    """
    for factory in factories.values():
        for otherFactory in factories.values():
            if factory != otherFactory:
                # Equal input counts are necessary for a conflict.
                sameInputs = len(factory.inputs) == len(otherFactory.inputs)
                for itemStack in factory.inputs:
                    inOtherFactory = False
                    for otherItemStack in otherFactory.inputs:
                        if itemStack.equals(otherItemStack):
                            inOtherFactory = True
                            break  # match found; no need to keep scanning
                    sameInputs = sameInputs and inOtherFactory
                    if not sameInputs:
                        break  # one unmatched stack rules out a conflict
                if sameInputs:
                    print('Conflict of ' + factory.name + ' and ' + otherFactory.name)
def fixConflicts(factories):
    """Disambiguate factories that share an identical set of build inputs.

    Whenever two distinct factories have the same number of input stacks
    and every stack of the first matches (via ``equals``) some stack of
    the second, the first factory's leading input amount is bumped by one
    so the two build recipes are no longer identical.
    """
    for factory in factories.values():
        for rival in factories.values():
            if factory == rival:
                continue
            # Conflict = same input count AND every stack has a counterpart.
            clash = len(factory.inputs) == len(rival.inputs) and all(
                any(stack.equals(other) for other in rival.inputs)
                for stack in factory.inputs)
            if clash:
                factory.inputs[0].amount += 1
# Script entry point: generate the config only when run directly, not on import.
if __name__ == '__main__':
    main()
|
ProgrammerDan/FactoryMod
|
Config Scripts/GenerateConfig.py
|
Python
|
bsd-3-clause
| 16,879
|
[
"BLAST"
] |
9cd3b3114e2a9c34d07c0b7f9b94020a473a783b64b297628b18ae9084b10ca3
|
import numpy as np
###############
## Class for univariate gaussian
## p(x) = 1/sqrt(2*pi*sigma^2) * e ^ - (x-mu)^2/(2*sigma^2)
## Where mu is the gaussian mean, and sigma^2 is the gaussian variance
################
class Gaussian():
    """Univariate Gaussian distribution N(mean, variance).

    p(x) = 1/sqrt(2*pi*sigma^2) * exp(-(x - mean)^2 / (2*sigma^2)),
    where sigma^2 is stored as ``variance``.
    """

    def __init__(self, mean, variance):
        # mean: distribution mean; variance: sigma^2 (NOT the std deviation).
        self.mean = mean
        self.variance = variance

    def sample(self, points):
        """Draw ``points`` i.i.d. samples from the distribution.

        Fix: ``np.random.normal``'s second argument is the STANDARD
        DEVIATION (scale), not the variance, so we pass sqrt(variance).
        The original passed the variance directly, which over-disperses
        the samples whenever variance != 1.
        """
        return np.random.normal(self.mean, np.sqrt(self.variance), points)
### Returns the mean and the variance of a data set of X points assuming that the points come from a gaussian distribution
### X: data matrix with one observation per row (statistics are taken over axis 0)
def estimate_gaussian(X):
    """Fit a Gaussian to the data by maximum likelihood.

    Computes the per-column sample mean and (biased) sample variance of
    ``X`` and wraps them in a ``Gaussian`` instance.
    """
    return Gaussian(np.mean(X, axis=0), np.var(X, axis=0))
|
oplatek/lxmls-toolkit
|
lxmls/distributions/gaussian.py
|
Python
|
mit
| 677
|
[
"Gaussian"
] |
626f259efd29df95a81e56b1dc697bc422bd91b2e82fddacdbbd1fb68795248f
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json
import unittest
from dashboard import bisect_report
from dashboard.common import testing_common
from dashboard.models import try_job
# Canonical bisect result payload; every test starts from a deep copy of this
# and overrides fields via _BisectResults(**kwargs).
# NOTE(review): "culprit_data" appears twice in this literal; json.loads keeps
# the last occurrence (null).
_SAMPLE_BISECT_RESULTS_JSON = json.loads("""
{
"issue_url": "https://test-rietveld.appspot.com/200039",
"aborted_reason": null,
"bad_revision": "",
"bisect_bot": "staging_android_nexus5X_perf_bisect",
"bug_id": 12345,
"buildbot_log_url": "http://build.chromium.org/513",
"change": "7.35%",
"command": "src/tools/perf/run_benchmark foo",
"culprit_data": null,
"good_revision": "",
"metric": "Total/Score",
"culprit_data": null,
"revision_data": [],
"secondary_regressions": [],
"status": "completed",
"test_type": "perf",
"try_job_id": 123456,
"warnings": []
}
""")
# Template for a single entry of revision_data; expanded by _Revisions().
_SAMPLE_BISECT_REVISION_JSON = json.loads("""
{
"build_id": null,
"commit_hash": "",
"depot_name": "chromium",
"failed": false,
"failure_reason": null,
"n_observations": 0,
"result": "unknown",
"revision_string": ""
}
""")
# Template for culprit_data; customized by _Culprit().
_SAMPLE_BISECT_CULPRIT_JSON = json.loads("""
{
"author": "author",
"cl": "cl",
"cl_date": "Thu Dec 08 01:25:35 2016",
"commit_info": "commit_info",
"email": "email",
"revisions_links": [],
"subject": "subject"
}
""")
# Aborted-reason strings matched against the report generator's output.
_ABORTED_NO_VALUES = ('Bisect cannot identify a culprit: No values were found '\
'while testing the reference range.')
_ABORTED_NO_OUTPUT = ('Bisect cannot identify a culprit: Testing the \"good\" '\
'revision failed: Test runs failed to produce output.')
class BisectReportTest(testing_common.TestCase):
def setUp(self):
    # Initialize the shared testing_common harness before each test.
    super(BisectReportTest, self).setUp()
def _AddTryJob(self, results_data, **kwargs):
    """Creates, stores and returns a TryJob entity with the given results."""
    job = try_job.TryJob(results_data=results_data, **kwargs)
    job.put()
    return job
def _Revisions(self, revisions):
    """Expands shorthand revision dicts into full revision_data entries.

    Each input dict needs at least 'commit'; 'failed', 'failure_reason',
    'num', 'result', 'mean', 'std_dev' and 'depot_name' are optional and
    default to the values in _SAMPLE_BISECT_REVISION_JSON.
    """
    revision_data = []
    for r in revisions:
        data = copy.deepcopy(_SAMPLE_BISECT_REVISION_JSON)
        data['commit_hash'] = r['commit']
        data['failed'] = r.get('failed', False)
        data['failure_reason'] = r.get('failure_reason', None)
        data['n_observations'] = r.get('num', 0)
        data['revision_string'] = r['commit']
        data['result'] = r.get('result', 'unknown')
        if 'mean' in r:
            # mean_value/std_dev are only set for revisions that produced values.
            data['mean_value'] = r.get('mean', 0)
            data['std_dev'] = r.get('std_dev', 0)
        data['depot_name'] = r.get('depot_name', 'chromium')
        revision_data.append(data)
    return revision_data
def _Culprit(self, **kwargs):
    """Returns a copy of the sample culprit JSON updated with kwargs."""
    culprit = copy.deepcopy(_SAMPLE_BISECT_CULPRIT_JSON)
    culprit.update(kwargs)
    return culprit
def _BisectResults(self, **kwargs):
    """Returns a copy of the sample results JSON updated with kwargs."""
    results = copy.deepcopy(_SAMPLE_BISECT_RESULTS_JSON)
    results.update(kwargs)
    return results
def testGetReport_CompletedWithCulprit(self):
    # A completed bisect with a culprit CL produces the "Suspected Commit"
    # section and marks the culprit row with "<--".
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
                {'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=self._Culprit(cl=102),
        good_revision=100, bad_revision=103)
    job = self._AddTryJob(results_data)
    log_with_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found with culprit</b>
Suspected Commit
Author : author
Commit : 102
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad <--
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_with_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithCulprit_Memory(self):
    # A memory benchmark (name contains "memory") adds a pointer to the
    # memory-regression diagnosis doc to the report.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
                {'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        command='src/tools/perf/run_benchmark system_health.memory_foo',
        culprit_data=self._Culprit(cl=102),
        good_revision=100, bad_revision=103)
    job = self._AddTryJob(results_data)
    log_with_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found with culprit</b>
Suspected Commit
Author : author
Commit : 102
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : system_health.memory_foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad <--
103 200 +- 0 10 bad
Please refer to the following doc on diagnosing memory regressions:
https://chromium.googlesource.com/chromium/src/+/master/docs/memory-infra/memory_benchmarks.md
To Run This Test
src/tools/perf/run_benchmark system_health.memory_foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_with_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithCulpritReturnCode(self):
    # A return_code bisect reports "Test failure found" and an "Exit Code"
    # column instead of a perf "Result" column.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 0, 'num': 10, 'result': 'good'},
                {'commit': 101, 'mean': 0, 'num': 10, 'result': 'good'},
                {'commit': 102, 'mean': 1, 'num': 10, 'result': 'bad'},
                {'commit': 103, 'mean': 1, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=self._Culprit(cl=102),
        good_revision=100, bad_revision=103, test_type='return_code')
    job = self._AddTryJob(results_data)
    expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Test failure found with culprit</b>
Suspected Commit
Author : author
Commit : 102
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Revision Exit Code N
100 0 +- 0 10 good
101 0 +- 0 10 good
102 1 +- 0 10 bad <--
103 1 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_CompletedWithoutCulprit(self):
    # No culprit found: untested revisions (101, 102) are omitted from the
    # table and the header says no regression was found.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 101},
                {'commit': 102},
                {'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=None,
        good_revision=100, bad_revision=103)
    job = self._AddTryJob(results_data)
    log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>NO Perf regression found</b>
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithoutCulpritBuildFailuresAfterReference(self):
    # A build failure inside the range prevents narrowing; report shows the
    # suspected commit range and a "build failure" row.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 102, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=None,
        good_revision=100, bad_revision=103)
    job = self._AddTryJob(results_data)
    log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Suspected Commit Range
2 commits in range
https://chromium.googlesource.com/chromium/src/+log/100..103
Revision Result N
100 100 +- 0 10 good
102 --- --- build failure
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedWithoutCulpritUnknownDepot(self):
    # Revisions from an unrecognized depot ('a') yield no commit-log URL,
    # just a request to contact the team.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 1, 'num': 10,
                 'depot_name': 'a', 'result': 'good'},
                {'commit': 101, 'mean': 1, 'num': 10,
                 'depot_name': 'a', 'result': 'good'},
                {'commit': 102, 'mean': 2, 'num': 10,
                 'depot_name': 'a', 'result': 'bad'},
                {'commit': 103, 'mean': 2, 'num': 10,
                 'depot_name': 'a', 'result': 'bad'},
            ]),
        culprit_data=None,
        good_revision=100, bad_revision=103)
    job = self._AddTryJob(results_data)
    expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 1 -> 2
Suspected Commit Range
1 commits in range
Unknown depot, please contact team to have this added.
Revision Result N
100 1 +- 0 10 good
101 1 +- 0 10 good
102 2 +- 0 10 bad
103 2 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_CompletedWithBuildFailures(self):
    # A build failure outside the final narrowed range does not block the
    # culprit; the failed revision (101) is simply dropped from the table.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 101, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 102, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 103, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 104, 'mean': 200, 'num': 10, 'result': 'bad'},
                {'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=self._Culprit(cl=104),
        good_revision=100, bad_revision=105)
    job = self._AddTryJob(results_data)
    log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found with culprit</b>
Suspected Commit
Author : author
Commit : 104
Date : Thu Dec 08 01:25:35 2016
Subject: subject
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
102 100 +- 0 10 good
103 100 +- 0 10 good
104 200 +- 0 10 bad <--
105 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_Completed_AbortedWithNoValues(self):
    # Aborting because the reference range produced no values results in a
    # "tests failed to produce values" report with no revision table.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100},
                {'commit': 105},
            ]),
        aborted=True, aborted_reason=_ABORTED_NO_VALUES,
        good_revision=100, bad_revision=105)
    job = self._AddTryJob(results_data)
    log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>NO Perf regression found, tests failed to produce values</b>
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_Completed_AbortedWithNoOutput(self):
    # Aborting because the "good" revision produced no output is reported
    # identically to the no-values abort above.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100},
                {'commit': 105},
            ]),
        aborted=True, aborted_reason=_ABORTED_NO_OUTPUT,
        good_revision=100, bad_revision=105)
    job = self._AddTryJob(results_data)
    log_without_culprit = r"""
=== BISECT JOB RESULTS ===
<b>NO Perf regression found, tests failed to produce values</b>
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(log_without_culprit, bisect_report.GetReport(job))
def testGetReport_CompletedCouldntNarrowCulprit(self):
    # Consecutive build failures (103, 104) leave a 3-commit suspect range;
    # the report links the commit log between the last good and first bad.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 101, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 102, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 103, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 104, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
                {'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=None,
        good_revision=100, bad_revision=106)
    job = self._AddTryJob(results_data)
    expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Suspected Commit Range
3 commits in range
https://chromium.googlesource.com/chromium/src/+log/102..105
Revision Result N
100 100 +- 0 10 good
102 100 +- 0 10 good
103 --- --- build failure
104 --- --- build failure
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_CompletedMoreThan10BuildFailures(self):
    # More than 10 consecutive build failures are elided in the table:
    # only the first and last failed rows are listed with a summary line.
    results_data = self._BisectResults(
        revision_data=self._Revisions(
            [
                {'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 101, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 102, 'mean': 100, 'num': 10, 'result': 'good'},
                {'commit': 103, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 104, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 105, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 106, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 107, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 108, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 109, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 110, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 111, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 112, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 113, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 114, 'failed': True, 'failure_reason': 'reason'},
                {'commit': 115, 'mean': 200, 'num': 10, 'result': 'bad'},
                {'commit': 116, 'mean': 200, 'num': 10, 'result': 'bad'},
            ]),
        culprit_data=None,
        good_revision=100, bad_revision=116)
    job = self._AddTryJob(results_data)
    expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Suspected Commit Range
13 commits in range
https://chromium.googlesource.com/chromium/src/+log/102..115
Revision Result N
100 100 +- 0 10 good
102 100 +- 0 10 good
103 --- --- build failure
--- --- --- too many build failures to list
114 --- --- build failure
115 200 +- 0 10 bad
116 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_FailedBisect(self):
    # status='failed' with no revision data yields the generic failure text.
    results_data = self._BisectResults(
        good_revision=100, bad_revision=110, status='failed')
    job = self._AddTryJob(results_data)
    expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect failed for unknown reasons</b>
Please contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_BisectWithWarnings(self):
    # Warnings collected during the job are listed as bullet points.
    results_data = self._BisectResults(
        status='failed', good_revision=100, bad_revision=103,
        warnings=['A warning.', 'Another warning.'])
    job = self._AddTryJob(results_data)
    expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect failed for unknown reasons</b>
Please contact the team (see below) and report the error.
The following warnings were raised by the bisect job:
 * A warning.
 * Another warning.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
    self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_BisectWithAbortedReason(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=103,
status='aborted', aborted_reason='Something terrible happened.')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect failed unexpectedly</b>
Bisect was aborted with the following:
Something terrible happened.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusStarted(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=106,
status='started')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect was unable to run to completion</b>
The bisect was able to narrow the range, you can try running with:
good_revision: 101
bad_revision : 105
If failures persist contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusStarted_FailureReason(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=106,
failure_reason='INFRA_FAILURE',
status='started')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect was unable to run to completion</b>
Error: INFRA_FAILURE
The bisect was able to narrow the range, you can try running with:
good_revision: 101
bad_revision : 105
If failures persist contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusInProgress(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 105, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 106, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=106,
status='in_progress')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect is still in progress, results below are incomplete</b>
The bisect was able to narrow the range, you can try running with:
good_revision: 101
bad_revision : 105
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 100 -> 200
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
105 200 +- 0 10 bad
106 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_StatusStartedDepotMismatch(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 1, 'num': 10,
'depot_name': 'a', 'result': 'good'},
{'commit': 101, 'mean': 1, 'num': 10,
'depot_name': 'a', 'result': 'good'},
{'commit': 102, 'mean': 2, 'num': 10,
'depot_name': 'b', 'result': 'bad'},
{'commit': 103, 'mean': 2, 'num': 10,
'depot_name': 'b', 'result': 'bad'},
]),
good_revision=100, bad_revision=103,
status='started')
job = self._AddTryJob(results_data)
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Bisect was unable to run to completion</b>
Please try rerunning the bisect.
If failures persist contact the team (see below) and report the error.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35% | 1 -> 2
Revision Result N
100 1 +- 0 10 good
101 1 +- 0 10 good
102 2 +- 0 10 bad
103 2 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!"""
self.assertEqual(expected_output, bisect_report.GetReport(job))
def testGetReport_WithBugIdBadBisectFeedback(self):
results_data = self._BisectResults(
revision_data=self._Revisions(
[
{'commit': 100, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 101, 'mean': 100, 'num': 10, 'result': 'good'},
{'commit': 102, 'mean': 200, 'num': 10, 'result': 'bad'},
{'commit': 103, 'mean': 200, 'num': 10, 'result': 'bad'},
]),
good_revision=100, bad_revision=103, bug_id=6789)
job = self._AddTryJob(results_data, bug_id=6789)
job_id = job.key.id()
expected_output = r"""
=== BISECT JOB RESULTS ===
<b>Perf regression found but unable to narrow commit range</b>
Build failures prevented the bisect from narrowing the range further.
Bisect Details
Configuration: staging_android_nexus5X_perf_bisect
Benchmark : foo
Metric : Total/Score
Change : 7.35%% | 100 -> 200
Suspected Commit Range
1 commits in range
https://chromium.googlesource.com/chromium/src/+log/101..102
Revision Result N
100 100 +- 0 10 good
101 100 +- 0 10 good
102 200 +- 0 10 bad
103 200 +- 0 10 bad
To Run This Test
src/tools/perf/run_benchmark foo
Debug Info
https://test-rietveld.appspot.com/200039
Is this bisect wrong?
https://chromeperf.appspot.com/bad_bisect?try_job_id=%s
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information addressing perf regression bugs. For feedback,
| / \ | file a bug with component Speed>Bisection. Thank you!""" % job_id
self.assertEqual(expected_output, bisect_report.GetReport(job))
if __name__ == '__main__':
unittest.main()
|
sahiljain/catapult
|
dashboard/dashboard/bisect_report_test.py
|
Python
|
bsd-3-clause
| 32,498
|
[
"VisIt"
] |
615af7f1c151965ef373ed407326af268fa2e21c54fdbdf7ddc53013779369f7
|
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as lin
from gp_kernels import kernel_gp, kernel_gaussian
def gp_loglik(K_test, K_test_train, K_train, test_t, train_t, sig2, beta):
"""Gaussian process log-likelihood calculation."""
# Kernel transformation
A = kernel_gp(K_train, sig2, beta)
B = kernel_gp(K_test, sig2, beta)
C = kernel_gaussian(K_test_train, sig2)
# GP
Q = C.dot(lin.pinv(A))
Btt = B-Q.dot(C.T)
gp_prediction = Q.dot(train_t)
# Log-likelihood
gp_dist = gp_prediction - test_t
gp_log_test = -0.5*np.log(lin.det(Btt)) - 0.5*gp_dist.dot(lin.pinv(Btt)).dot(gp_dist)
gp_std_test = np.sqrt(np.abs(np.diag(Btt)))
return((gp_log_test, gp_std_test, gp_prediction))
|
aaskov/nsp
|
gp/gp_loglik.py
|
Python
|
mit
| 750
|
[
"Gaussian"
] |
834ab3bbd38428f190f0604e2808cf34ee340a5a02dae34797461adbf37cd483
|
#!/usr/bin/env python
# Python example script that uses the vtkRIinterface to create an instance of the
# R interpreter and pass it some data, modify the data in R, and pass the result
# back to VTK.
# VTK must be built with VTK_USE_GNU_R turned on for this example to work!
from vtk import *
import math
if __name__ == "__main__":
# Create a character buffer to store R output echoed to the terminal
Routput_buffer = 1000*' '
# Create an instance of the R interpreter. Note, rinterface is not a VTK pipeline object.
rinterface = vtkRInterface()
# Create an array of 10 doubles in VTK and fill it with some data
darray = vtkDoubleArray()
for d in range(0, 10):
darray.InsertNextValue(math.sqrt(d));
# Tell R to store its terminal output in our python buffer
rinterface.OutputBuffer(Routput_buffer, len(Routput_buffer))
# Copy the array of doubles into the R interpreter as a matrix called d, with 10 rows and 1 column.
rinterface.AssignVTKDataArrayToRVariable(darray, "d")
# Execute R command to echo contents of d to the terminal.
rinterface.EvalRscript("d",1)
# Execute a command on the R interpreter to create a matrix b with 10 rows and 1 column.
rinterface.EvalRscript("b = matrix(sqrt(10:19),10,1)",1)
# Execute a command on the R interpreter to column append b to d.
rinterface.EvalRscript("d = cbind(d,b)",1);
# Copy matrix d from R back to VTK as an array of doubles in bdarray.
bdarray = rinterface.AssignRVariableToVTKDataArray("d")
# Display the contents of bdarray.
print "\n\nContents of bdarray copied to VTK from R\n\n"
for i in range(bdarray.GetNumberOfTuples()):
t = bdarray.GetTuple2(i)
print'%6.4f %6.4f' % (t[0], t[1])
# Display the contents of R output echoed to the terminal.
print "\n\nOutput of R interpreter\n\n"
print Routput_buffer
|
msmolens/VTK
|
Examples/Infovis/Python/Rinterface.py
|
Python
|
bsd-3-clause
| 1,841
|
[
"VTK"
] |
481b075cdce52cc2610179a7dad81c6476d7dc998ac8abbf7ee6ed0494983888
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RStatmod(RPackage):
"""Statistical Modeling
A collection of algorithms and functions to aid statistical modeling.
Includes limiting dilution analysis (aka ELDA), growth curve comparisons,
mixed linear models, heteroscedastic regression, inverse-Gaussian
probability calculations, Gauss quadrature and a secure convergence
algorithm for nonlinear models. Also includes advanced generalized linear
model functions including Tweedie and Digamma distributional families and a
secure convergence algorithm."""
homepage = "https://cloud.r-project.org/package=statmod"
url = "https://cloud.r-project.org/src/contrib/statmod_1.4.30.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/statmod"
version('1.4.35', sha256='de5e428f81c306849af47b9ae583362855e166b1da62893734f1154cb5b3f8fe')
version('1.4.32', sha256='2f67a1cfa66126e6345f8a40564a3077d08f1748f17cb8c8fb05c94ed0f57e20')
version('1.4.30', sha256='9d2c1722a85f53623a9ee9f73d835119ae22ae2b8ec7b50d675401e314ea641f')
depends_on('r@3.0.0:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-statmod/package.py
|
Python
|
lgpl-2.1
| 1,321
|
[
"Gaussian"
] |
e7dd01307b0d96de4204dbd163a9bc25d94a4cb718d1f91db7f478861e44d44b
|
"""
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
|
walterreade/scikit-learn
|
examples/mixture/plot_gmm_selection.py
|
Python
|
bsd-3-clause
| 3,271
|
[
"Gaussian"
] |
4f13008dd343a8ae90eba67c9cd6f1cde4df25ec65275c740bf45e98a03762fa
|
#coding:utf-8
## Spark Application - execute with spark-submit
## Imports
import csv
import os
import time
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.tsa.stattools as sts
from StringIO import StringIO
from pyspark import SparkConf, SparkContext, SQLContext
## Module Constants
APP_NAME = "ADF Spark Application"
TABLE_STOCKS_BASIC = 'stock_basic_list'
TABLE_STOCKS_PAIRS = 'stock_pairing_list'
TABLE_WEIGHT = 'stock_linrreg.csv'
DownloadDir = './stockdata/'
weightdict = {} #previous weight dict broadcast
## Closure Functions
#date example 2011/10/13
tudateparser = lambda dates: pd.datetime.strptime(dates, '%Y-%m-%d')
def save_stk_pairings():
stock_list = pd.read_csv(TABLE_STOCKS_BASIC + '.csv', dtype=str)
code = stock_list['code']
reindexed_code = code.reset_index(drop=True)
reindexed_code = reindexed_code[100:200]
reindexed_code = reindexed_code.reset_index(drop=True)
stockPool = pd.DataFrame(columns=['code1','code2'], dtype=str)
print len(reindexed_code)
for i in range(len(reindexed_code)):
for j in range(i+1, len(reindexed_code)):
stockPool.append({'code1':str(reindexed_code[i]), \
'code2':str(reindexed_code[j])}, ignore_index=True)
stockPool.to_csv(TABLE_STOCKS_PAIRS + '.csv', header=False, index=False)
# input: int or string
# output: string
def getSixDigitalStockCode(code):
strZero = ''
for i in range(len(str(code)), 6):
strZero += '0'
return strZero + str(code)
def split(line):
"""
Operator function for splitting a line with csv module
"""
reader = csv.reader(StringIO(line))
return reader.next()
# 功能:从csv文件中读取一个字典
# 输入:文件名称,keyIndex,valueIndex
def readDictCSV(fileName="", dataDict = {}):
if not os.path.exists(fileName) :
return {}
with open(fileName, "r") as csvFile:
reader = csv.reader(csvFile)
for row in reader:
dataDict[str(row[0])] = [float(row[1]), float(row[2])]
csvFile.close()
return dataDict
# 功能:将一字典写入到csv文件中
# 输入:文件名称,数据字典
def writeDictCSV(fileName="", dataDict={}):
with open(fileName, "wb") as csvFile:
csvWriter = csv.writer(csvFile)
for k,v in dataDict.iteritems():
csvWriter.writerow([str(k), v[0], v[1]]) # 有冲突问题
csvFile.close()
def writeRddCSV(fileName, rdd, sqlContext):
df = sqlContext.createDataFrame(rdd)
#print df.first()
#df.write.format("com.databricks.spark.csv").save(fileName)
df.toPandas().to_csv(fileName, header=False, index=False) # 有冲突问题
'''
with open(fileName, "wb") as csvFile:
csvWriter = csv.writer(csvFile)
rdd.foreach(lambda elem: writeElem(csvWriter, elem))
csvFile.close()
'''
def writeElem(csvWriter, elem):
csvWriter.writerow(elem[0], elem[1][1], elem[1][2])
def toCSVLine(data):
return ','.join(str(d) for d in data)
'''
linear regression with Stochastic Gradient Decent mothod
'''
def linregSGD(x, y, a, b):
# -------------------------------------------随机梯度下降算法----------------------------------------------------------
# 两种终止条件
loop_max = 10000 # 最大迭代次数(防止死循环)
epsilon = 1e-6
alpha = 0.001 # 步长(注意取值过大会导致振荡,过小收敛速度变慢)
diff = 0.
errorA = a
errorB = b
count = 0 # 循环次数
finish = 0 # 终止标志
m = len(x) # 训练数据点数目
while count < loop_max:
#count += 1
# 遍历训练数据集,不断更新权值
for i in range(m):
count += 1
diff = a + b * x[i] - y[i] # 训练集代入,计算误差值
# 采用随机梯度下降算法,更新一次权值只使用一组训练数据
a = a - alpha * diff
b = b - alpha * diff * x[i]
if ((a-errorA)*(a-errorA) + (b-errorB)*(b-errorB)) < epsilon:
# 终止条件:前后两次计算出的权向量的绝对误差充分小
finish = 1
break
else:
errorA = a
errorB = b
if finish == 1: # 跳出循环
break
#print 'loop count = %d' % count, '\tweight:[%f, %f]' % (a, b)
return finish, a, b
def adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b):
if len(closeprice_of_1) >= 10 and len(closeprice_of_2) >= 10:
alpha, beta, finish = linregSGD(x=closeprice_of_1, y=closeprice_of_2, a=a, b=b)
if not finish:
return False, a, b
spread = closeprice_of_2 - closeprice_of_1*beta - alpha
spread.dropna()
try:
adfstat, pvalue, usedlag, nobs, critvalues, icbest = sts.adfuller(x=spread)
except Exception, e:
print "exception"
return False, 0, 0
return adfstat < critvalues['5%'], alpha, beta
else:
return False, 0, 0
'''
print adfstat
for(k, v) in critvalues.items():
print k, v
'''
def load_process(code1, code2, start_date, end_date):
m = getSixDigitalStockCode(code1)
n = getSixDigitalStockCode(code2)
file1 = DownloadDir + "h_kline_" + m + ".csv"
file2 = DownloadDir + "h_kline_" + n + ".csv"
if (not os.path.exists(file1)) or (not os.path.exists(file1)):
return {},{}
kline1 = pd.read_csv(file1, parse_dates=['date'], index_col='date', date_parser=tudateparser)
kline2 = pd.read_csv(file2, parse_dates=['date'], index_col='date', date_parser=tudateparser)
#print kline1.head()
price_of_1 = kline1[end_date:start_date]
price_of_2 = kline2[end_date:start_date]
# regroup quotation according to date index
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)
closeprice_of_2 = combination['closer'].reset_index(drop=True)
return closeprice_of_1, closeprice_of_2
def adfuller_check_price_sgd(code1, code2, start_date = '2013-10-10', end_date = '2014-09-30'):
closeprice_of_1, closeprice_of_2 = load_process(code1, code2, start_date, end_date)
if len(closeprice_of_1)<=1 or len(closeprice_of_1)<=1:
return
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
return result
def adfuller_check_sgd_withweight(code1, code2, w, start_date = '2013-10-10', end_date = '2014-09-30'):
closeprice_of_1, closeprice_of_2 = load_process(code1, code2, start_date, end_date)
if len(closeprice_of_1)<=1 or len(closeprice_of_1)<=1:
return
if not w == 0 : # get previous weight
a = w[1]
b = w[2]
print w
else:
#print "not find w"
np.random.seed(2)
a, b = np.random.randn(2)
result = adfuller_check_sgd(closeprice_of_1, closeprice_of_2, a, b)
return code1+code2, np.float64(result[0]).item(), np.float64(result[1]).item(), np.float64(result[2]).item()
def adfuller_check(code1, code2, start_date = '2013-10-10', end_date = '2014-09-30'):
m = getSixDigitalStockCode(code1)
n = getSixDigitalStockCode(code2)
file1 = DownloadDir + "h_kline_" + m + ".csv"
file2 = DownloadDir + "h_kline_" + n + ".csv"
if not os.path.exists(file1) or not os.path.exists(file1):
return False
kline1 = pd.read_csv(file1, parse_dates=['date'], index_col='date', date_parser=tudateparser)
kline2 = pd.read_csv(file2, parse_dates=['date'], index_col='date', date_parser=tudateparser)
#print kline1.head()
price_of_1 = kline1[end_date:start_date]
price_of_2 = kline2[end_date:start_date]
combination = price_of_1.join(price_of_2, how='inner', lsuffix='l', rsuffix='r')
combination.dropna()
closeprice_of_1 = combination['closel'].reset_index(drop=True)
closeprice_of_2 = combination['closer'].reset_index(drop=True)
if len(closeprice_of_1) != 0 and len(closeprice_of_2) != 0:
X = sm.add_constant(closeprice_of_1)
model = sm.OLS(endog=closeprice_of_2, exog=X)
result = model.fit()
spread = result.resid
stat = sts.adfuller(x=spread)
adf = stat[0]
pvalue = stat[1]
critical_values = stat[4]
pair = m + '+' + n
return adf < critical_values['5%']
def adfuller_check2(row):
#return adfuller_check(row[0], row[1])
return adfuller_check_price_sgd(row[0], row[1], start_date = '2013-10-10', end_date = '2015-09-30')
def adfuller_check3(code1, code2, w):
return adfuller_check_sgd_withweight(code1, code2, w, start_date = '2013-10-10', end_date = '2014-12-30')
def check_all_dir(sc):
#readDictCSV(TABLE_WEIGHT, weightdict) # load weight file
# Broadcast the lookup dictionary to the cluster
#weight_lookup = sc.broadcast(weightdict)
print "starting adf checking"
stockPool = sc.textFile(TABLE_STOCKS_PAIRS + '.csv').map(split)
#print stockPool.first()
# column seems to be an array
adfResult = stockPool.map(adfuller_check2)
#adfResult = stockPool.filter(adfuller_check2)
#adfResult = stockPool.map(lambda f: (str(f[0])+str(f[1]), adfuller_check3(f[0], f[1], weight_lookup.value[str(f[0])+str(f[1])])))
#adfResult = stockPool.map(lambda f: (adfuller_check3(f[0], f[1], weight_lookup.value.get(str(f[0])+str(f[1]), 0))))
#adfResult.collect()
print adfResult.first()
print "%d <<<pairings" % adfResult.count()
## Main functionality
def main(sc):
time1 = time.time()
#adfuller_check2("601002", "600815")
# check all stock pairing in list book
#save_stk_pairings()
check_all_dir(sc)
time2 = time.time()
print "running time(s): ", time2-time1
if __name__ == "__main__":
# Configure Spark
conf = SparkConf().setAppName(APP_NAME)
conf = conf.setMaster("local[*]")
sc = SparkContext(conf=conf)
# Execute Main functionality
main(sc)
|
lionelliang/PairTradingSpark
|
checkpairtradingSpark.py
|
Python
|
gpl-2.0
| 10,134
|
[
"ADF"
] |
2cd684953dcc0b7ab245a26b027d99f0d3cb2dbc654963575fb859e0e00e4722
|
# $HeadURL: $
'''
:mod: RssConfiguration
Module that collects utility functions.
'''
from DIRAC import S_OK
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ResourceStatusSystem.PolicySystem.StateMachine import RSSMachine
from DIRAC.ResourceStatusSystem.Utilities import Utils
__RCSID__ = '$Id: $'
## RssConfiguration config path ################################################
_rssConfigPath = 'ResourceStatus'
## RssConfiguration ############################################################
class RssConfiguration:
'''
RssConfiguration::
{
Config:
{
State : Active | InActive,
Cache : 300,
FromAddress : 'email@site.domain'
StatusType :
{
default : all,
StorageElement: ReadAccess, WriteAccess, CheckAccess, RemoveAccess
}
}
}
'''
def __init__( self ):
self.opsHelper = Operations()
def getConfigState( self, default = 'InActive' ):
'''
Gets from <pathToRSSConfiguration>/Config the value of State
'''
return self.opsHelper.getValue( '%s/Config/State' % _rssConfigPath, default )
def getConfigCache( self, default = 300 ):
'''
Gets from <pathToRSSConfiguration>/Config the value of Cache
'''
return self.opsHelper.getValue( '%s/Config/Cache' % _rssConfigPath, default )
def getConfigFromAddress( self, default = None ):
'''
Gets from <pathToRSSConfiguration>/Config the value of FromAddress
'''
return self.opsHelper.getValue( '%s/Config/FromAddress' % _rssConfigPath, default )
def getConfigStatusType( self, elementType = None ):
'''
Gets all the status types per elementType, if not given, it takes default
from CS. If not, hardcoded variable DEFAULT.
'''
_DEFAULTS = ( 'all', )
res = self.opsHelper.getOptionsDict( '%s/Config/StatusTypes' % _rssConfigPath )
if res[ 'OK' ]:
if elementType in res[ 'Value' ]:
return List.fromChar( res[ 'Value' ][ elementType ] )
if 'default' in res[ 'Value' ]:
return List.fromChar( res[ 'Value' ][ 'default' ] )
return _DEFAULTS
## RssConfiguration/Policies ###################################################
def getPolicies():
'''
Returns from the OperationsHelper: <_rssConfigPath>/Policies
'''
return Utils.getCSTree( '%s/Policies' % _rssConfigPath )
## RssConfiguration/PolicyActions ##############################################
def getPolicyActions():
'''
Returns from the OperationsHelper: <_rssConfigPath>/PolicyActions
'''
return Utils.getCSTree( '%s/PolicyActions' % _rssConfigPath )
## RssConfiguration/notificationGroups ##############################################
def getnotificationGroups():
'''
Returns from the OperationsHelper: <_rssConfigPath>/PolicyActions
'''
return Utils.getCSTree( '%s/Config' % _rssConfigPath )
## RssConfiguration/Notifications ##############################################
def getNotifications():
'''
Returns from the OperationsHelper: <_rssConfigPath>/Notification
'''
return Utils.getCSTree( '%s/Notification' % _rssConfigPath )
## RssConfiguration/GeneralConfig ##############################################
def getValidElements():
'''
Returns from the OperationsHelper: <_rssConfigPath>/GeneralConfig/ValidElements
'''
_DEFAULTS = ( 'Site', 'Resource', 'Node', 'Component' )
# result = Operations().getValue( '%s/GeneralConfig/ValidElements' % _rssConfigPath )
# if result is not None:
# return List.fromChar( result )
return _DEFAULTS
def getValidStatus():
'''
Returns a list of statuses as were defined on the RSS(State)Machine
'''
validStatus = RSSMachine( None ).getStates()
return S_OK( validStatus )
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
arrabito/DIRAC
|
ResourceStatusSystem/Utilities/RssConfiguration.py
|
Python
|
gpl-3.0
| 4,088
|
[
"DIRAC"
] |
4b1d821db08eb841c74643d38f4494ea6983d974fee4d8a53ec40ff1d85093b7
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from ehs_esports.home.views import HomeView
from ehs_esports.sms.views import SMSView
urlpatterns = [
url(r'^$', HomeView.as_view(), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include('ehs_esports.users.urls', namespace='users')),
url(r'^games/', include('ehs_esports.games.urls', namespace='games')),
url(r'^teams/', include('ehs_esports.teams.urls', namespace='teams')),
url(r'^accounts/', include('allauth.urls')),
url(r'^sms/', SMSView.as_view(), name='sms')
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
|
ReilySiegel/ehs_esports
|
config/urls.py
|
Python
|
mit
| 1,685
|
[
"VisIt"
] |
4f9553a37d57fb37ea8adf762916719fcf0ddd018543d78cbe58ec8effda3cd9
|
#!/usr/bin/env python
""" This script instantiate a DFC client against a given service,
and hammers it with read request (listDirectory) for a given time.
It produces two files : time.txt and clock.txt which contain time measurement,
using time.time and time.clock (see respective doc)
It assumes that the DB has been filled with the scripts in generateDB
Tunable parameters:
* maxDuration : time it will run. Cannot be too long, otherwise job
is killed because staled
* port: list of ports on which we can find a service (assumes all the service running on one machine)
* hostname: name of the host hosting the service
* readDepth: depth of the path when reading
The depths are to be put in relation with the depths you used to generate the db
"""
import DIRAC
DIRAC.initialize() # Initialize configuration
import os
import random
import time
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
port = random.choice([9196, 9197, 9198, 9199])
hostname = "yourmachine.somewhere.something"
servAddress = "dips://%s:%s/DataManagement/FileCatalog" % (hostname, port)
maxDuration = 1800 # 30mn
fc = FileCatalogClient(servAddress)
# lfc size = 9, huge db small req = 12, huge db big = 6
readDepth = 12
f = open("time.txt", "w")
f2 = open("clock.txt", "w")
f.write("QueryStart\tQueryEnd\tQueryTime\textra(port %s)\n" % port)
f2.write("QueryStart\tQueryEnd\tQueryClock\textra(port %s)\n" % port)
start = time.time()
done = False
while not done:
# Between 0 and 3 because in generate we have 4 subdirs per dir. Adapt :-)
rndTab = [random.randint(0, 3) for i in range(readDepth)]
dirPath = "/" + "/".join(map(str, rndTab))
before = time.time()
beforeC = time.clock()
res = fc.listDirectory(dirPath)
afterC = time.clock()
after = time.time()
queryTime = after - before
queryTimeC = afterC - beforeC
if not res["OK"]:
extra = res["Message"]
else:
out = res["Value"]["Successful"][dirPath]
extra = "%s %s %s" % (dirPath, len(out["Files"]), len(out["SubDirs"]))
f.write("%s\t%s\t%s\t%s\n" % (before, after, queryTime, extra))
f.flush()
os.fsync(f)
f2.write("%s\t%s\t%s\t%s\n" % (beforeC, afterC, queryTimeC, extra))
f2.flush()
os.fsync(f2)
if time.time() - start > maxDuration:
done = True
f.close()
f2.close()
|
DIRACGrid/DIRAC
|
tests/Performance/DFCPerformance/readPerf.py
|
Python
|
gpl-3.0
| 2,409
|
[
"DIRAC"
] |
168e850278ff3fa2ebcfd1f4a37ede51d0adfda25f5e47c26cb4abbdbd882cac
|
import bge
from mouse import *
from keyboard import *
from building import *
from civilisation import *
from ressource import *
import random
class Game():
    """Global match state: players, civilisations, map resources,
    selections and in-flight bullets."""

    def __init__(self, players):
        """Create the game state for `players` players and seed the RNG
        so that map generation is reproducible."""
        self.players = players
        self.civilisations = []
        self.ressources = {
            "mines": [],
            "veins": [],
            "trees": []
        }
        self.buildings = []
        self.selected_units = []
        self.bullets = []
        # Fixed seed: every run generates the same tree layout.
        self.seed = 7
        random.seed(self.seed)

    def game_init(self):
        """Populate the scene: one mine and one civilisation per player,
        one crystal vein, and two 15-tree forests."""
        scene = bge.logic.getCurrentScene()
        # NOTE(review): only two mine positions are defined, so more than
        # two players would raise IndexError -- confirm intended maximum.
        mines_loc = [[-8, -11, 1], [55, -11, 1]]
        for i in range(self.players):
            # `self.civilisation` (the last one created) is kept for
            # backward compatibility with any external reader.
            self.civilisation = Civilisation(i)
            self.civilisations.append(self.civilisation)
            scene.objects['SpawnM'].worldPosition = mines_loc[i]
            self.ressources["mines"].append(
                Mine(scene.addObject('Mine', scene.objects['SpawnM'])))
        for civ in self.civilisations:
            civ.civ_init()
        scene.objects['SpawnC'].worldPosition = [22, 13, 1]
        self.ressources["veins"].append(
            Crystal(scene.addObject('Crystal', scene.objects['SpawnC'])))
        # The two forests only differ by their x-range; the duplicated
        # loops were folded into one helper.
        self._spawn_trees(scene, -10, 15)
        self._spawn_trees(scene, 30, 55)

    def _spawn_trees(self, scene, x_min, x_max, count=15):
        """Spawn `count` trees at random positions with x in
        [x_min, x_max] and y in [3, 30], each with a random rotation.
        The order of random.* calls matches the original code so the
        seeded layout is unchanged."""
        for _ in range(count):
            pos_x = random.randint(x_min, x_max)
            pos_y = random.randint(3, 30)
            scene.objects['SpawnT'].worldPosition[0] = pos_x
            scene.objects['SpawnT'].worldPosition[1] = pos_y
            new_tree = Tree(scene.addObject('Tree', scene.objects['SpawnT']))
            rotation = random.randint(0, 5)
            new_tree.applyRotation([0, 0, rotation], False)
            self.ressources["trees"].append(new_tree)

    def game_update(self):
        """Per-frame update: advance bullets, then update civilisations."""
        if len(self.bullets) > 0:
            for obj in self.bullets:
                obj.trajectory(obj.worldPosition[0], obj.worldPosition[1])
        for civ in self.civilisations:
            civ.civ_update()
|
folkrav/rts-b51
|
src/projectX/game.py
|
Python
|
gpl-3.0
| 2,449
|
[
"CRYSTAL"
] |
b950c64dc8702fc9bd32c78c95abc21d5fa9d8cc9c73f129bc478f4ff2d75937
|
from . import base
from .. import stats
from .. import items
from .. import dialogue
from .. import context
from .. import spells
from .. import invocations
from .. import effects
from .. import animobs
from .. import targetarea
from .. import aibrain
import random
from . import animals
from . import undead
from .. import enchantments
from . import treasuretype
from . import abilities
# *******************************
# *** ENCOUNTER LEVEL 1 ***
# *******************************

# Encounter level 1 humanoid (thief-type habitat flags).
class Hurthling( base.Monster ):
    name = "Hurthling"
    statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 8, stats.REFLEXES: 14, \
        stats.INTELLIGENCE: 10, stats.PIETY: 10, stats.CHARISMA: 10,
        stats.NATURAL_DEFENSE: 5 }
    SPRITENAME = "monster_people.png"
    FRAME = 5
    TEMPLATES = ()
    MOVE_POINTS = 10
    VOICE = dialogue.voice.HURTHISH
    HABITAT = ( context.HAB_EVERY, context.HAB_FOREST, context.SET_EVERY,
        context.MAP_WILDERNESS,
        context.MTY_HUMANOID, context.MTY_THIEF, context.GEN_KINGDOM )
    ENC_LEVEL = 1
    TREASURE = treasuretype.Low()
    ATTACK = items.Attack( (1,4,0), element = stats.RESIST_PIERCING )

    def init_monster( self ):
        # One generic humanoid level.
        self.levels.append( base.Humanoid( 1, self ) )

# *******************************
# *** ENCOUNTER LEVEL 2 ***
# *******************************

# Encounter level 2 fighter-type humanoid.
class NoviceWarrior( base.Monster ):
    name = "Novice Warrior"
    statline = { stats.STRENGTH: 13, stats.TOUGHNESS: 13, stats.REFLEXES: 10, \
        stats.INTELLIGENCE: 10, stats.PIETY: 10, stats.CHARISMA: 10, \
        stats.COUNTER_ATTACK: 5 }
    SPRITENAME = "monster_people.png"
    FRAME = 0
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED,
        context.MTY_HUMANOID, context.MTY_FIGHTER,
        context.GEN_CHAOS, context.GEN_KINGDOM, context.GEN_GIANT )
    ENC_LEVEL = 2
    TREASURE = treasuretype.Low()
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 2, self ) )

# Encounter level 2 thief; COMPANIONS lists monsters it is presumably
# generated alongside.
class NoviceThief( base.Monster ):
    name = "Novice Thief"
    statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 10, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 12, stats.PIETY: 10, stats.CHARISMA: 10,
        stats.STEALTH: 10 }
    SPRITENAME = "monster_people.png"
    FRAME = 2
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
        context.DES_CIVILIZED,
        context.MTY_HUMANOID, context.MTY_THIEF )
    ENC_LEVEL = 2
    TREASURE = treasuretype.Standard()
    COMPANIONS = (NoviceWarrior,)
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_PIERCING )

    def init_monster( self ):
        # NOTE(review): one humanoid level despite ENC_LEVEL 2 -- confirm
        # this is balanced intentionally (e.g. by STEALTH).
        self.levels.append( base.Humanoid( 1, self ) )
# *******************************
# *** ENCOUNTER LEVEL 3 ***
# *******************************

# Encounter level 3 priest-type spellcaster with a technical combat AI.
class NovicePriest( base.Monster ):
    name = "Novice Priest"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 13, stats.REFLEXES: 10, \
        stats.INTELLIGENCE: 13, stats.PIETY: 15, stats.CHARISMA: 12 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 0
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED, context.DES_SOLAR, context.DES_AIR,
        context.MTY_HUMANOID, context.MTY_PRIEST, context.GEN_KINGDOM )
    ENC_LEVEL = 3
    TREASURE = treasuretype.Standard( ( items.scrolls.Rank1Scroll, items.scrolls.Rank2Scroll ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (NoviceWarrior,)
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_CRUSHING )
    TECHNIQUES = ( spells.waterspells.FREEZE_FOE, spells.airspells.SILENCE,
        spells.solarspells.BLESSING, spells.solarspells.MINOR_CURE )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 3, self ) )

# Encounter level 3 mage; gains Spellcaster levels rather than Humanoid.
class NoviceMage( base.Monster ):
    name = "Novice Mage"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 10, stats.REFLEXES: 13, \
        stats.INTELLIGENCE: 15, stats.PIETY: 13, stats.CHARISMA: 12 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 21
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED, context.DES_LUNAR, context.DES_FIRE,
        context.MTY_HUMANOID, context.MTY_MAGE, context.GEN_KINGDOM )
    ENC_LEVEL = 3
    TREASURE = treasuretype.Standard( ( items.scrolls.Rank1Scroll, items.scrolls.Rank2Scroll ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (NovicePriest,NoviceWarrior)
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_CRUSHING )
    TECHNIQUES = ( spells.firespells.FIRE_BOLT, spells.lunarspells.CURSE,
        spells.magespells.FIRE_ARC, spells.lunarspells.SLEEP )

    def init_monster( self ):
        self.levels.append( base.Spellcaster( 3, self ) )

# Encounter level 3 wilderness fighter/thief.
class Highwayman( base.Monster ):
    name = "Highwayman"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 10, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 10, stats.PIETY: 10, stats.CHARISMA: 10 }
    SPRITENAME = "monster_people.png"
    FRAME = 4
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.SET_EVERY, context.SET_RENFAN,
        context.MAP_WILDERNESS,
        context.DES_CIVILIZED,
        context.MTY_HUMANOID, context.MTY_FIGHTER, context.MTY_THIEF )
    ENC_LEVEL = 3
    TREASURE = treasuretype.Standard()
    COMPANIONS = (NoviceThief,)
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 3, self ) )
# *******************************
# *** ENCOUNTER LEVEL 4 ***
# *******************************

# Encounter level 4 nature caster; appears with animal companions.
class NoviceDruid( base.Monster ):
    name = "Novice Druid"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 13, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 14, stats.PIETY: 14, stats.CHARISMA: 12 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 8
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_FOREST, context.SET_EVERY,
        context.MAP_WILDERNESS,
        context.DES_SOLAR, context.DES_EARTH, context.DES_FIRE,
        context.MTY_HUMANOID, context.MTY_PRIEST, context.GEN_NATURE )
    ENC_LEVEL = 4
    TREASURE = treasuretype.Standard( ( items.scrolls.Rank1Scroll, items.scrolls.Rank2Scroll ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (animals.Wolf,animals.BlackBear)
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_CRUSHING )
    TECHNIQUES = ( spells.solarspells.MINOR_CURE, spells.earthspells.CALL_CRITTER,
        spells.earthspells.ACID_BOLT, spells.earthspells.BEASTLY_MIGHT )

    def init_monster( self ):
        # Split progression: two humanoid plus two spellcaster levels.
        self.levels.append( base.Humanoid( 2, self ) )
        self.levels.append( base.Spellcaster( 2, self ) )

# Encounter level 4 chaos fighter/thief leader type.
class Bushwhacker( base.Monster ):
    name = "Bushwhacker"
    statline = { stats.STRENGTH: 13, stats.TOUGHNESS: 13, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 12, stats.PIETY: 12, stats.CHARISMA: 12,
        stats.NATURAL_DEFENSE: 10 }
    SPRITENAME = "monster_people.png"
    FRAME = 1
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
        context.MTY_HUMANOID, context.MTY_FIGHTER, context.MTY_THIEF, context.MTY_LEADER,
        context.GEN_CHAOS )
    ENC_LEVEL = 4
    TREASURE = treasuretype.Standard()
    COMPANIONS = (Highwayman,)
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 4, self ) )

# *******************************
# *** ENCOUNTER LEVEL 5 ***
# *******************************

# Encounter level 5 undead-themed mage; appears with undead companions.
class Necromancer( base.Monster ):
    name = "Necromancer"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 13, \
        stats.INTELLIGENCE: 15, stats.PIETY: 13, stats.CHARISMA: 12 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 23
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED, context.DES_LUNAR,
        context.MTY_HUMANOID, context.MTY_MAGE, context.GEN_UNDEAD )
    ENC_LEVEL = 5
    TREASURE = treasuretype.HighItems( ( items.scrolls.Rank2Scroll, items.scrolls.Rank3Scroll ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (undead.Ghoul,undead.SkeletonWithMorningstar)
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_CRUSHING )
    TECHNIQUES = ( spells.lunarspells.ENERVATE, spells.necrospells.ACID_CLOUD,
        spells.necrospells.TOUCH_OF_DEATH, spells.necrospells.RAISE_CORPSE )

    def init_monster( self ):
        self.levels.append( base.Spellcaster( 5, self ) )

# Encounter level 5 veteran fighter; drops a sword.
class Warrior( base.Monster ):
    name = "Warrior"
    statline = { stats.STRENGTH: 14, stats.TOUGHNESS: 14, stats.REFLEXES: 13, \
        stats.INTELLIGENCE: 10, stats.PIETY: 12, stats.CHARISMA: 12,
        stats.NATURAL_DEFENSE: 10, stats.COUNTER_ATTACK: 15 }
    SPRITENAME = "monster_people.png"
    FRAME = 9
    TEMPLATES = ()
    MOVE_POINTS = 8
    HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
        context.DES_CIVILIZED,
        context.MTY_HUMANOID, context.MTY_FIGHTER, context.MTY_LEADER,
        context.GEN_KINGDOM )
    ENC_LEVEL = 5
    TREASURE = treasuretype.Standard((items.SWORD,))
    ATTACK = items.Attack( (1,10,0), element = stats.RESIST_SLASHING )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 5, self ) )
# *******************************
# *** ENCOUNTER LEVEL 6 ***
# *******************************

# Encounter level 6 priest leader with support/attack spell mix.
class Priest( base.Monster ):
    name = "Priest"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 14, stats.REFLEXES: 10, \
        stats.INTELLIGENCE: 13, stats.PIETY: 16, stats.CHARISMA: 12 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 2
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED, context.DES_SOLAR, context.MTY_LEADER,
        context.MTY_HUMANOID, context.MTY_PRIEST, context.GEN_KINGDOM )
    ENC_LEVEL = 6
    TREASURE = treasuretype.HighItems( ( items.scrolls.Rank2Scroll, items.scrolls.Rank3Scroll, items.POTION ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (NoviceWarrior,NovicePriest,Warrior)
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_CRUSHING )
    TECHNIQUES = ( spells.waterspells.FREEZE_FOE, spells.priestspells.HEALING_LIGHT,
        spells.solarspells.SUNRAY, spells.airspells.SILENCE,
        spells.priestspells.HEROISM, spells.priestspells.ARMOR_OF_FAITH )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 6, self ) )

# Encounter level 6 dungeon fighter with a reach-2 polearm attack.
class Mercenary( base.Monster ):
    name = "Mercenary"
    statline = { stats.STRENGTH: 14, stats.TOUGHNESS: 14, stats.REFLEXES: 13, \
        stats.INTELLIGENCE: 10, stats.PIETY: 12, stats.CHARISMA: 12 }
    SPRITENAME = "monster_people.png"
    FRAME = 17
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.SET_EVERY,
        context.MAP_DUNGEON,
        context.MTY_HUMANOID, context.MTY_FIGHTER, context.GEN_KINGDOM )
    ENC_LEVEL = 6
    TREASURE = treasuretype.Low( (items.POLEARM,items.LIGHT_ARMOR) )
    ATTACK = items.Attack( (2,6,0), element = stats.RESIST_SLASHING, reach=2 )

    def init_monster( self ):
        self.levels.append( base.Humanoid( 6, self ) )

# Encounter level 6 stealthy archer with a longbow technique.
class Ranger( base.Monster ):
    name = "Ranger"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 14, \
        stats.INTELLIGENCE: 12, stats.PIETY: 12, stats.CHARISMA: 12, \
        stats.STEALTH: 24 }
    SPRITENAME = "monster_people.png"
    FRAME = 16
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_FOREST, context.SET_EVERY,
        context.MAP_WILDERNESS,
        context.MTY_HUMANOID, context.MTY_FIGHTER, context.GEN_NATURE, context.GEN_KINGDOM )
    ENC_LEVEL = 6
    TREASURE = treasuretype.Standard( ( items.ARROW, items.BOW ) )
    COMPANIONS = (NoviceDruid,)
    COMBAT_AI = aibrain.BasicTechnicalAI()
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )
    TECHNIQUES = ( abilities.LONGBOW, spells.earthspells.EARTHBIND )

    def init_monster( self ):
        # NOTE(review): five humanoid levels despite ENC_LEVEL 6 --
        # presumably offset by STEALTH/LONGBOW; confirm.
        self.levels.append( base.Humanoid( 5, self ) )

# *******************************
# *** ENCOUNTER LEVEL 7 ***
# *******************************

# Encounter level 7 offensive mage.
# NOTE(review): "Conjuoror" looks like a typo for "Conjuror"; the class
# name and display name are left untouched since other code/data may
# reference them.
class Conjuoror( base.Monster ):
    name = "Conjuoror"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 13, \
        stats.INTELLIGENCE: 16, stats.PIETY: 14, stats.CHARISMA: 12 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 10
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED, context.DES_LUNAR, context.DES_FIRE,
        context.MTY_HUMANOID, context.MTY_MAGE, context.GEN_KINGDOM )
    ENC_LEVEL = 7
    TREASURE = treasuretype.HighItems( ( items.scrolls.Rank3Scroll, items.scrolls.Rank4Scroll ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (NovicePriest,NoviceWarrior,Warrior,Mercenary)
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_CRUSHING )
    TECHNIQUES = ( spells.magespells.LIGHTNING_BOLT, spells.lunarspells.SLEEP,
        spells.lunarspells.HELLBLAST, spells.firespells.EXPLOSION,
        spells.firespells.PYROTECHNICS
        )

    def init_monster( self ):
        self.levels.append( base.Spellcaster( 7, self ) )

# Encounter level 7 boss-type fighter with a high critical-hit stat.
class Executioner( base.Monster ):
    name = "Executioner"
    statline = { stats.STRENGTH: 15, stats.TOUGHNESS: 16, stats.REFLEXES: 12,
        stats.INTELLIGENCE: 10, stats.PIETY: 12, stats.CHARISMA: 10,
        stats.CRITICAL_HIT: 20 }
    SPRITENAME = "monster_people.png"
    FRAME = 6
    TEMPLATES = ()
    MOVE_POINTS = 8
    HABITAT = ( context.HAB_EVERY, context.SET_RENFAN,
        context.MTY_HUMANOID, context.MTY_BOSS,
        context.MTY_FIGHTER, context.GEN_KINGDOM )
    ENC_LEVEL = 7
    TREASURE = treasuretype.Standard((items.AXE,))
    COMPANIONS = (Bushwhacker,)
    ATTACK = items.Attack( (1,10,0), element = stats.RESIST_SLASHING )

    def init_monster( self ):
        # NOTE(review): six humanoid levels despite ENC_LEVEL 7 --
        # presumably offset by CRITICAL_HIT; confirm.
        self.levels.append( base.Humanoid( 6, self ) )
# Lieutenant - Sprite 11
# Druid - Sprite 6
# *******************************
# *** ENCOUNTER LEVEL 8 ***
# *******************************
# Crusader - Get knightly sprite, PRIEST+WARRIOR, Sprite 21
# *******************************
# *** ENCOUNTER LEVEL 9 ***
# *******************************
# Warden - spellcaster Sprite 5
# Witch - spellcaster Sprite 18
# Commander - Sprite 10
# ********************************
# *** ENCOUNTER LEVEL 10 ***
# ********************************
# Encounter level 10 boss priest. Its custom "Repent" invocation (see
# TECHNIQUES) blesses allies, heals damaged allies, and deals water
# damage to enemies within the blast area.
class Healer( base.Monster ):
    name = "Healer"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 16, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 14, stats.PIETY: 18, stats.CHARISMA: 18 }
    SPRITENAME = "monster_spellcasters.png"
    FRAME = 16
    TEMPLATES = ()
    MOVE_POINTS = 10
    HABITAT = ( context.HAB_EVERY, context.HAB_BUILDING, context.SET_EVERY,
        context.DES_CIVILIZED, context.DES_SOLAR, context.DES_WATER, context.MTY_BOSS,
        context.MTY_HUMANOID, context.MTY_PRIEST, context.GEN_KINGDOM )
    ENC_LEVEL = 10
    TREASURE = treasuretype.HighItems( ( items.potions.PotionOfHealing, items.scrolls.Rank4Scroll, items.scrolls.Rank5Scroll ) )
    COMBAT_AI = aibrain.BasicTechnicalAI()
    COMPANIONS = (NoviceWarrior,NovicePriest,Warrior)
    ATTACK = items.Attack( (3,6,0), element = stats.RESIST_SOLAR,
        hit_anim=animobs.YellowExplosion )
    TECHNIQUES = ( spells.priestspells.SMITE, spells.solarspells.MASS_CURE,
        spells.solarspells.MAXIMUM_CURE, invocations.MPInvocation( "Repent",
        # Allies: gain a blessing; damaged allies are also healed 3d12+12.
        effects.TargetIsAlly( on_true = (
            effects.Enchant( enchantments.BlessingEn, anim=animobs.GreenSparkle ),
            effects.TargetIsDamaged( on_true= (
                effects.HealthRestore( dice=(3,12,12) ),
            ))
        ), on_false=(
            # Enemies: 3d12 water damage scaled by CHARISMA.
            effects.TargetIsEnemy( on_true = (
                effects.HealthDamage( (3,12,0), stat_bonus=stats.CHARISMA, element=stats.RESIST_WATER, anim=animobs.Bubbles ),
            )),
        )), shot_anim=animobs.BlueComet, com_tar=targetarea.Blast(radius=3),
        ai_tar=invocations.TargetEnemy(), mp_cost=12 )
        )

    def init_monster( self ):
        # Mixed progression: six spellcaster plus four defender levels.
        self.levels.append( base.Spellcaster( 6, self ) )
        self.levels.append( base.Defender( 4, self ) )
# ********************************
# *** ENCOUNTER LEVEL 11 ***
# ********************************
# Ranger Hero - Sprite 12
# ********************************
# *** ENCOUNTER LEVEL 12 ***
# ********************************
# Antihero - Sprite 18
# High Priest - Sprite 1
# ********************************
# *** ENCOUNTER LEVEL 13 ***
# ********************************
# High Druid - Sprite 7
# ********************************
# *** ENCOUNTER LEVEL 14 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 15 ***
# ********************************
# Bishop - Sprite 3
# ********************************
# *** ENCOUNTER LEVEL 16 ***
# ********************************
# Master Druid - Sprite 11
# ********************************
# *** ENCOUNTER LEVEL 17 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 18 ***
# ********************************
# Cannoness - Sprite 4
# ********************************
# *** ENCOUNTER LEVEL 19 ***
# ********************************
# Elder Druid - Sprite 9
# ********************************
# *** ENCOUNTER LEVEL 20 ***
# ********************************
# Archmage - Sprite 24
|
jwvhewitt/dmeternal
|
old_game/monsters/people.py
|
Python
|
gpl-2.0
| 17,995
|
[
"BLAST"
] |
0029dd023d82851e497fe252c52d861f1b006c9dab4e9a5acb07bfd6a423ebe1
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 11:23:17 2015
Sellemeier coefficients and nonlinear parameter for PPLN
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: ycasg
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pynlo.media.crystals.CrystalContainer import Crystal
class DengSellmeier:
    """Temperature dependent refractive index for the e axis of PPLN,
    using the Sellmeier equations of Deng et al."""
    # Sellmeier coefficients.
    a1 = 5.39121
    a2 = 0.100473
    a3 = 0.20692
    a4 = 100.0
    a5 = 11.34927
    a6 = 1.544e-2
    # Temperature-dependence coefficients.
    b1 = 4.96827e-7
    b2 = 3.862e-8
    b3 = -0.89e-8
    b4 = 2.657e-5
    b5 = 9.62119e-10
    T = 0  # crystal temperature, degrees C

    def __init__(self, T):
        self.set_T_degC(T)

    def set_T_degC(self, T):
        """Set the crystal temperature in degrees Celsius."""
        self.T = T

    def n(self, wl_nm, axis = None):
        """Refractive index at wavelength `wl_nm` (nanometers). `axis` is
        accepted for interface compatibility and ignored."""
        wl_um = wl_nm * 1.0e-3
        f = (self.T - 24.5) * (self.T + 570.82)
        lam2 = wl_um ** 2
        n_sq = (self.a1 + self.b1 * f
                + (self.a2 + self.b2 * f) / (lam2 - (self.a3 + self.b3 * f) ** 2)
                + (self.a4 + self.b4 * f) / (lam2 - self.a5 ** 2)
                - (self.a6 + self.b5 * f) * lam2)
        return np.sqrt(n_sq)
class Gayer5PctSellmeier:
    """Temperature dependent refractive index for the e axis of PPLN with
    5 pct Mg doping, using the Sellmeier equations of Gayer et al."""
    # Sellmeier coefficients.
    a1 = 5.756
    a2 = 0.0983
    a3 = 0.2020
    a4 = 189.32
    a5 = 12.52
    a6 = 1.32e-2
    # Temperature-dependence coefficients.
    b1 = 2.860e-6
    b2 = 4.700e-8
    b3 = 6.113e-8
    b4 = 1.516e-4
    T = 30  # crystal temperature, degrees C

    def __init__(self, T):
        self.set_T_degC(T)

    def set_T_degC(self, T):
        """Set the crystal temperature in degrees Celsius."""
        self.T = T

    def n(self, wl_nm, axis = None):
        """Refractive index at wavelength `wl_nm` (nanometers). `axis` is
        accepted for interface compatibility and ignored."""
        wl_um = wl_nm * 1.0e-3
        f = (self.T - 24.5) * (self.T + 570.82)
        lam2 = wl_um ** 2
        n_sq = (self.a1 + self.b1 * f
                + (self.a2 + self.b2 * f) / (lam2 - (self.a3 + self.b3 * f) ** 2)
                + (self.a4 + self.b4 * f) / (lam2 - self.a5 ** 2)
                - self.a6 * lam2)
        return np.sqrt(n_sq)
class PPLN(Crystal):
    """Periodically poled lithium niobate crystal container."""

    def __init__(self, T, **params):
        """Create a PPLN instance at temperature T (degrees C)."""
        Crystal.__init__(self, params)
        self.load(T)

    def load(self, T, data_source = "Gayer_5pctMg"):
        """ Load PPLN data. params -- 'T' : crystal temperature
        Uses parameters from:
        * Deng: Deng et al, Opt. Comm. 268, 1, 1 pp 110-114
          'Improvement to Sellmeier equation for periodically poled LiNbO3
          crystal using mid-infrared difference-frequency generation'
        * Gayer_5pctMg: Appl. Phys. B 91, 343–348 (2008)
          'Temperature and wavelength dependent refractive index equations
          for MgO-doped congruent and stoichiometric LiNbO3'
        """
        self.T = T
        self.mode = 'PP'
        self.sellmeier_type = data_source
        self.sellmeier_calculators = {'Deng' :DengSellmeier(T),
                                      'Gayer_5pctMg':Gayer5PctSellmeier(T)}
        # Bind the chosen model's index function and temperature setter.
        self.n = self.sellmeier_calculators[data_source].n
        self.set_xtalT = self.sellmeier_calculators[data_source].set_T_degC
        self.deff= 14.9e-12 # from SNLO
        self.n2= 3e-15 / 100**2 # from Nikogosyan
        # Default constant poling period [m]; see set_pp().
        self.pp= lambda x: 30.49e-6
        self._crystal_properties['damage_threshold_GW_per_sqcm'] = 4.0
        self._crystal_properties['damage_threshold_info'] = """ This 4 GW/cm^2 number is from Covesion. According
to their website, it is from a 200 fs pulses source at 1550 nm."""

    def set_pp(self, p) :
        """Set the poling period: a 1-tuple yields a constant period; any
        callable is used as a z -> period function (chirped poling)."""
        if p.__class__ is tuple:
            self.pp = lambda x: p[0]
        else:
            self.pp = lambda x: p(x)

    def set_T(self, T_degC):
        """Update the crystal temperature (degrees C) used by the
        refractive-index model."""
        self.T = T_degC
        self.set_xtalT(T_degC)

    def calculate_poling_period(self, pump_wl_nm, sgnl_wl_nm, idlr_wl_nm,
                                delta_k_L = 3.2, silent=False):
        """ Calculate poling period [meters] for pump, signal, and idler -- each a
        PINT object (with units.) If one is None, then it is calculated by
        energy conservation. """
        RET_wl_nm = False
        new_wl_nm = None
        # Fill in whichever wavelength is missing via 1/lp = 1/ls + 1/li.
        if pump_wl_nm is None:
            pump_wl_nm = 1.0/(1.0/idlr_wl_nm + 1.0/sgnl_wl_nm)
            if not silent:
                print ('Setting pump to ',pump_wl_nm)
            RET_wl_nm = True
            new_wl_nm = pump_wl_nm
        if sgnl_wl_nm is None:
            sgnl_wl_nm = 1.0/(1.0/pump_wl_nm - 1.0/idlr_wl_nm)
            if not silent:
                print ('Setting signal to ',sgnl_wl_nm)
            RET_wl_nm = True
            new_wl_nm = sgnl_wl_nm
        if idlr_wl_nm is None:
            idlr_wl_nm = 1.0/(1.0/pump_wl_nm - 1.0/sgnl_wl_nm)
            if not silent:
                print ('Setting idler to ',idlr_wl_nm,' nm')
            RET_wl_nm = True
            new_wl_nm = idlr_wl_nm
        # Wavevector magnitudes (wavelengths in nm, so k is rad/nm).
        kp = self.n(pump_wl_nm)*2*np.pi/pump_wl_nm
        ks = self.n(sgnl_wl_nm)*2*np.pi/sgnl_wl_nm
        ki = self.n(idlr_wl_nm)*2*np.pi/idlr_wl_nm
        # NOTE(review): the guard tests self.length_mks but the division
        # uses self.length_nm -- the attribute names differ; confirm which
        # one the Crystal base class actually defines.
        if self.length_mks is not None:
            delta_k_set_pt = delta_k_L / self.length_nm
        else:
            delta_k_set_pt = 0
        deltak = kp-ks-ki - delta_k_set_pt
        period_meter = np.pi/deltak*1.0e-9
        if not silent:
            print ('period is ',2.0*period_meter*1.0e6,' um')
        if RET_wl_nm:
            return (period_meter*2, new_wl_nm)
        else:
            return period_meter*2
|
ycasg/PyNLO
|
src/pynlo/media/crystals/XTAL_PPLN.py
|
Python
|
gpl-3.0
| 6,036
|
[
"CRYSTAL"
] |
90db38a9bb6079e20b483348a320ad3c1b6b77ee345999242c92b97e2e10c873
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Gaussian Mixture Model Class:
contains the basic fields and methods of GMMs
The class GMM _old uses C bindings which are
computationally and memory efficient.
Author : Bertrand Thirion, 2006-2009
"""
import numpy as np
from scipy.linalg import eigvalsh
class GridDescriptor(object):
    """
    A tiny class to handle cartesian grids
    """

    def __init__(self, dim=1, lim=None, n_bins=None):
        """
        Parameters
        ----------
        dim: int, optional,
             the dimension of the grid
        lim: list of len(2 * dim),
             the limits of the grid as (xmin, xmax, ymin, ymax, ...)
        n_bins: list of len(dim),
             the number of bins in each direction
        """
        self.dim = dim
        if lim is not None:
            self.set(lim, n_bins)
        if np.size(n_bins) == self.dim:
            self.n_bins = np.ravel(np.array(n_bins))

    def set(self, lim, n_bins=10):
        """Set the limits of the grid and the number of bins; raises
        ValueError on any dimension mismatch."""
        if len(lim) != 2 * self.dim:
            raise ValueError("Wrong dimension for grid definition")
        self.lim = lim
        if np.size(n_bins) != self.dim:
            raise ValueError("Wrong dimension for grid definition")
        self.n_bins = np.ravel(np.array(n_bins))

    def make_grid(self):
        """
        Compute the grid points.

        Returns
        -------
        grid: array of shape (prod(self.n_bins), self.dim)
        """
        # Per-axis coordinate vectors (float32, as in the original code).
        axes = []
        for axis in range(self.dim):
            lo, hi = self.lim[2 * axis], self.lim[2 * axis + 1]
            nb = self.n_bins if np.isscalar(self.n_bins) else self.n_bins[axis]
            axes.append(lo + float(hi - lo) / (nb - 1) *
                        np.arange(nb).astype('f'))
        if self.dim == 1:
            return np.array([[value] for value in axes[0]])
        if self.dim == 2:
            nx, ny = self.n_bins[0], self.n_bins[1]
            grid = np.zeros((nx * ny, self.dim))
            for i in range(nx):
                for j in range(ny):
                    grid[i * ny + j] = np.array([axes[0][i], axes[1][j]])
            return grid
        if self.dim == 3:
            nx, ny, nz = self.n_bins[0], self.n_bins[1], self.n_bins[2]
            grid = np.zeros((nx * ny * nz, self.dim))
            for i in range(nx):
                for j in range(ny):
                    for k in range(nz):
                        grid[(i * ny + j) * nz + k] = np.array(
                            [axes[0][i], axes[1][j], axes[2][k]])
            return grid
        raise NotImplementedError(
            'only dimensions <4 are currently handled')
def best_fitting_GMM(x, krange, prec_type='full', niter=100, delta=1.e-4,
                     ninit=1, verbose=0):
    """
    Given a certain dataset x, find the best-fitting GMM
    with a number k of classes in a certain range defined by krange

    Parameters
    ----------
    x: array of shape (n_samples, dim),
       the data from which the model is estimated
    krange: list of floats,
       the range of values to test for k
    prec_type: string (to be chosen within 'full', 'diag'), optional,
       the covariance parameterization
    niter: int, optional,
       maximal number of iterations in the estimation process
    delta: float, optional,
       increment of data likelihood at which convergence is declared
    ninit: int,
       number of initializations performed
    verbose=0: verbosity mode

    Returns
    -------
    mg: the best-fitting GMM instance (None if krange is empty)
    """
    if np.size(x) == x.shape[0]:
        x = np.reshape(x, (np.size(x), 1))
    dim = x.shape[1]
    bestbic = - np.inf
    # Pre-bind so an empty krange no longer raises NameError at return.
    bgmm = None
    for k in krange:
        lgmm = GMM(k, dim, prec_type)
        gmmk = lgmm.initialize_and_estimate(x, None, niter, delta, ninit,
                                            verbose)
        bic = gmmk.evidence(x)
        if bic > bestbic:
            bestbic = bic
            bgmm = gmmk
        if verbose:
            # %-formatting keeps this valid on both Python 2 and 3
            # (the original `print 'k', k, ...` statement is py2-only).
            print('k %s bic %s' % (k, bic))
    return bgmm
def plot2D(x, my_gmm, z=None, with_dots=True, log_scale=False, mpaxes=None,
           verbose=0):
    """
    Given a set of points in a plane and a GMM, plot them

    Parameters
    ----------
    x: array of shape (npoints, dim=2),
       sample points
    my_gmm: GMM instance,
       whose density has to be plotted
    z: array of shape (npoints), optional
       that gives a labelling of the points in x
       by default, it is not taken into account
    with_dots, bool, optional
       whether to plot the dots or not
    log_scale: bool, optional
       whether to plot the likelihood in log scale or not
    mpaxes=None, int, optional
       if not None, axes handle for plotting
    verbose: verbosity mode, optional

    Returns
    -------
    gd, GridDescriptor instance,
       that represents the grid used in the function
    ax, handle to the figure axes

    Notes
    -----
    ``my_gmm`` is assumed to have a 'mixture_likelihood' method that takes
    an array of points of shape (np, dim) and returns an array of shape
    (np, my_gmm.k) that represents the likelihood component-wise
    """
    import matplotlib.pyplot as plt
    if x.shape[1] != my_gmm.dim:
        raise ValueError('Incompatible dimension between data and model')
    if x.shape[1] != 2:
        raise ValueError('this works only for 2D cases')

    # Evaluation grid: the data bounding box padded by 10% on each side.
    gd1 = GridDescriptor(2)
    xmin, xmax = x.min(0), x.max(0)
    xm = 1.1 * xmin[0] - 0.1 * xmax[0]
    xs = 1.1 * xmax[0] - 0.1 * xmin[0]
    ym = 1.1 * xmin[1] - 0.1 * xmax[1]
    ys = 1.1 * xmax[1] - 0.1 * xmin[1]
    gd1.set([xm, xs, ym, ys], [51, 51])
    grid = gd1.make_grid()
    L = my_gmm.mixture_likelihood(grid)
    if verbose:
        intl = L.sum() * (xs - xm) * (ys - ym) / 2500
        print('integral of the density on the domain %s' % intl)

    # `is None`, not `== None`: an axes/array operand would make `==`
    # elementwise and break the `if`.
    if mpaxes is None:
        plt.figure()
        ax = plt.subplot(1, 1, 1)
    else:
        ax = mpaxes

    gdx = gd1.n_bins[0]
    # Floor division: reshape needs integer sizes (true division yields a
    # float on Python 3).
    Pdens = np.reshape(L, (gdx, np.size(L) // gdx))
    extent = [xm, xs, ym, ys]
    if log_scale:
        plt.imshow(np.log(Pdens.T), alpha=2.0, origin='lower',
                   extent=extent)
    else:
        plt.imshow(Pdens.T, alpha=2.0, origin='lower', extent=extent)

    if with_dots:
        # `z == None` on a label array is elementwise and would raise.
        if z is None:
            plt.plot(x[:, 0], x[:, 1], 'o')
        else:
            hsv = plt.cm.hsv(range(256))
            col = hsv[range(0, 256, 256 // int(z.max() + 1))]
            for k in range(z.max() + 1):
                plt.plot(x[z == k, 0], x[z == k, 1], 'o', color=col[k])

    plt.axis(extent)
    plt.colorbar()
    return gd1, ax
class GMM(object):
"""Standard GMM.
this class contains the following members
k (int): the number of components in the mixture
dim (int): is the dimension of the data
prec_type = 'full' (string) is the parameterization
of the precisions/covariance matrices:
either 'full' or 'diagonal'.
means: array of shape (k,dim):
all the means (mean parameters) of the components
precisions: array of shape (k,dim,dim):
the precisions (inverse covariance matrix) of the components
weights: array of shape(k): weights of the mixture
fixme
-----
no copy method
"""
def __init__(self, k=1, dim=1, prec_type='full', means=None,
precisions=None, weights=None):
"""
Initialize the structure, at least with the dimensions of the problem
Parameters
----------
k (int) the number of classes of the model
dim (int) the dimension of the problem
prec_type = 'full' : coavriance:precision parameterization
(diagonal 'diag' or full 'full').
means = None: array of shape (self.k,self.dim)
precisions = None: array of shape (self.k,self.dim,self.dim)
or (self.k, self.dim)
weights=None: array of shape (self.k)
By default, means, precision and weights are set as
zeros()
eye()
1/k ones()
with the correct dimensions
"""
self.k = k
self.dim = dim
self.prec_type = prec_type
self.means = means
self.precisions = precisions
self.weights = weights
if self.means == None:
self.means = np.zeros((self.k, self.dim))
if self.precisions == None:
if prec_type == 'full':
prec = np.reshape(np.eye(self.dim), (1, self.dim, self.dim))
self.precisions = np.repeat(prec, self.k, 0)
else:
self.precisions = np.ones((self.k, self.dim))
if self.weights == None:
self.weights = np.ones(self.k) * 1.0 / self.k
def plugin(self, means, precisions, weights):
"""
Set manually the weights, means and precision of the model
Parameters
----------
means: array of shape (self.k,self.dim)
precisions: array of shape (self.k,self.dim,self.dim)
or (self.k, self.dim)
weights: array of shape (self.k)
"""
self.means = means
self.precisions = precisions
self.weights = weights
self.check()
def check(self):
"""
Checking the shape of different matrices involved in the model
"""
if self.means.shape[0] != self.k:
raise ValueError("self.means does not have correct dimensions")
if self.means.shape[1] != self.dim:
raise ValueError("self.means does not have correct dimensions")
if self.weights.size != self.k:
raise ValueError("self.weights does not have correct dimensions")
if self.dim != self.precisions.shape[1]:
raise ValueError(
"self.precisions does not have correct dimensions")
if self.prec_type == 'full':
if self.dim != self.precisions.shape[2]:
raise ValueError(
"self.precisions does not have correct dimensions")
if self.prec_type == 'diag':
if np.shape(self.precisions) != np.shape(self.means):
raise ValueError(
"self.precisions does not have correct dimensions")
if self.precisions.shape[0] != self.k:
raise ValueError(
"self.precisions does not have correct dimensions")
if self.prec_type not in ['full', 'diag']:
raise ValueError('unknown precisions type')
def check_x(self, x):
    """Check that x has self.dim columns; 1D input is reshaped.

    Essentially checks that x.shape[1] == self.dim; x is returned,
    possibly reshaped to a column vector.
    """
    n = np.size(x)
    # a flat, one-dimensional sample array becomes an (n, 1) column
    if n == x.shape[0]:
        x = np.reshape(x, (n, 1))
    if x.shape[1] == self.dim:
        return x
    raise ValueError('incorrect size for x')
def initialize(self, x):
    """Initializes self according to a certain dataset x:
    1. sets the regularizing hyper-parameters
    2. initializes z using a k-means algorithm, then
    3. update the parameters

    Parameters
    ----------
    x, array of shape (n_samples, self.dim)
        the data used in the estimation process
    """
    from .utils import kmeans
    n = x.shape[0]
    # 1. set the priors
    self.guess_regularizing(x, bcheck=1)
    # 2. initialize the memberships
    if self.k > 1:
        _, z, _ = kmeans(x, self.k)
    else:
        # builtin int: np.int was deprecated in NumPy 1.20 and later removed
        z = np.zeros(n).astype(int)
    # one-hot encoding of the hard k-means assignment
    l = np.zeros((n, self.k))
    l[np.arange(n), z] = 1
    # 3. update the parameters
    self.update(x, l)
def pop(self, like, tiny=1.e-15):
    """Compute the population, i.e. the statistics of allocation.

    Parameters
    ----------
    like: array of shape (n_samples, self.k):
        the likelihood of each item being in each class
    tiny: float, floor on the row sums to avoid division by zero
    """
    row_total = np.maximum(tiny, like.sum(1))
    # normalize each row to a responsibility distribution
    responsibilities = like / row_total[:, np.newaxis]
    # per-component expected population
    return responsibilities.sum(0)
def update(self, x, l):
    """Update the parameters of the model; identical to self._Mstep(x, l)."""
    # thin alias kept for API compatibility; _Mstep returns None
    return self._Mstep(x, l)
def likelihood(self, x):
    """
    Return the likelihood of the model for the data x,
    weighted by the component weights.

    Parameters
    ----------
    x array of shape (n_samples, self.dim)
        the data used in the estimation process

    Returns
    -------
    like, array of shape (n_samples, self.k)
        component-wise likelihood
    """
    # broadcasting multiplies each column by its component weight
    return self.unweighted_likelihood(x) * self.weights
def unweighted_likelihood_(self, x):
    """
    return the likelihood of each data for each component
    the values are not weighted by the component weights

    NOTE(review): superseded by unweighted_likelihood(), which computes
    the same quantity with fewer temporaries; kept for reference.

    Parameters
    ----------
    x: array of shape (n_samples, self.dim)
        the data used in the estimation process

    Returns
    -------
    like, array of shape(n_samples, self.k)
        unweighted component-wise likelihood
    """
    n = x.shape[0]
    like = np.zeros((n, self.k))
    for k in range(self.k):
        # compute the data-independent factor first
        # w accumulates 2*log of the Gaussian normalization constant
        w = - np.log(2 * np.pi) * self.dim
        m = np.reshape(self.means[k], (1, self.dim))
        b = self.precisions[k]
        if self.prec_type == 'full':
            # log-determinant of the precision via its eigenvalues
            w += np.log(eigvalsh(b)).sum()
            dx = m - x
            # Mahalanobis distance of each sample to the mean
            q = np.sum(np.dot(dx, b) * dx, 1)
        else:
            w += np.sum(np.log(b))
            # diagonal precision: weighted squared distance
            q = np.dot((m - x) ** 2, b)
        w -= q
        w /= 2
        like[:, k] = np.exp(w)
    return like
def unweighted_likelihood(self, x):
    """
    return the likelihood of each data for each component
    the values are not weighted by the component weights

    Parameters
    ----------
    x: array of shape (n_samples, self.dim)
        the data used in the estimation process

    Returns
    -------
    like, array of shape(n_samples, self.k)
        unweighted component-wise likelihood

    Notes
    -----
    Hopefully faster
    """
    # work on the transpose (dim, n) so per-component algebra vectorizes
    xt = x.T.copy()
    n = x.shape[0]
    like = np.zeros((n, self.k))
    for k in range(self.k):
        # compute the data-independent factor first
        # w accumulates 2*log of the Gaussian normalization constant
        w = - np.log(2 * np.pi) * self.dim
        m = np.reshape(self.means[k], (self.dim, 1))
        b = self.precisions[k]
        if self.prec_type == 'full':
            # log-determinant of the precision via its eigenvalues
            w += np.log(eigvalsh(b)).sum()
            dx = xt - m
            # Mahalanobis distance, summed dimension by dimension
            sqx = dx * np.dot(b, dx)
            q = np.zeros(n)
            for d in range(self.dim):
                q += sqx[d]
        else:
            w += np.sum(np.log(b))
            # diagonal precision: weighted squared distance
            q = np.dot(b, (m - xt) ** 2)
        w -= q
        w /= 2
        like[:, k] = np.exp(w)
    return like
def mixture_likelihood(self, x):
    """Returns the likelihood of the mixture for x.

    Parameters
    ----------
    x: array of shape (n_samples, self.dim)
        the data used in the estimation process

    Returns
    -------
    array of shape (n_samples): the mixture likelihood of each sample
    """
    checked = self.check_x(x)
    # total likelihood = sum of the weighted component likelihoods
    return np.sum(self.likelihood(checked), 1)
def average_log_like(self, x, tiny=1.e-15):
    """Returns the averaged log-likelihood of the model for the dataset x.

    Parameters
    ----------
    x: array of shape (n_samples, self.dim)
        the data used in the estimation process
    tiny = 1.e-15: a small constant to avoid numerical singularities
    """
    x = self.check_x(x)
    # per-sample mixture likelihood, floored so the log never sees 0
    total = np.maximum(self.likelihood(x).sum(1), tiny)
    return np.log(total).mean()
def evidence(self, x):
    """Computation of bic approximation of evidence.

    Parameters
    ----------
    x array of shape (n_samples, dim)
        the data from which bic is computed

    Returns
    -------
    the bic value
    """
    checked = self.check_x(x)
    # delegate to bic() on the component-wise likelihood
    return self.bic(self.likelihood(checked), 1.e-15)
def bic(self, like, tiny=1.e-15):
    """Computation of bic approximation of evidence.

    Parameters
    ----------
    like, array of shape (n_samples, self.k)
        component-wise likelihood
    tiny=1.e-15, a small constant to avoid numerical singularities

    Returns
    -------
    the bic value, float
    """
    n_samples = like.shape[0]
    # total data log-likelihood, floored to avoid log(0)
    log_like = np.log(np.maximum(np.sum(like, 1), tiny)).sum()
    # eta: the number of free parameters of the model
    if self.prec_type == 'full':
        eta = self.k * (1 + self.dim + (self.dim * self.dim + 1) / 2) - 1
    else:
        eta = self.k * (1 + 2 * self.dim) - 1
    return log_like - np.log(n_samples) * eta
def _Estep(self, x):
    """
    E step of the EM algo
    returns the likelihood per class of each data item

    Parameters
    ----------
    x array of shape (n_samples, dim)
        the data used in the estimation process

    Returns
    -------
    likelihood array of shape(n_samples, self.k)
        component-wise likelihood
    """
    # with fixed parameters the E step is just the weighted likelihood
    return self.likelihood(x)
def guess_regularizing(self, x, bcheck=1):
    """
    Set the regularizing priors as weakly informative
    according to Fraley and raftery;
    Journal of Classification 24:155-181 (2007)

    Parameters
    ----------
    x array of shape (n_samples, dim)
        the data used in the estimation process
    bcheck: if nonzero, run self.check() after setting the priors
    """
    # small shrinkage -> weakly informative prior on the means
    small = 0.01
    # the mean of the data
    mx = np.reshape(x.mean(0), (1, self.dim))
    dx = x - mx
    # empirical covariance of the data
    vx = np.dot(dx.T, dx) / x.shape[0]
    # prior scale: inverse of the per-dimension variances
    if self.prec_type == 'full':
        px = np.reshape(np.diag(1.0 / np.diag(vx)),
                        (1, self.dim, self.dim))
    else:
        px = np.reshape(1.0 / np.diag(vx), (1, self.dim))
    # scale with the number of components (k**(2/dim) factor)
    px *= np.exp(2.0 / self.dim * np.log(self.k))
    self.prior_means = np.repeat(mx, self.k, 0)
    self.prior_weights = np.ones(self.k) / self.k
    self.prior_scale = np.repeat(px, self.k, 0)
    self.prior_dof = self.dim + 2
    self.prior_shrinkage = small
    # start from uniform mixing weights
    self.weights = np.ones(self.k) * 1.0 / self.k
    if bcheck:
        self.check()
def _Mstep(self, x, like):
"""
M step regularized according to the procedure of
Fraley et al. 2007
Parameters
----------
x: array of shape(n_samples,self.dim)
the data from which the model is estimated
like: array of shape(n_samples,self.k)
the likelihood of the data under each class
"""
from numpy.linalg import pinv
tiny = 1.e-15
pop = self.pop(like)
sl = np.maximum(tiny, np.sum(like, 1))
like = (like.T / sl).T
# shrinkage,weights,dof
self.weights = self.prior_weights + pop
self.weights = self.weights / self.weights.sum()
# reshape
pop = np.reshape(pop, (self.k, 1))
prior_shrinkage = self.prior_shrinkage
shrinkage = pop + prior_shrinkage
# means
means = np.dot(like.T, x) + self.prior_means * prior_shrinkage
self.means = means / shrinkage
#precisions
empmeans = np.dot(like.T, x) / np.maximum(pop, tiny)
empcov = np.zeros(np.shape(self.precisions))
if self.prec_type == 'full':
for k in range(self.k):
dx = x - empmeans[k]
empcov[k] = np.dot(dx.T, like[:, k:k + 1] * dx)
#covariance
covariance = np.array([pinv(self.prior_scale[k])
for k in range(self.k)])
covariance += empcov
dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1))
addcov = np.array([np.dot(dx[k], dx[k].T) for k in range(self.k)])
apms = np.reshape(prior_shrinkage * pop / shrinkage,
(self.k, 1, 1))
covariance += (addcov * apms)
dof = self.prior_dof + pop + self.dim + 2
covariance /= np.reshape(dof, (self.k, 1, 1))
# precision
self.precisions = np.array([pinv(covariance[k]) \
for k in range(self.k)])
else:
for k in range(self.k):
dx = x - empmeans[k]
empcov[k] = np.sum(dx ** 2 * like[:, k:k + 1], 0)
# covariance
covariance = np.array([1.0 / self.prior_scale[k]
for k in range(self.k)])
covariance += empcov
dx = np.reshape(empmeans - self.prior_means, (self.k, self.dim, 1))
addcov = np.array([np.sum(dx[k] ** 2, 0) for k in range(self.k)])
apms = np.reshape(prior_shrinkage * pop / shrinkage, (self.k, 1))
covariance += addcov * apms
dof = self.prior_dof + pop + self.dim + 2
covariance /= np.reshape(dof, (self.k, 1))
# precision
self.precisions = np.array([1.0 / covariance[k] \
for k in range(self.k)])
def map_label(self, x, like=None):
    """Return the MAP labelling of x.

    Parameters
    ----------
    x array of shape (n_samples, dim)
        the data under study
    like=None array of shape(n_samples, self.k)
        component-wise likelihood
        if None, it is recomputed

    Returns
    -------
    z: array of shape(n_samples): the resulting MAP labelling
       of the rows of x
    """
    # 'is None', not '== None': comparing an ndarray to None with ==
    # is elementwise, so the 'if' would raise when 'like' is provided
    if like is None:
        like = self.likelihood(x)
    z = np.argmax(like, 1)
    return z
def estimate(self, x, niter=100, delta=1.e-4, verbose=0):
    """ Estimation of the model given a dataset x

    Parameters
    ----------
    x array of shape (n_samples, dim)
        the data from which the model is estimated
    niter=100: maximal number of iterations in the estimation process
    delta = 1.e-4: increment of data likelihood at which
        convergence is declared
    verbose=0: verbosity mode

    Returns
    -------
    bic : an asymptotic approximation of model evidence
    """
    # check that the data is OK
    x = self.check_x(x)
    # alternation of E/M step until convergence
    tiny = 1.e-15
    av_ll_old = - np.inf
    for i in range(niter):
        l = self._Estep(x)
        # average log-likelihood under the current parameters
        av_ll = np.mean(np.log(np.maximum(np.sum(l, 1), tiny)))
        # stop as soon as the improvement drops below delta
        if av_ll < av_ll_old + delta:
            if verbose:
                print 'iteration:', i, 'log-likelihood:', av_ll,\
                    'old value:', av_ll_old
            break
        else:
            av_ll_old = av_ll
        if verbose:
            print i, av_ll, self.bic(l)
        self._Mstep(x, l)
    # bic is computed from the likelihood of the last completed E step
    return self.bic(l)
def initialize_and_estimate(self, x, z=None, niter=100, delta=1.e-4,\
                            ninit=1, verbose=0):
    """Estimation of self given x

    Parameters
    ----------
    x array of shape (n_samples, dim)
        the data from which the model is estimated
    z = None: array of shape (n_samples)
        a prior labelling of the data to initialize the computation
        NOTE(review): currently unused by this implementation
    niter=100: maximal number of iterations in the estimation process
    delta = 1.e-4: increment of data likelihood at which
        convergence is declared
    ninit=1: number of initialization performed
        to reach a good solution
    verbose=0: verbosity mode

    Returns
    -------
    the best model is returned
    """
    bestbic = - np.inf
    # candidate model that will receive the best parameters found
    bestgmm = GMM(self.k, self.dim, self.prec_type)
    bestgmm.initialize(x)
    for i in range(ninit):
        # initialization -> Kmeans
        self.initialize(x)
        # alternation of E/M step until convergence
        bic = self.estimate(x, niter=niter, delta=delta, verbose=0)
        # keep the parameters of the run with the highest bic
        if bic > bestbic:
            bestbic = bic
            bestgmm.plugin(self.means, self.precisions, self.weights)
    return bestgmm
def train(self, x, z=None, niter=100, delta=1.e-4, ninit=1, verbose=0):
    """Idem initialize_and_estimate: fit the model and return the best one."""
    # explicit keyword forwarding for readability
    return self.initialize_and_estimate(x, z=z, niter=niter, delta=delta,
                                        ninit=ninit, verbose=verbose)
def test(self, x, tiny=1.e-15):
    """Returns the log-likelihood of the mixture for x.

    Parameters
    ----------
    x array of shape (n_samples, self.dim)
        the data used in the estimation process

    Returns
    -------
    ll: array of shape(n_samples)
        the log-likelihood of the rows of x
    """
    mixture = self.mixture_likelihood(x)
    # floor the likelihood so the log never sees zero
    return np.log(np.maximum(mixture, tiny))
def show_components(self, x, gd, density=None, mpaxes=None):
    """Function to plot a GMM -- Currently, works only in 1D

    Parameters
    ----------
    x: array of shape(n_samples, dim)
        the data under study
    gd: GridDescriptor instance
    density: array of shape(prod(gd.n_bins), self.k)
        component-wise density of the model on the discrete grid
        implied by gd; by default, this is recomputed
    mpaxes: axes handle to make the figure, optional,
        if None, a new figure is created
    """
    import matplotlib.pyplot as plt
    if density is None:
        # component-wise likelihood: the plotting loop below indexes
        # density[:, k], so a 2D (n_grid, k) array is required
        # (mixture_likelihood would return a 1D total density)
        density = self.likelihood(gd.make_grid())
    if gd.dim > 1:
        raise NotImplementedError("only implemented in 1D")
    # Scott-like rule for the bin width of the data histogram
    step = 3.5 * np.std(x) / np.exp(np.log(np.size(x)) / 3)
    bins = max(10, int((x.max() - x.min()) / step))
    xmin = 1.1 * x.min() - 0.1 * x.max()
    xmax = 1.1 * x.max() - 0.1 * x.min()
    # 'density=True' replaces the 'normed' keyword removed from
    # np.histogram in modern numpy; same normalization
    h, c = np.histogram(x, bins, [xmin, xmax], density=True)
    # Make code robust to new and old behavior of np.histogram
    c = c[:len(h)]
    offset = (xmax - xmin) / (2 * bins)
    c += offset / 2
    grid = gd.make_grid()
    if mpaxes is None:
        plt.figure()
        ax = plt.axes()
    else:
        ax = mpaxes
    ax.plot(c + offset, h, linewidth=2)
    for k in range(self.k):
        ax.plot(grid, density[:, k], linewidth=2)
    ax.set_title('Fit of the density with a mixture of Gaussians',
                 fontsize=12)
    legend = ['data']
    for k in range(self.k):
        legend.append('component %d' % (k + 1))
    l = ax.legend(tuple(legend))
    for t in l.get_texts():
        t.set_fontsize(12)
    ax.set_xticklabels(ax.get_xticks(), fontsize=12)
    ax.set_yticklabels(ax.get_yticks(), fontsize=12)
def show(self, x, gd, density=None, axes=None):
    """
    Function to plot a GMM, still in progress
    Currently, works only in 1D and 2D

    Parameters
    ----------
    x: array of shape(n_samples, dim)
        the data under study
    gd: GridDescriptor instance
    density: array of shape(prod(gd.n_bins))
        density of the model on the discrete grid implied by gd
        by default, this is recomputed
    axes: figure handle, optional; created if None
    """
    import matplotlib.pyplot as plt
    # recompute the density if necessary
    if density is None:
        # bug fix: mixture_likelihood takes the sample array only;
        # the previous call passed (gd, x) which is a TypeError
        density = self.mixture_likelihood(gd.make_grid())
    if axes is None:
        axes = plt.figure()
    if gd.dim == 1:
        from ..statistics.empirical_pvalue import \
            smoothed_histogram_from_samples
        h, c = smoothed_histogram_from_samples(x, normalized=True)
        offset = (c.max() - c.min()) / (2 * c.size)
        grid = gd.make_grid()
        h /= h.sum()
        h /= (2 * offset)
        plt.plot(c[: -1] + offset, h)
        plt.plot(grid, density)
    if gd.dim == 2:
        plt.figure()
        # bug fix: four bounds require gd.lim[0:4] (was [0:3], which
        # raised ValueError on unpacking)
        xm, xM, ym, yM = gd.lim[0:4]
        gd0 = gd.n_bins[0]
        # integer division: np.reshape requires integral dimensions
        Pdens = np.reshape(density, (gd0, np.size(density) // gd0))
        # NOTE(review): 'axes' is a Figure here; imshow on it looks
        # suspect but is preserved from the original API usage
        axes.imshow(Pdens.T, None, None, None, 'nearest',
                    1.0, None, None, 'lower', [xm, xM, ym, yM])
        axes.plot(x[:, 0], x[:, 1], '.k')
        axes.axis([xm, xM, ym, yM])
    return axes
|
bthirion/nipy
|
nipy/algorithms/clustering/gmm.py
|
Python
|
bsd-3-clause
| 29,373
|
[
"Gaussian"
] |
b45ba4c6705d6bb19fb8a30817150e4d1079a025096c2efb1cc38c5a4d52f7b0
|
# S.D. Peckham
# Sept 2014 (new version to use netCDF4)
# June 2010 (streamlined a bit more)
# December 2, 2009 (updated open_new_file to use "info")
# October 13, 2009
import os
import sys
import time
import numpy as np
import bov_files
import file_utils
import rti_files
import netCDF4 as nc
#-------------------------------------------------------------------
# unit_test()
# save_ncgs_frame() ## (12/7/09)
#
# class ncgs_file():
#
# import_netCDF4()
# open_file()
# check_and_store_info() # (12/2/09)
# get_dtype_map()
# open_new_file()
# add_grid()
# get_grid()
# close_file()
# close()
#
#-------------------------------------------------------------------
def unit_test(nx=4, ny=5, n_grids=6, VERBOSE=False,
              file_name="NCGS_Grid_Test.nc"):
    """Write a small stack of test grids to a netCDF file and read it back.

    Creates an ncgs_file, writes n_grids (ny, nx) grids, then reopens
    the file and prints every stored grid.
    """
    print('Running unit_test()...')
    #-------------------------------------
    # Make instance of ncgs_file() class
    #-------------------------------------
    ncgs = ncgs_file()
    dx = 100
    dy = 100
    var_name = "depth"
    info = rti_files.make_info( file_name, nx, ny, dx, dy )
    OK = ncgs.open_new_file( file_name, info,
                             dtype='float32',
                             var_name=var_name,
                             long_name="depth of water",
                             units_name="meters",
                             comment="Created by TopoFlow 3.0.")
    if not(OK):
        print('ERROR during open_new_file().')
        return
    grid = np.arange(nx * ny, dtype='Float32')
    grid = grid.reshape( (ny, nx) )
    #-----------------------------------------------
    # (6/10/10) Can use this to test new ability
    # of add_grid() to convert from scalar to grid
    #-----------------------------------------------
    # grid = np.float32(0)
    #----------------------------------
    # Add some test grids to the file
    #----------------------------------
    print('Writing grids to NCGS file...')
    for time_index in range(n_grids):
        ncgs.add_grid( grid, var_name )
        ## ncgs.add_grid( grid, var_name, time_index )
        grid = (grid + 1)
    if (VERBOSE):
        # bug fix: this is a module-level function, so there is no
        # 'self'; the open dataset lives on the ncgs instance
        print(ncgs.ncgs_unit)   # (print a summary)
    ncgs.close_file()
    print('Finished writing NCGS file: ' + file_name)
    print(' ')
    #---------------------------------------------
    # Re-open the file and read grids one-by-one
    #---------------------------------------------
    OK = ncgs.open_file( file_name )
    if not(OK): return
    print('Reading grids from NCGS file: ')
    for time_index in range(n_grids):
        grid = ncgs.get_grid(var_name, time_index)
        print('grid[' + str(time_index) + '] = ')
        print(grid)
    print('-----------------------------------------------')
    ncgs.close_file()
    print('Finished reading NCGS file: ' + file_name)
    print(' ')
#   unit_test()
#-------------------------------------------------------------------
def save_ncgs_frame(ncgs_file_name=None, rtg_file_name=None):
    """Extract one grid frame from a netCDF grid stack and save it as RTG.

    Reads grid 'H' at a fixed time index from ncgs_file_name and writes
    the raw binary array to rtg_file_name.
    """
    ncgs = ncgs_file()
    OK = ncgs.open_file( ncgs_file_name )
    if not(OK): return
    grid_name = 'H'
    time_index = 200
    grid = ncgs.get_grid( grid_name, time_index )
    ncgs.close()
    grid = np.array( grid )
    print('min(grid), max(grid) = %s %s' % (grid.min(), grid.max()))
    rtg_unit = open( rtg_file_name, 'wb' )
    # bug fix: 'unit' was undefined (NameError); write to rtg_unit
    grid.tofile( rtg_unit )
    rtg_unit.close()
#   save_ncgs_frame()
#-------------------------------------------------------------------
class ncgs_file():
    #------------------------------------------------------
    # Note: ncgs = NetCDF Grid Stack (used by CSDMS)
    #
    # (10/9/10) Added check_netcdf() function in
    # model_output.py that each component can call
    # in its open_output_files() method.
    #------------------------------------------------------
    def import_netCDF4(self):
        """Return the netCDF4 module, or False if it cannot be imported."""
        try:
            import netCDF4
            # print 'Imported netCDF4 version: ' + netCDF4.__version__
            return netCDF4
        except:
            ## print ' '
            ## print 'SORRY, Cannot write netCDF files because'
            ## print 'the "netCDF4" package cannot be imported.'
            ## print ' '
            ## python_version = sys.version[:3]
            ## if (python_version != '2.6'):
            ##     print 'Note that "PyNIO" is only installed for'
            ##     print 'Python version 2.6 on "beach".'
            ##     print 'The current Python version is:', python_version
            ##     print ' '
            return False
    #   import_netCDF4()
    #----------------------------------------------------------
    def open_file(self, file_name):
        """Open an existing netCDF file read-only; return True on success."""
        #-------------------------
        # Open file to read only
        #-------------------------
        try:
            ncgs_unit = nc.Dataset(file_name, mode='r')
            self.ncgs_unit = ncgs_unit
            ### return ncgs_unit
            return True
        except:
            # NOTE(review): bare except hides the real error (e.g. a
            # missing file vs. a corrupt one); callers only see False
            return False
    #   open_file()
    #----------------------------------------------------------
    def check_and_store_info(self, file_name, info=None,
                             grid_name='UNKNOWN',
                             dtype='float32',
                             MAKE_RTI=True, MAKE_BOV=False):
        """Store grid georeferencing info on self; optionally write RTI/BOV.

        Resolution order for "info": the argument, then self.info from a
        previous use of this object, then a sibling RTI file on disk.
        """
        #-----------------------------------------------------
        # Note: This object (self) may be new or it may have
        #       been used previously.  In the latter case,
        #       "info" should still be available in "self".
        #       We only need info if MAKE_RTI or MAKE_BOV.
        #-----------------------------------------------------
        self.format = 'NCGS'
        self.file_name = file_name
        self.time_index = 0
        self.grid_name = grid_name
        #-----------------------------------------------------
        # This was used by rts_files.check_and_store_info()
        # but is not appropriate here because we need to
        # know nx, ny, dx and dy for the netCDF file.
        #-----------------------------------------------------
        ### if not(MAKE_RTI or MAKE_BOV): return
        #---------------------------------
        # Was "info" argument provided ?
        #---------------------------------
        NEW_INFO = True
        if (info is None):
            try:
                info = self.info
                self.nx = info.ncols  ###
                self.ny = info.nrows
                NEW_INFO = False
                ## print 'Found info in state.'
            except:
                #------------------------------------------
                # Try to find RTI file to copy info from.
                # Don't create a new RTI file.
                #------------------------------------------
                RTI_file = rti_files.try_to_find_rti_file( file_name )
                if (RTI_file != 'none'):
                    print 'Reading info from: ' + RTI_file
                    info = rti_files.read_info( RTI_file )
                else:
                    print 'ERROR during open_new_file():'
                    print '   Could not find RTI file and "info"'
                    print '   argument was not provided.'
                    print ' '
                    return
        #-----------------------------
        # Update "info" as necessary
        #-----------------------------
        info.grid_file   = file_name
        info.data_type   = rti_files.get_rti_data_type( dtype )
        info.data_source = 'TopoFlow 3.0'
        info.gmin        = -9999.0
        info.gmax        = -9999.0
        #---------------------------------------
        # If new "info" was provided, store it
        #---------------------------------------
        if (NEW_INFO):
            self.info = info
            self.nx = info.ncols
            self.ny = info.nrows
            ## print 'Stored new info in state.'
        #-------------------
        # Write RTI file ?
        #-------------------
        if (MAKE_RTI):
            prefix = rti_files.get_file_prefix( file_name )
            RTI_file = (prefix + '.rti')
            rti_files.write_info( RTI_file, info )
            # print 'Wrote grid info to: ' + RTI_file   ######
        #-------------------
        # Write BOV file ?
        #-------------------
        if (MAKE_BOV):
            bov_files.write_info_as_bov( file_name, info, grid_name)
            ###                          time )
    #   check_and_store_info()
    #----------------------------------------------------------
    def get_dtype_map(self):
        """Return a dict mapping numpy dtype names to netCDF4 type codes."""
        #----------------------------------------
        # Possible settings for "dtype_code"
        #----------------------------------------------------
        # These two-char codes are used for netCDF4 package
        #----------------------------------------------------
        # See:  http://unidata.github.io/netcdf4-python/
        #----------------------------------------------------
        dtype_map = {'float64':'f8', 'float32':'f4',
                     'int64':'i8', 'int32':'i4',
                     'int16':'i2', 'int8':'i1',
                     'S|100':'S1'}  # ( ????? )
        #-------------------------------------------------
        # These one-char codes are used for Nio in PyNIO
        #-------------------------------------------------
        # dtype_code = "d"  # (double, Float64)
        # dtype_code = "f"  # (float,  Float32)
        # dtype_code = "l"  # (long,   Int64)
        # dtype_code = "i"  # (int,    Int32)
        # dtype_code = "h"  # (short,  Int16)
        # dtype_code = "b"  # (byte,   Int8)
        # dtype_code = "S1" # (char)
        #-------------------------------------------
        # dtype_map = {'float64':'d', 'float32':'f',
        #              'int64':'l', 'int32':'i',
        #              'int16':'s', 'int8':'b',
        #              'S|100':'S1'}  # (check last entry)
        return dtype_map
    #   get_dtype_map()
    #----------------------------------------------------------
    def open_new_file(self, file_name, info=None,
                      var_name='X',
                      long_name=None,
                      units_name='None',
                      dtype='float32',
                      ### dtype='float64'
                      time_units='minutes',
                      comment='',
                      MAKE_RTI=True, MAKE_BOV=False):
        """Create a new netCDF grid-stack file and define its variable.

        Defines (time, ny, nx) dimensions, a 'time' coordinate variable
        and one data variable named var_name; returns True on success.
        """
        #----------------------------
        # Does file already exist ?
        #----------------------------
        file_name = file_utils.check_overwrite( file_name )
        self.file_name = file_name
        #---------------------------------------
        # Check and store the grid information
        #---------------------------------------
        self.check_and_store_info( file_name, info, var_name,
                                   dtype, MAKE_RTI, MAKE_BOV )
        if (long_name is None): long_name = var_name
        self.long_name  = long_name
        self.units_name = units_name
        self.dtype      = dtype
        #-----------------------------------
        # Save the two-char data type code
        #-----------------------------------
        dtype_map  = self.get_dtype_map()
        dtype_code = dtype_map[ dtype.lower() ]
        self.dtype_code = dtype_code
        #-------------------------------------
        # Open a new netCDF file for writing
        #-------------------------------------
        try:
            ## format = 'NETCDF4'
            format = 'NETCDF4_CLASSIC'
            ncgs_unit = nc.Dataset(file_name, mode='w', format=format)
            OK = True
        except:
            OK = False
            return OK
        #------------------------------------------------------------
        # Option to pre-fill with fill values
        # Set fill_value for a var with "var._Fill_Value = number"
        # For Nio was:  opt.PreFill = False # (for efficiency)
        #------------------------------------------------------------
        ncgs_unit.set_fill_off()
        # ncgs_unit.set_fill_on()
        #-------------------------------------
        # Prepare and save a history string
        #-------------------------------------
        # Sample output from time.asctime():
        #     "Thu Oct 8 17:10:18 2009"
        #-------------------------------------
        history = "Created using netCDF4 " + nc.__version__ + " on "
        history = history + time.asctime() + ". "
        history = history + comment
        ncgs_unit.history = history
        # print 'MADE IT PAST history BLOCK'
        ## print 'nx =', self.info.ncols
        ## print 'ny =', self.info.nrows
        ## print 'dx =', self.info.xres
        ## print 'dy =', self.info.yres
        ## print ' '
        #----------------------------------------------
        # Create grid dimensions nx and ny, plus time
        #----------------------------------------------
        # Without using "int()" here, we get this:
        #     TypeError: size must be None or integer
        #----------------------------------------------
        ncgs_unit.createDimension('nx', int(self.info.ncols))
        ncgs_unit.createDimension('ny', int(self.info.nrows))
        ncgs_unit.createDimension('time', None)   # (unlimited dimension)
        # print 'MADE IT PAST create_dimension CALLS.'
        #-------------------------
        # Create a time variable
        #------------------------------------------
        #('d' = float64; must match in add_grid()
        # In netCDF4 package, use 'f8' vs. 'd'.
        #------------------------------------------
        tvar = ncgs_unit.createVariable('time', 'f8', ('time',))
        tvar.units = time_units
        ### ncgs_unit.variables['time'].units = time_units
        #--------------------------------
        # Create a variable in the file
        #--------------------------------
        var = ncgs_unit.createVariable(var_name, dtype_code,
                                       ('time', 'ny', 'nx'))
        #----------------------------------
        # Specify a "nodata" fill value ?
        #----------------------------------
        # var._Fill_Value = -9999.0    ## Used for pre-fill above ?
        #-------------------------------------------
        # Create a separate, scalar "time stamp" ?
        #-------------------------------------------
        # t = nc_unit.createVariable('time', dtype_code, ('time'))
        #----------------------------------
        # Specify a "nodata" fill value ?
        #----------------------------------
        # var._FillValue = -9999.0    ## Was used for Nio.
        #------------------------------------
        # Create attributes of the variable
        #------------------------------------
        ncgs_unit.variables[var_name].long_name = long_name
        ncgs_unit.variables[var_name].units     = units_name
        ncgs_unit.variables[var_name].dx        = self.info.xres
        ncgs_unit.variables[var_name].dy        = self.info.yres  ### (12/2/09)
        ncgs_unit.variables[var_name].y_south_edge = self.info.y_south_edge
        ncgs_unit.variables[var_name].y_north_edge = self.info.y_north_edge
        ncgs_unit.variables[var_name].x_west_edge  = self.info.x_west_edge
        ncgs_unit.variables[var_name].x_east_edge  = self.info.x_east_edge
        self.ncgs_unit = ncgs_unit
        return OK
    #   open_new_file()
    #----------------------------------------------------------
    def add_grid(self, grid, grid_name, time=None,
                 time_index=-1):
        """Append (or overwrite) one grid and its time stamp in the file.

        A scalar "grid" is broadcast to a full (ny, nx) grid before
        writing; time_index == -1 means append at self.time_index.
        """
        #---------------------------------
        # Assign a value to the variable
        #-------------------------------------------
        # This syntax works for scalars and grids
        #-------------------------------------------
        # nc_unit.variables[var_name].assign_value( grid )
        #-------------------------------------
        # Can use time_index to overwrite an
        # existing grid vs. simple append.
        #-------------------------------------
        if (time_index == -1):
            time_index = self.time_index
        if (time is None):
            time = np.float64( time_index )
        #---------------------------------------
        # Write a time to existing netCDF file
        #---------------------------------------
        times = self.ncgs_unit.variables[ 'time' ]
        times[ time_index ] = time  ############################ CHECK
        #---------------------------------------
        # Write a grid to existing netCDF file
        #---------------------------------------
        var = self.ncgs_unit.variables[ grid_name ]
        if (np.ndim(grid) == 0):
            #-----------------------------------------------
            # "grid" is actually a scalar (dynamic typing)
            # so convert it to a grid before saving
            #-----------------------------------------------
            grid2 = grid + np.zeros([self.ny, self.nx],
                                    dtype=self.dtype)
            var[ time_index ] = grid2.astype(self.dtype)
        else:
            var[ time_index ] = grid.astype(self.dtype)
        #---------------------------
        # Increment the time index
        #---------------------------
        self.time_index += 1
        #-------------------------------------------------
        # 12/2/09:  netCDF is supposed to take care of
        # byteorder transparently.  However, we need to
        # make sure we don't byteswap in the function
        # "model_output.save_as_grid_to_file()" when the
        # output format is netCDF.
        #-------------------------------------------------
        ##        if (sys.byteorder == 'big'):
        ##            var[time_index] = grid
        ##        else:
        ##            grid2 = grid.copy()
        ##            var[time_index] = grid2.byteswap()
        ##        self.time_index += 1
    #   add_grid()
    #----------------------------------------------------------
    def get_grid(self, grid_name, time_index):
        """Return the grid stored for grid_name at time_index."""
        var = self.ncgs_unit.variables[ grid_name ]
        return var[ time_index ]
    #   get_grid()
    #-------------------------------------------------------------------
    def close_file(self):
        """Close the underlying netCDF dataset."""
        # self.ncgs_unit.sync()  ## (netCDF4 has no "flush")
        self.ncgs_unit.close()
    #   close_file()
    #-------------------------------------------------------------------
    def close(self):
        """Alias for close_file(); closes the underlying netCDF dataset."""
        # self.ncgs_unit.sync()  ## (netCDF4 has no "flush")
        self.ncgs_unit.close()
    #   close()
#-------------------------------------------------------------------
|
mdpiper/topoflow
|
topoflow/utils/ncgs_files.py
|
Python
|
mit
| 19,007
|
[
"NetCDF"
] |
720076c7ec2159bdadd98ef8488e17efdc51d2d0cf90395137c0004cbb052f05
|
# -*- coding: utf-8 -*-
"""The config functions."""
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import atexit
from functools import partial
import json
import os
import os.path as op
import platform
import shutil
import sys
import tempfile
import re
import numpy as np
from .check import (_validate_type, _check_pyqt5_version, _check_option,
_check_fname)
from .docs import fill_doc
from ._logging import warn, logger
_temp_home_dir = None
def set_cache_dir(cache_dir):
    """Set the directory to be used for temporary file storage.

    This directory is used by joblib to store memmapped arrays,
    which reduces memory requirements and speeds up parallel
    computation.

    Parameters
    ----------
    cache_dir : str or None
        Directory to use for temporary file storage. None disables
        temporary file storage.
    """
    if cache_dir is not None:
        # fail fast on a missing directory instead of at first use
        if not op.exists(cache_dir):
            raise IOError('Directory %s does not exist' % cache_dir)
    set_config('MNE_CACHE_DIR', cache_dir, set_env=False)
def set_memmap_min_size(memmap_min_size):
    """Set the minimum size for memmaping of arrays for parallel processing.

    Parameters
    ----------
    memmap_min_size : str or None
        Threshold on the minimum size of arrays that triggers automated memory
        mapping for parallel processing, e.g., '1M' for 1 megabyte.
        Use None to disable memmaping of large arrays.
    """
    if memmap_min_size is not None:
        # validate '<number><unit>' form before persisting
        if not isinstance(memmap_min_size, str):
            raise ValueError('\'memmap_min_size\' has to be a string.')
        if memmap_min_size[-1] not in ('K', 'M', 'G'):
            raise ValueError('The size has to be given in kilo-, mega-, or '
                             'gigabytes, e.g., 100K, 500M, 1G.')
    set_config('MNE_MEMMAP_MIN_SIZE', memmap_min_size, set_env=False)
# List the known configuration values
# (duplicate 'MNE_DATASETS_EPILEPSY_ECOG_PATH' entry removed)
known_config_types = (
    'MNE_3D_OPTION_ANTIALIAS',
    'MNE_BROWSE_RAW_SIZE',
    'MNE_BROWSE_BACKEND',
    'MNE_CACHE_DIR',
    'MNE_COREG_ADVANCED_RENDERING',
    'MNE_COREG_COPY_ANNOT',
    'MNE_COREG_GUESS_MRI_SUBJECT',
    'MNE_COREG_HEAD_HIGH_RES',
    'MNE_COREG_HEAD_OPACITY',
    'MNE_COREG_HEAD_INSIDE',
    'MNE_COREG_INTERACTION',
    'MNE_COREG_MARK_INSIDE',
    'MNE_COREG_PREPARE_BEM',
    'MNE_COREG_PROJECT_EEG',
    'MNE_COREG_ORIENT_TO_SURFACE',
    'MNE_COREG_SCALE_LABELS',
    'MNE_COREG_SCALE_BY_DISTANCE',
    'MNE_COREG_SCENE_SCALE',
    'MNE_COREG_WINDOW_HEIGHT',
    'MNE_COREG_WINDOW_WIDTH',
    'MNE_COREG_SUBJECTS_DIR',
    'MNE_CUDA_DEVICE',
    'MNE_CUDA_IGNORE_PRECISION',
    'MNE_DATA',
    'MNE_DATASETS_BRAINSTORM_PATH',
    'MNE_DATASETS_EEGBCI_PATH',
    'MNE_DATASETS_EPILEPSY_ECOG_PATH',
    'MNE_DATASETS_HF_SEF_PATH',
    'MNE_DATASETS_MEGSIM_PATH',
    'MNE_DATASETS_MISC_PATH',
    'MNE_DATASETS_MTRF_PATH',
    'MNE_DATASETS_SAMPLE_PATH',
    'MNE_DATASETS_SOMATO_PATH',
    'MNE_DATASETS_MULTIMODAL_PATH',
    'MNE_DATASETS_FNIRS_MOTOR_PATH',
    'MNE_DATASETS_OPM_PATH',
    'MNE_DATASETS_SPM_FACE_DATASETS_TESTS',
    'MNE_DATASETS_SPM_FACE_PATH',
    'MNE_DATASETS_TESTING_PATH',
    'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
    'MNE_DATASETS_KILOWORD_PATH',
    'MNE_DATASETS_FIELDTRIP_CMC_PATH',
    'MNE_DATASETS_PHANTOM_4DBTI_PATH',
    'MNE_DATASETS_LIMO_PATH',
    'MNE_DATASETS_REFMEG_NOISE_PATH',
    'MNE_DATASETS_SSVEP_PATH',
    'MNE_DATASETS_ERP_CORE_PATH',
    'MNE_FORCE_SERIAL',
    'MNE_KIT2FIFF_STIM_CHANNELS',
    'MNE_KIT2FIFF_STIM_CHANNEL_CODING',
    'MNE_KIT2FIFF_STIM_CHANNEL_SLOPE',
    'MNE_KIT2FIFF_STIM_CHANNEL_THRESHOLD',
    'MNE_LOGGING_LEVEL',
    'MNE_MEMMAP_MIN_SIZE',
    'MNE_SKIP_FTP_TESTS',
    'MNE_SKIP_NETWORK_TESTS',
    'MNE_SKIP_TESTING_DATASET_TESTS',
    'MNE_STIM_CHANNEL',
    'MNE_TQDM',
    'MNE_USE_CUDA',
    'MNE_USE_NUMBA',
    'SUBJECTS_DIR',
)

# These allow for partial matches, e.g. 'MNE_STIM_CHANNEL_1' is okay key
known_config_wildcards = (
    'MNE_STIM_CHANNEL',
)
def _load_config(config_path, raise_error=False):
    """Safely load a config file.

    Reads ``config_path`` as JSON. A corrupt (non-JSON) file either raises
    a RuntimeError (``raise_error=True``) or emits a warning and yields an
    empty dict, so callers always get a usable mapping back.
    """
    try:
        with open(config_path, 'r') as fid:
            config = json.load(fid)
    except ValueError:
        # No JSON object could be decoded --> corrupt file?
        msg = ('The MNE-Python config file (%s) is not a valid JSON '
               'file and might be corrupted' % config_path)
        if raise_error:
            raise RuntimeError(msg)
        warn(msg)
        config = dict()
    return config
def get_config_path(home_dir=None):
    r"""Get path to standard mne-python config file.

    Parameters
    ----------
    home_dir : str | None
        The folder that contains the .mne config folder.
        If None, it is found automatically.

    Returns
    -------
    config_path : str
        The path to the mne-python configuration file. On windows, this
        will be '%USERPROFILE%\.mne\mne-python.json'. On every other
        system, this will be ~/.mne/mne-python.json.
    """
    # The config file lives alongside the other extra data in the .mne
    # folder resolved by _get_extra_data_path.
    return op.join(_get_extra_data_path(home_dir=home_dir), 'mne-python.json')
def get_config(key=None, default=None, raise_error=False, home_dir=None,
               use_env=True):
    """Read MNE-Python preferences from environment or config file.

    Parameters
    ----------
    key : None | str
        The preference key to look for. The os environment is searched first,
        then the mne-python config file is parsed.
        If None, all the config parameters present in environment variables or
        the path are returned. If key is an empty string, a list of all valid
        keys (but not values) is returned.
    default : str | None
        Value to return if the key is not found.
    raise_error : bool
        If True, raise an error if the key is not found (instead of returning
        default).
    home_dir : str | None
        The folder that contains the .mne config folder.
        If None, it is found automatically.
    use_env : bool
        If True, consider env vars, if available.
        If False, only use MNE-Python configuration file values.

        .. versionadded:: 0.18

    Returns
    -------
    value : dict | str | None
        The preference key value.

    See Also
    --------
    set_config
    """
    _validate_type(key, (str, type(None)), "key", 'string or None')
    # Empty string is a special query: return the valid key names themselves.
    if key == '':
        return known_config_types
    # first, check to see if key is in env
    if use_env and key is not None and key in os.environ:
        return os.environ[key]
    # second, look for it in mne-python config file
    config_path = get_config_path(home_dir=home_dir)
    if not op.isfile(config_path):
        config = {}
    else:
        config = _load_config(config_path)
    if key is None:
        # update config with environment variables
        if use_env:
            # Merge in only env vars that are known keys (or already present
            # in the file); env values override file values.
            env_keys = (set(config).union(known_config_types).
                        intersection(os.environ))
            config.update({key: os.environ[key] for key in env_keys})
        return config
    elif raise_error is True and key not in config:
        # Build an actionable message telling the user how to set the
        # missing key, temporarily (os.environ) or permanently (set_config).
        loc_env = 'the environment or in the ' if use_env else ''
        meth_env = ('either os.environ["%s"] = VALUE for a temporary '
                    'solution, or ' % key) if use_env else ''
        extra_env = (' You can also set the environment variable before '
                     'running python.' if use_env else '')
        meth_file = ('mne.utils.set_config("%s", VALUE, set_env=True) '
                     'for a permanent one' % key)
        raise KeyError('Key "%s" not found in %s'
                       'the mne-python config file (%s). '
                       'Try %s%s.%s'
                       % (key, loc_env, config_path, meth_env, meth_file,
                          extra_env))
    else:
        return config.get(key, default)
def set_config(key, value, home_dir=None, set_env=True):
    """Set a MNE-Python preference key in the config file and environment.

    Parameters
    ----------
    key : str
        The preference key to set.
    value : str | None
        The value to assign to the preference key. If None, the key is
        deleted.
    home_dir : str | None
        The folder that contains the .mne config folder.
        If None, it is found automatically.
    set_env : bool
        If True (default), update :data:`os.environ` in addition to
        updating the MNE-Python config file.

    See Also
    --------
    get_config
    """
    _validate_type(key, 'str', "key")
    # While JSON allow non-string types, we allow users to override config
    # settings using env, which are strings, so we enforce that here
    _validate_type(value, (str, 'path-like', type(None)), 'value')
    value = str(value) if value is not None else None
    is_known = (key in known_config_types or
                any(k in key for k in known_config_wildcards))
    if not is_known:
        warn('Setting non-standard config type: "%s"' % key)
    # Read all previous values
    config_path = get_config_path(home_dir=home_dir)
    if not op.isfile(config_path):
        config = dict()
        logger.info('Attempting to create new mne-python configuration '
                    'file:\n%s' % config_path)
    else:
        config = _load_config(config_path, raise_error=True)
    if value is None:
        # Deleting: drop from the file and (optionally) the environment.
        config.pop(key, None)
        if set_env and key in os.environ:
            del os.environ[key]
    else:
        config[key] = value
        if set_env:
            os.environ[key] = value
    # Write all values. This may fail if the default directory is not
    # writeable.
    directory = op.dirname(config_path)
    if not op.isdir(directory):
        os.mkdir(directory)
    with open(config_path, 'w') as fid:
        json.dump(config, fid, sort_keys=True, indent=0)
def _get_extra_data_path(home_dir=None):
    """Get path to extra data (config, tables, etc.).

    Returns the ``.mne`` folder inside the resolved home directory,
    creating nothing on disk itself.
    """
    global _temp_home_dir
    if home_dir is None:
        # Test hook: lets the test suite redirect the config location.
        home_dir = os.environ.get('_MNE_FAKE_HOME_DIR')
    if home_dir is None:
        # this has been checked on OSX64, Linux64, and Win32
        if 'nt' == os.name.lower():
            # Prefer %APPDATA% if a .mne folder already exists there,
            # otherwise fall back to the user profile directory.
            if op.isdir(op.join(os.getenv('APPDATA'), '.mne')):
                home_dir = os.getenv('APPDATA')
            else:
                home_dir = os.getenv('USERPROFILE')
        else:
            # This is a more robust way of getting the user's home folder on
            # Linux platforms (not sure about OSX, Unix or BSD) than checking
            # the HOME environment variable. If the user is running some sort
            # of script that isn't launched via the command line (e.g. a script
            # launched via Upstart) then the HOME environment variable will
            # not be set.
            if os.getenv('MNE_DONTWRITE_HOME', '') == 'true':
                # Use a throwaway temp dir, cleaned up at interpreter exit.
                if _temp_home_dir is None:
                    _temp_home_dir = tempfile.mkdtemp()
                    atexit.register(partial(shutil.rmtree, _temp_home_dir,
                                            ignore_errors=True))
                home_dir = _temp_home_dir
            else:
                home_dir = os.path.expanduser('~')
    if home_dir is None:
        raise ValueError('mne-python config file path could '
                         'not be determined, please report this '
                         'error to mne-python developers')
    return op.join(home_dir, '.mne')
def get_subjects_dir(subjects_dir=None, raise_error=False):
    """Safely use subjects_dir input to return SUBJECTS_DIR.

    Parameters
    ----------
    subjects_dir : str | None
        If a value is provided, return subjects_dir. Otherwise, look for
        SUBJECTS_DIR config and return the result.
    raise_error : bool
        If True, raise a KeyError if no value for SUBJECTS_DIR can be found
        (instead of returning None).

    Returns
    -------
    value : str | None
        The SUBJECTS_DIR value.
    """
    _validate_type(item=subjects_dir, types=('path-like', None),
                   item_name='subjects_dir', type_name='str or path-like')
    # Fall back to the configured SUBJECTS_DIR when none was given.
    if subjects_dir is None:
        subjects_dir = get_config('SUBJECTS_DIR', raise_error=raise_error)
    if subjects_dir is None:
        return None
    # Validate that the directory exists before handing it back.
    return _check_fname(fname=subjects_dir, overwrite='read', must_exist=True,
                        need_dir=True, name='subjects_dir')
@fill_doc
def _get_stim_channel(stim_channel, info, raise_error=True):
    """Determine the appropriate stim_channel.

    First, 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', etc.
    are read. If these are not found, it will fall back to 'STI 014' if
    present, then fall back to the first channel of type 'stim', if present.

    Parameters
    ----------
    stim_channel : str | list of str | None
        The stim channel selected by the user.
    %(info_not_none)s

    Returns
    -------
    stim_channel : str | list of str
        The name of the stim channel(s) to use
    """
    # An explicit user choice wins: normalize to a validated list of names.
    if stim_channel is not None:
        if not isinstance(stim_channel, list):
            _validate_type(stim_channel, 'str', "Stim channel")
            stim_channel = [stim_channel]
        for channel in stim_channel:
            _validate_type(channel, 'str', "Each provided stim channel")
        return stim_channel
    # Collect MNE_STIM_CHANNEL, MNE_STIM_CHANNEL_1, ... from the config,
    # stopping at the first unset key or name missing from the recording.
    found = list()
    count = 0
    candidate = get_config('MNE_STIM_CHANNEL')
    while candidate is not None and candidate in info['ch_names']:
        found.append(candidate)
        count += 1
        candidate = get_config('MNE_STIM_CHANNEL_%d' % count)
    if found:
        return found
    if 'STI101' in info['ch_names']:  # combination channel for newer systems
        return ['STI101']
    if 'STI 014' in info['ch_names']:  # for older systems
        return ['STI 014']
    # Last resort: any channel whose type is 'stim'.
    from ..io.pick import pick_types
    picks = pick_types(info, meg=False, ref_meg=False, stim=True)
    if len(picks) > 0:
        return [info['ch_names'][idx] for idx in picks]
    if raise_error:
        raise ValueError("No stim channels found. Consider specifying them "
                         "manually using the 'stim_channel' parameter.")
    return picks
def _get_root_dir():
    """Get as close to the repo root as possible."""
    # Start from the package directory (one level above utils/).
    package_dir = op.abspath(op.join(op.dirname(__file__), '..'))
    parent = op.join(package_dir, '..')
    # If the parent looks like a source checkout (setup.py plus the usual
    # top-level folders), prefer it; otherwise stay at the package dir.
    looks_like_repo = op.isfile(op.join(parent, 'setup.py')) and all(
        op.isdir(op.join(parent, d)) for d in ('mne', 'examples', 'doc'))
    return op.abspath(parent) if looks_like_repo else package_dir
def _get_numpy_libs():
    """Return a summary string of the LAPACK/BLAS libs NumPy reports."""
    from ._testing import SilenceStdout
    # np.show_config() prints to stdout; capture it for parsing.
    with SilenceStdout(close=False) as capture:
        np.show_config()
    output_lines = capture.getvalue().split('\n')
    capture.close()
    found = []
    for idx, line in enumerate(output_lines):
        for key in ('lapack', 'blas'):
            if not line.startswith('%s_opt_info' % key):
                continue
            # The library name is on the line after the section header.
            lib = output_lines[idx + 1]
            if 'NOT AVAILABLE' in lib:
                lib = 'unknown'
            else:
                try:
                    lib = lib.split('[')[1].split("'")[1]
                except IndexError:
                    pass  # keep whatever it was
            found.append('%s=%s' % (key, lib))
    return ', '.join(found)
def sys_info(fid=None, show_paths=False, *, dependencies='user'):
    """Print the system information for debugging.

    This function is useful for printing system information
    to help triage bugs.

    Parameters
    ----------
    fid : file-like | None
        The file to write to. Will be passed to :func:`print()`.
        Can be None to use :data:`sys.stdout`.
    show_paths : bool
        If True, print paths for each module.
    dependencies : str
        Can be "user" (default) to show user-relevant dependencies, or
        "developer" to additionally show developer dependencies.

        .. versionadded:: 0.24

    Examples
    --------
    Running this function with no arguments prints an output that is
    useful when submitting bug reports::

        >>> import mne
        >>> mne.sys_info() # doctest: +SKIP
        Platform:      Linux-4.15.0-1067-aws-x86_64-with-glibc2.2.5
        Python:        3.8.1 (default, Feb  2 2020, 08:37:37)  [GCC 8.3.0]
        Executable:    /usr/local/bin/python
        CPU:           : 36 cores
        Memory:        68.7 GB

        mne:           0.21.dev0
        numpy:         1.19.0 {blas=openblas, lapack=openblas}
        scipy:         1.5.1
        matplotlib:    3.2.2 {backend=Qt5Agg}

        sklearn:       0.23.1
        numba:         0.50.1
        nibabel:       3.1.1
        nilearn:       0.7.0
        dipy:          1.1.1
        cupy:          Not found
        pandas:        1.0.5
        mayavi:        Not found
        pyvista:       0.25.3 {pyvistaqt=0.1.1, OpenGL 3.3 (Core Profile) Mesa 18.3.6 via llvmpipe (LLVM 7.0, 256 bits)}
        vtk:           9.0.1
        PyQt5:         5.15.0
        pooch:         v1.5.1
    """  # noqa: E501
    _validate_type(dependencies, str)
    _check_option('dependencies', dependencies, ('user', 'developer'))
    # Wider label column when the (longer) developer dependency names show.
    ljust = 21 if dependencies == 'developer' else 15
    platform_str = platform.platform()
    if platform.system() == 'Darwin' and sys.version_info[:2] < (3, 8):
        # platform.platform() in Python < 3.8 doesn't call
        # platform.mac_ver() if we're on Darwin, so we don't get a nice macOS
        # version number. Therefore, let's do this manually here.
        macos_ver = platform.mac_ver()[0]
        macos_architecture = re.findall('Darwin-.*?-(.*)', platform_str)
        if macos_architecture:
            macos_architecture = macos_architecture[0]
            platform_str = f'macOS-{macos_ver}-{macos_architecture}'
        del macos_ver, macos_architecture
    out = 'Platform:'.ljust(ljust) + platform_str + '\n'
    out += 'Python:'.ljust(ljust) + str(sys.version).replace('\n', ' ') + '\n'
    out += 'Executable:'.ljust(ljust) + sys.executable + '\n'
    out += 'CPU:'.ljust(ljust) + ('%s: ' % platform.processor())
    try:
        import multiprocessing
    except ImportError:
        out += ('number of processors unavailable ' +
                '(requires "multiprocessing" package)\n')
    else:
        out += '%s cores\n' % multiprocessing.cpu_count()
    out += 'Memory:'.ljust(ljust)
    try:
        import psutil
    except ImportError:
        out += 'Unavailable (requires "psutil" package)'
    else:
        out += '%0.1f GB\n' % (psutil.virtual_memory().total / float(2 ** 30),)
    out += '\n'
    libs = _get_numpy_libs()
    has_3d = False
    use_mod_names = ('mne', 'numpy', 'scipy', 'matplotlib', '', 'sklearn',
                     'numba', 'nibabel', 'nilearn', 'dipy', 'cupy', 'pandas',
                     'mayavi', 'pyvista', 'vtk', 'PyQt5', 'pooch')
    if dependencies == 'developer':
        use_mod_names += (
            '', 'sphinx', 'sphinx_gallery', 'numpydoc', 'pydata_sphinx_theme',
            'mne_bids', 'pytest')
    for mod_name in use_mod_names:
        # An empty name marks a blank separator line in the output.
        if mod_name == '':
            out += '\n'
            continue
        # PyQt5 is only reported when a 3D backend (mayavi/vtk) was found.
        if mod_name == 'PyQt5' and not has_3d:
            continue
        out += ('%s:' % mod_name).ljust(ljust)
        try:
            mod = __import__(mod_name)
            if mod_name == 'mayavi':
                # the real test
                from mayavi import mlab  # noqa, analysis:ignore
        except Exception:
            out += 'Not found\n'
        else:
            # 'extra' accumulates per-module decorations shown in braces.
            extra = ''
            if mod_name == 'numpy':
                extra += ' {%s}%s' % (libs, extra)
            elif mod_name == 'matplotlib':
                extra += ' {backend=%s}%s' % (mod.get_backend(), extra)
            elif mod_name == 'pyvista':
                extras = list()
                try:
                    from pyvistaqt import __version__
                except Exception:
                    extras += ['pyvistaqt not found']
                else:
                    extras += [f'pyvistaqt={__version__}']
                try:
                    from pyvista import GPUInfo
                except ImportError:
                    pass
                else:
                    gi = GPUInfo()
                    extras += [f'OpenGL {gi.version} via {gi.renderer}']
                if extras:
                    extra += f' {{{", ".join(extras)}}}'
            elif mod_name in ('mayavi', 'vtk'):
                has_3d = True
            if mod_name == 'vtk':
                version = mod.vtkVersion()
                # 9.0 dev has VersionFull but 9.0 doesn't
                for attr in ('GetVTKVersionFull', 'GetVTKVersion'):
                    if hasattr(version, attr):
                        version = getattr(version, attr)()
                        break
            elif mod_name == 'PyQt5':
                version = _check_pyqt5_version()
            else:
                version = mod.__version__
            if show_paths:
                extra += f'\n{" " * ljust}•{op.dirname(mod.__file__)}'
            out += '%s%s\n' % (version, extra)
    print(out, end='', file=fid)
|
pravsripad/mne-python
|
mne/utils/config.py
|
Python
|
bsd-3-clause
| 21,324
|
[
"Mayavi",
"VTK"
] |
9a54928ae5753f94342243e750a0e324ac7ddcfe05a7987bcfb54d31b36bed98
|
# -*- coding: utf-8 -*-
import re
import sublime
from sublime_plugin import TextCommand, WindowCommand
from .SublimeBoo import server, get_code, convert_hint
from .BooHints import format_type, format_method, find_open_paren
# Matches when the text right before the caret is a member access: an
# identifier character, ')' or ']' immediately followed by a dot.
MEMBER_REGEX = re.compile(r'[\w\)\]]\.$')
# A bare identifier made only of word characters.
WORD_REGEX = re.compile(r'^\w+$')
class BooDotCompleteCommand(TextCommand):
    """ Command triggered when the dot key is pressed to show the autocomplete popup
    """
    def run(self, edit):
        # Insert the typed dot at every cursor position
        for sel in self.view.sel():
            self.view.insert(edit, sel.end(), ".")
        # If the text before the first caret looks like a member access,
        # re-open the completion popup on the next tick
        caret = self.view.sel()[0].begin()
        preceding = self.view.substr(
            sublime.Region(self.view.word(caret - 1).a, caret))
        if MEMBER_REGEX.search(preceding) is not None:
            self.view.run_command('hide_auto_complete')
            sublime.set_timeout(self.delayed_complete, 0)

    def delayed_complete(self):
        self.view.run_command("auto_complete")
class BooImportCommand(WindowCommand):
    """ Include an additional import for a symbol into the file
    TODO:
        - Use a quick panel to browse namespaces
        - List already imported symbols too, if selected they are removed
        - If importing show an input panel to define an alias, prefiled
          for types, empty or "*" for namespaces (import all)
    """
    def run(self):
        # Reset the handle first: show_input_panel can invoke on_change
        # before its return value is assigned below.
        self.input = None
        self.input = self.window.show_input_panel(
            'Import',
            '',
            self.on_done,
            self.on_change,
            self.on_cancel)

    def on_done(self, text):
        # User confirmed the input; insertion itself is still a TODO.
        print('on_done', text)

    def on_change(self, text):
        print('on_change', text)
        # First call may come before we register the view object
        if not self.input:
            return

    def on_cancel(self):
        # Nothing to clean up when the panel is dismissed.
        pass
class BooQuickPanelCompleteCommand(TextCommand):
    """ Offer member/global completion hints in a quick panel and insert the
    selected one at every cursor.
    """
    def run(self, edit):
        self.edit = edit
        view = self.view
        # Find a preceding non-word character in the line
        offset = view.sel()[0].a
        word = view.word(offset)
        self.prefix = view.substr(word)
        # After a dot just autocomplete members
        if word.a == offset-1 and self.prefix[0] == '.':
            self.prefix = ''
        # At the end of an ident autocomplete it
        elif word.b == offset and self.prefix.isalnum():
            offset = word.a
        # Check if we are inside a call expression to report information about
        # it and its overloads
        else:
            idx = find_open_paren(view.substr(sublime.Region(0, offset)), open='([', close=')]')
            if idx is None:
                return
            word = view.word(idx)
            offset = word.a
            self.prefix = view.substr(word)
        # Request member hints
        resp = server(view).query(
            'members',
            fname=view.file_name(),
            code=get_code(view),
            offset=offset,
            extra=True)
        hints = resp['hints']
        # For not member references ask for globals
        if view.substr(offset-1) != '.':
            resp = server(view).query(
                'globals',
                fname=view.file_name(),
                code=get_code(view),
                extra=True)
            hints += resp['hints']
            # TODO: Why doesn't it work with locals?
        # Filter out entities to those we are interested in
        if len(self.prefix):
            # If followed by a paren do an exact match
            if view.substr(word.b) == '(':
                rex = re.compile(re.escape(self.prefix) + '$')
            else:
                # Emulate sublime's fuzzy search with a regexp
                #rex = re.compile(''.join('.*' + re.escape(ch) for ch in self.prefix), re.IGNORECASE)
                # TODO: Filtering by actual prefix seems to feel better. Perhaps because the
                # ordering is very different from Sublime's weighted one.
                rex = re.compile(re.escape(self.prefix), re.IGNORECASE)
            hints = [x for x in hints if rex.match(x['name'])]
        # Ignore if there are no hints to show
        if not hints:
            return
        hints.sort(key=lambda x: x['name'])

        def format(hint):
            # Build the two-line quick panel entry [title, details] for a hint.
            # Reuse standard completion formatting for the title
            # TODO: Refactor this to make it not a hack
            desc, name = convert_hint(hint)
            # Note: Sublime expects lists not tuples
            if hint['node'] == 'Namespace':
                return [
                    desc,
                    'namespace %s' % hint['full']
                ]
            elif hint['node'] == 'Type':
                return [
                    desc,
                    ' '.join([x.strip().lower() for x in sorted(hint['info'].split(','))]) + ' ' + hint['full']
                ]
            elif hint['node'] == 'Method':
                name = format_method(hint, '{name} ({params})', '{name}')
                sign = format_method(hint, 'def ({params}): {return}', '{type}')
                return [
                    u'ƒ ' + name,
                    sign
                ]
            return [desc, '{0}: {1}'.format(hint['node'].lower(), hint['type'])]

        # Ignore constructors
        hints = [x for x in hints if x['node'] != 'Constructor']
        self.hints = hints
        hints = [format(x) for x in hints]
        #hints.insert(0, [u'↵ Go to parent namespace'])
        #hints.insert(0, [u'⟳ System.Collections'])
        flags = 0  # sublime.MONOSPACE_FONT
        selected = 0
        view.window().show_quick_panel(hints, self.on_select, flags, selected)

    def on_select(self, idx):
        # Negative index means the panel was cancelled
        if idx < 0:
            return
        hint = self.hints[idx]
        hint = hint['name']
        # Insert only the portion of the name the user has not typed yet
        hint = hint[len(self.prefix):]
        view = self.view
        for s in view.sel():
            view.insert(self.edit, s.a, hint)
class BooBrowseNamespacesCommand(TextCommand):
    """ Browse global namespaces
    """
    def run(self, edit):
        view = self.view
        resp = server(view).query(
            'namespaces',
            fname=view.file_name(),
            code='',
        )
        from .SublimeBoo import symbol_for
        # self.list mirrors the quick panel rows so on_select can map an
        # index back to a full namespace name.
        self.list = []
        items = []
        seen = set()
        for hint in resp['hints']:
            if hint['node'] != 'Namespace' or hint['full'] in seen:
                continue
            seen.add(hint['full'])
            self.list.append(hint['full'])
            items.append([u'{0} {1}'.format(symbol_for(hint), hint['full'])])
        view.window().show_quick_panel(items, self.on_select)

    def on_select(self, idx):
        if idx < 0:
            return
        # Defer so the quick panel can close before opening the next one.
        sublime.set_timeout(lambda: self.browse(self.list[idx]), 1)

    def browse(self, fullname):
        # First two rows are navigation helpers: parent namespace and reload.
        parent = '.'.join(fullname.split('.')[:-1])
        self.list = [parent, fullname]
        items = [
            [u'⇧ Go to parent namespace'],
            [u'⟳ ' + fullname],
        ]
        resp = server(self.view).query(
            'members',
            fname=self.view.file_name(),
            code='{0}.'.format(fullname),
            offset=len(fullname)+1,
            extra=True
        )
        from .SublimeBoo import symbol_for
        for hint in resp['hints']:
            self.list.append(hint['full'])
            items.append(['{0} {1}'.format(symbol_for(hint), hint['name'])])
        self.view.window().show_quick_panel(items, self.on_select)
class BooNavigateCommand(TextCommand):
    """ Navigates symbols, errors and namespaces.

    Shows a quick panel mixing navigation shortcuts, lint results and
    global namespaces. ``self.actions`` is kept strictly in sync with the
    panel rows: each entry is either None (no handler) or a tuple of
    (callable, *args).
    """
    def run(self, edit):
        view = self.view
        self.actions = []
        items = []
        items.append([u'⇤ Go to previous position'])
        self.actions.append(None)  # not implemented yet
        items.append([u'⇥ Go to next position'])
        self.actions.append(None)  # not implemented yet
        items.append([u'📁 Browse namespaces'])
        self.actions.append((self.command, 'boo_browse_namespaces'))
        from .SublimeBoo import _LINTS
        lints = _LINTS.get(view.id(), {})
        for line, lint in lints.items():
            # TODO: Handle column
            if 'BCE' in lint:
                lint = u'✖ ' + lint
            else:
                lint = u'⚠ ' + lint
            items.append([lint, '{0}:{1}'.format(view.file_name(), line)])
            self.actions.append((self.goto, view.file_name(), line + 1))
        from .SublimeBoo import _GLOBALS, symbol_for
        hints = _GLOBALS.get(view.id(), [])
        for x in hints:
            if x['node'] == 'Namespace':
                # BUGFIX: wrap as a list like every other row and register a
                # placeholder action so items/actions stay index-aligned
                # (previously selecting one of these raised IndexError).
                items.append([symbol_for(x) + ' ' + x['name']])
                self.actions.append(None)
        view.window().show_quick_panel(items, self.on_select)

    def on_select(self, idx):
        print('Navigate idx', idx)
        # BUGFIX: idx is -1 when the panel is cancelled; also skip rows
        # registered without a handler (previously both cases crashed).
        if idx < 0:
            return
        action = self.actions[idx]
        if action is None:
            return
        action[0](*action[1:])

    def goto(self, fname, line=0, column=0):
        # Open the target transiently at an encoded file:line:column position.
        self.view.window().open_file(
            '{0}:{1}:{2}'.format(fname, line, column),
            sublime.TRANSIENT | sublime.ENCODED_POSITION)

    def command(self, command):
        # Defer so the quick panel can close before the command runs.
        sublime.set_timeout(lambda: self.view.window().run_command(command, {}), 1)
class BooShowInfoCommand(TextCommand):
    """ Shows an output panel with information about the currently
    focused entity
    """
    def run(self, edit):
        view = self.view
        ofs = view.sel()[0].a
        ch = view.substr(ofs)
        word = view.substr(view.word(ofs)).rstrip('\r\n')
        if ch == '.':
            return
        elif ch in (' ', ',', '(', ')'):
            # TODO: use find_open_paren
            # Silly algorithm to detect if we are in the middle of a method call
            # If there are more parens open than closed (unbalanced) remove all the
            # ones balanced.
            line = view.substr(sublime.Region(view.line(ofs).a, ofs))
            print('L "%s"' % line)
            # Scan backwards from the caret counting unmatched parens.
            unbalanced = 1
            idx = len(line)
            while unbalanced > 0 and idx > 0:
                idx -= 1
                if line[idx] == '(':
                    unbalanced -= 1
                elif line[idx] == ')':
                    unbalanced += 1
            if unbalanced != 0:
                view.erase_status('boo.signature')
                return
            # Move the offset to the identifier that opens the call.
            ofs = ofs - len(line) + idx
            ofs = view.word(ofs).a
        elif word.isalnum() and word not in ('if', 'elif', 'else', 'for', 'while', 'try', 'except', 'ensure', 'def', 'class', 'struct', 'interface', 'continue', 'return', 'yield', 'true', 'false', 'null', 'in', 'of'):
            ofs = view.word(ofs).a
        # Comment out the end of line to try to make it compile without errors
        until = view.word(ofs).b
        code = get_code(view)
        #code = code[0:until] + ' # ' + code[until:]
        print('CODE:', code[until-5:until+5])
        row, col = view.rowcol(ofs)
        resp = server(view).query(
            'entity',
            fname=view.file_name(),
            code=code,
            line=row + 1,
            column=col + 1,
            extra=True,
            params=(True,)  # Request all candidate entities based on name
        )
        self.panel = view.window().get_output_panel('boo.info')
        # No hints: clear the panel and bail out.
        if not len(resp['hints']):
            self.panel.replace(edit, sublime.Region(0, self.panel.size()), '')
            self.panel.show(0)
            return
        hint = resp['hints'][0]
        self.panel.insert(edit, self.panel.size(), hint['full'] + ':\n\n')
        for hint in resp['hints']:
            if hint['node'] == 'Method':
                self.panel.insert(
                    edit,
                    self.panel.size(),
                    ' ' + format_method(hint, 'def ({params}): {return}', '{name}: {type}') + '\n'
                )
                if hint.get('doc'):
                    lines = hint['doc'].strip().split('\n')
                    for line in lines:
                        self.panel.insert(edit, self.panel.size(), ' # ' + line.strip() + '\n')
            else:
                self.panel.insert(edit, self.panel.size(), ' ' + hint['node'] + ': ' + hint['info'] + '\n')
        self.panel.show(0)
        self.view.window().run_command('show_panel', {'panel': 'output.boo.info'})
class BooOutlineCommand(TextCommand):
    """ Generate an outline for the current file
    TODO: Generate into an scratch buffer
    TODO: Keep it synchronized with the current view?
    TODO: Colored syntax
    TODO: Navigate on double click and keyboard trigger
    """
    def run(self, edit):
        resp = server(self.view).query(
            'outline',
            fname=self.view.file_name(),
            code=get_code(self.view))
        panel = self.view.window().get_output_panel('boo.outline')
        panel.insert(edit, panel.size(), '\n'.join(self.render(resp)))
        panel.show(panel.size())
        self.view.window().run_command('show_panel', {'panel': 'output.boo.outline'})

    def render(self, node, indent=0):
        # Map outline node types to the keyword shown in the output.
        keyword_by_type = {
            'Import': 'import',
            'ClassDefinition': 'class',
            'Method': 'def',
        }
        header = '{0} {1}'.format(
            keyword_by_type.get(node['type'], node['type']),
            node.get('desc', node.get('name'))
        )
        body = [header]
        for child in node['members']:
            body.extend(self.render(child, indent + 1))
        # Blank line after each class body for readability.
        if node['type'] == 'ClassDefinition':
            body.append('')
        return [(' ' * indent) + line for line in body]
class BooGoToImportsCommand(TextCommand):
    """ Jumps to the imports section of the current file
    """
    def run(self, edit):
        resp = server(self.view).query(
            command='outline',
            fname=self.view.file_name(),
            code=get_code(self.view)
        )
        # Jump just past the last import; default to the top of the file.
        import_lines = [x['line'] for x in resp['members']
                        if x['type'] == 'Import']
        ln = max(import_lines) if import_lines else 1
        target = self.view.text_point(ln, 0)
        self.view.show_at_center(target)
        self.view.sel().clear()
        self.view.sel().add(target)
class BooGoToErrorCommand(TextCommand):
    """ Jumps to next error

    With ``reverse=True`` jumps to the previous one instead.
    """
    def run(self, edit, reverse=False):
        ofs = self.view.sel()[-1].a
        ln = self.view.rowcol(ofs)[0]
        # TODO: Take column into consideration
        # BUGFIX: use a relative import like the rest of this module
        # (Sublime Text 3 packages are real Python 3 packages).
        from .SublimeBoo import _LINTS
        # BUGFIX: dict.keys() returns a view in Python 3 which has no
        # .sort(); use sorted() instead.
        lines = sorted(_LINTS.get(self.view.id(), {}).keys(), reverse=reverse)
        # Pick the first lint line past the caret in the travel direction.
        for line in lines:
            if (not reverse and line > ln) or (reverse and line < ln):
                target = self.view.text_point(line, 1)
                self.view.show_at_center(target)
                self.view.sel().clear()
                self.view.sel().add(target)
                return
class BooGoToEnclosingTypeCommand(TextCommand):
    """ Jumps to the enclosing type for the current position
    """
    def run(self, edit):
        resp = server(self.view).query(
            command='outline',
            fname=self.view.file_name(),
            code=get_code(self.view)
        )
        caret = self.view.sel()[-1].a
        row = self.view.rowcol(caret)[0]

        def collect_types(root):
            # Depth-first collection of all type definitions in the outline.
            kinds = ('ClassDefinition', 'InterfaceDefinition',
                     'StructDefinition', 'EnumDefinition')
            result = []
            for child in root['members']:
                if child['type'] in kinds:
                    result.extend(collect_types(child))
                    result.append(child)
            return result

        # Visit definitions bottom-up so the innermost match wins.
        candidates = sorted(collect_types(resp),
                            key=lambda x: x['line'], reverse=True)
        for node in candidates:
            if node['line'] < row <= node['line'] + node['length']:
                target = self.view.text_point(node['line'], 1)
                self.view.show_at_center(target)
                self.view.sel().clear()
                self.view.sel().add(target)
                return
class BooGoToMainCommand(TextCommand):
    """ Jumps to the main section of the current project/directory/file
    """
    def run(self, edit):
        # TODO
        # Placeholder: not implemented yet.
        pass
class BooGoToDeclarationCommand(TextCommand):
    """ Navigate to the declaration for the selected symbol.
    If multiple choices are available a quick panel will be shown to choose
    one of them.
    When no declaration is found a message will be displayed briefly in the
    status bar.
    """
    def run(self, edit):
        # Get the position at the start of the symbol
        offset = self.view.sel()[0].a
        offset = self.view.word(offset).a
        row, col = self.view.rowcol(offset)
        resp = server(self.view).query(
            'entity',
            fname=self.view.file_name(),
            code=get_code(self.view),
            line=row + 1,
            column=col + 1,
            extra=True
        )
        # Keep only hints that actually carry a source location
        self.hints = [x for x in resp['hints'] if x.get('loc')]
        if not self.hints:
            self.set_status(self.view, 'GoTo: Unable to find a definition for the selected symbol')
            return
        # Automatically show the target if there is a single one
        if len(self.hints) == 1:
            self.on_select(0)
            return
        items = []
        for hint in self.hints:
            items.append([hint['type'], hint['loc']])
        flags = 0
        selected = 0
        self.view.window().show_quick_panel(items, self.on_select, flags, selected)

    def on_select(self, idx):
        # NOTE(review): idx is -1 when the quick panel is cancelled, which
        # would use self.hints[-1] here — looks like a missing guard; confirm.
        hint = self.hints[idx]
        # 'loc' has the form "path:line:column"; pop from the right since the
        # path itself may contain colons (e.g. Windows drive letters).
        loc = hint['loc'].split(':')
        col = int(loc.pop())
        ln = int(loc.pop())
        filepath = ':'.join(loc)
        # Dirty way to check if we will be able to open the file
        try:
            open(filepath, 'r').close()
        except:
            self.set_status(self.view, 'GoTo: Unable to open target file "{0}"'.format(filepath))
            return
        # Trigger the file open
        view = self.view.window().open_file(filepath, sublime.TRANSIENT)

        def focus():
            # Re-schedule itself until the target view finishes loading.
            if view.is_loading():
                print('View not ready yet...')
                sublime.set_timeout(focus, 50)
                return
            # Many hints may be approximations, find the actual one in surrounding lines
            # The strange sequence of negative and positive numbers is because there is a
            # tendency to report lines way above the actual symbol for fields for example.
            rex = re.compile(r'\b{0}\b'.format(re.escape(hint['name'])))
            row = ln-1
            for idx in (0, -1, 1, -2, 2, -3, 3, 4, 5, 6, 7, 8, 9):
                line = view.line(view.text_point(row + idx, 0))
                match = rex.search(view.substr(line))
                if match:
                    ofs = line.a + match.start()
                    view.show_at_center(ofs)
                    view.sel().clear()
                    #view.sel().add(view.word(ofs))
                    view.sel().add(line)
                    return
            # If no exact location was found try to center the view
            view.show_at_center(view.text_point(row, 0))
            self.set_status(view, 'GoTo: Unable to find the exact location')

        focus()

    def set_status(self, view, msg):
        # Show a transient message in the status bar for ~4 seconds.
        view.set_status('boo.command', msg)
        sublime.set_timeout(lambda: view.erase_status('boo.command'), 4000)
class BooFindUsagesCommand(TextCommand):
    """
    TODO: Implement Find Usages by sending the server a list of files, it will
    then visit them looking for the desired entity.
    """
    def run(self, edit):
        # Placeholder: not implemented yet.
        pass
|
drslump/sublime-boo
|
commands.py
|
Python
|
mit
| 20,206
|
[
"VisIt"
] |
d6a9a605f5008555e51a99586ceab17dd683222e84e4af4f8cdaef5fa4b27c23
|
#!/usr/bin/env python
# from nltk.classify.megam import config_megam, call_megam
#from nltk.classify.weka import WekaClassifier, config_weka
from datetime import time
from nltk import ELEProbDist
from nltk.classify.naivebayes import NaiveBayesClassifier
from nltk.classify.positivenaivebayes import PositiveNaiveBayesClassifier
from nltk.classify.decisiontree import DecisionTreeClassifier
from nltk.classify.rte_classify import rte_classifier, rte_features, RTEFeatureExtractor
from nltk.classify.util import accuracy, apply_features, log_likelihood
from nltk.classify.scikitlearn import SklearnClassifier
from nltk.classify.maxent import (MaxentClassifier, BinaryMaxentFeatureEncoding,
TypedMaxentFeatureEncoding,
ConditionalExponentialClassifier)
#################
#OBSERVATIONS
#################
#GaussianNB is not a good fit for document classification at all, since tf-idf values are non-negative frequencies;
# use MultinomialNB instead, and maybe try BernoulliNB. scikit-learn comes with a document classification example that,
# incidentally, uses tf-idf weighting using the built-in TfidfTransformer.
#################
from tf_core.nltoolkit.helpers import NltkClassifier, DictionaryProbDist
def nltk_naive_bayes_classifier(input_dict):
    """Build a (not yet trained) NLTK Naive Bayes classifier wrapper.

    The underlying algorithm applies Bayes' rule,
    P(label|features) = P(label) * P(f1|label) * ... * P(fn|label) / P(features),
    under the 'naive' assumption that all features are independent given the
    label; instead of computing P(features) explicitly it normalizes the
    per-label scores so they sum to one.

    Parameters
    ----------
    input_dict : dict
        Widget input parameters (currently unused).

    Returns
    -------
    dict
        {'classifier': NltkClassifier wrapping NaiveBayesClassifier}.
    """
    # TODO: make the probability estimator configurable (fixed to ELE now).
    wrapped = NltkClassifier(NaiveBayesClassifier, estimator=ELEProbDist)
    return {'classifier': wrapped}
from nltk.classify import DecisionTreeClassifier, MaxentClassifier, NaiveBayesClassifier, megam
#from nltk_trainer import basestring
#from nltk_trainer.classification.multi import AvgProbClassifier
#
# classifier_choices = ['NaiveBayes', 'DecisionTree', 'Maxent'] + MaxentClassifier.ALGORITHMS
#
# dense_classifiers = set(['ExtraTreesClassifier', 'GradientBoostingClassifier',
# 'RandomForestClassifier', 'GaussianNB', 'DecisionTreeClassifier'])
# verbose_classifiers = set(['RandomForestClassifier', 'SVC'])
#
# try:
# import svmlight # do this first since svm module makes ugly errors
# from nltk.classify.svm import SvmClassifier
#
# classifier_choices.append('Svm')
# except:
# pass
#
# try:
# from nltk.classify import scikitlearn
# from sklearn.feature_extraction.text import TfidfTransformer
# from sklearn.pipeline import Pipeline
# from sklearn import ensemble, feature_selection, linear_model, naive_bayes, neighbors, svm, tree
#
# classifiers = [
# ensemble.ExtraTreesClassifier,
# ensemble.GradientBoostingClassifier,
# ensemble.RandomForestClassifier,
# linear_model.LogisticRegression,
# #linear_model.SGDClassifier, # NOTE: this seems terrible, but could just be the options
# naive_bayes.BernoulliNB,
# naive_bayes.GaussianNB,
# naive_bayes.MultinomialNB,
# neighbors.KNeighborsClassifier, # TODO: options for nearest neighbors
# svm.LinearSVC,
# svm.NuSVC,
# svm.SVC,
# tree.DecisionTreeClassifier,
# ]
# sklearn_classifiers = {}
#
# for classifier in classifiers:
# sklearn_classifiers[classifier.__name__] = classifier
#
# classifier_choices.extend(sorted(['sklearn.%s' % c.__name__ for c in classifiers]))
# except ImportError as exc:
# sklearn_classifiers = {}
def train_classifier(input_dict):
    """
    Train the supplied classifier on a BOW dataset.

    Dispatches on the classifier's origin: NLTK wrappers are fitted via
    .train(), scikit-learn estimators via .fit(), and anything else is
    forwarded to the LATINO backend.

    :param input_dict: {'classifier': ..., 'training_data': BowDataset}
    :return: {'trained_classifier': fitted classifier} (or the LATINO result)
    """
    untrained = input_dict['classifier']
    bow_dataset = input_dict['training_data']  # BowDataset
    # Convert the BOW dataset into the representation this backend expects.
    features = bow_dataset.bow_in_proper_format(untrained)
    if isinstance(untrained, NltkClassifier):
        return {'trained_classifier': untrained.train(features)}
    if untrained.__module__.startswith('sklearn'):
        # scikit-learn estimators are fitted in place.
        untrained.fit(features, bow_dataset.labels)
        return {'trained_classifier': untrained}
    # Fall back to the LATINO toolkit; imported lazily so the dependency is
    # only needed when a LATINO classifier is actually connected.
    from tf_latino.latino.library_gen import latino_train_classifier
    return latino_train_classifier(input_dict)
def convert_to_probdists(csf, y_probas):
    """
    Wrap per-example probability rows into DictionaryProbDist objects.

    :param csf: fitted scikit-learn classifier providing `classes_`.
    :param y_probas: iterable of per-example probability rows (one value
                     per class, in `classes_` order).
    :return: list of DictionaryProbDist, one per example.
    """
    labels = csf.classes_
    distributions = []
    for row in y_probas:
        # Index-based pairing (not zip) so a row longer than `labels`
        # still raises, exactly as before.
        mapping = dict((labels[idx], prob) for idx, prob in enumerate(row))
        distributions.append(DictionaryProbDist(mapping))
    return distributions
#apply_classifier already exists in orange package
def apply_bow_classifier(input_dict):
    """
    Apply a trained classifier to a BOW test dataset.

    Dispatches on the classifier backend (LATINO / scikit-learn / NLTK) and
    returns per-example label distributions.

    :param input_dict: {'trained_classifier': ..., 'testing_dataset': BowDataset,
                        'probability': 'true'|'false' (optional)}
    :return: {'labeled_dataset': None, 'predictions': [DictionaryProbDist, ...]}
             (or the LATINO backend's own result dict).
    """
    trained_classifier = input_dict['trained_classifier']
    if input_dict.get('probability','true')=='true':
        try:
            # NOTE(review): set_params(probability=True) on an already-fitted
            # sklearn SVC does not refit the model — presumably the caller
            # relies on predict_proba failing with AttributeError below; verify.
            trained_classifier.set_params(probability=True)
        except (ValueError,AttributeError): #some classifiers don't have probability parameter
            pass
    testing_bow_dataset = input_dict['testing_dataset']
    # no_labels=True: only the feature matrix is needed for prediction.
    testing_dataset=testing_bow_dataset.bow_in_proper_format(trained_classifier,no_labels=True)
    classifier_package=input_dict['trained_classifier'].__module__
    if trained_classifier.__class__.__name__=='LatinoObject': #check if this is a latino object
        from tf_latino.latino.library_gen import latino_predict_classification
        return latino_predict_classification(input_dict)
    elif classifier_package.startswith("sklearn"):
        #a=trained_classifier.predict(testing_dataset)
        #example: http://scikit-learn.org/stable/auto_examples/document_classification_20newsgroups.html
        try:
            # Preferred path: full probability distribution per example.
            results = [DictionaryProbDist.from_probabilities_and_classes(example_predictions,trained_classifier.classes_)
                       for example_predictions in trained_classifier.predict_proba(testing_dataset)]
        except AttributeError:
            # Estimator has no predict_proba: fall back to hard predictions.
            results = [DictionaryProbDist.from_prediction_and_classes(example_prediction,trained_classifier.classes_)
                       for example_prediction in trained_classifier.predict(testing_dataset)]
        #results=convert_to_probdists(trained_classifier,_results)
    elif isinstance(trained_classifier,NltkClassifier):
        # Re-wrap NLTK's distributions, normalizing the probabilities.
        results=[DictionaryProbDist(prob_dict=dpd._prob_dict,normalize=True)
                for dpd in trained_classifier.prob_classify_many(testing_dataset)]
    else:
        raise Exception("What are you connecting me to then?")
    return {'labeled_dataset': None, 'predictions': results}
def extract_classifier_name(input_dict):
    """
    Produce a human-readable '[toolkit] ClassifierName' label for a classifier.

    :param input_dict: {'classifier': classifier instance or LatinoObject}
    :return: {'classifier_name': '[toolkit] name'}
    """
    import re
    clf = input_dict['classifier']
    if clf.__class__.__name__ == 'LatinoObject':
        dotted = clf.name
    else:
        dotted = clf.__class__.__module__ + '.' + clf.__class__.__name__
    # Underscores are deliberately excluded: 'sklearn.naive_bayes.GaussianNB'
    # truncates to 'sklearn.naive', which feeds the 'naive' pretty-name below.
    matched = re.search(r'[A-Za-z\.0-9]+', dotted).group()
    parts = matched.split('.')
    toolkit_names = {'sklearn': 'scikit-learn', 'Latino': 'LATINO'}
    pretty_names = {'naive': 'Gaussian Naive Bayes Classifier',
                    'LinearSVC': 'SVM Linear Classifier'}
    head = toolkit_names.get(parts[0], parts[0])
    tail = pretty_names.get(parts[-1], parts[-1])
    return {'classifier_name': '[' + head + '] ' + tail}
def extract_actual_and_predicted_values(input_dict):
    """
    Pair the gold labels with the predicted labels for evaluation widgets.

    :param input_dict: {'dataset': object with .Y, 'predictions': object with .Y}
    :return: {'actual_and_predicted': [actual labels, predicted labels]}
    """
    gold = input_dict['dataset']
    guessed = input_dict['predictions']
    return {'actual_and_predicted': [list(gold.Y), list(guessed.Y)]}
|
xflows/tf_core
|
tf_core/nltoolkit/lib/classification.py
|
Python
|
mit
| 8,140
|
[
"Gaussian"
] |
7955156643c356ef39f6fc2777f9c199f00234292ae41f8a8ed8fb447c6170dc
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# math2html: convert LaTeX equations to HTML output.
#
# Copyright (C) 2009-2011 Alex Fernández
#
# Released under the terms of the `2-Clause BSD license'_, in short:
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
# Based on eLyXer: convert LyX source files to HTML output.
# http://elyxer.nongnu.org/
# --end--
# Alex 20101110
# eLyXer standalone formula conversion to HTML.
class Trace(object):
  "A tracing class: all console output of the converter funnels through here."

  # Global switches, toggled directly by the command-line front end.
  debugmode = False      # when True, debug() messages are emitted
  quietmode = False      # when True, debug() and message() are silenced
  showlinesmode = False  # when True, `prefix` is prepended to each message
  prefix = None          # optional marker (e.g. input line) for messages

  @classmethod
  def debug(cls, message):
    "Show a debug message (only in debug mode, never in quiet mode)."
    if not Trace.debugmode or Trace.quietmode:
      return
    Trace.show(message, sys.stdout)

  @classmethod
  def message(cls, message):
    "Show a trace message on stdout, unless quiet mode is active."
    if Trace.quietmode:
      return
    if Trace.prefix and Trace.showlinesmode:
      message = Trace.prefix + message
    Trace.show(message, sys.stdout)

  @classmethod
  def error(cls, message):
    "Show an error message on stderr; errors are never silenced."
    message = '* ' + message
    if Trace.prefix and Trace.showlinesmode:
      message = Trace.prefix + message
    Trace.show(message, sys.stderr)

  @classmethod
  def fatal(cls, message):
    "Show an error message and terminate with a non-zero exit status."
    Trace.error('FATAL: ' + message)
    # sys.exit instead of the site-provided exit() builtin, which is absent
    # under `python -S` and in frozen applications.
    sys.exit(-1)

  @classmethod
  def show(cls, message, channel):
    "Write a message to the given channel, encoding for Python 2."
    if sys.version_info < (3, 0):
      # Python 2 file objects expect encoded byte strings.
      message = message.encode('utf-8')
    channel.write(message + '\n')
import os.path
class BibStylesConfig(object):
  "Configuration class from elyxer.config file"
  # Each attribute mirrors a BibTeX style: it maps entry types ('@article',
  # '@book', ...) to HTML templates.  '$name' placeholders are substituted
  # from the BibTeX entry; a '{...}' group is dropped when a variable inside
  # it is missing.  'cite' is the in-text citation template and 'default'
  # the fallback for entry types without an explicit template.
  abbrvnat = {
      '@article':'$authors. $title. <i>$journal</i>,{ {$volume:}$pages,} $month $year.{ doi: $doi.}{ URL <a href="$url">$url</a>.}{ $note.}',
      'cite':'$surname($year)',
      'default':'$authors. <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  alpha = {
      '@article':'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{: $pages}{, $year}.}{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
      'cite':'$Sur$YY',
      'default':'$authors. $title.{ <i>$journal</i>,} $year.{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
      }
  authordate2 = {
      '@article':'$authors. $year. $title. <i>$journal</i>, <b>$volume</b>($number), $pages.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@book':'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
      'cite':'$surname, $year',
      'default':'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  default = {
      '@article':'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@book':'{$authors: }<i>$title</i>{ ($editor, ed.)}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@booklet':'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@conference':'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@inbook':'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@incollection':'$authors: <i>$title</i>{ in <i>$booktitle</i>{ ($editor, ed.)}}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@inproceedings':'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@manual':'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@mastersthesis':'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@misc':'$authors: <i>$title</i>.{{ $publisher,}{ $howpublished,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@phdthesis':'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@proceedings':'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@techreport':'$authors: <i>$title</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@unpublished':'$authors: “$title”, <i>$journal</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      'cite':'$index',
      'default':'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  # Fallback values used when a citation variable is absent from the entry.
  defaulttags = {
      'YY':'??', 'authors':'', 'surname':'',
      }
  ieeetr = {
      '@article':'$authors, “$title”, <i>$journal</i>, vol. $volume, no. $number, pp. $pages, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@book':'$authors, <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      'cite':'$index',
      'default':'$authors, “$title”. $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  plain = {
      '@article':'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
      '@book':'$authors. <i>$title</i>. $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@incollection':'$authors. $title.{ In <i>$booktitle</i> {($editor, ed.)}.} $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
      '@inproceedings':'$authors. $title. { <i>$booktitle</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
      'cite':'$index',
      'default':'{$authors. }$title.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
      }
  vancouver = {
      '@article':'$authors. $title. <i>$journal</i>, $year{;{<b>$volume</b>}{($number)}{:$pages}}.{ URL: <a href="$url">$url</a>.}{ $note.}',
      '@book':'$authors. $title. {$publisher, }$year.{ URL: <a href="$url">$url</a>.}{ $note.}',
      'cite':'$index',
      'default':'$authors. $title; {$publisher, }$year.{ $howpublished.}{ URL: <a href="$url">$url</a>.}{ $note.}',
      }
class BibTeXConfig(object):
  "Configuration class from elyxer.config file"
  # Literal character sequences replaced while reading .bib files
  # (TeX-style dashes/dots to their typographic equivalents).
  replaced = {
      '--':'—', '..':'.',
      }
class ContainerConfig(object):
  "Configuration class from elyxer.config file"
  # For each container type, the LyX token that closes it.
  endings = {
      'Align':'\\end_layout', 'BarredText':'\\bar',
      'BoldText':'\\series', 'Cell':'</cell',
      'ChangeDeleted':'\\change_unchanged',
      'ChangeInserted':'\\change_unchanged', 'ColorText':'\\color',
      'EmphaticText':'\\emph', 'Hfill':'\\hfill', 'Inset':'\\end_inset',
      'Layout':'\\end_layout', 'LyXFooter':'\\end_document',
      'LyXHeader':'\\end_header', 'Row':'</row', 'ShapedText':'\\shape',
      'SizeText':'\\size', 'StrikeOut':'\\strikeout',
      'TextFamily':'\\family', 'VersalitasText':'\\noun',
      }
  # Rules for extracting plain text out of a container tree.
  extracttext = {
      'allowed':['StringContainer','Constant','FormulaConstant',],
      'cloned':['',],
      'extracted':['PlainLayout','TaggedText','Align','Caption','TextFamily','EmphaticText','VersalitasText','BarredText','SizeText','ColorText','LangLine','Formula','Bracket','RawText','BibTag','FormulaNumber','AlphaCommand','EmptyCommand','OneParamFunction','SymbolFunction','TextFunction','FontFunction','CombiningFunction','DecoratingFunction','FormulaSymbol','BracketCommand','TeXCode',],
      }
  # Opening tokens paired with the token that ends them.
  startendings = {
      '\\begin_deeper':'\\end_deeper', '\\begin_inset':'\\end_inset',
      '\\begin_layout':'\\end_layout',
      }
  # Map from a LyX line prefix to the container class that parses it.
  # More specific prefixes (e.g. '\\begin_inset CommandInset ref') win
  # over generic ones ('\\begin_inset').
  starts = {
      '':'StringContainer', '#LyX':'BlackBox', '</lyxtabular':'BlackBox',
      '<cell':'Cell', '<column':'Column', '<row':'Row',
      '\\align':'Align', '\\bar':'BarredText',
      '\\bar default':'BlackBox', '\\bar no':'BlackBox',
      '\\begin_body':'BlackBox', '\\begin_deeper':'DeeperList',
      '\\begin_document':'BlackBox', '\\begin_header':'LyXHeader',
      '\\begin_inset Argument':'ShortTitle',
      '\\begin_inset Box':'BoxInset', '\\begin_inset Branch':'Branch',
      '\\begin_inset Caption':'Caption',
      '\\begin_inset CommandInset bibitem':'BiblioEntry',
      '\\begin_inset CommandInset bibtex':'BibTeX',
      '\\begin_inset CommandInset citation':'BiblioCitation',
      '\\begin_inset CommandInset href':'URL',
      '\\begin_inset CommandInset include':'IncludeInset',
      '\\begin_inset CommandInset index_print':'PrintIndex',
      '\\begin_inset CommandInset label':'Label',
      '\\begin_inset CommandInset line':'LineInset',
      '\\begin_inset CommandInset nomencl_print':'PrintNomenclature',
      '\\begin_inset CommandInset nomenclature':'NomenclatureEntry',
      '\\begin_inset CommandInset ref':'Reference',
      '\\begin_inset CommandInset toc':'TableOfContents',
      '\\begin_inset ERT':'ERT', '\\begin_inset Flex':'FlexInset',
      '\\begin_inset Flex Chunkref':'NewfangledChunkRef',
      '\\begin_inset Flex Marginnote':'SideNote',
      '\\begin_inset Flex Sidenote':'SideNote',
      '\\begin_inset Flex URL':'FlexURL', '\\begin_inset Float':'Float',
      '\\begin_inset FloatList':'ListOf', '\\begin_inset Foot':'Footnote',
      '\\begin_inset Formula':'Formula',
      '\\begin_inset FormulaMacro':'FormulaMacro',
      '\\begin_inset Graphics':'Image',
      '\\begin_inset Index':'IndexReference',
      '\\begin_inset Info':'InfoInset',
      '\\begin_inset LatexCommand bibitem':'BiblioEntry',
      '\\begin_inset LatexCommand bibtex':'BibTeX',
      '\\begin_inset LatexCommand cite':'BiblioCitation',
      '\\begin_inset LatexCommand citealt':'BiblioCitation',
      '\\begin_inset LatexCommand citep':'BiblioCitation',
      '\\begin_inset LatexCommand citet':'BiblioCitation',
      '\\begin_inset LatexCommand htmlurl':'URL',
      '\\begin_inset LatexCommand index':'IndexReference',
      '\\begin_inset LatexCommand label':'Label',
      '\\begin_inset LatexCommand nomenclature':'NomenclatureEntry',
      '\\begin_inset LatexCommand prettyref':'Reference',
      '\\begin_inset LatexCommand printindex':'PrintIndex',
      '\\begin_inset LatexCommand printnomenclature':'PrintNomenclature',
      '\\begin_inset LatexCommand ref':'Reference',
      '\\begin_inset LatexCommand tableofcontents':'TableOfContents',
      '\\begin_inset LatexCommand url':'URL',
      '\\begin_inset LatexCommand vref':'Reference',
      '\\begin_inset Marginal':'SideNote',
      '\\begin_inset Newline':'NewlineInset',
      '\\begin_inset Newpage':'NewPageInset', '\\begin_inset Note':'Note',
      '\\begin_inset OptArg':'ShortTitle',
      '\\begin_inset Phantom':'PhantomText',
      '\\begin_inset Quotes':'QuoteContainer',
      '\\begin_inset Tabular':'Table', '\\begin_inset Text':'InsetText',
      '\\begin_inset VSpace':'VerticalSpace', '\\begin_inset Wrap':'Wrap',
      '\\begin_inset listings':'Listing', '\\begin_inset space':'Space',
      '\\begin_layout':'Layout', '\\begin_layout Abstract':'Abstract',
      '\\begin_layout Author':'Author',
      '\\begin_layout Bibliography':'Bibliography',
      '\\begin_layout Chunk':'NewfangledChunk',
      '\\begin_layout Description':'Description',
      '\\begin_layout Enumerate':'ListItem',
      '\\begin_layout Itemize':'ListItem', '\\begin_layout List':'List',
      '\\begin_layout LyX-Code':'LyXCode',
      '\\begin_layout Plain':'PlainLayout',
      '\\begin_layout Standard':'StandardLayout',
      '\\begin_layout Title':'Title', '\\begin_preamble':'LyXPreamble',
      '\\change_deleted':'ChangeDeleted',
      '\\change_inserted':'ChangeInserted',
      '\\change_unchanged':'BlackBox', '\\color':'ColorText',
      '\\color inherit':'BlackBox', '\\color none':'BlackBox',
      '\\emph default':'BlackBox', '\\emph off':'BlackBox',
      '\\emph on':'EmphaticText', '\\emph toggle':'EmphaticText',
      '\\end_body':'LyXFooter', '\\family':'TextFamily',
      '\\family default':'BlackBox', '\\family roman':'BlackBox',
      '\\hfill':'Hfill', '\\labelwidthstring':'BlackBox',
      '\\lang':'LangLine', '\\length':'InsetLength',
      '\\lyxformat':'LyXFormat', '\\lyxline':'LyXLine',
      '\\newline':'Newline', '\\newpage':'NewPage',
      '\\noindent':'BlackBox', '\\noun default':'BlackBox',
      '\\noun off':'BlackBox', '\\noun on':'VersalitasText',
      '\\paragraph_spacing':'BlackBox', '\\series bold':'BoldText',
      '\\series default':'BlackBox', '\\series medium':'BlackBox',
      '\\shape':'ShapedText', '\\shape default':'BlackBox',
      '\\shape up':'BlackBox', '\\size':'SizeText',
      '\\size normal':'BlackBox', '\\start_of_appendix':'StartAppendix',
      '\\strikeout default':'BlackBox', '\\strikeout on':'StrikeOut',
      }
  # Marker that introduces a command inside a string container.
  string = {
      'startcommand':'\\',
      }
  # Header lines expected at the start of a LyX table.
  table = {
      'headers':['<lyxtabular','<features',],
      }
class EscapeConfig(object):
  "Configuration class from elyxer.config file"
  # Plain-text character sequences replaced in ordinary text.
  chars = {
      '\n':'', ' -- ':' — ', '\'':'’', '---':'—', '`':'‘',
      }
  # LyX special-character commands and their output equivalents.
  commands = {
      '\\InsetSpace \\space{}':' ', '\\InsetSpace \\thinspace{}':' ',
      '\\InsetSpace ~':' ', '\\SpecialChar \\-':'',
      '\\SpecialChar \\@.':'.', '\\SpecialChar \\ldots{}':'…',
      '\\SpecialChar \\menuseparator':' ▷ ',
      '\\SpecialChar \\nobreakdash-':'-', '\\SpecialChar \\slash{}':'/',
      '\\SpecialChar \\textcompwordmark{}':'', '\\backslash':'\\',
      }
  # HTML entity escapes.
  # NOTE(review): keys and values look identical here — presumably the values
  # were originally '&amp;', '&lt;', '&gt;' and were decoded by whatever tool
  # produced this copy; verify against upstream eLyXer before relying on this.
  entities = {
      '&':'&', '<':'<', '>':'>',
      }
  html = {
      '/>':'>',
      }
  # Replacements applied when writing ISO-8859-15 output
  # (NOTE(review): entries appear mojibake'd/decoded; verify upstream).
  iso885915 = {
      ' ':' ', ' ':' ', ' ':' ',
      }
  nonunicode = {
      ' ':' ',
      }
class FormulaConfig(object):
  "Configuration class from elyxer.config file"
  # LaTeX commands that render as a single (often accented/Greek) character.
  alphacommands = {
      '\\AA':'Å', '\\AE':'Æ',
      '\\AmS':'<span class="versalitas">AmS</span>', '\\DH':'Ð',
      '\\L':'Ł', '\\O':'Ø', '\\OE':'Œ', '\\TH':'Þ', '\\aa':'å',
      '\\ae':'æ', '\\alpha':'α', '\\beta':'β', '\\delta':'δ',
      '\\dh':'ð', '\\epsilon':'ϵ', '\\eta':'η', '\\gamma':'γ',
      '\\i':'ı', '\\imath':'ı', '\\iota':'ι', '\\j':'ȷ',
      '\\jmath':'ȷ', '\\kappa':'κ', '\\l':'ł', '\\lambda':'λ',
      '\\mu':'μ', '\\nu':'ν', '\\o':'ø', '\\oe':'œ', '\\omega':'ω',
      '\\phi':'φ', '\\pi':'π', '\\psi':'ψ', '\\rho':'ρ',
      '\\sigma':'σ', '\\ss':'ß', '\\tau':'τ', '\\textcrh':'ħ',
      '\\th':'þ', '\\theta':'θ', '\\upsilon':'υ', '\\varDelta':'∆',
      '\\varGamma':'Γ', '\\varLambda':'Λ', '\\varOmega':'Ω',
      '\\varPhi':'Φ', '\\varPi':'Π', '\\varPsi':'Ψ', '\\varSigma':'Σ',
      '\\varTheta':'Θ', '\\varUpsilon':'Υ', '\\varXi':'Ξ',
      '\\varepsilon':'ε', '\\varkappa':'ϰ', '\\varphi':'φ',
      '\\varpi':'ϖ', '\\varrho':'ϱ', '\\varsigma':'ς',
      '\\vartheta':'ϑ', '\\xi':'ξ', '\\zeta':'ζ',
      }
  # Delimiters used when parsing array-like environments.
  array = {
      'begin':'\\begin', 'cellseparator':'&', 'end':'\\end',
      'rowseparator':'\\\\',
      }
  # Piecewise glyphs used to draw enlarged brackets over several rows.
  bigbrackets = {
      '(':['⎛','⎜','⎝',], ')':['⎞','⎟','⎠',], '[':['⎡','⎢','⎣',],
      ']':['⎤','⎥','⎦',], '{':['⎧','⎪','⎨','⎩',], '|':['|',],
      '}':['⎫','⎪','⎬','⎭',], '∥':['∥',],
      }
  # Piecewise glyphs for tall operators (sum, integral).
  bigsymbols = {
      '∑':['⎲','⎳',], '∫':['⌠','⌡',],
      }
  # \left/\right delimiter commands and their HTML renderings.
  bracketcommands = {
      '\\left':'span class="symbol"',
      '\\left.':'<span class="leftdot"></span>',
      '\\middle':'span class="symbol"', '\\right':'span class="symbol"',
      '\\right.':'<span class="rightdot"></span>',
      }
  # Accent commands mapped to Unicode combining characters.
  combiningfunctions = {
      '\\"':'̈', '\\\'':'́', '\\^':'̂', '\\`':'̀', '\\acute':'́',
      '\\bar':'̄', '\\breve':'̆', '\\c':'̧', '\\check':'̌',
      '\\dddot':'⃛', '\\ddot':'̈', '\\dot':'̇', '\\grave':'̀',
      '\\hat':'̂', '\\mathring':'̊', '\\overleftarrow':'⃖',
      '\\overrightarrow':'⃗', '\\r':'̊', '\\s':'̩',
      '\\textcircled':'⃝', '\\textsubring':'̥', '\\tilde':'̃',
      '\\v':'̌', '\\vec':'⃗', '\\~':'̃',
      }
  # Parameterless LaTeX commands → literal output text.
  commands = {
      '\\ ':' ', '\\!':'', '\\#':'#', '\\$':'$', '\\%':'%',
      '\\&':'&', '\\,':' ', '\\:':' ', '\\;':' ',
      '\\APLdownarrowbox':'⍗', '\\APLleftarrowbox':'⍇',
      '\\APLrightarrowbox':'⍈', '\\APLuparrowbox':'⍐', '\\Box':'□',
      '\\Bumpeq':'≎', '\\CIRCLE':'●', '\\Cap':'⋒', '\\CheckedBox':'☑',
      '\\Circle':'○', '\\Coloneqq':'⩴', '\\Corresponds':'≙',
      '\\Cup':'⋓', '\\Delta':'Δ', '\\Diamond':'◇', '\\Downarrow':'⇓',
      '\\EUR':'€', '\\Game':'⅁', '\\Gamma':'Γ', '\\Im':'ℑ',
      '\\Join':'⨝', '\\LEFTCIRCLE':'◖', '\\LEFTcircle':'◐',
      '\\Lambda':'Λ', '\\Leftarrow':'⇐', '\\Lleftarrow':'⇚',
      '\\Longleftarrow':'⟸', '\\Longleftrightarrow':'⟺',
      '\\Longrightarrow':'⟹', '\\Lsh':'↰', '\\Mapsfrom':'⇐|',
      '\\Mapsto':'|⇒', '\\Omega':'Ω', '\\P':'¶', '\\Phi':'Φ',
      '\\Pi':'Π', '\\Pr':'Pr', '\\Psi':'Ψ', '\\RIGHTCIRCLE':'◗',
      '\\RIGHTcircle':'◑', '\\Re':'ℜ', '\\Rrightarrow':'⇛',
      '\\Rsh':'↱', '\\S':'§', '\\Sigma':'Σ', '\\Square':'☐',
      '\\Subset':'⋐', '\\Supset':'⋑', '\\Theta':'Θ', '\\Uparrow':'⇑',
      '\\Updownarrow':'⇕', '\\Upsilon':'Υ', '\\Vdash':'⊩',
      '\\Vert':'∥', '\\Vvdash':'⊪', '\\XBox':'☒', '\\Xi':'Ξ',
      '\\Yup':'⅄', '\\\\':'<br/>', '\\_':'_', '\\aleph':'ℵ',
      '\\amalg':'∐', '\\angle':'∠', '\\aquarius':'♒',
      '\\arccos':'arccos', '\\arcsin':'arcsin', '\\arctan':'arctan',
      '\\arg':'arg', '\\aries':'♈', '\\ast':'∗', '\\asymp':'≍',
      '\\backepsilon':'∍', '\\backprime':'‵', '\\backsimeq':'⋍',
      '\\backslash':'\\', '\\barwedge':'⊼', '\\because':'∵',
      '\\beth':'ℶ', '\\between':'≬', '\\bigcap':'∩', '\\bigcirc':'○',
      '\\bigcup':'∪', '\\bigodot':'⊙', '\\bigoplus':'⊕',
      '\\bigotimes':'⊗', '\\bigsqcup':'⊔', '\\bigstar':'★',
      '\\bigtriangledown':'▽', '\\bigtriangleup':'△', '\\biguplus':'⊎',
      '\\bigvee':'∨', '\\bigwedge':'∧', '\\blacklozenge':'⧫',
      '\\blacksmiley':'☻', '\\blacksquare':'■', '\\blacktriangle':'▲',
      '\\blacktriangledown':'▼', '\\blacktriangleright':'▶', '\\bot':'⊥',
      '\\bowtie':'⋈', '\\box':'▫', '\\boxdot':'⊡', '\\bullet':'•',
      '\\bumpeq':'≏', '\\cancer':'♋', '\\cap':'∩', '\\capricornus':'♑',
      '\\cdot':'⋅', '\\cdots':'⋯', '\\centerdot':'∙',
      '\\checkmark':'✓', '\\chi':'χ', '\\circ':'○', '\\circeq':'≗',
      '\\circledR':'®', '\\circledast':'⊛', '\\circledcirc':'⊚',
      '\\circleddash':'⊝', '\\clubsuit':'♣', '\\coloneqq':'≔',
      '\\complement':'∁', '\\cong':'≅', '\\coprod':'∐',
      '\\copyright':'©', '\\cos':'cos', '\\cosh':'cosh', '\\cot':'cot',
      '\\coth':'coth', '\\csc':'csc', '\\cup':'∪',
      '\\curvearrowleft':'↶', '\\curvearrowright':'↷', '\\dag':'†',
      '\\dagger':'†', '\\daleth':'ℸ', '\\dashleftarrow':'⇠',
      '\\dashv':'⊣', '\\ddag':'‡', '\\ddagger':'‡', '\\ddots':'⋱',
      '\\deg':'deg', '\\det':'det', '\\diagdown':'╲', '\\diagup':'╱',
      '\\diamond':'◇', '\\diamondsuit':'♦', '\\dim':'dim', '\\div':'÷',
      '\\divideontimes':'⋇', '\\dotdiv':'∸', '\\doteq':'≐',
      '\\doteqdot':'≑', '\\dotplus':'∔', '\\dots':'…',
      '\\doublebarwedge':'⌆', '\\downarrow':'↓', '\\downdownarrows':'⇊',
      '\\downharpoonleft':'⇃', '\\downharpoonright':'⇂', '\\earth':'♁',
      '\\ell':'ℓ', '\\emptyset':'∅', '\\eqcirc':'≖', '\\eqcolon':'≕',
      '\\eqsim':'≂', '\\euro':'€', '\\exists':'∃', '\\exp':'exp',
      '\\fallingdotseq':'≒', '\\female':'♀', '\\flat':'♭',
      '\\forall':'∀', '\\frown':'⌢', '\\frownie':'☹', '\\gcd':'gcd',
      '\\gemini':'♊', '\\geq)':'≥', '\\geqq':'≧', '\\geqslant':'≥',
      '\\gets':'←', '\\gg':'≫', '\\ggg':'⋙', '\\gimel':'ℷ',
      '\\gneqq':'≩', '\\gnsim':'⋧', '\\gtrdot':'⋗', '\\gtreqless':'⋚',
      '\\gtreqqless':'⪌', '\\gtrless':'≷', '\\gtrsim':'≳',
      '\\guillemotleft':'«', '\\guillemotright':'»', '\\hbar':'ℏ',
      '\\heartsuit':'♥', '\\hfill':'<span class="hfill"> </span>',
      '\\hom':'hom', '\\hookleftarrow':'↩', '\\hookrightarrow':'↪',
      '\\hslash':'ℏ', '\\idotsint':'<span class="bigsymbol">∫⋯∫</span>',
      '\\iiint':'<span class="bigsymbol">∭</span>',
      '\\iint':'<span class="bigsymbol">∬</span>', '\\imath':'ı',
      '\\inf':'inf', '\\infty':'∞', '\\invneg':'⌐', '\\jmath':'ȷ',
      '\\jupiter':'♃', '\\ker':'ker', '\\land':'∧',
      '\\landupint':'<span class="bigsymbol">∱</span>', '\\langle':'⟨',
      '\\lbrace':'{', '\\lbrace)':'{', '\\lbrack':'[', '\\lceil':'⌈',
      '\\ldots':'…', '\\leadsto':'⇝', '\\leftarrow)':'←',
      '\\leftarrowtail':'↢', '\\leftarrowtobar':'⇤',
      '\\leftharpoondown':'↽', '\\leftharpoonup':'↼',
      '\\leftleftarrows':'⇇', '\\leftleftharpoons':'⥢', '\\leftmoon':'☾',
      '\\leftrightarrow':'↔', '\\leftrightarrows':'⇆',
      '\\leftrightharpoons':'⇋', '\\leftthreetimes':'⋋', '\\leo':'♌',
      '\\leq)':'≤', '\\leqq':'≦', '\\leqslant':'≤', '\\lessdot':'⋖',
      '\\lesseqgtr':'⋛', '\\lesseqqgtr':'⪋', '\\lessgtr':'≶',
      '\\lesssim':'≲', '\\lfloor':'⌊', '\\lg':'lg', '\\lhd':'⊲',
      '\\libra':'♎', '\\lightning':'↯', '\\liminf':'liminf',
      '\\limsup':'limsup', '\\ll':'≪', '\\lll':'⋘', '\\ln':'ln',
      '\\lneqq':'≨', '\\lnot':'¬', '\\lnsim':'⋦', '\\log':'log',
      '\\longleftarrow':'⟵', '\\longleftrightarrow':'⟷',
      '\\longmapsto':'⟼', '\\longrightarrow':'⟶', '\\looparrowleft':'↫',
      '\\looparrowright':'↬', '\\lor':'∨', '\\lozenge':'◊',
      '\\ltimes':'⋉', '\\lyxlock':'', '\\male':'♂', '\\maltese':'✠',
      '\\mapsfrom':'↤', '\\mapsto':'↦', '\\mathcircumflex':'^',
      '\\max':'max', '\\measuredangle':'∡', '\\mercury':'☿',
      '\\mho':'℧', '\\mid':'∣', '\\min':'min', '\\models':'⊨',
      '\\mp':'∓', '\\multimap':'⊸', '\\nLeftarrow':'⇍',
      '\\nLeftrightarrow':'⇎', '\\nRightarrow':'⇏', '\\nVDash':'⊯',
      '\\nabla':'∇', '\\napprox':'≉', '\\natural':'♮', '\\ncong':'≇',
      '\\nearrow':'↗', '\\neg':'¬', '\\neg)':'¬', '\\neptune':'♆',
      '\\nequiv':'≢', '\\newline':'<br/>', '\\nexists':'∄',
      '\\ngeqslant':'≱', '\\ngtr':'≯', '\\ngtrless':'≹', '\\ni':'∋',
      '\\ni)':'∋', '\\nleftarrow':'↚', '\\nleftrightarrow':'↮',
      '\\nleqslant':'≰', '\\nless':'≮', '\\nlessgtr':'≸', '\\nmid':'∤',
      '\\nolimits':'', '\\nonumber':'', '\\not':'¬', '\\not<':'≮',
      '\\not=':'≠', '\\not>':'≯', '\\notbackslash':'⍀', '\\notin':'∉',
      '\\notni':'∌', '\\notslash':'⌿', '\\nparallel':'∦',
      '\\nprec':'⊀', '\\nrightarrow':'↛', '\\nsim':'≁', '\\nsimeq':'≄',
      '\\nsqsubset':'⊏̸', '\\nsubseteq':'⊈', '\\nsucc':'⊁',
      '\\nsucccurlyeq':'⋡', '\\nsupset':'⊅', '\\nsupseteq':'⊉',
      '\\ntriangleleft':'⋪', '\\ntrianglelefteq':'⋬',
      '\\ntriangleright':'⋫', '\\ntrianglerighteq':'⋭', '\\nvDash':'⊭',
      '\\nvdash':'⊬', '\\nwarrow':'↖', '\\odot':'⊙',
      '\\officialeuro':'€', '\\oiiint':'<span class="bigsymbol">∰</span>',
      '\\oiint':'<span class="bigsymbol">∯</span>',
      '\\oint':'<span class="bigsymbol">∮</span>',
      '\\ointclockwise':'<span class="bigsymbol">∲</span>',
      '\\ointctrclockwise':'<span class="bigsymbol">∳</span>',
      '\\ominus':'⊖', '\\oplus':'⊕', '\\oslash':'⊘', '\\otimes':'⊗',
      '\\owns':'∋', '\\parallel':'∥', '\\partial':'∂', '\\perp':'⊥',
      '\\pisces':'♓', '\\pitchfork':'⋔', '\\pluto':'♇', '\\pm':'±',
      '\\pointer':'➪', '\\pounds':'£', '\\prec':'≺',
      '\\preccurlyeq':'≼', '\\preceq':'≼', '\\precsim':'≾',
      '\\prime':'′', '\\prompto':'∝', '\\qquad':' ', '\\quad':' ',
      '\\quarternote':'♩', '\\rangle':'⟩', '\\rbrace':'}',
      '\\rbrace)':'}', '\\rbrack':']', '\\rceil':'⌉', '\\rfloor':'⌋',
      '\\rhd':'⊳', '\\rightarrow)':'→', '\\rightarrowtail':'↣',
      '\\rightarrowtobar':'⇥', '\\rightharpoondown':'⇁',
      '\\rightharpoonup':'⇀', '\\rightharpooondown':'⇁',
      '\\rightharpooonup':'⇀', '\\rightleftarrows':'⇄',
      '\\rightleftharpoons':'⇌', '\\rightmoon':'☽',
      '\\rightrightarrows':'⇉', '\\rightrightharpoons':'⥤',
      '\\rightthreetimes':'⋌', '\\risingdotseq':'≓', '\\rtimes':'⋊',
      '\\sagittarius':'♐', '\\saturn':'♄', '\\scorpio':'♏',
      '\\searrow':'↘', '\\sec':'sec', '\\setminus':'∖', '\\sharp':'♯',
      '\\simeq':'≃', '\\sin':'sin', '\\sinh':'sinh', '\\slash':'∕',
      '\\smile':'⌣', '\\smiley':'☺', '\\spadesuit':'♠',
      '\\sphericalangle':'∢', '\\sqcap':'⊓', '\\sqcup':'⊔',
      '\\sqsubset':'⊏', '\\sqsubseteq':'⊑', '\\sqsupset':'⊐',
      '\\sqsupseteq':'⊒', '\\square':'□', '\\star':'⋆',
      '\\subseteqq':'⫅', '\\subsetneqq':'⫋', '\\succ':'≻',
      '\\succcurlyeq':'≽', '\\succeq':'≽', '\\succnsim':'⋩',
      '\\succsim':'≿', '\\sun':'☼', '\\sup':'sup', '\\supseteqq':'⫆',
      '\\supsetneqq':'⫌', '\\surd':'√', '\\swarrow':'↙', '\\tan':'tan',
      '\\tanh':'tanh', '\\taurus':'♉', '\\textasciicircum':'^',
      '\\textasciitilde':'~', '\\textbackslash':'\\',
      '\\textcopyright':'©\'', '\\textdegree':'°', '\\textellipsis':'…',
      '\\textemdash':'—', '\\textendash':'—', '\\texteuro':'€',
      '\\textgreater':'>', '\\textless':'<', '\\textordfeminine':'ª',
      '\\textordmasculine':'º', '\\textquotedblleft':'“',
      '\\textquotedblright':'”', '\\textquoteright':'’',
      '\\textregistered':'®', '\\textrightarrow':'→',
      '\\textsection':'§', '\\texttrademark':'™',
      '\\texttwosuperior':'²', '\\textvisiblespace':' ',
      '\\therefore':'∴', '\\top':'⊤', '\\triangle':'△',
      '\\triangleleft':'⊲', '\\trianglelefteq':'⊴', '\\triangleq':'≜',
      '\\triangleright':'▷', '\\trianglerighteq':'⊵',
      '\\twoheadleftarrow':'↞', '\\twoheadrightarrow':'↠',
      '\\twonotes':'♫', '\\udot':'⊍', '\\unlhd':'⊴', '\\unrhd':'⊵',
      '\\unrhl':'⊵', '\\uparrow':'↑', '\\updownarrow':'↕',
      '\\upharpoonleft':'↿', '\\upharpoonright':'↾', '\\uplus':'⊎',
      '\\upuparrows':'⇈', '\\uranus':'♅', '\\vDash':'⊨',
      '\\varclubsuit':'♧', '\\vardiamondsuit':'♦', '\\varheartsuit':'♥',
      '\\varnothing':'∅', '\\varspadesuit':'♤', '\\vdash':'⊢',
      '\\vdots':'⋮', '\\vee':'∨', '\\vee)':'∨', '\\veebar':'⊻',
      '\\vert':'∣', '\\virgo':'♍', '\\wedge':'∧', '\\wedge)':'∧',
      '\\wp':'℘', '\\wr':'≀', '\\yen':'¥', '\\{':'{', '\\|':'∥',
      '\\}':'}',
      }
  decoratedcommand = {
      }
  # Commands that decorate their argument with a wide symbol above it.
  decoratingfunctions = {
      '\\overleftarrow':'⟵', '\\overrightarrow':'⟶', '\\widehat':'^',
      }
  # Closing tokens for the various formula parsing modes.
  endings = {
      'bracket':'}', 'complex':'\\]', 'endafter':'}',
      'endbefore':'\\end{', 'squarebracket':']',
      }
  # Column alignments for multi-column math environments.
  environments = {
      'align':['r','l',], 'eqnarray':['r','c','l',],
      'gathered':['l','l',],
      }
  # Font-changing commands; values are either an HTML tag/attribute spec or
  # a precomposed Unicode character for specific letters.
  fontfunctions = {
      '\\boldsymbol':'b', '\\mathbb':'span class="blackboard"',
      '\\mathbb{A}':'𝔸', '\\mathbb{B}':'𝔹', '\\mathbb{C}':'ℂ',
      '\\mathbb{D}':'𝔻', '\\mathbb{E}':'𝔼', '\\mathbb{F}':'𝔽',
      '\\mathbb{G}':'𝔾', '\\mathbb{H}':'ℍ', '\\mathbb{J}':'𝕁',
      '\\mathbb{K}':'𝕂', '\\mathbb{L}':'𝕃', '\\mathbb{N}':'ℕ',
      '\\mathbb{O}':'𝕆', '\\mathbb{P}':'ℙ', '\\mathbb{Q}':'ℚ',
      '\\mathbb{R}':'ℝ', '\\mathbb{S}':'𝕊', '\\mathbb{T}':'𝕋',
      '\\mathbb{W}':'𝕎', '\\mathbb{Z}':'ℤ', '\\mathbf':'b',
      '\\mathcal':'span class="scriptfont"', '\\mathcal{B}':'ℬ',
      '\\mathcal{E}':'ℰ', '\\mathcal{F}':'ℱ', '\\mathcal{H}':'ℋ',
      '\\mathcal{I}':'ℐ', '\\mathcal{L}':'ℒ', '\\mathcal{M}':'ℳ',
      '\\mathcal{R}':'ℛ', '\\mathfrak':'span class="fraktur"',
      '\\mathfrak{C}':'ℭ', '\\mathfrak{F}':'𝔉', '\\mathfrak{H}':'ℌ',
      '\\mathfrak{I}':'ℑ', '\\mathfrak{R}':'ℜ', '\\mathfrak{Z}':'ℨ',
      '\\mathit':'i', '\\mathring{A}':'Å', '\\mathring{U}':'Ů',
      '\\mathring{a}':'å', '\\mathring{u}':'ů', '\\mathring{w}':'ẘ',
      '\\mathring{y}':'ẙ', '\\mathrm':'span class="mathrm"',
      '\\mathscr':'span class="scriptfont"', '\\mathscr{B}':'ℬ',
      '\\mathscr{E}':'ℰ', '\\mathscr{F}':'ℱ', '\\mathscr{H}':'ℋ',
      '\\mathscr{I}':'ℐ', '\\mathscr{L}':'ℒ', '\\mathscr{M}':'ℳ',
      '\\mathscr{R}':'ℛ', '\\mathsf':'span class="mathsf"',
      '\\mathtt':'tt',
      }
  # Multi-parameter commands: [parameter template, output template,
  # then the HTML spec for each output function f0, f1, ...].
  hybridfunctions = {
      '\\binom':['{$1}{$2}','f2{(}f0{f1{$1}f1{$2}}f2{)}','span class="binom"','span class="binomstack"','span class="bigsymbol"',],
      '\\boxed':['{$1}','f0{$1}','span class="boxed"',],
      '\\cfrac':['[$p!]{$1}{$2}','f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}','span class="fullfraction"','span class="numerator align-$p"','span class="denominator"','span class="ignored"',],
      '\\color':['{$p!}{$1}','f0{$1}','span style="color: $p;"',],
      '\\colorbox':['{$p!}{$1}','f0{$1}','span class="colorbox" style="background: $p;"',],
      '\\dbinom':['{$1}{$2}','(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})','span class="binomial"','span class="binomrow"','span class="binomcell"',],
      '\\dfrac':['{$1}{$2}','f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}','span class="fullfraction"','span class="numerator"','span class="denominator"','span class="ignored"',],
      '\\displaystyle':['{$1}','f0{$1}','span class="displaystyle"',],
      '\\fbox':['{$1}','f0{$1}','span class="fbox"',],
      '\\fboxrule':['{$p!}','f0{}','ignored',],
      '\\fboxsep':['{$p!}','f0{}','ignored',],
      '\\fcolorbox':['{$p!}{$q!}{$1}','f0{$1}','span class="boxed" style="border-color: $p; background: $q;"',],
      '\\frac':['{$1}{$2}','f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}','span class="fraction"','span class="numerator"','span class="denominator"','span class="ignored"',],
      '\\framebox':['[$p!][$q!]{$1}','f0{$1}','span class="framebox align-$q" style="width: $p;"',],
      '\\href':['[$o]{$u!}{$t!}','f0{$t}','a href="$u"',],
      '\\hspace':['{$p!}','f0{ }','span class="hspace" style="width: $p;"',],
      '\\leftroot':['{$p!}','f0{ }','span class="leftroot" style="width: $p;px"',],
      '\\nicefrac':['{$1}{$2}','f0{f1{$1}⁄f2{$2}}','span class="fraction"','sup class="numerator"','sub class="denominator"','span class="ignored"',],
      '\\parbox':['[$p!]{$w!}{$1}','f0{1}','div class="Boxed" style="width: $w;"',],
      '\\raisebox':['{$p!}{$1}','f0{$1.font}','span class="raisebox" style="vertical-align: $p;"',],
      '\\renewenvironment':['{$1!}{$2!}{$3!}','',],
      '\\rule':['[$v!]{$w!}{$h!}','f0/','hr class="line" style="width: $w; height: $h;"',],
      '\\scriptscriptstyle':['{$1}','f0{$1}','span class="scriptscriptstyle"',],
      '\\scriptstyle':['{$1}','f0{$1}','span class="scriptstyle"',],
      '\\sqrt':['[$0]{$1}','f0{f1{$0}f2{√}f4{(}f3{$1}f4{)}}','span class="sqrt"','sup class="root"','span class="radical"','span class="root"','span class="ignored"',],
      '\\stackrel':['{$1}{$2}','f0{f1{$1}f2{$2}}','span class="stackrel"','span class="upstackrel"','span class="downstackrel"',],
      '\\tbinom':['{$1}{$2}','(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})','span class="binomial"','span class="binomrow"','span class="binomcell"',],
      '\\textcolor':['{$p!}{$1}','f0{$1}','span style="color: $p;"',],
      '\\textstyle':['{$1}','f0{$1}','span class="textstyle"',],
      '\\unit':['[$0]{$1}','$0f0{$1.font}','span class="unit"',],
      '\\unitfrac':['[$0]{$1}{$2}','$0f0{f1{$1.font}⁄f2{$2.font}}','span class="fraction"','sup class="unit"','sub class="unit"',],
      '\\uproot':['{$p!}','f0{ }','span class="uproot" style="width: $p;px"',],
      '\\url':['{$u!}','f0{$u}','a href="$u"',],
      '\\vspace':['{$p!}','f0{ }','span class="vspace" style="height: $p;"',],
      }
  # Size expressions (in terms of the parameters) for hybrid functions.
  hybridsizes = {
      '\\binom':'$1+$2', '\\cfrac':'$1+$2', '\\dbinom':'$1+$2+1',
      '\\dfrac':'$1+$2', '\\frac':'$1+$2', '\\tbinom':'$1+$2+1',
      }
  labelfunctions = {
      '\\label':'a name="#"',
      }
  # Operators that take limits (rendered above/below or as sub/superscripts).
  limitcommands = {
      '\\int':'∫', '\\intop':'∫', '\\lim':'lim', '\\prod':'∏',
      '\\smallint':'∫', '\\sum':'∑',
      }
  # TODO: setting for simple enlarged vs. piecewise symbols
  # Wrap the big operators so CSS can enlarge them.
  for key in ('\\int', '\\intop', '\\prod', '\\sum'):
    limitcommands[key] = '<span class="symbol">%s</span>' % limitcommands[key]
  # Commands handled by dedicated parser classes.
  misccommands = {
      '\\limits':'LimitPreviousCommand', '\\newcommand':'MacroDefinition',
      '\\renewcommand':'MacroDefinition',
      '\\setcounter':'SetCounterFunction', '\\tag':'FormulaTag',
      '\\tag*':'FormulaTag',
      }
  # Plain characters rewritten (mostly spaced operators) in formula text.
  modified = {
      '\n':'', ' ':'', '$':'', '&':' ', '\'':'’', '+':' + ',
      ',':', ', '-':' − ', '/':' ⁄ ', '<':' < ', '=':' = ',
      '>':' > ', '@':'', '~':'',
      }
  # One-parameter commands mapped to the HTML tag that wraps the argument.
  onefunctions = {
      '\\Big':'span class="bigsymbol"', '\\Bigg':'span class="hugesymbol"',
      '\\bar':'span class="bar"', '\\begin{array}':'span class="arraydef"',
      '\\big':'span class="symbol"', '\\bigg':'span class="largesymbol"',
      '\\bigl':'span class="bigsymbol"', '\\bigr':'span class="bigsymbol"',
      '\\centering':'span class="align-center"',
      '\\ensuremath':'span class="ensuremath"',
      '\\hphantom':'span class="phantom"',
      '\\noindent':'span class="noindent"',
      '\\overbrace':'span class="overbrace"',
      '\\overline':'span class="overline"',
      '\\phantom':'span class="phantom"',
      '\\underbrace':'span class="underbrace"', '\\underline':'u',
      '\\vphantom':'span class="phantom"',
      }
  # Relation/operator commands rendered with surrounding spacing.
  spacedcommands = {
      '\\Leftrightarrow':'⇔', '\\Rightarrow':'⇒', '\\approx':'≈',
      '\\dashrightarrow':'⇢', '\\equiv':'≡', '\\ge':'≥', '\\geq':'≥',
      '\\implies':' ⇒ ', '\\in':'∈', '\\le':'≤', '\\leftarrow':'←',
      '\\leq':'≤', '\\ne':'≠', '\\neq':'≠', '\\not\\in':'∉',
      '\\propto':'∝', '\\rightarrow':'→', '\\rightsquigarrow':'⇝',
      '\\sim':'~', '\\subset':'⊂', '\\subseteq':'⊆', '\\supset':'⊃',
      '\\supseteq':'⊇', '\\times':'×', '\\to':'→',
      }
  # Opening tokens for the various formula parsing modes.
  starts = {
      'beginafter':'}', 'beginbefore':'\\begin{', 'bracket':'{',
      'command':'\\', 'comment':'%', 'complex':'\\[', 'simple':'$',
      'squarebracket':'[', 'unnumbered':'*',
      }
  # Sub/superscript markers and the HTML tag used for each.
  symbolfunctions = {
      '^':'sup', '_':'sub',
      }
  # Text-mode commands usable inside formulas.
  textfunctions = {
      '\\mbox':'span class="mbox"', '\\text':'span class="text"',
      '\\textbf':'b', '\\textipa':'span class="textipa"', '\\textit':'i',
      '\\textnormal':'span class="textnormal"',
      '\\textrm':'span class="textrm"',
      '\\textsc':'span class="versalitas"',
      '\\textsf':'span class="textsf"', '\\textsl':'i', '\\texttt':'tt',
      '\\textup':'span class="normal"',
      }
  # Characters passed through to the output untouched.
  unmodified = {
      'characters':['.','*','€','(',')','[',']',':','·','!',';','|','§','"',],
      }
  # External services used for formula rendering fallbacks.
  urls = {
      'googlecharts':'http://chart.googleapis.com/chart?cht=tx&chl=',
      }
class GeneralConfig(object):
  "Configuration class from elyxer.config file"
  # Release metadata: release date, highest supported LyX file format,
  # and the eLyXer version number.
  version = {
      'date':'2011-06-27', 'lyxformat':'413', 'number':'1.2.3',
      }
class HeaderConfig(object):
  "Configuration class from elyxer.config file"
  # LyX header keywords, mapped from the internal parameter name to the
  # literal command that introduces them in the .lyx file header.
  parameters = {
      'beginpreamble':'\\begin_preamble', 'branch':'\\branch',
      'documentclass':'\\textclass', 'endbranch':'\\end_branch',
      'endpreamble':'\\end_preamble', 'language':'\\language',
      'lstset':'\\lstset', 'outputchanges':'\\output_changes',
      'paragraphseparation':'\\paragraph_separation',
      'pdftitle':'\\pdf_title', 'secnumdepth':'\\secnumdepth',
      'tocdepth':'\\tocdepth',
      }
  # Document classes grouped by overall style (article-like vs book-like);
  # used to decide numbering/chapter behavior.
  styles = {
      'article':['article','aastex','aapaper','acmsiggraph','sigplanconf','achemso','amsart','apa','arab-article','armenian-article','article-beamer','chess','dtk','elsarticle','heb-article','IEEEtran','iopart','kluwer','scrarticle-beamer','scrartcl','extarticle','paper','mwart','revtex4','spie','svglobal3','ltugboat','agu-dtd','jgrga','agums','entcs','egs','ijmpc','ijmpd','singlecol-new','doublecol-new','isprs','tarticle','jsarticle','jarticle','jss','literate-article','siamltex','cl2emult','llncs','svglobal','svjog','svprobth',],
      'book':['book','amsbook','scrbook','extbook','tufte-book','report','extreport','scrreprt','memoir','tbook','jsbook','jbook','mwbk','svmono','svmult','treport','jreport','mwrep',],
      }
class ImageConfig(object):
  "Configuration class from elyxer.config file"
  # Shell command templates for image conversion; [ ... ] groups are
  # optional and $variables are substituted at run time.
  converters = {
      'imagemagick':'convert[ -density $scale][ -define $format:use-cropbox=true] "$input" "$output"',
      'inkscape':'inkscape "$input" --export-png="$output"',
      }
  # Input extensions that need the cropbox trick, mapped to the format name.
  cropboxformats = {
      '.eps':'ps', '.pdf':'pdf', '.ps':'ps',
      }
  # Output format defaults: raster default plus recognized vector formats.
  formats = {
      'default':'.png', 'vector':['.svg','.eps',],
      }
class LayoutConfig(object):
  "Configuration class from elyxer.config file"
  # Container class names that may be grouped together inside a layout.
  groupable = {
      'allowed':['StringContainer','Constant','TaggedText','Align','TextFamily','EmphaticText','VersalitasText','BarredText','SizeText','ColorText','LangLine','Formula',],
      }
class NewfangleConfig(object):
  "Configuration class from elyxer.config file"
  # Markers used by the newfangle literate-programming chunk syntax.
  constants = {
      'chunkref':'chunkref{', 'endcommand':'}', 'endmark':'>',
      'startcommand':'\\', 'startmark':'=<',
      }
class NumberingConfig(object):
  "Configuration class from elyxer.config file"
  # Which layouts get arabic ('ordered') vs roman numbering.
  layouts = {
      'ordered':['Chapter','Section','Subsection','Subsubsection','Paragraph',],
      'roman':['Part','Book',],
      }
  # Footnote symbol sequence used with symbol-style footnote marking.
  sequence = {
      'symbols':['*','**','†','‡','§','§§','¶','¶¶','#','##',],
      }
class StyleConfig(object):
  "Configuration class from elyxer.config file"
  # Horizontal space commands, mapped to their HTML equivalent.
  hspaces = {
      '\\enskip{}':' ', '\\hfill{}':'<span class="hfill"> </span>',
      '\\hspace*{\\fill}':' ', '\\hspace*{}':'', '\\hspace{}':' ',
      '\\negthinspace{}':'', '\\qquad{}':' ', '\\quad{}':' ',
      '\\space{}':' ', '\\thinspace{}':' ', '~':' ',
      }
  # Quote glyphs keyed by LyX quote-style code (language + left/right +
  # single/double).
  quotes = {
      'ald':'»', 'als':'›', 'ard':'«', 'ars':'‹', 'eld':'“',
      'els':'‘', 'erd':'”', 'ers':'’', 'fld':'«',
      'fls':'‹', 'frd':'»', 'frs':'›', 'gld':'„', 'gls':'‚',
      'grd':'“', 'grs':'‘', 'pld':'„', 'pls':'‚', 'prd':'”',
      'prs':'’', 'sld':'”', 'srd':'”',
      }
  # Templates for cross-reference output; @ # $ ¶ and ↕ are placeholders
  # substituted when the reference is resolved.
  referenceformats = {
      'eqref':'(@↕)', 'formatted':'¶↕', 'nameref':'$↕', 'pageref':'#↕',
      'ref':'@↕', 'vpageref':'on-page#↕', 'vref':'@on-page#↕',
      }
  # Size suffixes that carry no meaning in HTML and are dropped.
  size = {
      'ignoredtexts':['col','text','line','page','theight','pheight',],
      }
  # Vertical space commands, mapped to an HTML div of the right class.
  vspaces = {
      'bigskip':'<div class="bigskip"> </div>',
      'defskip':'<div class="defskip"> </div>',
      'medskip':'<div class="medskip"> </div>',
      'smallskip':'<div class="smallskip"> </div>',
      'vfill':'<div class="vfill"> </div>',
      }
class TOCConfig(object):
  "Configuration class from elyxer.config file"
  # ContainerExtractor config for plain TOC entries (see ContainerExtractor:
  # 'allowed' copied as is, 'cloned' duplicated, 'extracted' looked into).
  extractplain = {
      'allowed':['StringContainer','Constant','TaggedText','Align','TextFamily','EmphaticText','VersalitasText','BarredText','SizeText','ColorText','LangLine','Formula',],
      'cloned':['',], 'extracted':['',],
      }
  # ContainerExtractor config for extracting titles into the TOC.
  extracttitle = {
      'allowed':['StringContainer','Constant','Space',],
      'cloned':['TextFamily','EmphaticText','VersalitasText','BarredText','SizeText','ColorText','LangLine','Formula',],
      'extracted':['PlainLayout','TaggedText','Align','Caption','StandardLayout','FlexInset',],
      }
class TagConfig(object):
  "Configuration class from elyxer.config file"
  # HTML tag for barred (underlined) text.
  barred = {
      'under':'u',
      }
  # Font families mapped to their HTML tag or span.
  family = {
      'sans':'span class="sans"', 'typewriter':'tt',
      }
  # Flex insets (both old 'CharStyle:' and new plain names) mapped to spans.
  flex = {
      'CharStyle:Code':'span class="code"',
      'CharStyle:MenuItem':'span class="menuitem"',
      'Code':'span class="code"', 'MenuItem':'span class="menuitem"',
      'Noun':'span class="noun"', 'Strong':'span class="strong"',
      }
  # Layouts whose consecutive paragraphs are grouped into one element.
  group = {
      'layouts':['Quotation','Quote',],
      }
  # Layout names mapped to HTML tags; 'h?' means the heading level is
  # computed elsewhere from the section depth.
  layouts = {
      'Center':'div', 'Chapter':'h?', 'Date':'h2', 'Paragraph':'div',
      'Part':'h1', 'Quotation':'blockquote', 'Quote':'blockquote',
      'Section':'h?', 'Subsection':'h?', 'Subsubsection':'h?',
      }
  # List layouts mapped to the HTML list tag.
  listitems = {
      'Enumerate':'ol', 'Itemize':'ul',
      }
  # Note insets: empty value means the note is not output at all.
  notes = {
      'Comment':'', 'Greyedout':'span class="greyedout"', 'Note':'',
      }
  # Font shapes mapped to their HTML tag or span.
  shaped = {
      'italic':'i', 'slanted':'i', 'smallcaps':'span class="versalitas"',
      }
class TranslationConfig(object):
  "Configuration class from elyxer.config file"
  # English strings for generated text; translated via the gettext layer
  # when a document language is active.
  constants = {
      'Appendix':'Appendix', 'Book':'Book', 'Chapter':'Chapter',
      'Paragraph':'Paragraph', 'Part':'Part', 'Section':'Section',
      'Subsection':'Subsection', 'Subsubsection':'Subsubsection',
      'abstract':'Abstract', 'bibliography':'Bibliography',
      'figure':'figure', 'float-algorithm':'Algorithm ',
      'float-figure':'Figure ', 'float-listing':'Listing ',
      'float-table':'Table ', 'float-tableau':'Tableau ',
      'footnotes':'Footnotes', 'generated-by':'Document generated by ',
      'generated-on':' on ', 'index':'Index',
      'jsmath-enable':'Please enable JavaScript on your browser.',
      'jsmath-requires':' requires JavaScript to correctly process the mathematics on this page. ',
      'jsmath-warning':'Warning: ', 'list-algorithm':'List of Algorithms',
      'list-figure':'List of Figures', 'list-table':'List of Tables',
      'list-tableau':'List of Tableaux', 'main-page':'Main page',
      'next':'Next', 'nomenclature':'Nomenclature',
      'on-page':' on page ', 'prev':'Prev', 'references':'References',
      'toc':'Table of Contents', 'toc-for':'Contents for ', 'up':'Up',
      }
  # LyX language names mapped to ISO 639-1 codes.
  languages = {
      'american':'en', 'british':'en', 'deutsch':'de', 'dutch':'nl',
      'english':'en', 'french':'fr', 'ngerman':'de', 'spanish':'es',
      }
class CommandLineParser(object):
"A parser for runtime options"
def __init__(self, options):
self.options = options
def parseoptions(self, args):
"Parse command line options"
if len(args) == 0:
return None
while len(args) > 0 and args[0].startswith('--'):
key, value = self.readoption(args)
if not key:
return 'Option ' + value + ' not recognized'
if not value:
return 'Option ' + key + ' needs a value'
setattr(self.options, key, value)
return None
def readoption(self, args):
"Read the key and value for an option"
arg = args[0][2:]
del args[0]
if '=' in arg:
key = self.readequalskey(arg, args)
else:
key = arg.replace('-', '')
if not hasattr(self.options, key):
return None, key
current = getattr(self.options, key)
if isinstance(current, bool):
return key, True
# read value
if len(args) == 0:
return key, None
if args[0].startswith('"'):
initial = args[0]
del args[0]
return key, self.readquoted(args, initial)
value = args[0]
del args[0]
if isinstance(current, list):
current.append(value)
return key, current
return key, value
def readquoted(self, args, initial):
"Read a value between quotes"
value = initial[1:]
while len(args) > 0 and not args[0].endswith('"') and not args[0].startswith('--'):
value += ' ' + args[0]
del args[0]
if len(args) == 0 or args[0].startswith('--'):
return None
value += ' ' + args[0:-1]
return value
def readequalskey(self, arg, args):
"Read a key using equals"
split = arg.split('=', 1)
key = split[0]
value = split[1]
args.insert(0, value)
return key
class Options(object):
  """A set of runtime options.

  All options are class attributes so they behave as global state; the
  command line parser sets them via setattr on the class (see parseoptions).
  """
  instance = None
  # path of the running script (argv[0]); set in parseoptions
  location = None
  nocopy = False
  copyright = False
  debug = False
  quiet = False
  version = False
  hardversion = False
  versiondate = False
  html = False
  help = False
  showlines = True
  str = False
  iso885915 = False
  css = []
  title = None
  directory = None
  destdirectory = None
  toc = False
  toctarget = ''
  tocfor = None
  forceformat = None
  lyxformat = False
  target = None
  splitpart = None
  memory = True
  lowmem = False
  nobib = False
  converter = 'imagemagick'
  raw = False
  jsmath = None
  mathjax = None
  nofooter = False
  simplemath = False
  template = None
  noconvert = False
  notoclabels = False
  # footnote marker style flags (letter/number/symbol) and placement flags
  letterfoot = True
  numberfoot = False
  symbolfoot = False
  hoverfoot = True
  marginfoot = False
  endfoot = False
  supfoot = True
  alignfoot = False
  footnotes = None
  imageformat = None
  copyimages = False
  googlecharts = False
  embedcss = []
  branches = dict()
  def parseoptions(self, args):
    "Parse command line options"
    Options.location = args[0]
    del args[0]
    parser = CommandLineParser(Options)
    result = parser.parseoptions(args)
    if result:
      Trace.error(result)
      self.usage()
    self.processoptions()
  def processoptions(self):
    "Process all options parsed."
    if Options.help:
      self.usage()
    if Options.version:
      self.showversion()
    if Options.hardversion:
      self.showhardversion()
    if Options.versiondate:
      self.showversiondate()
    if Options.lyxformat:
      self.showlyxformat()
    if Options.splitpart:
      try:
        Options.splitpart = int(Options.splitpart)
        if Options.splitpart <= 0:
          Trace.error('--splitpart requires a number bigger than zero')
          self.usage()
      # narrowed from a bare 'except:': only the int() conversion failure
      # should be reported as a bad argument
      except ValueError:
        Trace.error('--splitpart needs a numeric argument, not ' + Options.splitpart)
        self.usage()
    if Options.lowmem or Options.toc or Options.tocfor:
      Options.memory = False
    self.parsefootnotes()
    if Options.forceformat and not Options.imageformat:
      Options.imageformat = Options.forceformat
    if Options.imageformat == 'copy':
      Options.copyimages = True
    if Options.css == []:
      Options.css = ['http://elyxer.nongnu.org/lyx.css']
    if Options.html:
      Options.simplemath = True
    if Options.toc and not Options.tocfor:
      Trace.error('Option --toc is deprecated; use --tocfor "page" instead')
      Options.tocfor = Options.toctarget
    if Options.nocopy:
      Trace.error('Option --nocopy is deprecated; it is no longer needed')
    # mirror options like 'debug' into Trace.debugmode etc.
    for param in dir(Trace):
      if param.endswith('mode'):
        setattr(Trace, param, getattr(self, param[:-4]))
  def usage(self):
    "Show correct usage"
    Trace.error('Usage: ' + os.path.basename(Options.location) + ' [options] [filein] [fileout]')
    Trace.error('Convert LyX input file "filein" to HTML file "fileout".')
    Trace.error('If filein (or fileout) is not given use standard input (or output).')
    Trace.error('Main program of the eLyXer package (http://elyxer.nongnu.org/).')
    self.showoptions()
  def parsefootnotes(self):
    "Parse footnotes options."
    if not Options.footnotes:
      return
    Options.marginfoot = False
    Options.letterfoot = False
    options = Options.footnotes.split(',')
    for option in options:
      footoption = option + 'foot'
      if hasattr(Options, footoption):
        setattr(Options, footoption, True)
      else:
        Trace.error('Unknown footnotes option: ' + option)
    # guarantee at least one placement style and one marker style
    if not Options.endfoot and not Options.marginfoot and not Options.hoverfoot:
      Options.hoverfoot = True
    if not Options.numberfoot and not Options.symbolfoot:
      Options.letterfoot = True
  def showoptions(self):
    "Show all possible options"
    Trace.error('  Common options:')
    Trace.error('    --help:                 show this online help')
    Trace.error('    --quiet:                disables all runtime messages')
    Trace.error('')
    Trace.error('  Advanced options:')
    Trace.error('    --debug:                enable debugging messages (for developers)')
    Trace.error('    --version:              show version number and release date')
    Trace.error('    --lyxformat:            return the highest LyX version supported')
    Trace.error('  Options for HTML output:')
    Trace.error('    --title "title":        set the generated page title')
    Trace.error('    --css "file.css":       use a custom CSS file')
    # fixed garbled message text ("from elyxer.a CSS file")
    Trace.error('    --embedcss "file.css":  embed styles from a CSS file into the output')
    Trace.error('    --html:                 output HTML 4.0 instead of the default XHTML')
    Trace.error('    --unicode:              full Unicode output')
    Trace.error('    --iso885915:            output a document with ISO-8859-15 encoding')
    Trace.error('    --nofooter:             remove the footer "generated by eLyXer"')
    Trace.error('    --simplemath:           do not generate fancy math constructions')
    Trace.error('  Options for image output:')
    Trace.error('    --directory "img_dir":  look for images in the specified directory')
    Trace.error('    --destdirectory "dest": put converted images into this directory')
    Trace.error('    --imageformat ".ext":   image output format, or "copy" to copy images')
    Trace.error('    --noconvert:            do not convert images, use in original locations')
    Trace.error('    --converter "inkscape": use an alternative program to convert images')
    Trace.error('  Options for footnote display:')
    Trace.error('    --numberfoot:           mark footnotes with numbers instead of letters')
    Trace.error('    --symbolfoot:           mark footnotes with symbols (*, **...)')
    Trace.error('    --hoverfoot:            show footnotes as hovering text (default)')
    Trace.error('    --marginfoot:           show footnotes on the page margin')
    Trace.error('    --endfoot:              show footnotes at the end of the page')
    Trace.error('    --supfoot:              use superscript for footnote markers (default)')
    Trace.error('    --alignfoot:            use aligned text for footnote markers')
    Trace.error('    --footnotes "options":  specify several comma-separated footnotes options')
    Trace.error('      Available options are: "number", "symbol", "hover", "margin", "end",')
    Trace.error('        "sup", "align"')
    Trace.error('  Advanced output options:')
    Trace.error('    --splitpart "depth":    split the resulting webpage at the given depth')
    Trace.error('    --tocfor "page":        generate a TOC that points to the given page')
    Trace.error('    --target "frame":       make all links point to the given frame')
    Trace.error('    --notoclabels:          omit the part labels in the TOC, such as Chapter')
    Trace.error('    --lowmem:               do the conversion on the fly (conserve memory)')
    Trace.error('    --raw:                  generate HTML without header or footer.')
    # fixed garbled message text ("from elyxer.the given URL")
    Trace.error('    --jsmath "URL":         use jsMath from the given URL to display equations')
    Trace.error('    --mathjax "URL":        use MathJax from the given URL to display equations')
    Trace.error('    --googlecharts:         use Google Charts to generate formula images')
    Trace.error('    --template "file":      use a template, put everything in <!--$content-->')
    Trace.error('    --copyright:            add a copyright notice at the bottom')
    Trace.error('  Deprecated options:')
    Trace.error('    --toc:                  (deprecated) create a table of contents')
    Trace.error('    --toctarget "page":     (deprecated) generate a TOC for the given page')
    Trace.error('    --nocopy:               (deprecated) maintained for backwards compatibility')
    sys.exit()
  def showversion(self):
    "Return the current eLyXer version string"
    string = 'eLyXer version ' + GeneralConfig.version['number']
    string += ' (' + GeneralConfig.version['date'] + ')'
    Trace.error(string)
    sys.exit()
  def showhardversion(self):
    "Return just the version string"
    Trace.message(GeneralConfig.version['number'])
    sys.exit()
  def showversiondate(self):
    "Return just the version date"
    Trace.message(GeneralConfig.version['date'])
    sys.exit()
  def showlyxformat(self):
    "Return just the lyxformat parameter"
    Trace.message(GeneralConfig.version['lyxformat'])
    sys.exit()
class BranchOptions(object):
  "A set of options for a branch"
  def __init__(self, name):
    "Create the options for the named branch, with a default white color."
    self.name = name
    self.options = {'color':'#ffffff'}
  def set(self, key, value):
    "Set a branch option"
    startcommand = ContainerConfig.string['startcommand']
    if not key.startswith(startcommand):
      Trace.error('Invalid branch option ' + key)
      return
    # store under the option name with the leading command marker removed
    self.options[key.replace(startcommand, '')] = value
  def isselected(self):
    "Return if the branch is selected"
    return self.options.get('selected') == '1'
  def __unicode__(self):
    "String representation"
    return 'options for ' + self.name + ': ' + str(self.options)
import urllib.request, urllib.parse, urllib.error
class Cloner(object):
  "An object used to clone other objects."
  @classmethod
  def clone(cls, original):
    """Return an exact copy of an object.

    The original object must have an empty constructor."""
    return cls.create(original.__class__)
  @classmethod
  def create(cls, type):
    "Create an object of a given class."
    instance = type.__new__(type)
    instance.__init__()
    return instance
class ContainerExtractor(object):
  """A class to extract certain containers.

  The config parameter is a map containing three lists: allowed, cloned
  and extracted. Each of the three is a list of class names for containers.
  Allowed containers are included as is into the result; cloned containers
  are cloned and the clone is placed into the result; extracted containers
  are looked into recursively. All other containers are silently ignored.
  """
  def __init__(self, config):
    "Keep the three class-name lists from the config map."
    self.allowed = config['allowed']
    self.cloned = config['cloned']
    self.extracted = config['extracted']
  def extract(self, container):
    "Extract a group of selected containers from a container."
    # renamed the accumulator from 'list', which shadowed the builtin
    result = []
    locate = lambda c: c.__class__.__name__ in self.allowed + self.cloned
    recursive = lambda c: c.__class__.__name__ in self.extracted
    process = lambda c: self.process(c, result)
    container.recursivesearch(locate, recursive, process)
    return result
  def process(self, container, result):
    "Add allowed containers; clone cloned containers and add the clone."
    name = container.__class__.__name__
    if name in self.allowed:
      result.append(container)
    elif name in self.cloned:
      result.append(self.safeclone(container))
    else:
      Trace.error('Unknown container class ' + name)
  def safeclone(self, container):
    "Return a new container with contents only in a safe list, recursively."
    clone = Cloner.clone(container)
    clone.output = container.output
    clone.contents = self.extract(container)
    return clone
class Parser(object):
  """A generic parser.

  Subclasses implement parse(reader) and rely on the helpers here to read
  headers, parameters and the block ending. 'reader' is expected to offer
  currentline(), nextline() and linenumber (see LineReader).
  """
  def __init__(self):
    # first line number of the parsed block
    self.begin = 0
    # key -> value parameters collected by parseparameter()/parsexml()
    self.parameters = dict()
  def parseheader(self, reader):
    "Parse the header"
    # the header is the whitespace-split current line; consuming it leaves
    # the reader at the first body line, which we record as 'begin'
    header = reader.currentline().split()
    reader.nextline()
    self.begin = reader.linenumber
    return header
  def parseparameter(self, reader):
    "Parse a parameter"
    # XML-style parameters (<param attr=value...>) get their own parser
    if reader.currentline().strip().startswith('<'):
      key, value = self.parsexml(reader)
      self.parameters[key] = value
      return
    split = reader.currentline().strip().split(' ', 1)
    reader.nextline()
    if len(split) == 0:
      return
    key = split[0]
    if len(split) == 1:
      # a bare keyword acts as a boolean flag
      self.parameters[key] = True
      return
    if not '"' in split[1]:
      # unquoted value: take the rest of the line
      self.parameters[key] = split[1].strip()
      return
    # quoted value: take the text between the first pair of double quotes
    doublesplit = split[1].split('"')
    self.parameters[key] = doublesplit[1]
  def parsexml(self, reader):
    "Parse a parameter in xml form: <param attr1=value...>"
    strip = reader.currentline().strip()
    reader.nextline()
    if not strip.endswith('>'):
      Trace.error('XML parameter ' + strip + ' should be <...>')
    # drop the angle brackets and split into tag name + attributes
    split = strip[1:-1].split()
    if len(split) == 0:
      Trace.error('Empty XML parameter <>')
      return None, None
    key = split[0]
    del split[0]
    if len(split) == 0:
      return key, dict()
    attrs = dict()
    for attr in split:
      if not '=' in attr:
        Trace.error('Erroneous attribute for ' + key + ': ' + attr)
        # recover by giving the malformed attribute a dummy quoted value
        attr += '="0"'
      parts = attr.split('=')
      attrkey = parts[0]
      # the value is expected to be double-quoted: take what is inside
      value = parts[1].split('"')[1]
      attrs[attrkey] = value
    return key, attrs
  def parseending(self, reader, process):
    "Parse until the current ending is found"
    # self.ending is set by subclasses (e.g. from ContainerConfig.endings);
    # process() is called once per line until the ending line is reached
    if not self.ending:
      Trace.error('No ending for ' + str(self))
      return
    while not reader.currentline().startswith(self.ending):
      process()
  def parsecontainer(self, reader, contents):
    # create the next container via the factory (attached externally by the
    # owning reader machinery) and collect it, keeping the parent link
    container = self.factory.createcontainer(reader)
    if container:
      container.parent = self.parent
      contents.append(container)
  def __unicode__(self):
    "Return a description"
    return self.__class__.__name__ + ' (' + str(self.begin) + ')'
class LoneCommand(Parser):
  "A parser for just one command line"
  def parse(self, reader):
    "Consume nothing and produce no contents."
    return []
class TextParser(Parser):
  """A parser for a command and a bit of text.

  Keeps a class-level stack of pending endings shared by all nested
  TextParser instances, so that an outer ending can terminate an inner
  parser.
  """
  # shared stack of endings for currently nested text parsers
  stack = []
  def __init__(self, container):
    Parser.__init__(self)
    # the ending token for this container, looked up by class name
    self.ending = None
    if container.__class__.__name__ in ContainerConfig.endings:
      self.ending = ContainerConfig.endings[container.__class__.__name__]
    self.endings = []
  def parse(self, reader):
    "Parse lines as long as they are text"
    TextParser.stack.append(self.ending)
    # any ending on the stack, plus layout/inset endings, stops this parser
    self.endings = TextParser.stack + [ContainerConfig.endings['Layout'],
        ContainerConfig.endings['Inset'], self.ending]
    contents = []
    while not self.isending(reader):
      self.parsecontainer(reader, contents)
    return contents
  def isending(self, reader):
    "Check if text is ending"
    current = reader.currentline().split()
    if len(current) == 0:
      return False
    if current[0] in self.endings:
      # pop the matched ending from the shared stack; if it was not ours,
      # clear the whole stack so outer parsers also terminate
      if current[0] in TextParser.stack:
        TextParser.stack.remove(current[0])
      else:
        TextParser.stack = []
      return True
    return False
class ExcludingParser(Parser):
  "A parser that excludes the final line"
  def parse(self, reader):
    "Collect containers up to the ending line, leaving that line unread."
    parsed = []
    def append_container():
      self.parsecontainer(reader, parsed)
    self.parseending(reader, append_container)
    return parsed
class BoundedParser(ExcludingParser):
  "A parser bound by a final line"
  def parse(self, reader):
    "Parse everything, then consume the final line as well."
    found = ExcludingParser.parse(self, reader)
    # the boundary line itself is skipped, not parsed
    reader.nextline()
    return found
class BoundedDummy(Parser):
  "A bound parser that ignores everything"
  def parse(self, reader):
    "Skip every line of the container, including the ending line."
    self.parseending(reader, reader.nextline)
    # consume the ending line itself
    reader.nextline()
    return []
class StringParser(Parser):
  "Parses just a string"
  def parseheader(self, reader):
    "Take note of the starting line without consuming anything."
    self.begin = reader.linenumber + 1
    return []
  def parse(self, reader):
    "Return the current line and move past it."
    line = reader.currentline()
    reader.nextline()
    return line
class InsetParser(BoundedParser):
  "Parses a LyX inset"
  def parse(self, reader):
    "Read the inset parameters into a dictionary, then parse the body."
    start = ContainerConfig.string['startcommand']
    current = reader.currentline()
    while current != '' and not current.startswith(start):
      self.parseparameter(reader)
      current = reader.currentline()
    return BoundedParser.parse(self, reader)
class ContainerOutput(object):
  "The generic HTML output for a container."
  def gethtml(self, container):
    "Subclasses must override this; report the omission."
    Trace.error('gethtml() not implemented for ' + str(self))
  def isempty(self):
    "Decide if the output is empty: by default, not empty."
    return False
class EmptyOutput(ContainerOutput):
  "An output that always produces nothing."
  def gethtml(self, container):
    "Return empty HTML code."
    return []
  def isempty(self):
    "This output is particularly empty."
    return True
class FixedOutput(ContainerOutput):
  "Fixed output"
  def gethtml(self, container):
    "Return the constant HTML stored on the container itself."
    return container.html
class ContentsOutput(ContainerOutput):
  "Outputs the contents converted to HTML"
  def gethtml(self, container):
    """Return the concatenated HTML of all elements in container.contents.

    An element without gethtml() aborts the loop and returns what was
    accumulated so far."""
    html = []
    # 'is None' instead of '== None': identity test per PEP 8, immune to
    # containers overriding __eq__
    if container.contents is None:
      return html
    for element in container.contents:
      if not hasattr(element, 'gethtml'):
        Trace.error('No html in ' + element.__class__.__name__ + ': ' + str(element))
        return html
      html += element.gethtml()
    return html
class TaggedOutput(ContentsOutput):
  "Outputs an HTML tag surrounding the contents."
  # tag name plus attributes, e.g. 'span class="text"'; None means no tag
  tag = None
  # whether to emit newlines around the tag
  breaklines = False
  # whether to emit a self-closing tag with no contents
  empty = False
  def settag(self, tag, breaklines=False, empty=False):
    "Set the value for the tag and other attributes."
    self.tag = tag
    # only overwrite the flags when explicitly requested, so a subclass
    # default of True is not reset by the call defaults
    if breaklines:
      self.breaklines = breaklines
    if empty:
      self.empty = empty
    return self
  def setbreaklines(self, breaklines):
    "Set the value for breaklines."
    self.breaklines = breaklines
    return self
  def gethtml(self, container):
    "Return the HTML code."
    if self.empty:
      return [self.selfclosing(container)]
    html = [self.open(container)]
    html += ContentsOutput.gethtml(self, container)
    html.append(self.close(container))
    return html
  def open(self, container):
    "Get opening line."
    if not self.checktag():
      return ''
    opening = '<' + self.tag + '>'
    if self.breaklines:
      return opening + '\n'
    return opening
  def close(self, container):
    "Get closing line."
    if not self.checktag():
      return ''
    # only the tag name (first word) goes into the closing tag
    closing = '</' + self.tag.split()[0] + '>'
    if self.breaklines:
      return '\n' + closing + '\n'
    return closing
  def selfclosing(self, container):
    "Get self-closing line."
    if not self.checktag():
      return ''
    selfclosing = '<' + self.tag + '/>'
    if self.breaklines:
      return selfclosing + '\n'
    return selfclosing
  def checktag(self):
    "Check that the tag is valid."
    if not self.tag:
      # bug fix: the original referenced an undefined name 'container' here,
      # raising NameError instead of reporting the missing tag
      Trace.error('No tag in ' + str(self))
      return False
    if self.tag == '':
      return False
    return True
class FilteredOutput(ContentsOutput):
  """Returns the output in the contents, but filtered:
  some strings are replaced by others."""
  def __init__(self):
    "Start with no replacement pairs."
    self.filters = []
  def addfilter(self, original, replacement):
    "Add a new filter: replace the original by the replacement."
    self.filters.append((original, replacement))
  def gethtml(self, container):
    "Return the HTML code with every line passed through the filters."
    return [self.filter(line) for line in ContentsOutput.gethtml(self, container)]
  def filter(self, line):
    "Filter a single line with all available filters."
    for original, replacement in self.filters:
      if original in line:
        line = line.replace(original, replacement)
    return line
class StringOutput(ContainerOutput):
  "Returns a bare string as output"
  def gethtml(self, container):
    "Wrap the container's string in a single-element list."
    return [container.string]
import sys
import codecs
class LineReader(object):
  """Reads a file line by line.

  Accepts either an already-open file-like object or a file name; names are
  opened as UTF-8 text, falling back to gzip on decode failure.
  """
  def __init__(self, filename):
    # bug fix: the py2 builtin 'file' no longer exists in Python 3, so the
    # original isinstance(filename, file) raised NameError; duck-type instead
    if hasattr(filename, 'read'):
      self.file = filename
    else:
      self.file = codecs.open(filename, 'rU', 'utf-8')
    self.linenumber = 1
    self.lastline = None
    self.current = None
    self.mustread = True
    self.depleted = False
    try:
      self.readline()
    except UnicodeDecodeError:
      # try compressed file
      import gzip
      self.file = gzip.open(filename, 'rb')
      self.readline()
  def setstart(self, firstline):
    "Set the first line to read."
    for i in range(firstline):
      self.file.readline()
    self.linenumber = firstline
  def setend(self, lastline):
    "Set the last line to read."
    self.lastline = lastline
  def currentline(self):
    "Get the current line"
    if self.mustread:
      self.readline()
    return self.current
  def nextline(self):
    "Go to next line"
    if self.depleted:
      Trace.fatal('Read beyond file end')
    # lazily: the actual read happens on the next currentline()
    self.mustread = True
  def readline(self):
    "Read a line from the file"
    self.current = self.file.readline()
    # gzip (and other binary) files yield bytes; text files yield str.
    # Decoding only bytes also fixes the py3 crash for passed-in text streams
    # (str has no decode()).
    if isinstance(self.current, bytes):
      self.current = self.current.decode('utf-8')
    if len(self.current) == 0:
      self.depleted = True
    self.current = self.current.rstrip('\n\r')
    self.linenumber += 1
    self.mustread = False
    Trace.prefix = 'Line ' + str(self.linenumber) + ': '
    if self.linenumber % 1000 == 0:
      Trace.message('Parsing')
  def finished(self):
    "Find out if the file is finished"
    if self.lastline and self.linenumber == self.lastline:
      return True
    if self.mustread:
      self.readline()
    return self.depleted
  def close(self):
    self.file.close()
class LineWriter(object):
  """Writes a file as a series of lists.

  Accepts either an already-open file-like object or a file name; named
  files are opened lazily as UTF-8 on the first write.
  """
  # the output stream; False until opened
  file = False
  def __init__(self, filename):
    # bug fix: the py2 builtin 'file' no longer exists in Python 3, so the
    # original isinstance(filename, file) raised NameError; duck-type instead
    if hasattr(filename, 'write'):
      self.file = filename
      self.filename = None
    else:
      self.filename = filename
  def write(self, strings):
    "Write a list of strings"
    for string in strings:
      if not isinstance(string, str):
        Trace.error('Not a string: ' + str(string) + ' in ' + str(strings))
        return
      self.writestring(string)
  def writestring(self, string):
    "Write a string"
    if not self.file:
      self.file = codecs.open(self.filename, 'w', "utf-8")
    # py2 stdout needs explicit encoding; harmless no-op on py3
    if self.file == sys.stdout and sys.version_info < (3,0):
      string = string.encode('utf-8')
    self.file.write(string)
  def writeline(self, line):
    "Write a line to file"
    self.writestring(line + '\n')
  def close(self):
    self.file.close()
class Globable(object):
  """A bit of text which can be globbed (lumped together in bits).
  Methods current(), skipcurrent(), checkfor() and isout() have to be
  implemented by subclasses."""
  # when True, finished() does not complain about endings left open
  leavepending = False
  def __init__(self):
    # stack of endings that terminate the current glob (see EndingList)
    self.endinglist = EndingList()
  def checkbytemark(self):
    "Check for a Unicode byte mark and skip it."
    if self.finished():
      return
    if ord(self.current()) == 0xfeff:
      self.skipcurrent()
  def isout(self):
    "Find out if we are out of the position yet."
    Trace.error('Unimplemented isout()')
    return True
  def current(self):
    "Return the current character."
    Trace.error('Unimplemented current()')
    return ''
  def checkfor(self, string):
    "Check for the given string in the current position."
    Trace.error('Unimplemented checkfor()')
    return False
  def finished(self):
    "Find out if the current text has finished."
    if self.isout():
      # at end of input: verify no endings are left open (unless pending
      # endings are explicitly allowed)
      if not self.leavepending:
        self.endinglist.checkpending()
      return True
    # an ending at the current position also finishes the text
    return self.endinglist.checkin(self)
  def skipcurrent(self):
    "Return the current character and skip it."
    Trace.error('Unimplemented skipcurrent()')
    return ''
  def glob(self, currentcheck):
    "Glob a bit of text that satisfies a check on the current char."
    # note the order: finished() must be checked before currentcheck(),
    # since current() is only valid while not finished
    glob = ''
    while not self.finished() and currentcheck():
      glob += self.skipcurrent()
    return glob
  def globalpha(self):
    "Glob a bit of alpha text"
    return self.glob(lambda: self.current().isalpha())
  def globnumber(self):
    "Glob a row of digits."
    return self.glob(lambda: self.current().isdigit())
  def isidentifier(self):
    "Return if the current character is alphanumeric or _."
    if self.current().isalnum() or self.current() == '_':
      return True
    return False
  def globidentifier(self):
    "Glob alphanumeric and _ symbols."
    return self.glob(self.isidentifier)
  def isvalue(self):
    "Return if the current character is a value character:"
    "not a bracket or a space."
    if self.current().isspace():
      return False
    if self.current() in '{}()':
      return False
    return True
  def globvalue(self):
    "Glob a value: any symbols but brackets."
    return self.glob(self.isvalue)
  def skipspace(self):
    "Skip all whitespace at current position."
    return self.glob(lambda: self.current().isspace())
  def globincluding(self, magicchar):
    "Glob a bit of text up to (including) the magic char."
    glob = self.glob(lambda: self.current() != magicchar) + magicchar
    self.skip(magicchar)
    return glob
  def globexcluding(self, excluded):
    "Glob a bit of text up until (excluding) any excluded character."
    return self.glob(lambda: self.current() not in excluded)
  def pushending(self, ending, optional = False):
    "Push a new ending to the bottom"
    self.endinglist.add(ending, optional)
  def popending(self, expected = None):
    "Pop the ending found at the current position"
    if self.isout() and self.leavepending:
      return expected
    ending = self.endinglist.pop(self)
    if expected and expected != ending:
      Trace.error('Expected ending ' + expected + ', got ' + ending)
    # consume the ending text itself
    self.skip(ending)
    return ending
  def nextending(self):
    "Return the next ending in the queue."
    nextending = self.endinglist.findending(self)
    if not nextending:
      return None
    return nextending.ending
class EndingList(object):
  "A list of position endings"
  def __init__(self):
    # stack of PositionEnding objects, innermost last
    self.endings = []
  def add(self, ending, optional = False):
    "Add a new ending to the list"
    self.endings.append(PositionEnding(ending, optional))
  def pickpending(self, pos):
    "Pick any pending endings from a parse position."
    self.endings += pos.endinglist.endings
  def checkin(self, pos):
    "Search for an ending"
    if self.findending(pos):
      return True
    return False
  def pop(self, pos):
    "Remove the ending at the current position"
    if pos.isout():
      Trace.error('No ending out of bounds')
      return ''
    ending = self.findending(pos)
    if not ending:
      Trace.error('No ending at ' + pos.current())
      return ''
    # NOTE(review): removes entries from the list while iterating a reversed
    # view of it; works as a pop-from-the-end but is fragile — confirm before
    # restructuring.
    for each in reversed(self.endings):
      self.endings.remove(each)
      if each == ending:
        return each.ending
      elif not each.optional:
        # bug fix: concatenating the PositionEnding object itself raised
        # TypeError; convert it to its string representation first
        Trace.error('Removed non-optional ending ' + str(each))
    Trace.error('No endings left')
    return ''
  def findending(self, pos):
    "Find the ending at the current position"
    if len(self.endings) == 0:
      return None
    # scan from the innermost ending outwards; a non-matching mandatory
    # ending stops the search
    for index, ending in enumerate(reversed(self.endings)):
      if ending.checkin(pos):
        return ending
      if not ending.optional:
        return None
    return None
  def checkpending(self):
    "Check if there are any pending endings"
    if len(self.endings) != 0:
      Trace.error('Pending ' + str(self) + ' left open')
  def __unicode__(self):
    "Printable representation"
    string = 'endings ['
    for ending in self.endings:
      string += str(ending) + ','
    if len(self.endings) > 0:
      string = string[:-1]
    return string + ']'
class PositionEnding(object):
  "An ending for a parsing position"
  def __init__(self, ending, optional):
    "Remember the ending text and whether it is optional."
    self.ending = ending
    self.optional = optional
  def checkin(self, pos):
    "Check for the ending"
    return pos.checkfor(self.ending)
  def __unicode__(self):
    "Printable representation"
    result = 'Ending ' + self.ending
    return result + ' (optional)' if self.optional else result
class Position(Globable):
  """A position in a text to parse.
  Including those in Globable, functions to implement by subclasses are:
  skip(), identifier(), extract(), isout() and current()."""
  def __init__(self):
    Globable.__init__(self)
  def skip(self, string):
    "Skip a string"
    Trace.error('Unimplemented skip()')
  def identifier(self):
    "Return an identifier for the current position."
    Trace.error('Unimplemented identifier()')
    return 'Error'
  def extract(self, length):
    """Extract the next string of the given length, or None if not enough
    text, without advancing the parse position."""
    Trace.error('Unimplemented extract()')
    return None
  def checkfor(self, string):
    "Check for a string at the given position."
    return string == self.extract(len(string))
  def checkforlower(self, string):
    "Check for a string in lower case."
    extracted = self.extract(len(string))
    if not extracted:
      return False
    # reuse the already extracted text instead of calling extract() again
    # (extract does not advance, so the result is identical, just cheaper)
    return string.lower() == extracted.lower()
  def skipcurrent(self):
    "Return the current character and skip it."
    current = self.current()
    self.skip(current)
    return current
  def __next__(self):
    "Advance the position and return the next character."
    self.skipcurrent()
    return self.current()
  def checkskip(self, string):
    "Check for a string at the given position; if there, skip it"
    if not self.checkfor(string):
      return False
    self.skip(string)
    return True
  def error(self, message):
    "Show an error message and the position identifier."
    Trace.error(message + ': ' + self.identifier())
class TextPosition(Position):
  "A parse position backed by a plain string."
  def __init__(self, text):
    "Create the position over the given text, starting at offset 0."
    Position.__init__(self)
    self.pos = 0
    self.text = text
    self.checkbytemark()
  def skip(self, string):
    "Advance the position by the length of the string."
    self.pos += len(string)
  def identifier(self):
    "Return a sample (up to 30 chars) of the remaining text."
    length = min(30, len(self.text) - self.pos)
    return '*' + self.text[self.pos:self.pos + length] + '*'
  def isout(self):
    "Find out if we are out of the text yet."
    return self.pos >= len(self.text)
  def current(self):
    "Return the current character, assuming we are not out."
    return self.text[self.pos]
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text."
    if self.pos + length > len(self.text):
      return None
    return self.text[self.pos:self.pos + length]
class FilePosition(Position):
  "A parse position based on an underlying file."
  def __init__(self, filename):
    "Create the position from a file."
    Position.__init__(self)
    self.reader = LineReader(filename)
    self.pos = 0
    self.checkbytemark()
  def skip(self, string):
    "Skip a string of characters, crossing line boundaries as needed."
    length = len(string)
    while self.pos + length > len(self.reader.currentline()):
      # consume the rest of the current line plus the implicit newline
      length -= len(self.reader.currentline()) - self.pos + 1
      self.nextline()
    self.pos += length
  def currentline(self):
    "Get the current line of the underlying file."
    return self.reader.currentline()
  def nextline(self):
    "Go to the next line."
    self.reader.nextline()
    self.pos = 0
  def linenumber(self):
    "Return the 1-based line number of the file."
    return self.reader.linenumber + 1
  def identifier(self):
    "Return the current line and line number in the file."
    before = self.reader.currentline()[:self.pos - 1]
    after = self.reader.currentline()[self.pos:]
    # self.linenumber(): the method defined above; the original called
    # a nonexistent getlinenumber(), raising AttributeError when reached
    return 'line ' + str(self.linenumber()) + ': ' + before + '*' + after
  def isout(self):
    "Find out if we are out of the text yet."
    if self.pos > len(self.reader.currentline()):
      if self.pos > len(self.reader.currentline()) + 1:
        Trace.error('Out of the line ' + self.reader.currentline() + ': ' + str(self.pos))
      self.nextline()
    return self.reader.finished()
  def current(self):
    "Return the current character, assuming we are not out."
    if self.pos == len(self.reader.currentline()):
      # the end of a line reads as a newline character
      return '\n'
    if self.pos > len(self.reader.currentline()):
      Trace.error('Out of the line ' + self.reader.currentline() + ': ' + str(self.pos))
      return '*'
    return self.reader.currentline()[self.pos]
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text."
    if self.pos + length > len(self.reader.currentline()):
      return None
    return self.reader.currentline()[self.pos : self.pos + length]
class Container(object):
  "A container for text and objects in a lyx file"
  # part key for numbering (set elsewhere; not assigned in this chunk)
  partkey = None
  # the enclosing Container, set when this container is added to another
  parent = None
  # starting position in the source, when known (used in __unicode__)
  begin = None
  def __init__(self):
    self.contents = list()
  def process(self):
    "Process contents"
    pass
  def gethtml(self):
    "Get the resulting HTML"
    html = self.output.gethtml(self)
    if isinstance(html, str):
      # outputs are expected to return a list of lines, not a raw string
      Trace.error('Raw string ' + html)
      html = [html]
    return self.escapeall(html)
  def escapeall(self, lines):
    "Escape all lines in an array according to the output options."
    result = []
    for line in lines:
      if Options.html:
        line = self.escape(line, EscapeConfig.html)
      if Options.iso885915:
        line = self.escape(line, EscapeConfig.iso885915)
        line = self.escapeentities(line)
      elif not Options.str:
        line = self.escape(line, EscapeConfig.nonunicode)
      result.append(line)
    return result
  def escape(self, line, replacements = EscapeConfig.entities):
    "Escape a line with replacements from a map."
    pieces = list(replacements.keys())
    # do them in order
    pieces.sort()
    for piece in pieces:
      if piece in line:
        line = line.replace(piece, replacements[piece])
    return line
  def escapeentities(self, line):
    "Escape all Unicode characters to HTML entities."
    result = ''
    pos = TextPosition(line)
    while not pos.finished():
      if ord(pos.current()) > 128:
        codepoint = hex(ord(pos.current()))
        if codepoint == '0xd835':
          # high surrogate: combine with the next character to get the
          # codepoint of the full (mathematical alphanumeric) character
          codepoint = hex(ord(next(pos)) + 0xf800)
        # '0x..' -> 'x..' produces a hexadecimal entity '&#x..;'
        result += '&#' + codepoint[1:] + ';'
      else:
        result += pos.current()
      pos.skipcurrent()
    return result
  def searchall(self, type):
    "Search for all embedded containers of a given type"
    list = []
    self.searchprocess(type, lambda container: list.append(container))
    return list
  def searchremove(self, type):
    "Search for all containers of a type and remove them"
    list = self.searchall(type)
    for container in list:
      container.parent.contents.remove(container)
    return list
  def searchprocess(self, type, process):
    "Search for elements of a given type and process them"
    self.locateprocess(lambda container: isinstance(container, type), process)
  def locateprocess(self, locate, process):
    "Search for all embedded containers and process them"
    # depth-first: children are processed before the container itself
    for container in self.contents:
      container.locateprocess(locate, process)
      if locate(container):
        process(container)
  def recursivesearch(self, locate, recursive, process):
    "Perform a recursive search in the container."
    for container in self.contents:
      if recursive(container):
        container.recursivesearch(locate, recursive, process)
      if locate(container):
        process(container)
  def extracttext(self):
    "Extract all text from allowed containers."
    result = ''
    constants = ContainerExtractor(ContainerConfig.extracttext).extract(self)
    for constant in constants:
      result += constant.string
    return result
  def group(self, index, group, isingroup):
    "Group some adjoining elements into a group"
    if index >= len(self.contents):
      return
    if hasattr(self.contents[index], 'grouped'):
      # already part of another group
      return
    while index < len(self.contents) and isingroup(self.contents[index]):
      self.contents[index].grouped = True
      group.contents.append(self.contents[index])
      self.contents.pop(index)
    self.contents.insert(index, group)
  def remove(self, index):
    "Remove a container but leave its contents"
    container = self.contents[index]
    self.contents.pop(index)
    # pop() takes items from the end, insert() at a fixed index:
    # together they splice the contents in their original order
    while len(container.contents) > 0:
      self.contents.insert(index, container.contents.pop())
  def tree(self, level = 0):
    "Show in a tree"
    Trace.debug("  " * level + str(self))
    for container in self.contents:
      container.tree(level + 1)
  def getparameter(self, name):
    "Get the value of a parameter, if present."
    if not name in self.parameters:
      return None
    return self.parameters[name]
  def getparameterlist(self, name):
    "Get the value of a comma-separated parameter as a list."
    paramtext = self.getparameter(name)
    if not paramtext:
      return []
    return paramtext.split(',')
  def hasemptyoutput(self):
    "Check if any ancestor's output is empty."
    current = self.parent
    while current:
      if current.output.isempty():
        return True
      current = current.parent
    return False
  def __unicode__(self):
    "Get a description"
    if not self.begin:
      return self.__class__.__name__
    return self.__class__.__name__ + '@' + str(self.begin)
class BlackBox(Container):
  "A container that produces no output at all."
  def __init__(self):
    "Set up a swallowing parser and an empty output."
    self.contents = []
    self.parser = LoneCommand()
    self.output = EmptyOutput()
class LyXFormat(BlackBox):
  "Read the lyxformat command"
  def process(self):
    "Warn when the format version is older than 276 or newer than supported."
    version = int(self.header[1])
    supported = int(GeneralConfig.version['lyxformat'])
    if version < 276:
      Trace.error('Warning: unsupported old format version ' + str(version))
    if version > supported:
      Trace.error('Warning: unsupported new format version ' + str(version))
class StringContainer(Container):
  "A container for a single string"
  # raw parsed text, consumed (and cleared) by process()
  parsed = None
  def __init__(self):
    "Set up string parsing and string output."
    self.parser = StringParser()
    self.output = StringOutput()
    self.string = ''
  def process(self):
    "Replace special chars in the parsed contents, if any."
    if not self.parsed:
      return
    self.string = self.replacespecial(self.parsed)
    self.parsed = None
  def replacespecial(self, line):
    "Replace all special chars in a line."
    replaced = self.escape(line, EscapeConfig.entities)
    replaced = self.changeline(replaced)
    if ContainerConfig.string['startcommand'] in replaced and len(replaced) > 1:
      # an unprocessed command remains in the text: report it
      message = 'Unknown command: '
      if self.begin:
        message = 'Unknown command at ' + str(self.begin) + ': '
      Trace.error(message + replaced.strip())
    return replaced
  def changeline(self, line):
    "Escape special characters, and commands when one is present."
    line = self.escape(line, EscapeConfig.chars)
    if ContainerConfig.string['startcommand'] in line:
      line = self.escape(line, EscapeConfig.commands)
    return line
  def extracttext(self):
    "Return all text."
    return self.string
  def __unicode__(self):
    "Return a printable representation, with a sample of the string."
    result = 'StringContainer'
    if self.begin:
      result += '@' + str(self.begin)
    stripped = self.string.strip()
    ellipsis = '...' if len(stripped) > 15 else ''
    return result + ' (' + stripped[:15] + ellipsis + ')'
class Constant(StringContainer):
  "A constant string"
  def __init__(self, text):
    "Store the fixed text and prepare string output."
    self.contents = []
    self.output = StringOutput()
    self.string = text
  def __unicode__(self):
    "Return a printable representation."
    return 'Constant: ' + self.string
class TaggedText(Container):
  "Text inside a tag"
  output = None
  def __init__(self):
    self.parser = TextParser(self)
    self.output = TaggedOutput()
  def complete(self, contents, tag, breaklines=False):
    "Complete the tagged text with the given contents and tag; return self."
    self.contents = contents
    self.output.tag = tag
    self.output.breaklines = breaklines
    return self
  def constant(self, text, tag, breaklines=False):
    "Complete the tagged text with a single constant string."
    constant = Constant(text)
    return self.complete([constant], tag, breaklines)
  def __unicode__(self):
    "Return a printable representation."
    if not hasattr(self.output, 'tag'):
      # fixed typo in the original debug message ('Emtpy')
      return 'Empty tagged text'
    if not self.output.tag:
      return 'Tagged <unknown tag>'
    return 'Tagged <' + self.output.tag + '>'
class DocumentParameters(object):
  "Global parameters for the document."
  # PDF title metadata (set elsewhere; not assigned in this chunk)
  pdftitle = None
  # whether standard paragraphs are indented -- TODO confirm against setter
  indentstandard = False
  # depth of the table of contents
  tocdepth = 10
  # numbering level offset; subtracted in NumberGenerator.getlevel()
  startinglevel = 0
  # maximum depth for numbered layouts; read by NumberGenerator.isnumbered()
  maxdepth = 10
  # document language; consulted by Translator.findtranslation()
  language = None
  # bibliography type, if any (set elsewhere)
  bibliography = None
  # whether to output tracked changes (set elsewhere)
  outputchanges = False
  # True while processing a block (display) formula; toggled in Formula.process()
  displaymode = False
class FormulaParser(Parser):
  "Parses a formula"
  def parseheader(self, reader):
    "See if the formula is inlined"
    self.begin = reader.linenumber + 1
    type = self.parsetype(reader)
    if not type:
      # the type marker may be on the following line
      reader.nextline()
      type = self.parsetype(reader)
      if not type:
        Trace.error('Unknown formula type in ' + reader.currentline().strip())
        return ['unknown']
    return [type]
  def parsetype(self, reader):
    "Get the formula type from the first line: simple -> inline,"
    "complex/unnumbered -> block, beginbefore -> numbered."
    if reader.currentline().find(FormulaConfig.starts['simple']) >= 0:
      return 'inline'
    if reader.currentline().find(FormulaConfig.starts['complex']) >= 0:
      return 'block'
    if reader.currentline().find(FormulaConfig.starts['unnumbered']) >= 0:
      return 'block'
    if reader.currentline().find(FormulaConfig.starts['beginbefore']) >= 0:
      return 'numbered'
    return None
  def parse(self, reader):
    "Parse the formula until the end"
    formula = self.parseformula(reader)
    # skip (and complain about) any leftover lines before the ending marker
    while not reader.currentline().startswith(self.ending):
      stripped = reader.currentline().strip()
      if len(stripped) > 0:
        Trace.error('Unparsed formula line ' + stripped)
      reader.nextline()
    reader.nextline()
    return formula
  def parseformula(self, reader):
    "Parse the formula contents"
    simple = FormulaConfig.starts['simple']
    if simple in reader.currentline():
      rest = reader.currentline().split(simple, 1)[1]
      if simple in rest:
        # formula is $...$
        return self.parsesingleliner(reader, simple, simple)
      # formula is multiline $...$
      return self.parsemultiliner(reader, simple, simple)
    if FormulaConfig.starts['complex'] in reader.currentline():
      # formula of the form \[...\]
      return self.parsemultiliner(reader, FormulaConfig.starts['complex'],
          FormulaConfig.endings['complex'])
    beginbefore = FormulaConfig.starts['beginbefore']
    beginafter = FormulaConfig.starts['beginafter']
    if beginbefore in reader.currentline():
      if reader.currentline().strip().endswith(beginafter):
        current = reader.currentline().strip()
        endsplit = current.split(beginbefore)[1].split(beginafter)
        # reconstruct the \begin{env} ... \end{env} wrapper around the body
        startpiece = beginbefore + endsplit[0] + beginafter
        endbefore = FormulaConfig.endings['endbefore']
        endafter = FormulaConfig.endings['endafter']
        endpiece = endbefore + endsplit[0] + endafter
        return startpiece + self.parsemultiliner(reader, startpiece, endpiece) + endpiece
      Trace.error('Missing ' + beginafter + ' in ' + reader.currentline())
      return ''
    begincommand = FormulaConfig.starts['command']
    beginbracket = FormulaConfig.starts['bracket']
    if begincommand in reader.currentline() and beginbracket in reader.currentline():
      endbracket = FormulaConfig.endings['bracket']
      return self.parsemultiliner(reader, beginbracket, endbracket)
    Trace.error('Formula beginning ' + reader.currentline() + ' is unknown')
    return ''
  def parsesingleliner(self, reader, start, ending):
    "Parse a formula in one line"
    line = reader.currentline().strip()
    if not start in line:
      Trace.error('Line ' + line + ' does not contain formula start ' + start)
      return ''
    if not line.endswith(ending):
      Trace.error('Formula ' + line + ' does not end with ' + ending)
      return ''
    index = line.index(start)
    # everything between the start marker and the ending marker
    rest = line[index + len(start):-len(ending)]
    reader.nextline()
    return rest
  def parsemultiliner(self, reader, start, ending):
    "Parse a formula in multiple lines"
    formula = ''
    line = reader.currentline()
    if not start in line:
      Trace.error('Line ' + line.strip() + ' does not contain formula start ' + start)
      return ''
    index = line.index(start)
    line = line[index + len(start):].strip()
    while not line.endswith(ending):
      formula += line + '\n'
      reader.nextline()
      line = reader.currentline()
    # last line: append it without the ending marker
    formula += line[:-len(ending)]
    reader.nextline()
    return formula
class MacroParser(FormulaParser):
  "A parser for a formula macro."
  def parseheader(self, reader):
    "Macros are always treated as inline formulas."
    self.begin = reader.linenumber + 1
    return ['inline']
  def parse(self, reader):
    "Parse the macro between its start marker and the ending."
    result = self.parsemultiliner(reader, self.parent.start, self.ending)
    reader.nextline()
    return result
class FormulaBit(Container):
  "A bit of a formula"
  # the formula bit type: 'alpha', 'number', 'font' or None
  type = None
  # relative size of the bit, recomputed by computesize()
  size = 1
  # the original LaTeX text this bit was read from
  original = ''
  def __init__(self):
    "Start with no contents and plain contents output."
    self.contents = []
    self.output = ContentsOutput()
  def setfactory(self, factory):
    "Set the internal formula factory."
    self.factory = factory
    return self
  def add(self, bit):
    "Add an already processed formula bit, keeping its original text."
    self.contents.append(bit)
    self.original += bit.original
    bit.parent = self
  def skiporiginal(self, string, pos):
    "Skip a string and add it to the original formula."
    self.original += string
    if not pos.checkskip(string):
      Trace.error('String ' + string + ' not at ' + pos.identifier())
  def computesize(self):
    "Compute the size of the bit as the max of the sizes of all contents."
    if not self.contents:
      return 1
    self.size = max(element.size for element in self.contents)
    return self.size
  def clone(self):
    "Return a copy of itself, reparsed from the original text."
    return self.factory.parseformula(self.original)
  def __unicode__(self):
    "Get a string representation."
    return self.__class__.__name__ + ' read in ' + self.original
class TaggedBit(FormulaBit):
  "A tagged string in a formula"
  def constant(self, constant, tag):
    "Set a single constant as contents, with the given tag."
    self.output = TaggedOutput().settag(tag)
    self.add(FormulaConstant(constant))
    return self
  def complete(self, contents, tag, breaklines = False):
    "Set the contents and the tag."
    self.output = TaggedOutput().settag(tag, breaklines)
    self.contents = contents
    return self
  def selfcomplete(self, tag):
    "Set a self-closing tag with no contents (as in <hr/>)."
    self.output = TaggedOutput().settag(tag, empty = True)
    return self
class FormulaConstant(Constant):
  "A constant string in a formula"
  def __init__(self, string):
    "Set the constant string, keeping it as the original text."
    Constant.__init__(self, string)
    self.original = string
    self.type = None
    self.size = 1
  def computesize(self):
    "The size of a constant is always 1."
    return self.size
  def clone(self):
    "Return a fresh constant with the same original string."
    return FormulaConstant(self.original)
  def __unicode__(self):
    "Return a printable representation."
    return 'Formula constant: ' + self.string
class RawText(FormulaBit):
  "A bit of text inside a formula"
  def detect(self, pos):
    "Detect alphabetic text."
    return pos.current().isalpha()
  def parsebit(self, pos):
    "Parse a run of alphabetic characters."
    self.add(FormulaConstant(pos.globalpha()))
    self.type = 'alpha'
class FormulaSymbol(FormulaBit):
  "A symbol inside a formula"
  modified = FormulaConfig.modified
  unmodified = FormulaConfig.unmodified['characters']
  def detect(self, pos):
    "Detect a known symbol, modified or unmodified."
    char = pos.current()
    return char in FormulaSymbol.unmodified or char in FormulaSymbol.modified
  def parsebit(self, pos):
    "Parse the symbol, translating modified characters."
    char = pos.current()
    if char in FormulaSymbol.unmodified:
      self.addsymbol(char, pos)
    elif char in FormulaSymbol.modified:
      self.addsymbol(FormulaSymbol.modified[char], pos)
    else:
      Trace.error('Symbol ' + char + ' not found')
  def addsymbol(self, symbol, pos):
    "Add the translated symbol, skipping the original character."
    self.skiporiginal(pos.current(), pos)
    self.contents.append(FormulaConstant(symbol))
class FormulaNumber(FormulaBit):
  "A string of digits in a formula"
  def detect(self, pos):
    "Detect a digit."
    return pos.current().isdigit()
  def parsebit(self, pos):
    "Parse a run of digits."
    run = pos.glob(lambda: pos.current().isdigit())
    self.add(FormulaConstant(run))
    self.type = 'number'
class Comment(FormulaBit):
  "A LaTeX comment: % to the end of the line."
  start = FormulaConfig.starts['comment']
  def detect(self, pos):
    "Detect the comment character."
    return pos.current() == self.start
  def parsebit(self, pos):
    "Swallow everything up to and including the end of the line."
    self.original += pos.globincluding('\n')
class WhiteSpace(FormulaBit):
  "Some white space inside a formula."
  def detect(self, pos):
    "Detect whitespace."
    return pos.current().isspace()
  def parsebit(self, pos):
    "Consume all contiguous whitespace."
    self.original += pos.skipspace()
  def __unicode__(self):
    "Return a printable representation."
    return 'Whitespace: *' + self.original + '*'
class Bracket(FormulaBit):
  "A {} bracket inside a formula"
  start = FormulaConfig.starts['bracket']
  ending = FormulaConfig.endings['bracket']
  def __init__(self):
    "Create a (possibly literal) new bracket"
    FormulaBit.__init__(self)
    self.inner = None
  def detect(self, pos):
    "Detect the start of a bracket"
    return pos.checkfor(self.start)
  def parsebit(self, pos):
    "Parse the bracket, treating the contents as a formula."
    self.parsecomplete(pos, self.innerformula)
    return self
  def parsetext(self, pos):
    "Parse a text bracket"
    self.parsecomplete(pos, self.innertext)
    return self
  def parseliteral(self, pos):
    "Parse a literal bracket"
    self.parsecomplete(pos, self.innerliteral)
    return self
  def parsecomplete(self, pos, innerparser):
    "Parse the start and end marks"
    if not pos.checkfor(self.start):
      Trace.error('Bracket should start with ' + self.start + ' at ' + pos.identifier())
      return None
    self.skiporiginal(self.start, pos)
    # limit the parse position to this bracket's contents until popending()
    pos.pushending(self.ending)
    innerparser(pos)
    self.original += pos.popending(self.ending)
    self.computesize()
  def innerformula(self, pos):
    "Parse a whole formula inside the bracket"
    while not pos.finished():
      self.add(self.factory.parseany(pos))
  def innertext(self, pos):
    "Parse some text inside the bracket, following textual rules."
    specialchars = list(FormulaConfig.symbolfunctions.keys())
    specialchars.append(FormulaConfig.starts['command'])
    specialchars.append(FormulaConfig.starts['bracket'])
    specialchars.append(Comment.start)
    while not pos.finished():
      if pos.current() in specialchars:
        # special characters are parsed as formula bits
        self.add(self.factory.parseany(pos))
        if pos.checkskip(' '):
          self.original += ' '
      else:
        # ordinary characters are kept verbatim
        self.add(FormulaConstant(pos.skipcurrent()))
  def innerliteral(self, pos):
    "Parse a literal inside the bracket, which does not generate HTML."
    self.literal = ''
    while not pos.finished() and not pos.current() == self.ending:
      if pos.current() == self.start:
        # nested brackets recurse
        self.parseliteral(pos)
      else:
        self.literal += pos.skipcurrent()
    self.original += self.literal
class SquareBracket(Bracket):
  "A [] bracket inside a formula"
  start = FormulaConfig.starts['squarebracket']
  ending = FormulaConfig.endings['squarebracket']
  def clone(self):
    "Return a new square bracket sharing the same contents."
    duplicate = SquareBracket()
    duplicate.contents = self.contents
    return duplicate
class MathsProcessor(object):
  "A processor for a maths construction inside the FormulaProcessor."
  def process(self, contents, index):
    "Process an element inside a formula; subclasses must override this."
    Trace.error('Unimplemented process() in ' + str(self))
  def __unicode__(self):
    "Return a printable description."
    return 'Maths processor ' + type(self).__name__
class FormulaProcessor(object):
  "A processor specifically for formulas."
  # MathsProcessor instances; appended elsewhere (empty in this chunk)
  processors = []
  def process(self, bit):
    "Process the contents of every formula bit, recursively."
    self.processcontents(bit)
    self.processinsides(bit)
    self.traversewhole(bit)
  def processcontents(self, bit):
    "Process the contents of a formula bit."
    if not isinstance(bit, FormulaBit):
      return
    bit.process()
    for element in bit.contents:
      self.processcontents(element)
  def processinsides(self, bit):
    "Process the insides (limits, brackets) in a formula bit."
    if not isinstance(bit, FormulaBit):
      return
    for index, element in enumerate(bit.contents):
      for processor in self.processors:
        processor.process(bit.contents, index)
      # continue with recursive processing
      self.processinsides(element)
  def traversewhole(self, formula):
    "Traverse over the contents to alter variables and space units."
    last = None
    for bit, contents in self.traverse(formula):
      if bit.type == 'alpha':
        self.italicize(bit, contents)
      elif bit.type == 'font' and last and last.type == 'number':
        # separate a font bit from a preceding number with a space
        bit.contents.insert(0, FormulaConstant(' '))
      last = bit
  def traverse(self, bit):
    "Traverse a formula and yield a flattened structure of (bit, list) pairs."
    for element in bit.contents:
      if hasattr(element, 'type') and element.type:
        # typed elements are yielded along with their containing list
        yield (element, bit.contents)
      elif isinstance(element, FormulaBit):
        for pair in self.traverse(element):
          yield pair
  def italicize(self, bit, contents):
    "Italicize the given bit of text, wrapping it in an <i> tag in place."
    index = contents.index(bit)
    contents[index] = TaggedBit().complete([bit], 'i')
class Formula(Container):
  "A LaTeX formula"
  def __init__(self):
    self.parser = FormulaParser()
    self.output = TaggedOutput().settag('span class="formula"')
  def process(self):
    "Convert the formula to tags"
    if self.header[0] == 'inline':
      DocumentParameters.displaymode = False
    else:
      DocumentParameters.displaymode = True
      self.output.settag('div class="formula"', True)
    # pick the output backend according to the global options
    if Options.jsmath:
      self.jsmath()
    elif Options.mathjax:
      self.mathjax()
    elif Options.googlecharts:
      self.googlecharts()
    else:
      self.classic()
  def jsmath(self):
    "Make the contents for jsMath."
    if self.header[0] != 'inline':
      self.output = TaggedOutput().settag('div class="math"')
    else:
      self.output = TaggedOutput().settag('span class="math"')
    self.contents = [Constant(self.parsed)]
  def mathjax(self):
    "Make the contents for MathJax."
    self.output.tag = 'span class="MathJax_Preview"'
    tag = 'script type="math/tex'
    if self.header[0] != 'inline':
      tag += ';mode=display'
    self.contents = [TaggedText().constant(self.parsed, tag + '"', True)]
  def googlecharts(self):
    "Make the contents using Google Charts http://code.google.com/apis/chart/."
    url = FormulaConfig.urls['googlecharts'] + urllib.parse.quote_plus(self.parsed)
    img = '<img class="chart" src="' + url + '" alt="' + self.parsed + '"/>'
    self.contents = [Constant(img)]
  def classic(self):
    "Make the contents using classic output generation with XHTML and CSS."
    whole = FormulaFactory().parseformula(self.parsed)
    FormulaProcessor().process(whole)
    whole.parent = self
    self.contents = [whole]
  def parse(self, pos):
    "Parse using a parse position instead of self.parser."
    if pos.checkskip('$$'):
      self.parsedollarblock(pos)
    elif pos.checkskip('$'):
      self.parsedollarinline(pos)
    elif pos.checkskip('\\('):
      self.parseinlineto(pos, '\\)')
    elif pos.checkskip('\\['):
      self.parseblockto(pos, '\\]')
    else:
      pos.error('Unparseable formula')
    self.process()
    return self
  def parsedollarinline(self, pos):
    "Parse a $...$ formula."
    self.header = ['inline']
    self.parsedollar(pos)
  def parsedollarblock(self, pos):
    "Parse a $$...$$ formula."
    self.header = ['block']
    self.parsedollar(pos)
    # parsedollar() consumed the first closing $; a second one must follow
    if not pos.checkskip('$'):
      pos.error('Formula should be $$...$$, but last $ is missing.')
  def parsedollar(self, pos):
    "Parse to the next $."
    pos.pushending('$')
    self.parsed = pos.globexcluding('$')
    pos.popending('$')
  def parseinlineto(self, pos, limit):
    "Parse a \\(...\\) formula."
    self.header = ['inline']
    self.parseupto(pos, limit)
  def parseblockto(self, pos, limit):
    "Parse a \\[...\\] formula."
    self.header = ['block']
    self.parseupto(pos, limit)
  def parseupto(self, pos, limit):
    "Parse a formula that ends with the given command."
    pos.pushending(limit)
    self.parsed = pos.glob(lambda: True)
    pos.popending(limit)
  def __unicode__(self):
    "Return a printable representation."
    if self.partkey and self.partkey.number:
      return 'Formula (' + self.partkey.number + ')'
    return 'Unnumbered formula'
class WholeFormula(FormulaBit):
  "Parse a whole formula"
  def detect(self, pos):
    "Any position that is not yet finished will do."
    return not pos.finished()
  def parsebit(self, pos):
    "Parse formula bits until the position runs out."
    while not pos.finished():
      self.add(self.factory.parseany(pos))
class FormulaFactory(object):
  "Construct bits of formula"
  # bit types will be appended later
  types = [FormulaSymbol, RawText, FormulaNumber, Bracket, Comment, WhiteSpace]
  # types that are consumed without producing visible output
  skippedtypes = [Comment, WhiteSpace]
  defining = False
  def __init__(self):
    "Initialize the map of instances."
    self.instances = dict()
  def detecttype(self, type, pos):
    "Detect a bit of a given type."
    if pos.finished():
      return False
    return self.instance(type).detect(pos)
  def instance(self, type):
    "Get an instance of the given type, creating one when none is cached."
    if not type in self.instances or not self.instances[type]:
      self.instances[type] = self.create(type)
    return self.instances[type]
  def create(self, type):
    "Create a new formula bit of the given type."
    return Cloner.create(type).setfactory(self)
  def clearskipped(self, pos):
    "Clear any skipped types."
    while not pos.finished():
      if not self.skipany(pos):
        return
    return
  def skipany(self, pos):
    "Skip any skipped types."
    for type in self.skippedtypes:
      if self.instance(type).detect(pos):
        return self.parsetype(type, pos)
    return None
  def parseany(self, pos):
    "Parse any formula bit at the current location."
    for type in self.types + self.skippedtypes:
      if self.detecttype(type, pos):
        return self.parsetype(type, pos)
    Trace.error('Unrecognized formula at ' + pos.identifier())
    return FormulaConstant(pos.skipcurrent())
  def parsetype(self, type, pos):
    "Parse the given type and return it."
    bit = self.instance(type)
    # invalidate the cached instance: it is consumed by this parse
    self.instances[type] = None
    returnedbit = bit.parsebit(pos)
    if returnedbit:
      return returnedbit.setfactory(self)
    return bit
  def parseformula(self, formula):
    "Parse a string of text that contains a whole formula."
    pos = TextPosition(formula)
    whole = self.create(WholeFormula)
    if whole.detect(pos):
      whole.parsebit(pos)
      return whole
    # no formula found
    if not pos.finished():
      Trace.error('Unknown formula at: ' + pos.identifier())
      whole.add(TaggedBit().constant(formula, 'span class="unknown"'))
    return whole
import gettext
class Translator(object):
  "Reads the configuration file and tries to find a translation."
  "Otherwise falls back to the messages in the config file."
  # singleton, assigned right after the class definition
  instance = None
  def translate(cls, key):
    "Get the translated message for a key."
    return cls.instance.getmessage(key)
  translate = classmethod(translate)
  def __init__(self):
    self.translation = None
    self.first = True
  def findtranslation(self):
    "Find the translation for the document language."
    self.langcodes = None
    if not DocumentParameters.language:
      Trace.error('No language in document')
      return
    if not DocumentParameters.language in TranslationConfig.languages:
      Trace.error('Unknown language ' + DocumentParameters.language)
      return
    if TranslationConfig.languages[DocumentParameters.language] == 'en':
      # English is the fallback built into the config file
      return
    langcodes = [TranslationConfig.languages[DocumentParameters.language]]
    try:
      self.translation = gettext.translation('elyxer', None, langcodes)
    except IOError:
      Trace.error('No translation for ' + str(langcodes))
  def getmessage(self, key):
    "Get the translated message for the given key."
    if self.first:
      # look up the translation lazily, on first use
      self.findtranslation()
      self.first = False
    message = self.getuntranslated(key)
    if not self.translation:
      return message
    try:
      # gettext(): Python 3 translation objects have no ugettext(),
      # which would raise an AttributeError here instead of translating
      message = self.translation.gettext(message)
    except IOError:
      pass
    return message
  def getuntranslated(self, key):
    "Get the untranslated message."
    if not key in TranslationConfig.constants:
      Trace.error('Cannot translate ' + key)
      return key
    return TranslationConfig.constants[key]
# Shared singleton consulted by the classmethod Translator.translate().
Translator.instance = Translator()
class NumberCounter(object):
  "A counter for numbers (by default)."
  "The type can be changed to return letters, roman numbers..."
  name = None
  value = None
  # output mode: None/'text'/'1' (numbers), 'A'/'a' (letters),
  # 'I' (roman numerals), '*' (symbols)
  mode = None
  master = None
  letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
  symbols = NumberingConfig.sequence['symbols']
  romannumerals = [
      ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
      ('XC', 90), ('L', 50), ('XL', 40), ('X', 10), ('IX', 9), ('V', 5),
      ('IV', 4), ('I', 1)
      ]
  def __init__(self, name):
    "Give a name to the counter."
    self.name = name
  def setmode(self, mode):
    "Set the counter mode. Can be changed at runtime."
    self.mode = mode
    return self
  def init(self, value):
    "Set an initial value."
    self.value = value
  def gettext(self):
    "Get the current value as a text string."
    return str(self.value)
  def getletter(self):
    "Get the current value as a letter."
    return self.getsequence(self.letters)
  def getsymbol(self):
    "Get the current value as a symbol."
    return self.getsequence(self.symbols)
  def getsequence(self, sequence):
    "Get the current value from a sequence, wrapping around."
    return sequence[(self.value - 1) % len(sequence)]
  def getroman(self):
    "Get the current value as a roman number."
    result = ''
    number = self.value
    for numeral, value in self.romannumerals:
      if number >= value:
        # floor division: '/' yields a float in Python 3, and
        # repeating a string by a float raises a TypeError
        result += numeral * (number // value)
        number = number % value
    return result
  def getvalue(self):
    "Get the current value as configured in the current mode."
    if not self.mode or self.mode in ['text', '1']:
      return self.gettext()
    if self.mode == 'A':
      return self.getletter()
    if self.mode == 'a':
      return self.getletter().lower()
    if self.mode == 'I':
      return self.getroman()
    if self.mode == '*':
      return self.getsymbol()
    Trace.error('Unknown counter mode ' + self.mode)
    return self.gettext()
  def getnext(self):
    "Increase the current value and get the next value as configured."
    if not self.value:
      self.value = 0
    self.value += 1
    return self.getvalue()
  def reset(self):
    "Reset the counter."
    self.value = 0
  def __unicode__(self):
    "Return a printable representation."
    result = 'Counter ' + self.name
    if self.mode:
      result += ' in mode ' + self.mode
    return result
class DependentCounter(NumberCounter):
  "A counter which depends on another one (the master)."
  def setmaster(self, master):
    "Attach the master counter and remember its current value."
    self.master = master
    self.last = master.getvalue()
    return self
  def getnext(self):
    "Increase or, if the master counter has changed, restart."
    if self.master.getvalue() != self.last:
      self.reset()
    result = NumberCounter.getnext(self)
    self.last = self.master.getvalue()
    return result
  def getvalue(self):
    "Get the value of the combined counter: master.dependent."
    return self.master.getvalue() + '.' + NumberCounter.getvalue(self)
class NumberGenerator(object):
  "A number generator for unique sequences and hierarchical structures. Used in:"
  " * ordered part numbers: Chapter 3, Section 5.3."
  " * unique part numbers: Footnote 15, Bibliography cite [15]."
  " * chaptered part numbers: Figure 3.15, Equation (8.3)."
  " * unique roman part numbers: Part I, Book IV."
  # module-wide generator singletons, assigned right after the class definitions
  chaptered = None
  generator = None
  # lower-cased layout names taken from the numbering configuration
  romanlayouts = [x.lower() for x in NumberingConfig.layouts['roman']]
  orderedlayouts = [x.lower() for x in NumberingConfig.layouts['ordered']]
  # all counters created so far, shared at class level and keyed by type
  counters = dict()
  appendix = None
  def deasterisk(self, type):
    "Remove the possible asterisk in a layout type."
    return type.replace('*', '')
  def isunique(self, type):
    "Find out if the layout type corresponds to a unique part."
    return self.isroman(type)
  def isroman(self, type):
    "Find out if the layout type should have roman numeration."
    return self.deasterisk(type).lower() in self.romanlayouts
  def isinordered(self, type):
    "Find out if the layout type corresponds to an (un)ordered part."
    return self.deasterisk(type).lower() in self.orderedlayouts
  def isnumbered(self, type):
    "Find out if the type for a layout corresponds to a numbered layout."
    if '*' in type:
      return False
    if self.isroman(type):
      return True
    if not self.isinordered(type):
      return False
    if self.getlevel(type) > DocumentParameters.maxdepth:
      # deeper than the configured numbering depth: not numbered
      return False
    return True
  def isunordered(self, type):
    "Find out if the type contains an asterisk, basically."
    return '*' in type
  def getlevel(self, type):
    "Get the level that corresponds to a layout type."
    if self.isunique(type):
      return 0
    if not self.isinordered(type):
      Trace.error('Unknown layout type ' + type)
      return 0
    type = self.deasterisk(type).lower()
    level = self.orderedlayouts.index(type) + 1
    # shift by the starting level so e.g. articles begin at sections
    return level - DocumentParameters.startinglevel
  def getparttype(self, type):
    "Obtain the type for the part: without the asterisk, "
    "and switched to Appendix if necessary."
    if NumberGenerator.appendix and self.getlevel(type) == 1:
      return 'Appendix'
    return self.deasterisk(type)
  def generate(self, type):
    "Generate a number for a layout type."
    "Unique part types such as Part or Book generate roman numbers: Part I."
    "Ordered part types return dot-separated tuples: Chapter 5, Subsection 2.3.5."
    "Everything else generates unique numbers: Bibliography [1]."
    "Each invocation results in a new number."
    return self.getcounter(type).getnext()
  def getcounter(self, type):
    "Get the counter for the given type, creating it on first use."
    type = type.lower()
    if not type in self.counters:
      self.counters[type] = self.create(type)
    return self.counters[type]
  def create(self, type):
    "Create a counter for the given type."
    if self.isnumbered(type) and self.getlevel(type) > 1:
      # ordered sublevels hang off the counter one level up (Section -> Chapter)
      index = self.orderedlayouts.index(type)
      above = self.orderedlayouts[index - 1]
      master = self.getcounter(above)
      return self.createdependent(type, master)
    counter = NumberCounter(type)
    if self.isroman(type):
      counter.setmode('I')
    return counter
  def getdependentcounter(self, type, master):
    "Get (or create) a counter of the given type that depends on another."
    # NOTE(review): assumes a plain NumberCounter exposes a falsy `master`
    # attribute (class-level default) -- verify against the class definition
    if not type in self.counters or not self.counters[type].master:
      self.counters[type] = self.createdependent(type, master)
    return self.counters[type]
  def createdependent(self, type, master):
    "Create a dependent counter given the master."
    return DependentCounter(type).setmaster(master)
  def startappendix(self):
    "Start appendices here: restart the first-level counter in letter mode."
    firsttype = self.orderedlayouts[DocumentParameters.startinglevel]
    counter = self.getcounter(firsttype)
    counter.setmode('A').reset()
    NumberGenerator.appendix = True
class ChapteredGenerator(NumberGenerator):
  "Generate chaptered numbers, as in Chapter.Number."
  "Used in equations, figures: Equation (5.3), figure 8.15."
  def generate(self, type):
    "Generate a number which goes with first-level numbers (chapters). "
    "For the article classes a unique number is generated."
    if DocumentParameters.startinglevel > 0:
      # article-like classes have no chapters: fall back to unique numbering
      return NumberGenerator.generator.generate(type)
    chapter = self.getcounter('Chapter')
    return self.getdependentcounter(type, chapter).getnext()
# module-level singletons: the chaptered generator (figures, equations) and
# the default generator for everything else
NumberGenerator.chaptered = ChapteredGenerator()
NumberGenerator.generator = NumberGenerator()
class ContainerSize(object):
  "The size of a container."
  # each attribute holds a CSS-ready string (e.g. '50%', '120px') or None
  width = None
  height = None
  maxwidth = None
  maxheight = None
  scale = None
  def set(self, width = None, height = None):
    "Set the proper size with width and height."
    self.setvalue('width', width)
    self.setvalue('height', height)
    return self
  def setmax(self, maxwidth = None, maxheight = None):
    "Set max width and/or height."
    self.setvalue('maxwidth', maxwidth)
    self.setvalue('maxheight', maxheight)
    return self
  def readparameters(self, container):
    "Read some size parameters off a container."
    self.setparameter(container, 'width')
    self.setparameter(container, 'height')
    self.setparameter(container, 'scale')
    self.checkvalidheight(container)
    return self
  def setparameter(self, container, name):
    "Read a size parameter off a container, and set it if present."
    value = container.getparameter(name)
    self.setvalue(name, value)
  def setvalue(self, name, value):
    "Set the value of a parameter name, only if it's valid."
    value = self.processparameter(value)
    if value:
      setattr(self, name, value)
  def checkvalidheight(self, container):
    "Check if the height parameter is valid; otherwise erase it."
    heightspecial = container.getparameter('height_special')
    # 'height 1 totalheight' means "natural height": drop the explicit value
    if self.height and self.extractnumber(self.height) == '1' and heightspecial == 'totalheight':
      self.height = None
  def processparameter(self, value):
    "Do the full processing on a parameter."
    if not value:
      return None
    if self.extractnumber(value) == '0':
      # a zero size is meaningless; treat as absent
      return None
    for ignored in StyleConfig.size['ignoredtexts']:
      if ignored in value:
        value = value.replace(ignored, '')
    return value
  def extractnumber(self, text):
    "Extract the first number in the given text (digits plus one decimal point)."
    result = ''
    decimal = False
    for char in text:
      if char.isdigit():
        result += char
      elif char == '.' and not decimal:
        # accept at most one decimal point
        result += char
        decimal = True
      else:
        # first non-numeric character ends the number
        return result
    return result
  def checkimage(self, width, height):
    "Check image dimensions, set them if possible."
    if width:
      self.maxwidth = str(width) + 'px'
      if self.scale and not self.width:
        self.width = self.scalevalue(width)
    if height:
      self.maxheight = str(height) + 'px'
      if self.scale and not self.height:
        self.height = self.scalevalue(height)
    # with only one side given, let the browser keep the aspect ratio
    if self.width and not self.height:
      self.height = 'auto'
    if self.height and not self.width:
      self.width = 'auto'
  def scalevalue(self, value):
    "Scale the value according to the image scale and return it as unicode."
    # scale is a percentage; the result is truncated to whole pixels
    scaled = value * int(self.scale) / 100
    return str(int(scaled)) + 'px'
  def removepercentwidth(self):
    "Remove percent width if present, to set it at the figure level."
    if not self.width:
      return None
    if not '%' in self.width:
      return None
    width = self.width
    self.width = None
    if self.height == 'auto':
      # 'auto' height only made sense next to the removed width
      self.height = None
    return width
  def addstyle(self, container):
    "Add the proper style attribute to the output tag."
    if not isinstance(container.output, TaggedOutput):
      Trace.error('No tag to add style, in ' + str(container))
    if not self.width and not self.height and not self.maxwidth and not self.maxheight:
      # nothing to see here; move along
      return
    tag = ' style="'
    tag += self.styleparameter('width')
    tag += self.styleparameter('maxwidth')
    tag += self.styleparameter('height')
    tag += self.styleparameter('maxheight')
    if tag[-1] == ' ':
      # drop the trailing space left by the last style parameter
      tag = tag[:-1]
    tag += '"'
    container.output.tag += tag
  def styleparameter(self, name):
    "Get the style for a single parameter."
    value = getattr(self, name)
    if value:
      return name.replace('max', 'max-') + ': ' + value + '; '
    return ''
class QuoteContainer(Container):
  "A container that renders a typographic quote."
  def __init__(self):
    self.parser = BoundedParser()
    self.output = FixedOutput()
  def process(self):
    "Translate the quote type from the header into its configured HTML."
    self.type = self.header[2]
    if self.type not in StyleConfig.quotes:
      Trace.error('Quote type ' + self.type + ' not found')
      self.html = ['"']
      return
    self.html = [StyleConfig.quotes[self.type]]
class LyXLine(Container):
  "A LyX line, rendered as a horizontal rule."
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit the horizontal rule."
    self.html = ['<hr class="line" />']
class EmphaticText(TaggedText):
  "Text in emphatic (italic) mode."
  def process(self):
    "Wrap the contents in an italics tag."
    self.output.tag = 'i'
class ShapedText(TaggedText):
  "Text with a shape applied (italic, slanted...)."
  def process(self):
    "Look up the tag for the shape; fall back to a plain span."
    self.type = self.header[1]
    if self.type not in TagConfig.shaped:
      Trace.error('Unrecognized shape ' + self.header[1])
      self.output.tag = 'span'
      return
    self.output.tag = TagConfig.shaped[self.type]
class VersalitasText(TaggedText):
  "Text rendered in small caps (versalitas)."
  def process(self):
    "Use a span carrying the versalitas class."
    self.output.tag = 'span class="versalitas"'
class ColorText(TaggedText):
  "Text rendered in a given color."
  def process(self):
    "Use a span classed with the color name from the header."
    self.color = self.header[1]
    self.output.tag = 'span class="' + self.color + '"'
class SizeText(TaggedText):
  "Text rendered at a given size."
  def process(self):
    "Use a span classed with the size name from the header."
    self.size = self.header[1]
    self.output.tag = 'span class="' + self.size + '"'
class BoldText(TaggedText):
  "Text in boldface."
  def process(self):
    "Wrap the contents in a bold tag."
    self.output.tag = 'b'
class TextFamily(TaggedText):
  "A bit of text from a different typeface family (e.g. typewriter)."
  def process(self):
    "Look up the tag for the family; fall back to a plain span."
    self.type = self.header[1]
    if not self.type in TagConfig.family:
      # bug fix: the error message referenced the builtin `type` instead of
      # the family name actually read from the header
      Trace.error('Unrecognized family ' + self.type)
      self.output.tag = 'span'
      return
    self.output.tag = TagConfig.family[self.type]
class Hfill(TaggedText):
  "Horizontal fill."
  def process(self):
    "Use a span carrying the hfill class."
    self.output.tag = 'span class="hfill"'
class BarredText(TaggedText):
  "Text decorated with some kind of bar."
  def process(self):
    "Look up the tag for the bar type; fall back to a plain span."
    self.type = self.header[1]
    if self.type not in TagConfig.barred:
      Trace.error('Unknown bar type ' + self.type)
      self.output.tag = 'span'
      return
    self.output.tag = TagConfig.barred[self.type]
class LangLine(BlackBox):
  "A line with language information; produces no output."
  def process(self):
    "Remember the language read from the header."
    self.lang = self.header[1]
class InsetLength(BlackBox):
  "A length measure inside an inset; produces no output."
  def process(self):
    "Remember the length read from the header."
    self.length = self.header[1]
class Space(Container):
  "A horizontal space of one of several types."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Translate the space type; a space with a length becomes a styled span."
    self.type = self.header[2]
    if self.type not in StyleConfig.hspaces:
      Trace.error('Unknown space type ' + self.type)
      self.html = [' ']
      return
    self.html = [StyleConfig.hspaces[self.type]]
    length = self.getlength()
    if not length:
      return
    self.output = TaggedOutput().settag('span class="hspace"', False)
    ContainerSize().set(length).addstyle(self)
  def getlength(self):
    "Extract the space length from the contents, when present."
    if len(self.contents) == 0 or not isinstance(self.contents[0], InsetLength):
      return None
    return self.contents[0].length
class VerticalSpace(Container):
  "An inset that contains a vertical space."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Known space types map to fixed HTML; others become a sized div."
    self.type = self.header[2]
    if self.type not in StyleConfig.vspaces:
      self.output = TaggedOutput().settag('div class="vspace" style="height: ' + self.type + ';"', True)
      return
    self.html = [StyleConfig.vspaces[self.type]]
class Align(Container):
  "A bit of aligned text."
  def __init__(self):
    self.parser = ExcludingParser()
    self.output = TaggedOutput().setbreaklines(True)
  def process(self):
    "Wrap the contents in a div classed by the alignment name."
    self.output.tag = 'div class="' + self.header[1] + '"'
class Newline(Container):
  "A line break."
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit an HTML line break."
    self.html = ['<br/>\n']
class NewPage(Newline):
  "A page break."
  def process(self):
    "Emit a paragraph holding just a line break."
    self.html = ['<p><br/>\n</p>\n']
class Separator(Container):
  "A separator string which is invisible to extracttext()."
  def __init__(self, constant):
    "Output the given constant directly, keeping the contents empty."
    self.output = FixedOutput()
    self.contents = []
    self.html = [constant]
class StrikeOut(TaggedText):
  "Text struck through."
  def process(self):
    "Wrap the contents in a strike tag."
    self.output.tag = 'strike'
class StartAppendix(BlackBox):
  "Mark to start an appendix here."
  "From this point on, all chapters become appendices."
  def process(self):
    "Activate the special numbering scheme for appendices, using letters."
    # delegates to the global generator; see NumberGenerator.startappendix()
    NumberGenerator.generator.startappendix()
class Link(Container):
  "A link to another part of the document"
  # link attributes, all optional; filled in by complete() or by subclasses
  anchor = None
  url = None
  type = None
  page = None
  target = None
  destination = None
  title = None
  def __init__(self):
    "Initialize the link, add target if configured."
    self.contents = []
    self.parser = InsetParser()
    self.output = LinkOutput()
    if Options.target:
      self.target = Options.target
  def complete(self, text, anchor = None, url = None, type = None, title = None):
    "Complete the link: set the display text and any non-empty attributes."
    self.contents = [Constant(text)]
    if anchor:
      self.anchor = anchor
    if url:
      self.url = url
    if type:
      self.type = type
    if title:
      self.title = title
    return self
  def computedestination(self):
    "Use the destination link to fill in the destination URL."
    if not self.destination:
      return
    self.url = ''
    if self.destination.anchor:
      self.url = '#' + self.destination.anchor
    if self.destination.page:
      # prefix the page so the URL also works across output files
      self.url = self.destination.page + self.url
  def setmutualdestination(self, destination):
    "Set another link as destination, and set its destination to this one."
    self.destination = destination
    destination.destination = self
  def __unicode__(self):
    "Return a printable representation."
    result = 'Link'
    if self.anchor:
      result += ' #' + self.anchor
    if self.url:
      result += ' to ' + self.url
    return result
class URL(Link):
  "A clickable URL."
  def process(self):
    "Read the URL target, type prefix and display name from the parameters."
    target = self.escape(self.getparameter('target'))
    self.url = target
    type = self.getparameter('type')
    if type:
      self.url = self.escape(type) + target
    # with no explicit name, show the target itself
    name = self.getparameter('name') or target
    self.contents = [Constant(name)]
class FlexURL(URL):
  "A flexible URL taken from the inset contents."
  def process(self):
    "Use the extracted text as the URL."
    self.url = self.extracttext()
class LinkOutput(ContainerOutput):
  "Output for a link: a pointer to a destination, or an anchor (destination)."
  def gethtml(self, link):
    "Assemble the anchor tag from the link's attributes."
    pieces = ['a class="' + (link.type or link.__class__.__name__) + '"']
    if link.anchor:
      pieces.append('name="' + link.anchor + '"')
    if link.destination:
      # derive the URL from the destination before emitting it
      link.computedestination()
    if link.url:
      pieces.append('href="' + link.url + '"')
    if link.target:
      pieces.append('target="' + link.target + '"')
    if link.title:
      pieces.append('title="' + link.title + '"')
    return TaggedOutput().settag(' '.join(pieces)).gethtml(link)
class Postprocessor(object):
  "Postprocess a container keeping some context"
  # stage classes registered externally; each instance builds a StageDict
  stages = []
  def __init__(self):
    "Create the stage dictionary and reset the one-element pipeline."
    self.stages = StageDict(Postprocessor.stages, self)
    self.current = None
    self.last = None
  def postprocess(self, next):
    "Postprocess a container and its contents."
    # the pipeline lags by one element so each stage sees last/current/next
    self.postrecursive(self.current)
    result = self.postcurrent(next)
    self.last = self.current
    self.current = next
    return result
  def postrecursive(self, container):
    "Postprocess the container contents recursively"
    if not hasattr(container, 'contents'):
      return
    if len(container.contents) == 0:
      return
    if hasattr(container, 'postprocess'):
      if not container.postprocess:
        # the container explicitly opted out of postprocessing
        return
    postprocessor = Postprocessor()
    contents = []
    for element in container.contents:
      post = postprocessor.postprocess(element)
      if post:
        contents.append(post)
    # two rounds to empty the pipeline
    for i in range(2):
      post = postprocessor.postprocess(None)
      if post:
        contents.append(post)
    container.contents = contents
  def postcurrent(self, next):
    "Postprocess the current element taking into account next and last."
    stage = self.stages.getstage(self.current)
    if not stage:
      return self.current
    return stage.postprocess(self.last, self.current, next)
class StageDict(object):
  "A lookup table mapping container classes to postprocessing stages."
  def __init__(self, classes, postprocessor):
    "Instantiate one stage per class and index them by processed class."
    instances = self.instantiate(classes, postprocessor)
    self.stagedict = dict([(x.processedclass, x) for x in instances])
  def instantiate(self, classes, postprocessor):
    "Create a stage instance for each class and wire in the postprocessor."
    stages = [x.__new__(x) for x in classes]
    for element in stages:
      element.__init__()
      element.postprocessor = postprocessor
    return stages
  def getstage(self, element):
    "Find the stage for an element's class, or None when none applies."
    return self.stagedict.get(element.__class__, None)
class Label(Link):
  "A label to be referenced"
  # global registry of all labels seen so far, keyed by label name
  names = dict()
  lastlayout = None
  def __init__(self):
    "Initialize the underlying link with no numbered layout yet."
    Link.__init__(self)
    self.lastnumbered = None
  def process(self):
    "Process a label container."
    key = self.getparameter('name')
    self.create(' ', key)
    self.lastnumbered = Label.lastlayout
  def create(self, text, key, type = 'Label'):
    "Create the label for a given key."
    self.key = key
    self.complete(text, anchor = key, type = type)
    Label.names[key] = self
    if key in Reference.references:
      # backfill references that appeared before this label was defined
      for reference in Reference.references[key]:
        reference.destination = self
    return self
  def findpartkey(self):
    "Get the part key for the latest numbered container seen."
    numbered = self.numbered(self)
    if numbered and numbered.partkey:
      return numbered.partkey
    return ''
  def numbered(self, container):
    "Get the numbered container for the label, walking up the parents."
    if container.partkey:
      return container
    if not container.parent:
      # reached the root: fall back to the last numbered layout seen
      if self.lastnumbered:
        return self.lastnumbered
      return None
    return self.numbered(container.parent)
  def __unicode__(self):
    "Return a printable representation."
    if not hasattr(self, 'key'):
      return 'Unnamed label'
    return 'Label ' + self.key
class Reference(Link):
  "A reference to a label."
  # global registry of all references seen so far, keyed by label name
  references = dict()
  key = 'none'
  def process(self):
    "Read the reference and set the arrow."
    self.key = self.getparameter('reference')
    if self.key in Label.names:
      # label already seen: the reference points up
      self.direction = '↑'
      label = Label.names[self.key]
    else:
      # forward reference: point down and create a placeholder label
      self.direction = '↓'
      label = Label().complete(' ', self.key, 'preref')
    self.destination = label
    self.formatcontents()
    if not self.key in Reference.references:
      Reference.references[self.key] = []
    Reference.references[self.key].append(self)
  def formatcontents(self):
    "Format the reference contents."
    formatkey = self.getparameter('LatexCommand')
    if not formatkey:
      formatkey = 'ref'
    self.formatted = '↕'
    if formatkey in StyleConfig.referenceformats:
      self.formatted = StyleConfig.referenceformats[formatkey]
    else:
      Trace.error('Unknown reference format ' + formatkey)
    # fill in the placeholders of the format template
    self.replace('↕', self.direction)
    self.replace('#', '1')
    self.replace('on-page', Translator.translate('on-page'))
    partkey = self.destination.findpartkey()
    # only if partkey and partkey.number are not null, send partkey.number
    self.replace('@', partkey and partkey.number)
    self.replace('¶', partkey and partkey.tocentry)
    if not '$' in self.formatted or not partkey or not partkey.titlecontents:
      if '$' in self.formatted:
        Trace.error('No title in ' + str(partkey))
      self.contents = [Constant(self.formatted)]
      return
    # insert the part title at each '$' placeholder
    pieces = self.formatted.split('$')
    self.contents = [Constant(pieces[0])]
    for piece in pieces[1:]:
      self.contents += partkey.titlecontents
      self.contents.append(Constant(piece))
  def replace(self, key, value):
    "Replace a key in the format template with a value."
    if not key in self.formatted:
      return
    if not value:
      value = ''
    self.formatted = self.formatted.replace(key, value)
  def __unicode__(self):
    "Return a printable representation."
    return 'Reference ' + self.key
class FormulaCommand(FormulaBit):
  "A LaTeX command inside a formula"
  # command bit types registered by subclasses; tried in order by parsewithcommand
  types = []
  start = FormulaConfig.starts['command']
  commandmap = None
  def detect(self, pos):
    "Find the current command."
    return pos.checkfor(FormulaCommand.start)
  def parsebit(self, pos):
    "Parse the command."
    command = self.extractcommand(pos)
    bit = self.parsewithcommand(command, pos)
    if bit:
      return bit
    if command.startswith('\\up') or command.startswith('\\Up'):
      # try the upright Greek variants (\upalpha etc.)
      upgreek = self.parseupgreek(command, pos)
      if upgreek:
        return upgreek
    if not self.factory.defining:
      Trace.error('Unknown command ' + command)
    # emit the raw command marked as unknown
    self.output = TaggedOutput().settag('span class="unknown"')
    self.add(FormulaConstant(command))
    return None
  def parsewithcommand(self, command, pos):
    "Parse the command type once we have the command."
    for type in FormulaCommand.types:
      if command in type.commandmap:
        return self.parsecommandtype(command, type, pos)
    return None
  def parsecommandtype(self, command, type, pos):
    "Parse a given command type."
    bit = self.factory.create(type)
    bit.setcommand(command)
    returned = bit.parsebit(pos)
    if returned:
      return returned
    return bit
  def extractcommand(self, pos):
    "Extract the command from the current position."
    if not pos.checkskip(FormulaCommand.start):
      pos.error('Missing command start ' + FormulaCommand.start)
      return
    if pos.finished():
      return self.emptycommand(pos)
    if pos.current().isalpha():
      # alpha command
      command = FormulaCommand.start + pos.globalpha()
      # skip mark of short command
      pos.checkskip('*')
      return command
    # symbol command
    return FormulaCommand.start + pos.skipcurrent()
  def emptycommand(self, pos):
    """Check for an empty command: look for command disguised as ending.
    Special case against '{ \{ \} }' situation."""
    command = ''
    if not pos.isout():
      ending = pos.nextending()
      if ending and pos.checkskip(ending):
        command = ending
    return FormulaCommand.start + command
  def parseupgreek(self, command, pos):
    "Parse the Greek \\up command.."
    if len(command) < 4:
      return None
    if command.startswith('\\up'):
      upcommand = '\\' + command[3:]
    # NOTE(review): this branch checks the position rather than
    # command.startswith('\\Up') -- looks inconsistent, verify intent
    elif pos.checkskip('\\Up'):
      upcommand = '\\' + command[3:4].upper() + command[4:]
    else:
      Trace.error('Impossible upgreek command: ' + command)
      return
    upgreek = self.parsewithcommand(upcommand, pos)
    if upgreek:
      upgreek.type = 'font'
    return upgreek
class CommandBit(FormulaCommand):
  "A formula bit that includes a command"
  def setcommand(self, command):
    "Set the command in the bit"
    self.command = command
    if self.commandmap:
      # accumulate the original LaTeX and look up the HTML translation
      self.original += command
      self.translated = self.commandmap[self.command]
  def parseparameter(self, pos):
    "Parse a parameter at the current position"
    self.factory.clearskipped(pos)
    if pos.finished():
      return None
    parameter = self.factory.parseany(pos)
    self.add(parameter)
    return parameter
  def parsesquare(self, pos):
    "Parse a square bracket"
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = self.factory.parsetype(SquareBracket, pos)
    self.add(bracket)
    return bracket
  def parseliteral(self, pos):
    "Parse a literal bracket."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      # no bracket: accept a bare value instead
      if not pos.isvalue():
        Trace.error('No literal parameter found at: ' + pos.identifier())
        return None
      return pos.globvalue()
    bracket = Bracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsesquareliteral(self, pos):
    "Parse a square bracket literally."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = SquareBracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsetext(self, pos):
    "Parse a text parameter."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      Trace.error('No text parameter for ' + self.command)
      return None
    bracket = Bracket().setfactory(self.factory).parsetext(pos)
    self.add(bracket)
    return bracket
class EmptyCommand(CommandBit):
  "A command that takes no parameters."
  commandmap = FormulaConfig.commands
  def parsebit(self, pos):
    "Nothing to parse; output the translated symbol."
    self.contents = [FormulaConstant(self.translated)]
class SpacedCommand(CommandBit):
  "A parameterless command which gets math spacing in formulas."
  commandmap = FormulaConfig.spacedcommands
  def parsebit(self, pos):
    "Output the translated symbol with a space on each side."
    self.contents = [FormulaConstant(' ' + self.translated + ' ')]
class AlphaCommand(EmptyCommand):
  "A parameterless command whose result is alphabetical."
  commandmap = FormulaConfig.alphacommands
  def parsebit(self, pos):
    "Emit the symbol, then mark this bit as alphabetical."
    EmptyCommand.parsebit(self, pos)
    self.type = 'alpha'
class OneParamFunction(CommandBit):
  "A function taking a single parameter."
  commandmap = FormulaConfig.onefunctions
  simplified = False
  def parsebit(self, pos):
    "Tag the output, parse the lone parameter, then try to simplify."
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
    self.simplifyifpossible()
  def simplifyifpossible(self):
    "Collapse the whole function into a single character when one is mapped."
    if self.original in self.commandmap:
      self.output = FixedOutput()
      self.html = [self.commandmap[self.original]]
      self.simplified = True
class SymbolFunction(CommandBit):
  "A function represented by a single symbol (such as _ or ^)."
  commandmap = FormulaConfig.symbolfunctions
  def detect(self, pos):
    "Check whether the current character is one of the known symbols."
    return pos.current() in SymbolFunction.commandmap
  def parsebit(self, pos):
    "Consume the symbol, tag the output, and parse the parameter."
    self.setcommand(pos.current())
    pos.skip(self.command)
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
class TextFunction(CommandBit):
  "A function whose parameter is read as plain text."
  commandmap = FormulaConfig.textfunctions
  def parsebit(self, pos):
    "Tag the output and read the text parameter."
    self.output = TaggedOutput().settag(self.translated)
    self.parsetext(pos)
  def process(self):
    "Mark this bit as a font change."
    self.type = 'font'
class LabelFunction(CommandBit):
  "A function that acts as an equation label."
  commandmap = FormulaConfig.labelfunctions
  def parsebit(self, pos):
    "Read the label key as a literal parameter."
    self.key = self.parseliteral(pos)
  def process(self):
    "Add an anchor carrying the label contents."
    self.type = 'font'
    self.label = Label().create(' ', self.key, type = 'eqnumber')
    self.contents = [self.label]
    # store as a Label so we know it's been seen
    Label.names[self.key] = self.label
class FontFunction(OneParamFunction):
  "A one-parameter function that changes the font."
  commandmap = FormulaConfig.fontfunctions
  def process(self):
    "Mark as a font change and simplify to a single character if possible."
    self.type = 'font'
    self.simplifyifpossible()
# register the parsers: generic commands and symbol functions with the factory,
# then the concrete command types in detection order
FormulaFactory.types += [FormulaCommand, SymbolFunction]
FormulaCommand.types = [
    AlphaCommand, EmptyCommand, OneParamFunction, FontFunction, LabelFunction,
    TextFunction, SpacedCommand,
    ]
class BigSymbol(object):
  "A generator for big (multi-piece) symbols."
  symbols = FormulaConfig.bigsymbols
  def __init__(self, symbol):
    "Remember the symbol to generate."
    self.symbol = symbol
  def getpieces(self):
    "Return all pieces of the big symbol, or just the symbol itself."
    # unknown symbols and small limits stay as a single piece
    if self.symbol not in self.symbols or self.smalllimit():
      return [self.symbol]
    return self.symbols[self.symbol]
  def smalllimit(self):
    "Decide if the limit should be a small, one-line symbol."
    if not DocumentParameters.displaymode:
      return True
    return len(self.symbols[self.symbol]) == 1 or Options.simplemath
class BigBracket(BigSymbol):
  "A generator for big brackets, drawn as a column of stacked pieces."
  def __init__(self, size, bracket, alignment='l'):
    "Set the size (number of rows) and symbol for the bracket."
    self.size = size
    self.original = bracket
    self.alignment = alignment
    self.pieces = None
    if bracket in FormulaConfig.bigbrackets:
      self.pieces = FormulaConfig.bigbrackets[bracket]
  def getpiece(self, index):
    "Return the nth piece for the bracket, dispatching on the piece count."
    function = getattr(self, 'getpiece' + str(len(self.pieces)))
    return function(index)
  def getpiece1(self, index):
    "Return the only piece for a single-piece bracket."
    return self.pieces[0]
  def getpiece3(self, index):
    "Get the nth piece for a 3-piece bracket: parenthesis or square bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[-1]
    return self.pieces[1]
  def getpiece4(self, index):
    "Get the nth piece for a 4-piece bracket: curly bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[3]
    # bug fix: use floor division; under true division (size - 1)/2 is a
    # float, so for even sizes the comparison never matched and the middle
    # hook piece was never emitted
    if index == (self.size - 1) // 2:
      return self.pieces[2]
    return self.pieces[1]
  def getcell(self, index):
    "Get the bracket piece as an array cell."
    piece = self.getpiece(index)
    span = 'span class="bracket align-' + self.alignment + '"'
    return TaggedBit().constant(piece, span)
  def getcontents(self):
    "Get the bracket as an array of rows, or as a single bracket."
    if self.size == 1 or not self.pieces:
      return self.getsinglebracket()
    rows = []
    for index in range(self.size):
      cell = self.getcell(index)
      rows.append(TaggedBit().complete([cell], 'span class="arrayrow"'))
    return [TaggedBit().complete(rows, 'span class="array"')]
  def getsinglebracket(self):
    "Return the bracket as a single sign; '.' means an invisible bracket."
    if self.original == '.':
      return [TaggedBit().constant('', 'span class="emptydot"')]
    return [TaggedBit().constant(self.original, 'span class="symbol"')]
class FormulaEquation(CommandBit):
  "A simple numbered equation."
  piece = 'equation'
  def parsebit(self, pos):
    "Parse the equation contents as a whole formula."
    self.output = ContentsOutput()
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaCell(FormulaCommand):
  "A cell within an array row."
  def setalignment(self, alignment):
    "Remember the alignment and tag the output accordingly."
    self.alignment = alignment
    self.output = TaggedOutput().settag('span class="arraycell align-' + alignment +'"', True)
    return self
  def parsebit(self, pos):
    "Parse the cell contents as a whole formula, if anything remains."
    self.factory.clearskipped(pos)
    if pos.finished():
      return
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaRow(FormulaCommand):
  "An array row inside an array"
  cellseparator = FormulaConfig.array['cellseparator']
  def setalignments(self, alignments):
    "Store the per-column alignments and tag the output as a row."
    self.alignments = alignments
    self.output = TaggedOutput().settag('span class="arrayrow"', True)
    return self
  def parsebit(self, pos):
    "Parse a whole row"
    index = 0
    # the cell separator is optional so the last cell needs no trailing one
    pos.pushending(self.cellseparator, optional=True)
    while not pos.finished():
      cell = self.createcell(index)
      cell.parsebit(pos)
      self.add(cell)
      index += 1
      pos.checkskip(self.cellseparator)
    if len(self.contents) == 0:
      # an empty row produces no output at all
      self.output = EmptyOutput()
  def createcell(self, index):
    "Create the cell that corresponds to the given index."
    # alignments repeat cyclically when the row has more cells than given
    alignment = self.alignments[index % len(self.alignments)]
    return self.factory.create(FormulaCell).setalignment(alignment)
class MultiRowFormula(CommandBit):
  "A formula with multiple rows."
  def parserows(self, pos):
    "Parse all rows, finish when no more row ends"
    self.rows = []
    first = True
    for row in self.iteraterows(pos):
      if first:
        first = False
      else:
        # intersparse empty rows
        self.addempty()
      # the generator yields the row before it is parsed; parse it here
      row.parsebit(pos)
      self.addrow(row)
    self.size = len(self.rows)
  def iteraterows(self, pos):
    "Iterate over all rows, end when no more row ends"
    rowseparator = FormulaConfig.array['rowseparator']
    while True:
      pos.pushending(rowseparator, True)
      row = self.factory.create(FormulaRow)
      yield row.setalignments(self.alignments)
      # after the caller parsed the row, stop when no separator follows
      if pos.checkfor(rowseparator):
        self.original += pos.popending(rowseparator)
      else:
        return
  def addempty(self):
    "Add an empty row."
    row = self.factory.create(FormulaRow).setalignments(self.alignments)
    # mirror the shape of the previous row with blank cells
    for index, originalcell in enumerate(self.rows[-1].contents):
      cell = row.createcell(index)
      cell.add(FormulaConstant(' '))
      row.add(cell)
    self.addrow(row)
  def addrow(self, row):
    "Add a row to the contents and to the list of rows."
    self.rows.append(row)
    self.add(row)
class FormulaArray(MultiRowFormula):
  "An array inside a formula."
  piece = 'array'
  def parsebit(self, pos):
    "Read the alignments first, then parse all the rows."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.parsealignments(pos)
    self.parserows(pos)
  def parsealignments(self, pos):
    "Read the optional vertical alignment and the per-column ones."
    # vertical alignment comes in square brackets; default is centered
    literal = self.parsesquareliteral(pos)
    self.valign = literal or 'c'
    # one horizontal alignment character per column
    self.alignments = list(self.parseliteral(pos))
class FormulaMatrix(MultiRowFormula):
  "A matrix: an array with centered alignment."
  piece = 'matrix'
  def parsebit(self, pos):
    "Parse the rows with every alignment fixed to 'c'."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.valign = 'c'
    self.alignments = ['c']
    self.parserows(pos)
class FormulaCases(MultiRowFormula):
  "A cases statement"
  piece = 'cases'
  def parsebit(self, pos):
    "Parse the cases"
    self.output = ContentsOutput()
    self.alignments = ['l', 'l']
    self.parserows(pos)
    # retag every cell as a case and pad it with a space
    for row in self.contents:
      for cell in row.contents:
        cell.output.settag('span class="case align-l"', True)
        cell.contents.append(FormulaConstant(' '))
    # wrap the rows and prepend one big left curly brace
    array = TaggedBit().complete(self.contents, 'span class="bracketcases"', True)
    brace = BigBracket(len(self.contents), '{', 'l')
    self.contents = brace.getcontents() + [array]
class EquationEnvironment(MultiRowFormula):
    "A \\begin{}...\\end equation environment with rows and cells."

    def parsebit(self, pos):
        "Parse the whole environment."
        self.output = TaggedOutput().settag('span class="environment"', False)
        # a starred environment (align*) uses the same alignments as the plain one
        environment = self.piece.replace('*', '')
        if environment not in FormulaConfig.environments:
            Trace.error('Unknown equation environment ' + self.piece)
            self.alignments = ['l']
        else:
            self.alignments = FormulaConfig.environments[environment]
        self.parserows(pos)
class BeginCommand(CommandBit):
    "A \\begin{}...\end command and what it entails (array, cases, aligned)"

    commandmap = {FormulaConfig.array['begin']:''}

    # concrete environment types; anything else falls back to EquationEnvironment
    types = [FormulaEquation, FormulaArray, FormulaCases, FormulaMatrix]

    def parsebit(self, pos):
        "Parse the begin command"
        command = self.parseliteral(pos)
        bit = self.findbit(command)
        ending = FormulaConfig.array['end'] + '{' + command + '}'
        # parse the environment contents up to the matching \end{...}
        pos.pushending(ending)
        bit.parsebit(pos)
        self.add(bit)
        self.original += pos.popending(ending)
        self.size = bit.size

    def findbit(self, piece):
        "Find the command bit corresponding to the \\begin{piece}"
        # starred variants (e.g. matrix*) map to the same type as the plain one
        for type in BeginCommand.types:
            if piece.replace('*', '') == type.piece:
                return self.factory.create(type)
        bit = self.factory.create(EquationEnvironment)
        bit.piece = piece
        return bit

FormulaCommand.types += [BeginCommand]
class CombiningFunction(OneParamFunction):
    "A function that combines a Unicode combining character with its base."

    commandmap = FormulaConfig.combiningfunctions

    def parsebit(self, pos):
        "Parse a combining function."
        self.type = 'alpha'
        combining = self.translated
        parameter = self.parsesingleparameter(pos)
        if not parameter:
            Trace.error('Empty parameter for combining function ' + self.command)
        elif len(parameter.extracttext()) != 1:
            Trace.error('Applying combining function ' + self.command + ' to invalid string "' + parameter.extracttext() + '"')
        # the combining character is emitted after the parsed base character
        self.contents.append(Constant(combining))

    def parsesingleparameter(self, pos):
        "Parse a parameter, or a single letter."
        self.factory.clearskipped(pos)
        if pos.finished():
            Trace.error('Error while parsing single parameter at ' + pos.identifier())
            return None
        if self.factory.detecttype(Bracket, pos) \
            or self.factory.detecttype(FormulaCommand, pos):
            return self.parseparameter(pos)
        # bare letter: consume one character and wrap it as a constant
        letter = FormulaConstant(pos.skipcurrent())
        self.add(letter)
        return letter
class DecoratingFunction(OneParamFunction):
    "A function that decorates some bit of text"

    commandmap = FormulaConfig.decoratingfunctions

    def parsebit(self, pos):
        "Parse a decorating function"
        self.type = 'alpha'
        symbol = self.translated
        self.symbol = TaggedBit().constant(symbol, 'span class="symbolover"')
        self.parameter = self.parseparameter(pos)
        self.output = TaggedOutput().settag('span class="withsymbol"')
        # symbol goes first so it renders above the decorated parameter
        self.contents.insert(0, self.symbol)
        self.parameter.output = TaggedOutput().settag('span class="undersymbol"')
        self.simplifyifpossible()
class LimitCommand(EmptyCommand):
    "A command which accepts limits above and below, in display mode."

    commandmap = FormulaConfig.limitcommands

    def parsebit(self, pos):
        "Parse a limit command."
        self.output = TaggedOutput().settag('span class="limits"')
        for piece in BigSymbol(self.translated).getpieces():
            self.contents.append(TaggedBit().constant(piece, 'span class="limit"'))
class LimitPreviousCommand(LimitCommand):
    "A command to limit the previous command."

    # not registered in any commandmap: created programmatically, not parsed by name
    commandmap = None

    def parsebit(self, pos):
        "Do nothing."
        self.output = TaggedOutput().settag('span class="limits"')
        self.factory.clearskipped(pos)

    def __unicode__(self):
        "Return a printable representation."
        return 'Limit previous command'
class LimitsProcessor(MathsProcessor):
    "A processor for limits inside an element."

    def process(self, contents, index):
        "Process the limits for an element."
        if Options.simplemath:
            return
        if self.checklimits(contents, index):
            self.modifylimits(contents, index)
        if self.checkscript(contents, index) and self.checkscript(contents, index + 1):
            self.modifyscripts(contents, index)

    def checklimits(self, contents, index):
        "Check if the current position has a limits command."
        # limits are rendered above/below only in display mode
        if not DocumentParameters.displaymode:
            return False
        if self.checkcommand(contents, index + 1, LimitPreviousCommand):
            self.limitsahead(contents, index)
            return False
        if not isinstance(contents[index], LimitCommand):
            return False
        return self.checkscript(contents, index + 1)

    def limitsahead(self, contents, index):
        "Limit the current element based on the next."
        # move a copy of the current element into the LimitPreviousCommand
        # and silence the original's output
        contents[index + 1].add(contents[index].clone())
        contents[index].output = EmptyOutput()

    def modifylimits(self, contents, index):
        "Modify a limits commands so that the limits appear above and below."
        limited = contents[index]
        # getlimit() removes the script from contents, so index + 1 is reused below
        subscript = self.getlimit(contents, index + 1)
        limited.contents.append(subscript)
        if self.checkscript(contents, index + 1):
            superscript = self.getlimit(contents, index + 1)
        else:
            # no explicit superscript: insert a blank placeholder to keep layout
            superscript = TaggedBit().constant(' ', 'sup class="limit"')
        limited.contents.insert(0, superscript)

    def getlimit(self, contents, index):
        "Get the limit for a limits command."
        limit = self.getscript(contents, index)
        limit.output.tag = limit.output.tag.replace('script', 'limit')
        return limit

    def modifyscripts(self, contents, index):
        "Modify the super- and subscript to appear vertically aligned."
        subscript = self.getscript(contents, index)
        # subscript removed so instead of index + 1 we get index again
        superscript = self.getscript(contents, index)
        scripts = TaggedBit().complete([superscript, subscript], 'span class="scripts"')
        contents.insert(index, scripts)

    def checkscript(self, contents, index):
        "Check if the current element is a sub- or superscript."
        return self.checkcommand(contents, index, SymbolFunction)

    def checkcommand(self, contents, index, type):
        "Check for the given type as the current element."
        if len(contents) <= index:
            return False
        return isinstance(contents[index], type)

    def getscript(self, contents, index):
        "Get the sub- or superscript, removing it from the contents."
        bit = contents[index]
        bit.output.tag += ' class="script"'
        # caller is responsible for re-inserting the bit where it belongs
        del contents[index]
        return bit
class BracketCommand(OneParamFunction):
    "A command which defines a bracket."

    commandmap = FormulaConfig.bracketcommands

    def parsebit(self, pos):
        "Parse the bracket as a plain one-parameter function."
        OneParamFunction.parsebit(self, pos)

    def create(self, direction, character):
        "Create the bracket for the given character."
        self.command = '\\' + direction
        self.original = character
        self.contents = [FormulaConstant(character)]
        return self
class BracketProcessor(MathsProcessor):
    "A processor for bracket commands."

    def process(self, contents, index):
        "Convert the bracket using Unicode pieces, if possible."
        if Options.simplemath:
            return
        if self.checkleft(contents, index):
            return self.processleft(contents, index)

    def processleft(self, contents, index):
        "Process a left bracket: size it and its matching right bracket."
        rightindex = self.findright(contents, index + 1)
        if not rightindex:
            return
        size = self.findmax(contents, index, rightindex)
        self.resize(contents[index], size)
        self.resize(contents[rightindex], size)

    def checkleft(self, contents, index):
        "Check if the command at the given index is left."
        return self.checkdirection(contents[index], '\\left')

    def checkright(self, contents, index):
        "Check if the command at the given index is right."
        return self.checkdirection(contents[index], '\\right')

    def checkdirection(self, bit, command):
        "Check if the given bit is the desired bracket command."
        if not isinstance(bit, BracketCommand):
            return False
        return bit.command == command

    def findright(self, contents, index):
        "Find the right bracket starting at the given index, or None."
        # depth tracks nested \left...\right pairs
        depth = 1
        while index < len(contents):
            if self.checkleft(contents, index):
                depth += 1
            if self.checkright(contents, index):
                depth -= 1
                if depth == 0:
                    return index
            index += 1
        return None

    def findmax(self, contents, leftindex, rightindex):
        "Find the max size of the contents between the two given indices."
        sliced = contents[leftindex:rightindex]
        return max([element.size for element in sliced])

    def resize(self, command, size):
        "Resize a bracket command to the given size."
        character = command.extracttext()
        alignment = command.command.replace('\\', '')
        bracket = BigBracket(size, character, alignment)
        command.output = ContentsOutput()
        command.contents = bracket.getcontents()
# register the new command types and processors with the formula machinery
FormulaCommand.types += [
    DecoratingFunction, CombiningFunction, LimitCommand, BracketCommand,
]
FormulaProcessor.processors += [
    LimitsProcessor(), BracketProcessor(),
]
class ParameterDefinition(object):
    "The definition of a parameter in a hybrid function."
    "[] parameters are optional, {} parameters are mandatory."
    "Each parameter has a one-character name, like {$1} or {$p}."
    "A parameter that ends in ! like {$p!} is a literal."
    "Example: [$1]{$p!} reads an optional parameter $1 and a literal mandatory parameter p."

    # recognized bracket pairs: [] marks optional, {} mandatory
    parambrackets = [('[', ']'), ('{', '}')]

    def __init__(self):
        self.name = None            # one-character parameter name
        self.literal = False        # True when declared with a trailing !
        self.optional = False       # True when declared with []
        self.value = None           # parsed value (a formula bit)
        self.literalvalue = None    # raw string value for literal parameters

    def parse(self, pos):
        "Parse a parameter definition: [$0], {$x}, {$1!}..."
        for (opening, closing) in ParameterDefinition.parambrackets:
            if pos.checkskip(opening):
                if opening == '[':
                    self.optional = True
                if not pos.checkskip('$'):
                    Trace.error('Wrong parameter name, did you mean $' + pos.current() + '?')
                    return None
                self.name = pos.skipcurrent()
                if pos.checkskip('!'):
                    self.literal = True
                if not pos.checkskip(closing):
                    Trace.error('Wrong parameter closing ' + pos.skipcurrent())
                    return None
                return self
        Trace.error('Wrong character in parameter template: ' + pos.skipcurrent())
        return None

    def read(self, pos, function):
        "Read the parameter itself using the definition."
        if self.literal:
            if self.optional:
                self.literalvalue = function.parsesquareliteral(pos)
            else:
                self.literalvalue = function.parseliteral(pos)
            # keep a formula-bit wrapper as well, for uniform handling
            if self.literalvalue:
                self.value = FormulaConstant(self.literalvalue)
        elif self.optional:
            self.value = function.parsesquare(pos)
        else:
            self.value = function.parseparameter(pos)

    def __unicode__(self):
        "Return a printable representation."
        result = 'param ' + self.name
        if self.value:
            result += ': ' + str(self.value)
        else:
            result += ' (empty)'
        return result
class ParameterFunction(CommandBit):
    "A function with a variable number of parameters defined in a template."
    "The parameters are defined as a parameter definition."

    def readparams(self, readtemplate, pos):
        "Read the params according to the template."
        self.params = dict()
        for paramdef in self.paramdefs(readtemplate):
            paramdef.read(pos, self)
            self.params['$' + paramdef.name] = paramdef

    def paramdefs(self, readtemplate):
        "Read each param definition in the template"
        pos = TextPosition(readtemplate)
        while not pos.finished():
            paramdef = ParameterDefinition().parse(pos)
            if paramdef:
                yield paramdef

    def getparam(self, name):
        "Get a parameter as parsed."
        if not name in self.params:
            return None
        return self.params[name]

    def getvalue(self, name):
        "Get the value of a parameter, or None if the parameter is missing."
        # guard against a missing parameter, consistent with getliteralvalue();
        # previously this dereferenced None and raised AttributeError
        param = self.getparam(name)
        if not param:
            return None
        return param.value

    def getliteralvalue(self, name):
        "Get the literal value of a parameter."
        param = self.getparam(name)
        if not param or not param.literalvalue:
            return None
        return param.literalvalue
class HybridFunction(ParameterFunction):
    """
    A parameter function where the output is also defined using a template.
    The template can use a number of functions; each function has an associated
    tag.
    Example: [f0{$1},span class="fbox"] defines a function f0 which corresponds
    to a span of class fbox, yielding <span class="fbox">$1</span>.
    Literal parameters can be used in tags definitions:
      [f0{$1},span style="color: $p;"]
    yields <span style="color: $p;">$1</span>, where $p is a literal parameter.
    Sizes can be specified in hybridsizes, e.g. adding parameter sizes. By
    default the resulting size is the max of all arguments. Sizes are used
    to generate the right parameters.
    A function followed by a single / is output as a self-closing XHTML tag:
      [f0/,hr]
    will generate <hr/>.
    """

    commandmap = FormulaConfig.hybridfunctions

    def parsebit(self, pos):
        "Parse a function with [] and {} parameters"
        # translated = [readtemplate, writetemplate, tag0, tag1, ...]
        readtemplate = self.translated[0]
        writetemplate = self.translated[1]
        self.readparams(readtemplate, pos)
        self.contents = self.writeparams(writetemplate)
        self.computehybridsize()

    def writeparams(self, writetemplate):
        "Write all params according to the template"
        return self.writepos(TextPosition(writetemplate))

    def writepos(self, pos):
        "Write all params as read in the parse position."
        result = []
        while not pos.finished():
            if pos.checkskip('$'):
                param = self.writeparam(pos)
                if param:
                    result.append(param)
            elif pos.checkskip('f'):
                function = self.writefunction(pos)
                if function:
                    function.type = None
                    result.append(function)
            elif pos.checkskip('('):
                result.append(self.writebracket('left', '('))
            elif pos.checkskip(')'):
                result.append(self.writebracket('right', ')'))
            else:
                # any other character is copied through verbatim
                result.append(FormulaConstant(pos.skipcurrent()))
        return result

    def writeparam(self, pos):
        "Write a single param of the form $0, $x..."
        name = '$' + pos.skipcurrent()
        if not name in self.params:
            Trace.error('Unknown parameter ' + name)
            return None
        if not self.params[name]:
            return None
        # an optional .suffix (e.g. $1.alpha) overrides the value's type
        if pos.checkskip('.'):
            self.params[name].value.type = pos.globalpha()
        return self.params[name].value

    def writefunction(self, pos):
        "Write a single function f0,...,fn."
        tag = self.readtag(pos)
        if not tag:
            return None
        if pos.checkskip('/'):
            # self-closing XHTML tag, such as <hr/>
            return TaggedBit().selfcomplete(tag)
        if not pos.checkskip('{'):
            Trace.error('Function should be defined in {}')
            return None
        pos.pushending('}')
        contents = self.writepos(pos)
        pos.popending()
        if len(contents) == 0:
            return None
        return TaggedBit().complete(contents, tag)

    def readtag(self, pos):
        "Get the tag corresponding to the given index. Does parameter substitution."
        if not pos.current().isdigit():
            Trace.error('Function should be f0,...,f9: f' + pos.current())
            return None
        index = int(pos.skipcurrent())
        if 2 + index > len(self.translated):
            Trace.error('Function f' + str(index) + ' is not defined')
            return None
        tag = self.translated[2 + index]
        if not '$' in tag:
            return tag
        # substitute literal parameters into the tag, e.g. style="color: $p;"
        for variable in self.params:
            if variable in tag:
                param = self.params[variable]
                if not param.literal:
                    Trace.error('Parameters in tag ' + tag + ' should be literal: {' + variable + '!}')
                    continue
                if param.literalvalue:
                    value = param.literalvalue
                else:
                    value = ''
                tag = tag.replace(variable, value)
        return tag

    def writebracket(self, direction, character):
        "Return a new bracket looking at the given direction."
        return self.factory.create(BracketCommand).create(direction, character)

    def computehybridsize(self):
        "Compute the size of the hybrid function."
        if not self.command in HybridSize.configsizes:
            self.computesize()
            return
        self.size = HybridSize().getsize(self)
        # set the size in all elements at first level
        for element in self.contents:
            element.size = self.size
class HybridSize(object):
    "The size associated with a hybrid function."

    configsizes = FormulaConfig.hybridsizes

    def getsize(self, function):
        "Read the size for a function and parse it."
        sizestring = self.configsizes[function.command]
        # substitute each parameter's computed size into the expression
        for name in function.params:
            if name in sizestring:
                size = function.params[name].value.computesize()
                sizestring = sizestring.replace(name, str(size))
        if '$' in sizestring:
            Trace.error('Unconverted variable in hybrid size: ' + sizestring)
            return 1
        # NOTE(review): eval() of a config-supplied arithmetic expression;
        # acceptable only while hybridsizes comes from trusted configuration
        return eval(sizestring)

FormulaCommand.types += [HybridFunction]
class HeaderParser(Parser):
    "Parses the LyX header"

    def parse(self, reader):
        "Parse header parameters into a dictionary, return the preamble."
        contents = []
        self.parseending(reader, lambda: self.parseline(reader, contents))
        # skip last line
        reader.nextline()
        return contents

    def parseline(self, reader, contents):
        "Parse a single line as a parameter or as a start"
        line = reader.currentline()
        params = HeaderConfig.parameters
        if line.startswith(params['branch']):
            self.parsebranch(reader)
        elif line.startswith(params['lstset']):
            LstParser().parselstset(reader)
        elif line.startswith(params['beginpreamble']):
            contents.append(self.factory.createcontainer(reader))
        else:
            # no match: treat as a plain parameter line
            self.parseparameter(reader)

    def parsebranch(self, reader):
        "Parse all branch definitions."
        branchname = reader.currentline().split()[1]
        reader.nextline()
        subparser = HeaderParser().complete(HeaderConfig.parameters['endbranch'])
        subparser.parse(reader)
        options = BranchOptions(branchname)
        for key in subparser.parameters:
            options.set(key, subparser.parameters[key])
        Options.branches[branchname] = options

    def complete(self, ending):
        "Complete the parser with the given ending."
        self.ending = ending
        return self
class PreambleParser(Parser):
    "A parser for the LyX preamble."

    # preamble lines accumulate on the class, shared across instances
    preamble = []

    def parse(self, reader):
        "Parse the full preamble with all statements."
        self.ending = HeaderConfig.parameters['endpreamble']
        self.parseending(reader, lambda: self.parsepreambleline(reader))
        return []

    def parsepreambleline(self, reader):
        "Store the current preamble line and advance the reader."
        line = reader.currentline()
        PreambleParser.preamble.append(line)
        reader.nextline()
class LstParser(object):
    "Parse global and local lstparams."

    # parameters from \lstset, shared by every listing in the document
    globalparams = dict()

    def parselstset(self, reader):
        "Parse a declaration of lstparams in lstset."
        paramtext = self.extractlstset(reader)
        # extractlstset() returns None when no closing brace is found;
        # previously this crashed with a TypeError on the '{' test below
        if paramtext is None:
            return
        if not '{' in paramtext:
            Trace.error('Missing opening bracket in lstset: ' + paramtext)
            return
        lefttext = paramtext.split('{')[1]
        croppedtext = lefttext[:-1]
        # split into individual key=value entries; parselstparams expects a
        # list, but the raw string used to be passed in, iterating characters
        LstParser.globalparams = self.parselstparams(croppedtext.split(','))

    def extractlstset(self, reader):
        "Extract the global lstset parameters; None if the text never closes."
        paramtext = ''
        while not reader.finished():
            paramtext += reader.currentline()
            reader.nextline()
            if paramtext.endswith('}'):
                return paramtext
        Trace.error('Could not find end of \\lstset settings; aborting')

    def parsecontainer(self, container):
        "Parse some lstparams from elyxer.a container."
        # local parameters override the global \lstset defaults
        container.lstparams = LstParser.globalparams.copy()
        paramlist = container.getparameterlist('lstparams')
        container.lstparams.update(self.parselstparams(paramlist))

    def parselstparams(self, paramlist):
        "Process a number of lstparams from elyxer.a list."
        paramdict = dict()
        for param in paramlist:
            if not '=' in param:
                # blank entries are tolerated; anything else is reported
                if len(param.strip()) > 0:
                    Trace.error('Invalid listing parameter ' + param)
            else:
                key, value = param.split('=', 1)
                paramdict[key] = value
        return paramdict
class MacroDefinition(CommandBit):
    "A function that defines a new command (a macro)."

    # global macro registry, shared with MacroFunction.commandmap
    macros = dict()

    def parsebit(self, pos):
        "Parse the function that defines the macro."
        self.output = EmptyOutput()
        self.parameternumber = 0
        self.defaults = []
        # signal the factory that #n macro parameters are valid while defining
        self.factory.defining = True
        self.parseparameters(pos)
        self.factory.defining = False
        Trace.debug('New command ' + self.newcommand + ' (' + \
            str(self.parameternumber) + ' parameters)')
        self.macros[self.newcommand] = self

    def parseparameters(self, pos):
        "Parse all optional parameters (number of parameters, default values)"
        "and the mandatory definition."
        self.newcommand = self.parsenewcommand(pos)
        # parse number of parameters
        literal = self.parsesquareliteral(pos)
        if literal:
            self.parameternumber = int(literal)
        # parse all default values
        bracket = self.parsesquare(pos)
        while bracket:
            self.defaults.append(bracket)
            bracket = self.parsesquare(pos)
        # parse mandatory definition
        self.definition = self.parseparameter(pos)

    def parsenewcommand(self, pos):
        "Parse the name of the new command."
        self.factory.clearskipped(pos)
        if self.factory.detecttype(Bracket, pos):
            return self.parseliteral(pos)
        if self.factory.detecttype(FormulaCommand, pos):
            return self.factory.create(FormulaCommand).extractcommand(pos)
        Trace.error('Unknown formula bit in defining function at ' + pos.identifier())
        return 'unknown'

    def instantiate(self):
        "Return an instance of the macro."
        # clone so every use of the macro gets fresh parameter slots
        return self.definition.clone()
class MacroParameter(FormulaBit):
    "A parameter from elyxer.a macro."

    def detect(self, pos):
        "Find a macro parameter: #n."
        return pos.checkfor('#')

    def parsebit(self, pos):
        "Parse the parameter: #n."
        if not pos.checkskip('#'):
            Trace.error('Missing parameter start #.')
            return
        number = int(pos.skipcurrent())
        self.number = number
        self.original = '#' + str(number)
        placeholder = TaggedBit().constant(self.original, 'span class="unknown"')
        self.contents = [placeholder]
class MacroFunction(CommandBit):
    "A function that was defined using a macro."

    # maps macro names to their MacroDefinition
    commandmap = MacroDefinition.macros

    def parsebit(self, pos):
        "Parse a number of input parameters."
        self.output = FilteredOutput()
        self.values = []
        macro = self.translated
        self.parseparameters(pos, macro)
        self.completemacro(macro)

    def parseparameters(self, pos, macro):
        "Parse as many parameters as are needed."
        self.parseoptional(pos, list(macro.defaults))
        self.parsemandatory(pos, macro.parameternumber - len(macro.defaults))
        if len(self.values) < macro.parameternumber:
            Trace.error('Missing parameters in macro ' + str(self))

    def parseoptional(self, pos, defaults):
        "Parse optional parameters."
        optional = []
        while self.factory.detecttype(SquareBracket, pos):
            optional.append(self.parsesquare(pos))
            if len(optional) > len(defaults):
                break
        for value in optional:
            # NOTE(review): defaults are consumed from the end of the list;
            # confirm the intended pairing of optional values with defaults
            default = defaults.pop()
            if len(value.contents) > 0:
                self.values.append(value)
            else:
                # empty brackets [] fall back to the default value
                self.values.append(default)
        self.values += defaults

    def parsemandatory(self, pos, number):
        "Parse a number of mandatory parameters."
        for index in range(number):
            parameter = self.parsemacroparameter(pos, number - index)
            if not parameter:
                return
            self.values.append(parameter)

    def parsemacroparameter(self, pos, remaining):
        "Parse a macro parameter. Could be a bracket or a single letter."
        "If there are just two values remaining and there is a running number,"
        "parse as two separater numbers."
        self.factory.clearskipped(pos)
        if pos.finished():
            return None
        if self.factory.detecttype(FormulaNumber, pos):
            return self.parsenumbers(pos, remaining)
        return self.parseparameter(pos)

    def parsenumbers(self, pos, remaining):
        "Parse the remaining parameters as a running number."
        "For example, 12 would be {1}{2}."
        number = self.factory.parsetype(FormulaNumber, pos)
        if not len(number.original) == remaining:
            return number
        # split the multi-digit number into one single-digit value per parameter
        for digit in number.original:
            value = self.factory.create(FormulaNumber)
            value.add(FormulaConstant(digit))
            value.type = number  # NOTE(review): looks like it should be number.type -- confirm
            self.values.append(value)
        return None

    def completemacro(self, macro):
        "Complete the macro with the parameters read."
        self.contents = [macro.instantiate()]
        replaced = [False] * len(self.values)
        for parameter in self.searchall(MacroParameter):
            index = parameter.number - 1
            if index >= len(self.values):
                Trace.error('Macro parameter index out of bounds: ' + str(index))
                return
            replaced[index] = True
            parameter.contents = [self.values[index].clone()]
        # parameters not found in the definition body are handled as filters
        for index in range(len(self.values)):
            if not replaced[index]:
                self.addfilter(index, self.values[index])

    def addfilter(self, index, value):
        "Add a filter for the given parameter number and parameter value."
        original = '#' + str(index + 1)
        # render the parameter that was actually passed in; the previous code
        # always rendered self.values[0], clobbering every parameter index > 0
        self.output.addfilter(original, ''.join(value.gethtml()))
class FormulaMacro(Formula):
    "A math macro defined in an inset."

    def __init__(self):
        self.parser = MacroParser()
        # a macro definition produces no direct output of its own
        self.output = EmptyOutput()

    def __unicode__(self):
        "Return a printable representation."
        return 'Math macro'

# register macro support with the formula machinery
FormulaFactory.types += [ MacroParameter ]
FormulaCommand.types += [
    MacroFunction,
]
def math2html(formula):
    "Convert some TeX math to HTML."
    whole = FormulaFactory().parseformula(formula)
    FormulaProcessor().process(whole)
    whole.process()
    return ''.join(whole.gethtml())
def main():
    "Main function, called if invoked from the command line"
    args = sys.argv
    # parseoptions consumes recognized options from args
    # (presumably including argv[0]; verify against Options.parseoptions)
    Options().parseoptions(args)
    if len(args) != 1:
        Trace.error('Usage: math2html.py escaped_string')
        exit()
    result = math2html(args[0])
    Trace.message(result)

if __name__ == '__main__':
    main()
|
Soya93/Extract-Refactoring
|
python/helpers/py3only/docutils/utils/math/math2html.py
|
Python
|
apache-2.0
| 171,688
|
[
"Bowtie"
] |
d9787e9ad7f53e94ec07e34dc9bddff47020e790f024ccc9b91451618b95cac5
|
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'castep'
tab.settings['Output file name'] = 'phonon.castep'
#
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'isotope'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Effective medium method'] = 'Averaged Permittivity'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Legend'] = 'Averaged permittivity'
# Add new scenarios
methods = [ 'Maxwell-Garnett', 'Bruggeman']
shapes = ['Needle', 'Ellipsoid', 'Plate']
hkls = [[0,0,1], [0,0,1], [1,0,0]]
for method in methods:
for shape,hkl in zip(shapes,hkls):
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Particle shape'] = shape
tab.settings['Effective medium method'] = method
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
tab.settings['Legend'] = method + ' ' + shape + ' ' +str(hkl)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 300
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Castep Na2(SO4)2'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 300
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
|
JohnKendrick/PDielec
|
Examples/Castep/Na2SO42/script.py
|
Python
|
mit
| 2,005
|
[
"CASTEP"
] |
1f882e3474086bf98e05427ffabeb26b4510f21e5a8b7c91c9e3ff22af3c20cf
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
# import and run the sample script; the wrapper returns the imported module
# and a decorator that skips the test when required features are missing
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    "@SAMPLES_DIR@/h5md.py")


@skipIfMissingFeatures
class Sample(ut.TestCase):
    # keep a reference to the sample's system; the class defines no test
    # methods, so the test case only verifies the sample imports and runs
    system = sample.system


if __name__ == "__main__":
    ut.main()
|
espressomd/espresso
|
testsuite/scripts/samples/test_h5md.py
|
Python
|
gpl-3.0
| 977
|
[
"ESPResSo"
] |
a12c7a08a5b85763d7f402b1dc0188c18e82448217354ba3a444e0d5a7625360
|
# Demonstration of structural plasticity in ANNarchy: synapses are created
# and pruned at runtime while two subpopulations are alternately stimulated.
from ANNarchy import *

# Compulsory to allow structural plasticity
setup(structural_plasticity=True)

# Simple neuron type
LeakyIntegratorNeuron = Neuron(
    parameters="""
        tau = 10.0 : population
        baseline = 0.0
    """,
    equations = """
        tau * dr/dt + r = baseline + sum(exc) : min=0.0
    """
)

# Structurally plastic synapse: 'age' counts steps of low pre*post activity;
# old synapses may be pruned, correlated neuron pairs may grow new synapses
StructuralPlasticSynapse = Synapse(
    parameters = " T = 10000 : int, projection ",
    equations = """
        age = if pre.r * post.r > 1.0 :
                  0
              else :
                  age + 1 : init = 0, int""",
    pruning = "age > T : proba = 0.2",
    creating = "pre.r * post.r > 1.0 : proba = 0.1, w = 0.01",
)

# A single population
pop = Population(100, LeakyIntegratorNeuron)

# Lateral excitatory projection, initially sparse
proj = Projection(pop, pop, 'exc', StructuralPlasticSynapse)
proj.connect_fixed_probability(weights = 0.01, probability=0.1)

compile()

# Save the initial connectivity matrix
initial_weights = proj.connectivity_matrix()

# Start creating and pruning (checked every 100 ms of simulated time)
proj.start_creating(period=100.0)
proj.start_pruning(period=100.0)

# Let structural plasticity over several trials
num_trials = 100
for trial in range(num_trials):
    # Activate the first subpopulation
    pop[:50].baseline = 1.0
    # Simulate for 1s
    simulate(1000.)
    # Reset the population
    pop.baseline = 0.0
    simulate(100.)
    # Activate the second subpopulation
    pop[50:].baseline = 1.0
    # Simulate for 1s
    simulate(1000.)
    # Reset the population
    pop.baseline = 0.0
    simulate(100.)

# Inspect the final connectivity matrix
final_weights = proj.connectivity_matrix()

# Visualize the two connectivity matrices
import matplotlib.pyplot as plt
plt.subplot(121)
plt.imshow(initial_weights)
plt.title('Connectivity matrix before')
plt.subplot(122)
plt.imshow(final_weights)
plt.title('Connectivity matrix after')
plt.show()
|
ANNarchy/ANNarchy
|
examples/structural_plasticity/StructuralPlasticity.py
|
Python
|
gpl-2.0
| 1,923
|
[
"NEURON"
] |
e1c2ff81542d97cf995ef025177e6363630a7901e280f53d592f7edd0bf89055
|
# Orca
#
# Copyright (C) 2010-2011 The Orca Team
# Copyright (C) 2011-2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (C) 2010-2011 The Orca Team" \
"Copyright (C) 2011-2012 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import pyatspi.utils as utils
import orca.scripts.default as default
import orca.cmdnames as cmdnames
import orca.debug as debug
import orca.eventsynthesizer as eventsynthesizer
import orca.guilabels as guilabels
import orca.input_event as input_event
import orca.messages as messages
import orca.orca as orca
import orca.settings as settings
import orca.settings_manager as settings_manager
import orca.speechserver as speechserver
import orca.orca_state as orca_state
import orca.speech as speech
import orca.structural_navigation as structural_navigation
from .braille_generator import BrailleGenerator
from .speech_generator import SpeechGenerator
from .script_utilities import Utilities
_settingsManager = settings_manager.getManager()
########################################################################
# #
# The WebKitGtk script class. #
# #
########################################################################
class Script(default.Script):
    def __init__(self, app):
        """Creates a new script for WebKitGtk applications.

        Arguments:
        - app: the application to create a script for.
        """

        super().__init__(app)
        self._loadingDocumentContent = False
        # (object, characterOffset) pair for the last known caret context
        self._lastCaretContext = None, -1
        # created lazily in getAppPreferencesGUI()
        self.sayAllOnLoadCheckButton = None

        # default 'say all on load' to True the first time this script runs
        if _settingsManager.getSetting('sayAllOnLoad') is None:
            _settingsManager.setSetting('sayAllOnLoad', True)
    def setupInputEventHandlers(self):
        """Defines InputEventHandler fields for this script that can be
        called by the key and braille bindings."""

        default.Script.setupInputEventHandlers(self)
        # merge in the structural navigation handlers (headings, links, ...)
        self.inputEventHandlers.update(
            self.structuralNavigation.inputEventHandlers)

        self.inputEventHandlers["sayAllHandler"] = \
            input_event.InputEventHandler(
                Script.sayAll,
                cmdnames.SAY_ALL)

        self.inputEventHandlers["panBrailleLeftHandler"] = \
            input_event.InputEventHandler(
                Script.panBrailleLeft,
                cmdnames.PAN_BRAILLE_LEFT,
                False) # Do not enable learn mode for this action

        self.inputEventHandlers["panBrailleRightHandler"] = \
            input_event.InputEventHandler(
                Script.panBrailleRight,
                cmdnames.PAN_BRAILLE_RIGHT,
                False) # Do not enable learn mode for this action
    def getToolkitKeyBindings(self):
        """Returns the toolkit-specific keybindings for this script."""

        return self.structuralNavigation.keyBindings

    def getAppPreferencesGUI(self):
        """Return a GtkGrid containing the application unique configuration
        GUI items for the current application."""

        from gi.repository import Gtk

        grid = Gtk.Grid()
        grid.set_border_width(12)

        label = guilabels.READ_PAGE_UPON_LOAD
        self.sayAllOnLoadCheckButton = \
            Gtk.CheckButton.new_with_mnemonic(label)
        # reflect the currently stored setting in the checkbox
        self.sayAllOnLoadCheckButton.set_active(
            _settingsManager.getSetting('sayAllOnLoad'))
        grid.attach(self.sayAllOnLoadCheckButton, 0, 0, 1, 1)

        grid.show_all()
        return grid
def getPreferencesFromGUI(self):
"""Returns a dictionary with the app-specific preferences."""
return {'sayAllOnLoad': self.sayAllOnLoadCheckButton.get_active()}
def getBrailleGenerator(self):
"""Returns the braille generator for this script."""
return BrailleGenerator(self)
def getSpeechGenerator(self):
"""Returns the speech generator for this script."""
return SpeechGenerator(self)
def getEnabledStructuralNavigationTypes(self):
"""Returns a list of the structural navigation object types
enabled in this script."""
return [structural_navigation.StructuralNavigation.BLOCKQUOTE,
structural_navigation.StructuralNavigation.BUTTON,
structural_navigation.StructuralNavigation.CHECK_BOX,
structural_navigation.StructuralNavigation.CHUNK,
structural_navigation.StructuralNavigation.CLICKABLE,
structural_navigation.StructuralNavigation.COMBO_BOX,
structural_navigation.StructuralNavigation.CONTAINER,
structural_navigation.StructuralNavigation.ENTRY,
structural_navigation.StructuralNavigation.FORM_FIELD,
structural_navigation.StructuralNavigation.HEADING,
structural_navigation.StructuralNavigation.IMAGE,
structural_navigation.StructuralNavigation.LANDMARK,
structural_navigation.StructuralNavigation.LINK,
structural_navigation.StructuralNavigation.LIST,
structural_navigation.StructuralNavigation.LIST_ITEM,
structural_navigation.StructuralNavigation.LIVE_REGION,
structural_navigation.StructuralNavigation.PARAGRAPH,
structural_navigation.StructuralNavigation.RADIO_BUTTON,
structural_navigation.StructuralNavigation.SEPARATOR,
structural_navigation.StructuralNavigation.TABLE,
structural_navigation.StructuralNavigation.TABLE_CELL,
structural_navigation.StructuralNavigation.UNVISITED_LINK,
structural_navigation.StructuralNavigation.VISITED_LINK]
def getUtilities(self):
"""Returns the utilites for this script."""
return Utilities(self)
def onCaretMoved(self, event):
"""Callback for object:text-caret-moved accessibility events."""
if self._inSayAll:
return
if not self.utilities.isWebKitGtk(event.source):
super().onCaretMoved(event)
return
lastKey, mods = self.utilities.lastKeyAndModifiers()
if lastKey in ['Tab', 'ISO_Left_Tab']:
return
if lastKey == 'Down' \
and orca_state.locusOfFocus == event.source.parent \
and event.source.getIndexInParent() == 0 \
and orca_state.locusOfFocus.getRole() == pyatspi.ROLE_LINK:
self.updateBraille(event.source)
return
self.utilities.setCaretContext(event.source, event.detail1)
super().onCaretMoved(event)
def onDocumentReload(self, event):
"""Callback for document:reload accessibility events."""
if self.utilities.treatAsBrowser(event.source):
self._loadingDocumentContent = True
def onDocumentLoadComplete(self, event):
"""Callback for document:load-complete accessibility events."""
if not self.utilities.treatAsBrowser(event.source):
return
self._loadingDocumentContent = False
# TODO: We need to see what happens in Epiphany on pages where focus
# is grabbed rather than set the caret at the start. But for simple
# content in both Yelp and Epiphany this is alright for now.
obj, offset = self.utilities.setCaretAtStart(event.source)
self.utilities.setCaretContext(obj, offset)
self.updateBraille(obj)
if _settingsManager.getSetting('sayAllOnLoad') \
and _settingsManager.getSetting('enableSpeech'):
self.sayAll(None)
def onDocumentLoadStopped(self, event):
"""Callback for document:load-stopped accessibility events."""
if self.utilities.treatAsBrowser(event.source):
self._loadingDocumentContent = False
def onFocusedChanged(self, event):
"""Callback for object:state-changed:focused accessibility events."""
if self._inSayAll or not event.detail1:
return
if not self.utilities.isWebKitGtk(event.source):
super().onFocusedChanged(event)
return
contextObj, offset = self.utilities.getCaretContext()
if event.source == contextObj:
return
obj = event.source
role = obj.getRole()
textRoles = [pyatspi.ROLE_HEADING,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_TABLE_CELL]
if role in textRoles \
or (role == pyatspi.ROLE_LIST_ITEM and obj.childCount):
return
super().onFocusedChanged(event)
def onBusyChanged(self, event):
"""Callback for object:state-changed:busy accessibility events."""
obj = event.source
try:
role = obj.getRole()
name = obj.name
except:
return
if not self.utilities.treatAsBrowser(obj):
return
if event.detail1:
self.presentMessage(messages.PAGE_LOADING_START)
elif name:
self.presentMessage(messages.PAGE_LOADING_END_NAMED % name)
else:
self.presentMessage(messages.PAGE_LOADING_END)
def sayCharacter(self, obj):
"""Speak the character at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayCharacter(self, obj)
return
boundary = pyatspi.TEXT_BOUNDARY_CHAR
objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
for (obj, start, end, string) in objects:
if string:
self.speakCharacter(string)
else:
speech.speak(self.speechGenerator.generateSpeech(obj))
self.pointOfReference["lastTextUnitSpoken"] = "char"
def sayWord(self, obj):
"""Speaks the word at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayWord(self, obj)
return
boundary = pyatspi.TEXT_BOUNDARY_WORD_START
objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
for (obj, start, end, string) in objects:
self.sayPhrase(obj, start, end)
self.pointOfReference["lastTextUnitSpoken"] = "word"
def sayLine(self, obj):
"""Speaks the line at the caret.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayLine(self, obj)
return
boundary = pyatspi.TEXT_BOUNDARY_LINE_START
objects = self.utilities.getObjectsFromEOCs(obj, boundary=boundary)
for (obj, start, end, string) in objects:
self.sayPhrase(obj, start, end)
# TODO: Move these next items into the speech generator.
if obj.getRole() == pyatspi.ROLE_PANEL \
and obj.getIndexInParent() == 0:
obj = obj.parent
rolesToSpeak = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK]
if obj.getRole() in rolesToSpeak:
speech.speak(self.speechGenerator.getRoleName(obj))
self.pointOfReference["lastTextUnitSpoken"] = "line"
def sayPhrase(self, obj, startOffset, endOffset):
"""Speaks the text of an Accessible object between the given offsets.
Arguments:
- obj: an Accessible object that implements the AccessibleText interface
- startOffset: the start text offset.
- endOffset: the end text offset.
"""
if obj.getRole() == pyatspi.ROLE_ENTRY:
default.Script.sayPhrase(self, obj, startOffset, endOffset)
return
phrase = self.utilities.substring(obj, startOffset, endOffset)
if len(phrase) and phrase != "\n":
voice = self.speechGenerator.voice(obj=obj, string=phrase)
phrase = self.utilities.adjustForRepeats(phrase)
links = [x for x in obj if x.getRole() == pyatspi.ROLE_LINK]
if links:
phrase = self.utilities.adjustForLinks(obj, phrase, startOffset)
speech.speak(phrase, voice)
else:
# Speak blank line if appropriate.
#
self.sayCharacter(obj)
self.pointOfReference["lastTextUnitSpoken"] = "phrase"
def skipObjectEvent(self, event):
"""Gives us, and scripts, the ability to decide an event isn't
worth taking the time to process under the current circumstances.
Arguments:
- event: the Event
Returns True if we shouldn't bother processing this object event.
"""
if event.type.startswith('object:state-changed:focused') \
and event.detail1:
if event.source.getRole() == pyatspi.ROLE_LINK:
return False
return default.Script.skipObjectEvent(self, event)
def useStructuralNavigationModel(self):
"""Returns True if we should do our own structural navigation.
This should return False if we're in a form field, or not in
document content.
"""
doNotHandleRoles = [pyatspi.ROLE_ENTRY,
pyatspi.ROLE_TEXT,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_LIST,
pyatspi.ROLE_LIST_ITEM,
pyatspi.ROLE_MENU_ITEM]
if not self.structuralNavigation.enabled:
return False
if not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return False
states = orca_state.locusOfFocus.getState()
if states.contains(pyatspi.STATE_EDITABLE):
return False
role = orca_state.locusOfFocus.getRole()
if role in doNotHandleRoles:
if role == pyatspi.ROLE_LIST_ITEM:
return not states.contains(pyatspi.STATE_SELECTABLE)
if states.contains(pyatspi.STATE_FOCUSED):
return False
return True
def panBrailleLeft(self, inputEvent=None, panAmount=0):
"""In document content, we want to use the panning keys to browse the
entire document.
"""
if self.flatReviewContext \
or not self.isBrailleBeginningShowing() \
or not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return default.Script.panBrailleLeft(self, inputEvent, panAmount)
obj = self.utilities.findPreviousObject(orca_state.locusOfFocus)
orca.setLocusOfFocus(None, obj, notifyScript=False)
self.updateBraille(obj)
# Hack: When panning to the left in a document, we want to start at
# the right/bottom of each new object. For now, we'll pan there.
# When time permits, we'll give our braille code some smarts.
while self.panBrailleInDirection(panToLeft=False):
pass
self.refreshBraille(False)
return True
def panBrailleRight(self, inputEvent=None, panAmount=0):
"""In document content, we want to use the panning keys to browse the
entire document.
"""
if self.flatReviewContext \
or not self.isBrailleEndShowing() \
or not self.utilities.isWebKitGtk(orca_state.locusOfFocus):
return default.Script.panBrailleRight(self, inputEvent, panAmount)
obj = self.utilities.findNextObject(orca_state.locusOfFocus)
orca.setLocusOfFocus(None, obj, notifyScript=False)
self.updateBraille(obj)
# Hack: When panning to the right in a document, we want to start at
# the left/top of each new object. For now, we'll pan there. When time
# permits, we'll give our braille code some smarts.
while self.panBrailleInDirection(panToLeft=True):
pass
self.refreshBraille(False)
return True
def sayAll(self, inputEvent, obj=None, offset=None):
"""Speaks the contents of the document beginning with the present
location. Overridden in this script because the sayAll could have
been started on an object without text (such as an image).
"""
obj = obj or orca_state.locusOfFocus
if not self.utilities.isWebKitGtk(obj):
return default.Script.sayAll(self, inputEvent, obj, offset)
speech.sayAll(self.textLines(obj, offset),
self.__sayAllProgressCallback)
return True
def getTextSegments(self, obj, boundary, offset=0):
segments = []
text = obj.queryText()
length = text.characterCount
string, start, end = text.getTextAtOffset(offset, boundary)
while string and offset < length:
string = self.utilities.adjustForRepeats(string)
voice = self.speechGenerator.getVoiceForString(obj, string)
string = self.utilities.adjustForLinks(obj, string, start)
# Incrementing the offset should cause us to eventually reach
# the end of the text as indicated by a 0-length string and
# start and end offsets of 0. Sometimes WebKitGtk returns the
# final text segment instead.
if segments and [string, start, end, voice] == segments[-1]:
break
segments.append([string, start, end, voice])
offset = end + 1
string, start, end = text.getTextAtOffset(offset, boundary)
return segments
def textLines(self, obj, offset=None):
"""Creates a generator that can be used to iterate over each line
of a text object, starting at the caret offset.
Arguments:
- obj: an Accessible that has a text specialization
Returns an iterator that produces elements of the form:
[SayAllContext, acss], where SayAllContext has the text to be
spoken and acss is an ACSS instance for speaking the text.
"""
self._sayAllIsInterrupted = False
self._inSayAll = False
if not obj:
return
if obj.getRole() == pyatspi.ROLE_LINK:
obj = obj.parent
document = self.utilities.getDocumentForObject(obj)
if not document or document.getState().contains(pyatspi.STATE_BUSY):
return
allTextObjs = utils.findAllDescendants(
document, lambda x: x and 'Text' in utils.listInterfaces(x))
allTextObjs = allTextObjs[allTextObjs.index(obj):len(allTextObjs)]
textObjs = [x for x in allTextObjs if x.parent not in allTextObjs]
if not textObjs:
return
boundary = pyatspi.TEXT_BOUNDARY_LINE_START
sayAllStyle = _settingsManager.getSetting('sayAllStyle')
if sayAllStyle == settings.SAYALL_STYLE_SENTENCE:
boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START
voices = _settingsManager.getSetting('voices')
systemVoice = voices.get(settings.SYSTEM_VOICE)
self._inSayAll = True
offset = textObjs[0].queryText().caretOffset
for textObj in textObjs:
textSegments = self.getTextSegments(textObj, boundary, offset)
roleName = self.speechGenerator.getRoleName(textObj)
if roleName:
textSegments.append([roleName, 0, -1, systemVoice])
for (string, start, end, voice) in textSegments:
context = speechserver.SayAllContext(textObj, string, start, end)
self._sayAllContexts.append(context)
eventsynthesizer.scrollIntoView(obj, start, end)
yield [context, voice]
offset = 0
self._inSayAll = False
self._sayAllContexts = []
def __sayAllProgressCallback(self, context, progressType):
if progressType == speechserver.SayAllContext.PROGRESS:
orca.emitRegionChanged(
context.obj, context.currentOffset, context.currentEndOffset, orca.SAY_ALL)
return
obj = context.obj
orca.setLocusOfFocus(None, obj, notifyScript=False)
offset = context.currentOffset
text = obj.queryText()
if progressType == speechserver.SayAllContext.INTERRUPTED:
self._sayAllIsInterrupted = True
if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
lastKey = orca_state.lastInputEvent.event_string
if lastKey == "Down" and self._fastForwardSayAll(context):
return
elif lastKey == "Up" and self._rewindSayAll(context):
return
self._inSayAll = False
self._sayAllContexts = []
if not self._lastCommandWasStructNav:
text.setCaretOffset(offset)
orca.emitRegionChanged(obj, offset)
return
# SayAllContext.COMPLETED doesn't necessarily mean done with SayAll;
# just done with the current object. If we're still in SayAll, we do
# not want to set the caret (and hence set focus) in a link we just
# passed by.
try:
hypertext = obj.queryHypertext()
except NotImplementedError:
pass
else:
linkCount = hypertext.getNLinks()
links = [hypertext.getLink(x) for x in range(linkCount)]
if [l for l in links if l.startIndex <= offset <= l.endIndex]:
return
orca.emitRegionChanged(obj, offset, mode=orca.SAY_ALL)
text.setCaretOffset(offset)
def getTextLineAtCaret(self, obj, offset=None, startOffset=None, endOffset=None):
"""To-be-removed. Returns the string, caretOffset, startOffset."""
textLine = super().getTextLineAtCaret(obj, offset, startOffset, endOffset)
string = textLine[0]
if string and string.find(self.EMBEDDED_OBJECT_CHARACTER) == -1 \
and obj.getState().contains(pyatspi.STATE_FOCUSED):
return textLine
textLine[0] = self.utilities.displayedText(obj)
try:
text = obj.queryText()
except:
pass
else:
textLine[1] = min(textLine[1], text.characterCount)
return textLine
def updateBraille(self, obj, **args):
"""Updates the braille display to show the given object.
Arguments:
- obj: the Accessible
"""
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: update disabled", True)
return
if not obj:
return
if not self.utilities.isWebKitGtk(obj) \
or (not self.utilities.isInlineContainer(obj) \
and not self.utilities.isTextListItem(obj)):
default.Script.updateBraille(self, obj, **args)
return
brailleLine = self.getNewBrailleLine(clearBraille=True, addLine=True)
for child in obj:
if not self.utilities.onSameLine(child, obj[0]):
break
[regions, fRegion] = self.brailleGenerator.generateBraille(child)
self.addBrailleRegionsToLine(regions, brailleLine)
if not brailleLine.regions:
[regions, fRegion] = self.brailleGenerator.generateBraille(
obj, role=pyatspi.ROLE_PARAGRAPH)
self.addBrailleRegionsToLine(regions, brailleLine)
self.setBrailleFocus(fRegion)
extraRegion = args.get('extraRegion')
if extraRegion:
self.addBrailleRegionToLine(extraRegion, brailleLine)
self.refreshBraille()
|
GNOME/orca
|
src/orca/scripts/toolkits/WebKitGtk/script.py
|
Python
|
lgpl-2.1
| 24,873
|
[
"ORCA"
] |
f8dabb923a0594f9c69eea883f40c913dcad5fc9ef24b133349159ff3da39df7
|
from polychartQuery.expr import ExprTreeVisitor
from polychartQuery.utils import isNumber
# GENERAL SHARED FUNCTIONS
QUOTE = "'" # note: double quote does not work in postgres!
def escape(str):
    """Escape *str* for embedding in a single-quoted SQL string literal.

    BUG FIX: this was a pass-through TODO, so quote() produced broken
    (and injectable) SQL for any value containing a quote character.
    Standard SQL -- both MySQL and Postgres -- escapes an embedded quote
    by doubling it.
    """
    return str.replace(QUOTE, QUOTE + QUOTE)
def quote(str):
    """Wrap *str* in SQL quotes, escaping embedded quote characters."""
    return QUOTE+escape(str)+QUOTE
def unquote(str):
    """Strip one surrounding pair of SQL quotes from *str*, if present.

    Non-quoted input is returned unchanged; empty input no longer raises
    IndexError.
    """
    if str and str[0] == QUOTE and str[-1] == QUOTE:
        return str[1:len(str)-1]
    return str
class ExprToSql(ExprTreeVisitor):
    """Base visitor rendering a polyjs expression tree as SQL text.

    Dialect subclasses override `concat` and `binnum` and extend the
    `fns` / `binfns` template tables.
    """
    def __init__(self):
        # Dialect hooks: string concatenation and numeric binning templates.
        self.concat = "{0} ++ {1}"
        self.binnum = "ROUND({0}/{1}) * {1}"
        # Scalar and aggregate function templates shared by all dialects.
        self.fns = {
            'log': 'LOG({0})'
            # string functions
            , 'length': 'CHAR_LENGTH({0})'
            , 'upper': 'UPPER({0})'
            , 'lower': 'LOWER({0})'
            , 'parseNum': '0+{0}'
            # date functions
            # statistical aggregate functions
            , 'count': 'COUNT({0})'
            , 'mean': 'AVG({0})'
            , 'median': 'MEDIAN({0})'
            , 'max': 'MAX({0})'
            , 'min': 'MIN({0})'
            , 'sum': 'SUM({0})'
            , 'unique': 'COUNT(DISTINCT {0})'
            # for backend use only
        }
        # Date-binning templates; populated by dialect subclasses.
        self.binfns = {}
    def ident(self, name): return name # TODO: check for SQL Injection
    def const(self, type, value):
        """Render a constant: numbers pass through, strings are quoted."""
        if type == 'num':
            return value
        else:
            return quote(value)
    def infixop(self, opname, lhs, rhs):
        """Render a binary operation.

        BUG FIX: operands are now parenthesized.  `lhs` and `rhs` arrive as
        already-rendered SQL fragments, so plain concatenation produced SQL
        with the wrong precedence (e.g. lhs='a+b', opname='*' yielded
        'a+b*c'; likewise bin(a+b, 10) yielded 'ROUND(a+b/10) * 10').
        """
        if opname in ["+", "-", "*", "/", "%", ">", "<", ">=", "<=", "!="]:
            return "(" + lhs + opname + rhs + ")"
        if opname == "==":
            # polyjs equality maps to the SQL '=' operator.
            return "(" + lhs + "=" + rhs + ")"
        if opname == "++":
            return self.concat.format(lhs, rhs)
        raise Exception("Unknown polyjs operation %s" % opname)
    def conditional(self, cond, conseq, altern):
        # NOTE(review): IF() is MySQL-specific; Postgres would require
        # CASE WHEN ... THEN ... ELSE ... END -- confirm before use there.
        return "IF(%s, %s, %s)" % (cond, conseq, altern)
    def call(self, fname, args):
        """Render a function call using the dialect's template tables."""
        if fname in self.fns:
            return self.fns[fname].format(*args)
        if fname == 'bin':
            return self.fn_bin(args)
        raise Exception("Unknown polyjs function %s" % fname)
    def fn_bin(self, args):
        """Render bin(key, width): numeric widths use `binnum`; named
        date widths (e.g. 'month') dispatch through `binfns`."""
        key, bw = args
        if isNumber(bw):
            return self.binnum.format(key, bw)
        bw = unquote(bw)
        return self.binfns[bw].format(key)
class ExprToMySql(ExprToSql):
    """MySQL dialect of the SQL expression renderer."""
    def __init__(self):
        super(ExprToMySql, self).__init__()
        # BUG FIX: MySQL string concatenation is CONCAT(); the inherited
        # "{0} ++ {1}" template is parsed by MySQL as numeric addition
        # ('a ++ b' == a + (+b)), silently producing wrong results.
        self.concat = "CONCAT({0}, {1})"
        self.binnum = "ROUND({0}/{1}) * {1}"
        self.fns.update({
            # string functions (MySQL offsets are 1-based; polyjs is 0-based)
            'substr': 'SUBSTRING({0},{1}+1,{2})'
            , 'indexOf': 'INSTR({0},{1})-1'
            # date functions
            , 'year': 'YEAR({0})'
            , 'month': 'MONTH({0})'
            , 'dayOfMonth': 'DAY({0})'
            , 'dayOfYear': 'DAYOFYEAR({0})'
            , 'dayOfWeek': 'DAYOFWEEK({0})'
            , 'week': 'WEEK({0})'
            , 'hour': 'HOUR({0})'
            , 'minute': 'MINUTE({0})'
            , 'second': 'SECOND({0})'
            # for backend use only
            , 'unix': 'UNIX_TIMESTAMP({0})'
        })
        # Templates mapping a datetime column to the epoch of its bin start.
        self.binfns.update({
            'second': 'UNIX_TIMESTAMP({0})'
            , 'minute': 'UNIX_TIMESTAMP(DATE_SUB({0},INTERVAL second({0}) SECOND))'
            , 'hour': 'UNIX_TIMESTAMP(DATE_SUB({0},INTERVAL 60*minute({0})+second({0}) SECOND))'
            , 'day': 'UNIX_TIMESTAMP(DATE({0}))'
            , 'week': 'UNIX_TIMESTAMP(DATE(SUBDATE({0},DAYOFWEEK({0})-1)))'
            , 'month': 'UNIX_TIMESTAMP(DATE(SUBDATE({0},DAYOFMONTH({0})-1)))'
            , 'twoMonth': "UNIX_TIMESTAMP(CONCAT(YEAR({0}),'-',MONTH({0})-MOD(MONTH({0}),2)+1,'-01'))"
            # NOTE(review): quarters are 3-month bins, yet this uses MOD(...,4);
            # verify the intended bin boundaries before relying on 'quarter'.
            , 'quarter': "UNIX_TIMESTAMP(CONCAT(YEAR({0}),'-',MONTH({0})-MOD(MONTH({0}),4)+1,'-01'))"
            , 'sixMonth': "UNIX_TIMESTAMP(CONCAT(YEAR({0}),'-',MONTH({0})-MOD(MONTH({0}),6)+1,'-01'))"
            , 'year': 'UNIX_TIMESTAMP(DATE(SUBDATE({0}, DAYOFYEAR({0})-1)))'
            , 'twoYear': "UNIX_TIMESTAMP(CONCAT(YEAR({0}) - MOD(YEAR({0}), 2), '-01-01'))"
            , 'fiveYear': "UNIX_TIMESTAMP(CONCAT(YEAR({0}) - MOD(YEAR({0}), 5), '-01-01'))"
            , 'decade': "UNIX_TIMESTAMP(CONCAT(YEAR({0}) - MOD(YEAR({0}), 10), '-01-01'))"
        })
# Shared renderer instance; the visitor keeps no per-expression state.
exprToMySqlInstance = ExprToMySql()
def exprToMySql(expr):
    """Render *expr* as MySQL SQL, normalizing COUNT(1) to COUNT(*)."""
    rendered = exprToMySqlInstance.visit(expr)
    if rendered == 'COUNT(1)':
        # Equivalent aggregate; COUNT(*) is the idiomatic spelling.
        rendered = 'COUNT(*)'
    return rendered
class ExprToPostgres(ExprToSql):
    """PostgreSQL dialect of the SQL expression renderer."""
    def __init__(self):
        super(ExprToPostgres, self).__init__()
        self.concat = "{0} || {1}"
        self.binnum = '({0}/{1})::int * {1}'
        self.fns.update({
            # string functions (Postgres offsets are 1-based; polyjs is 0-based)
            'substr': 'SUBSTRING({0} FROM {1}+1 FOR {2})'
            , 'indexOf': 'STRPOS({0}, {1})-1'
            # date functions
            , 'year': 'EXTRACT(YEAR FROM {0})'
            , 'month': 'EXTRACT(MONTH FROM {0})'
            , 'dayOfMonth': 'EXTRACT(DAY FROM {0})'
            , 'dayOfYear': 'EXTRACT(DOY FROM {0})'
            , 'dayOfWeek': 'EXTRACT(DOW FROM {0})'
            , 'week': 'EXTRACT(WEEK FROM {0})'
            , 'hour': 'EXTRACT(HOUR FROM {0})'
            , 'minute': 'EXTRACT(MINUTE FROM {0})'
            , 'second': 'EXTRACT(SECOND FROM {0})'
            # for backend use only
            , 'unix': 'EXTRACT(EPOCH FROM {0})'
        })
        # Templates mapping a timestamp column to the epoch of its bin start.
        # BUG FIX: DATE_TRUNC()/DATE_PART() take the field name as a *string*;
        # the previous unquoted identifiers (MINUTE, HOUR, ...) are invalid
        # Postgres and fail with "column does not exist".
        self.binfns.update({
            'second': 'EXTRACT(EPOCH FROM {0})'
            , 'minute': "EXTRACT(EPOCH FROM DATE_TRUNC('minute', {0}))"
            , 'hour': "EXTRACT(EPOCH FROM DATE_TRUNC('hour', {0}))"
            , 'day': "EXTRACT(EPOCH FROM DATE_TRUNC('day', {0}))"
            , 'week': "EXTRACT(EPOCH FROM DATE_TRUNC('week', {0}))"
            , 'month': "EXTRACT(EPOCH FROM DATE_TRUNC('month', {0}))"
            , 'twoMonth': "EXTRACT(EPOCH FROM DATE_TRUNC('month', {0}-(DATE_PART('month',{0})::int%2||'MONTHS')::interval))"
            , 'quarter': "EXTRACT(EPOCH FROM DATE_TRUNC('quarter', {0}))"
            # BUG FIX: this entry was a duplicate 'twoMonth' key that silently
            # overwrote the first; its %6 arithmetic shows it was meant to be
            # 'sixMonth', matching the MySQL dialect's table.
            , 'sixMonth': "EXTRACT(EPOCH FROM DATE_TRUNC('month', {0}-(DATE_PART('month',{0})::int%6||'MONTHS')::interval))"
            , 'year': "EXTRACT(EPOCH FROM DATE_TRUNC('year', {0}))"
            , 'twoYear': "EXTRACT(EPOCH FROM DATE_TRUNC('year', {0}-(DATE_PART('year',{0})::int%2||'YEARS')::interval))"
            , 'fiveYear': "EXTRACT(EPOCH FROM DATE_TRUNC('year', {0}-(DATE_PART('year',{0})::int%5||'YEARS')::interval))"
            , 'decade': "EXTRACT(EPOCH FROM DATE_TRUNC('decade', {0}))"
        })
# Shared renderer instance; the visitor keeps no per-expression state.
exprToPostgresInstance = ExprToPostgres()
def exprToPostgres(expr):
    """Render *expr* as PostgreSQL SQL, normalizing COUNT(1) to COUNT(*)."""
    rendered = exprToPostgresInstance.visit(expr)
    if rendered == 'COUNT(1)':
        # Equivalent aggregate; COUNT(*) is the idiomatic spelling.
        rendered = 'COUNT(*)'
    return rendered
|
Polychart/builder
|
server/polychartQuery/sql/expr.py
|
Python
|
agpl-3.0
| 5,886
|
[
"VisIt"
] |
89dfd5cb45557bd50343b6620d9ce95e05d6e7f2511dde511de8b7399f1d2985
|
from benchbuild.utils.wrapping import wrap
from benchbuild.projects.benchbuild.group import BenchBuildGroup
from benchbuild.utils.compiler import lt_clang_cxx
from benchbuild.utils.downloader import Git
from benchbuild.utils.run import run
from benchbuild.utils.versions import get_version_from_cache_dir
from plumbum import local
from benchbuild.utils.cmd import cp, make
from os import path
from glob import glob
class Lammps(BenchBuildGroup):
    """ LAMMPS benchmark """
    # Project identity used by benchbuild's registry.
    NAME = 'lammps'
    DOMAIN = 'scientific'
    SRC_FILE = 'lammps.git'
    def prepare(self):
        # Stage the test inputs: copy the shared test directory into a local
        # "test" directory so runs do not touch the originals.
        super(Lammps, self).prepare()
        cp("-vr", self.testdir, "test")
    def run_tests(self, experiment, run):
        # Wrap the built serial LAMMPS binary with the experiment harness,
        # then feed each in.* input deck to it on stdin.
        lammps_dir = path.join(self.builddir, self.src_dir, "src")
        exp = wrap(path.join(lammps_dir, "lmp_serial"), experiment)
        with local.cwd("test"):
            # NOTE(review): this globs self.testdir rather than the local
            # "test" copy made in prepare() -- presumably self.testdir is an
            # absolute path; confirm.
            tests = glob(path.join(self.testdir, "in.*"))
            for test in tests:
                cmd = (exp < test)
                run(cmd, None)
    # Checkout directory and upstream repository for download().
    src_dir = SRC_FILE
    src_uri = "https://github.com/lammps/lammps"
    def download(self):
        Git(self.src_uri, self.src_dir)
    def configure(self):
        # Nothing to configure; the serial make target is self-contained.
        pass
    def build(self):
        # The serial build still links the OpenMP runtime.
        self.ldflags += ["-lgomp"]
        clang_cxx = lt_clang_cxx(self.cflags, self.ldflags,
                                 self.compiler_extension)
        with local.cwd(path.join(self.src_dir, "src")):
            # Clean rebuild of the serial target with the instrumented compiler.
            run(make[
                "CC=" + str(clang_cxx), "LINK=" + str(
                    clang_cxx), "clean", "serial"])
|
simbuerg/benchbuild
|
benchbuild/projects/benchbuild/lammps.py
|
Python
|
mit
| 1,566
|
[
"LAMMPS"
] |
cb60332e053a4881c8b39645bb0c6e17069aa679b61c03961a565edc55547a61
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# utils.py
#
# Copyright 2014 Gary Dalton <gary@ggis.biz>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
"""
Module utils.py documentation
A variety of utilities used by Twenty47. These are mostly used
to interact with AWS through boto though some other utilities
are provided.
"""
import sys
from flask.ext.mail import Message
from twenty47 import app, debug, sns_error
from flask import render_template, flash, abort, redirect, url_for
from flask.ext.mail import Mail
from boto import sns
from twenty47.models import User, Role
from itsdangerous import URLSafeTimedSerializer, BadSignature
mail = Mail(app)
conn = sns.SNSConnection()
def put_subscriber(topic, method, endpoint, conn=conn):
'''
Returns False or the subscription ARN:
{'SubscribeResponse': {'SubscribeResult': {'SubscriptionArn': 'arn:aws:sns:us-east-1:123456789012:Dispatch_Email:2a9f687f-2313-411f-88c8-4cce9b29c53b'}, 'ResponseMetadata': {'RequestId': 'a8763b99-33a7-11df-a9b7-05d48da6f042'}}}
Tested
'''
try:
result = conn.subscribe(topic, method, endpoint)
debug('Added %s subscriber %s has ARN of %s' % (method, endpoint, result['SubscribeResponse']['SubscribeResult']['SubscriptionArn']))
return result['SubscribeResponse']['SubscribeResult']['SubscriptionArn']
except Exception, e:
return
sns_error.send(app, func='put_email_subscriber', e=e)
return False
def get_one_subscriber(topic, endpoint, nexttoken=None, conn=conn):
'''
Returns False or the subscription data:
{'Owner': '796928799269', 'Endpoint': 'gary@gruffgoat.com', 'Protocol': 'email', 'TopicArn': 'arn:aws:sns:us-east-1:796928799269:Dispatch_Email', 'SubscriptionArn': 'arn:aws:sns:us-east-1:796928799269:Dispatch_Email:784fe2e3-f495-4c63-85e2-2306c2b400df'}
Tested
'''
try:
subscribers_obj = conn.get_all_subscriptions_by_topic(topic, nexttoken)
for subscriber in subscribers_obj['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']:
if subscriber['Endpoint'] == endpoint:
return subscriber
if subscribers_obj['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken'] is None:
return False
else:
nexttoken = subscribers_obj['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken']
return get_one_subscriber(topic, endpoint, nexttoken, conn)
except Exception, e:
return ('Error: %s' % (e))
sns_error.send(app, func='get_one_subscriber', e=e)
return False
def del_subscriber(arn, conn=conn):
if arn == "PendingConfirmation":
debug("Cannot delete subscription because its still Pending")
return False
try:
result = conn.unsubscribe(arn)
debug("Deleted subscription of ARN of %s." % (arn))
return True
except Exception, e:
return ('Error: %s' % (e))
sns_error.send(app, func='del_subscriber', e=e)
return False
def put_email_subscriber(email, conn=conn):
    # Subscribe an email endpoint to the dispatch email topic.
    topic = app.config['DISPATCH_EMAIL_TOPIC']
    return put_subscriber(topic, 'email', email, conn)
def put_sms_subscriber(phone, conn=conn):
    # Subscribe an SMS endpoint to the dispatch SMS topic.
    topic = app.config['DISPATCH_SMS_TOPIC']
    return put_subscriber(topic, 'sms', phone, conn)
def get_topic_subscribers(topic, nexttoken=None, subscribers=None, conn=conn):
'''
Returns a dictionary of the subscribers' endpoints and the ARNs
'''
if subscribers is None:
subscribers = {}
try:
subscribers_obj = conn.get_all_subscriptions_by_topic(topic, nexttoken)
nexttoken = subscribers_obj['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['NextToken']
for subscriber in subscribers_obj['ListSubscriptionsByTopicResponse']['ListSubscriptionsByTopicResult']['Subscriptions']:
subscribers[subscriber['Endpoint']] = subscriber['SubscriptionArn']
if nexttoken is None:
return subscribers
else:
return(get_topic_subscribers(topic, nexttoken, subscribers, conn))
except Exception, e:
return ('Error: %s' % (e))
sns_error.send(app, func='get_email_subscribers', e=e)
return subscribers
def get_all_subscribers(conn=conn):
    # Union of the email and SMS subscriber maps (endpoint -> ARN).
    merged = get_email_subscribers(conn=conn)
    merged.update(get_sms_subscribers(conn=conn))
    return merged
def get_email_subscribers(conn=conn):
    # All subscribers of the dispatch email topic (endpoint -> ARN).
    topic = app.config['DISPATCH_EMAIL_TOPIC']
    return get_topic_subscribers(topic, nexttoken=None, subscribers=None, conn=conn)
def get_sms_subscribers(conn=conn):
    # All subscribers of the dispatch SMS topic (endpoint -> ARN).
    topic = app.config['DISPATCH_SMS_TOPIC']
    return get_topic_subscribers(topic, nexttoken=None, subscribers=None, conn=conn)
def del_email_subscriber(arn, conn=conn):
    # Email unsubscription is identical to the generic case.
    return del_subscriber(arn, conn=conn)
def del_sms_subscriber(arn, conn=conn):
    # SMS unsubscription is identical to the generic case.
    return del_subscriber(arn, conn=conn)
def put_sns_message(topic, message, subject=None, conn=conn):
"""Send message via Amazon SNS.
:param message: Plain text message
"""
try:
result = conn.publish(topic=topic, message=message)
return result
except Exception, e:
return ('Error: %s' % (e))
sns_error.send(app, func='put_sns_sms_message', e=e)
return False
def put_sns_sms_message(message, conn=conn):
"""Send an SMS.
:param message: Plain text message, max length is 160 characters
"""
try:
result = conn.publish(topic=app.config['DISPATCH_SMS_TOPIC'], message=message)
return result
except Exception, e:
return ('Error: %s' % (e))
sns_error.send(app, func='put_sns_email_message', e=e)
return False
return put_sns_message(app.config['DISPATCH_SMS_TOPIC'], message[:150])
def put_sns_email_message(subject, template, conn=conn, **context):
"""Send an email via the Amazon SNS.
:param subject: Email subject
:param template: The name of the email template
"""
ctx = ('dispatch/sns', template)
message = render_template('%s/%s.txt' % ctx, **context)
#return put_sns_message(app.config['DISPATCH_EMAIL_TOPIC'], message=message, subject=subject)
try:
result = conn.publish(topic=app.config['DISPATCH_EMAIL_TOPIC'], message=message, subject=subject)
return result
except Exception, e:
return ('Error: %s' % (e))
sns_error.send(app, func='put_sns_email_message', e=e)
return False
def update_user_subscriptions(user):
    """Reconcile a user's SNS email/SMS subscriptions with their preferences.

    Removes subscriptions the user no longer qualifies for (not APPROVED, or
    the delivery method deselected), adds newly-approved ones, and stores the
    resulting ARNs back on the user record.  Returns True on completion,
    False if the user has no subscription record.
    """
    debug("In utils.update_user_subscriptions")
    if not user.subscription:
        debug("Why are we here")
        return False
    else:
        debug(user.subscription.email)
        debug(user.subscription.smsPhone)
    # Current SNS state: endpoint -> ARN maps for both topics.
    current_email_subscribers = get_email_subscribers()
    current_sms_subscribers = get_sms_subscribers()
    # ARNs in this list mean "no usable subscription yet".
    pending_or_emtpy = ['', 'PendingConfirmation', 'pending confirmation']
    # --- Email removal: user not approved, or email delivery deselected. ---
    if user.subscription.status != "APPROVED" or (user.subscription.methods != "Both" and user.subscription.methods != "Email"):
        # Get the best version of the ARN
        #if user.subscription.email_arn == '' or user.subscription.email_arn == 'PendingConfirmation':
        if user.subscription.email_arn in pending_or_emtpy:
            try:
                user.subscription.email_arn = current_email_subscribers[user.subscription.email]
            except KeyError:
                user.subscription.email_arn = ''
        elif user.subscription.email_arn not in current_email_subscribers.values():
            # Stored ARN no longer exists on the topic; clear it.
            user.subscription.email_arn = ''
        # Delete if there is an ARN
        #if user.subscription.email_arn != '' or user.subscription.email_arn != 'PendingConfirmation':
        if user.subscription.email_arn not in pending_or_emtpy:
            debug("Going to delete %s" % user.subscription.email)
            if del_email_subscriber(user.subscription.email_arn) is not False:
                user.subscription.email_arn = ''
    # --- SMS removal: user not approved, or SMS delivery deselected. ---
    if user.subscription.status != "APPROVED" or (user.subscription.methods != "Both" and user.subscription.methods != "SMS Phone"):
        # Get the best version of the ARN
        #if user.subscription.sms_arn == '' or user.subscription.sms_arn == 'PendingConfirmation':
        if user.subscription.sms_arn in pending_or_emtpy:
            try:
                # SNS stores SMS endpoints with a leading '1' country code.
                user.subscription.sms_arn = current_sms_subscribers['1' + user.subscription.smsPhone]
            except KeyError:
                user.subscription.sms_arn = ''
        elif user.subscription.sms_arn not in current_sms_subscribers.values():
            user.subscription.sms_arn = ''
        # Delete if there is an ARN
        #if user.subscription.sms_arn != '' or user.subscription.sms_arn != 'PendingConfirmation':
        if user.subscription.sms_arn not in pending_or_emtpy:
            debug("Going to delete %s" % user.subscription.smsPhone)
            if del_sms_subscriber(user.subscription.sms_arn) is not False:
                user.subscription.sms_arn = ''
    # --- Additions: user approved; subscribe selected delivery methods. ---
    if user.subscription.status == "APPROVED":
        if user.subscription.methods == "Both" or user.subscription.methods == "Email":
            # Minimal sanity check on the address length.
            if len(user.subscription.email) > 5:
                # Already subscribed?
                try:
                    arn = current_email_subscribers[user.subscription.email]
                except KeyError:
                    arn = put_email_subscriber(user.subscription.email)
                    if arn is False:
                        sns_error.send(app, func='update_user_subscriptions', e='Unable to put_email_subscriber')
                # Change email address?
                #if user.subscription.email_arn != '' or user.subscription.email_arn != 'PendingConfirmation':
                if user.subscription.email_arn not in pending_or_emtpy:
                    if user.subscription.email_arn != arn:
                        # Stored ARN belongs to an old address; drop it.
                        debug("Going to delete %s" % user.subscription.email)
                        del_email_subscriber(user.subscription.email_arn)
                user.subscription.email_arn = arn
        if user.subscription.methods == "Both" or user.subscription.methods == "SMS Phone":
            # Minimal sanity check on the phone number length.
            if len(user.subscription.smsPhone) > 7:
                # Already subscribed?
                try:
                    arn = current_sms_subscribers['1' + user.subscription.smsPhone]
                except KeyError:
                    arn = put_sms_subscriber('1' + user.subscription.smsPhone)
                    if arn is False:
                        sns_error.send(app, func='update_user_subscriptions', e='Unable to put_sms_subscriber')
                # Change sms number?
                #if user.subscription.sms_arn != '' or user.subscription.sms_arn != 'PendingConfirmation':
                if user.subscription.sms_arn not in pending_or_emtpy:
                    if user.subscription.sms_arn != arn:
                        debug("Going to delete %s" % user.subscription.smsPhone)
                        del_sms_subscriber(user.subscription.sms_arn)
                user.subscription.sms_arn = arn
    debug(user.subscription.email_arn)
    debug(user.subscription.sms_arn)
    # Persist the reconciled ARNs on the user record.
    user.save()
    return True
'''
if user.subscription.email:
current_subscribers = get_email_subscribers()
try:
user.subscription.email_arn = current_subscribers[user.subscription.email]
if user.subscription.status != "APPROVED" or (user.subscription.methods != "Both" and user.subscription.methods != "Email"):
if del_email_subscriber(user.subscription.email_arn) not False:
user.subscription.email_arn = ''
except KeyError:
if user.subscription.status == "APPROVED":
if user.subscription.methods == "Both" or user.subscription.methods == "Email":
user.subscription.email_arn = put_email_subscriber(user.subscription.email)
if user.subscription.email_arn is False:
sns_error.send(app, func='update_user_subscriptions', e='Unable to put_email_subscriber')
if user.subscription.smsPhone:
current_subscribers = get_sms_subscribers()
try:
user.subscription.sms_arn = current_subscribers['1' + user.subscription.smsPhone]
if user.subscription.status != "APPROVED" or (user.subscription.methods != "Both" and user.subscription.methods != "SMS Phone"):
del_sms_subscribers('1' + user.subscription.smsPhone)
user.subscription.sms_arn = ''
except KeyError:
if user.subscription.status == "APPROVED":
if user.subscription.methods == "Both" or user.subscription.methods == "SMS Phone":
user.subscription.sms_arn = put_sms_subscriber('1' + user.subscription.smsPhone)
'''
def update_all_user_subscriptions(user):
    # Compute which SNS email/SMS subscriptions would need to be added or
    # removed so that SNS matches the preferences of every APPROVED user.
    # The code that would actually talk to SNS is commented out below, so
    # this currently only computes and logs the difference lists.
    current_email_subscribers = get_email_subscribers()
    # Keys of the mapping are the currently subscribed email addresses.
    list_current_email_subscriber = list(current_email_subscribers.keys())
    debug('Current email subscribers are %s' % (list_current_email_subscriber))
    current_sms_subscribers = get_sms_subscribers()
    # Keys of the mapping are the currently subscribed phone numbers.
    list_current_sms_subscriber = list(current_sms_subscribers.keys())
    debug('Current SMS subscribers are %s' % (list_current_sms_subscriber))
    users = User.objects(subscription__status="APPROVED")
    needed_email_subscribers = []
    needed_sms_subscribers = []
    # Collect the endpoints every approved user wants.
    for user in users:
        debug(user.subscription.methods)
        if user.subscription.methods in ("Both", "Email"):
            needed_email_subscribers.append(user.subscription.email)
            debug("Added " + user.subscription.email)
        if user.subscription.methods in ("Both", "SMS Phone"):
            needed_sms_subscribers.append('1' + user.subscription.smsPhone)
    debug('Needed email subscribers are %s' % (needed_email_subscribers))
    debug('Needed SMS subscribers are %s' % (needed_sms_subscribers))
    # Set differences yield the add/remove work lists.
    add_email_subscribers = list(set(needed_email_subscribers) - set(list_current_email_subscriber))
    remove_email_subscribers = list(set(list_current_email_subscriber) - set(needed_email_subscribers))
    add_sms_subscribers = list(set(needed_sms_subscribers) - set(list_current_sms_subscriber))
    remove_sms_subscribers = list(set(list_current_sms_subscriber) - set(needed_sms_subscribers))
    '''
    # Here we communicate with SNS
    for email_subscriber in add_email_subscribers:
        debug('Add these email subscribers %s' % (email_subscriber))
        user.subscription.email_arn = put_email_subscriber(email_subscriber)
        debug('Added email subscriber %s has ARN of %s' % (email_subscriber, user.subscription.email_arn))
    for email_subscriber in remove_email_subscribers:
        debug('Remove these email subscribers %s with ARN %s' % (email_subscriber, current_email_subscribers[email_subscriber]))
        if current_email_subscribers[email_subscriber] != 'PendingConfirmation':
            debug('Can remove this email subscriber %s' % (email_subscriber))
    for sms_subscriber in add_sms_subscribers:
        debug('Add these sms subscribers %s' % (sms_subscriber))
        user.subscription.sms_arn = put_sms_subscriber(user)
        flash('Added sms subscriber %s has ARN of %s' % (sms_subscriber, user.subscription.sms_arn))
    for sms_subscriber in remove_sms_subscribers:
        if current_sms_subscribers[sms_subscriber] != 'PendingConfirmation':
            debug('Can remove this email subscriber %s' % (sms_subscriber))
    '''
def send_mail(subject, recipients, template, **context):
    """Send an email via the Flask-Mail extension.

    :param subject: Email subject line.
    :param recipients: List of recipient addresses.
    :param template: Base name of the template under ``dispatch/email``
        (rendered once as ``.txt`` and once as ``.html``).
    :param context: Extra keyword arguments passed to the templates.
    """
    base_dir = 'dispatch/email'
    msg = Message(subject,
                  sender=app.config['SECURITY_EMAIL_SENDER'],
                  recipients=recipients)
    msg.body = render_template('%s/%s.txt' % (base_dir, template), **context)
    msg.html = render_template('%s/%s.html' % (base_dir, template), **context)
    Mail(app).send(msg)
def get_serializer(secret_key=None):
    """Return a URL-safe timed serializer for signing payloads.

    :param secret_key: Signing key; defaults to the application's
        configured ``SECRET_KEY`` when omitted.

    Bug fix: the original unconditionally overwrote *secret_key* with
    ``app.config['SECRET_KEY']``, so both the caller-supplied key and the
    interim ``"secret"`` fallback were dead code.  The parameter is now
    honoured when given.
    """
    if secret_key is None:
        secret_key = app.config['SECRET_KEY']
    return URLSafeTimedSerializer(secret_key)
def get_activation_link(user_id, action):
    """Build an external admin URL carrying a signed "<user_id>,<action>" payload."""
    serializer = get_serializer()
    token = serializer.dumps(user_id + "," + action)
    return url_for('admin.remote_admin', payload=token, _external=True)
def get_users_with_role(role_name=None, list_of=None):
    """Collect an attribute from every user holding a role.

    :param role_name: Role to match (defaults to "User").
    :param list_of: Name of the user attribute to collect (defaults to "email").
    :return: List of the collected attribute values.
    """
    if role_name is None:
        role_name = "User"
    if list_of is None:
        list_of = "email"
    collected = []
    for role in Role.objects(name=role_name):
        for member in User.objects(roles=role):
            collected.append(member[list_of])
    return collected
def test_mongo_settings():
    # Verify that the configured MongoDB credentials can authenticate
    # against the configured database.
    # Returns True on success, otherwise the raised exception object.
    # NOTE: uses Python 2 ``except ... , e`` syntax; this module targets py2.
    from pymongo import MongoClient
    mc = MongoClient()
    tst_db = mc[app.config['MONGODB_SETTINGS']['DB']]
    try:
        tst_db.authenticate( app.config['MONGODB_SETTINGS']['USERNAME'], password = app.config['MONGODB_SETTINGS']['PASSWORD'] )
        return True
    except Exception, e:
        return e
def strip_non_digits(orig_str):
    # Return only the ASCII digit characters of *orig_str*.
    # Python 2 implementation: non-ASCII characters are dropped by the
    # ascii/ignore encode, then the two-argument form of ``str.translate``
    # deletes every byte listed in *nodigs* (all non-digit bytes).
    import string
    orig_str = orig_str.encode('ascii','ignore')
    allchar=string.maketrans('','')  # identity table covering all 256 bytes
    nodigs=allchar.translate(allchar, string.digits)  # every non-digit byte
    return orig_str.translate(allchar, nodigs)
|
gary-dalton/Twenty47
|
twenty47/utils.py
|
Python
|
mit
| 19,151
|
[
"Dalton"
] |
1662cfac97d79e16af7cea32e3f4c91e660178cba0d9bd3d0f007bf704c768c2
|
import atexit
import contextlib
import fnmatch
import importlib.util
import itertools
import os
import shutil
import sys
import uuid
import warnings
from enum import Enum
from functools import partial
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from posixpath import sep as posix_sep
from types import ModuleType
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Set
from typing import TypeVar
from typing import Union
import py
from _pytest.compat import assert_never
from _pytest.outcomes import skip
from _pytest.warning_types import PytestWarning
if sys.version_info[:2] >= (3, 6):
from pathlib import Path, PurePath
else:
from pathlib2 import Path, PurePath
__all__ = ["Path", "PurePath"]
LOCK_TIMEOUT = 60 * 60 * 24 * 3
_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
    """Return the lock-file path used to guard *path*."""
    return path / ".lock"
def ensure_reset_dir(path: Path) -> None:
    """Make *path* an empty directory, deleting any previous contents."""
    if path.exists():
        rm_rf(path)
    path.mkdir()
def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
    """Handle known read-only errors during rmtree.

    Invoked as a ``shutil.rmtree`` onerror callback; *exc* is the
    ``sys.exc_info()`` triple.  The returned value is used only by our own
    tests.
    """
    exctype, excvalue = exc[:2]
    # Another process removed the file in the middle of the "rm_rf" (xdist for example).
    # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
    if isinstance(excvalue, FileNotFoundError):
        return False
    if not isinstance(excvalue, PermissionError):
        # Not a read-only problem: warn and give up on this entry.
        warnings.warn(
            PytestWarning(
                "(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue)
            )
        )
        return False
    if func not in (os.rmdir, os.remove, os.unlink):
        # PermissionError from an unexpected syscall; os.open is known and
        # not worth warning about.
        if func not in (os.open,):
            warnings.warn(
                PytestWarning(
                    "(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
                        func, path, exctype, excvalue
                    )
                )
            )
        return False
    # Chmod + retry.
    import stat

    def chmod_rw(p: str) -> None:
        # Grant the owner read+write on *p*, preserving other mode bits.
        mode = os.stat(p).st_mode
        os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)

    # For files, we need to recursively go upwards in the directories to
    # ensure they all are also writable.
    p = Path(path)
    if p.is_file():
        for parent in p.parents:
            chmod_rw(str(parent))
            # Stop when we reach the original path passed to rm_rf.
            if parent == start_path:
                break
    chmod_rw(str(path))
    # Retry the failing operation now that permissions allow it.
    func(path)
    return True
def ensure_extended_length_path(path: Path) -> Path:
    r"""Return the extended-length version of a path (Windows only).

    On Windows the default maximum path length (MAX_PATH) is 260
    characters; prefixing an absolute path with ``\\?\`` lifts that limit:
    https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation

    On other platforms *path* is returned unchanged.
    """
    if not sys.platform.startswith("win32"):
        return path
    resolved = path.resolve()
    return Path(get_extended_length_path_str(str(resolved)))
def get_extended_length_path_str(path: str) -> str:
    r"""Convert *path* to a Windows extended-length path string.

    Already-prefixed paths are returned unchanged; UNC paths get the
    ``\\?\UNC\`` prefix, everything else gets ``\\?\``.
    """
    long_prefix = "\\\\?\\"
    unc_long_prefix = "\\\\?\\UNC\\"
    if path.startswith((long_prefix, unc_long_prefix)):
        return path
    # UNC paths start with two backslashes which the prefix replaces.
    if path.startswith("\\\\"):
        return unc_long_prefix + path[2:]
    return long_prefix + path
def rm_rf(path: Path) -> None:
    """Remove the path contents recursively, even if some elements
    are read-only."""
    target = ensure_extended_length_path(path)
    handler = partial(on_rm_rf_error, start_path=target)
    shutil.rmtree(str(target), onerror=handler)
def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
    """Yield the entries of *root* whose name starts with *prefix*,
    compared case-insensitively."""
    wanted = prefix.lower()
    yield from (
        entry for entry in root.iterdir() if entry.name.lower().startswith(wanted)
    )
def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
    """Yield, for each path, the part of its final component after *prefix*.

    :param iter: Iterator over path names.
    :param prefix: Expected prefix of the path names.
    """
    cut = len(prefix)
    return (entry.name[cut:] for entry in iter)
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
    """Combine find_prefixed and extract_suffixes: yield the suffixes of
    all entries of *root* beginning with *prefix*."""
    matching = find_prefixed(root, prefix)
    return extract_suffixes(matching, prefix)
def parse_num(maybe_num) -> int:
    """Parse a numbered-directory suffix; return -1 when it is not an integer."""
    try:
        value = int(maybe_num)
    except ValueError:
        value = -1
    return value
def _force_symlink(
    root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
) -> None:
    """Best-effort (re)creation of the "current" symlink.

    Full of race conditions that are reasonably OK to ignore here: the
    link only points at the latest test run, so occasional inaccuracy
    under heavy parallelism is acceptable.
    """
    symlink = root.joinpath(target)
    with contextlib.suppress(OSError):
        symlink.unlink()
    with contextlib.suppress(Exception):
        symlink.symlink_to(link_to)
def make_numbered_dir(root: Path, prefix: str) -> Path:
    """Create ``<prefix><N>`` under *root*, where N is one above the
    highest existing numeric suffix.

    Retries up to 10 times when another process races us to the same
    number; also refreshes the ``<prefix>current`` convenience symlink.
    """
    for _attempt in range(10):
        highest = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
        candidate = root.joinpath("{}{}".format(prefix, highest + 1))
        try:
            candidate.mkdir()
        except Exception:
            # Somebody else created this number first; recompute and retry.
            continue
        _force_symlink(root, prefix + "current", candidate)
        return candidate
    raise OSError(
        "could not create numbered dir with prefix "
        "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
    )
def create_cleanup_lock(p: Path) -> Path:
    """Create a lock file inside *p* to prevent premature folder cleanup.

    Raises OSError when the lock already exists or disappears right after
    creation; returns the lock path on success.
    """
    lock_path = get_lock_path(p)
    try:
        fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
    except FileExistsError as e:
        raise OSError("cannot create lockfile in {path}".format(path=p)) from e
    # Record the owning pid so stale locks can be diagnosed.
    os.write(fd, str(os.getpid()).encode())
    os.close(fd)
    if not lock_path.is_file():
        raise OSError("lock path got renamed after successful creation")
    return lock_path
def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
    """Arrange for *lock_path* to be removed at process exit (by default
    via atexit).

    The removal is skipped in forked children (pid mismatch) and silently
    ignores a lock that has already disappeared.
    """
    owning_pid = os.getpid()

    def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = owning_pid) -> None:
        if os.getpid() != original_pid:
            # We are in a forked child; the parent owns the lock.
            return
        with contextlib.suppress(OSError):
            lock_path.unlink()

    return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path: Path) -> None:
    """Remove a numbered directory if its lock can be obtained and it does
    not seem to be in use."""
    path = ensure_extended_length_path(path)
    lock_path = None
    try:
        lock_path = create_cleanup_lock(path)
        parent = path.parent

        # Rename first so concurrent users stop seeing the directory,
        # then delete the uniquely-named "garbage" tree.
        garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
        path.rename(garbage)
        rm_rf(garbage)
    except OSError:
        # known races:
        # * other process did a cleanup at the same time
        # * deletable folder was found
        # * process cwd (Windows)
        return
    finally:
        # If we created the lock, ensure we remove it even if we failed
        # to properly remove the numbered dir.
        if lock_path is not None:
            try:
                lock_path.unlink()
            except OSError:
                pass
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
    """Decide whether *path* may be deleted, based on its lock file.

    Deletable when there is no lock, or when the lock is older than
    *consider_lock_dead_if_created_before* (the stale lock is then removed
    on a best-effort basis).  Symlinks are never considered deletable.
    """
    if path.is_symlink():
        return False
    lock = get_lock_path(path)
    try:
        has_lock = lock.is_file()
    except OSError:
        # we might not have access to the lock file at all, in this case assume
        # we don't have access to the entire directory (#7491).
        return False
    if not has_lock:
        return True
    try:
        lock_time = lock.stat().st_mtime
    except Exception:
        return False
    if lock_time >= consider_lock_dead_if_created_before:
        return False
    # The lock is stale: drop it, ignoring errors such as a permission
    # change since creation or another pytest process removing it first.
    with contextlib.suppress(OSError):
        lock.unlink()
    return True
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
    """Delete *path* if (and only if) it can be established as deletable."""
    deletable = ensure_deletable(path, consider_lock_dead_if_created_before)
    if deletable:
        maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
    """Yield numbered directories under *root* old enough to be removed,
    retaining the newest *keep* entries - follows py.path."""
    max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
    max_delete = max_existing - keep
    entries, entries_for_numbers = itertools.tee(find_prefixed(root, prefix))
    numbers = map(parse_num, extract_suffixes(entries_for_numbers, prefix))
    for entry, number in zip(entries, numbers):
        if number <= max_delete:
            yield entry
def cleanup_numbered_dir(
    root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
    """Cleanup for lock driven numbered directories.

    Removes stale numbered directories first, then any leftover
    half-deleted "garbage-*" trees.
    """
    for candidate in cleanup_candidates(root, prefix, keep):
        try_cleanup(candidate, consider_lock_dead_if_created_before)
    for leftover in root.glob("garbage-*"):
        try_cleanup(leftover, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(
    root: Path, prefix: str, keep: int, lock_timeout: float
) -> Path:
    """Create a numbered dir with a cleanup lock and remove old ones.

    Retries up to 10 times on races; re-raises the last failure if no
    attempt succeeds.
    """
    last_exc = None
    for _attempt in range(10):
        try:
            new_dir = make_numbered_dir(root, prefix)
            lock_path = create_cleanup_lock(new_dir)
            register_cleanup_lock_removal(lock_path)
        except Exception as exc:
            last_exc = exc
            continue
        # Locks older than this are considered dead at cleanup time.
        consider_lock_dead_if_created_before = new_dir.stat().st_mtime - lock_timeout
        # Register a cleanup for program exit
        atexit.register(
            cleanup_numbered_dir,
            root,
            prefix,
            keep,
            consider_lock_dead_if_created_before,
        )
        return new_dir
    assert last_exc is not None
    raise last_exc
def resolve_from_str(input: str, rootpath: Path) -> Path:
    """Resolve *input* (which may contain ``~`` and environment variables)
    to a Path, anchoring relative inputs at *rootpath*."""
    expanded = expandvars(expanduser(input))
    if isabs(expanded):
        return Path(expanded)
    return rootpath.joinpath(expanded)
def fnmatch_ex(pattern: str, path) -> bool:
    """A port of FNMatcher from py.path.common which works with PurePath() instances.

    Unlike PurePath.match(), a "**" in *pattern* may span multiple path
    components because the comparison is done against the whole path
    string rather than part by part.  For example,
    "tests/foo/bar/doc/test_foo.py" matches "tests/**/doc/test*.py" with
    this algorithm but not with PurePath.match().  Kept for
    backward-compatibility with existing settings.

    References:
    * https://bugs.python.org/issue29249
    * https://bugs.python.org/issue34731
    """
    path = PurePath(path)
    on_windows = sys.platform.startswith("win")
    if on_windows and sep not in pattern and posix_sep in pattern:
        # The pattern was written with Posix separators but we are running
        # on Windows: normalize it to the native separator first.
        pattern = pattern.replace(posix_sep, sep)
    # A separator-free pattern matches only the final component.
    name = path.name if sep not in pattern else str(path)
    if path.is_absolute() and not os.path.isabs(pattern):
        pattern = "*{}{}".format(os.sep, pattern)
    return fnmatch.fnmatch(name, pattern)
def parts(s: str) -> Set[str]:
    """Return every ancestor prefix of path string *s*, including *s* itself
    (an empty prefix is represented by the separator)."""
    pieces = s.split(sep)
    return {sep.join(pieces[:n]) or sep for n in range(1, len(pieces) + 1)}
def symlink_or_skip(src, dst, **kwargs):
    """Create a symlink from *src* to *dst*, or skip the current test when
    the platform does not support symlinks."""
    try:
        os.symlink(str(src), str(dst), **kwargs)
    except OSError as exc:
        skip("symlinks not supported: {}".format(exc))
class ImportMode(Enum):
    """Possible values for `mode` parameter of `import_path`."""

    prepend = "prepend"  # insert the module's root dir at sys.path[0]
    append = "append"  # append the root dir to sys.path (if missing)
    importlib = "importlib"  # import via importlib, leaving sys.path alone
class ImportPathMismatchError(ImportError):
    """Raised on import_path() if there is a mismatch of __file__'s.

    This can happen when `import_path` is called multiple times with different filenames that has
    the same basename but reside in packages
    (for example "/tests1/test_foo.py" and "/tests2/test_foo.py").

    Raised with args ``(module_name, module_file, path)``.
    """
def import_path(
    p: Union[str, py.path.local, Path],
    *,
    mode: Union[str, ImportMode] = ImportMode.prepend
) -> ModuleType:
    """Import and return a module from the given path, which can be a file (a module) or
    a directory (a package).

    The import mechanism used is controlled by the `mode` parameter:

    * `mode == ImportMode.prepend`: the directory containing the module (or package, taking
      `__init__.py` files into account) will be put at the *start* of `sys.path` before
      being imported with `__import__.

    * `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
      to the end of `sys.path`, if not already in `sys.path`.

    * `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
      to import the module, which avoids having to use `__import__` and muck with `sys.path`
      at all. It effectively allows having same-named test modules in different places.

    :raises ImportPathMismatchError:
        If after importing the given `path` and the module `__file__`
        are different. Only raised in `prepend` and `append` modes.
    """
    mode = ImportMode(mode)

    path = Path(str(p))

    if not path.exists():
        raise ImportError(path)

    if mode is ImportMode.importlib:
        module_name = path.stem

        # Give registered meta importers a chance to supply the spec first.
        for meta_importer in sys.meta_path:
            spec = meta_importer.find_spec(module_name, [str(path.parent)])
            if spec is not None:
                break
        else:
            spec = importlib.util.spec_from_file_location(module_name, str(path))

        if spec is None:
            raise ImportError(
                "Can't find module {} at location {}".format(module_name, str(path))
            )
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)  # type: ignore[union-attr]
        return mod

    # prepend/append modes: derive a dotted module name from the package layout.
    pkg_path = resolve_package_path(path)
    if pkg_path is not None:
        pkg_root = pkg_path.parent
        names = list(path.with_suffix("").relative_to(pkg_root).parts)
        # A package's __init__ is imported under the package's own name.
        if names[-1] == "__init__":
            names.pop()
        module_name = ".".join(names)
    else:
        # Not inside a package: import as a top-level module.
        pkg_root = path.parent
        module_name = path.stem

    # Change sys.path permanently: restoring it at the end of this function would cause surprising
    # problems because of delayed imports: for example, a conftest.py file imported by this function
    # might have local imports, which would fail at runtime if we restored sys.path.
    if mode is ImportMode.append:
        if str(pkg_root) not in sys.path:
            sys.path.append(str(pkg_root))
    elif mode is ImportMode.prepend:
        if str(pkg_root) != sys.path[0]:
            sys.path.insert(0, str(pkg_root))
    else:
        assert_never(mode)

    importlib.import_module(module_name)

    mod = sys.modules[module_name]
    if path.name == "__init__.py":
        return mod

    # Guard against importing a same-named module from a different location
    # (see ImportPathMismatchError); can be disabled via environment variable.
    ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
    if ignore != "1":
        module_file = mod.__file__
        # Normalize compiled-file and package __init__ forms for comparison.
        if module_file.endswith((".pyc", ".pyo")):
            module_file = module_file[:-1]
        if module_file.endswith(os.path.sep + "__init__.py"):
            module_file = module_file[: -(len(os.path.sep + "__init__.py"))]

        try:
            is_same = os.path.samefile(str(path), module_file)
        except FileNotFoundError:
            is_same = False

        if not is_same:
            raise ImportPathMismatchError(module_name, module_file, path)

    return mod
def resolve_package_path(path: Path) -> Optional[Path]:
    """Return the topmost package directory containing *path*.

    Walks upwards from *path* while every directory still contains an
    ``__init__.py`` and has a valid identifier name; returns None when
    *path* is not inside a package at all.
    """
    found = None
    for candidate in itertools.chain((path,), path.parents):
        if not candidate.is_dir():
            continue
        if not candidate.joinpath("__init__.py").is_file():
            break
        if not candidate.name.isidentifier():
            break
        found = candidate
    return found
def visit(
    path: str, recurse: Callable[["os.DirEntry[str]"], bool]
) -> Iterator["os.DirEntry[str]"]:
    """Walk *path* recursively, breadth-first, yielding directory entries.

    Entries at each level are sorted by name; *recurse* decides whether a
    directory entry is descended into.
    """
    entries = sorted(os.scandir(path), key=lambda entry: entry.name)
    for entry in entries:
        yield entry
    for entry in entries:
        if entry.is_dir(follow_symlinks=False) and recurse(entry):
            yield from visit(entry.path, recurse)
def absolutepath(path: Union[Path, str]) -> Path:
    """Convert a path to an absolute path using os.path.abspath.

    Prefer this over Path.resolve() (see #6523).
    Prefer this over Path.absolute() (not public, doesn't normalize).
    """
    as_string = os.path.abspath(str(path))
    return Path(as_string)
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
    """Return the deepest path shared by both arguments, or None when they
    have no common part (e.g. mixing absolute and relative paths)."""
    try:
        shared = os.path.commonpath((str(path1), str(path2)))
    except ValueError:
        return None
    return Path(shared)
def bestrelpath(directory: Path, dest: Path) -> str:
    """Return a string which is a relative path from directory to dest such
    that directory/bestrelpath == dest.

    If no such path can be determined, returns dest.
    """
    if dest == directory:
        return os.curdir
    # Find the longest common directory.
    base = commonpath(directory, dest)
    if not base:
        # Can be the case on Windows (no shared drive).
        return str(dest)
    # Steps back from *directory* up to *base*, then forward to *dest*.
    back_steps = [os.pardir] * len(directory.relative_to(base).parts)
    forward_parts = dest.relative_to(base).parts
    return os.path.join(*back_steps, *forward_parts)
|
TeamSPoon/logicmoo_workspace
|
packs_web/butterfly/lib/python3.7/site-packages/_pytest/pathlib.py
|
Python
|
mit
| 20,034
|
[
"VisIt"
] |
caaa607b1decb33402ad43f78a4eef33a8c7718846271db6037fb102110eaf7f
|
"""
Cardiac image multi-atlas segmentation pipeline
Author: Wenjia Bai
First created: 2015.03.31
Last modified: 2016.11.21 by wbai
"""
import os
def segment_data(image_name, landmarks_name, output_dir, atlas_root, atlas_list, template_dir, par_dir, remove_temp=True):
# Check files
if not os.path.exists(image_name):
print('Error: no image. Please provide the cardiac image.')
return
if not os.path.exists(landmarks_name):
print('Error: no landmarks. Please provide the landmarks.')
return
# Create temporary directory for intermediate results in multi-atlas segmentation
temp_dir = os.path.join(output_dir, 'temp')
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
dof_dir = os.path.join(output_dir, 'dof')
if not os.path.exists(dof_dir):
os.mkdir(dof_dir)
# Split temporal sequence and pick the ED and ES time frames
os.system('splitvolume {0} {1}/sa_fr -sequence'.format(image_name, temp_dir))
ES = int(os.popen('detect_ES_frame {0}'.format(image_name)).read())
os.system('cp {0}/sa_fr00.nii.gz {0}/sa_ED.nii.gz'.format(temp_dir))
os.system('cp {0}/sa_fr{1:02d}.nii.gz {0}/sa_ES.nii.gz'.format(temp_dir, ES))
# Check the number of atlases
n_atlas = len(atlas_list)
# Step 1: multi-atlas segmentation using point-based registration at the ED frame
# The landmarks can also be automatically detected using Ozan Oktak's method
image_props = ''
label_props = ''
for atlas in atlas_list:
atlas_dir = os.path.join(atlas_root, atlas)
os.system('prreg {0} {1}/landmarks.vtk -dofout {2}/prreg_{3}.dof.gz'.format(landmarks_name, atlas_dir, temp_dir, atlas))
os.system('mirtk transform-image {0}/lvsa_ED.nii.gz {1}/image_ED_prreg_from_{2}.nii.gz \
-dofin {1}/prreg_{2}.dof.gz -target {1}/sa_ED.nii.gz -interp Linear'.format(atlas_dir, temp_dir, atlas))
os.system('mirtk transform-image {0}/segmentation_ED.nii.gz {1}/label_ED_prreg_from_{2}.nii.gz \
-dofin {1}/prreg_{2}.dof.gz -target {1}/sa_ED.nii.gz -interp NN'.format(atlas_dir, temp_dir, atlas))
image_props += ' {0}/image_ED_prreg_from_{1}.nii.gz'.format(temp_dir, atlas)
label_props += ' {0}/label_ED_prreg_from_{1}.nii.gz'.format(temp_dir, atlas)
if not os.path.exists('{0}/seg_ED_prreg_mv.nii.gz'.format(temp_dir)):
os.system('label_fusion {0}/sa_ED.nii.gz {1} {2} {3} {0}/seg_ED_prreg_mv.nii.gz -method MV'.format( \
temp_dir, n_atlas, image_props, label_props))
# Step 2: Pre-processing, including cropping the region of the heart and stretching the intensity histogram
# Crop the image for two purposes:
# (1) Save computation time
# (2) Non-rigid registration can be more accurate since the cost function will focus on the region of interest
os.system('auto_crop_image {0}/seg_ED_prreg_mv.nii.gz {0}/seg_ED_prreg_mv_crop.nii.gz -reserve 20'.format(temp_dir))
os.system('region {0}/sa_ED.nii.gz {0}/sa_ED_crop.nii.gz -ref {0}/seg_ED_prreg_mv_crop.nii.gz'.format(temp_dir))
os.system('region {0}/sa_ES.nii.gz {0}/sa_ES_crop.nii.gz -ref {0}/seg_ED_prreg_mv_crop.nii.gz'.format(temp_dir))
# Stretch the intensity histogram
# This step will reduce the intensity range of the image so that in the following image registrations,
# the bin width of normalised mutual information (NMI) will be smaller and represent the intensities more accurately.
os.system('stretch_contrast {0}/sa_ED_crop.nii.gz {0}/sa_ED_crop.nii.gz'.format(temp_dir))
os.system('stretch_contrast {0}/sa_ES_crop.nii.gz {0}/sa_ES_crop.nii.gz'.format(temp_dir))
# Step 3: multi-atlas segmentation using affine image registration at the ED frame
target = '{0}/sa_ED_crop.nii.gz'.format(temp_dir)
image_props = ''
label_props = ''
for atlas in atlas_list:
atlas_dir = os.path.join(atlas_root, atlas)
if not os.path.exists('{0}/affine_init_ED_{1}.dof.gz'.format(temp_dir, atlas)):
os.system('mirtk register {0} {1}/lvsa_ED.nii.gz -parin {2}/affine.cfg \
-dofin {3}/prreg_{4}.dof.gz -dofout {3}/affine_init_ED_{4}.dof.gz'.format( \
target, atlas_dir, par_dir, temp_dir, atlas))
os.system('mirtk transform-image {0}/lvsa_ED.nii.gz {1}/image_ED_affine_init_from_{2}.nii.gz \
-dofin {1}/affine_init_ED_{2}.dof.gz -target {3} -interp Linear'.format(atlas_dir, temp_dir, atlas, target))
os.system('mirtk transform-image {0}/segmentation_ED.nii.gz {1}/label_ED_affine_init_from_{2}.nii.gz \
-dofin {1}/affine_init_ED_{2}.dof.gz -target {3} -interp NN'.format(atlas_dir, temp_dir, atlas, target))
image_props += ' {0}/image_ED_affine_init_from_{1}.nii.gz'.format(temp_dir, atlas)
label_props += ' {0}/label_ED_affine_init_from_{1}.nii.gz'.format(temp_dir, atlas)
# Cross correlation is used by default as similarity metric in label fusion in case the target image
# has a different intensity distribution from the atlas image. However, if the target image is acquired
# using the same protocol as the atlas image, mean squared difference can be a better similarity metric.
seg = '{0}/seg_ED_affine_init_pbcc.nii.gz'.format(temp_dir)
if not os.path.exists(seg):
os.system('label_fusion {0} {1} {2} {3} {4} -method PBCC -par {5}/pbcc.cfg'.format( \
target, n_atlas, image_props, label_props, seg, par_dir))
# Step 4: Register label maps to refine affine registration in case landmarks are not detected accurately
target_seg = seg
for atlas in atlas_list:
atlas_dir = os.path.join(atlas_root, atlas)
if not os.path.exists('{0}/affine_label_ED_{1}.dof.gz'.format(temp_dir, atlas)):
os.system('mirtk register {0} {1}/segmentation_ED.nii.gz -parin {2}/affine_label.cfg -dofin {3}/prreg_{4}.dof.gz \
-dofout {3}/affine_label_ED_{4}.dof.gz'.format(target_seg, atlas_dir, par_dir, temp_dir, atlas))
# For each time frame
for fr in ['ED', 'ES']:
# Image to be segmented
target = '{0}/sa_{1}_crop.nii.gz'.format(temp_dir, fr)
# Step 5: multi-atlas segmentation based on non-rigid image registration
image_props = ''
label_props = ''
for atlas in atlas_list:
atlas_dir = os.path.join(atlas_root, atlas)
if not os.path.exists('{0}/ffd_{1}_{2}.dof.gz'.format(temp_dir, fr, atlas)):
os.system('mirtk register {0} {1}/lvsa_{2}.nii.gz -parin {3}/ffd.cfg \
-dofin {4}/affine_label_ED_{5}.dof.gz -dofout {4}/ffd_{2}_{5}.dof.gz'.format( \
target, atlas_dir, fr, par_dir, temp_dir, atlas))
os.system('mirtk transform-image {0}/lvsa_{2}.nii.gz {1}/image_{2}_ffd_from_{3}.nii.gz \
-dofin {1}/ffd_{2}_{3}.dof.gz -target {4} -interp Linear'.format(atlas_dir, temp_dir, fr, atlas, target))
os.system('mirtk transform-image {0}/segmentation_{2}.nii.gz {1}/label_{2}_ffd_from_{3}.nii.gz \
-dofin {1}/ffd_{2}_{3}.dof.gz -target {4} -interp NN'.format(atlas_dir, temp_dir, fr, atlas, target))
image_props += ' {0}/image_{1}_ffd_from_{2}.nii.gz'.format(temp_dir, fr, atlas)
label_props += ' {0}/label_{1}_ffd_from_{2}.nii.gz'.format(temp_dir, fr, atlas)
# Cross correlation is used by default as similarity metric in label fusion in case the target image
# has a different intensity distribution from the atlas image. However, if the target image is acquired
# using the same protocol as the atlas image, mean squared difference can be a better similarity metric.
seg = '{0}/seg_{1}_ffd_pbcc_{2}a.nii.gz'.format(temp_dir, fr, n_atlas)
if not os.path.exists(seg):
os.system('label_fusion {0} {1} {2} {3} {4} -method PBCC -par {5}/pbcc.cfg'.format( \
target, n_atlas, image_props, label_props, seg, par_dir))
# Step 6: Refine image registration based on the segmentation
target_seg = seg
image_props = ''
label_props = ''
for atlas in atlas_list:
atlas_dir = os.path.join(atlas_root, atlas)
if not os.path.exists('{0}/ffd_label_{1}_{2}.dof.gz'.format(temp_dir, fr, atlas)):
os.system('mirtk register {0} {1}/segmentation_{2}.nii.gz -parin {3}/ffd_label.cfg \
-dofin {4}/affine_label_ED_{5}.dof.gz -dofout {4}/ffd_label_{2}_{5}.dof.gz'.format( \
target_seg, atlas_dir, fr, par_dir, temp_dir, atlas))
os.system('mirtk transform-image {0}/lvsa_{2}.nii.gz {1}/image_{2}_ffd_label_from_{3}.nii.gz \
-dofin {1}/ffd_label_{2}_{3}.dof.gz -target {4} -interp Linear'.format(atlas_dir, temp_dir, fr, atlas, target))
os.system('mirtk transform-image {0}/segmentation_{2}.nii.gz {1}/label_{2}_ffd_label_from_{3}.nii.gz \
-dofin {1}/ffd_label_{2}_{3}.dof.gz -target {4} -interp NN'.format(atlas_dir, temp_dir, fr, atlas, target))
image_props += ' {0}/image_{1}_ffd_label_from_{2}.nii.gz'.format(temp_dir, fr, atlas)
label_props += ' {0}/label_{1}_ffd_label_from_{2}.nii.gz'.format(temp_dir, fr, atlas)
# Multi-atlas segmentation
seg = '{0}/seg_{1}_ffd_label_pbcc_{2}a.nii.gz'.format(temp_dir, fr, n_atlas)
if not os.path.exists(seg):
os.system('label_fusion {0} {1} {2} {3} {4} -method PBCC -par {5}/pbcc.cfg'.format( \
target, n_atlas, image_props, label_props, seg, par_dir))
# Step 7: Estimate transformation from target to template
# This will be used for propagating the template segmentation onto the target image
os.system('prreg {0} {1}/landmarks_ED.vtk -dofout {2}/prreg_template.dof.gz'.format(landmarks_name, template_dir, temp_dir))
os.system('mirtk invert-dof {0}/prreg_template.dof.gz {0}/prreg_inv_template.dof.gz'.format(temp_dir))
target_seg = seg
template_label = '{0}/label_map_{1}.nii.gz'.format(template_dir, fr)
# Use rigid + ffd
# If I use affine in between, sometimes affine registration may create very strange transformation results,
# which moves source image out of the FOV.
if not os.path.exists('{0}/rigid_label_{1}_template.dof.gz'.format(temp_dir, fr)):
os.system('mirtk register {0} {1} -parin {2}/rigid_label.cfg -dofin {3}/prreg_template.dof.gz \
-dofout {3}/rigid_label_{4}_template.dof.gz'.format(target_seg, template_label, par_dir, temp_dir, fr))
if not os.path.exists('{0}/ffd_label_{1}_template.dof.gz'.format(temp_dir, fr)):
os.system('mirtk register {0} {1} -parin {2}/ffd_label_fine.cfg -dofin {3}/rigid_label_{4}_template.dof.gz \
-dofout {3}/ffd_label_{4}_template.dof.gz'.format(target_seg, template_label, par_dir, temp_dir, fr))
seg_fit = '{0}/seg_{1}.nii.gz'.format(temp_dir, fr)
os.system('mirtk transform-image {0} {1} -dofin {2}/ffd_label_{3}_template.dof.gz \
-target {2}/sa_{3}.nii.gz -interp NN'.format(template_label, seg_fit, temp_dir, fr))
# Fit the AHA 17-segment model for the ED phase only
if fr == 'ED':
aha_label = '{0}/myo_{1}_AHA17.nii.gz'.format(template_dir, fr)
aha_fit = '{0}/myo_{1}_AHA17.nii.gz'.format(temp_dir, fr)
os.system('mirtk transform-image {0} {1} -dofin {2}/ffd_label_{3}_template.dof.gz \
-target {2}/sa_{3}.nii.gz -interp NN'.format(aha_label, aha_fit, temp_dir, fr))
# Step 8: Estimate transformation from template to target
# This will be used for propagating mesh onto the target image
# I have found that it can be numerically very unstable to invert a non-rigid transformation,
# no matter how much care you have taken in performing the registration. So the best solution
# to propagate the mesh is to perfrom registration from template to target.
# To ensure the low-resolution segmentation is not too bad for using as a target image in subsequent registration,
# we upsample the segmentation.
os.system('resample {0}/sa_{1}.nii.gz {0}/sa_{1}_up.nii.gz -size 1.25 1.25 2 -linear'.format(temp_dir, fr))
os.system('mirtk transform-image {0}/label_map_{1}.nii.gz {2}/seg_{1}_up.nii.gz \
-dofin {2}/ffd_label_{1}_template.dof.gz -target {2}/sa_{1}_up.nii.gz -interp NN'.format(template_dir, fr, temp_dir))
template_label = '{0}/label_map_{1}.nii.gz'.format(template_dir, fr)
target_seg = '{0}/seg_{1}_up.nii.gz'.format(temp_dir, fr)
# Use rigid + ffd
# If I use affine in between, sometimes affine registration may create very strange transformation results,
# which moves source image out of the FOV.
if not os.path.exists('{0}/rigid_label_inv_{1}_template.dof.gz'.format(temp_dir, fr)):
os.system('mirtk register {0} {1} -parin {2}/rigid_label.cfg -dofin {3}/prreg_inv_template.dof.gz \
-dofout {3}/rigid_label_inv_{4}_template.dof.gz'.format(template_label, target_seg, par_dir, temp_dir, fr))
if not os.path.exists('{0}/ffd_label_inv_{1}_template.dof.gz'.format(temp_dir, fr)):
os.system('mirtk register {0} {1} -parin {2}/ffd_label_fine.cfg -dofin {3}/rigid_label_inv_{4}_template.dof.gz \
-dofout {3}/ffd_label_inv_{4}_template.dof.gz'.format(template_label, target_seg, par_dir, temp_dir, fr))
# Fit the meshes
for part in ['myo', 'endo', 'epi', 'rv', 'heart']:
template_mesh = '{0}/{1}_{2}.vtk'.format(template_dir, part, fr)
mesh_fit = '{0}/{1}_{2}.vtk'.format(temp_dir, part, fr)
os.system('mirtk transform-points {0} {1} -dofin {2}/ffd_label_inv_{3}_template.dof.gz'.format(template_mesh, mesh_fit, temp_dir, fr))
os.system('surface_smooth {0} {0} 500 -relaxation 0.01'.format(mesh_fit))
# Step 8: copy results to the output directory
os.system('cp {0}/sa_{1}.nii.gz {2}'.format(temp_dir, fr, output_dir))
os.system('cp {0}/seg_{1}.nii.gz {2}'.format(temp_dir, fr, output_dir))
os.system('cp {0}/seg_{1}_up.nii.gz {2}'.format(temp_dir, fr, output_dir))
if fr == 'ED':
os.system('cp {0}/myo_{1}_AHA17.nii.gz {2}'.format(temp_dir, fr, output_dir))
for part in ['myo', 'endo', 'epi', 'rv', 'heart']:
os.system('cp {0}/{1}_{2}.vtk {3}'.format(temp_dir, part, fr, output_dir))
os.system('cp {0}/rigid_label_{1}_template.dof.gz {2}'.format(temp_dir, fr, dof_dir))
os.system('cp {0}/rigid_label_inv_{1}_template.dof.gz {2}'.format(temp_dir, fr, dof_dir))
os.system('cp {0}/ffd_label_{1}_template.dof.gz {2}'.format(temp_dir, fr, dof_dir))
os.system('cp {0}/ffd_label_inv_{1}_template.dof.gz {2}'.format(temp_dir, fr, dof_dir))
# Remove temporary directory
if remove_temp:
os.system('rm -rf {0}'.format(temp_dir))
|
baiwenjia/CIMAS
|
cimas.py
|
Python
|
apache-2.0
| 15,158
|
[
"VTK"
] |
ab8fe050024c4faab69f0f108fdb4bd0b3f01f6f2710fa43861f3b15f18447b1
|
import pytest
class TestHasCheckedFiled:
    """session.has_checked_field(): true when a checkbox or radio button
    matching the locator exists on the page and is currently checked.

    NOTE(review): "Filed" in the class name looks like a typo for "Field";
    renaming would change the reported pytest ids, so it is kept as-is.
    """

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # Every test starts from a freshly visited /form fixture page.
        session.visit("/form")

    def test_is_true_if_a_checked_field_is_on_the_page(self, session):
        # Matches by element id and by label text.
        assert session.has_checked_field("gender_female")
        assert session.has_checked_field("Hamster")

    def test_is_true_for_disabled_checkboxes_if_disabled_is_true(self, session):
        assert session.has_checked_field("Disabled Checkbox", disabled=True)

    def test_is_false_if_an_unchecked_field_is_on_the_page(self, session):
        assert not session.has_checked_field("form_pets_cat")
        assert not session.has_checked_field("Male")

    def test_is_false_if_no_field_is_on_the_page(self, session):
        assert not session.has_checked_field("Does Not Exist")

    def test_is_false_for_disabled_checkboxes_by_default(self, session):
        # Default behavior ignores disabled fields entirely.
        assert not session.has_checked_field("Disabled Checkbox")

    def test_is_false_for_disabled_checkboxes_if_disabled_is_false(self, session):
        assert not session.has_checked_field("Disabled Checkbox", disabled=False)

    def test_is_true_for_disabled_checkboxes_if_disabled_is_all(self, session):
        # disabled="all" matches both enabled and disabled fields.
        assert session.has_checked_field("Disabled Checkbox", disabled="all")

    def test_is_true_for_enabled_checkboxes_if_disabled_is_all(self, session):
        assert session.has_checked_field("gender_female", disabled="all")

    def test_is_true_after_an_unchecked_checkbox_is_checked(self, session):
        session.check("form_pets_cat")
        assert session.has_checked_field("form_pets_cat")

    def test_is_false_after_a_checked_checkbox_is_unchecked(self, session):
        session.uncheck("form_pets_dog")
        assert not session.has_checked_field("form_pets_dog")

    def test_is_true_after_an_unchecked_radio_button_is_chosen(self, session):
        session.choose("gender_male")
        assert session.has_checked_field("gender_male")

    def test_is_false_after_another_radio_button_in_the_group_is_chosen(self, session):
        # Choosing one radio button unchecks the rest of its group.
        session.choose("gender_male")
        assert not session.has_checked_field("gender_female")
class TestHasNoCheckedFiled:
    """session.has_no_checked_field(): logical negation of
    has_checked_field (each case here mirrors one in the positive suite).

    NOTE(review): "Filed" in the class name looks like a typo for "Field";
    renaming would change the reported pytest ids, so it is kept as-is.
    """

    @pytest.fixture(autouse=True)
    def setup_session(self, session):
        # Every test starts from a freshly visited /form fixture page.
        session.visit("/form")

    def test_is_false_if_a_checked_field_is_on_the_page(self, session):
        assert not session.has_no_checked_field("gender_female")
        assert not session.has_no_checked_field("Hamster")

    def test_is_false_for_disabled_checkboxes_if_disabled_is_true(self, session):
        assert not session.has_no_checked_field("Disabled Checkbox", disabled=True)

    def test_is_true_if_an_unchecked_field_is_on_the_page(self, session):
        assert session.has_no_checked_field("form_pets_cat")
        assert session.has_no_checked_field("Male")

    def test_is_true_if_no_field_is_on_the_page(self, session):
        assert session.has_no_checked_field("Does Not Exist")

    def test_is_true_for_disabled_checkboxes_by_default(self, session):
        # Default behavior ignores disabled fields entirely.
        assert session.has_no_checked_field("Disabled Checkbox")

    def test_is_true_for_disabled_checkboxes_if_disabled_is_false(self, session):
        assert session.has_no_checked_field("Disabled Checkbox", disabled=False)

    def test_is_false_for_disabled_checkboxes_if_disabled_is_all(self, session):
        # disabled="all" matches both enabled and disabled fields.
        assert not session.has_no_checked_field("Disabled Checkbox", disabled="all")

    def test_is_false_for_enabled_checkboxes_if_disabled_is_all(self, session):
        assert not session.has_no_checked_field("gender_female", disabled="all")

    def test_is_false_after_an_unchecked_checkbox_is_checked(self, session):
        session.check("form_pets_cat")
        assert not session.has_no_checked_field("form_pets_cat")

    def test_is_true_after_a_checked_checkbox_is_unchecked(self, session):
        session.uncheck("form_pets_dog")
        assert session.has_no_checked_field("form_pets_dog")

    def test_is_false_after_an_unchecked_radio_button_is_chosen(self, session):
        session.choose("gender_male")
        assert not session.has_no_checked_field("gender_male")

    def test_is_true_after_another_radio_button_in_the_group_is_chosen(self, session):
        session.choose("gender_male")
        assert session.has_no_checked_field("gender_female")
|
elliterate/capybara.py
|
capybara/tests/session/test_has_checked_field.py
|
Python
|
mit
| 4,316
|
[
"VisIt"
] |
11240a39ebc00be0b8999870648fc5705f33516ece128b5426e5473d3736a26a
|
from __future__ import division
from iotbx.pdb.multimer_reconstruction import multimer
import iotbx.pdb
import mmtbx.f_model
from cctbx import xray
import scitbx.lbfgs
import getpass
import os
import sys
from scitbx.array_family import flex
ncs_1_copy="""\
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.496590 -0.643597 0.582393 0.00000
MTRIX2 2 0.867925 0.376088 -0.324443 0.00000
MTRIX3 2 -0.010221 0.666588 0.745356 0.00000
MTRIX1 3 -0.317946 -0.173437 0.932111 0.00000
MTRIX2 3 0.760735 -0.633422 0.141629 0.00000
MTRIX3 3 0.565855 0.754120 0.333333 0.00000
ATOM 1 N THR A 1 9.670 10.289 11.135 1.00 20.00 N
ATOM 2 CA THR A 1 9.559 8.931 10.615 1.00 20.00 C
ATOM 3 C THR A 1 9.634 7.903 11.739 1.00 20.00 C
ATOM 4 O THR A 1 10.449 8.027 12.653 1.00 20.00 O
ATOM 5 CB THR A 1 10.660 8.630 9.582 1.00 20.00 C
ATOM 6 OG1 THR A 1 10.560 9.552 8.490 1.00 20.00 O
ATOM 7 CG2 THR A 1 10.523 7.209 9.055 1.00 20.00 C
TER
"""
class minimizer(object):
  """LBFGS driver implementing strict-NCS refinement.

  Only the parameters of a single NCS copy are refined (self.x); the full
  ASU model is rebuilt from them via the NCS rotations/translations, and
  gradients computed on the full ASU are averaged back onto the single
  copy.  The minimization runs as a side effect of construction.
  """

  def __init__(self,
        fmodel,
        ncs_transformations_object=None,
        ncs_atom_selection = None,
        run_finite_grad_differences_test = False,
        max_iterations=100,
        sites = False,
        u_iso = False):
    """Implementing strict NCS to refinement minimization
    Arguments:
    fmodel : fmodel of the complete ASU
    ncs_transformation_object : information on the NCS to ASU
      transformations and chains. A multimer object
    ncs_atom_selection : boolean array for selection of atoms in the NCS.
      A flex bool array
    run_finite_grad_differences_test : when True, compare analytic
      gradients against finite differences during minimization
    max_iterations : LBFGS iteration cap
    sites : refine coordinates
    u_iso : refine isotropic ADPs
    """
    self.fmodel = fmodel
    # Clear all gradient flags before selectively re-enabling below.
    self.fmodel.xray_structure.scatterers().flags_set_grads(state=False)
    self.x_target_functor = self.fmodel.target_functor()
    self.sites = sites
    self.u_iso = u_iso
    self.ncs_to_asu = ncs_transformations_object
    self.run_finite_grad_differences_test = run_finite_grad_differences_test
    if run_finite_grad_differences_test:
      # perform gradient calc test: buffers collect (analytic, numeric) pairs
      self.buffer_max_grad = flex.double()
      self.buffer_calc_grad = flex.double()
    # xray structure of NCS chains for self.x (the refined parameter vector)
    ncs_fmodel_xrs = self.fmodel.xray_structure.select(ncs_atom_selection)
    if(self.sites):
      self.x = ncs_fmodel_xrs.sites_cart().as_double()
    if(self.u_iso):
      assert ncs_fmodel_xrs.scatterers().size() == \
        ncs_fmodel_xrs.use_u_iso().count(True)
      self.x = ncs_fmodel_xrs.extract_u_iso_or_u_equiv()
    # Use all scatterers for gradient calculations: gradients are computed
    # for the whole ASU, then averaged over the NCS copies.
    if(self.sites):
      xray.set_scatterer_grad_flags(
        scatterers = self.fmodel.xray_structure.scatterers(),
        site = True)
    if(self.u_iso):
      sel = flex.bool(
        self.fmodel.xray_structure.scatterers().size(), True).iselection()
      self.fmodel.xray_structure.scatterers().flags_set_grad_u_iso(
        iselection = sel)
    # Run the minimization; LBFGS calls back into
    # compute_functional_and_gradients below.
    self.minimizer = scitbx.lbfgs.run(
      target_evaluator=self,
      termination_params=scitbx.lbfgs.termination_parameters(
        max_iterations=max_iterations),
      exception_handling_params=scitbx.lbfgs.exception_handling_parameters(
        ignore_line_search_failed_rounding_errors=True,
        ignore_line_search_failed_step_at_lower_bound=True,
        ignore_line_search_failed_maxfev=True))
    self.fmodel.xray_structure.tidy_us()
    self.fmodel.xray_structure.apply_symmetry_sites()
    self.fmodel.update_xray_structure(
      xray_structure = self.fmodel.xray_structure,
      update_f_calc = True)
    # self.tested counts successful finite-difference checks: incremented
    # when >50% of the compared gradient pairs agree to within 1e-3.
    self.tested = 0
    if run_finite_grad_differences_test:
      if self.buffer_max_grad:
        print 'compare max_grad to calc_grad'
        for a,f in zip(self.buffer_max_grad, self.buffer_calc_grad):
          print '{0:10.5f} {1:10.5f} delta = {2:10.5f}'.format(a,f,abs(a-f))
        print '-'*45
      diff = flex.abs(self.buffer_max_grad - self.buffer_calc_grad)
      s = diff < 1.e-3
      if(s.size()>0 and s.count(True)*100./s.size()>50):
        self.tested += 1

  def compute_functional_and_gradients(self,compute_gradients=True):
    """(bool) -> float, flex.double array
    Function which calculates the target function and gradients.
    It is called by the lbfgs minimizer
    Argument:
    compute_gradients : When True gradients are calculated

    NOTE(review): if neither self.sites nor self.u_iso is set, f (and g,
    when compute_gradients is True) would be unbound here -- the
    constructor is expected to be called with one of them True.
    """
    # Push the current parameter vector self.x into the full ASU model.
    if(self.sites):
      self.update_model_sites()
    elif(self.u_iso):
      self.update_model_asu_b_factors()
    self.fmodel.update_xray_structure(
      xray_structure = self.fmodel.xray_structure,
      update_f_calc = True)
    tgx = self.x_target_functor(compute_gradients=compute_gradients)
    if(self.sites):
      tx = tgx.target_work()
      f = tx
      if compute_gradients:
        gx = flex.vec3_double(tgx.\
          gradients_wrt_atomic_parameters(site=True).packed())
        # Average ASU gradients onto the single NCS copy (with rotation).
        g = self.average_grad(grad=gx,apply_rotation=True).as_double()
        if self.run_finite_grad_differences_test:
          self.finite_difference_test(g)
    if(self.u_iso):
      tx = tgx.target_work()
      f = tx
      if compute_gradients:
        gx = tgx.gradients_wrt_atomic_parameters(u_iso=True)
        g = self.average_grad(grad=gx,apply_rotation=False).as_double()
    if not compute_gradients:
      g = None
    return f, g

  def update_model_sites(self,x=None):
    """
    update fmodel using a complete ASU
    Argument
    x : sites coordinates of a single NCS (flex.double); defaults to self.x
    """
    if not x:
      x = self.x
    # rebuild the complete ASU coordinate from the NCS
    x_asu = self.rebuild_asu_from_ncs_coordinates(x)
    self.fmodel.xray_structure.set_sites_cart(
      sites_cart = flex.vec3_double(x_asu))

  def update_model_asu_b_factors(self,x_asu=None):
    """
    update fmodel using a complete set of B-factors
    All B-factors are the same
    Argument
    x_asu : B-factors of a single NCS
    """
    if not x_asu:
      x_asu = self.rebuild_asu_b_factors()
    self.fmodel.xray_structure.set_u_iso(values = x_asu)

  def rebuild_asu_b_factors(self):
    """
    Replicate the NCS-copy B-factors for every copy in the ASU (under
    strict NCS all copies share the same B-factors).
    """
    n = self.ncs_to_asu.number_of_transforms + 1
    x = list(self.x) * n
    return flex.double(x)

  def rebuild_asu_from_ncs_coordinates(self, x):
    """ apply rotation and translation to x
    Argument:
    x : sites coordinates of single NCS
    returns:
    new_x : coordinates of the original x and all coordinates resulting
      from application of rotation and translation.
      type scitbx_array_family_flex_ext.double
    """
    rotations = self.ncs_to_asu.rotation_matrices
    translations = self.ncs_to_asu.translation_vectors
    assert len(rotations)==len(translations)
    # The first copy is kept untransformed; each transform appends one copy.
    new_x = list(x)
    x = flex.vec3_double(x)
    for r,t in zip(rotations,translations):
      tmp_x = r.elems*x + t
      new_x += list(tmp_x.as_double())
    return flex.double(new_x)

  def average_grad(self,grad,apply_rotation=False):
    """(vec3_double,bool) -> vec3_double
    Argument:
    grad : the gradient of the complete ASU
    apply_rotation : If true, apply NCS rotation before averaging
    Returns:
    g_ave : The average the gradients of all NCS copies in the ASU
    """
    n = self.ncs_to_asu.number_of_transforms
    # gradients of the first NCS copy
    ncs_end = len(grad)//(n+1)
    assert ncs_end*(n+1)==len(grad)
    g_ave = grad[:ncs_end]
    for i in range(n):
      g = grad[ncs_end*(i+1):ncs_end*(i+2)]
      if apply_rotation:
        # multiply the transpose of the rotation of each NCS copy
        # gradients, by the gradients
        g = rt*g if False else g  # placeholder never executed
      g_ave += g
    # average the NCS copies contributions
    g_ave = g_ave.as_double()/(n+1)
    if apply_rotation: g_ave = flex.vec3_double(g_ave)
    assert type(grad)==type(g_ave)
    return g_ave

  def finite_difference_test(self,g):
    """
    Run basic gradient test. compare numerical estimate gradient to
    the largest calculated one. using t'(x)=(t(x+d)-t(x-d))/(2d)
    Argument:
    g : gradient, flex array
    """
    # Only meaningful away from convergence.
    if(self.fmodel.r_work()>1.e-3):
      g = g.as_double()
      d = 1.e-5
      # find the index of the max gradient value
      i_g_max = flex.max_index(flex.abs(g))
      # NOTE(review): this binds a reference, not a copy -- the element
      # writes below also modify self.x.  Confirm a deep copy is not
      # required for the centered difference to be computed at x +/- d.
      x_d = self.x
      # calc t(x+d)
      x_d[i_g_max] = self.x[i_g_max] + d
      self.update_model_sites(x = x_d)
      self.fmodel.update_xray_structure(update_f_calc=True)
      t1,_ = self.compute_functional_and_gradients(compute_gradients=False)
      # calc t(x-d)
      x_d[i_g_max] = self.x[i_g_max] - d
      self.update_model_sites(x = x_d)
      del x_d
      self.fmodel.update_xray_structure(update_f_calc=True)
      t2,_ = self.compute_functional_and_gradients(compute_gradients=False)
      # Return fmodel to the correct coordinates values
      self.update_model_sites(x = self.x)
      self.fmodel.update_xray_structure(update_f_calc=True)
      self.buffer_max_grad.append(g[i_g_max])
      self.buffer_calc_grad.append((t1-t2)/(d*2))
def save_pdb_file(macro_cycle,fmodel,m_shaken,u_iso,sites):
  """
  Save the current refined structure to a numbered PDB file
  (refinementNN.pdb) for visualization.

  Arguments:
  macro_cycle : refinement macro-cycle number, used as the file suffix
  fmodel : fmodel holding the refined xray structure
  m_shaken : multimer whose hierarchy receives the refined coordinates
    and writes the file
  u_iso, sites : which parameters were refined; currently unused in the
    file name, kept for interface compatibility with callers
  """
  # Zero-pad the cycle number so the files sort correctly (01, 02, ... 10).
  # (Replaces a manual one-digit pad; also drops an unused 'method' string
  # that was only referenced by commented-out code.)
  fn = 'refinement{}.pdb'.format(str(macro_cycle).zfill(2))
  xrs_refined = fmodel.xray_structure
  m_shaken.assembled_multimer.adopt_xray_structure(xrs_refined)
  m_shaken.write(fn)
def create_pymol_movie():
  """
  Assemble a PyMOL movie from the refinement snapshot files.

  Every refinement*.pdb is loaded together with the reference
  full_asu.pdb, a camera view is stored per snapshot, and finally the
  movie frame range is defined from the number of snapshots.
  """
  from glob import glob
  import pymol

  snapshots = glob("refinement*.pdb")
  pymol.finish_launching()
  pymol.cmd.bg_color('white')
  for snapshot in snapshots:
    pymol.cmd.load(snapshot)
    pymol.cmd.load('full_asu.pdb')
    pymol.cmd.frame(1)
    pymol.cmd.mview('store')
  pymol.cmd.mset("1 -%d" % len(snapshots))
def run(
    n_macro_cycle=10,
    sites=True,
    u_iso=False,
    run_finite_grad_differences_test = False):
  """
  End-to-end strict-NCS refinement test.

  Builds an answer structure by expanding one NCS copy to the full ASU,
  generates synthetic Fobs from it, shakes the single copy, then refines
  the shaken model under strict NCS constraints, printing r_work per
  macro cycle.

  Arguments:
  __________
  n_macro_cycle : Number of refinement macro cycles
  sites : refine coordinates
  u_iso : refine isotropic B-factors
  run_finite_grad_differences_test : compare analytic vs numerical
    gradients inside the minimizer
  """
  # 1 NCS copy: starting template to generate whole asu; place into P1 box
  pdb_inp = iotbx.pdb.input(source_info=None, lines=ncs_1_copy)
  mtrix_object = pdb_inp.process_mtrix_records()
  ph = pdb_inp.construct_hierarchy()
  xrs = pdb_inp.xray_structure_simple()
  xrs_one_ncs = xrs.orthorhombic_unit_cell_around_centered_scatterers(
    buffer_size=8)
  ph.adopt_xray_structure(xrs_one_ncs)
  of = open("one_ncs_in_asu.pdb", "w")
  print >> of, mtrix_object.format_MTRIX_pdb_string()
  print >> of, ph.as_pdb_string(crystal_symmetry=xrs_one_ncs.crystal_symmetry())
  of.close()
  # 1 NCS copy -> full asu (expand NCS). This is the answer-strucure
  m = multimer("one_ncs_in_asu.pdb",'cau',error_handle=True,eps=1e-2)
  assert m.number_of_transforms == 2, m.number_of_transforms
  xrs_asu = m.assembled_multimer.extract_xray_structure(
    crystal_symmetry = xrs_one_ncs.crystal_symmetry())
  m.write("full_asu.pdb")
  # Expansion must preserve the crystal symmetry of the single copy.
  assert xrs_asu.crystal_symmetry().is_similar_symmetry(
    xrs_one_ncs.crystal_symmetry())
  # Generate Fobs from answer structure
  f_obs = abs(xrs_asu.structure_factors(d_min=2, algorithm="direct").f_calc())
  r_free_flags = f_obs.generate_r_free_flags()
  mtz_dataset = f_obs.as_mtz_dataset(column_root_label="F-obs")
  mtz_dataset.add_miller_array(
    miller_array=r_free_flags,
    column_root_label="R-free-flags")
  mtz_object = mtz_dataset.mtz_object()
  mtz_object.write(file_name = "data.mtz")
  # Shake structure - subject to refinement input
  xrs_shaken = xrs_one_ncs.deep_copy_scatterers()
  if sites:
    xrs_shaken.shake_sites_in_place(mean_distance=0.3)
  if u_iso:
    xrs_shaken.shake_adp()
  ph.adopt_xray_structure(xrs_shaken)
  of = open("one_ncs_in_asu_shaken.pdb", "w")
  print >> of, mtrix_object.format_MTRIX_pdb_string()
  print >> of, ph.as_pdb_string(crystal_symmetry=xrs.crystal_symmetry())
  of.close()
  ### Refinement
  params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
  params.algorithm = "direct"
  # Get the xray_structure of the shaken ASU
  m_shaken = multimer(
    pdb_input_file_name="one_ncs_in_asu_shaken.pdb",
    reconstruction_type='cau',error_handle=True,eps=1e-2)
  xrs_shaken_asu = m_shaken.assembled_multimer.as_pdb_input().\
    xray_structure_simple(crystal_symmetry=xrs_one_ncs.crystal_symmetry())
  # Save the shaken ASU for inspection
  m_shaken.write(pdb_output_file_name='asu_shaken.pdb')
  # Create a boolean selection string for selecting chains in NCS
  selection_str = 'chain A'
  ncs_selection = m_shaken.assembled_multimer.\
    atom_selection_cache().selection(selection_str)
  fmodel = mmtbx.f_model.manager(
    f_obs = f_obs,
    r_free_flags = r_free_flags,
    xray_structure = xrs_shaken_asu,
    sf_and_grads_accuracy_params = params,
    target_name = "ls_wunit_k1")
  print "start r_factor: %6.4f" % fmodel.r_work()
  # NOTE(review): refine_method is never read below -- confirm before
  # removing.
  refine_method = 'sites'
  for macro_cycle in xrange(n_macro_cycle):
    # refine coordinates
    if(sites):
      minimized = minimizer(
        fmodel = fmodel,
        ncs_transformations_object=m,
        ncs_atom_selection = ncs_selection,
        run_finite_grad_differences_test = run_finite_grad_differences_test,
        sites = True)
      print " macro_cycle %3d (sites) r_factor: %6.4f"%(macro_cycle,
        fmodel.r_work())
    # refine ADPs
    if(u_iso):
      minimized = minimizer(
        fmodel = fmodel,
        ncs_transformations_object=m,
        ncs_atom_selection = ncs_selection,
        run_finite_grad_differences_test = run_finite_grad_differences_test,
        u_iso = True)
      print " macro_cycle %3d (adp) r_factor: %6.4f"%(macro_cycle,
        fmodel.r_work())
    # Debug toggles: flip the 0/1 below to control per-cycle snapshot
    # writing (the first call is deliberately disabled dead code).
    if (0): save_pdb_file(
      macro_cycle=macro_cycle,
      fmodel=fmodel,
      m_shaken=m_shaken,
      u_iso=u_iso,
      sites=sites)
    if (1): save_pdb_file(
      macro_cycle=macro_cycle,
      fmodel=fmodel,
      m_shaken=m_shaken,
      u_iso=u_iso,
      sites=sites)
  # create_pymol_movie()
def set_test_folder():
  """
  Change the working directory to a personal scratch folder so test output
  does not litter phenix_sources\\phenix_regression\\development.

  Only takes effect for user 'youval'; for anyone else it is a no-op.
  """
  if getpass.getuser().lower() != 'youval':
    return
  # Pick the scratch location by platform.
  if sys.platform.startswith('win'):
    scratch = r'C:\Phenix\Dev\Work\work\NCS\junk'
  else:
    scratch = '/net/cci/youval/Work/work/NCS/junk'
  os.chdir(scratch)
# Script entry point: coordinate-only refinement for 40 macro cycles.
if __name__ == "__main__":
  set_test_folder()
  run(n_macro_cycle=40,
    sites=True,
    u_iso=False,
    run_finite_grad_differences_test = False)
|
youdar/work
|
work/NCS/test_files/run_03.py
|
Python
|
mit
| 14,976
|
[
"PyMOL"
] |
510aa5f3459964895457dc2b4921204770a577be6ea9fd3f77cd67ab55ed822e
|
from ase import Atoms
from ase.db import connect
from ase.structure import molecule
from ase.calculators.emt import EMT
from ase.constraints import FixAtoms, FixBondLength
# Run the same scenario against both ase.db backends (JSON and SQLite).
for name in ['y2.json', 'y2.db']:
    c = connect(name)
    print(name, c)

    # reserve() inserts a placeholder row and returns its id.
    # (NOTE: 'id' shadows the builtin; kept for byte-compatibility.)
    id = c.reserve(abc=7)
    # Delete the reserved row and reserve again; the key/value must
    # round-trip through the database.
    c.delete([d.id for d in c.select(abc=7)])
    id = c.reserve(abc=7)
    assert c[id].abc == 7
    a = c.get_atoms(id)
    c.write(Atoms())

    # Store a constrained CH4 molecule with EMT forces attached.
    ch4 = molecule('CH4', calculator=EMT())
    ch4.constraints = [FixAtoms(indices=[1]),
        FixBondLength(0, 2)]
    f1 = ch4.get_forces()
    print(f1)
    c.delete([d.id for d in c.select(C=1)])
    id = c.write(ch4)

    # Forces read back from the row and from the reconstructed Atoms must
    # match what the calculator produced.
    f2 = c.get(C=1).forces
    assert abs(f2.sum(0)).max() < 1e-14
    f3 = c.get_atoms(C=1).get_forces()
    assert abs(f1 - f3).max() < 1e-14

    # update() with a dict value must raise ValueError; if it does not,
    # the deliberate 2/0 below fails the test loudly.
    try:
        c.update(id, abc={'a': 42})
    except ValueError:
        pass
    else:
        2 / 0
    c.update(id, grr='hmm')
    assert c.get(C=1).id == id
|
PHOTOX/fuase
|
ase/ase/test/db2.py
|
Python
|
gpl-2.0
| 979
|
[
"ASE"
] |
037260eef955b20464ccc535399f4a0cb85de4e93c66e04654bea24589206d50
|
# Copyright 2009 Lee Harr
#
# This file is part of pybotwar.
#
# Pybotwar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pybotwar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pybotwar. If not, see <http://www.gnu.org/licenses/>.
import random
import Box2D as box2d
# NOTE(review): this is pi rounded to float32 precision (math.pi would be
# more accurate) -- confirm nothing depends on the exact constant before
# changing it.
pi = 3.1415927410125732

import conf
import viewselect

# The active rendering backend, chosen once at import time.
view = viewselect.get_view_module()
def pos_tuple_scale(pos):
    """Map a Box2D world position (object with .x/.y) into view coordinates.

    The arena half-extents (30 x 25) are normalised so that the world range
    [-half, +half] maps to [0, 100] on each axis.
    """
    half_w = 30
    half_h = 25
    scaled_x = (pos.x + half_w) * 50. / half_w
    scaled_y = (pos.y + half_h) * 50. / half_h
    return (scaled_x, scaled_y)
def size_tuple_scale(pos_x, pos_y):
    """Scale a world width/height pair into view units.

    Uses the same arena half-extents (30 x 25) as pos_tuple_scale, so a
    full-arena extent maps to 100 view units per axis.
    """
    half_w = 30
    half_h = 25
    return (pos_x * 100. / half_w, pos_y * 100. / half_h)
class Robot(object):
    """A tank robot: a Box2D chassis body plus a small turret body joined
    to it by a motorized revolute joint."""

    # Total robots ever created; used to assign unique collision groups.
    nrobots = 0

    def __init__(self, wld, kind, name, pos, ang):
        w = wld.w
        Robot.nrobots += 1
        # Unique robot number.  The negative group index -n makes this
        # robot's own shapes (chassis, turret, its bullets) ignore each
        # other in collision detection.
        self.n = Robot.nrobots
        self.alive = True
        self.health = conf.maxhealth
        self.kind = kind
        self.name = name
        # Radar/ping state.
        self._pingtype = 'w'
        self._pingangle = 0
        self._pingdist = 0
        self._pinged = -5 # Tick most recently pinged by another robot's radar
        # Cannon state (cooling/reload handled per tick in World.showit).
        self._cannonheat = 0
        self._cannonreload = 0
        self._kills = 0 # number of robots killed while this one is still alive
        self._damage_caused = 0

        # Chassis body: a 1x1 (half-extent) damped box.
        bodyDef = box2d.b2BodyDef()
        bodyDef.position = pos
        bodyDef.angle = ang
        bodyDef.linearDamping = conf.robot_linearDamping
        bodyDef.angularDamping = conf.robot_angularDamping
        bodyDef.userData = {}
        body = w.CreateBody(bodyDef)
        shapeDef = box2d.b2PolygonDef()
        shapeDef.SetAsBox(1, 1)
        shapeDef.density = conf.robot_density
        shapeDef.friction = conf.robot_friction
        shapeDef.restitution = conf.robot_restitution
        shapeDef.filter.groupIndex = -self.n
        body.CreateShape(shapeDef)
        body.SetMassFromShapes()
        body.userData['actor'] = self
        body.userData['kind'] = 'robot'
        self.body = body

        # Turret body.
        turretDef = box2d.b2BodyDef()
        turretDef.position = pos
        turretDef.angle = ang
        turretDef.linearDamping = 0
        turretDef.angularDamping = 0
        # NOTE(review): the turret is created from bodyDef, not turretDef,
        # so turretDef (zero damping) is never used -- confirm intent.
        turret = w.CreateBody(bodyDef)
        shapeDef = box2d.b2PolygonDef()
        shapeDef.SetAsBox(.1, .1)
        shapeDef.density = 1
        shapeDef.friction = 0
        shapeDef.restitution = 0
        shapeDef.filter.groupIndex = -self.n
        turret.CreateShape(shapeDef)
        turret.SetMassFromShapes()
        self.turret = turret

        # Motorized revolute joint lets the turret rotate on the chassis.
        jointDef = box2d.b2RevoluteJointDef()
        jointDef.Initialize(body, turret, pos)
        jointDef.maxMotorTorque = conf.turret_maxMotorTorque
        jointDef.motorSpeed = 0.0
        jointDef.enableMotor = True
        self.turretjoint = w.CreateJoint(jointDef).getAsType()
        self._turretangletarget = 0

        # View-layer handles for the robot sprite and its info panel.
        v = wld.v.addrobot(pos, ang)
        self.v = v
        i = wld.v.addrobotinfo(self.n, name)
        self.i = i

    def to_dict(self):
        """Serializable snapshot of this robot's state for the view layer."""
        roboDict = {}
        roboDict['position'] = pos_tuple_scale(self.body.position)
        roboDict['rotation'] = self.gyro()
        roboDict['health'] = self.health
        roboDict['name'] = self.name
        roboDict['turret_angle'] = self.get_turretangle()
        return roboDict

    def gyro(self):
        'return robot angle wrt world in degrees.'
        radians = self.body.angle
        degrees = int(round((180 / pi) * radians))
        return degrees

    def set_turretangle(self, angle):
        'Angle comes in degrees. Convert to radians and set.'
        # The joint motor drives toward this target (see turretcontrol).
        radians = (pi / 180.) * angle
        self._turretangletarget = radians

    def get_turretangle(self):
        'return turret angle in degrees.'
        degrees = int(round((180 / pi) * self.turretjoint.GetJointAngle()))
        return degrees

    def turretcontrol(self):
        # Proportional controller: motor speed opposes the angle error.
        joint = self.turretjoint
        angleError = joint.GetJointAngle() - self._turretangletarget
        gain = 0.5
        joint.SetMotorSpeed(-gain * angleError)
class Bullet(object):
    """A cannon shell: a fast Box2D bullet body launched from a robot's
    turret, which can later expand into concentric explosion rings."""

    def __init__(self, wld, robot):
        self.wld = wld
        w = wld.w
        self.robot = robot # Fired by this robot
        self._fuse = None       # ticks until self-detonation, or None
        self._exploding = False # False, or an explosion frame counter (int)

        r = robot.turret
        pos = r.position
        vel = r.linearVelocity
        ang = r.angle

        # Muzzle velocity in the turret frame, rotated into world frame and
        # added to the turret's own velocity.
        blocalvel = box2d.b2Vec2(conf.bulletspeed, 0)
        bwvel = r.GetWorldVector(blocalvel)
        bvel = bwvel + vel
        #print bvel, bvel.Length()

        bodyDef = box2d.b2BodyDef()
        # Spawn slightly in front of the turret center.
        blocalpos = box2d.b2Vec2(.1, 0)
        bwpos = r.GetWorldVector(blocalpos)
        bpos = bwpos + pos
        bodyDef.position = bpos
        bodyDef.angle = ang
        # Box2D "bullet" flag: continuous collision detection so the fast
        # shell cannot tunnel through thin walls.
        bodyDef.isBullet = True
        bodyDef.linearDamping = 0
        bodyDef.userData = {}
        body = w.CreateBody(bodyDef)
        #print body
        #print 'IB', body.isBullet
        body.linearVelocity = bvel

        shapeDef = box2d.b2PolygonDef()
        shapeDef.SetAsBox(.1, .1)
        shapeDef.density = conf.bullet_density
        shapeDef.restitution = 0
        shapeDef.friction = 0
        # Same negative group as the shooter: never collides with it.
        shapeDef.filter.groupIndex = -robot.n
        b = body.CreateShape(shapeDef)
        b.userData = {}
        body.SetMassFromShapes()
        body.userData['actor'] = self
        body.userData['kind'] = 'bullet'
        body.userData['shooter'] = robot
        self.body = body

        # View-layer handle for the bullet sprite.
        v = wld.v.addbullet(pos)
        self.v = v

    def to_dict(self):
        """Serializable snapshot of this bullet for the view layer."""
        bulletDict = {}
        bulletDict['position'] = pos_tuple_scale(self.body.position)
        bulletDict['angle'] = self.body.angle
        bulletDict['exploding'] = 0 if not self._exploding else self._exploding
        return bulletDict

    def explode(self):
        """Begin exploding: attach concentric circle shapes (one per radius
        in conf.explosion_radii) to the bullet body and start the frame
        counter that World.showit uses to retire the explosion."""
        self._exploding = 1
        #robot = self.body.userData['shooter'].name
        #print robot,'bullet explode at', self.body.position
        for ring, radius in enumerate(conf.explosion_radii):
            cdef = box2d.b2CircleDef()
            cdef.radius = radius
            s = self.body.CreateShape(cdef)
            s.userData = {}
            s.userData['ring'] = ring
            s.userData['bullet'] = self
            s.userData['hits'] = {0:[], 1:[], 2:[]}
        e = self.wld.v.addexplosion(self.body.position)
        self.e = e
class Wall(object):
    """A static rectangular obstacle; also used for the arena border."""

    def __init__(self, w, pos, size):
        walldef = box2d.b2BodyDef()
        walldef.position = pos
        walldef.userData = {}
        wallbod = w.CreateBody(walldef)
        wallbod.userData['actor'] = None
        wallbod.userData['kind'] = 'wall'
        # Marker checked by World.do_destroy so walls are never destroyed.
        wallbod.iswall = True
        wallshp = box2d.b2PolygonDef()
        width, height = size
        # Box2D SetAsBox takes half-extents.
        wallshp.SetAsBox(width, height)
        wallbod.CreateShape(wallshp)
        self.body = wallbod
        self.width = size[0]
        self.height = size[1]
        # View-layer handle.
        v = view.Wall(pos, size)
        self.v = v

    def to_dict(self):
        """Serializable snapshot of this wall for the view layer."""
        wallDict = {}
        wallDict['position'] = pos_tuple_scale(self.body.position)
        (width, height) = size_tuple_scale(self.width, self.height)
        wallDict['width'] = width
        wallDict['height'] = height
        return wallDict
class World(object):
    def __init__(self):
        """Set up the Box2D world, simulation parameters, and the arena."""
        self.count = 1000
        self.force = 10
        self.robots = {}      # name -> Robot
        self.bullets = []
        self.sprites = {}
        self.walls = []
        self.to_destroy = []  # models queued for removal after each step
        # Physics world bounds vs. (smaller) playable arena half-extents.
        halfx = 30
        self.ahalfx = 20
        halfy = 25
        self.ahalfy = 20
        gravity = (0, 0)      # top-down game: no gravity
        doSleep = True
        self.timeStep = 1.0 / 60.0
        self.velIterations = 10
        self.posIterations = 8
        # The world AABB must enclose everything that will ever exist.
        aabb = box2d.b2AABB()
        aabb.lowerBound = (-halfx, -halfy)
        aabb.upperBound = (halfx, halfy)
        self.w = box2d.b2World(aabb, gravity, doSleep)
        self.w.GetGroundBody().SetUserData({'actor': None})
        self.makearena()
    def makearena(self):
        """Create the view arena and the four border walls."""
        self.v = view.Arena()
        ahx = self.ahalfx
        ahy = self.ahalfy
        # Left, right, top, bottom borders (positions/half-sizes in world units).
        self.walls.append(Wall(self.w, (-ahx, 0), (1, ahy+1)))
        self.walls.append(Wall(self.w, (ahx, 0), (1, ahy+1)))
        self.walls.append(Wall(self.w, (0, ahy), (ahx+1, 1)))
        self.walls.append(Wall(self.w, (0, -ahy), (ahx+1, 1)))
        # Random interior blocks are currently disabled.
        for block in range(5):
            #self.makeblock()
            pass
    def makeblock(self):
        """Place one randomly sized wall block at a random arena position."""
        x = random.randrange(-self.ahalfx, self.ahalfx+1)
        y = random.randrange(-self.ahalfy, self.ahalfy+1)
        # Half-extents between 0.1 and 1.9 world units.
        w = random.randrange(1, 20)/10.0
        h = random.randrange(1, 20)/10.0
        wl = Wall(self.w, (x, y), (w, h))
def posoccupied(self, pos):
px, py = pos.x, pos.y
for name, robot in self.robots.items():
rbpos = robot.body.position
rx, ry = rbpos.x, rbpos.y
if (rx-2 < px < rx+2) and (ry-2 < py < ry+2):
return True
return False
    def makerobot(self, kind, name, pos=None, ang=None):
        """Create and register a robot.

        When pos is None (or occupied), a free random spot is chosen that
        keeps 2 world units clear of the arena border; a random heading is
        chosen when ang is None.  Returns the new Robot.
        """
        rhx = self.ahalfx-2
        rhy = self.ahalfy-2
        while pos is None or self.posoccupied(pos):
            rx = random.randrange(-rhx, rhx)
            ry = random.randrange(-rhy, rhy)
            pos = box2d.b2Vec2(rx, ry)
        if ang is None:
            # 0.00 .. 6.27 radians, i.e. roughly uniform over a full turn.
            ang = random.randrange(628) / float(100)
        robot = Robot(self, kind, name, pos, ang)
        self.robots[name] = robot
        return robot
    def makebullet(self, rname, fuse=None):
        """Fire a bullet from robot *rname*.

        Returns the new Bullet, or None when the cannon is overheated or
        still reloading; misfiring also adds a reload-time penalty.
        """
        robot = self.robots[rname]
        if robot._cannonheat > conf.cannon_maxheat:
            # tried to fire when the cannon was overheated
            robot._cannonreload += conf.overheat_fire_reload_penalty
            return None
        elif robot._cannonreload > 0:
            # tried to fire when the cannon was not loaded
            robot._cannonreload += conf.unloaded_fire_reload_penalty
            return None
        bullet = Bullet(self, robot)
        bullet._fuse = fuse
        self.bullets.append(bullet)
        robot._cannonheat += conf.cannon_heating_per_shot
        robot._cannonreload = conf.cannon_reload_ticks
        return bullet
    def makeping(self, rname, rnd):
        """Cast the radar ray for robot *rname* on tick *rnd*.

        Returns (kind, turret_angle_degrees, distance) for the first thing
        the ray hits; robots that are hit get their _pinged tick updated
        (unless they were already pinged on the previous tick).
        """
        robot = self.robots[rname]
        body = robot.turret
        segmentLength = 65.0
        # NOTE(review): blocalpos duplicates laserStart and is unused.
        blocalpos = box2d.b2Vec2(1.12, 0)
        segment = box2d.b2Segment()
        # Ray starts just outside the turret and extends segmentLength
        # units along the turret's facing direction.
        laserStart = (1.12, 0)
        laserDir = (segmentLength, 0.0)
        segment.p1 = body.GetWorldPoint(laserStart)
        segment.p2 = body.GetWorldVector(laserDir)
        segment.p2+=segment.p1
        lambda_, normal, shape = self.w.RaycastOne(segment, False, None)
        # Interpolate the actual hit point along the segment.
        hitp = (1 - lambda_) * segment.p1 + lambda_ * segment.p2
        angle = robot.get_turretangle()
        dist = box2d.b2Distance(segment.p1, hitp)
        if shape is not None:
            hitbody = shape.GetBody()
            kind = hitbody.userData['kind']
            if kind == 'robot':
                actor = hitbody.userData['actor']
                if actor._pinged != rnd - 1:
                    actor._pinged = rnd
            return kind, angle, dist
        else:
            # Not sure why shape returns None here. Seems to be when the
            # robot is pressed right up against a wall, though.
            return 'w', angle, 0
    def step(self):
        """Advance the physics one tick, then destroy queued bodies and
        refresh the view."""
        #self.moveit()
        #print 'STEP', self.w.Step
        self.w.Step(self.timeStep, self.velIterations, self.posIterations)
        self.do_destroy()
        self.showit()
def showit(self):
    """Sync every robot/bullet view with its physics body and tick timers.

    Also cools cannons, counts down reload timers and bullet fuses,
    detonates fused shells, and queues finished explosions for removal.
    """
    for name, robot in self.robots.items():
        r = robot.body
        robot.turretcontrol()
        #vel = r.linearVelocity.Length()
        #pos = r.position.Length()
        pos2 = r.position
        ang = r.angle
        turret = robot.turretjoint
        tang = turret.GetJointAngle()
        #print '{name}: {pos:6.2f} {ang:5.1f} {vel:5.1f}'.format(
        #    name=name, vel=vel, pos=pos, ang=ang)
        # View rotations are negated relative to the physics angles
        # (screen vs. world coordinate convention).
        robot.v.setpos(pos2)
        robot.v.set_rotation(-ang)
        #robot.t.setpos(pos2)
        robot.v.set_turr_rot(-tang)
        # Per-tick cannon bookkeeping: cool down and reload.
        if robot._cannonheat > 0:
            robot._cannonheat -= conf.cannon_cooling_per_tick
        if robot._cannonreload > 0:
            robot._cannonreload -= 1
    for bullet in self.bullets:
        b = bullet.body
        pos2 = b.position
        bullet.v.setpos(pos2)
        #print bullet.linearVelocity
        if bullet._fuse is not None:
            bullet._fuse -= 1
            if bullet._fuse == 0:
                print 'shell explodes'
                bullet.explode()
        if bullet._exploding:
            # An explosion lives for a few ticks, then is destroyed.
            if bullet._exploding > 2:
                if bullet not in self.to_destroy:
                    self.to_destroy.append(bullet)
            else:
                bullet._exploding += 1
    #print
    self.v.step()
def do_destroy(self):
    """Remove all queued models (robots/bullets) from the physics world.

    Drains `self.to_destroy`, skipping walls, killing view sprites and
    destroying the Box2D bodies (including a robot's separate turret
    body).
    """
    while self.to_destroy:
        model = self.to_destroy.pop()
        body = model.body
        # Walls are permanent; never destroy them.
        if hasattr(body, 'iswall') and body.iswall:
            continue
        #print 'destroy', id(body)
        if model in self.bullets:
            self.bullets.remove(model)
            if model._exploding:
                # Also remove the explosion sprite.
                model.e.kill()
        #print 's0', self.v.sprites
        model.v.kill()
        if model.body.userData['kind'] == 'robot':
            # Robots own a second body for the turret.
            self.w.DestroyBody(model.turret)
        #del self.robots[model.name]
        #print 's1', self.v.sprites
        #print 'destroying', id(body)
        self.w.DestroyBody(body)
        #print 'destroyed', id(body)
def make_testrobots(self):
self.makerobot('R1', (4, 0), pi)
self.makerobot('R2', (-4, 0), 0)
self.makerobot('R3', (0, 4), pi)
self.makerobot('R4', (0, -4), 0)
self.makerobot('R5', (4, 4), pi)
self.makerobot('R6', (-4, 4), 0)
self.makerobot('R7', (-4, -4), pi)
self.makerobot('R8', (4, -4), 0)
self.makerobot('R1')
self.makerobot('R2')
self.makerobot('R3')
self.makerobot('R4')
self.makerobot('R5')
self.makerobot('R6')
self.makerobot('R7')
self.makerobot('R8')
def testmoves(self):
    """Drive every robot with a periodically reversing force (test aid).

    Every 1000 calls the push direction flips; each robot also receives
    a constant torque and has a 1-in-3 chance per tick of firing.
    """
    self.count -= 1
    if self.count < 0:
        # Reverse the driving force and restart the cycle.
        self.force = -self.force
        self.count = 1000
    for name, robot in self.robots.items():
        r = robot.body
        pos = r.position
        vel = r.linearVelocity
        #print 'pos', pos
        #print dir(vel)
        # Push along the robot's local x axis, converted to world coords.
        localforce = box2d.b2Vec2(self.force, 0)
        worldforce = r.GetWorldVector(localforce)
        r.ApplyForce(worldforce, pos)
        #if r.angularVelocity < .5:
            #r.ApplyTorque(.5)
        #else:
            #print 'av', r.angle
        r.ApplyTorque(4)
        bullet = random.randrange(3)
        if bullet == 2:
            #print name, 'shoots'
            self.makebullet(name)
def to_dict(self):
    """Serialize the world (bullets, robots, sprites, walls) to a dict."""
    return {
        'bullets': [bullet.to_dict() for bullet in self.bullets],
        'robots': [robot.to_dict() for robot in self.robots.values()],
        'sprites': [sprite.to_dict() for sprite in self.sprites],
        'walls': [wall.to_dict() for wall in self.walls],
    }
class CL(box2d.b2ContactListener):
    """Box2D contact listener that applies combat/collision damage.

    The `w` attribute (the World) is attached externally after
    construction; see the __main__ block.
    """
    def Result(self, result):
        """Handle one contact result: apply bullet/collision damage.

        The two halves below are mirror images: first damage is applied
        to actor2 (when body 2 is a robot), then to actor1.  NOTE(review):
        `dmg` is initialized once and shared by both halves; its value can
        carry over from the first half into the second when the second
        half's conditions set neither branch -- confirm this is intended
        before restructuring.
        """
        s1 = result.shape1
        b1 = s1.GetBody()
        actor1 = b1.userData['actor']
        kind1 = b1.userData.get('kind', None)
        s2 = result.shape2
        b2 = s2.GetBody()
        actor2 = b2.userData['actor']
        kind2 = b2.userData.get('kind', None)
        dmg = 0
        hitdmg = conf.direct_hit_damage
        cds = conf.collision_damage_start
        cdf = conf.collision_damage_factor
        # Collision damage grows with the total contact impulse
        # (normal + tangent components combined).
        nimpulse = result.normalImpulse
        timpulse = result.tangentImpulse
        impulse = box2d.b2Vec2(nimpulse, timpulse).Length()
        coldmg = int((cdf * (impulse - cds))**2) + 1
        if kind2=='robot':
            if kind1=='bullet':
                # 'ring' is set on explosion blast shapes; a direct shell
                # hit has ring is None.
                ring = s1.userData.get('ring', None)
                shooter = b1.userData['shooter']
                if ring is None and shooter == actor2:
                    #can't shoot yourself
                    pass
                elif ring is None:
                    dmg = hitdmg
                    print 'Robot', actor2.name, 'shot for', dmg,
                else:
                    # Each blast ring damages a given robot at most once.
                    hits = s1.userData['hits']
                    if actor2 not in hits[ring]:
                        dmg = conf.explosion_damage[ring]
                        print 'Robot', actor2.name, 'in blast area for', dmg
                        hits[ring].append(actor2)
                    else:
                        pass
                        #print actor2.name, 'already hit by ring', ring
            else:
                shooter = None
                if impulse > cds:
                    dmg = coldmg
                    print 'Robot', actor2.name, 'coll for', dmg,
            if dmg:
                actor2.health -= dmg
                if shooter is not None:
                    shooter._damage_caused += dmg
                actor2.i.health.step(dmg)
                if actor2.health <= 0:
                    actor2.alive = False
                    if conf.remove_dead_robots:
                        if actor2 not in self.w.to_destroy:
                            self.w.to_destroy.append(actor2)
                    print
                else:
                    print 'down to', actor2.health
        if kind1=='robot':
            # Mirror of the block above, with the roles of 1 and 2 swapped.
            if kind2=='bullet':
                ring = s2.userData.get('ring', None)
                shooter = b2.userData['shooter']
                if ring is None and shooter == actor1:
                    #can't shoot yourself
                    pass
                elif ring is None:
                    dmg = hitdmg
                    print 'Robot', actor1.name, 'shot for', dmg,
                else:
                    hits = s2.userData['hits']
                    if actor1 not in hits[ring]:
                        dmg = conf.explosion_damage[ring]
                        print 'Robot', actor1.name, 'in blast area for', dmg
                        hits[ring].append(actor1)
                    else:
                        pass
                        #print actor1.name, 'already hit by ring', ring
            else:
                shooter = None
                if impulse > cds:
                    dmg = coldmg
                    print 'Robot', actor1.name, 'coll for', dmg,
            if dmg:
                actor1.health -= dmg
                if shooter is not None:
                    shooter._damage_caused += dmg
                actor1.i.health.step(dmg)
                if actor1.health <= 0:
                    actor1.alive = False
                    if conf.remove_dead_robots:
                        if actor1 not in self.w.to_destroy:
                            self.w.to_destroy.append(actor1)
                    print
                else:
                    print 'down to', actor1.health
        # A non-exploding bullet is spent on any contact: queue it for
        # destruction.
        if actor1 in self.w.bullets and not actor1._exploding:
            if actor1 not in self.w.to_destroy:
                self.w.to_destroy.append(actor1)
        if actor2 in self.w.bullets and not actor2._exploding:
            if actor2 not in self.w.to_destroy:
                self.w.to_destroy.append(actor2)
if __name__ == '__main__':
    # Stand-alone demo: create a world, wire up the contact listener,
    # and run the simulation loop until the view window is closed.
    w = World()
    cl = CL()
    w.w.SetContactListener(cl)
    # The listener needs a back-reference to the world to apply damage.
    cl.w = w
    while not w.v.quit:
        w.step()
|
CodingRobots/CodingRobots
|
world.py
|
Python
|
gpl-3.0
| 20,209
|
[
"BLAST"
] |
a377b3d76965fee90d388577853ba069b8d75b898542c945c98dfd128ed91c48
|
#-*- coding:utf-8 -*-
"""sslfetch.connections.py
Python Lib for ssl connection downloads.
Complete with:
optional timestamp file to load
Headers:
'User-Agent'
'If-Modified-Since',
'last-modified'
custom
proxies
Copyright (C) 2013 Brian Dolbec <dolsen@gentoo.org>
Distributed under the terms of the GNU General Public License v2
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import sys
import time
import os

# Whether HTTPS certificate verification can be enabled, plus any
# messages produced while probing for SNI support.  Both are finalized
# by the version checks below; Connector.__init__ reports VERIFY_MSGS.
VERIFY_SSL = False
VERIFY_MSGS = []

import requests
from requests.exceptions import SSLError

# py3.2
if sys.hexversion >= 0x30200f0:
    # Python >= 3.2 supports SNI natively; verification just works.
    VERIFY_SSL = True
else:
    try: # import and enable SNI support for py2
        from requests.packages.urllib3.contrib import pyopenssl
        pyopenssl.inject_into_urllib3()
        VERIFY_SSL = True
        VERIFY_MSGS = ["Successfully enabled ssl certificate verification."]
    except ImportError as e:
        VERIFY_MSGS = [
            "Failed to import and inject pyopenssl/SNI support into urllib3",
            "Disabling certificate verification",
            "Error was: {0}".format(e)
        ]
        VERIFY_SSL = False
def fileopen(path, mode='r', enc="UTF-8"):
    """Open *path*, passing the encoding when the runtime supports it.

    Python 3's open() accepts an ``encoding`` keyword while Python 2's
    raises TypeError for it, so fall back to a plain open() there.
    """
    try:
        return open(path, mode, encoding=enc)
    except TypeError:
        # Python 2: no encoding parameter available.
        return open(path, mode)
def get_timestamp(tpath):
    """Return the first line of the timestamp file at *tpath*, or ''.

    An empty string is returned when *tpath* is falsy or the file does
    not exist.
    """
    if not tpath or not os.path.exists(tpath):
        return ""
    with fileopen(tpath, 'r') as previous:
        return previous.readline()
class Connector(object):
    """Primary connection interface using the dev-python/requests package
    """

    def __init__(self, output_dict, proxies=None, useragent='ssl-fetch'):
        """Connector __init__()
        @param output_dict: dictionary of: eg: {
            'info': logging.info,           # function
            'error': logging.error,         # function
            'debug': logging.debug,         # function
            'warning': logging.warning      # function
            'exception': logging.exception  # function
            'kwargs-info': {},              # dict for **kwargs use
            'kwargs-error': {}              # dict for **kwargs use
            }
            all output will be called via the self.output() using:
                def output(self, mode, msg):
                    kwargs = self.output_dict['kwargs-%s' % mode]
                    func = self.output_dict[mode]
                    func(msg, **kwargs)
            NOTE: logging module primarily uses the setLevel()
            so the kwargs-* parameters should be {}.
            For custom output modules, the kwargs-* variables can be set
            to whatever is needed to be passed to them.
            eg:
                'kwargs-info': {'level': 2},
        @param proxies: dictionary, default of None, it will try to
            get them from the environment.
        @param useragent: string, the User-Agent string to pass to the server,
            default of 'ssl-fetch' is just due to parameter ordering
        """
        self.output_dict = output_dict
        # get_env_proxies() now returns the proxies dict it builds, so
        # this fallback assigns real values instead of None (see fix in
        # get_env_proxies below).
        self.proxies = proxies or self.get_env_proxies()
        self.headers = {'Accept-Charset': 'utf-8',
                        'User-Agent': useragent}
        # Minutes remaining in the current re-fetch cycle; maintained by
        # verify_cycle().
        self._remaining = 0
        # check if there were any initialization messages
        # and output them now that we have an output assigned
        if VERIFY_MSGS:
            for msg in VERIFY_MSGS:
                self.output('info', msg + '\n')

    def add_timestamp(self, headers, tpath=None, timestamp=None):
        """Adds an 'If-Modified-Since' header to the headers using
        the information supplied via a tpath file or timestamp.

        @param headers: dictionary, headers to update in place
        @param tpath: string, optional filepath to a timestamp file
            to use in the headers (takes precedence over `timestamp`)
        @param timestamp: string, optional timestamp to use in the headers
        @rtype: dictionary of updated headers
        """
        if tpath:
            timestamp = get_timestamp(tpath)
        if timestamp:
            headers['If-Modified-Since'] = timestamp.strip()
            self.output('info', 'Current-modified: %s\n' % timestamp)
        return headers

    def connect_url(self, url, headers=None, tpath=None, timestamp=None, stream=False):
        """Establishes a verified connection to the specified url

        @param url: string
        @param headers: dictionary, optional headers to use
        @param tpath: string, optional filepath to a timestamp file
            to use in the headers
        @param timestamp: string, optional timestamp to use in the headers
        @param stream: bool, passed through to requests.get()
        @rtype: requests connection instance, or None on failure
        """
        if not headers:
            headers = self.headers
        if timestamp or tpath:
            self.add_timestamp(headers, tpath=tpath, timestamp=timestamp)
        # Verification only makes sense for https, and only when the
        # runtime supports SNI (module-level VERIFY_SSL).
        verify = url.startswith('https') and VERIFY_SSL
        self.output('debug', "Enabled ssl certificate verification: %s, for: %s\n"
                    % (str(verify), url))
        self.output('debug', 'Connector.connect_url(); headers = %s\n'
                    % str(headers))
        self.output('debug', 'Connector.connect_url(); connecting to opener\n')
        try:
            connection = requests.get(
                url,
                headers=headers,
                verify=verify,
                proxies=self.proxies,
                stream=stream,
            )
            self.output('debug', 'Connector.connect_url() HEADERS = %s\n'
                        % str(connection.headers))
            self.output('debug', 'Connector.connect_url() Status_code = %i\n'
                        % connection.status_code)
            return connection
        except SSLError as error:
            self.output('error', 'Connector.connect_url(); Failed to update the '
                        'mirror list from: %s\nSSL-fetch:SSLError was:%s\n'
                        % (url, str(error)))
        except Exception as error:
            self.output('exception', 'Connector.connect_url(); Failed to retrieve '
                        'the content from: %s\nSSL-fetch:Error was: %s\n'
                        % (url, str(error)))
        return None

    @staticmethod
    def normalize_headers(headers, to_lower=True):
        """ py2, py3 compatibility function,
        since only py2 returns keys as lower()

        This function maps a lower (or upper) case form of each key to
        the original key, so callers can look up headers
        case-insensitively.
        """
        if to_lower:
            return dict((x.lower(), x) for x in list(headers))
        return dict((x.upper(), x) for x in list(headers))

    def fetch_file(self, url, save_path, tpath=None, buf=1024, climit=60, timestamp=None):
        """Fetch blobs of files

        @param url: string of the content to fetch
        @param save_path: file path to save the file to
        @param tpath: string, optional filepath to a timestamp file
            to use in the headers
        @param buf: integer of the buffer size
        @param climit: Minimum time limit (minutes) before a cycle passes,
            allowing a file to be downloaded again.  Default time is
            60 minutes.
        @param timestamp: string, optional timestamp to use in the headers
        @returns (success bool, '', timestamp of fetched content or '')
            -- the middle element is always empty; the file content is
            written to save_path instead.
        """
        if tpath and os.path.exists(tpath):
            if not self.verify_cycle(tpath, climit):
                self.output('warning',
                            ' ** Re-fetch cycle timeout of %s minutes not yet '
                            'reached... %s minutes remaining'
                            % (climit, self._remaining))
                # Not an error: the existing file is considered current.
                return (True, '', '')
        connection = self.connect_url(url, tpath=tpath, stream=True)
        if not connection:
            return (False, '', '')
        timestamp = self.get_last_modified(connection)
        datestamp = self.get_date(connection)
        if connection.status_code in [304]:
            self.output('info', 'File already up to date: %s\n'
                        % url)
            self.output('info', 'Date: %s\n' % datestamp)
            return (True, '', '')
        elif connection.status_code in [404]:
            self.output('error', 'Connector.fetch_file(); '
                        'HTTP Status-Code was: %s\nurl:%s'
                        % (str(connection.status_code), url))
            return (False, '', '')
        else:
            self.output('info', 'New file downloaded for: %s\n'
                        % url)
            with open(save_path, 'wb') as handle:
                handle.writelines(connection.iter_content(buf))
            if tpath:
                with fileopen(tpath, mode='w') as stamp:
                    stamp.write(str(timestamp) + '\n')
            return (True, '', timestamp)

    def fetch_content(self, url, tpath=None, climit=60, timestamp=None):
        """Fetch the content.

        @param url: string of the content to fetch
        @param tpath: string, optional filepath to a timestamp file
            to use in the headers
        @param climit: Minimum time limit (minutes) before a cycle passes,
            allowing a file to be downloaded again.  Default time is
            60 minutes.
        @param timestamp: string, optional timestamp to use in the headers
        @returns (success bool, content fetched or '',
            timestamp of fetched content or '')
        """
        if tpath and os.path.exists(tpath):
            if not self.verify_cycle(tpath, climit):
                self.output('warning',
                            ' ** Re-fetch cycle timeout of %s minutes not yet '
                            'reached... %s minutes remaining'
                            % (climit, self._remaining))
                return (False, '', '')
        connection = self.connect_url(url, tpath=tpath)
        if not connection:
            return (False, '', '')
        timestamp = self.get_last_modified(connection)
        datestamp = self.get_date(connection)
        if connection.status_code in [304]:
            self.output('info', 'Content already up to date: %s\n'
                        % url)
            self.output('info', 'Date: %s\n' % datestamp)
        elif connection.status_code not in [200]:
            self.output('error', 'Connector.fetch_content(); '
                        'HTTP Status-Code was:\nurl: %s\n%s'
                        % (url, str(connection.status_code)))
        else:
            self.output('info', 'New content downloaded for: %s\n'
                        % url)
            return (True, connection.content, timestamp)
        return (False, '', '')

    def output(self, mode, msg):
        '''Generic output module which calls the mapped functions
        from the class's init.

        Unknown modes fall back to the 'error' handler and its kwargs.

        @param mode: string, one of ['info', 'error', 'debug', 'warning',
            'exception']
        @param msg: string
        '''
        kwargs = self.output_dict.get('kwargs-%s' % mode,
                                      self.output_dict['kwargs-error'])
        func = self.output_dict.get(mode, self.output_dict['error'])
        func(msg, **kwargs)

    def get_env_proxies(self):
        '''Builds the proxies mapping from the environment.

        @rtype: dict mapping scheme ('http', 'https') to the value of
            the corresponding *_proxy environment variable (None when
            unset).

        BUG FIX: this used to return None (bare ``return``), so
        ``__init__``'s ``proxies or self.get_env_proxies()`` overwrote
        the freshly-built self.proxies with None.  It now returns the
        dict it builds.
        '''
        self.proxies = {}
        for proxy in ['http_proxy', 'https_proxy']:
            prox = proxy.split('_')[0]
            self.proxies[prox] = os.getenv(proxy)
        return self.proxies

    def get_last_modified(self, connection):
        '''Extracts the 'last-modified' timestamp from the connection
        headers, or None when the header is absent.

        @param connection: requests connection instance
        '''
        if 'last-modified' in connection.headers:
            timestamp = connection.headers['last-modified']
        else:
            timestamp = None
        return timestamp

    def get_date(self, connection):
        '''Extracts the 'date' header from the connection, or None.

        @param connection: requests connection instance
        '''
        if 'date' in connection.headers:
            timestamp = connection.headers['date']
        else:
            timestamp = None
        return timestamp

    def verify_cycle(self, tpath, climit, verify_only=False):
        """Checks the age of a timestamp file against the current system
        time.  If at least `climit` minutes have passed, the timestamp
        file's times are updated to now (unless `verify_only`) and True
        is returned; otherwise False is returned and `self._remaining`
        holds the minutes left in the cycle.

        NOTE(review): this reads os.path.getctime() although the comment
        and os.utime() update suggest mtime was intended -- on most
        systems utime also refreshes ctime, but confirm on the target
        platforms.

        @param tpath: string, filepath to a timestamp file to use in the
            headers.
        @param climit: Minimum time limit (minutes) before a cycle passes,
            allowing a file to be downloaded again.  Default time is
            60 minutes.
        @param verify_only: Optional boolean to turn on/off updating the
            time of the timestamp file.
        @rtype: bool
        """
        dtime = os.path.getctime(tpath)         # Mtime of timestamp file.
        stime = time.mktime(time.localtime())   # Current system time
        # Both times are measured in seconds from epoch.
        # Seconds to minutes = seconds/60
        time_diff = int((stime - dtime) / 60)
        self._remaining = 0
        if time_diff >= climit:
            if not verify_only:
                # Update the mtime of the timestamp file. A cycle has passed.
                os.utime(tpath, None)  # None is a param needed for py2.7 compat.
            return True
        elif not verify_only:
            self._remaining = climit - time_diff
            self.output('info', "Re-fetch cycle timeout of: "
                        "%s minutes, Remaining: %s minutes "
                        % (climit, self._remaining))
        return False
|
dol-sen/ssl-fetch
|
sslfetch/connections.py
|
Python
|
gpl-2.0
| 14,171
|
[
"Brian"
] |
1f7acdd20b74b0dc3b84d9fba3989e3ce7082e6d19d932bb39fe67a1e35cd771
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Submodule declaring the folder folders class. """
__docformat__ = "restructuredtext en"
from . import logger
class JobFolder(object):
""" High-throughput folder class.
Means to organize any calculations in folders and subfolders. A folder
is executable is the ``functional`` attribute is not ``None``. The
attribute should be set to a pickleable calleable. The parameters for the
calls should be inserted in the ``params`` attribute. Sub-folders can be
added using the :py:meth:`__div__` and :py:meth:`__setitem__`
methods. The latter offers the ability to access and set subfolders at
any point within the tree of folders from any subfolder. The executable
subfolders can also be iterated in a manner similar to a job-dictionary.
Finally, a folder can be executed `via` the :py:meth:`compute` method.
"""
def __init__(self):
    """ Creates an empty, parentless job-folder. """
    super(JobFolder, self).__init__()
    # NOTE: attributes are installed via object.__setattr__ because this
    # class overrides __setattr__ to route unknown names into ``params``;
    # going through super() bypasses that machinery.
    # List of subfolders (as in subdirectories).
    super(JobFolder, self).__setattr__("children", {})
    # This particular folder.
    super(JobFolder, self).__setattr__("params", {})
    # This particular folder is not set.
    super(JobFolder, self).__setattr__("_functional", None)
    # Parent folder.
    super(JobFolder, self).__setattr__("parent", None)
@property
def functional(self):
    """ Returns current functional.

    The functional is implemented as a property to make sure that it is
    either None or a pickleable callable. The functional is **deepcopied**
    from the input. In other words, this functional stored in the
    folderdictionary is no longuer the one given on input -- it is not a
    reference to the input. This parameter can never be truly deleted.

    >>> del folder.functional

    is equivalent to:

    >>> folder.functional = None

    .. note:: To store a reference to a global functional, one could do
       ``folder._functional = functional`` instead. However, modifying
       the input functional will affect the stored functional and
       vice-versa.
    """
    return self._functional

@functional.setter
def functional(self, value):
    # The dumps/loads round-trip both checks pickle-ability and produces
    # an independent copy of the callable.
    from pickle import dumps, loads  # ascertains pickle-ability, copies functional
    if value is not None and not hasattr(value, "__call__"):
        raise ValueError("folder.functional should be either None(no job) or a callable.")
    # ascertains pickle-ability
    try:
        string = dumps(value)
    except Exception as e:
        raise ValueError(
            "Could not pickle functional. Caught Error:\n{0}".format(e))
    logger.debug('jobfolder.functional.setter for name: %s ' % self.name)
    try:
        self._functional = loads(string)
    except Exception as e:
        raise ValueError("Could not reload pickled functional. Caught Error:\n{0}".format(e))

@functional.deleter
def functional(self): self._functional = None
@property
def name(self):
""" Returns the name of this dictionary as an absolute path. """
if self.parent is None:
return "/"
string = None
for key, item in self.parent.children.items():
if id(item) == id(self):
string = self.parent.name + key
break
if string is None:
raise RuntimeError("Could not determine the name of the dictionary.")
return string + '/'
@property
def is_executable(self):
""" True if functional is not None. """
return self.functional is not None
@property
def untagged_folders(self):
""" Returns a string with only untagged folders. """
result = "Folders: \n"
for name, folder in self.items():
if not folder.is_tagged:
result += " " + name + "\n"
return result
@property
def is_tagged(self):
""" True if current folder is tagged.
In practice, this is used to turn a folder *on* (untagged) or
*off* (tagged). The meaning of *tagged* is not enforced, so it could be
used for other purposes.
"""
return hasattr(self, "_tagged")
@property
def nbfolders(self):
""" Returns the number of folders in sub-tree. """
return len([0 for j, o in self.items()])
@property
def root(self):
""" Returns root dictionary. """
result = self
while result.parent is not None:
result = result.parent
return result
def __getitem__(self, index):
    """ Returns the sub-folder at *index* (a '/'-separated path).

    Absolute paths ('/...') are resolved from the root folder and '..'
    moves to the parent.  Raises KeyError when the folder does not
    exist (contrary to __div__, nothing is created).
    """
    from re import split
    from os.path import normpath
    index = normpath(str(index))
    if index == "" or index is None or index == ".":
        return self
    if index[0] == "/":
        # Absolute path: restart the lookup from the root folder.
        return self.root[index[1:]]
    result = self
    # Split on '/' except when escaped as '\/'.
    names = split(r"(?<!\\)/", index)
    for i, name in enumerate(names):
        if name == "..":
            if result.parent is None:
                raise KeyError("Cannot go below root level.")
            result = result.parent
        elif name in result.children:
            result = result.children[name]
        else:
            raise KeyError("folder " + index + " does not exist.")
    return result
def __delitem__(self, index):
    """ Removes and returns the sub-folder at *index*.

    Raises KeyError when the folder does not exist, when the target is
    this folder itself, or when the target is one of this folder's
    ancestors (deleting either would orphan ``self``).
    """
    from os.path import normpath, relpath
    index = normpath(index)
    try:
        deletee = self.__getitem__(index)  # checks if exists.
    except KeyError:
        raise
    if isinstance(deletee, JobFolder):
        # Refuse to delete self or any ancestor of self.
        if id(self) == id(deletee):
            raise KeyError("Will not commit suicide.")
        parent = self.parent
        while parent is not None:
            if id(parent) == id(deletee):
                raise KeyError("Will not go Oedipus on you.")
            parent = parent.parent
    parent = self[index + "/.."]
    name = relpath(index, index + "/..")
    if name in parent.children:
        if id(self) == id(parent.children[name]):
            raise KeyError("Will not delete self.")
        return parent.children.pop(name)
    raise KeyError("folder " + index + " does not exist.")
def __setitem__(self, name, value):
""" Sets folder/subfolder description in the dictionary.
If the folder does not exist, will create it. A deepcopy_ of
value is inserted, rather than a simple shallow ref.
.. _deepcopy: http://docs.python.org/library/copy.html#copy.deepcopy
"""
from copy import deepcopy
from os.path import normpath, dirname, basename
index = normpath(str(name))
parentpath, childpath = dirname(index), basename(index)
if len(parentpath) != 0:
if parentpath not in self:
raise KeyError('Could not find parent folder {0}.'.format(parentpath))
mother = self[parentpath]
parent = self.parent
while parent is not None:
if parent is mother:
raise KeyError('Will not set parent folder of current folder.')
if len(childpath) == 0 or childpath == '.':
raise KeyError('Will not set current directory.')
if childpath == '..':
raise KeyError('Will not set parent directory.')
parent = self if len(parentpath) == 0 else self[parentpath]
parent.children[childpath] = deepcopy(value)
parent.children[childpath].parent = parent
def __div__(self, name):
""" Adds a folderdictionary to the tree.
Any *path* can be given as input. This is akin to doing `mkdir -p`.
The newly created folder folders is returned.
"""
from re import split
from os.path import normpath
index = normpath(str(name))
if index in ["", ".", None]:
return self
if index[0] == "/": # could create infinit loop.
result = self
while result.parent is not None:
result = result.parent
return result / index[1:]
names = split(r"(?<!\\)/", index)
result = self
for name in names:
if name == "..":
if result.parent is None:
raise RuntimeError('Cannot descend below root.')
result = result.parent
continue
elif name not in result.children:
result.children[name] = JobFolder()
result.children[name].parent = result
result = result.children[name]
return result
__truediv__ = __div__
def subfolders(self):
""" Sorted keys of the folders directly under this one. """
return sorted(self.children.keys())
def compute(self, **kwargs):
    """ Executes the functional in this particular folder.

    If this particular folder of the folder folders is not executable
    (e.g. ``self.functional is None``), then ``None`` is returned.

    If, on the other hand, this folder contains a real functional, then
    the latter is called taking the parameters stored in the folder as
    keyword arguments.  Furthermore, additional keyword arguments passed
    to this method are passed on to the functional, possibly overriding
    those stored in the folder.  The return from the functional is
    returned by this method.  In practice the call is as follows:

    >>> return self.functional(**self.params.copy().update(kwargs))
    """
    if not self.is_executable:
        return None
    # Folder parameters first, then the caller's overrides.
    params = self.params.copy()
    params.update(kwargs)
    logger.info('jobfolder.compute: self: %s' % self)
    logger.info('jobfolder.compute: kwargs: %s', kwargs)
    logger.info('jobfolder.compute: params: %s', params)
    logger.debug('jobfolder.compute: ===== start self.functional =====')
    logger.debug(repr(self.functional))
    logger.debug('jobfolder.compute: ===== end self.functional =====')
    logger.info('jobfolder.compute: type(self.functional): %s' % type(self.functional))
    logger.info('jobfolder.compute: before call')
    # This calls the dynamically compiled code
    # created by tools/makeclass: create_call_from_iter
    res = self.functional.__call__(**params)
    logger.critical('jobfolder.compute: after call')
    return res
def update(self, other, merge=False):
    """ Updates folder and tree with other.

    :param other:
        :py:class:`JobFolder` dictionary from which to update.
    :param bool merge:
        If false (default), then actual folders in ``other`` completely
        overwrite actual folders in ``self``.  If true, then ``params``
        in ``self`` is updated with ``params`` in ``other`` if either
        one is an executable folder, and ``functional`` in ``self`` is
        only replaced when ``other`` is executable.

    Updates the dictionaries of parameters and sub-folders.  This
    function is recurrent: subfolders are also updated.

    NOTE(review): the recursive call below does not forward ``merge``,
    so sub-folders are always updated in overwrite mode even when
    merge=True was requested at the top level -- confirm whether that
    is intended.
    """
    for key, value in other.children.items():
        if key in self:
            self[key].update(value)
        else:
            self[key] = value
    if not merge:
        # Overwrite mode: only executable 'other' folders replace state.
        if not other.is_executable:
            return
        self.params = other.params
        self.functional = other.functional
    else:
        # Merge mode: combine params; keep self's functional unless
        # other provides one.
        if not (self.is_executable or other.is_executable):
            return
        self.params.update(other.params)
        if other.functional is not None:
            self.functional = other.functional
def __str__(self):
result = "Folders: \n"
for name in self.keys():
result += " " + name + "\n"
return result
def tag(self):
""" Tags this folder. """
if self.is_executable:
super(JobFolder, self).__setattr__("_tagged", True)
def untag(self):
""" Untags this folder. """
if hasattr(self, "_tagged"):
self.__delattr__("_tagged")
def __delattr__(self, name):
""" Deletes folder attribute. """
if name in self.__dict__:
return self.__dict__.pop(name)
if name in self.params:
return self.params.pop(name)
raise AttributeError("Unknown folder attribute " + name + ".")
def __getattr__(self, name):
""" Returns folder parameter.
Folder parameters stored in :py:attr:`Jobdict.params` can also be accessed
via the ``.`` operator.
"""
if name in self.params:
return self.params[name]
raise AttributeError("Unknown folder attribute " + name + ".")
def __setattr__(self, name, value):
""" Sets folder parameter.
Folder parameters stored in :py:attr:`Jobdict.params` can also be accessed
via the ``.`` operator.
"""
from pickle import dumps
if name in self.params:
try:
dumps(value)
except Exception as e:
raise ValueError("Could not pickle folder-parameter. Caught error:\n{0}".format(e))
else:
self.params[name] = value
else:
super(JobFolder, self).__setattr__(name, value)
def __dir__(self):
from itertools import chain
result = chain([u for u in self.__dict__ if u[0] != '_'],
[u for u in dir(self.__class__) if u[0] != '_'],
[u for u in self.params.keys() if u[0] != '_'])
return list(set(result))
def __getstate__(self):
d = self.__dict__.copy()
params = d.pop("params")
return d, params
def __setstate__(self, args):
super(JobFolder, self).__setattr__("params", args[1])
self.__dict__.update(args[0])
def items(self, prefix=''):
""" Iterates over executable sub-folders.
Iterates over all executable subfolders. A subfolder is executable if it
holds a functional to execute.
:param str prefix:
Prefix to add to the name of this folder. Convenient when iterating
over a folder folders with the intention of executing the folders it
contains.
:return: yields (directory, folder):
- name of this folder, prefixed with ``prefix``.
- folder is an executable :py:class:`Folderdict`.
"""
from os.path import join
# Yield this folder if it exists.
if self.is_executable:
yield prefix, self
# Walk throught children folderdict.
for name in self.subfolders():
for u in self[name].items(join(prefix, name)):
yield u
def iterleaves(self):
""" Iterates over end of sub-trees. """
# Yield this folder if it exists.
if len(self.children) == 0:
yield self.name
# Walk throught children folderdict.
for name in self.children:
for u in self[name].iterleaves():
yield u
def values(self):
""" Iterates over all executable sub-folders. """
for name, folder in self.items():
yield folder
def keys(self):
""" Iterates over names of all executable subfolders. """
for name, folder in self.items():
yield name
__iter__ = keys
""" Iterator over keys. """
def __len__(self):
""" Number of executable jobs in jobfolder """
for i, u in enumerate(self.items()):
pass
return i + 1
def __contains__(self, index):
""" Returns true if index a branch in the folder folders. """
from re import split
from os.path import normpath
index = normpath(index)
if index == '/':
return True
if index[0] == '/':
return index[1:] in self.root
names = split(r"(?<!\\)/", index)
if len(names) == 0:
return False
if len(names) == 1:
return names[0] in self.children
if names[0] not in self.children:
return False
new_index = normpath(index[len(names[0]) + 1:])
if len(new_index) == 0:
return True
return new_index in self[names[0]]
def __copy__(self):
    """ Performs a shallow copy of this folder folders.

    Shallow copies are made of the internal children and params
    dictionaries; the functional and the values held in params remain the
    same objects as in self. Each sub-branch of the returned dictionary is
    a shallow copy of the corresponding sub-branch of self.

    The returned dictionary does not have a parent!
    """
    from copy import copy
    clone = JobFolder()
    # The functional is shared; the params dict itself is a fresh copy.
    clone._functional = self._functional
    clone.params = self.params.copy()
    clone.parent = None
    # Shallow-copy each sub-branch and re-parent it onto the clone.
    for child_name, child in self.children.items():
        branch = copy(child)
        branch.parent = clone
        clone.children[child_name] = branch
    # Carry over any remaining instance attributes verbatim.
    leftover = self.__dict__.copy()
    for key in ('params', 'parent', 'children', '_functional'):
        leftover.pop(key)
    clone.__dict__.update(leftover)
    return clone
|
pylada/pylada-light
|
src/pylada/jobfolder/jobfolder.py
|
Python
|
gpl-3.0
| 19,629
|
[
"CRYSTAL",
"VASP"
] |
104e27371f25aec9c307e1ce0e55e671996591d94daab9ca513a1138b535d5e9
|
# The help text for various thresholding options whose code resides here is in modules/identify.py
from __future__ import absolute_import
from __future__ import division

import inspect
import math

import numpy as np
import scipy.interpolate
import scipy.ndimage
import scipy.sparse
import scipy.stats
from six.moves import range
from six.moves import zip

from .filter import stretch, unstretch
from .otsu import otsu, entropy, otsu3, entropy3
from .smooth import smooth_with_noise
# Threshold method identifiers.  Each base method (e.g. "Otsu") is paired
# with a modifier ("Global", "Adaptive", "PerObject") to form a full method
# name such as "Otsu Global".
TM_OTSU = "Otsu"
TM_OTSU_GLOBAL = "Otsu Global"
TM_OTSU_ADAPTIVE = "Otsu Adaptive"
TM_OTSU_PER_OBJECT = "Otsu PerObject"
TM_MOG = "MoG"
TM_MOG_GLOBAL = "MoG Global"
TM_MOG_ADAPTIVE = "MoG Adaptive"
TM_MOG_PER_OBJECT = "MoG PerObject"
TM_BACKGROUND = "Background"
TM_BACKGROUND_GLOBAL = "Background Global"
TM_BACKGROUND_ADAPTIVE = "Background Adaptive"
TM_BACKGROUND_PER_OBJECT = "Background PerObject"
TM_ROBUST_BACKGROUND = "RobustBackground"
TM_ROBUST_BACKGROUND_GLOBAL = "RobustBackground Global"
TM_ROBUST_BACKGROUND_ADAPTIVE = "RobustBackground Adaptive"
TM_ROBUST_BACKGROUND_PER_OBJECT = "RobustBackground PerObject"
TM_RIDLER_CALVARD = "RidlerCalvard"
TM_RIDLER_CALVARD_GLOBAL = "RidlerCalvard Global"
TM_RIDLER_CALVARD_ADAPTIVE = "RidlerCalvard Adaptive"
TM_RIDLER_CALVARD_PER_OBJECT = "RidlerCalvard PerObject"
TM_KAPUR = "Kapur"
TM_KAPUR_GLOBAL = "Kapur Global"
TM_KAPUR_ADAPTIVE = "Kapur Adaptive"
TM_KAPUR_PER_OBJECT = "Kapur PerObject"
TM_MCT = "MCT"
TM_MCT_GLOBAL = "MCT Global"
TM_MCT_ADAPTIVE = "MCT Adaptive"
TM_MCT_PER_OBJECT = "MCT PerObject"
# Methods with no automatic computation: a fixed value, a measurement, or a
# precomputed binary image.
TM_MANUAL = "Manual"
TM_MEASUREMENT = "Measurement"
TM_BINARY_IMAGE = "Binary image"
"""Compute a single threshold for the entire image"""
TM_GLOBAL = "Global"
"""Compute a local thresholding matrix of the same size as the image"""
TM_ADAPTIVE = "Adaptive"
"""Compute a threshold for each labeled object in the image"""
TM_PER_OBJECT = "PerObject"
# Base methods that support the modifiers above.
TM_METHODS = [
    TM_OTSU,
    TM_MOG,
    TM_BACKGROUND,
    TM_ROBUST_BACKGROUND,
    TM_RIDLER_CALVARD,
    TM_KAPUR,
    TM_MCT,
]
# Convenience list of the "<method> Global" combinations.
TM_GLOBAL_METHODS = [" ".join((x, TM_GLOBAL)) for x in TM_METHODS]
def get_threshold(
    threshold_method,
    threshold_modifier,
    image,
    mask=None,
    labels=None,
    threshold_range_min=None,
    threshold_range_max=None,
    threshold_correction_factor=1.0,
    adaptive_window_size=10,
    **kwargs
):
    """Compute a threshold for an image

    threshold_method - one of the TM_ methods above
    threshold_modifier - TM_GLOBAL to calculate one threshold over entire image
                         TM_ADAPTIVE to calculate a per-pixel threshold
                         TM_PER_OBJECT to calculate a different threshold for
                         each object
    image - a NxM numpy array of the image data

    Returns a tuple of local_threshold and global_threshold where:
    * global_threshold is the single number calculated using the threshold
      method over the whole image
    * local_threshold is the global_threshold for global methods. For adaptive
      and per-object thresholding, local_threshold is a matrix of threshold
      values representing the threshold to be applied at each pixel of the
      image.

    Different methods have optional and required parameters:

    Required:
    TM_PER_OBJECT:
      labels - a labels matrix that defines the extents of the individual
               objects to be thresholded separately.

    Optional:
    All:
      mask - a mask of the significant pixels in the image
      threshold_range_min, threshold_range_max - constrain the threshold
          values to be examined to values between these limits
      threshold_correction_factor - the calculated threshold is multiplied
          by this number to get the final threshold
    TM_MOG (mixture of Gaussians):
      object_fraction - fraction of image expected to be occupied by objects
          (pixels that are above the threshold)
    TM_OTSU - We have algorithms derived from Otsu. There is a three-class
        version of Otsu in addition to the two class. There is also
        an entropy measure in addition to the weighted variance.
        two_class_otsu - assume that the distribution represents
                         two intensity classes if true, three if false.
        use_weighted_variance - use Otsu's weighted variance if true,
                                an entropy measure if false
        assign_middle_to_foreground - assign pixels in the middle class
                                      in a three-class Otsu to the foreground
                                      if true or the background if false.
    """
    global_threshold = get_global_threshold(threshold_method, image, mask, **kwargs)
    global_threshold *= threshold_correction_factor
    if threshold_range_min is not None:
        global_threshold = max(global_threshold, threshold_range_min)
    if threshold_range_max is not None:
        global_threshold = min(global_threshold, threshold_range_max)

    if threshold_modifier == TM_GLOBAL:
        local_threshold = global_threshold
    elif threshold_modifier == TM_ADAPTIVE:
        local_threshold = get_adaptive_threshold(
            threshold_method,
            image,
            global_threshold,
            mask,
            adaptive_window_size=adaptive_window_size,
            **kwargs
        )
        local_threshold = local_threshold * threshold_correction_factor
    elif threshold_modifier == TM_PER_OBJECT:
        local_threshold = get_per_object_threshold(
            threshold_method,
            image,
            global_threshold,
            mask,
            labels,
            threshold_range_min,
            threshold_range_max,
            **kwargs
        )
        local_threshold = local_threshold * threshold_correction_factor
    else:
        raise NotImplementedError(
            "%s thresholding is not implemented" % (threshold_modifier)
        )

    if isinstance(local_threshold, np.ndarray):
        #
        # Constrain thresholds to within .7 to 1.5 of the global threshold.
        #
        # BUG FIX: the original passed a possibly-None bound straight to
        # max()/min(), which works in Python 2 (None compares low) but
        # raises TypeError in Python 3.  Treat None as "no user bound".
        #
        if threshold_range_min is None:
            threshold_range_min = global_threshold * 0.7
        else:
            threshold_range_min = max(threshold_range_min, global_threshold * 0.7)
        if threshold_range_max is None:
            threshold_range_max = global_threshold * 1.5
        else:
            threshold_range_max = min(threshold_range_max, global_threshold * 1.5)
        local_threshold[local_threshold < threshold_range_min] = threshold_range_min
        local_threshold[local_threshold > threshold_range_max] = threshold_range_max
        # Pixels outside every object get an unreachable threshold of 1.0.
        if (threshold_modifier == TM_PER_OBJECT) and (labels is not None):
            local_threshold[labels == 0] = 1.0
    else:
        if threshold_range_min is not None:
            local_threshold = max(local_threshold, threshold_range_min)
        if threshold_range_max is not None:
            local_threshold = min(local_threshold, threshold_range_max)
    return local_threshold, global_threshold
def get_global_threshold(threshold_method, image, mask=None, **kwargs):
    """Compute a single threshold over the whole image."""
    if mask is not None and not np.any(mask):
        # Nothing significant to threshold: return a threshold nothing passes.
        return 1
    # Map the method name onto its implementation.
    dispatch = {
        TM_OTSU: get_otsu_threshold,
        TM_MOG: get_mog_threshold,
        TM_BACKGROUND: get_background_threshold,
        TM_ROBUST_BACKGROUND: get_robust_background_threshold,
        TM_RIDLER_CALVARD: get_ridler_calvard_threshold,
        TM_KAPUR: get_kapur_threshold,
        TM_MCT: get_maximum_correlation_threshold,
    }
    try:
        fn = dispatch[threshold_method]
    except KeyError:
        raise NotImplementedError("%s algorithm not implemented" % (threshold_method))
    # Forward only the keyword arguments the chosen implementation accepts.
    kwargs = {k: v for k, v in kwargs.items() if k in fn.args}
    return fn(image, mask, **kwargs)
def get_adaptive_threshold(
    threshold_method, image, threshold, mask=None, adaptive_window_size=10, **kwargs
):
    """Given a global threshold, compute a threshold per pixel

    Break the image into blocks, computing the threshold per block.
    Afterwards, constrain the block threshold to .7 T < t < 1.5 T.

    Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
    blocks.

    threshold_method - TM_* method handed to get_global_threshold per block
    image - NxM image to threshold
    threshold - the global threshold (not referenced in this body; the
        .7/1.5 clamping happens in the caller, get_threshold)
    mask - mask of significant pixels, cropped per block
    adaptive_window_size - block edge length in pixels

    Returns a matrix of per-pixel thresholds, blended between block centers
    with a bivariate spline.
    """
    # for the X and Y direction, find the # of blocks, given the
    # size constraints
    image_size = np.array(image.shape[:2], dtype=int)
    nblocks = image_size // adaptive_window_size
    if any(n < 2 for n in nblocks):
        # Need at least a 2x2 grid of blocks for the spline below.
        raise ValueError(
            "Adaptive window cannot exceed 50%% of an image dimension.\n"
            "Window of %dpx is too large for a %sx%s image" % (
                adaptive_window_size, image_size[1], image_size[0]
            )
        )
    #
    # Use a floating point block size to apportion the roundoff
    # roughly equally to each block
    #
    increment = np.array(image_size, dtype=float) / np.array(nblocks, dtype=float)
    #
    # Put the answer here
    #
    thresh_out = np.zeros(image_size, image.dtype)
    #
    # Loop once per block, computing the "global" threshold within the
    # block.
    #
    block_threshold = np.zeros([nblocks[0], nblocks[1]])
    for i in range(nblocks[0]):
        i0 = int(i * increment[0])
        i1 = int((i + 1) * increment[0])
        for j in range(nblocks[1]):
            j0 = int(j * increment[1])
            j1 = int((j + 1) * increment[1])
            block = image[i0:i1, j0:j1]
            block_mask = None if mask is None else mask[i0:i1, j0:j1]
            block_threshold[i, j] = get_global_threshold(
                threshold_method, block, mask=block_mask, **kwargs
            )
    #
    # Use a cubic spline to blend the thresholds across the image to avoid image artifacts
    #
    # The spline order is capped by the number of blocks per axis.
    spline_order = min(3, np.min(nblocks) - 1)
    # Knots sit at block centers; the bbox covers pixel centers of the image.
    xStart = int(increment[0] / 2)
    xEnd = int((nblocks[0] - 0.5) * increment[0])
    yStart = int(increment[1] / 2)
    yEnd = int((nblocks[1] - 0.5) * increment[1])
    xtStart = 0.5
    xtEnd = image.shape[0] - 0.5
    ytStart = 0.5
    ytEnd = image.shape[1] - 0.5
    block_x_coords = np.linspace(xStart, xEnd, nblocks[0])
    block_y_coords = np.linspace(yStart, yEnd, nblocks[1])
    adaptive_interpolation = scipy.interpolate.RectBivariateSpline(
        block_x_coords,
        block_y_coords,
        block_threshold,
        bbox=(xtStart, xtEnd, ytStart, ytEnd),
        kx=spline_order,
        ky=spline_order,
    )
    # Evaluate the spline at every pixel center of the output matrix.
    thresh_out_x_coords = np.linspace(
        0.5, int(nblocks[0] * increment[0]) - 0.5, thresh_out.shape[0]
    )
    thresh_out_y_coords = np.linspace(
        0.5, int(nblocks[1] * increment[1]) - 0.5, thresh_out.shape[1]
    )
    thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords)
    return thresh_out
def get_per_object_threshold(
    method,
    image,
    threshold,
    mask=None,
    labels=None,
    threshold_range_min=None,
    threshold_range_max=None,
    **kwargs
):
    """Return a matrix giving threshold per pixel calculated per-object

    method - TM_* thresholding method applied within each object's extent
    image - image to be thresholded
    mask - mask out "don't care" pixels
    labels - a label mask indicating object boundaries
    threshold - the global threshold (not used in this body; kept for
        interface compatibility with get_threshold)

    Pixels outside every object keep a threshold of 1.
    """
    if labels is None:
        labels = np.ones(image.shape, int)
    if mask is not None:
        # BUG FIX: the original zeroed masked-out pixels in the caller's
        # labels array in place; build a new array so the input is
        # untouched.
        labels = np.where(mask, labels, 0)
    label_extents = scipy.ndimage.find_objects(labels, np.max(labels))
    local_threshold = np.ones(image.shape, image.dtype)
    for i, extent in enumerate(label_extents, start=1):
        label_mask = labels[extent] == i
        if mask is not None:
            label_mask = np.logical_and(mask[extent], label_mask)
        values = image[extent]
        per_object_threshold = get_global_threshold(
            method, values, mask=label_mask, **kwargs
        )
        # extent is a tuple of slices, so this indexes a view in place.
        local_threshold[extent][label_mask] = per_object_threshold
    return local_threshold
def get_otsu_threshold(
    image,
    mask=None,
    two_class_otsu=True,
    use_weighted_variance=True,
    assign_middle_to_foreground=True,
):
    """Threshold an image with Otsu's method or its entropy/3-class variants.

    image - image to threshold
    mask  - mask of significant pixels (default: all)
    two_class_otsu - True for a two-class split, False for three-class
    use_weighted_variance - True for Otsu's weighted-variance criterion,
        False for the entropy criterion
    assign_middle_to_foreground - in the three-class case, True counts the
        middle class as foreground (use the lower cut t1), False as
        background (use the upper cut t2)
    """
    if mask is not None:
        image = image[mask]
    else:
        image = np.array(image.flat)
    # Negative values are not valid intensities; discard them.
    image = image[image >= 0]
    if len(image) == 0:
        # Nothing to threshold: return a threshold nothing passes.
        return 1
    # Threshold in log space, then map the result back to linear space.
    image, d = log_transform(image)
    if two_class_otsu:
        if use_weighted_variance:
            threshold = otsu(image)
        else:
            threshold = entropy(image)
    else:
        if use_weighted_variance:
            t1, t2 = otsu3(image)
        else:
            t1, t2 = entropy3(image)
        threshold = t1 if assign_middle_to_foreground else t2
    threshold = inverse_log_transform(threshold, d)
    return threshold


# inspect.getargspec was removed in Python 3.11; getfullargspec is the
# supported replacement and returns the same .args list.
get_otsu_threshold.args = inspect.getfullargspec(get_otsu_threshold).args
def get_mog_threshold(image, mask=None, object_fraction=0.2):
    """Compute a background using a mixture of gaussians

    This function finds a suitable
    threshold for the input image Block. It assumes that the pixels in the
    image belong to either a background class or an object class. 'pObject'
    is an initial guess of the prior probability of an object pixel, or
    equivalently, the fraction of the image that is covered by objects.
    Essentially, there are two steps. First, a number of Gaussian
    distributions are estimated to match the distribution of pixel
    intensities in OrigImage. Currently 3 Gaussian distributions are
    fitted, one corresponding to a background class, one corresponding to
    an object class, and one distribution for an intermediate class. The
    distributions are fitted using the Expectation-Maximization (EM)
    algorithm, a procedure referred to as Mixture of Gaussians modeling.
    When the 3 Gaussian distributions have been fitted, it's decided
    whether the intermediate class models background pixels or object
    pixels based on the probability of an object pixel 'pObject' given by
    the user.

    image - image to threshold
    mask  - mask of significant pixels (default: all)
    object_fraction - expected fraction of foreground pixels, 0 < f < 1
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # NOTE: np.product was removed in NumPy 2.0; ndarray.size is equivalent.
    pixel_count = cropped_image.size
    max_count = 512 ** 2  # maximum # of pixels analyzed
    #
    # We need at least 3 pixels to keep from crashing because the highest
    # and lowest are chopped out below.
    #
    object_fraction = float(object_fraction)
    background_fraction = 1.0 - object_fraction
    if pixel_count < 3 / min(object_fraction, background_fraction):
        return 1
    if np.max(cropped_image) == np.min(cropped_image):
        return cropped_image[0]
    # Subsample very large images for speed (deterministic seed).
    if pixel_count > max_count:
        np.random.seed(0)
        pixel_indices = np.random.permutation(pixel_count)[:max_count]
        cropped_image = cropped_image[pixel_indices]
    # Initialize mean and standard deviations of the three Gaussian
    # distributions by looking at the pixel intensities in the original
    # image and by considering the percentage of the image that is
    # covered by object pixels. Class 1 is the background class and Class
    # 3 is the object class. Class 2 is an intermediate class and we will
    # decide later if it encodes background or object pixels. Also, for
    # robustness the we remove 1% of the smallest and highest intensities
    # in case there are any quantization effects that have resulted in
    # unnaturally many 0:s or 1:s in the image.
    cropped_image.sort()
    one_percent = (cropped_image.size + 99) // 100
    cropped_image = cropped_image[one_percent:-one_percent]
    pixel_count = cropped_image.size
    # Guess at the class means for the 3 classes: background,
    # in-between and object
    bg_pixel = cropped_image[int(round(pixel_count * background_fraction / 2.0))]
    fg_pixel = cropped_image[int(round(pixel_count * (1 - object_fraction / 2)))]
    class_mean = np.array([bg_pixel, (bg_pixel + fg_pixel) / 2, fg_pixel])
    class_std = np.ones((3,)) * 0.15
    # Initialize prior probabilities of a pixel belonging to each class.
    # The intermediate class steals some probability from the background
    # and object classes.
    class_prob = np.array(
        [3.0 / 4.0 * background_fraction, 1.0 / 4.0, 3.0 / 4.0 * object_fraction]
    )
    # Expectation-Maximization algorithm for fitting the three Gaussian
    # distributions/classes to the data. Note, the code below is general
    # and works for any number of classes. Iterate until parameters don't
    # change anymore.
    class_count = np.prod(class_mean.shape)
    #
    # Do a coarse iteration on subsampled data and a fine iteration on the real
    # data
    #
    r = np.random.RandomState()
    # Seed deterministically from the data itself so results are repeatable.
    r.seed(np.frombuffer(cropped_image[:100].data, np.uint8).tolist())
    for data in (
        r.permutation(cropped_image)[0 : (len(cropped_image) // 10)],
        cropped_image,
    ):
        delta = 1
        pixel_count = len(data)
        while delta > 0.001:
            old_class_mean = class_mean.copy()
            # E-step: probability of each pixel belonging to each class.
            pixel_class_prob = np.ndarray((pixel_count, class_count))
            for k in range(class_count):
                norm = scipy.stats.norm(class_mean[k], class_std[k])
                pixel_class_prob[:, k] = class_prob[k] * norm.pdf(data)
            pixel_class_normalizer = np.sum(pixel_class_prob, 1) + 0.000000000001
            for k in range(class_count):
                pixel_class_prob[:, k] = pixel_class_prob[:, k] / pixel_class_normalizer
                # M-step: update parameters in Gaussian distributions
                class_prob[k] = np.mean(pixel_class_prob[:, k])
                class_mean[k] = np.sum(pixel_class_prob[:, k] * data) / (
                    class_prob[k] * pixel_count
                )
                class_std[k] = (
                    math.sqrt(
                        np.sum(pixel_class_prob[:, k] * (data - class_mean[k]) ** 2)
                        / (pixel_count * class_prob[k])
                    )
                    + 0.000001
                )
            delta = np.sum(np.abs(old_class_mean - class_mean))
    # Now the Gaussian distributions are fitted and we can describe the
    # histogram of the pixel intensities as the sum of these Gaussian
    # distributions. To find a threshold we first have to decide if the
    # intermediate class 2 encodes background or object pixels. This is
    # done by choosing the combination of class probabilities "class_prob"
    # that best matches the user input "object_fraction".
    # Construct an equally spaced array of values between the background
    # and object mean
    ndivisions = 10000
    level = (
        np.arange(ndivisions) * ((class_mean[2] - class_mean[0]) / ndivisions)
        + class_mean[0]
    )
    class_gaussian = np.ndarray((ndivisions, class_count))
    for k in range(class_count):
        norm = scipy.stats.norm(class_mean[k], class_std[k])
        class_gaussian[:, k] = class_prob[k] * norm.pdf(level)
    if abs(class_prob[1] + class_prob[2] - object_fraction) < abs(
        class_prob[2] - object_fraction
    ):
        # classifying the intermediate as object more closely models
        # the user's desired object fraction
        background_distribution = class_gaussian[:, 0]
        object_distribution = class_gaussian[:, 1] + class_gaussian[:, 2]
    else:
        background_distribution = class_gaussian[:, 0] + class_gaussian[:, 1]
        object_distribution = class_gaussian[:, 2]
    # Now, find the threshold at the intersection of the background
    # distribution and the object distribution.
    index = np.argmin(np.abs(background_distribution - object_distribution))
    return level[index]


# inspect.getargspec was removed in Python 3.11; use getfullargspec.
get_mog_threshold.args = inspect.getfullargspec(get_mog_threshold).args
def get_background_threshold(image, mask=None):
    """Get threshold based on the mode of the image

    The threshold is calculated by calculating the mode and multiplying by
    2 (an arbitrary empirical factor). The user will presumably adjust the
    multiplication factor as needed.

    image - image to threshold
    mask  - mask of significant pixels (default: all)
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # NOTE: np.product was removed in NumPy 2.0; ndarray.size is equivalent.
    if cropped_image.size == 0:
        return 0
    img_min = np.min(cropped_image)
    img_max = np.max(cropped_image)
    if img_min == img_max:
        return cropped_image[0]
    # Only do the histogram between values a bit removed from saturation
    robust_min = 0.02 * (img_max - img_min) + img_min
    robust_max = 0.98 * (img_max - img_min) + img_min
    nbins = 256
    cropped_image = cropped_image[
        np.logical_and(cropped_image > robust_min, cropped_image < robust_max)
    ]
    if len(cropped_image) == 0:
        return robust_min
    h = scipy.ndimage.histogram(cropped_image, robust_min, robust_max, nbins)
    index = np.argmax(h)
    cutoff = float(index) / float(nbins - 1)
    #
    # If we have a low (or almost no) background, the cutoff will be
    # zero since the background falls into the lowest bin. We want to
    # offset by the robust cutoff factor of .02. We rescale by 1.04
    # to account for the 0.02 at the top and bottom.
    #
    cutoff = (cutoff + 0.02) / 1.04
    # Mode of the histogram, times the empirical factor of 2.
    return img_min + cutoff * 2 * (img_max - img_min)


# inspect.getargspec was removed in Python 3.11; use getfullargspec.
get_background_threshold.args = inspect.getfullargspec(get_background_threshold).args
def get_robust_background_threshold(
    image,
    mask=None,
    lower_outlier_fraction=0.05,
    upper_outlier_fraction=0.05,
    deviations_above_average=2.0,
    average_fn=np.mean,
    variance_fn=np.std,
):
    """Calculate threshold based on mean & standard deviation

    The threshold is calculated by trimming the top and bottom 5% of
    pixels off the image, then calculating the mean and standard deviation
    of the remaining image. The threshold is then set at 2 (empirical
    value) standard deviations above the mean.

    image - the image to threshold
    mask - mask of pixels to consider (default = all pixels)
    lower_outlier_fraction - after ordering the pixels by intensity, remove
        the pixels from 0 to len(image) * lower_outlier_fraction from
        the threshold calculation (default = .05).
    upper_outlier_fraction - remove the pixels from
        len(image) * (1 - upper_outlier_fraction) to len(image) from
        consideration (default = .05).
    deviations_above_average - calculate the standard deviation or MAD and
        multiply by this number and add to the average to get the final
        threshold (default = 2)
    average_fn - function used to calculate the average intensity (e.g.
        np.mean, np.median or some sort of mode function). Default = np.mean
    variance_fn - function used to calculate the amount of variance.
        Default = np.std
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # NOTE: np.product was removed in NumPy 2.0; ndarray.size is equivalent.
    n_pixels = cropped_image.size
    if n_pixels < 3:
        return 0
    # cropped_image is always a fresh array (flat copy or fancy-indexed),
    # so the in-place sort cannot affect the caller's data.
    cropped_image.sort()
    if cropped_image[0] == cropped_image[-1]:
        return cropped_image[0]
    low_chop = int(round(n_pixels * lower_outlier_fraction))
    hi_chop = n_pixels - int(round(n_pixels * upper_outlier_fraction))
    im = cropped_image if low_chop == 0 else cropped_image[low_chop:hi_chop]
    mean = average_fn(im)
    sd = variance_fn(im)
    return mean + sd * deviations_above_average


# inspect.getargspec was removed in Python 3.11; use getfullargspec.
get_robust_background_threshold.args = inspect.getfullargspec(
    get_robust_background_threshold
).args
def mad(a):
    """Calculate the median absolute deviation of a sample

    a - a numpy array-like collection of values

    returns the median of the deviation of a from its median.
    """
    # np.asfarray was removed in NumPy 2.0; coerce non-float inputs to
    # float64 explicitly (floating inputs keep their dtype, as before).
    a = np.ravel(a)
    if not np.issubdtype(a.dtype, np.floating):
        a = a.astype(np.float64)
    return np.median(np.abs(a - np.median(a)))
def binned_mode(a):
    """Calculate a binned mode of a sample

    a - array of values

    This routine bins the sample into np.sqrt(len(a)) bins. This is a
    number that is a compromise between fineness of measurement and
    the stochastic nature of counting which roughly scales as the
    square root of the sample size.
    """
    a = np.asarray(a).flatten()
    a_min = np.min(a)
    a_max = np.max(a)
    if a_min == a_max:
        # Degenerate sample: every value identical (the original divided
        # by zero here).
        return a_min
    n_bins = np.ceil(np.sqrt(len(a)))
    b = ((a - a_min) / (a_max - a_min) * n_bins).astype(int)
    # BUG FIX: samples equal to a_max landed in bin n_bins, which made the
    # percentile argument below exceed 100 and raise; clamp them into the
    # top bin instead.
    b = np.minimum(b, int(n_bins) - 1)
    idx = np.argmax(np.bincount(b))
    return np.percentile(a, 100 * float(idx + 0.5) / n_bins)
def get_ridler_calvard_threshold(image, mask=None):
    """Find a threshold using the method of Ridler and Calvard

    The reference for this method is:
    "Picture Thresholding Using an Iterative Selection Method"
    by T. Ridler and S. Calvard, in IEEE Transactions on Systems, Man and
    Cybernetics, vol. 8, no. 8, August 1978.

    image - image to threshold
    mask  - mask of significant pixels (default: all)
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # NOTE: np.product was removed in NumPy 2.0; ndarray.size is equivalent.
    if cropped_image.size < 3:
        return 0
    if np.min(cropped_image) == np.max(cropped_image):
        return cropped_image[0]
    # We want to limit the dynamic range of the image to 256. Otherwise,
    # an image with almost all values near zero can give a bad result.
    # (cropped_image is a fresh array, so clipping in place is safe.)
    min_val = np.max(cropped_image) / 256
    cropped_image[cropped_image < min_val] = min_val
    im = np.log(cropped_image)
    min_val = np.min(im)
    max_val = np.max(im)
    im = (im - min_val) / (max_val - min_val)
    pre_thresh = 0
    # This method needs an initial value to start iterating. Using
    # graythresh (Otsu's method) is probably not the best, because the
    # Ridler Calvard threshold ends up being too close to this one and in
    # most cases has the same exact value.
    new_thresh = otsu(im)
    delta = 0.00001
    # Iterate: re-threshold at the midpoint of the two class means until
    # the threshold stabilizes.
    while abs(pre_thresh - new_thresh) > delta:
        pre_thresh = new_thresh
        mean1 = np.mean(im[im < pre_thresh])
        mean2 = np.mean(im[im >= pre_thresh])
        new_thresh = np.mean([mean1, mean2])
    # Map the normalized log threshold back to linear intensities.
    return math.exp(min_val + (max_val - min_val) * new_thresh)


# inspect.getargspec was removed in Python 3.11; use getfullargspec.
get_ridler_calvard_threshold.args = inspect.getfullargspec(
    get_ridler_calvard_threshold
).args
def get_kapur_threshold(image, mask=None):
    """The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.

    image - image to threshold
    mask  - mask of significant pixels (default: all)

    Returns the threshold in linear intensity units.
    """
    cropped_image = np.array(image.flat) if mask is None else image[mask]
    # NOTE: np.product was removed in NumPy 2.0; ndarray.size is equivalent.
    if cropped_image.size < 3:
        return 0
    if np.min(cropped_image) == np.max(cropped_image):
        return cropped_image[0]
    # Work in log space; dither with ~8 bits of noise to de-collide
    # quantized intensities.
    log_image = np.log2(smooth_with_noise(cropped_image, 8))
    min_log_image = np.min(log_image)
    max_log_image = np.max(log_image)
    histogram = scipy.ndimage.histogram(log_image, min_log_image, max_log_image, 256)
    histogram_values = (
        min_log_image
        + (max_log_image - min_log_image) * np.arange(256, dtype=float) / 255
    )
    # drop any zero bins
    keep = histogram != 0
    histogram = histogram[keep]
    histogram_values = histogram_values[keep]
    # Corner case: a single populated bin leaves nothing to split.
    # (BUG FIX: the original tested np.product(histogram_values) == 1,
    # which compares the *product of the bin values* against one --
    # almost certainly a typo for a single-bin size test.)
    if histogram_values.size == 1:
        return 2 ** histogram_values[0]
    # Normalize to probabilities
    p = histogram.astype(float) / float(np.sum(histogram))
    # Find the probabilities totals up to and above each possible threshold.
    lo_sum = np.cumsum(p)
    hi_sum = lo_sum[-1] - lo_sum
    lo_e = np.cumsum(p * np.log2(p))
    hi_e = lo_e[-1] - lo_e
    # compute the entropies; non-finite entries (division by a zero mass)
    # are disqualified below, so silence the expected warnings.
    with np.errstate(divide="ignore", invalid="ignore"):
        lo_entropy = lo_e / lo_sum - np.log2(lo_sum)
        hi_entropy = hi_e / hi_sum - np.log2(hi_sum)
    sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]
    # np.Inf was removed in NumPy 2.0; np.inf is the canonical spelling.
    sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.inf
    entry = np.argmin(sum_entropy)
    # Convert the chosen log-space bin boundary back to linear intensities.
    return 2 ** ((histogram_values[entry] + histogram_values[entry + 1]) / 2)


# inspect.getargspec was removed in Python 3.11; use getfullargspec.
get_kapur_threshold.args = inspect.getfullargspec(get_kapur_threshold).args
def get_maximum_correlation_threshold(image, mask=None, bins=256):
    """Return the maximum correlation threshold of the image

    image - image to be thresholded
    mask - mask of relevant pixels
    bins - # of value bins to use

    This is an implementation of the maximum correlation threshold as
    described in Padmanabhan, "A novel algorithm for optimal image thresholding
    of biological data", Journal of Neuroscience Methods 193 (2010) p 380-384
    """
    if mask is not None:
        image = image[mask]
    image = image.ravel()
    nm = len(image)
    if nm == 0:
        return 0
    #
    # Bin the image
    #
    min_value = np.min(image)
    max_value = np.max(image)
    if min_value == max_value:
        return min_value
    image = ((image - min_value) * (bins - 1) / (max_value - min_value)).astype(int)
    histogram = np.bincount(image)
    #
    # Compute (j - mean) and (j - mean) ** 2
    #
    mean_value = np.mean(image)
    diff = np.arange(len(histogram)) - mean_value
    diff2 = diff * diff
    ndiff = histogram * diff
    ndiff2 = histogram * diff2
    #
    # This is the sum over all j of (j-mean)**2. It's a constant that could
    # be factored out, but I follow the method and use it anyway.
    #
    sndiff2 = np.sum(ndiff2)
    #
    # Compute the cumulative sum from i to m which is the cumsum at m
    # minus the cumsum at i-1
    #
    cndiff = np.cumsum(ndiff)
    numerator = np.hstack([[cndiff[-1]], cndiff[-1] - cndiff[:-1]])
    #
    # For the bottom, we need (Nm - Ni) * Ni / Nm
    #
    ni = nm - np.hstack([[0], np.cumsum(histogram[:-1])])  # number of pixels above i-1
    denominator = np.sqrt(sndiff2 * (nm - ni) * ni / nm)
    # Bins with a zero denominator cannot be the maximum; silence the
    # expected divide warnings and zero those entries out.
    with np.errstate(divide="ignore", invalid="ignore"):
        mct = numerator / denominator
    mct[denominator == 0] = 0
    my_bin = np.argmax(mct) - 1
    return min_value + my_bin * (max_value - min_value) / (bins - 1)


# inspect.getargspec was removed in Python 3.11; use getfullargspec.
get_maximum_correlation_threshold.args = inspect.getfullargspec(
    get_maximum_correlation_threshold
).args
def weighted_variance(image, mask, binary_image):
    """Compute the log-transformed variance of foreground and background

    image - intensity image used for thresholding
    mask - mask of ignored pixels
    binary_image - binary image marking foreground and background
    """
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground so the log is bounded
    #
    minval = np.max(image[mask]) / 256
    if minval == 0:
        return 0
    fg = np.log2(np.maximum(image[binary_image & mask], minval))
    bg = np.log2(np.maximum(image[(~binary_image) & mask], minval))
    # NOTE: np.product was removed in NumPy 2.0; ndarray.size is equivalent.
    nfg = fg.size
    nbg = bg.size
    if nfg == 0:
        return np.var(bg)
    elif nbg == 0:
        return np.var(fg)
    else:
        # Variance of each class, weighted by class size.
        return (np.var(fg) * nfg + np.var(bg) * nbg) / (nfg + nbg)
def sum_of_entropies(image, mask, binary_image):
    """Bin the foreground and background pixels and compute the entropy
    of the distribution of points among the bins

    image - intensity image
    mask - mask of significant pixels (NaNs in image are also excluded)
    binary_image - boolean partition of the image into foreground/background
    """
    mask = mask.copy()
    mask[np.isnan(image)] = False
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground
    #
    minval = np.max(image[mask]) / 256
    if minval == 0:
        return 0
    clamped_image = image.copy()
    clamped_image[clamped_image < minval] = minval
    #
    # Smooth image with -8 bits of noise
    #
    image = smooth_with_noise(clamped_image, 8)
    im_min = np.min(image)
    im_max = np.max(image)
    #
    # Figure out the bounds for the histogram
    #
    upper = np.log2(im_max)
    lower = np.log2(im_min)
    if upper == lower:
        # All values are the same, answer is log2 of # of pixels
        return math.log(np.sum(mask), 2)
    #
    # Create log-transformed lists of points in the foreground and background
    #
    fg = image[binary_image & mask]
    bg = image[(~binary_image) & mask]
    if len(fg) == 0 or len(bg) == 0:
        return 0
    log_fg = np.log2(fg)
    log_bg = np.log2(bg)
    #
    # Make these into histograms.  BUG FIX: the `normed` keyword was removed
    # from np.histogram in NumPy 1.24; plain counts (the default) are what
    # this code always requested anyway.
    #
    hfg = np.histogram(log_fg, 256, range=(lower, upper))[0]
    hbg = np.histogram(log_bg, 256, range=(lower, upper))[0]
    #
    # Drop empty bins
    #
    hfg = hfg[hfg > 0]
    hbg = hbg[hbg > 0]
    # Guard against an empty histogram so the log below stays finite.
    if hfg.size == 0:
        hfg = np.ones((1,), int)
    if hbg.size == 0:
        hbg = np.ones((1,), int)
    #
    # Normalize
    #
    hfg = hfg.astype(float) / float(np.sum(hfg))
    hbg = hbg.astype(float) / float(np.sum(hbg))
    #
    # Compute sum of entropies
    #
    return np.sum(hfg * np.log2(hfg)) + np.sum(hbg * np.log2(hbg))
def log_transform(image):
    """Renormalize image intensities to log space

    Returns a tuple of transformed image and a dictionary to be passed into
    inverse_log_transform. The minimum and maximum from the dictionary
    can be applied to an image by the inverse_log_transform to
    convert it back to its former intensity values.
    """
    orig_min, orig_max = scipy.ndimage.extrema(image)[:2]
    #
    # We add 1/2 bit noise to an 8 bit image to give the log a bottom
    #
    noise_min = orig_min + (orig_max - orig_min) / 256.0 + np.finfo(image.dtype).eps
    # np.maximum builds a fresh floored array, leaving the input untouched.
    limage = np.log(np.maximum(image, noise_min))
    log_min, log_max = scipy.ndimage.extrema(limage)[:2]
    d = {"noise_min": noise_min, "log_min": log_min, "log_max": log_max}
    return stretch(limage), d
def inverse_log_transform(image, d):
    """Convert the values in image back to the scale prior to log_transform

    image - an image or value or values similarly scaled to image
    d - object returned by log_transform
    """
    unstretched = unstretch(image, d["log_min"], d["log_max"])
    return np.exp(unstretched)
|
CellProfiler/centrosome
|
centrosome/threshold.py
|
Python
|
bsd-3-clause
| 33,646
|
[
"Gaussian"
] |
e5a42466ab5c8717d8f8013b6e176e246271df457f59a6cb5a1e0a60535439ab
|
import os
import unittest
from datetime import timedelta
import numpy as np
import netCDF4
from pyaxiom.netcdf import EnhancedDataset
from pyaxiom.netcdf.sensors import TimeSeries, get_dataframe_from_variable
import logging
from pyaxiom import logger
# Surface pyaxiom log records on stderr while the tests run.
logger.level = logging.INFO
logger.addHandler(logging.StreamHandler())
class TestTimeSeries(unittest.TestCase):
def setUp(self):
    # Common fixture values shared by every TimeSeries test.
    self.output_directory = os.path.join(os.path.dirname(__file__), "output")
    self.latitude = 34
    self.longitude = -72
    self.station_name = "PytoolsTestStation"
    self.global_attributes = {'id': 'this.is.the.id'}
    self.fillvalue = -9999.9
def test_timeseries(self):
    filename = 'test_timeseries.nc'
    times = [0, 1000, 2000, 3000, 4000, 5000]
    ts = TimeSeries(output_directory=self.output_directory,
                    latitude=self.latitude,
                    longitude=self.longitude,
                    station_name=self.station_name,
                    global_attributes=self.global_attributes,
                    output_filename=filename,
                    times=times,
                    verticals=None)
    values = [20, 21, 22, 23, 24, 25]
    ts.add_variable('temperature',
                    values=values,
                    attributes=dict(standard_name='sea_water_temperature'))
    nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
    assert nc is not None
    # Basic metadata written on every timeseries file.
    expected_globals = [
        ('cdm_data_type', 'Station'),
        ('geospatial_lat_units', 'degrees_north'),
        ('geospatial_lon_units', 'degrees_east'),
        ('geospatial_vertical_units', 'meters'),
        ('geospatial_vertical_positive', 'down'),
        ('featureType', 'timeSeries'),
        ('geospatial_vertical_resolution', '0'),
    ]
    for attribute, expected in expected_globals:
        self.assertEqual(getattr(nc, attribute), expected)
    # No verticals, so the vertical min/max attributes were never written.
    with self.assertRaises(AttributeError):
        nc.geospatial_vertical_min
    with self.assertRaises(AttributeError):
        nc.geospatial_vertical_max
    assert nc.variables.get('time').size == len(times)
    assert nc.variables.get('temperature').size == len(times)
    assert (nc.variables.get('temperature')[:] == np.asarray(values)).all()
def test_timeseries_extra_values(self):
"""
This will map directly to the time variable and ignore any time indexes
that are not found. The 'times' parameter to add_variable should be
the same length as the values parameter.
"""
filename = 'test_timeseries_extra_values.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = None
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = [20, 21, 22, 23, 24, 25, 26, 27, 28]
value_times = [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs, times=value_times)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '0')
# No verticals, so these were not set
with self.assertRaises(AttributeError):
nc.geospatial_vertical_min
with self.assertRaises(AttributeError):
nc.geospatial_vertical_max
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('temperature').size == len(times)
assert (nc.variables.get('temperature')[:] == np.asarray(values[0:6])).all()
def test_timeseries_profile(self):
filename = 'test_timeseries_profile.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 1, 2]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25], len(verticals))
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
# Basic metadata on all timeseries
self.assertEqual(nc.cdm_data_type, 'Station')
self.assertEqual(nc.geospatial_lat_units, 'degrees_north')
self.assertEqual(nc.geospatial_lon_units, 'degrees_east')
self.assertEqual(nc.geospatial_vertical_units, 'meters')
self.assertEqual(nc.geospatial_vertical_positive, 'down')
self.assertEqual(nc.featureType, 'timeSeriesProfile')
self.assertEqual(nc.geospatial_vertical_resolution, '1 1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 2)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('z').positive == 'down'
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert (nc.variables.get('temperature')[:] == values.reshape((len(times), len(verticals)))).all()
def test_timeseries_profile_different_z_name(self):
filename = 'test_timeseries_profile_different_z_name.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 1, 2]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals,
vertical_positive='up',
vertical_axis_name='height'
)
values = np.repeat([20, 21, 22, 23, 24, 25], len(verticals))
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '1 1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 2)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('height').size == len(verticals)
assert nc.variables.get('height').positive == 'up'
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert (nc.variables.get('temperature')[:] == values.reshape((len(times), len(verticals)))).all()
def test_timeseries_profile_extra_values(self):
"""
This will map directly to the time variable and ignore any time indexes
that are not found. The 'times' parameter to add_variable should be
the same length as the values parameter.
"""
filename = 'test_timeseries_profile_extra_values.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 1, 2]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25, 26, 27, 28], len(verticals))
new_times = [0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]
values_times = np.repeat(new_times, len(verticals))
values_verticals = np.repeat(verticals, len(new_times))
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs, times=values_times, verticals=values_verticals)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '1 1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 2)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert (nc.variables.get('temperature')[:] == np.repeat([20, 21, 22, 23, 24, 25], len(verticals)).reshape((len(times), len(verticals)))).all()
def test_timeseries_profile_duplicate_heights(self):
filename = 'test_timeseries_profile_duplicate_heights.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 0, 0, 1, 1, 1]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25], 2)
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 1)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(list(set(verticals)))
assert nc.variables.get('temperature').size == len(times) * len(list(set(verticals)))
assert (nc.variables.get('temperature')[:] == values.reshape((len(times), 2))).all()
def test_timeseries_profile_with_shape(self):
filename = 'test_timeseries_profile_with_shape.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 1, 2]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25], len(verticals)).reshape((len(times), len(verticals)))
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '1 1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 2)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert (nc.variables.get('temperature')[:] == values.reshape((len(times), len(verticals)))).all()
def test_timeseries_profile_fill_value_in_z(self):
filename = 'test_timeseries_profile_fill_value_in_z.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
# Vertical fills MUST be at the BEGINNING of the array!!!!
verticals = [self.fillvalue, 0]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = [self.fillvalue, 20, self.fillvalue, 21, self.fillvalue, 22, self.fillvalue, 23, self.fillvalue, 24, self.fillvalue, 25]
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs, fillvalue=self.fillvalue)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '0')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 0)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert nc.variables.get('temperature')[:][0][1] == 20
assert nc.variables.get('temperature')[:].mask[0][0] == True
assert nc.variables.get('temperature')[:][1][1] == 21
assert nc.variables.get('temperature')[:].mask[1][0] == True
assert nc.variables.get('temperature')[:][2][1] == 22
assert nc.variables.get('temperature')[:].mask[2][0] == True
assert nc.variables.get('temperature')[:][3][1] == 23
assert nc.variables.get('temperature')[:].mask[3][0] == True
assert nc.variables.get('temperature')[:][4][1] == 24
assert nc.variables.get('temperature')[:].mask[4][0] == True
assert nc.variables.get('temperature')[:][5][1] == 25
assert nc.variables.get('temperature')[:].mask[5][0] == True
assert (nc.variables.get('temperature')[:] == np.asarray(values).reshape((len(times), len(verticals)))).all()
def test_timeseries_profile_unsorted_time_and_z(self):
filename = 'test_timeseries_profile_unsorted_time_and_z.nc'
times = [5000, 1000, 2000, 3000, 4000, 0]
verticals = [0, 50]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25], len(verticals))
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs, fillvalue=self.fillvalue)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '50')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 50)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert nc.variables.get('temperature')[:][0][0] == 25
assert nc.variables.get('temperature')[:][0][1] == 25
assert nc.variables.get('temperature')[:][1][0] == 21
assert nc.variables.get('temperature')[:][1][1] == 21
assert nc.variables.get('temperature')[:][2][0] == 22
assert nc.variables.get('temperature')[:][2][1] == 22
assert nc.variables.get('temperature')[:][3][0] == 23
assert nc.variables.get('temperature')[:][3][1] == 23
assert nc.variables.get('temperature')[:][4][0] == 24
assert nc.variables.get('temperature')[:][4][1] == 24
assert nc.variables.get('temperature')[:][5][0] == 20
assert nc.variables.get('temperature')[:][5][1] == 20
def test_timeseries_profile_with_bottom_temperature(self):
filename = 'test_timeseries_profile_with_bottom_temperature.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 1, 2]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25], len(verticals))
bottom_values = [30, 31, 32, 33, 34, 35]
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs)
ts.add_variable('bottom_temperature', values=bottom_values, verticals=[60], unlink_from_profile=True, attributes=attrs)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '1 1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 2)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert nc.variables.get('sensor_depth') is not None
assert nc.variables.get('bottom_temperature').size == len(times)
assert (nc.variables.get('temperature')[:] == values.reshape((len(times), len(verticals)))).all()
assert (nc.variables.get('bottom_temperature')[:] == np.asarray(bottom_values)).all()
def test_timeseries_many_variables(self):
filename = 'test_timeseries_many_variables.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = [0, 1, 2]
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = np.repeat([20, 21, 22, 23, 24, 25], len(verticals))
bottom_values = [30, 31, 32, 33, 34, 35]
full_masked = values.view(np.ma.MaskedArray)
full_masked.mask = True
attrs = dict(standard_name='sea_water_temperature')
ts.add_variable('temperature', values=values, attributes=attrs)
ts.add_variable('salinity', values=values.reshape((len(times), len(verticals))))
ts.add_variable('dissolved_oxygen', values=full_masked, fillvalue=full_masked.fill_value)
ts.add_variable('bottom_temperature', values=bottom_values, verticals=[60], unlink_from_profile=True, attributes=attrs)
nc = netCDF4.Dataset(os.path.join(self.output_directory, filename))
assert nc is not None
self.assertEqual(nc.geospatial_vertical_resolution, '1 1')
self.assertEqual(nc.geospatial_vertical_min, 0)
self.assertEqual(nc.geospatial_vertical_max, 2)
assert nc.variables.get('time').size == len(times)
assert nc.variables.get('z').size == len(verticals)
assert nc.variables.get('temperature').size == len(times) * len(verticals)
assert (nc.variables.get('temperature')[:] == values.reshape((len(times), len(verticals)))).all()
assert (nc.variables.get('salinity')[:] == values.reshape((len(times), len(verticals)))).all()
assert nc.variables.get('dissolved_oxygen')[:].mask.all()
class TestTimeseriesTimeBounds(unittest.TestCase):
    # Exercises TimeSeries.add_time_bounds, which writes a 'time_bounds'
    # variable of [start, stop] pairs derived from each time step plus a
    # timedelta, anchored at the 'start', 'middle' or 'end' of the interval.

    def setUp(self):
        # One shared station file per test; tearDown removes it.
        self.output_directory = os.path.join(os.path.dirname(__file__), "output")
        self.latitude = 34
        self.longitude = -72
        self.station_name = "PytoolsTestStation"
        self.global_attributes = dict(id='this.is.the.id')
        self.filename = 'test_timeseries_bounds.nc'
        self.times = [0, 1000, 2000, 3000, 4000, 5000]
        verticals = [0]
        self.ts = TimeSeries(output_directory=self.output_directory,
                             latitude=self.latitude,
                             longitude=self.longitude,
                             station_name=self.station_name,
                             global_attributes=self.global_attributes,
                             output_filename=self.filename,
                             times=self.times,
                             verticals=verticals)
        self.values = [20, 21, 22, 23, 24, 25]
        attrs = dict(standard_name='sea_water_temperature')
        self.ts.add_variable('temperature', values=self.values, attributes=attrs)

    def tearDown(self):
        os.remove(os.path.join(self.output_directory, self.filename))

    def test_time_bounds_start(self):
        # position='start': each interval begins at the time value and
        # extends forward by delta.
        delta = timedelta(seconds=1000)
        self.ts.add_time_bounds(delta=delta, position='start')
        nc = netCDF4.Dataset(os.path.join(self.output_directory, self.filename))
        assert nc.variables.get('time_bounds').shape == (len(self.times), 2,)
        assert (nc.variables.get('time_bounds')[:] == np.asarray([
            [0, 1000],
            [1000, 2000],
            [2000, 3000],
            [3000, 4000],
            [4000, 5000],
            [5000, 6000]
        ])).all()
        nc.close()

    def test_time_bounds_middle(self):
        # position='middle': each interval is centered on the time value.
        delta = timedelta(seconds=1000)
        self.ts.add_time_bounds(delta=delta, position='middle')
        nc = netCDF4.Dataset(os.path.join(self.output_directory, self.filename))
        assert nc.variables.get('time_bounds').shape == (len(self.times), 2,)
        assert (nc.variables.get('time_bounds')[:] == np.asarray([
            [ -500, 500],
            [ 500, 1500],
            [ 1500, 2500],
            [ 2500, 3500],
            [ 3500, 4500],
            [ 4500, 5500]
        ])).all()
        nc.close()

    def test_time_bounds_end(self):
        # position='end': each interval ends at the time value and extends
        # backward by delta.
        delta = timedelta(seconds=1000)
        self.ts.add_time_bounds(delta=delta, position='end')
        nc = netCDF4.Dataset(os.path.join(self.output_directory, self.filename))
        assert nc.variables.get('time_bounds').shape == (len(self.times), 2,)
        assert (nc.variables.get('time_bounds')[:] == np.asarray([
            [-1000, 0],
            [ 0, 1000],
            [ 1000, 2000],
            [ 2000, 3000],
            [ 3000, 4000],
            [ 4000, 5000]
        ])).all()
        nc.close()
class TestDataFrameFromVariable(unittest.TestCase):
    # Round-trips two netCDF fixture files through
    # get_dataframe_from_variable and merges the resulting DataFrames.

    def test_sensor_with_depths(self):
        # Same variable ('soil_moisture_percent') from two fixtures.
        ncfile1 = os.path.join(os.path.dirname(__file__), 'resources', 'sensor_with_depths_1.nc')
        ncd1 = EnhancedDataset(ncfile1)
        ncvar1 = ncd1.variables['soil_moisture_percent']
        df1 = get_dataframe_from_variable(ncd1, ncvar1)
        ncd1.close()

        ncfile2 = os.path.join(os.path.dirname(__file__), 'resources', 'sensor_with_depths_2.nc')
        ncd2 = EnhancedDataset(ncfile2)
        ncvar2 = ncd2.variables['soil_moisture_percent']
        df2 = get_dataframe_from_variable(ncd2, ncvar2)
        ncd2.close()

        # combine_first keeps df2's values and fills gaps from df1.
        df = df2.combine_first(df1)
        assert not df.empty
|
ocefpaf/pyaxiom
|
pyaxiom/tests/test_timeseries.py
|
Python
|
mit
| 25,516
|
[
"NetCDF"
] |
adfcd3ca77d3451f3668207545a02a9d454a8b869efa93e49c657cca20d70788
|
#!/usr/bin/env python
#
# This file is part of OpenDrift.
#
# OpenDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2
#
# OpenDrift is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenDrift. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015, Knut-Frode Dagestad, MET Norway
# Utility script to display contents of a netCDF CF-compliant
# file or URL containing driver data suitable for opendrift
#
# Knut-Frode Dagestad, 19 Feb 2015
import sys
import argparse
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("..")
try:
from opendrift.readers import reader_netCDF_CF_generic
from opendrift.readers import reader_ROMS_native
readers = [reader_netCDF_CF_generic, reader_ROMS_native]
except ImportError: # development
sys.exit('Please add opendrift folder to your PYTHONPATH.')
if __name__ == '__main__':
    # Pick a source position/time on the command line, find a reader that
    # can open the file/URL, and plot a hodograph (velocity profile as
    # depth-colored arrows) at that point.
    parser = argparse.ArgumentParser()
    parser.add_argument('filename',
                        help='<URL or netCDF filename>')
    parser.add_argument('lon', help='longitude in degrees')
    parser.add_argument('lat', help='latitude in degrees')  # was mislabelled 'longitude'
    parser.add_argument('time', help='YYYYMMDDHHMM')
    parser.add_argument('-e', action='store_true',
                        help='Report errors on failure.')
    args = parser.parse_args()

    # Try each reader in turn; the first one that opens the file wins.
    for reader in readers:
        try:
            print('Testing %s...' % reader.__file__)
            r = reader.Reader(args.filename)
            print(r)
            break
        except Exception as me:
            if args.e is True:
                print(me)
                import traceback
                print(traceback.format_exc())
            print('---------------------------------------')
            print('...not applicable.')

    if 'r' not in locals():
        sys.exit('No readers applicable for ' + args.filename)

    time = datetime.strptime(args.time, '%Y%m%d%H%M')
    # argparse delivers lon/lat as strings; convert before any projection math.
    lon = np.atleast_1d(float(args.lon))
    lat = np.atleast_1d(float(args.lat))
    x, y = r.lonlat2xy(lon, lat)
    r.buffer = 3

    i = 3
    j = 3  # center of block
    uv = r.get_variables(['x_sea_water_velocity', 'y_sea_water_velocity',
                          'sea_floor_depth_below_sea_level'],
                         time, x, y, r.z, block=True)
    u_comp = uv['x_sea_water_velocity'][:, i, j]
    v_comp = uv['y_sea_water_velocity'][:, i, j]
    depth = uv['sea_floor_depth_below_sea_level'][i, j]
    print(depth, 'depth')

    # Drop masked levels; assumes masked entries are contiguous at the end
    # of the profile (below the sea floor) -- TODO confirm for all readers.
    u_comp = u_comp[0:sum(~u_comp.mask)]
    v_comp = v_comp[0:sum(~v_comp.mask)]
    # Rotate from the reader's native projection to geographic east/north.
    u_rot, v_rot = r.rotate_vectors(x, y, u_comp, v_comp,
                                    r.proj, '+proj=latlong')
    vel = np.sqrt(u_rot**2 + v_rot**2)
    mx = np.max((np.abs(u_rot), np.abs(v_rot)))

    fig = plt.figure()
    ax = fig.gca()
    plt.quiver(0, 0, u_rot, v_rot, r.z[0:sum(~u_rot.mask)],
               scale=1.1, scale_units='x')
    plt.axis([-mx, mx, -mx, mx])
    plt.grid(True)  # plt.grid('on') is rejected by modern Matplotlib
    cb = plt.colorbar(ticks=r.z[0:sum(~u_rot.mask)])
    cb.set_label('Depth [m]')
    plt.ylabel('North component [m/s]')
    plt.xlabel('East component [m/s]')
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    textstr = u'%s UTC\n%.3fN\N{DEGREE SIGN}, %.3fE\N{DEGREE SIGN}' % \
        (time, float(args.lat), float(args.lon))
    ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,
            verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
    plt.show()
|
knutfrode/opendrift
|
opendrift/scripts/hodograph.py
|
Python
|
gpl-2.0
| 3,847
|
[
"NetCDF"
] |
5e58b6e9c760b657989098e2748f24ae0dd41e2ff4ee7740baf6adef643c7306
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Real NVP bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import template as template_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"RealNVP",
"real_nvp_default_template"
]
class RealNVP(bijector.Bijector):
  """RealNVP "affine coupling layer" for vector-valued events.

  Splits a `D`-dimensional event into the first `num_masked` units `x0`
  (passed through unchanged) and the remaining `D - num_masked` units `x1`,
  which are scaled and shifted by quantities computed from `x0` only
  [(Dinh et al., 2017)][1]:

    `y[0:d]  = x[0:d]`
    `y[d:D]  = x[d:D] * exp(log_scale_fn(x[0:d])) + shift_fn(x[0:d])`

  Because `shift` and `log_scale` never depend on the transformed units,
  both the forward and inverse passes (and their log-det-Jacobians) are
  computable in a single parallel evaluation -- unlike MAF/IAF, which must
  be sequential in one direction. The trade-off is reduced per-bijector
  expressiveness.

  Masking is supported only for `event_ndims=1` base distributions; use
  `tfb.Permute` to move the desired masked units to the front, and
  `tfb.Reshape` to flatten higher-rank events.

  Returning `(shift, None)` from `shift_and_log_scale_fn` (e.g. via
  `real_nvp_default_template(shift_only=True)`) recovers the NICE bijector
  [(Dinh et al., 2014)][2], whose Jacobian is constant; pass
  `is_constant_jacobian=True` in that case.

  Caching: the event depth `D` is not known at construction time; the first
  call to `forward`, `inverse`, or either `*_log_det_jacobian` memoizes it
  from the static shape, which must therefore be known prior to graph
  execution (true when using `tf.layers`).

  #### Example Use

  ```python
  tfd = tf.contrib.distributions
  tfb = tfd.bijectors

  nvp = tfd.TransformedDistribution(
      distribution=tfd.MultivariateNormalDiag(loc=[0., 0., 0.])),
      bijector=tfb.RealNVP(
          num_masked=2,
          shift_and_log_scale_fn=tfb.real_nvp_default_template(
              hidden_layers=[512, 512])))
  x = nvp.sample()
  nvp.log_prob(x)
  nvp.log_prob(0.)
  ```

  #### References

  [1]: Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density
       Estimation using Real NVP. In _International Conference on Learning
       Representations_, 2017. https://arxiv.org/abs/1605.08803

  [2]: Laurent Dinh, David Krueger, and Yoshua Bengio. NICE: Non-linear
       Independent Components Estimation. _arXiv preprint arXiv:1410.8516_,
       2014. https://arxiv.org/abs/1410.8516

  [3]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """

  def __init__(self,
               num_masked,
               shift_and_log_scale_fn,
               is_constant_jacobian=False,
               validate_args=False,
               name=None):
    """Creates the Real NVP or NICE bijector.

    Args:
      num_masked: Python `int`, number of leading event units left unchanged
        by the coupling layer. Must lie in the closed interval `[1, D-1]`
        where `D` is the event size of the base distribution.
      shift_and_log_scale_fn: Python `callable` mapping the masked units and
        the number of unmasked units to a `(shift, log_scale)` pair; either
        element may be `None`, which is equivalent to (but cheaper than)
        returning zeros. Typically built with `tf.make_template`, e.g.
        `real_nvp_default_template(hidden_layers=...)`.
      is_constant_jacobian: Python `bool`. Default: `False`. When `True` the
        implementation assumes `log_scale` is input-independent (no
        validation is performed; `False` is always safe).
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      ValueError: If num_masked < 1.
    """
    if num_masked <= 0:
      raise ValueError("num_masked must be a positive integer.")
    self._num_masked = num_masked
    # The event depth D is unknown until the first forward/inverse call.
    self._input_depth = None
    self._shift_and_log_scale_fn = shift_and_log_scale_fn
    super(RealNVP, self).__init__(
        forward_min_event_ndims=1,
        is_constant_jacobian=is_constant_jacobian,
        validate_args=validate_args,
        name=name or "real_nvp")

  def _cache_input_depth(self, x):
    # Memoize D from the rightmost static dimension; validation of
    # num_masked against D only happens on this first call.
    if self._input_depth is None:
      self._input_depth = x.shape.with_rank_at_least(1)[-1].value
      if self._input_depth is None:
        raise NotImplementedError(
            "Rightmost dimension must be known prior to graph execution.")
      if self._num_masked >= self._input_depth:
        raise ValueError(
            "Number of masked units must be smaller than the event size.")

  def _forward(self, x):
    self._cache_input_depth(x)
    # Scale-and-shift the unmasked units from the masked ones.
    masked = x[:, :self._num_masked]
    unmasked = x[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        masked, self._input_depth - self._num_masked)
    out = unmasked
    if log_scale is not None:
      out = out * math_ops.exp(log_scale)
    if shift is not None:
      out = out + shift
    return array_ops.concat([masked, out], axis=-1)

  def _inverse(self, y):
    self._cache_input_depth(y)
    # Undo shift first, then scale -- reverse order of _forward.
    masked = y[:, :self._num_masked]
    transformed = y[:, self._num_masked:]
    shift, log_scale = self._shift_and_log_scale_fn(
        masked, self._input_depth - self._num_masked)
    recovered = transformed
    if shift is not None:
      recovered = recovered - shift
    if log_scale is not None:
      recovered = recovered * math_ops.exp(-log_scale)
    return array_ops.concat([masked, recovered], axis=-1)

  def _inverse_log_det_jacobian(self, y):
    self._cache_input_depth(y)
    _, log_scale = self._shift_and_log_scale_fn(
        y[:, :self._num_masked], self._input_depth - self._num_masked)
    if log_scale is None:
      # NICE case: the transform is volume-preserving up to the shift.
      return constant_op.constant(0., dtype=y.dtype, name="ildj")
    return -math_ops.reduce_sum(log_scale, axis=-1)

  def _forward_log_det_jacobian(self, x):
    self._cache_input_depth(x)
    _, log_scale = self._shift_and_log_scale_fn(
        x[:, :self._num_masked], self._input_depth - self._num_masked)
    if log_scale is None:
      return constant_op.constant(0., dtype=x.dtype, name="fldj")
    return math_ops.reduce_sum(log_scale, axis=-1)
def real_nvp_default_template(
    hidden_layers,
    shift_only=False,
    activation=nn_ops.relu,
    name=None,
    *args,
    **kwargs):
  """Build a scale-and-shift function using a multi-layer neural network.

  The returned callable is wrapped in `tf.make_template` so its variables
  are created once and reused. It maps the `d`-dimensional input `x[0:d]`
  to the `D-d`-dimensional outputs `loc` ("mu") and `log_scale` ("alpha")
  [(Papamakarios et al., 2016)][1].

  Arguments:
    hidden_layers: Python `list`-like of non-negative integer, scalars
      indicating the number of units in each hidden layer.
    shift_only: Python `bool` indicating if only the `shift` term shall be
      computed (i.e. NICE bijector), in which case `log_scale` is `None`.
      Default: `False`.
    activation: Activation function (callable). Explicitly setting to `None`
      implies a linear activation.
    name: A name for ops managed by this function. Default:
      "real_nvp_default_template".
    *args: `tf.layers.dense` arguments.
    **kwargs: `tf.layers.dense` keyword arguments.

  Returns:
    shift: `Float`-like `Tensor` of shift terms.
    log_scale: `Float`-like `Tensor` of log(scale) terms, or `None` when
      `shift_only` is set.

  Raises:
    NotImplementedError: if rightmost dimension of `inputs` is unknown prior
      to graph execution.

  #### References

  [1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
       Autoregressive Flow for Density Estimation. In _Neural Information
       Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
  """
  with ops.name_scope(name, "real_nvp_default_template"):

    def _fn(x, output_units):
      """Fully connected MLP parameterized via `real_nvp_template`."""
      net = x
      for units in hidden_layers:
        net = layers.dense(
            inputs=net,
            units=units,
            activation=activation,
            *args,
            **kwargs)
      # Final linear layer emits shift (and log_scale unless shift_only).
      net = layers.dense(
          inputs=net,
          units=(1 if shift_only else 2) * output_units,
          activation=None,
          *args,
          **kwargs)
      if shift_only:
        return net, None
      return array_ops.split(net, 2, axis=-1)

    return template_ops.make_template(
        "real_nvp_default_template", _fn)
|
nburn42/tensorflow
|
tensorflow/contrib/distributions/python/ops/bijectors/real_nvp.py
|
Python
|
apache-2.0
| 12,026
|
[
"Gaussian"
] |
bb6c9cd7510517660b4aaf895654807e87a29a3190ee1f7b80cae75b6240422c
|
# coding: utf8
"""Livestreamer extracts streams from various services.

The main component of Livestreamer is a command-line utility that
launches the streams in a video player.

An API is also provided that allows direct access to stream data.

Full documentation is available at http://docs.livestreamer.io/.
"""
# Package metadata consumed by setup tooling and --version output.
__title__ = "livestreamer"
__version__ = "1.12.0"
__license__ = "Simplified BSD"
__author__ = "Christopher Rosell"
__copyright__ = "Copyright 2011-2015 Christopher Rosell"
__credits__ = [
"Agustín Carrasco (@asermax)",
"Andrew Bashore (@bashtech)",
"Andy Mikhailenko (@neithere)",
"Athanasios Oikonomou (@athoik)",
"Brian Callahan (@ibara)",
"Che (@chhe)",
"Christopher Rosell (@chrippa)",
"Daniel Miranda (@danielkza)",
"Daniel Wallace (@gtmanfred)",
"David Arvelo (@darvelo)",
"Dominik Dabrowski (@doda)",
"Eric J (@wormeyman)",
"Ethan Jones (@jonesz)",
"Gaspard Jankowiak (@gapato)",
"Jaime Marquínez Ferrándiz (@jaimeMF)",
"Jan Tore Morken (@jantore)",
"John Peterson (@john-peterson)",
"Jon Bergli Heier (@sn4kebite)",
"Kacper (@kasper93)",
"Martin Panter (@vadmium)",
"Max Nordlund (@maxnordlund)",
"Michael Cheah (@cheah)",
"Moritz Blanke",
"Niall McAndrew (@niallm90)",
"Niels Kräupl (@Gamewalker)",
"Pascal Romahn (@skulblakka)",
"Sam Edwards (@dotsam)",
"Stefan Breunig (@breunigs)",
"Suhail Patel (@suhailpatel)",
"Sunaga Takahiro (@sunaga720)",
"Vitaly Evtushenko (@eltiren)",
"Warnar Boekkooi (@boekkooi)",
"@btiom",
"@daslicious",
"@MasterofJOKers",
"@medina",
"@monkeyphysics",
"@nixxquality",
"@papplampe",
"@t0mm0",
"@ToadKing",
"@unintended",
"@wolftankk",
"@yeeeargh"
]
from .api import streams
from .exceptions import (LivestreamerError, PluginError, NoStreamsError,
NoPluginError, StreamError)
from .session import Livestreamer
|
mrquim/mrquimrepo
|
script.module.livestreamer/lib/livestreamer/__init__.py
|
Python
|
gpl-2.0
| 1,974
|
[
"Brian"
] |
c2bddc66ce817e4f90fcc7fce1378f19310808c82704d4359255b22270da0398
|
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs client tasks for testing IMAP OAuth2 authentication.
To use this script, you'll need to have registered with Google as an OAuth
application and obtained an OAuth client ID and client secret.
See http://code.google.com/apis/accounts/docs/OAuth2.html for instructions on
registering and for documentation of the APIs invoked by this code.
This script has 3 modes of operation.
1. The first mode is used to generate and authorize an OAuth2 token, the
first step in logging in via OAuth2.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--generate_oauth2_token
The script will converse with Google and generate an oauth request
token, then present you with a URL you should visit in your browser to
authorize the token. Once you get the verification code from the Google
website, enter it into the script to get your OAuth access token. The output
from this command will contain the access token, a refresh token, and some
metadata about the tokens. The access token can be used until it expires, and
the refresh token lasts indefinitely, so you should record these values for
reuse.
2. The script will generate new access tokens using a refresh token.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--refresh_token=1/Yzm6MRy4q1xi7Dx2DuWXNgT6s37OrP_DW_IoyTum4YA
3. The script will generate an OAuth2 string that can be fed
directly to IMAP or SMTP. This is triggered with the --generate_oauth2_string
option.
oauth2 --generate_oauth2_string --user=xxx@gmail.com \
--access_token=ya29.AGy[...]ezLg
The output of this mode will be a base64-encoded string. To use it, connect to a
IMAPFE and pass it as the second argument to the AUTHENTICATE command.
a AUTHENTICATE XOAUTH2 a9sha9sfs[...]9dfja929dk==
"""
import base64
import imaplib2 as imaplib
import json
from optparse import OptionParser
import smtplib
import sys
import urllib
def SetupOptionParser():
  """Builds the command-line option parser for this tool.

  The module docstring doubles as the usage message.

  Returns:
    A configured optparse.OptionParser.
  """
  parser = OptionParser(usage=__doc__)
  # (flag, add_option keyword arguments) in the order they appear in --help.
  option_specs = (
      ('--generate_oauth2_token',
       dict(action='store_true',
            dest='generate_oauth2_token',
            help='generates an OAuth2 token for testing')),
      ('--generate_oauth2_string',
       dict(action='store_true',
            dest='generate_oauth2_string',
            help='generates an initial client response string for '
                 'OAuth2')),
      ('--client_id',
       dict(default=None,
            help='Client ID of the application that is authenticating. '
                 'See OAuth2 documentation for details.')),
      ('--client_secret',
       dict(default=None,
            help='Client secret of the application that is '
                 'authenticating. See OAuth2 documentation for '
                 'details.')),
      ('--access_token',
       dict(default=None,
            help='OAuth2 access token')),
      ('--refresh_token',
       dict(default=None,
            help='OAuth2 refresh token')),
      ('--scope',
       dict(default='https://mail.google.com/',
            help='scope for the access token. Multiple scopes can be '
                 'listed separated by spaces with the whole argument '
                 'quoted.')),
      ('--test_imap_authentication',
       dict(action='store_true',
            dest='test_imap_authentication',
            help='attempts to authenticate to IMAP')),
      ('--test_smtp_authentication',
       dict(action='store_true',
            dest='test_smtp_authentication',
            help='attempts to authenticate to SMTP')),
      ('--user',
       dict(default=None,
            help='email address of user whose account is being '
                 'accessed')),
  )
  for flag, kwargs in option_specs:
    parser.add_option(flag, **kwargs)
  return parser
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'

# Hardcoded dummy redirect URI for non-web apps ("out of band" flow): instead
# of redirecting, Google shows the authorization code for the user to copy.
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
def AccountsUrl(command):
  """Generates the Google Accounts URL.

  Args:
    command: The command to execute.

  Returns:
    A URL for the given command.
  """
  return '{0}/{1}'.format(GOOGLE_ACCOUNTS_BASE_URL, command)
def UrlEscape(text):
  """Percent-encodes `text` for use in a URL query string (Python 2 urllib)."""
  # See OAUTH 5.1 for a definition of which characters need to be escaped.
  return urllib.quote(text, safe='~-._')


def UrlUnescape(text):
  """Reverses UrlEscape: decodes percent-escapes in `text`."""
  # See OAUTH 5.1 for a definition of which characters need to be escaped.
  return urllib.unquote(text)
def FormatUrlParams(params):
  """Formats parameters into a URL query string.

  Args:
    params: A key-value map.

  Returns:
    A URL query string version of the given parameters, with keys in sorted
    order and values percent-escaped.
  """
  # dict.items() (rather than the Python-2-only iteritems()) keeps this helper
  # portable to Python 3. Dict keys are unique, so sorting the (key, value)
  # pairs is equivalent to sorting by key alone.
  return '&'.join('%s=%s' % (key, UrlEscape(value))
                  for key, value in sorted(params.items()))
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
  """Generates the URL for authorizing access.

  This uses the "OAuth2 for Installed Applications" flow described at
  https://developers.google.com/accounts/docs/OAuth2InstalledApp

  Args:
    client_id: Client ID obtained by registering your app.
    scope: scope for access token, e.g. 'https://mail.google.com'

  Returns:
    A URL that the user should visit in their browser.
  """
  query = {
      'client_id': client_id,
      'redirect_uri': REDIRECT_URI,
      'scope': scope,
      'response_type': 'code',
  }
  return '%s?%s' % (AccountsUrl('o/oauth2/auth'), FormatUrlParams(query))
def AuthorizeTokens(client_id, client_secret, authorization_code):
  """Obtains OAuth access token and refresh token.

  This uses the application portion of the "OAuth2 for Installed Applications"
  flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse

  Args:
    client_id: Client ID obtained by registering your app.
    client_secret: Client secret obtained by registering your app.
    authorization_code: code generated by Google Accounts after user grants
        permission.

  Returns:
    The decoded response from the Google Accounts server, as a dict. Expected
    fields include 'access_token', 'expires_in', and 'refresh_token'.
  """
  token_request = {
      'client_id': client_id,
      'client_secret': client_secret,
      'code': authorization_code,
      'redirect_uri': REDIRECT_URI,
      'grant_type': 'authorization_code',
  }
  # POST the form-encoded parameters to the token endpoint (Python 2 urllib).
  raw_response = urllib.urlopen(AccountsUrl('o/oauth2/token'),
                                urllib.urlencode(token_request)).read()
  return json.loads(raw_response)
def RefreshToken(client_id, client_secret, refresh_token):
  """Obtains a new token given a refresh token.

  See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh

  Args:
    client_id: Client ID obtained by registering your app.
    client_secret: Client secret obtained by registering your app.
    refresh_token: A previously-obtained refresh token.

  Returns:
    The decoded response from the Google Accounts server, as a dict. Expected
    fields include 'access_token', 'expires_in', and 'refresh_token'.
  """
  token_request = {
      'client_id': client_id,
      'client_secret': client_secret,
      'refresh_token': refresh_token,
      'grant_type': 'refresh_token',
  }
  # POST the form-encoded parameters to the token endpoint (Python 2 urllib).
  raw_response = urllib.urlopen(AccountsUrl('o/oauth2/token'),
                                urllib.urlencode(token_request)).read()
  return json.loads(raw_response)
def GenerateOAuth2String(username, access_token, base64_encode=True):
  """Generates an IMAP OAuth2 authentication string.

  See https://developers.google.com/google-apps/gmail/oauth2_overview

  Args:
    username: the username (email address) of the account to authenticate
    access_token: An OAuth2 access token.
    base64_encode: Whether to base64-encode the output.

  Returns:
    The SASL argument for the OAuth2 mechanism (text).
  """
  auth_string = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
  if base64_encode:
    # base64.b64encode operates on bytes; encoding before and decoding after
    # keeps this working on both Python 2 and Python 3 while still returning
    # text to the caller.
    auth_string = base64.b64encode(auth_string.encode('utf-8')).decode('ascii')
  return auth_string
def TestImapAuthentication(user, auth_string):
  """Authenticates to IMAP with the given auth_string.

  Prints a debug trace of the attempted IMAP connection.

  Args:
    user: The Gmail username (full email address). NOTE(review): unused here;
        kept for interface symmetry with TestSmtpAuthentication.
    auth_string: A valid OAuth2 string, as returned by GenerateOAuth2String.
        Must not be base64-encoded, since imaplib does its own base64-encoding.
  """
  # Python 2 print statement: emits a blank line before the debug trace.
  print
  imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
  # Debug level 4 makes imaplib print the full protocol conversation.
  imap_conn.debug = 4
  imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
  # Selecting INBOX proves the authenticated session actually works.
  imap_conn.select('INBOX')
def TestSmtpAuthentication(user, auth_string):
  """Authenticates to SMTP with the given auth_string.

  Args:
    user: The Gmail username (full email address). NOTE(review): unused here;
        kept for interface symmetry with TestImapAuthentication.
    auth_string: A valid OAuth2 string, not base64-encoded, as returned by
        GenerateOAuth2String.
  """
  # Python 2 print statement: emits a blank line before the debug trace.
  print
  smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
  smtp_conn.set_debuglevel(True)
  smtp_conn.ehlo('test')
  # Upgrade to TLS before sending credentials.
  smtp_conn.starttls()
  # smtplib has no XOAUTH2 helper, so issue the AUTH command manually;
  # SMTP requires the argument base64-encoded (unlike imaplib, which
  # encodes it for us).
  smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
def RequireOptions(options, *args):
  """Exits the program if any of the named options was not supplied.

  Args:
    options: Parsed optparse options object.
    *args: Option attribute names that must be non-None.
  """
  missing = [arg for arg in args if getattr(options, arg) is None]
  if missing:
    print 'Missing options: %s' % ' '.join(missing)
    sys.exit(-1)
def main(argv):
  """Dispatches to one of the tool's modes based on command-line options.

  Modes (first match wins): refresh an access token, generate an OAuth2
  SASL string, run the interactive token-authorization flow, or test
  IMAP/SMTP authentication. With no mode selected, prints help and exits.
  """
  options_parser = SetupOptionParser()
  (options, args) = options_parser.parse_args()
  if options.refresh_token:
    RequireOptions(options, 'client_id', 'client_secret')
    response = RefreshToken(options.client_id, options.client_secret,
                            options.refresh_token)
    print 'Access Token: %s' % response['access_token']
    print 'Access Token Expiration Seconds: %s' % response['expires_in']
  elif options.generate_oauth2_string:
    RequireOptions(options, 'user', 'access_token')
    print ('OAuth2 argument:\n' +
           GenerateOAuth2String(options.user, options.access_token))
  elif options.generate_oauth2_token:
    RequireOptions(options, 'client_id', 'client_secret')
    # Interactive flow: user visits the URL, grants access, and pastes the
    # verification code back here to exchange it for tokens.
    print 'To authorize token, visit this url and follow the directions:'
    print '  %s' % GeneratePermissionUrl(options.client_id, options.scope)
    authorization_code = raw_input('Enter verification code: ')
    response = AuthorizeTokens(options.client_id, options.client_secret,
                               authorization_code)
    print 'Refresh Token: %s' % response['refresh_token']
    print 'Access Token: %s' % response['access_token']
    print 'Access Token Expiration Seconds: %s' % response['expires_in']
  elif options.test_imap_authentication:
    RequireOptions(options, 'user', 'access_token')
    # imaplib base64-encodes internally, so pass the raw string.
    TestImapAuthentication(options.user,
        GenerateOAuth2String(options.user, options.access_token,
                             base64_encode=False))
  elif options.test_smtp_authentication:
    RequireOptions(options, 'user', 'access_token')
    TestSmtpAuthentication(options.user,
        GenerateOAuth2String(options.user, options.access_token,
                             base64_encode=False))
  else:
    options_parser.print_help()
    print 'Nothing to do, exiting.'
    return


if __name__ == '__main__':
  main(sys.argv)
|
memorydump85/cardamon
|
moneyplot/oauth2.py
|
Python
|
mit
| 12,190
|
[
"VisIt"
] |
6d0cc68a62d77bd3156efe682a124cfb6a0bca83eaea59810378a2a78fc3d59a
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to demonstrate the use of the MeshToolkit library
#
# External dependencies
import argparse
import sys
import numpy as np
import MeshToolkit as mtk
# Initialisation
input_mesh = None

# Create a command line argument parser
parser = argparse.ArgumentParser( description='Process 3D triangular meshes.', usage='%(prog)s [options] input_mesh' )
parser.add_argument( 'input_mesh', nargs='?', default=None, help='Input mesh file in PLY format' )
parser.add_argument( '-i', action='store_true', help='Print mesh informations' )
parser.add_argument( '-b', action='store_true', help='Color vertices on a border' )
parser.add_argument( '-c', action='store_true', help='Check different mesh parameters' )
parser.add_argument( '-gc', action='store_true', help='Compute the surface gaussian curvature' )
parser.add_argument( '-nc', action='store_true', help='Compute the surface normal curvature' )
parser.add_argument( '-ul', nargs=2, metavar=('N', 'D'), help='Uniform laplacian smoothing with N iteration steps and D diffusion constant' )
parser.add_argument( '-ncf', nargs=2, metavar=('N', 'D'), help='Normalized curvature flow smoothing with N iteration steps and D diffusion constant' )
parser.add_argument( '-o', metavar='file', action='store', help='Write the resulting mesh to a PLY file' )
parser.add_argument( '-cm', default='CubeHelix', metavar='colormap', action='store', help='Colormap (default: cubehelix)' )
parser.add_argument( '-t', action='store_true', help='Test function' )
parser.add_argument( '-qt', action='store_true', help='Launch OpenGL viewer with Qt' )
parser.add_argument( '-glut', action='store_true', help='Launch OpenGL viewer with GLUT' )

# Process command line parameters
args = parser.parse_args()

# Input mesh
if args.input_mesh :
	# Read the input mesh file and precompute the data every later stage needs
	print( 'Read file ' + args.input_mesh + '... ' )
	input_mesh = mtk.ReadPly( args.input_mesh )
	# Compute surface normals
	print( 'Compute normals... ' )
	input_mesh.UpdateNormals()
	# Register neighborhood informations
	print( 'Register neighbors... ' )
	input_mesh.UpdateNeighbors()
# Launch standalone QtViewer (no mesh loaded)
elif args.qt :
	print( 'Launch Qt viewer... ' )
	mtk.QtViewer()
# Launch standalone Test (no mesh loaded)
elif args.t :
	print( 'Test... ' )
	mtk.Test()
# No input file
else :
	# Print help message and stop: every option below requires a mesh
	print( '\nNo input mesh file given...\n' )
	parser.print_help()
	sys.exit()

# Print mesh informations
if args.i :
	print( input_mesh )

# Check some mesh parameters
if args.c :
	print( 'Check mesh... ' )
	print( mtk.Check( input_mesh ) )

# Color vertices on a border
if args.b :
	print( 'Color border vertices... ' )
	input_mesh.colors = mtk.Colormap( args.cm ).ValueArrayToColor( input_mesh.GetBorderVertices() )

# Compute gaussian curvature and map it to vertex colors
if args.gc :
	print( 'Compute gaussian curvature... ' )
	curvature = mtk.GetGaussianCurvatureReference( input_mesh )
	mtk.Histogram( curvature )
	input_mesh.colors = mtk.Colormap( args.cm ).ValueArrayToColor( curvature )

# Compute normal curvature; statistics/histogram use the per-vertex magnitude
if args.nc :
	print( 'Compute normal curvature... ' )
	curvature = mtk.GetNormalCurvature( input_mesh )
	mtk.Statistics( np.sqrt( (curvature**2).sum(axis=1) ) )
	mtk.Histogram( np.sqrt( (curvature**2).sum(axis=1) ) )
	input_mesh.colors = mtk.Colormap( args.cm ).VectorArrayToColor( curvature )

# Apply uniform laplacian smoothing (N iterations, diffusion constant D)
if args.ul :
	print( 'Uniform laplacian smoothing... ' )
	mtk.UniformLaplacianSmoothing( input_mesh, int( args.ul[0] ), float( args.ul[1] ) )

# Apply normalized curvature flow smoothing (N iterations, diffusion constant D)
if args.ncf :
	print( 'Normalized curvature flow smoothing... ' )
	mtk.NormalizedCurvatureFlowSmoothing( input_mesh, int( args.ncf[0] ), float( args.ncf[1] ) )

# Test with a loaded mesh (the standalone -t case was handled above)
if args.t and args.input_mesh :
	print( 'Test... ' )
	mtk.Test( input_mesh )

# Write resulting mesh
if args.o :
	print( 'Write file ' + args.o + '... ' )
	mtk.WritePly( input_mesh, args.o )

# Launch GlutViewer
if args.glut :
	print( 'Launch GLUT viewer... ' )
	mtk.GlutViewer( input_mesh ).Run()

# Launch QtViewer with the processed mesh
if args.qt :
	print( 'Launch Qt viewer... ' )
	mtk.QtViewer( mesh=input_mesh )
|
microy/MeshToolkit
|
meshtoolkit.py
|
Python
|
mit
| 4,117
|
[
"Gaussian"
] |
f2e227b53faabb4720c39519cdd7d056d5ec3f4037ed2dbd75dba990fb538659
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
from PyQt4 import QtGui, QtCore
class Qt4Progress(QtGui.QProgressDialog):
    """Qt4 progress dialog implementing the cclib progress interface
    (initialize/update) on top of QProgressDialog.
    """

    def __init__(self, title, parent=None):
        """Create the dialog with the given window title."""
        QtGui.QProgressDialog.__init__(self, parent)

        # total number of steps (set in initialize)
        self.nstep = 0
        # current label text
        self.text = None
        # NOTE(review): oldprogress/progress/calls are initialized but not
        # used in this class; presumably kept for interface compatibility
        # with other cclib progress implementations — confirm before removing.
        self.oldprogress = 0
        self.progress = 0
        self.calls = 0
        # private event loop used to keep the UI responsive during updates
        self.loop=QtCore.QEventLoop(self)
        self.setWindowTitle(title)

    def initialize(self, nstep, text=None):
        """Set the total number of steps (and optionally the label text)."""
        self.nstep = nstep
        self.text = text
        self.setRange(0,nstep)
        if text:
            self.setLabelText(text)
        self.setValue(1)
        #sys.stdout.write("\n")

    def update(self, step, text=None):
        """Advance the dialog to `step`, optionally changing the label.

        NOTE(review): this overrides QWidget.update(); callers are expected
        to use the cclib progress signature, not the Qt repaint one.
        """
        if text:
            self.setLabelText(text)
        self.setValue(step)
        # Process pending paint events (but not user input) so the dialog
        # redraws while a long computation holds the main thread.
        self.loop.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents)
|
cclib/cclib
|
cclib/progress/qt4progress.py
|
Python
|
bsd-3-clause
| 1,013
|
[
"cclib"
] |
c663b30ec9984bccf151229be418a9181ba8069895ae5d912b9dcde9c21c90b5
|
#
# packages.py: package management - mainly package installation
#
# Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Erik Troan <ewt@redhat.com>
# Matt Wilson <msw@redhat.com>
# Michael Fulbright <msf@redhat.com>
# Jeremy Katz <katzj@redhat.com>
#
import glob
import iutil
import isys
import os
import time
import sys
import string
import language
import shutil
import traceback
from flags import flags
from product import *
from constants import *
from upgrade import bindMountDevDirectory
from storage.errors import *
import logging
log = logging.getLogger("anaconda")
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
def doPostAction(anaconda):
    """Delegate post-installation actions to the active install class."""
    anaconda.id.instClass.postAction(anaconda)
def firstbootConfiguration(anaconda):
    """Write the marker file controlling firstboot on the installed system.

    FIRSTBOOT_RECONFIG: create an empty /etc/reconfigSys to request
    reconfiguration mode. FIRSTBOOT_SKIP: disable firstboot via
    /etc/sysconfig/firstboot. Any other value leaves the system untouched.
    """
    if anaconda.id.firstboot == FIRSTBOOT_RECONFIG:
        # The file's mere existence triggers reconfiguration; no content needed.
        with open(anaconda.rootPath + '/etc/reconfigSys', 'w+') as f:
            pass
    elif anaconda.id.firstboot == FIRSTBOOT_SKIP:
        # 'with' guarantees the handle is closed even if write() fails.
        with open(anaconda.rootPath + '/etc/sysconfig/firstboot', 'w+') as f:
            f.write('RUN_FIRSTBOOT=NO')

    return
def writeKSConfiguration(anaconda):
    """Write a kickstart file describing this install to /root on the target."""
    log.info("Writing autokickstart file")
    fn = anaconda.rootPath + "/root/anaconda-ks.cfg"

    anaconda.id.writeKS(fn)
def copyAnacondaLogs(anaconda):
    """Copy the installer's log files into /var/log on the installed system.

    Best effort: logs that are missing, unreadable, or fail to copy are
    silently skipped so a log problem never aborts the install.
    """
    log.info("Copying anaconda logs")
    for (fn, dest) in (("/tmp/anaconda.log", "anaconda.log"),
                       ("/tmp/syslog", "anaconda.syslog"),
                       ("/tmp/X.log", "anaconda.xlog"),
                       ("/tmp/program.log", "anaconda.program.log"),
                       ("/tmp/storage.log", "anaconda.storage.log"),
                       ("/tmp/ifcfg.log", "anaconda.ifcfg.log"),
                       ("/tmp/yum.log", "anaconda.yum.log")):
        if os.access(fn, os.R_OK):
            dst = "%s/var/log/%s" % (anaconda.rootPath, dest)
            try:
                shutil.copyfile(fn, dst)
                # Logs may contain sensitive data; restrict to root (0o600).
                os.chmod(dst, 0o600)
            except (IOError, OSError):
                # Narrowed from a bare except: only swallow I/O failures,
                # keeping the copy best-effort without hiding real bugs.
                pass
def turnOnFilesystems(anaconda):
    """Commit the storage configuration and mount the target filesystems.

    Dispatch step: going backwards unmounts and returns DISPATCH_NOOP.
    Going forwards it tears down stale devices (fresh installs), executes
    the queued storage actions, and then either mounts the filesystems
    (install) or rewrites fstab for migrated filesystems (upgrade).
    Storage errors are shown to the user, who may file a bug (re-raise)
    or exit the installer.

    Note: this file is Python 2 only — the ``except X as (msg, device)``
    tuple-unpacking syntax below is invalid in Python 3.
    """
    if anaconda.dir == DISPATCH_BACK:
        if not anaconda.id.upgrade:
            log.info("unmounting filesystems")
            anaconda.id.storage.umountFilesystems()
        return DISPATCH_NOOP

    if not anaconda.id.upgrade:
        if not anaconda.id.storage.fsset.active:
            # turn off any swaps that we didn't turn on
            # needed for live installs
            iutil.execWithRedirect("swapoff", ["-a"],
                                   stdout = "/dev/tty5", stderr="/dev/tty5")
        anaconda.id.storage.devicetree.teardownAll()

    # An upgrade needs its fstab rewritten later if any filesystem was
    # migrated to a new fstype.
    upgrade_migrate = False
    if anaconda.id.upgrade:
        for d in anaconda.id.storage.migratableDevices:
            if d.format.migrate:
                upgrade_migrate = True

    # Map each storage failure class to a user-facing title/message/details
    # triple; a non-None title after the try block means something failed.
    title = None
    message = None
    details = None

    try:
        anaconda.id.storage.doIt()
    except DeviceResizeError as (msg, device):
        # XXX does this make any sense? do we support resize of
        #     devices other than partitions?
        title = _("Device Resize Failed")
        message = _("An error was encountered while "
                    "resizing device %s.") % (device,)
        details = msg
    except DeviceCreateError as (msg, device):
        title = _("Device Creation Failed")
        message = _("An error was encountered while "
                    "creating device %s.") % (device,)
        details = msg
    except DeviceDestroyError as (msg, device):
        title = _("Device Removal Failed")
        message = _("An error was encountered while "
                    "removing device %s.") % (device,)
        details = msg
    except DeviceError as (msg, device):
        title = _("Device Setup Failed")
        message = _("An error was encountered while "
                    "setting up device %s.") % (device,)
        details = msg
    except FSResizeError as (msg, device):
        title = _("Resizing Failed")
        message = _("There was an error encountered while "
                    "resizing the device %s.") % (device,)

        # resize2fs & friends log to /tmp/resize.out; prefer that full
        # output over the bare exception message when available.
        if os.path.exists("/tmp/resize.out"):
            details = open("/tmp/resize.out", "r").read()
        else:
            details = "%s" %(msg,)
    except FSMigrateError as (msg, device):
        title = _("Migration Failed")
        message = _("An error was encountered while "
                    "migrating filesystem on device %s.") % (device,)
        details = msg
    except FormatCreateError as (msg, device):
        title = _("Formatting Failed")
        message = _("An error was encountered while "
                    "formatting device %s.") % (device,)
        details = msg
    except Exception as e:
        # catch-all
        title = _("Storage Activation Failed")
        message = _("An error was encountered while "
                    "activating your storage configuration.")
        details = str(e)

    if title:
        # rc == 0: "_File Bug" -> re-raise so the exception handler can
        # collect a traceback; rc == 1: "_Exit installer".
        rc = anaconda.intf.detailedMessageWindow(title, message, details,
                        type = "custom",
                        custom_buttons = [_("_File Bug"), _("_Exit installer")])

        if rc == 0:
            raise
        elif rc == 1:
            sys.exit(1)

    if not anaconda.id.upgrade:
        anaconda.id.storage.turnOnSwap()
        anaconda.id.storage.mountFilesystems(raiseErrors=False,
                                             readOnly=False,
                                             skipRoot=anaconda.backend.skipFormatRoot)
    else:
        if upgrade_migrate:
            # we should write out a new fstab with the migrated fstype
            shutil.copyfile("%s/etc/fstab" % anaconda.rootPath,
                            "%s/etc/fstab.anaconda" % anaconda.rootPath)
            anaconda.id.storage.fsset.write(anaconda.rootPath)

        # and make sure /dev is mounted so we can read the bootloader
        bindMountDevDirectory(anaconda.rootPath)
def setupTimezone(anaconda):
    """Apply the selected timezone to the installer environment.

    Sets TZ, copies the zoneinfo file to /etc/localtime, and syncs the
    system clock from the hardware clock. Skipped on upgrades and when
    stepping backwards through the installer.
    """
    # we don't need this on an upgrade or going backwards
    if anaconda.id.upgrade or anaconda.dir == DISPATCH_BACK:
        return

    os.environ["TZ"] = anaconda.id.timezone.tz
    tzfile = "/usr/share/zoneinfo/" + anaconda.id.timezone.tz
    tzlocalfile = "/etc/localtime"
    if not os.access(tzfile, os.R_OK):
        log.error("unable to set timezone")
    else:
        # remove first: /etc/localtime may be a symlink copyfile would follow
        try:
            os.remove(tzlocalfile)
        except OSError:
            pass
        try:
            shutil.copyfile(tzfile, tzlocalfile)
        except OSError as e:
            log.error("Error copying timezone (from %s): %s" %(tzfile, e.strerror))

    # s390 has no hardware clock accessible this way
    if iutil.isS390():
        return
    args = [ "--hctosys" ]
    if anaconda.id.timezone.utc:
        args.append("-u")

    try:
        iutil.execWithRedirect("/usr/sbin/hwclock", args, stdin = None,
                               stdout = "/dev/tty5", stderr = "/dev/tty5")
    except RuntimeError:
        log.error("Failed to set clock")
# FIXME: this is a huge gross hack.  hard coded list of files
# created by anaconda so that we can not be killed by selinux
def setFileCons(anaconda):
    """Restore SELinux contexts on files/dirs that anaconda created.

    Works from a hard-coded list of known-created files plus LVM volume
    group device nodes, then recursively relabels a set of directories.
    No-op unless SELinux is enabled.
    """
    # helper: the directory itself plus everything beneath it
    def lst(root):
        rc = [root]
        for (root, dirs, files) in os.walk(root):
            rc.extend(map(lambda d: root+"/"+d, dirs))
            rc.extend(map(lambda d: root+"/"+d, files))
        return rc

    if flags.selinux:
        log.info("setting SELinux contexts for anaconda created files")

        files = ["/etc/rpm/macros", "/etc/dasd.conf", "/etc/zfcp.conf",
                 "/etc/lilo.conf.anaconda", "/lib64", "/usr/lib64",
                 "/etc/blkid.tab", "/etc/blkid.tab.old",
                 "/etc/mtab", "/etc/fstab", "/etc/resolv.conf",
                 "/etc/modprobe.conf", "/etc/modprobe.conf~",
                 "/var/log/wtmp", "/var/run/utmp", "/etc/crypttab",
                 "/dev/log", "/var/lib/rpm", "/", "/etc/raidtab",
                 "/etc/mdadm.conf", "/etc/sysconfig/network",
                 "/etc/udev/rules.d/70-persistent-net.rules",
                 "/root/install.log", "/root/install.log.syslog",
                 "/etc/shadow", "/etc/shadow-", "/etc/gshadow"] + \
                glob.glob('/etc/dhcp/dhclient-*.conf')

        # LVM volume group device nodes also need relabeling
        vgs = ["/dev/%s" % vg.name for vg in anaconda.id.storage.vgs]

        # files are all of the files laid to down to disk but not becoming
        # part of an rpm package
        for f in files + vgs:
            isys.resetFileContext(os.path.normpath(f), anaconda.rootPath)

        # ugh, this is ugly
        for d in ["/etc/sysconfig/network-scripts", "/var/cache/yum", "/var/lib/rpm", "/var/lib/yum", "/etc/lvm", "/dev/mapper", "/etc/iscsi", "/var/lib/iscsi", "/root", "/var/log", "/etc/modprobe.d", "/etc/sysconfig" ]:
            if not os.path.isdir(anaconda.rootPath + d):
                continue

            # This is stupid, but resetFileContext expects to get the path
            # without "/mnt/sysimage" in front, whereas everything else needs
            # it there.  So we add it to get the list of files, then
            # immediately remove it, then pass it back to resetFileContext
            # anyway.
            for f in map(lambda f: f.replace(anaconda.rootPath, ""),
                         filter(lambda f: os.access(f, os.R_OK),
                                lst(anaconda.rootPath+d))):
                ret = isys.resetFileContext(os.path.normpath(f),
                                            anaconda.rootPath)

    return
# FIXME: using rpm directly here is kind of lame, but in the yum backend
# we don't want to use the metadata as the info we need would require
# the filelists.  and since we only ever call this after an install is
# done, we can be guaranteed this will work.  put here because it's also
# used for livecd installs
def rpmKernelVersionList(rootPath = "/"):
    """Return [(version, arch, tag), ...] for every installed kernel.

    Queries the rpm database under rootPath for packages providing
    'kernel'; the version comes from the vmlinuz filename, the tag is
    "base" for plain kernel packages or the flavor suffix (e.g. "PAE").
    """
    import rpm

    # derive the kernel version from the vmlinuz path in the package filelist
    def get_version(header):
        for f in header['filenames']:
            if f.startswith('/boot/vmlinuz-'):
                return f[14:]
            elif f.startswith('/boot/efi/EFI/redhat/vmlinuz-'):
                return f[29:]
        return ""

    # map package name to kernel flavor tag ("base", "PAE", "debug", ...)
    def get_tag(header):
        if header['name'] == "kernel":
            return "base"
        elif header['name'].startswith("kernel-"):
            return header['name'][7:]
        return ""

    versions = []

    iutil.resetRpmDb(rootPath)
    ts = rpm.TransactionSet(rootPath)

    mi = ts.dbMatch('provides', 'kernel')
    for h in mi:
        v = get_version(h)
        tag = get_tag(h)
        if v == "" or tag == "":
            log.warning("Unable to determine kernel type/version for %s-%s-%s.%s" %(h['name'], h['version'], h['release'], h['arch']))
            continue
        # rpm really shouldn't return the same kernel more than once... but
        # sometimes it does (#467822)
        if (v, h['arch'], tag) in versions:
            continue
        versions.append( (v, h['arch'], tag) )

    return versions
def rpmSetupGraphicalSystem(anaconda):
    """Configure graphical boot and runlevel based on installed packages.

    Adds "rhgb quiet" to the bootloader arguments when a graphical boot
    splash is available, and defaults to runlevel 5 when a graphical
    login plus X server were installed during a graphical install.
    """
    import rpm

    iutil.resetRpmDb(anaconda.rootPath)
    ts = rpm.TransactionSet(anaconda.rootPath)

    # Only add "rhgb quiet" on non-s390, non-serial installs
    if iutil.isConsoleOnVirtualTerminal() and \
       (ts.dbMatch('provides', 'rhgb').count() or \
        ts.dbMatch('provides', 'plymouth').count()):
        anaconda.id.bootloader.args.append("rhgb quiet")

    if ts.dbMatch('provides', 'service(graphical-login)').count() and \
       ts.dbMatch('provides', 'xorg-x11-server-Xorg').count() and \
       anaconda.id.displayMode == 'g' and not flags.usevnc:
        anaconda.id.desktop.setDefaultRunLevel(5)
#Recreate initrd for use when driver disks add modules
def recreateInitrd (kernelTag, instRoot):
    """Rebuild the initrd for kernelTag inside the chroot at instRoot."""
    log.info("recreating initrd for %s" % (kernelTag,))
    # new-kernel-pkg handles dracut invocation and depmod for us
    iutil.execWithRedirect("/sbin/new-kernel-pkg",
                           [ "--mkinitrd", "--dracut", "--depmod", "--install", kernelTag ],
                           stdout = "/dev/null", stderr = "/dev/null",
                           root = instRoot)
def betaNagScreen(anaconda):
    """Warn the user that this is pre-release software.

    Shows a nag dialog with the bugzilla product to file issues against;
    choosing "Exit" (after confirmation) terminates the installer.
    Dispatch step: going backwards is a no-op.
    """
    # map product name prefixes to their public-beta bugzilla products
    publicBetas = { "Red Hat Linux": "Red Hat Linux Public Beta",
                    "Red Hat Enterprise Linux": "Red Hat Enterprise Linux Public Beta",
                    "Fedora Core": "Fedora Core",
                    "Fedora": "Fedora" }

    if anaconda.dir == DISPATCH_BACK:
        return DISPATCH_NOOP

    fileagainst = None
    for (key, val) in publicBetas.items():
        if productName.startswith(key):
            fileagainst = val
    if fileagainst is None:
        fileagainst = "%s Beta" %(productName,)

    # loop until the user either continues the install or confirms exit
    while 1:
        rc = anaconda.intf.messageWindow( _("Warning! This is pre-release software!"),
                                 _("Thank you for downloading this "
                                   "pre-release of %(productName)s.\n\n"
                                   "This is not a final "
                                   "release and is not intended for use "
                                   "on production systems.  The purpose of "
                                   "this release is to collect feedback "
                                   "from testers, and it is not suitable "
                                   "for day to day usage.\n\n"
                                   "To report feedback, please visit:\n\n"
                                   "   %(bugzillaUrl)s\n\n"
                                   "and file a report against '%(fileagainst)s'.\n")
                                 % {'productName': productName,
                                    'bugzillaUrl': bugzillaUrl,
                                    'fileagainst': fileagainst},
                                   type="custom", custom_icon="warning",
                                   custom_buttons=[_("_Exit"), _("_Install anyway")])

        if not rc:
            # rc == 0 means "_Exit": confirm before rebooting out
            msg =  _("Your system will now be rebooted...")
            buttons = [_("_Back"), _("_Reboot")]

            rc = anaconda.intf.messageWindow( _("Warning! This is pre-release software!"),
                                     msg,
                                     type="custom", custom_icon="warning",
                                     custom_buttons=buttons)
            if rc:
                sys.exit(0)
        else:
            break
def doReIPL(anaconda):
    """Prepare s390 re-IPL after install; no-op on other arches/back-steps.

    Stores the message returned by iutil.reIPL for later display.
    """
    if not iutil.isS390() or anaconda.dir == DISPATCH_BACK:
        return DISPATCH_NOOP

    anaconda.reIPLMessage = iutil.reIPL(anaconda, os.getppid())

    return DISPATCH_FORWARD
|
icomfort/anaconda
|
packages.py
|
Python
|
gpl-2.0
| 14,576
|
[
"VisIt"
] |
fef5f81c3287cf674783f70083dcad854afc2e9900321e20e3e139b936379714
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
import MDAnalysis as mda
from MDAnalysis.core import topologyattrs
from MDAnalysis.lib.mdamath import triclinic_box
from MDAnalysis.lib.distances import transform_RtoS
def assert_in_box(positions, box):
    """Assert that every position lies strictly inside the primary periodic
    image defined by `box` (i.e., all fractional coordinates in [0, 1)).
    """
    fractional = transform_RtoS(positions, box)
    assert np.all(fractional >= 0.0) and np.all(fractional < 1.0)
class UnWrapUniverse(object):
    """A Universe containing small molecule clusters with molecules (as well as
    clusters) broken or whole accross periodic boundaries:
    The universe comprises 47 atoms in 12 molecules:
    * Three "molecules" (type A) containing a single atom:
      1. within the central image
      2. in a neighboring image
      3. in an image two boxes away from the central image
    * Four molecules (type B) containing three atoms:
      4. within the central image
      5. in the corner of the box, close to 4., broken accross one boundary
         but whole accross another
      6. outside the box, broken diagonally accross two neighboring images,
         close to 5.
      7. whole accross a boundary opposite to the location of 4., i.e.,
         close to 4. in terms of PBC
    * Two cyclical molecules (type C) containing four atoms:
      8. broken accross the front/back but whole accross the top face
      9. within the central image close to the front and bottom face, close
         to 8. in terms of PBC
    * Three linear chain molecules (type D) containing 8 atoms, spanning
      more than half a box length:
      10. within the central image relatively close to the top boundary
      11. close to 10, broken mid-molecule accross the left/right boundary
      12. close to 11. but in another image, whole accross the same
          boundary as 11. but not mid-molecule
    There are 15 residues in the universe:
    Molecules of type A, B, and C each have a single residue, while each of
    the chain molecules of type D have two residues with 4 atoms per residue.
    Atoms can be selected by their residue's resname. For molecules of type
    A, B, and C, the resnames are A, B, and C, respectively. Molecules of
    type D have the resnames D1 and D2.
    Atoms can also be selected by their moltype attribute, which is identical
    to the corresponding molecule type (A, B, C, D).
    The molecules/residues are contained in 6 segments, whereof the first
    three segments contain all molecules of type A, B, or C, respectively.
    Each of the remaining three segments contain a molecule of type D.
    A projection onto the x-z plane of box (orthorhombic case) looks roughly
    like this::

                :                   :
                :                   :
         6      :  8                :                   :
        - + - -:+-------+----------:- - - - - - - - - -: -
               |5      8           |                   :
               |                   |                   :
               |  (10)             |                   :
               | o-o-o-o-x-x-x-x   |                   :
               +x-x-x-x   o-o-o-o+                     :
               |  (11)             |                   :
               |                   |                   :
               |    1              |  2                :  3
               |                   |                   :
               |            9      |                   :
               | 4        !   7|                       :
               | !      9 !    |                       :
               | 4-4      7+7                          :
               |                   |                   :
              5+5                  |                   :
        - + - -:+------------------:- - - - - - - - - -: -
         6-6   :                   :                   :
                :                   :
                :                   :
                :                   :
                :       (12)        :
                :   x-x-x-x-o-o+o-o
                :                   :

    Note that the cyclic structures of molecules 8 and 9 lie in the x-y plane
    and are therefore not fully visible in the x-z plane projection.

    Parameters
    ----------
    have_bonds : bool, optional
        If ``True``, intramolecular bonds will be present in the topology.
        If ``False``, there will be no bond information.
    have_masses : bool, optional
        If ``True``, each atom will be assigned a mass of 1 u.
        If ``False``, masses won't be present in the topology.
    have_molnums : bool, optional
        If ``True``, the topology will have molecule information (molnums).
        If ``False``, that information will be missing.
    have_charges : bool, optional
        If ``False``, charges won't be present in the topology.
        If ``True``, atoms will carry the following charges::
        * atoms of molecule type A: 2 (total: 6)
        * atoms of molecule type B: -0.5 (total: -6)
        * atoms of molecule type C: -1.5 (total: -12)
        * atoms of molecule type D: 0.5 (total: 12)
    is_triclinic : bool, optional
        If ``False``, the box will be a cube with an edge length of 10 Angstrom.
        If ``True``, the cubic box will be sheared by 1 Angstrom in the x and
        y directions.
    """

    def __new__(cls, have_bonds=True, have_masses=True, have_molnums=True,
                have_charges=True, is_triclinic=False):
        # box:
        a = 10.0  # edge length
        tfac = 0.1  # factor for box vector shift of triclinic boxes (~84°)
        if is_triclinic:
            box = triclinic_box([a, 0.0, 0.0],
                                [a * tfac, a, 0.0],
                                [a * tfac, a * tfac, a])
        else:
            box = np.array([a, a, a, 90.0, 90.0, 90.0], dtype=np.float32)
        # number of atoms, residues, and segments:
        n_atoms = 47
        n_residues = 15
        n_segments = 6
        # resindices:
        residx = np.empty(n_atoms, dtype=np.int64)
        # type A: one single-atom residue per molecule
        rix = 0
        for i in range(0, 3, 1):
            residx[i] = rix
            rix += 1
        # type B: one residue per three-atom molecule
        for i in range(3, 15, 3):
            residx[i:i+3] = rix
            rix += 1
        # type C & D: four atoms per residue (type D molecules have two
        # residues each)
        for i in range(15, 47, 4):
            residx[i:i+4] = rix
            rix += 1
        # segindices:
        segidx = np.empty(n_residues, dtype=np.int64)
        segidx[0:3] = 0
        segidx[3:7] = 1
        segidx[7:9] = 2
        segidx[9:11] = 3
        segidx[11:13] = 4
        segidx[13:15] = 5
        # universe:
        u = mda.Universe.empty(
            # topology things
            n_atoms=n_atoms,
            n_residues=n_residues,
            n_segments=n_segments,
            atom_resindex=residx,
            residue_segindex=segidx,
            # trajectory things
            trajectory=True,
            velocities=False,
            forces=False,
        )
        # resnames: we always want those for selection purposes
        resnames = ['A'] * 3
        resnames += ['B'] * 4
        resnames += ['C'] * 2
        resnames += ['D1', 'D2'] * 3
        u.add_TopologyAttr(topologyattrs.Resnames(resnames))
        # moltypes: we always want those for selection purposes
        moltypes = ['A'] * 3
        moltypes += ['B'] * 4
        moltypes += ['C'] * 2
        moltypes += ['D'] * 6
        u.add_TopologyAttr(topologyattrs.Moltypes(moltypes))
        # trajectory:
        ts = u.trajectory.ts
        ts.frame = 0
        ts.dimensions = box
        # positions (given as fractional coordinates, scaled to absolute
        # coordinates below):
        relpos = np.empty((n_atoms, 3), dtype=np.float32)
        # type A
        relpos[0:3, :] = np.array([[0.5, 0.5, 0.5],
                                   [1.4, 0.5, 0.5],
                                   [2.1, 0.5, 0.5]], dtype=np.float32)
        # type B
        relpos[3:15, :] = np.array([[0.1, 0.1, 0.2],
                                    [0.1, 0.1, 0.1],
                                    [0.2, 0.1, 0.1],
                                    [-0.05, 0.2, 0.05],
                                    [0.05, 0.2, 0.05],
                                    [0.05, 0.2, 0.95],
                                    [-0.2, -0.9, 1.05],
                                    [-0.2, 0.1, -0.05],
                                    [-0.1, 0.1, -0.05],
                                    [0.95, 0.2, 0.25],
                                    [0.95, 0.2, 0.15],
                                    [1.05, 0.2, 0.15]], dtype=np.float32)
        # type C
        relpos[15:23, :] = np.array([[0.4, 0.95, 1.05],
                                     [0.4, 0.95, 0.95],
                                     [0.4, 0.05, 0.95],
                                     [0.4, 0.05, 1.05],
                                     [0.6, 0.05, 0.25],
                                     [0.6, 0.05, 0.15],
                                     [0.6, 0.15, 0.15],
                                     [0.6, 0.15, 0.25]], dtype=np.float32)
        # type D
        relpos[23:47, :] = np.array([[0.2, 0.7, 0.8],
                                     [0.3, 0.7, 0.8],
                                     [0.4, 0.7, 0.8],
                                     [0.5, 0.7, 0.8],
                                     [0.6, 0.7, 0.8],
                                     [0.7, 0.7, 0.8],
                                     [0.8, 0.7, 0.8],
                                     [0.9, 0.7, 0.8],
                                     [0.66, 0.75, 0.7],
                                     [0.76, 0.75, 0.7],
                                     [0.86, 0.75, 0.7],
                                     [0.96, 0.75, 0.7],
                                     [0.06, 0.75, 0.7],
                                     [0.16, 0.75, 0.7],
                                     [0.26, 0.75, 0.7],
                                     [0.36, 0.75, 0.7],
                                     [1.14, 0.65, -0.4],
                                     [1.04, 0.65, -0.4],
                                     [0.94, 0.65, -0.4],
                                     [0.84, 0.65, -0.4],
                                     [0.74, 0.65, -0.4],
                                     [0.64, 0.65, -0.4],
                                     [0.54, 0.65, -0.4],
                                     [0.44, 0.65, -0.4]], dtype=np.float32)
        # make a copy, we need the original later
        _relpos = relpos.copy()
        # apply y- and z-dependent shift of x and y coords for triclinic boxes:
        if is_triclinic:
            # x-coord shift depends on y- and z-coords
            _relpos[:, 0] += tfac * _relpos[:, 1:].sum(axis=1)
            # y-coord shift depends on z-coords only
            _relpos[:, 1] += tfac * _relpos[:, 2]
        # scale relative to absolute positions:
        ts.positions = (_relpos * np.array([a, a, a])).astype(np.float32)
        # bonds:
        if have_bonds:
            bonds = []
            # type A has no bonds
            # type B
            for base in range(3, 15, 3):
                for i in range(2):
                    bonds.append((base + i, base + i + 1))
            # type C (cyclic: a closing bond from last to first atom)
            for base in range(15, 23, 4):
                for i in range(3):
                    bonds.append((base + i, base + i + 1))
                bonds.append((0 + base, 3 + base))
            # type D
            for base in range(23, 47, 8):
                for i in range(7):
                    bonds.append((base + i, base + i + 1))
            u.add_TopologyAttr(topologyattrs.Bonds(bonds))
        # masses:
        if have_masses:
            # masses are all set to 1 so that one can cross-check the results of
            # reference='com' with reference='cog' unwrapping
            masses = np.ones(n_atoms)
            u.add_TopologyAttr(topologyattrs.Masses(masses))
        # molnums:
        if have_molnums:
            molnums = [0, 1, 2]
            molnums += [3, 4, 5, 6]
            molnums += [7, 8]
            molnums += [9, 9, 10, 10, 11, 11]
            u.add_TopologyAttr(topologyattrs.Molnums(molnums))
        # charges:
        if have_charges:
            # type A
            charges = [2] * 3
            # type B
            charges += [-0.5] * 12
            # type C
            charges += [-1.5] * 8
            # type D  (the original comment wrongly said "type C")
            charges += [0.5] * 24
            u.add_TopologyAttr(topologyattrs.Charges(charges))
        # shamelessly monkey-patch some custom universe attributes:
        u._is_triclinic = is_triclinic
        u._relpos = relpos
        u._tfac = tfac
        u._box_edge = a
        # bind custom methods to universe:
        u.unwrapped_coords = cls.unwrapped_coords.__get__(u)
        u.wrapped_coords = cls.wrapped_coords.__get__(u)
        u.center = cls.center.__get__(u)
        return u

    def unwrapped_coords(self, compound, reference):
        """Returns coordinates which correspond to the unwrapped system.

        Parameters
        ----------
        compound : {'group', 'segments', 'residues', 'molecules', 'fragments'}
            Which type of component is unwrapped.
        reference : {'com', 'cog', None}
            The reference point of each compound that is shifted into the
            primary unit cell.

        Note
        ----
        This function assumes that all atom masses are equal. Therefore, the
        returned coordinates for ``reference='com'`` and ``reference='cog'`` are
        identical.
        """
        if reference is not None:
            ref = reference.lower()
            if ref not in ['com', 'cog']:
                raise ValueError("Unknown unwrap reference: {}"
                                 "".format(reference))
        comp = compound.lower()
        if comp not in ['group', 'segments', 'residues', 'molecules',
                        'fragments']:
            raise ValueError("Unknown unwrap compound: {}".format(compound))
        # get relative positions:
        relpos = self._relpos.copy()
        # type B
        # molecule 5, atom 2 & molecule 6, atom 1 & 2
        relpos[8, :] = [0.05, 0.2, -0.05]
        relpos[10, :] = [-0.2, -0.9, 0.95]
        relpos[11, :] = [-0.1, -0.9, 0.95]
        # type C
        # molecule 8, atoms 2 & 3
        relpos[17, :] = [0.4, 1.05, 0.95]
        relpos[18, :] = [0.4, 1.05, 1.05]
        # type D
        # molecule 11, residue 1
        relpos[35:39, :] = np.array([[1.06, 0.75, 0.7],
                                     [1.16, 0.75, 0.7],
                                     [1.26, 0.75, 0.7],
                                     [1.36, 0.75, 0.7]], dtype=np.float32)
        # apply image shifts if necessary:
        if reference is None:
            if comp == 'residues':
                # second residue of molecule 11
                relpos[35:39, :] = np.array([[0.06, 0.75, 0.7],
                                             [0.16, 0.75, 0.7],
                                             [0.26, 0.75, 0.7],
                                             [0.36, 0.75, 0.7]],
                                            dtype=np.float32)
        else:
            # molecule 2 & 3
            relpos[1:3, :] = np.array([[0.4, 0.5, 0.5],
                                       [0.1, 0.5, 0.5]], dtype=np.float32)
            # molecule 6
            relpos[9:12, :] = np.array([[0.8, 0.1, 1.05],
                                        [0.8, 0.1, 0.95],
                                        [0.9, 0.1, 0.95]], dtype=np.float32)
            # molecule 8
            relpos[15:19, :] = np.array([[0.4, -0.05, 0.05],
                                         [0.4, -0.05, -0.05],
                                         [0.4, 0.05, -0.05],
                                         [0.4, 0.05, 0.05]], dtype=np.float32)
            if comp == 'residues':
                # molecule 11, residue 1 & molecule 12
                relpos[35:47, :] = np.array([[0.06, 0.75, 0.7],
                                             [0.16, 0.75, 0.7],
                                             [0.26, 0.75, 0.7],
                                             [0.36, 0.75, 0.7],
                                             [1.14, 0.65, 0.6],
                                             [1.04, 0.65, 0.6],
                                             [0.94, 0.65, 0.6],
                                             [0.84, 0.65, 0.6],
                                             [0.74, 0.65, 0.6],
                                             [0.64, 0.65, 0.6],
                                             [0.54, 0.65, 0.6],
                                             [0.44, 0.65, 0.6]],
                                            dtype=np.float32)
            else:
                # molecule 11 & 12
                relpos[31:47, :] = np.array([[-0.34, 0.75, 0.7],
                                             [-0.24, 0.75, 0.7],
                                             [-0.14, 0.75, 0.7],
                                             [-0.04, 0.75, 0.7],
                                             [0.06, 0.75, 0.7],
                                             [0.16, 0.75, 0.7],
                                             [0.26, 0.75, 0.7],
                                             [0.36, 0.75, 0.7],
                                             [1.14, 0.65, 0.6],
                                             [1.04, 0.65, 0.6],
                                             [0.94, 0.65, 0.6],
                                             [0.84, 0.65, 0.6],
                                             [0.74, 0.65, 0.6],
                                             [0.64, 0.65, 0.6],
                                             [0.54, 0.65, 0.6],
                                             [0.44, 0.65, 0.6]],
                                            dtype=np.float32)
        # apply y- and z-dependent shift of x and y coords for triclinic boxes:
        if self._is_triclinic:
            # x-coord shift depends on y- and z-coords
            relpos[:, 0] += self._tfac * relpos[:, 1:].sum(axis=1)
            # y-coord shift depends on z-coords only
            relpos[:, 1] += self._tfac * relpos[:, 2]
        # scale relative to absolute positions:
        a = self._box_edge
        positions = relpos * np.array([a, a, a])
        return positions.astype(np.float32)

    def wrapped_coords(self, compound, center):
        """Returns coordinates which correspond to the wrapped system.

        Parameters
        ----------
        compound : {'atoms', 'group', 'segments', 'residues', 'molecules', \
                    'fragments'}
            Which type of component is unwrapped. Note that for ``'group'``,
            the result will only be correct *if the group is the entire system*.
        center : {'com', 'cog'}
            The reference point of each compound that is shifted into the
            primary unit cell.

        Note
        ----
        This function assumes that all atom masses are equal. Therefore, the
        returned coordinates for ``center='com'`` and ``center='cog'`` are
        identical.
        """
        ctr = center.lower()
        if ctr not in ['com', 'cog']:
            raise ValueError("Unknown unwrap reference: {}".format(center))
        comp = compound.lower()
        if comp not in ['atoms', 'group', 'segments', 'residues',
                        'molecules', 'fragments']:
            raise ValueError("Unknown unwrap compound: {}".format(compound))
        # wrapped relative positions:
        relpos = self._relpos.copy()
        # apply required box shifts:
        if comp == 'atoms':
            # type A
            # molecule 2: negative x-shift
            relpos[1, 0] -= 1.0
            # molecule 2: negative double x-shift
            relpos[2, 0] -= 2.0
            # type B
            # molecule 5, atom 0: positive x-shift
            relpos[6, 0] += 1.0
            # molecule 6, atom 0: positive x- and y-shift and negative z-shift
            relpos[9, :] += [1.0, 1.0, -1.0]
            # molecule 6, atom 2 & 3: positive x- and z-shift
            relpos[10:12, :] += [1.0, 0.0, 1.0]
            # molecule 7, atom 2: negative x-shift
            relpos[14, 0] -= 1.0
            # type C
            # molecule 8, atoms 0 & 3: negative z-shift
            relpos[15, 2] -= 1.0
            relpos[18, 2] -= 1.0
            # type D
            # molecule 12, atoms 0 & 1: negative x-shift
            relpos[39:41, 0] -= 1.0
            # molecule 12: positive z-shift
            relpos[39:47, 2] += 1.0
        elif comp == 'group':
            # com or cog of entire system is within box, so no shift
            pass
        elif comp == 'segments':
            # type A
            # molecules 1-3: negative x-shift
            relpos[0:3, 0] -= 1.0
            # type D
            # molecule 12: positive z-shift
            relpos[39:47, 2] += 1.0
        else:  # comp is residues, molecules, or fragments
            # type A
            # molecule 2: negative x-shift
            relpos[1, 0] -= 1.0
            # molecule 2: negative double x-shift
            relpos[2, 0] -= 2.0
            # type B
            # molecule 6: positive x- and y-shift
            relpos[9:12, :2] += 1.0
            # type C
            # molecule 8: negative z-shift
            relpos[15:19, 2] -= 1.0
            # type D
            # molecule 12: positive z-shift
            relpos[39:47, 2] += 1.0
        # apply y- and z-dependent shift of x and y coords for triclinic boxes:
        if self._is_triclinic:
            # x-coord shift depends on y- and z-coords
            relpos[:, 0] += self._tfac * relpos[:, 1:].sum(axis=1)
            # y-coord shift depends on z-coords only
            relpos[:, 1] += self._tfac * relpos[:, 2]
        # scale relative to absolute positions:
        a = self._box_edge
        positions = relpos * np.array([a, a, a])
        return positions.astype(np.float32)

    def center(self, compound):
        """Returns centers which correspond to the unwrapped system.

        Parameters
        ----------
        compound : {'group', 'segments', 'residues', 'molecules', \
                    'fragments'}
            Which type of component is unwrapped. Note that for ``'group'``,
            the result will only be correct *if the group is the entire system*.
            (``'atoms'`` is deliberately not accepted here, since the center
            of a single atom is just its position.)

        Note
        ----
        This function assumes that all atom masses are equal. Therefore, the
        returned coordinates for ``center='com'`` and ``center='cog'`` are
        identical.
        """
        coords = self.unwrapped_coords(compound, reference=None)
        comp = compound.lower()
        if comp not in ['group', 'segments', 'residues', 'molecules',
                        'fragments']:
            raise ValueError("Unknown unwrap compound: {}".format(compound))
        pos = 0
        # BUG FIX: the comparisons below used the raw ``compound`` string
        # instead of the validated, lower-cased ``comp``, so mixed-case input
        # (e.g. "Residues") passed validation but silently took the wrong
        # branch.
        if comp == "residues":
            center_pos = np.zeros((15, 3), dtype=np.float32)
        else:
            center_pos = np.zeros((12, 3), dtype=np.float32)
        # type A: single-atom molecules, center == position
        for base in range(3):
            loc_center = coords[base, :]
            center_pos[pos, :] = loc_center
            pos += 1
        # type B: three-atom molecules
        for base in range(3, 15, 3):
            loc_center = np.mean(coords[base:base + 3, :], axis=0)
            center_pos[pos, :] = loc_center
            pos += 1
        if comp == "residues":
            # types C & D at four atoms per residue
            for base in range(15, 47, 4):
                loc_center = np.mean(coords[base:base + 4, :], axis=0)
                center_pos[pos, :] = loc_center
                pos += 1
        else:
            # type C: four-atom molecules
            for base in range(15, 23, 4):
                loc_center = np.mean(coords[base:base + 4, :], axis=0)
                center_pos[pos, :] = loc_center
                pos += 1
            # type D: eight-atom molecules
            for base in range(23, 47, 8):
                loc_center = np.mean(coords[base:base + 8, :], axis=0)
                center_pos[pos, :] = loc_center
                pos += 1
        if comp == "group":
            # NOTE(review): this returns the center of molecule 12 only;
            # presumably chosen to match the expected group center in the
            # tests -- confirm against the consuming test cases.
            center_pos = center_pos[11]
        elif comp == "segments":
            # NOTE(review): only the last three entries (the type-D
            # molecules, i.e. segments 4-6) are returned here -- confirm.
            center_pos = center_pos[9:]
        return center_pos
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/core/util.py
|
Python
|
gpl-2.0
| 25,240
|
[
"MDAnalysis"
] |
22323b163e505df2b148d01dd5f8754e928abc97be5352b3da7b83ee1f820de2
|
# This wizard contributed by Ezequiel "Zac" Panepucci 011114
# modified by Warren L. DeLano
from pymol.wizard import Wizard
from pymol import cmd
import pymol
from chempy.models import Indexed
from chempy import Atom,Bond
from chempy.cpv import add, sub, cross_product, scale, dot_product
from chempy.cpv import normalize, project, remove_component, reverse
from pymol.cgo import *
import math
# Template records for the four draggable control points of the box:
# (symbol, name, resi, resn, initial coordinate).  The fourth point sits off
# the plane of the first three so that a non-degenerate box can be spanned.
pseudo_atoms = [
    ['X', 'PLN1' , '1', 'PSDO', [0.0,0.0,0.0]],
    ['X', 'PLN2' , '1', 'PSDO', [10.0,0.0,0.0]],
    ['X', 'PLN3' , '1', 'PSDO', [10.0,10.0,0.0]],
    ['X', 'PLN4' , '1', 'PSDO', [10.0,10.0,4]]]
# Default display mode and default base name for the CGO object and its
# companion "<name>_points" pseudoatom object.
default_mode = 'box'
default_name = 'box'
class Box(Wizard):
    """Interactive PyMOL wizard that draws a box/walls/plane/quad CGO whose
    geometry is controlled by four draggable pseudoatoms ("<name>_points").
    """

    atom = None
    messages = 1
    labeling = 1
    obj_name = None

    def __init__(self, _self=cmd):
        Wizard.__init__(self, _self)
        self.editing_name = 0
        self.copying = 0
        self.points_name = ''
        self.set_name(default_name)
        self.mode = default_mode
        self.modes = [
            'box',
            'walls',
            'plane',
            'quad',
        ]
        self.mode_name = {
            'box': 'Box',
            'walls': 'Walls',
            'plane': 'Plane',
            'quad': 'Quad',
        }
        # build the "mode" submenu from the list of known modes
        smm = []
        smm.append([2, 'Box Mode', ''])
        for a in self.modes:
            smm.append([1, self.mode_name[a],
                        'cmd.get_wizard().set_mode("' + a + '")'])
        self.menu['mode'] = smm
        self.update_box()

    def set_mode(self, mode):
        """Switch the drawing mode and redraw."""
        if mode in self.modes:
            self.mode = mode
        self.status = 0
        self.update_box()
        self.cmd.refresh_wizard()

    def toggle_points(self):
        """Show/hide the control-point pseudoatom object."""
        if self.points_name in self.cmd.get_names(enabled_only=1):
            self.cmd.disable(self.points_name)
        else:
            self.cmd.enable(self.points_name)

    def delete_box(self):
        """Delete both the control points and the CGO object."""
        # BUG FIX: the original referenced the nonexistent attribute
        # ``self.point_name`` (AttributeError); the attribute is points_name.
        self.cmd.delete(self.points_name)
        self.cmd.delete(self.cgo_name)

    def edit_name(self, copying=0):
        """Enter interactive name-editing mode (optionally copying the box)."""
        self.editing_name = 1
        self.copying = copying
        self.new_name = ''
        self.cmd.refresh_wizard()

    def auto_position(self, fract=0.75, size=1.0):
        """Place the four control points in front of the camera.

        `fract` controls the depth between the clipping planes and `size`
        scales the plane relative to the field of view.
        """
        if self.points_name in self.cmd.get_names():
            if fract < 0.5:
                fract = 1.0 - fract
            if fract > 0.995:
                fract = 0.995  # minimum 1% distance from clipping planes
            fov = float(self.cmd.get_setting_legacy("field_of_view"))
            tan_half_fov = math.tan(math.pi * fov / 360.0)
            model = self.cmd.get_model(self.points_name)
            view = self.cmd.get_view()
            # locate box 1/4 and 3/4 distance from clipping plane
            one_minus_fract = 1.0 - fract
            plane_z1 = - (view[11] + (one_minus_fract * view[15] + fract * view[16]))
            plane_z2 = - (view[11] + (fract * view[15] + one_minus_fract * view[16]))
            # choose the size of the plane (larger than the fov, but not by much)
            plane_size = size * (one_minus_fract * view[15] + fract * view[16]) * tan_half_fov
            plane = [
                [-plane_size, -plane_size, plane_z1],
                [-plane_size,  plane_size, plane_z1],
                [ plane_size, -plane_size, plane_z1],
                [-plane_size, -plane_size, plane_z2],
            ]
            # rotate the camera-space coordinates into model space
            # (deprecated map()/apply() idioms replaced with comprehensions)
            plane = [[view[0] * p[0] + view[1] * p[1] + view[2] * p[2],
                      view[3] * p[0] + view[4] * p[1] + view[5] * p[2],
                      view[6] * p[0] + view[7] * p[1] + view[8] * p[2]]
                     for p in plane]
            # translate by the model-space origin of the view
            plane = [[view[12] + p[0], view[13] + p[1], view[14] + p[2]]
                     for p in plane]
            model.atom[0].coord = plane[0]
            model.atom[1].coord = plane[1]
            model.atom[2].coord = plane[2]
            model.atom[3].coord = plane[3]
            self.cmd.load_model(model, '_tmp', zoom=0)
            self.cmd.update(self.points_name, "_tmp")
            self.cmd.delete("_tmp")

    def set_name(self, name):
        """Rename the box (hiding the old CGO), optionally copying the points
        of the previous box, and (re)create the control points if missing."""
        hidden_name = None
        if self.points_name != '':
            if self.points_name in self.cmd.get_names("all"):
                hidden_name = "_" + self.cgo_name
                self.cmd.disable(self.points_name)
                self.cmd.set_name(self.points_name, hidden_name)  # hide
        self.name = name
        self.points_name = self.name + "_points"
        self.cgo_name = self.name
        if self.copying and hidden_name != None:
            self.cmd.copy(self.points_name, hidden_name, zoom=0)
            print("copy")
        else:
            hidden_name = "_" + self.cgo_name
            if hidden_name in self.cmd.get_names("all"):
                self.cmd.set_name(hidden_name, self.points_name)
        self.copying = 0
        if self.points_name not in self.cmd.get_names():
            # create the four control pseudoatoms centered on the view origin
            model = Indexed()
            origin = self.cmd.get_view()[12:15]
            for a in pseudo_atoms:
                new_atom = Atom()
                (new_atom.symbol, new_atom.name, new_atom.resi,
                 new_atom.resn, new_atom.coord) = a
                new_atom.coord[0] = new_atom.coord[0] + origin[0]
                new_atom.coord[1] = new_atom.coord[1] + origin[1]
                new_atom.coord[2] = new_atom.coord[2] + origin[2]
                new_atom.flags = 0x2200000  # set surface ignore flag
                model.atom.append(new_atom)
            self.cmd.load_model(model, self.points_name, zoom=0)
            self.cmd.set("surface_mode", 0, self.points_name)  # make sure no surface is shown
            self.coord = None
            self.cmd.color("green", "%s`1" % self.points_name)
            self.cmd.color("green", "%s`2" % self.points_name)
            self.cmd.color("red", "%s`3" % self.points_name)
            self.cmd.color("blue", "%s`4" % self.points_name)
            self.cmd.show_as("nb_spheres", self.points_name)
            self.auto_position(0.75, 0.5)
        self.cmd.enable(self.points_name)
        self.points_enabled = 1

    def update_box(self):
        """Rebuild the CGO geometry from the current control points."""
        if self.points_name in self.cmd.get_names():
            model = self.cmd.get_model(self.points_name)
            self.coord = (
                model.atom[0].coord,
                model.atom[1].coord,
                model.atom[2].coord,
                model.atom[3].coord,
            )
            p = self.coord[0]
            d10 = sub(self.coord[1], p)
            d20 = sub(self.coord[2], p)
            d30 = sub(self.coord[3], p)
            x10_20 = cross_product(d10, d20)
            if self.mode != 'quad':
                # flip the base point if the fourth point is on the wrong
                # side, keeping a right-handed frame
                if dot_product(d30, x10_20) < 0.0:
                    p = model.atom[1].coord
                    d10 = sub(self.coord[0], p)
                    d20 = sub(self.coord[2], p)
                    d30 = sub(self.coord[3], p)
            n10_20 = normalize(x10_20)
            n10 = normalize(d10)
            # orthogonalized edge vectors and their unit/reverse forms
            d100 = d10
            d010 = remove_component(d20, n10)
            if self.mode != 'quad':
                d001 = project(d30, n10_20)
            else:
                d001 = n10_20
            n100 = normalize(d100)
            n010 = normalize(d010)
            n001 = normalize(d001)
            f100 = reverse(n100)
            f010 = reverse(n010)
            f001 = reverse(n001)
            if self.mode == 'quad':
                p000 = p
                p100 = add(p, remove_component(d10, n001))
                p010 = add(p, remove_component(d20, n001))
                p001 = add(p, remove_component(d30, n001))
            else:
                p000 = p
                p100 = add(p, d100)
                p010 = add(p, d010)
                p001 = add(p, d001)
            p110 = add(p100, d010)
            p011 = add(p010, d001)
            p101 = add(p100, d001)
            p111 = add(p110, d001)
            obj = []
            if self.mode == 'box':  # standard box: six quads as triangle strips
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(f001)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p110)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n001)
                obj.append(VERTEX); obj.extend(p001)
                obj.append(VERTEX); obj.extend(p101)
                obj.append(VERTEX); obj.extend(p011)
                obj.append(VERTEX); obj.extend(p111)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(f010)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p001)
                obj.append(VERTEX); obj.extend(p101)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n010)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p011)
                obj.append(VERTEX); obj.extend(p110)
                obj.append(VERTEX); obj.extend(p111)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(f100)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p001)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p011)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n100)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p110)
                obj.append(VERTEX); obj.extend(p101)
                obj.append(VERTEX); obj.extend(p111)
                obj.append(END)
                model.atom[0].coord = p000
                model.atom[1].coord = p100
                model.atom[2].coord = add(p010, scale(d100, 0.5))
                model.atom[3].coord = add(add(p001, scale(d010, 0.5)), d100)
            elif self.mode == 'walls':  # three faces meeting at p000
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n001)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p110)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n010)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p001)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p101)
                obj.append(END)
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n100)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p001)
                obj.append(VERTEX); obj.extend(p011)
                obj.append(END)
                model.atom[0].coord = p000
                model.atom[1].coord = p100
                model.atom[2].coord = p010
                model.atom[3].coord = p001
            elif self.mode == 'plane':  # a single rectangular face
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n001)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p110)
                obj.append(END)
                model.atom[0].coord = p000
                model.atom[1].coord = p100
                model.atom[2].coord = p010
                model.atom[3].coord = add(add(p001, scale(d010, 0.5)), scale(d100, 0.5))
            elif self.mode == 'quad':  # arbitrary quadrilateral
                obj.extend([BEGIN, TRIANGLE_STRIP])
                obj.append(NORMAL); obj.extend(n001)
                obj.append(VERTEX); obj.extend(p000)
                obj.append(VERTEX); obj.extend(p100)
                obj.append(VERTEX); obj.extend(p010)
                obj.append(VERTEX); obj.extend(p001)
                obj.append(END)
                model.atom[0].coord = p000
                model.atom[1].coord = p100
                model.atom[2].coord = p010
                model.atom[3].coord = p001
            # write the snapped control points back to the points object
            self.cmd.load_model(model, '_tmp', zoom=0)
            self.cmd.update(self.points_name, "_tmp")
            self.cmd.delete("_tmp")
            # then we load it into PyMOL
            self.cmd.delete(self.cgo_name)
            self.cmd.load_cgo(obj, self.cgo_name, zoom=0)
            self.cmd.order(self.cgo_name + " " + self.points_name, sort=1, location='bottom')
            self.cmd.set("nonbonded_size", math.sqrt(dot_product(d10, d10)) / 10, self.points_name)

    def get_panel(self):
        return [
            [1, 'Box Wizard', ''],
            [3, self.mode_name[self.mode], 'mode'],
            [2, 'Change Name', 'cmd.get_wizard().edit_name()'],
            [2, 'Copy Box', 'cmd.get_wizard().edit_name(copying=1)'],
            [2, 'Toggle Points', 'cmd.get_wizard().toggle_points()'],
            #      [ 2, 'Update','cmd.get_wizard().update_box()'],
            [2, 'Auto-Position (50%)', 'cmd.get_wizard().auto_position(0.75)'],
            [2, 'Auto-Position (99%)', 'cmd.get_wizard().auto_position(0.99)'],
            [2, 'Done', 'cmd.set_wizard()'],
        ]

    def get_event_mask(self):
        # only listen for keystrokes while the user is typing a new name
        if self.editing_name:
            return Wizard.event_mask_pick + Wizard.event_mask_select + \
                Wizard.event_mask_scene + Wizard.event_mask_key
        else:
            return Wizard.event_mask_pick + Wizard.event_mask_select + Wizard.event_mask_scene

    def do_scene(self):
        """Redraw if the control points moved since the last update."""
        if self.points_name in self.cmd.get_names("objects"):
            if self.coord is None:
                self.update_box()
            else:
                model = self.cmd.get_model(self.points_name)
                coord = (
                    model.atom[0].coord,
                    model.atom[1].coord,
                    model.atom[2].coord,
                    model.atom[3].coord,
                )
                if self.coord != coord:
                    self.update_box()

    def do_pick(self, bondFlag):
        pass

    def do_key(self, k, x, y, m):
        """Minimal line editor for the box name (backspace, printable, enter)."""
        if k in [8, 127]:  # backspace / delete
            self.new_name = self.new_name[:-1]
        elif k > 32:
            self.new_name = self.new_name + chr(k)
        elif k == 10 or k == 13:  # enter: commit the new name
            self.editing_name = 0
            # BUG FIX: the original called string.strip() but the ``string``
            # module was never imported (NameError); use the str method.
            self.new_name = self.new_name.strip()
            if self.new_name == '':
                self.new_name = 'box'
            self.set_name(self.new_name)
            self.update_box()
        self.cmd.refresh_wizard()
        return 1

    def get_prompt(self):
        # NOTE: the original defined get_prompt twice; the earlier, empty
        # version was dead code (shadowed by this one) and has been removed.
        if self.editing_name:
            self.prompt = ["Enter box name: " + self.new_name]
        else:
            self.prompt = None
        return self.prompt
|
gratefulfrog/lib
|
python/pymol/wizard/box.py
|
Python
|
gpl-2.0
| 15,266
|
[
"ChemPy",
"PyMOL"
] |
7498becd660bd15c713c873cdc9da29894a52c4658b81fe89ffa1333c3e4c0f5
|
# -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
# Mapping from _ast boolean-operator node classes to their source spelling.
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or'
}
# Mapping from _ast binary-operator node classes to their source spelling.
# NOTE(review): Pow ('**') and MatMult ('@') are not listed -- presumably
# predating them; confirm against the target grammar version.
BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    FloorDiv: '//',
    Mod: '%',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitAnd: '&',
    BitXor: '^'
}
# Mapping from _ast comparison-operator node classes to their source spelling.
CMPOP_SYMBOLS = {
    Eq: '==',
    Gt: '>',
    GtE: '>=',
    In: 'in',
    Is: 'is',
    IsNot: 'is not',
    Lt: '<',
    LtE: '<=',
    NotEq: '!=',
    NotIn: 'not in'
}
# Mapping from _ast unary-operator node classes to their source spelling.
UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-'
}
# Combined operator-to-symbol lookup over all four categories.
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
    """Parse source code and return the corresponding AST node."""
    tree = compile(expr, filename, mode, PyCF_ONLY_AST)
    return tree
def to_source(node, indent_with=' ' * 4):
    """
    Convert a node tree back into Python source code.

    Useful for debugging, especially with custom ASTs not generated by
    Python itself.  The produced source may be evaluable even when the AST
    itself is not compilable, since the AST carries more information than
    plain source code, which is dropped during conversion.

    Each indentation level is rendered as `indent_with` (four spaces by
    default, per PEP 8); adjust it to match your styleguide.
    """
    gen = SourceGenerator(indent_with)
    gen.visit(node)
    return ''.join(gen.result)
def dump(node):
    """
    Return a very verbose string representation of the node passed.
    Useful for debugging purposes.  Raises TypeError for non-AST input.
    """
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)

    def _render(obj):
        # AST nodes render as ClassName(field=value, ...); lists recurse
        # element-wise; everything else falls back to repr().
        if isinstance(obj, AST):
            rendered_fields = ', '.join('%s=%s' % (name, _render(value))
                                        for name, value in iter_fields(obj))
            return '%s(%s)' % (obj.__class__.__name__, rendered_fields)
        if isinstance(obj, list):
            return '[%s]' % ', '.join(_render(item) for item in obj)
        return repr(obj)

    return _render(node)
def copy_location(new_node, old_node):
    """
    Copy the source location hint (`lineno` and `col_offset`) from
    *old_node* onto *new_node* where both nodes support the attribute,
    and return *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        supported = (attr in old_node._attributes
                     and attr in new_node._attributes)
        if supported and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    Recursively fill in missing `lineno`/`col_offset` attributes.

    The compiler aborts on nodes lacking location info; rather than setting
    line numbers and column offsets by hand on every new node, this copies
    the parent's location onto children that lack one.  Unlike
    `copy_location` this works recursively and leaves nodes that already
    carry location information untouched.  Returns *node*.
    """
    def _propagate(current, lineno, col_offset):
        # inherit the parent's location when missing, otherwise adopt this
        # node's own location for its subtree
        if 'lineno' in current._attributes:
            if hasattr(current, 'lineno'):
                lineno = current.lineno
            else:
                current.lineno = lineno
        if 'col_offset' in current._attributes:
            if hasattr(current, 'col_offset'):
                col_offset = current.col_offset
            else:
                current.col_offset = col_offset
        for child in iter_child_nodes(current):
            _propagate(child, lineno, col_offset)

    _propagate(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line numbers of all nodes by `n` if they have line number
    attributes. This is useful to "move code" to a different location in a
    file.
    """
    # BUG FIX: the original iterated ``zip((node,), walk(node))``, which
    # yields a single tuple and then raises AttributeError on
    # ``tuple._attributes``.  walk() already includes the root node, so
    # iterating it directly visits every node exactly once.
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
    """Yield ``(name, value)`` pairs for every field present on *node*,
    silently skipping declared fields whose attribute is missing."""
    # CPython 2.5 compat: some nodes have no _fields or an empty one
    for name in getattr(node, '_fields', None) or ():
        try:
            yield name, getattr(node, name)
        except AttributeError:
            continue
def get_fields(node):
    """Like `iter_fields` but returns a dict."""
    # BUG FIX: docstring previously referenced a non-existent "iter_fiels".
    return dict(iter_fields(node))
def iter_child_nodes(node):
    """Iterate over all direct child nodes of a node."""
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            # List-valued fields (e.g. statement bodies) may mix AST nodes
            # with plain values; only the nodes are children.
            for element in value:
                if isinstance(element, AST):
                    yield element
def get_child_nodes(node):
    """Return the direct child nodes of a node as a list."""
    return list(iter_child_nodes(node))
def get_compile_mode(node):
    """
    Get the mode for `compile` of a given node.  If the node is not a `mod`
    node (`Expression`, `Module` etc.) a `TypeError` is thrown.
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    # BUG FIX: the fallback used to be 'expr', which `compile()` rejects.
    # Plain modules compile in 'exec' mode.
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
    """
    Return the docstring for the given node or `None` if no docstring can be
    found.  If the node provided does not accept docstrings a `TypeError`
    will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    body = node.body
    # A docstring is a string literal appearing as the first statement.
    if body and isinstance(body[0], Str):
        return body[0].s
    return None
def walk(node):
    """
    Iterate over all nodes in breadth-first order.  This is useful if you
    only want to modify nodes in place and don't care about the context or
    the order the nodes are returned.
    """
    from collections import deque
    pending = deque([node])
    while pending:
        current = pending.popleft()
        # Queue the children before yielding so the traversal keeps going
        # even if the caller mutates the yielded node.
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """
    Walks the abstract syntax tree and calls a visitor function for every
    node found.  The visitor functions may return values which will be
    forwarded by the `visit` method.

    By default the visitor function for a node is ``'visit_'`` + the class
    name of the node, so a `TryFinally` node is dispatched to
    `visit_TryFinally`.  Override `get_visitor` to change this.  If no
    visitor function exists for a node (`get_visitor` returns `None`) the
    `generic_visit` visitor is used instead.

    Don't use `NodeVisitor` if you want to apply changes to nodes during
    traversal; use `NodeTransformer` for that.
    """

    def get_visitor(self, node):
        """
        Return the visitor function for this node or `None` if no visitor
        exists for this node.  In that case the generic visit function is
        used instead.
        """
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node):
        """Visit a node, dispatching to its specific visitor if any."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        # Recurse into every AST-valued field, including nodes stored in
        # list-valued fields such as statement bodies.
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
class NodeTransformer(NodeVisitor):
    """
    Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed from
    its previous location, otherwise it's replaced with the return value.
    The return value may be the original node, in which case no replacement
    takes place.

    Example transformer rewriting every `foo` to `data['foo']`::

        class RewriteName(NodeTransformer):
            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)

    Keep in mind that if the node you're operating on has child nodes you
    must either transform the child nodes yourself or call the generic
    visit function for the node first.  Visitors for statement nodes may
    also return a list of nodes rather than a single node.

    Usual usage::

        node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, _ in iter_fields(node):
            current = getattr(node, field, None)
            if isinstance(current, list):
                replacement = []
                for item in current:
                    if isinstance(item, AST):
                        item = self.visit(item)
                        if item is None:
                            # Dropped node: omit it entirely.
                            continue
                        if not isinstance(item, AST):
                            # A visitor returned a list of nodes; splice it in.
                            replacement.extend(item)
                            continue
                    replacement.append(item)
                # Mutate the original list in place so aliases stay valid.
                current[:] = replacement
            elif isinstance(current, AST):
                new_node = self.visit(current)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class SourceGenerator(NodeVisitor):
    """
    This visitor is able to transform a well formed syntax tree into python
    sourcecode.  For more details have a look at the docstring of the
    `node_to_source` function.
    """

    def __init__(self, indent_with):
        # Accumulated source fragments; ''.join(self.result) is the output.
        self.result = []
        # The string used for one level of indentation (e.g. four spaces).
        self.indent_with = indent_with
        self.indentation = 0
        # Number of pending newlines to flush before the next write.
        self.new_lines = 0

    def write(self, x):
        # Flush pending newlines plus indentation before the payload.  No
        # leading newlines are emitted before the very first fragment.
        if self.new_lines:
            if self.result:
                self.result.append('\n' * self.new_lines)
            self.result.append(self.indent_with * self.indentation)
            self.new_lines = 0
        self.result.append(x)

    def newline(self, n=1):
        # Request (at most) n newlines before the next write.
        self.new_lines = max(self.new_lines, n)

    def body(self, statements):
        # BUG FIX: this previously assigned `self.new_line = True`, creating
        # an unused attribute instead of requesting a newline.
        self.newline()
        self.indentation += 1
        for stmt in statements:
            self.visit(stmt)
        self.indentation -= 1

    def body_or_else(self, node):
        self.body(node.body)
        if node.orelse:
            self.newline()
            self.write('else:')
            self.body(node.orelse)

    def signature(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)

        # Defaults align with the *last* len(defaults) positional args.
        padding = [None] * (len(node.args) - len(node.defaults))
        for arg, default in zip(node.args, padding + node.defaults):
            write_comma()
            self.visit(arg)
            if default is not None:
                self.write('=')
                self.visit(default)
        if node.vararg is not None:
            write_comma()
            self.write('*' + node.vararg)
        if node.kwarg is not None:
            write_comma()
            self.write('**' + node.kwarg)

    def decorators(self, node):
        for decorator in node.decorator_list:
            self.newline()
            self.write('@')
            self.visit(decorator)

    # Statements

    def visit_Assign(self, node):
        self.newline()
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)
        self.write(' = ')
        self.visit(node.value)

    def visit_AugAssign(self, node):
        self.newline()
        self.visit(node.target)
        self.write(BINOP_SYMBOLS[type(node.op)] + '=')
        self.visit(node.value)

    def visit_ImportFrom(self, node):
        self.newline()
        self.write('from %s%s import ' % ('.' * node.level, node.module))
        for idx, item in enumerate(node.names):
            if idx:
                self.write(', ')
            # BUG FIX: previously `self.write(item)` appended the alias node
            # object itself (not a string) to the result list.
            self.visit(item)

    def visit_Import(self, node):
        # BUG FIX: emit each import on its own line; previously only one
        # newline was requested, gluing subsequent names onto the same line.
        for item in node.names:
            self.newline()
            self.write('import ')
            self.visit(item)

    def visit_Expr(self, node):
        self.newline()
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        self.newline(n=2)
        self.decorators(node)
        self.newline()
        self.write('def %s(' % node.name)
        self.signature(node.args)
        self.write('):')
        self.body(node.body)

    def visit_ClassDef(self, node):
        have_args = []
        def paren_or_comma():
            if have_args:
                self.write(', ')
            else:
                have_args.append(True)
                self.write('(')

        self.newline(n=3)
        self.decorators(node)
        self.newline()
        self.write('class %s' % node.name)
        for base in node.bases:
            paren_or_comma()
            self.visit(base)
        # XXX: the if here is used to keep this module compatible
        # with python 2.6.
        if hasattr(node, 'keywords'):
            for keyword in node.keywords:
                paren_or_comma()
                self.write(keyword.arg + '=')
                self.visit(keyword.value)
        # Guard starargs/kwargs the same way as keywords: these fields were
        # removed from the AST in Python 3.5.
        starargs = getattr(node, 'starargs', None)
        if starargs is not None:
            paren_or_comma()
            self.write('*')
            self.visit(starargs)
        kwargs = getattr(node, 'kwargs', None)
        if kwargs is not None:
            paren_or_comma()
            self.write('**')
            self.visit(kwargs)
        self.write(have_args and '):' or ':')
        self.body(node.body)

    def visit_If(self, node):
        self.newline()
        self.write('if ')
        self.visit(node.test)
        self.write(':')
        self.body(node.body)
        while True:
            else_ = node.orelse
            if len(else_) == 1 and isinstance(else_[0], If):
                # Collapse a single nested If into an elif chain.
                node = else_[0]
                self.newline()
                self.write('elif ')
                self.visit(node.test)
                self.write(':')
                self.body(node.body)
            else:
                # BUG FIX: previously an empty orelse still emitted a
                # dangling `else:` with no body.
                if else_:
                    self.newline()
                    self.write('else:')
                    self.body(else_)
                break

    def visit_For(self, node):
        self.newline()
        self.write('for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        self.write(':')
        self.body_or_else(node)

    def visit_While(self, node):
        self.newline()
        self.write('while ')
        self.visit(node.test)
        self.write(':')
        self.body_or_else(node)

    def visit_With(self, node):
        self.newline()
        self.write('with ')
        self.visit(node.context_expr)
        if node.optional_vars is not None:
            self.write(' as ')
            self.visit(node.optional_vars)
        self.write(':')
        self.body(node.body)

    def visit_Pass(self, node):
        self.newline()
        self.write('pass')

    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.newline()
        self.write('print ')
        want_comma = False
        if node.dest is not None:
            self.write(' >> ')
            self.visit(node.dest)
            want_comma = True
        for value in node.values:
            if want_comma:
                self.write(', ')
            self.visit(value)
            want_comma = True
        if not node.nl:
            self.write(',')

    def visit_Delete(self, node):
        self.newline()
        self.write('del ')
        # BUG FIX: previously iterated `enumerate(node)`; Delete nodes are
        # not iterable — the targets live in node.targets.
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)

    def visit_TryExcept(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        for handler in node.handlers:
            self.visit(handler)

    def visit_TryFinally(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        self.newline()
        self.write('finally:')
        self.body(node.finalbody)

    def visit_Global(self, node):
        self.newline()
        self.write('global ' + ', '.join(node.names))

    def visit_Nonlocal(self, node):
        self.newline()
        self.write('nonlocal ' + ', '.join(node.names))

    def visit_Return(self, node):
        self.newline()
        # BUG FIX: a bare `return` previously produced a trailing space.
        if node.value is None:
            self.write('return')
        else:
            self.write('return ')
            self.visit(node.value)

    def visit_Break(self, node):
        self.newline()
        self.write('break')

    def visit_Continue(self, node):
        self.newline()
        self.write('continue')

    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility
        self.newline()
        self.write('raise')
        if hasattr(node, 'exc') and node.exc is not None:
            # Python 3 style: raise exc from cause
            self.write(' ')
            self.visit(node.exc)
            if node.cause is not None:
                self.write(' from ')
                self.visit(node.cause)
        elif hasattr(node, 'type') and node.type is not None:
            # Python 2 style: raise type, inst, tback
            self.visit(node.type)
            if node.inst is not None:
                self.write(', ')
                self.visit(node.inst)
            if node.tback is not None:
                self.write(', ')
                self.visit(node.tback)

    # Expressions

    def visit_Attribute(self, node):
        self.visit(node.value)
        self.write('.' + node.attr)

    def visit_Call(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)

        self.visit(node.func)
        self.write('(')
        for arg in node.args:
            write_comma()
            self.visit(arg)
        for keyword in node.keywords:
            write_comma()
            self.write(keyword.arg + '=')
            self.visit(keyword.value)
        # Guard starargs/kwargs: removed from Call nodes in Python 3.5.
        starargs = getattr(node, 'starargs', None)
        if starargs is not None:
            write_comma()
            self.write('*')
            self.visit(starargs)
        kwargs = getattr(node, 'kwargs', None)
        if kwargs is not None:
            write_comma()
            self.write('**')
            self.visit(kwargs)
        self.write(')')

    def visit_Name(self, node):
        self.write(node.id)

    def visit_Str(self, node):
        self.write(repr(node.s))

    def visit_Bytes(self, node):
        self.write(repr(node.s))

    def visit_Num(self, node):
        self.write(repr(node.n))

    def visit_Tuple(self, node):
        self.write('(')
        idx = -1
        for idx, item in enumerate(node.elts):
            if idx:
                self.write(', ')
            self.visit(item)
        # A one-element tuple needs a trailing comma.
        self.write(idx and ')' or ',)')

    def sequence_visit(left, right):
        def visit(self, node):
            self.write(left)
            for idx, item in enumerate(node.elts):
                if idx:
                    self.write(', ')
                self.visit(item)
            self.write(right)
        return visit

    visit_List = sequence_visit('[', ']')
    visit_Set = sequence_visit('{', '}')
    del sequence_visit

    def visit_Dict(self, node):
        self.write('{')
        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
            if idx:
                self.write(', ')
            self.visit(key)
            self.write(': ')
            self.visit(value)
        self.write('}')

    def visit_BinOp(self, node):
        self.write('(')
        self.visit(node.left)
        self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
        self.visit(node.right)
        self.write(')')

    def visit_BoolOp(self, node):
        self.write('(')
        for idx, value in enumerate(node.values):
            if idx:
                self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
            self.visit(value)
        self.write(')')

    def visit_Compare(self, node):
        self.write('(')
        self.visit(node.left)
        for op, right in zip(node.ops, node.comparators):
            self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
            self.visit(right)
        self.write(')')

    def visit_UnaryOp(self, node):
        self.write('(')
        op = UNARYOP_SYMBOLS[type(node.op)]
        self.write(op)
        if op == 'not':
            self.write(' ')
        self.visit(node.operand)
        self.write(')')

    def visit_Subscript(self, node):
        self.visit(node.value)
        self.write('[')
        self.visit(node.slice)
        self.write(']')

    def visit_Slice(self, node):
        if node.lower is not None:
            self.visit(node.lower)
        self.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, Name) and node.step.id == 'None'):
                self.visit(node.step)

    def visit_ExtSlice(self, node):
        # BUG FIX: previously iterated `node.dims` expecting (idx, item)
        # pairs without enumerate, which unpacks the wrong thing.
        for idx, item in enumerate(node.dims):
            if idx:
                self.write(', ')
            self.visit(item)

    def visit_Yield(self, node):
        self.write('yield ')
        self.visit(node.value)

    def visit_Lambda(self, node):
        self.write('lambda ')
        self.signature(node.args)
        self.write(': ')
        self.visit(node.body)

    def visit_Ellipsis(self, node):
        self.write('Ellipsis')

    def generator_visit(left, right):
        def visit(self, node):
            self.write(left)
            self.visit(node.elt)
            for comprehension in node.generators:
                self.visit(comprehension)
            self.write(right)
        return visit

    visit_ListComp = generator_visit('[', ']')
    visit_GeneratorExp = generator_visit('(', ')')
    visit_SetComp = generator_visit('{', '}')
    del generator_visit

    def visit_DictComp(self, node):
        self.write('{')
        self.visit(node.key)
        self.write(': ')
        self.visit(node.value)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write('}')

    def visit_IfExp(self, node):
        self.visit(node.body)
        self.write(' if ')
        self.visit(node.test)
        self.write(' else ')
        self.visit(node.orelse)

    def visit_Starred(self, node):
        self.write('*')
        self.visit(node.value)

    def visit_Repr(self, node):
        # XXX: python 2.6 only
        self.write('`')
        self.visit(node.value)
        self.write('`')

    # Helper Nodes

    def visit_alias(self, node):
        self.write(node.name)
        if node.asname is not None:
            self.write(' as ' + node.asname)

    def visit_comprehension(self, node):
        self.write(' for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        if node.ifs:
            for if_ in node.ifs:
                self.write(' if ')
                self.visit(if_)

    def visit_excepthandler(self, node):
        self.newline()
        self.write('except')
        if node.type is not None:
            self.write(' ')
            self.visit(node.type)
            if node.name is not None:
                self.write(' as ')
                self.visit(node.name)
        self.write(':')
        self.body(node.body)
|
codendev/rapidwsgi
|
src/mako/_ast_util.py
|
Python
|
gpl-3.0
| 26,249
|
[
"VisIt"
] |
a6a56f1107c68a9439cfe345e248afe3dce9785872d169f0b193acee0aa5dc6b
|
# Copyright 2014 by Kevin Wu.
# Revisions copyright 2014 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides code to access the REST-style KEGG online API.
This module aims to make the KEGG online REST-style API easier to use. See:
http://www.kegg.jp/kegg/rest/keggapi.html
The KEGG REST-style API provides simple access to a range of KEGG databases.
This works using simple URLs (which this module will construct for you),
with any errors indicated via HTTP error levels.
The functionality is somewhat similar to Biopython's Bio.TogoWS and Bio.Entrez
modules.
Currently KEGG does not provide any usage guidelines (unlike the NCBI whose
requirements are reasonably clear). To avoid risking overloading the service,
Biopython will only allow three calls per second.
References:
Kanehisa, M. and Goto, S.; KEGG: Kyoto Encyclopedia of Genes and Genomes.
Nucleic Acids Res. 28, 29-34 (2000).
"""
from Bio._py3k import urlopen as _urlopen
from Bio._py3k import _binary_to_string_handle
def _q(op, arg1, arg2=None, arg3=None):
    """Build the KEGG REST URL for *op* and its arguments and open it.

    Image responses are returned as the raw binary handle; everything
    else is wrapped in a text-mode handle.
    """
    base = "http://rest.kegg.jp/%s"
    parts = [op, arg1]
    # arg3 is only meaningful when arg2 is also given.
    if arg2:
        parts.append(arg2)
        if arg3:
            parts.append(arg3)
    resp = _urlopen(base % ("%s" % "/".join("%s" % p for p in parts)))
    if "image" == arg2:
        return resp
    return _binary_to_string_handle(resp)
# http://www.kegg.jp/kegg/rest/keggapi.html
def kegg_info(database):
    """KEGG info - Displays the current statistics of a given database.

    db - database or organism (string)

    The argument db can be a KEGG database name (e.g. 'pathway' or its
    official abbreviation, 'path'), or a KEGG organism code or T number
    (e.g. 'hsa' or 'T01001' for human).

    A valid list of organism codes and their T numbers can be obtained
    via kegg_info('organism') or http://rest.kegg.jp/list/organism

    Returns a handle to the (plain-text) response.
    """
    # TODO - return a string (rather than the handle?)
    # TODO - cache and validate the organism code / T numbers?
    # TODO - can we parse the somewhat formatted output?
    #
    # http://rest.kegg.jp/info/<database>
    #
    # <database> = pathway | brite | module | disease | drug | environ |
    #              ko | genome |<org> | compound | glycan | reaction |
    #              rpair | rclass | enzyme | genomes | genes | ligand | kegg
    # <org> = KEGG organism code or T number
    return _q("info", database)
def kegg_list(database, org=None):
    """KEGG list - Entry list for database, or specified database entries.

    db - database or organism (string)
    org - optional organism (string), see below.

    For the pathway and module databases the optional organism can be
    used to restrict the results.

    Returns a handle.
    """
    # http://rest.kegg.jp/list/<database>/<org>
    #
    # <database> = pathway | module
    # <org> = KEGG organism code
    if isinstance(database, str) and (database in ["pathway", "module"]) and org:
        resp = _q("list", database, org)
    elif isinstance(database, str) and database and org:
        # An organism restriction is only valid for pathway/module.
        raise Exception("Invalid database arg for kegg list request.")
    # http://rest.kegg.jp/list/<database>
    #
    # <database> = pathway | brite | module | disease | drug | environ |
    #              ko | genome | <org> | compound | glycan | reaction |
    #              rpair | rclass | enzyme | organism
    # <org> = KEGG organism code or T number
    #
    # http://rest.kegg.jp/list/<dbentries>
    #
    # <dbentries> = KEGG database entries involving the following <database>
    # <database> = pathway | brite | module | disease | drug | environ |
    #              ko | genome | <org> | compound | glycan | reaction |
    #              rpair | rclass | enzyme
    # <org> = KEGG organism code or T number
    else:
        if isinstance(database, list):
            if len(database) > 100:
                # BUG FIX: corrected "Maximuim" typo in the error message.
                raise Exception(
                    "Maximum number of databases is 100 for kegg list query")
            database = ("+").join(database)
        resp = _q("list", database)
    return resp
def kegg_find(database, query, option=None):
    """KEGG find - Data search.

    Finds entries with matching query keywords or other query data in
    a given database.

    db - database or organism (string)
    query - search terms (string)
    option - search option (string), see below.

    For the compound and drug database, set option to the string 'formula',
    'exact_mass' or 'mol_weight' to search on that field only.  The
    chemical formula search is a partial match irrespective of the order
    of atoms given.  The exact mass (or molecular weight) is checked by
    rounding off to the same decimal place as the query data.  A range of
    values may also be specified with the minus(-) sign.

    Returns a handle.
    """
    # http://rest.kegg.jp/find/<database>/<query>/<option>
    #
    # <database> = compound | drug
    # <option> = formula | exact_mass | mol_weight
    chemical_dbs = ["compound", "drug"]
    field_options = ["formula", "exact_mass", "mol_weight"]
    if database in chemical_dbs and option in field_options:
        return _q("find", database, query, option)
    if option:
        # Field options only apply to the chemical databases.
        raise Exception("Invalid option arg for kegg find request.")
    # http://rest.kegg.jp/find/<database>/<query>
    #
    # <database> = pathway | module | disease | drug | environ | ko |
    #              genome | <org> | compound | glycan | reaction | rpair |
    #              rclass | enzyme | genes | ligand
    # <org> = KEGG organism code or T number
    if isinstance(query, list):
        query = "+".join(query)
    return _q("find", database, query)
def kegg_get(dbentries, option=None):
    """KEGG get - Data retrieval.

    dbentries - Identifiers (single string, or list of strings), see below.
    option - One of "aaseq", "ntseq", "mol", "kcf", "image", "kgml" (string)

    The input is limited up to 10 entries.
    The input is limited to one pathway entry with the image or kgml option.
    The input is limited to one compound/glycan/drug entry with the image option.

    Returns a handle.
    """
    if isinstance(dbentries, list):
        if len(dbentries) > 10:
            raise Exception("Maximum number of dbentries is 10 for kegg get query")
        dbentries = "+".join(dbentries)
    # http://rest.kegg.jp/get/<dbentries>[/<option>]
    #
    # <dbentries> = KEGG database entries involving the following <database>
    # <database> = pathway | brite | module | disease | drug | environ |
    #              ko | genome | <org> | compound | glycan | reaction |
    #              rpair | rclass | enzyme
    # <org> = KEGG organism code or T number
    #
    # <option> = aaseq | ntseq | mol | kcf | image
    if option in ["aaseq", "ntseq", "mol", "kcf", "image", "kgml"]:
        return _q("get", dbentries, option)
    if option:
        raise Exception("Invalid option arg for kegg get request.")
    return _q("get", dbentries)
def kegg_conv(target_db, source_db, option=None):
    """KEGG conv - convert KEGG identifiers to/from outside identifiers.

    target_db - Target database
    source_db_or_dbentries - source database or database entries
    option - Can be "turtle" or "n-triple" (string).

    Returns a handle.
    """
    # http://rest.kegg.jp/conv/<target_db>/<source_db>[/<option>]
    #
    # (<target_db> <source_db>) = (<kegg_db> <outside_db>) |
    #                             (<outside_db> <kegg_db>)
    #
    # For gene identifiers:
    # <kegg_db> = <org> (KEGG organism code or T number)
    # <outside_db> = ncbi-gi | ncbi-geneid | uniprot
    #
    # For chemical substance identifiers:
    # <kegg_db> = drug | compound | glycan
    # <outside_db> = pubchem | chebi
    #
    # <option> = turtle | n-triple
    #
    # http://rest.kegg.jp/conv/<target_db>/<dbentries>[/<option>] works the
    # same way with individual database entries in place of <source_db>.
    if option and option not in ["turtle", "n-triple"]:
        raise Exception("Invalid option arg for kegg conv request.")
    if isinstance(source_db, list):
        source_db = "+".join(source_db)
    gene_outside_dbs = ["ncbi-gi", "ncbi-geneid", "uniprot"]
    kegg_chem_dbs = ["drug", "compound", "glycan"]
    # BUG FIX: the outside chemical databases are pubchem and chebi; the
    # list previously (incorrectly) contained "glycan" instead of "chebi".
    outside_chem_dbs = ["pubchem", "chebi"]
    if target_db in gene_outside_dbs or \
            source_db in gene_outside_dbs or \
            (target_db in kegg_chem_dbs and source_db in outside_chem_dbs) or \
            (target_db in outside_chem_dbs and source_db in kegg_chem_dbs):
        if option:
            return _q("conv", target_db, source_db, option)
        return _q("conv", target_db, source_db)
    raise Exception("Bad argument target_db or source_db for kegg conv request.")
def kegg_link(target_db, source_db, option=None):
    """KEGG link - find related entries by using database cross-references.

    target_db - Target database
    source_db_or_dbentries - source database
    option - Can be "turtle" or "n-triple" (string).

    Returns a handle.
    """
    # http://rest.kegg.jp/link/<target_db>/<source_db>[/<option>]
    #
    # <target_db> = <database>
    # <source_db> = <database>
    #
    # <database> = pathway | brite | module | ko | genome | <org> | compound |
    #              glycan | reaction | rpair | rclass | enzyme | disease |
    #              drug | dgroup | environ
    #
    # <option> = turtle | n-triple
    #
    # http://rest.kegg.jp/link/<target_db>/<dbentries>[/<option>] works the
    # same way with individual database entries in place of <source_db>.
    if option and option not in ["turtle", "n-triple"]:
        # BUG FIX: the message previously said "kegg conv request"
        # (copy-paste from kegg_conv).
        raise Exception("Invalid option arg for kegg link request.")
    if isinstance(source_db, list):
        source_db = "+".join(source_db)
    if option:
        return _q("link", target_db, source_db, option)
    return _q("link", target_db, source_db)
|
zjuchenyuan/BioWeb
|
Lib/Bio/KEGG/REST.py
|
Python
|
mit
| 10,820
|
[
"Biopython"
] |
865facb891c85ddc0faca9a2309e1b862b3349698e2fb9ba0620e7a7d47a5b35
|
"""
Ze Neek Player
- A CLI-based internet radio stream player for neeks
- Play internet radio from your command line C:
- Geek + Nerd = Neek. Get it?
- Made in Python 2.7
- Requires PyGst (Gstreamer Python Bindings)
- Requires Gstreamer, its plugins and the development files
for Gstreamer as well.
License: GNU GPLv3
- mister-raindrop
Visit: https://github.com/mister-raindrop/neek-player
- for source code and development updates
Visit: https://kookiecancode.wordpress.com/
- for author's blog. He tries hard to write but fails.
File: Contains player controls and the main class.
This is where gstreamer comes in play.
"""
import pygst
pygst.require("0.10")
import gst
class neekPlayer:
    """Thin wrapper around a GStreamer playbin for playing a radio stream."""

    def __init__(self, stream_url):
        """Build the pipeline, point the playbin at *stream_url*."""
        self.stream_url = stream_url
        self.neekPipeline = gst.Pipeline("neekPipeline")
        self.neek_player = gst.element_factory_make("playbin", "neekplayer")
        self.set_neek_url(self.stream_url)
        self.neekPipeline.add(self.neek_player)

    def set_neek_url(self, url):
        """Point the playbin at a (new) stream URI."""
        self.neek_player.set_property('uri', url)

    def neek_play(self):
        """Start (or resume) playback."""
        self.neekPipeline.set_state(gst.STATE_PLAYING)

    def neek_stop(self):
        """Pause playback (pipeline keeps its state)."""
        self.neekPipeline.set_state(gst.STATE_PAUSED)

    def neek_null(self):
        """Tear the pipeline down to the NULL state."""
        self.neekPipeline.set_state(gst.STATE_NULL)
|
mister-raindrop/neek-player
|
neekPlayer.py
|
Python
|
gpl-3.0
| 1,295
|
[
"VisIt"
] |
cf7382421f56bde6cbb2a622ba39eb9e922cb5400be472fd39f67cec24f83365
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from future.utils.six import BytesIO
from tempfile import NamedTemporaryFile
from os.path import exists, join
from unittest import TestCase, main
from shutil import rmtree
from tempfile import mkdtemp
from uuid import uuid4
from skbio.util import (safe_md5, remove_files, create_dir, flatten,
is_casava_v180_or_later)
from skbio.util._misc import _handle_error_codes
class MiscTests(TestCase):
    """Test object for the miscellaneous utility functions"""

    def setUp(self):
        # Directories created by individual tests register themselves here
        # and are cleaned up in tearDown.
        self.dirs_to_remove = []

    def tearDown(self):
        for element in self.dirs_to_remove:
            rmtree(element)

    def test_is_casava_v180_or_later(self):
        """Attempt to determine casava version"""
        self.assertFalse(is_casava_v180_or_later(b'@foo'))
        id_ = b'@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0'
        self.assertTrue(is_casava_v180_or_later(id_))
        # Headers that do not start with '@' are rejected outright.
        with self.assertRaises(ValueError):
            is_casava_v180_or_later(b'foo')

    def test_safe_md5(self):
        """Make sure we have the expected md5"""
        exp = 'ab07acbb1e496801937adfa772424bf7'
        fd = BytesIO(b'foo bar baz')
        obs = safe_md5(fd)
        self.assertEqual(obs.hexdigest(), exp)
        fd.close()

    def test_remove_files(self):
        """Remove files functions as expected """
        # create list of temp file paths
        test_fds = [NamedTemporaryFile(delete=False) for i in range(5)]
        test_filepaths = [element.name for element in test_fds]
        # should work just fine
        remove_files(test_filepaths)
        # check that an error is raised on trying to remove the files...
        self.assertRaises(OSError, remove_files, test_filepaths)
        # touch one of the filepaths so it exists
        extra_file = NamedTemporaryFile(delete=False).name
        test_filepaths.append(extra_file)
        # no error is raised on trying to remove the files
        # (although 5 don't exist)...
        remove_files(test_filepaths, error_on_missing=False)
        # ... and the existing file was removed
        self.assertFalse(exists(extra_file))
        # try to remove them with remove_files and verify that an IOError is
        # raises
        # NOTE(review): the comment above says IOError but the assertion
        # checks OSError (IOError is an alias of OSError on Python 3).
        self.assertRaises(OSError, remove_files, test_filepaths)
        # now get no error when error_on_missing=False
        remove_files(test_filepaths, error_on_missing=False)

    def test_create_dir(self):
        """create_dir creates dir and fails meaningful."""
        # create a directory
        tmp_dir_path = mkdtemp()
        # create a random temporary directory name
        tmp_dir_path2 = join(mkdtemp(), str(uuid4()))
        tmp_dir_path3 = join(mkdtemp(), str(uuid4()))
        self.dirs_to_remove += [tmp_dir_path, tmp_dir_path2, tmp_dir_path3]
        # create on existing dir raises OSError if fail_on_exist=True
        self.assertRaises(OSError, create_dir, tmp_dir_path,
                          fail_on_exist=True)
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=True,
                                    handle_errors_externally=True), 1)
        # return should be 1 if dir exist and fail_on_exist=False
        self.assertEqual(create_dir(tmp_dir_path, fail_on_exist=False), 1)
        # if dir not there make it and return always 0
        self.assertEqual(create_dir(tmp_dir_path2), 0)
        self.assertEqual(create_dir(tmp_dir_path3, fail_on_exist=True), 0)

    def test_handle_error_codes_no_error(self):
        # A path that triggers no error condition returns status code 0.
        obs = _handle_error_codes('/foo/bar/baz')
        self.assertEqual(obs, 0)

    def test_flatten(self):
        """flatten should remove one level of nesting from nested sequences"""
        self.assertEqual(flatten(['aa', 'bb', 'cc']), list('aabbcc'))
        self.assertEqual(flatten([1, [2, 3], [[4, [5]]]]), [1, 2, 3, [4, [5]]])
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
    main()
|
JWDebelius/scikit-bio
|
skbio/util/tests/test_misc.py
|
Python
|
bsd-3-clause
| 4,255
|
[
"scikit-bio"
] |
990c8544f9bc33ffb4ae7405136c6f445e34f8a480ff35c4569f4f12f8e80c1f
|
# flake8: noqa: E241
from tests.core import mock
from trakt import Trakt
from httmock import HTTMock
def test_movie():
    """Popular-movies endpoint returns the fixture titles in order."""
    expected = [
        ("Deadpool", 2016),
        ("The Dark Knight", 2008),
        ("Inception", 2010),
        ("Guardians of the Galaxy", 2014),
        ("The Avengers", 2012),
        ("The Matrix", 1999),
        ("Interstellar", 2014),
        ("Suicide Squad", 2016),
        ("Star Wars: The Force Awakens", 2015),
        ("Frozen", 2013)
    ]
    with HTTMock(mock.fixtures, mock.unknown):
        movies = Trakt['movies'].popular()

    assert [(m.title, m.year) for m in movies] == expected
def test_show():
    """Popular-shows endpoint returns the fixture titles in order."""
    expected = [
        ("Game of Thrones", 2011),
        ("Breaking Bad", 2008),
        ("The Walking Dead", 2010),
        ("The Big Bang Theory", 2007),
        ("Dexter", 2006),
        ("Sherlock", 2010),
        ("How I Met Your Mother", 2005),
        ("Arrow", 2012),
        ("Friends", 1994),
        ("Homeland", 2011)
    ]
    with HTTMock(mock.fixtures, mock.unknown):
        shows = Trakt['shows'].popular()

    assert [(s.title, s.year) for s in shows] == expected
|
fuzeman/trakt.py
|
tests/test_popular.py
|
Python
|
mit
| 1,088
|
[
"Galaxy"
] |
54e2d352daff72630f5d8921a51601d1434950d529091cf446b4fcf502112cfc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract fasta sequences that are above a minimal length.
Usage:
    %program <input_file> <min_length> <output_file>"""

import sys
import re

try:
    from Bio import SeqIO
# BUG FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
# only an ImportError means Biopython is missing.
except ImportError:
    print("This program requires the Biopython library")
    # BUG FIX: exit with a non-zero status on error (was 0).
    sys.exit(1)

try:
    fasta_file = sys.argv[1]       # Input fasta file
    min_length = int(sys.argv[2])  # Minimum length of sequence
    result_file = sys.argv[3]      # Output fasta file
# BUG FIX: catch only missing/invalid arguments, not everything.
except (IndexError, ValueError):
    print(__doc__)
    sys.exit(1)

# BUG FIX: use context managers so the input handle is closed too
# (it was previously opened inline and never closed).
with open(fasta_file) as in_handle, open(result_file, "w") as out_handle:
    for seq in SeqIO.parse(in_handle, 'fasta'):
        if len(str(seq.seq)) >= min_length:
            SeqIO.write([seq], out_handle, "fasta")
|
enormandeau/Scripts
|
fasta_extract_min_length.py
|
Python
|
gpl-3.0
| 737
|
[
"Biopython"
] |
c483b4e5398071da5368661dbee59218c827f4ec5955bafe36409b6a37c83c5b
|
import os
import Forge.core.System
def createRenderHierarchy( path, sceneName, frames ):
    """Create the render folder hierarchy for a scene.

    @parameter path (string) Path of the environement.
    @parameter sceneName (string) Name of the scene.
    @parameter frames (array of int) Frames to render.
    """
    folders = [ 'data', 'images', 'rib', 'rib/job', 'shaders', 'log' ]
    # One rib sub-folder per frame, zero-padded to four digits (rib/0042).
    # IDIOM FIX: use plain loops for side effects instead of list
    # comprehensions whose results were discarded.
    for frame in frames:
        folders.append( 'rib/' + str(int(frame)).zfill(4) )
    for folder in folders:
        Forge.core.System.mkdir( '%srenderman/%s/%s' %(path, sceneName, folder) )
def ribJob( path, sceneName ):
    """Build the job-level rib option statements shared by every rib file.

    @parameter path (string) Path of the environement.
    @parameter sceneName (string) Name of the scene (currently unused).
    return ribJob content (string)
    """
    lines = [
        'Option "ribparse" "string varsubst" [""]',
        'Option "ribparse" "string varsubst" ["$"]',
        'IfBegin "!defined(RMSPROJ_FROM_ENV)"',
        'Option "user" "string RMSPROJ" "%s"' %(path),
        'IfEnd',
        'IfBegin "!defined(RMSTREE)"',
        'Option "user" "string RMSTREE" "C:/Program Files/Pixar/RenderManStudio-20.0-maya2015/"',
        'IfEnd',
        'Option "searchpath" "string resource" ["${RMSPROJ}:@"]',
        'Option "searchpath" "string archive" ["${RMSPROJ}:."]',
        'Option "searchpath" "string display" ["${RMSTREE}/bin:@"]',
        'Option "searchpath" "string shader" ["${RMSPROJ}:${RMSTREE}/lib/shaders/:@"]',
        'Option "searchpath" "string texture" ["${RMSPROJ}:${RMSTREE}/lib/textures/:@"]',
        'Option "searchpath" "string rixplugin" ["${RMSTREE}/lib/shaders/:@"]',
        'Option "searchpath" "string dirmap" [""]',
        'Option "searchpath" "string procedural" ["${RMSTREE}/lib/plugins/:${RMSTREE}/lib/plugins:${RMANTREE}/etc:@"]',
    ]
    # leading blank line, one '\n' before each statement, trailing newline
    return '\n' + ''.join( '\n' + line for line in lines ) + '\n'
def objectMatch( obj, rules ):
    """Return True when an object name matches one of the rules.

    @parameter obj (str) object name.
    @parameter rules (str) ';'-separated rules; a rule containing '*' is a
        glob-style wildcard pattern, otherwise it must match exactly.
    return matching object (bool)
    """
    import fnmatch
    for rule in rules.split( ';' ):
        wildcard = '*'
        if wildcard in rule:
            # Glob-style matching. The previous implementation fed the rule
            # straight to re.compile, where '*' means "zero or more of the
            # preceding char" (so 'obj*' matched 'obbbb') and a leading '*'
            # raised a regex error that was silently swallowed, making such
            # rules never match.
            if fnmatch.fnmatchcase( obj, rule ):
                return True
        else:
            if rule == obj:
                return True
    return False
def ribLight( lights ):
    """Build the rib statements describing every light.

    @parameter lights (dict) Lights data keyed by light name; each value
        holds 'type' (0 env, 1 rect, 2 disk/spot, 3 sphere, 4 distant),
        'matrix', 'slo' and 'settings'.
    return ribLight content (string)
    """
    lightRibContent = ''
    # .items() instead of the Python-2-only .iteritems(): same observable
    # behaviour on Python 2 and also runs on Python 3.
    for key, value in lights.items():
        lightRibContent += '\n AttributeBegin '
        lightRibContent += '\n Attribute "identifier" "string name" ["%s"]' %(key)
        lightRibContent += '\n Transform %s' %(value['matrix'])
        # env
        if value['type'] == 0:
            lightRibContent += '\n Rotate -90 1 0 0'
        lightRibContent += '\n Bxdf "PxrLightEmission" "visualizer" "string __instanceid" ["%s_visualizer"]' %(key)
        lightRibContent += '\n IfBegin "!defined(user:shader_bindingstrength) || $user:shader_bindingstrength <= 0" '
        lightRibContent += '\n ShadingRate 10'
        lightRibContent += '\n Surface "%s" %s ' %(value['slo'], value['settings'])
        lightRibContent += ' "__instanceid" ["%s_0"]' %(value['slo'])
        lightRibContent += '\n ShadingRate 10'
        lightRibContent += '\n AreaLightSource "%s" "%s" %s' %(value['slo'], key, value['settings'])
        lightRibContent += ' "__instanceid" ["%s_0"]' %(value['slo'])
        lightRibContent += '\n Attribute "user" "int shader_bindingstrength" [0]'
        lightRibContent += '\n IfEnd '
        # the light geometry itself is hidden from camera and bounces
        lightRibContent += '\n Attribute "visibility" "int camera" [0]'
        lightRibContent += '\n Attribute "visibility" "int indirect" [0] "int transmission" [0]'
        lightRibContent += '\n ShadingRate 10'
        lightRibContent += '\n Sides 1'
        lightRibContent += '\n Attribute "dice" "string offscreenstrategy" ["sphericalprojection"]'
        lightRibContent += '\n ReverseOrientation '
        # env
        if value['type'] == 0:
            lightRibContent += '\n Geometry "envsphere" "constant float[2] resolution" [1024 512]'
        # rect
        elif value['type'] == 1:
            lightRibContent += '\n Geometry "rectlight" '
        # disk and spot
        elif value['type'] == 2:
            lightRibContent += '\n Disk 0 0.5 360 '
        # sphere
        elif value['type'] == 3:
            lightRibContent += '\n Geometry "spherelight" "constant float radius" [0.5] '
        # distant
        elif value['type'] == 4:
            lightRibContent += '\n Geometry "distantlight" "constant float anglesubtended" [-1] '
        lightRibContent += '\n AttributeEnd '
        lightRibContent += '\n Illuminate "%s" 1' %(key)
    return lightRibContent
def ribAttribute( geometry, attributes ):
    """Build the rib Attribute statements for a geometry.

    @parameter geometry (str) geometry name.
    @parameter attributes (list) attributes data.
    return ribAttribute content (string)
    """
    # (attribute key, rib statement template) in emission order; each
    # statement is written at most once, by the first matching rule.
    specs = (
        ( 'm_matte_int', '\n Matte %s' ),
        ( 'm_side_int', '\n Sides %s' ),
        ( 'm_visibility_camera_int', '\n Attribute "visibility" "int camera" [%s]' ),
        ( 'm_visibility_transmission_int', '\n Attribute "visibility" "int transmission" [%s]' ),
        ( 'm_visibility_indirect_int', '\n Attribute "visibility" "int indirect" [%s]' ),
        ( 'm_trace_maxdiffusedepth_int', '\n Attribute "trace" "int maxdiffusedepth" [%s]' ),
        ( 'm_trace_maxspeculardepth_int', '\n Attribute "trace" "int maxspeculardepth" [%s]' ),
    )
    emitted = set()
    attributeRibContent = ''
    for attribute in attributes:
        if not objectMatch( geometry, attribute['rule'] ):
            continue
        for key, template in specs:
            if key not in emitted and attribute[key]:
                attributeRibContent += template %( attribute[key] )
                emitted.add( key )
    return attributeRibContent
def ribShading( geometry, shaders ):
    """Return the rib shading statement bound to a geometry.

    @parameter geometry (str) geometry name.
    @parameter shaders (list) shaders data.
    return rib Shading content (string); empty when no rule matches.
    """
    # first matching rule wins
    value = next( ( s['value'] for s in shaders if objectMatch( geometry, s['rule'] ) ), None )
    if value is None:
        return ''
    return '\n %s' %( value )
def ribGeometry( geometries, rlf ):
    """Build the rib statements for every geometry.

    @parameter geometries (dict) geometries data.
    @parameter rlf (dict) rlf data both shaders and attributes.
    return ribGeometry content (string)
    """
    # default shader: magenta constant so unassigned geometry is obvious
    geometryRibContent = ' Bxdf "PxrConstant" "unassigned" "color emitColor" [1.0 0.0 1.0] "__instanceid" ["unassigned_0"]'
    # .items() instead of the Python-2-only .iteritems(): same observable
    # behaviour on Python 2 and also runs on Python 3.
    for key, value in geometries.items():
        geometryRibContent += '\n AttributeBegin '
        geometryRibContent += '\n Attribute "identifier" "string name" ["%s"]' %(key)
        # add attributes
        geometryRibContent += ribAttribute( key, rlf['attribute'] )
        # add shader
        geometryRibContent += ribShading( key, rlf['shading'] )
        geometryRibContent += '\n ConcatTransform %s' %(value['matrix'])
        geometryRibContent += '\n TransformBegin '
        # todo : find a good way to get the boundingbox and replace ReadArchive by Procedural2
        geometryRibContent += '\n ReadArchive "%s"' %(value['path'])
        # geometryRibContent += '\n Procedural2 "DelayedReadArchive2" "SimpleBound" "string filename" ["%s"] "float[6] bound" [-1 1 -1 1 -1 1] "int __immediatesubdivide" [0]' %(meshPath)
        geometryRibContent += '\n TransformEnd '
        geometryRibContent += '\n AttributeEnd '
    return geometryRibContent
def writeAlf( path, sceneName, passName, frames, displayType ):
    """write an alf file"""
    '@parameter path (string) Path of the environement.'
    '@parameter sceneName (string) Name of the scene.'
    '@parameter passName (string) Name of the pass.'
    '@parameter frames (array of int) Frames to render.'
    'return alf file path (string)'
    # Only the first frame is spooled here; zero-pad it for file names.
    frame = str(int(frames[0])).zfill(4)
    filePath = '%srenderman/%s/data/spool_%s.%s.alf' %(path, sceneName, passName, frame)
    ribPath = 'renderman/%s/rib/%s/%s.%s.rib' %(sceneName, frame, passName, frame)
    outputPath = '%srenderman/%s/images/%s' %(path, sceneName, sceneName)
    itPath = 'C:/Program Files/Pixar/RenderManStudio-20.0-maya2015/bin/it'
    # displayType 0 -> farm render to EXR; 1 -> local render to the "it" viewer.
    # NOTE(review): `task` stays unbound for any other displayType value and the
    # later string formatting would raise -- confirm callers only pass 0 or 1.
    if displayType == 0:
        task = 'Task -title {%s%s} -cmds {' %(passName, frame)
        task += '\nRemoteCmd {prman -t:0 -Progress -recover %r -checkpoint 5m -cwd "%D('+path+')" "%D('+ribPath+')"} -service {PixarRender}'
        task +='\n} -preview {sho "%s.exr" }' %(outputPath)
    elif displayType == 1:
        task = 'Task {%s%s} -cmds {' %(passName, frame)
        task += '\nCmd -service {local:PixarRender} {prman -t:0 -Progress -recover %r -checkpoint 0 -dspyserver "'+itPath+'" -cwd "%D('+path+')" "%D('+ribPath+')"}'
        task += '\n} -preview {sho "%s"}' %(outputPath)
    # Assemble the Alfred job wrapper around the single task above.
    alfContent = '\n'
    alfContent += '\nJob -title {%s_%s.%s} -comment {#username BC)} -dirmaps {' %(sceneName, passName, frame)
    alfContent += '\n {}'
    alfContent += '\n} -envkey {rms-20.0-maya-2015 prman-20.0} -pbias 1 -crews {} -tags {} -service {} -whendone {} -whenerror {} -serialsubtasks 1 -subtasks {'
    alfContent += '\n Task {Frames} -serialsubtasks 1 -subtasks {'
    alfContent += '\n Task {Images 1} -subtasks {'
    alfContent += '\n %s' %(task)
    alfContent += '\n }'
    alfContent += '\n }'
    alfContent += '\n}'
    alfContent += '\n'
    Forge.core.System.setFile( path=filePath, content=alfContent)
    return filePath
def writeRibJob( path, sceneName ):
    """Write the shared job.rib file for a scene.

    @parameter path (string) Path of the environement.
    @parameter sceneName (string) Name of the scene.
    """
    Forge.core.System.setFile(
        path='%srenderman/%s/rib/job/job.rib' %(path, sceneName),
        content=ribJob( path, sceneName ) )
def writeRibFrame( path, sceneName, passName, frame ):
    """Write the per-frame rib file chaining the job options and the pass rib.

    @parameter path (string) Path of the environement.
    @parameter sceneName (string) Name of the scene.
    @parameter passName (string) Name of the pass.
    @parameter frame (string) Frame to render.
    """
    content = ribJob( path, sceneName )
    content += '\nReadArchive "renderman/%s/rib/%s/%s.%s.rib"' %( sceneName, frame, passName, frame )
    Forge.core.System.setFile(
        path='%srenderman/%s/rib/%s/%s.rib' %(path, sceneName, frame, frame),
        content=content )
def writeRibPass( args, frame ):
    """write a rib pass file (frame options, displays, camera, world, lights, geometry)"""
    '@parameter args (dict) Rendering arguments.'
    '@parameter frame (string) Frame to render.'
    # URL-encoded "it" path used by the -launchURI display connection below
    itPath = 'C:/Program%20Files/Pixar/RenderManStudio-20.0-maya2015/bin/it'
    path = args['globals']['variables']['path']
    sceneName = args['globals']['variables']['sceneName']
    passName = args['globals']['variables']['passName']
    displayType = args['globals']['variables']['displayType']
    objectSettings = args['data']['object']
    cameraSettings = args['data']['camera']
    # display settings
    filtername = args['globals']['settings']['display']['filter']
    filterwidth = args['globals']['settings']['display']['filterwidth']
    channels = args['globals']['settings']['display']['channels']
    # render settings (stringified once for the rib templates below)
    order = args['globals']['settings']['render']['order']
    minwidth = str( args['globals']['settings']['render']['minwidth'] )
    texturememory = str( args['globals']['settings']['render']['texturememory'] )
    geocachememory = str( args['globals']['settings']['render']['geocachememory'] )
    proceduralmemory = str( args['globals']['settings']['render']['proceduralmemory'] )
    opacitycachememory = str( args['globals']['settings']['render']['opacitycachememory'] )
    CropWindow = ' '.join([ str(i)for i in args['globals']['settings']['render']['CropWindow'] ])
    minsamples = str( args['globals']['settings']['render']['minsamples'] )
    maxsamples = str( args['globals']['settings']['render']['maxsamples'] )
    maxPathLength = str( args['globals']['settings']['render']['maxPathLength'] )
    numLightSamples = str( args['globals']['settings']['render']['numLightSamples'] )
    numBxdfSamples = str( args['globals']['settings']['render']['numBxdfSamples'] )
    numIndirectSamples = str( args['globals']['settings']['render']['numIndirectSamples'] )
    allowCaustics = str( args['globals']['settings']['render']['allowCaustics'] )
    Format = ' '.join([ str(i)for i in args['globals']['settings']['render']['Format'] ])
    pixelVariance = str( args['globals']['settings']['render']['PixelVariance'] )
    maxdiffusedepth = str( args['globals']['settings']['render']['maxdiffusedepth'] )
    maxspeculardepth = str( args['globals']['settings']['render']['maxspeculardepth'] )
    pass_camera_name = args['globals']['settings']['render']['pass_camera_name']
    # vertical screen window kept proportional to the image aspect ratio
    screenHeight = float(args['globals']['settings']['render']['Format'][1]) / float(args['globals']['settings']['render']['Format'][0])
    ScreenWindow = '-1 1 -%s %s' %(screenHeight, screenHeight)
    # only the first camera / first object of the dicts is used
    # (Python-2 idiom: .keys()[0] relies on keys() returning a list)
    camera_clipping = ' '.join([ str(i)for i in cameraSettings[ cameraSettings.keys()[0] ]['clipping'] ])
    camera_fov = str( cameraSettings[ cameraSettings.keys()[0] ]['fov'] )
    camera_translate = cameraSettings[ cameraSettings.keys()[0] ]['translate']
    camera_rotate = cameraSettings[ cameraSettings.keys()[0] ]['rotate']
    lightPath = '%srenderman/_lib/shaders/areaLight' %(path)
    statPath = '%srenderman/%s/log/%s.%s.xml' %(path, sceneName, passName, frame)
    outputPath = '%srenderman/%s/images/%s' %(path, sceneName, passName)
    meshPath = objectSettings[ objectSettings.keys()[0] ]['path']
    # build the Display / DisplayChannel statements for every channel
    display = ''
    toggle = ''
    for channel in channels:
        # NOTE(review): DisplayChannel is only emitted once `toggle` is set,
        # i.e. never for the first channel -- confirm this is intentional.
        if toggle:
            display += '\n DisplayChannel "%s"' %(channel['name'])
            if channel['lpe']:
                display += ' "string source" ["color lpe:%s"]' %(channel['lpe'])
        if displayType == 0:
            # batch: one openexr file per channel
            display += '\n Display "%s%s_%s.%s.exr" "openexr" "%s" ' %(toggle, outputPath, channel['label'], frame, channel['name'].split(' ')[-1])
            display += '"string autocrop" ["true"] "string exrcompression" ["zip"] "string exrpixeltype" ["%s"] ' %(channel['type'])
            display += '"string filter" ["%s"] "float[2] filterwidth" [%i %i] ' %(filtername, filterwidth[0], filterwidth[1])
            display += '"int[4] quantize" [0 0 0 0] "float dither" [0] '
            display += '"float[2] exposure" [1 1] "float[3] remap" [0 0 0]'
        elif displayType == 1:
            # interactive: send every channel to the "it" display server
            display += '\n Display "%s%s_%s.%s" "it" "%s" ' %(toggle, outputPath, channel['label'], frame, channel['name'].split(' ')[-1])
            display += '"string filter" ["%s"] "float[2] filterwidth" [%i %i] ' %(filtername, filterwidth[0], filterwidth[1])
            display += '"int[4] quantize" [0 0 0 0] "float dither" [0] '
            display += '"float[2] exposure" [1 1] "float[3] remap" [0 0 0] '
            display += '"int merge" [0] "string connection" ["-launchURI %s"] ' %(itPath)
            if not toggle:
                display += '"string dspyParams" [" itOpenHandler {::ice::startTimer;};;; '
                display += 'itCloseHandler {::ice::endTimer %arglist; };;; '
                display += 'dspyRender -renderer preview -time 1 -crop 0 1 0 1 '
                display += '-port 53781 -context \\"%s\\" ' %(outputPath)
                display += '-notes \\"(Date : \\nPxrPathTracer MaxSamples : Mode : Light : Bsdf : Indir : \\""]'
        # secondary displays are prefixed with '+'
        toggle = '+'
    filePath = '%srenderman/%s/rib/%s/%s.%s.rib' %(path, sceneName, frame, passName, frame)
    # frame-level options
    passRibContent = '\nversion 3.04'
    passRibContent += ribJob(path, sceneName)
    passRibContent += '\nFrameBegin 1'
    passRibContent += '\n    Identity '
    passRibContent += '\n    Option "user" "string pass_id" ["%s"] "string pass_phase" ["/Job/Frames/Images"] "string pass_class" ["Final"] "string pass_flavor" [""] "string pass_crew" [""] "string pass_camera_name" ["%s"] "string pass_camera_flavor" [""] "string pass_layer" ["defaultRenderLayer"] "string renderer" ["RIS"] "int pass_features_trace" [1] "int input_color_profile" [0]' %(passName, pass_camera_name)
    passRibContent += '\n    Option "trace" "int maxdepth" [10]'
    passRibContent += '\n    PixelVariance %s' %(pixelVariance)
    passRibContent += '\n    Option "bucket" "string order" ["%s"]' %(order)
    passRibContent += '\n    Option "limits" "int[2] bucketsize" [16 16]'
    passRibContent += '\n    Option "limits" "int gridsize" [256]'
    passRibContent += '\n    Option "trace" "float decimationrate" [1]'
    passRibContent += '\n    Option "hair" "float minwidth" [%s]' %(minwidth)
    passRibContent += '\n    Option "statistics" "int level" [1]'
    passRibContent += '\n    Option "statistics" "string filename" ["stdout"]'
    passRibContent += '\n    Option "statistics" "string xmlfilename" ["%s"]' %(statPath)
    passRibContent += '\n    Option "limits" "color zthreshold" [0.996 0.996 0.996]'
    passRibContent += '\n    Option "limits" "color othreshold" [0.996 0.996 0.996]'
    passRibContent += '\n    Option "limits" "int texturememory" [%s]' %(texturememory)
    passRibContent += '\n    Option "limits" "int geocachememory" [%s]' %(geocachememory)
    passRibContent += '\n    Option "limits" "int proceduralmemory" [%s]' %(proceduralmemory)
    passRibContent += '\n    Option "shading" "int directlightinglocalizedsampling" [0]'
    passRibContent += '\n    Option "limits" "int opacitycachememory" [%s]' %(opacitycachememory)
    passRibContent += '\n    CropWindow %s' %(CropWindow)
    passRibContent += '\n    Hider "raytrace" "int adaptall" [0] "string integrationmode" ["path"] "int incremental" [1] "string pixelfiltermode" ["weighted"] "int minsamples" [%s] "int maxsamples" [%s]' %(minsamples, maxsamples)
    passRibContent += '\n    Integrator "PxrPathTracer" "PxrPathTracer" "int maxPathLength" [%s] "string sampleMode" ["bxdf"] "int numLightSamples" [%s] "int numBxdfSamples" [%s] "int numIndirectSamples" [%s] "int numDiffuseSamples" [1] "int numSpecularSamples" [1] "int numSubsurfaceSamples" [1] "int numRefractionSamples" [1] "int rouletteDepth" [4] "float rouletteThreshold" [0.2] "string imagePlaneSubset" ["rman__imageplane"] "int clampDepth" [2] "float clampLuminance" [10] "int allowCaustics" [%s]' %(maxPathLength, numLightSamples, numBxdfSamples, numIndirectSamples, allowCaustics)
    passRibContent += '\n    Format %s' %(Format)
    passRibContent += '\n%s' %(display)
    passRibContent += '\n    Clipping %s' %(camera_clipping)
    passRibContent += '\n    Projection "perspective" "fov" [%s]' %(camera_fov)
    # render camera
    passRibContent += '\n    ScreenWindow %s' %(ScreenWindow)
    passRibContent += '\n    Shutter 0 0'
    passRibContent += '\n    Rotate %s 1 0 0' %( camera_rotate[0] )
    passRibContent += '\n    Rotate %s 0 1 0' %( camera_rotate[1] )
    passRibContent += '\n    Rotate %s 0 0 1' %( camera_rotate[2] )
    passRibContent += '\n    Scale 1 1 -1'
    passRibContent += '\n    Translate %s %s %s' %( camera_translate[0], camera_translate[1], camera_translate[2] )
    # world attributes
    passRibContent += '\n    Camera "world" "float[2] shutteropening" [0 1]'
    passRibContent += '\n    Option "user" "color camera_bg" [0 0 0] "float camera_bga" [0]'
    passRibContent += '\n    Imager "background" "color color" [0 0 0] "float alpha" [0]'
    passRibContent += '\n    WorldBegin '
    passRibContent += '\n    ScopedCoordinateSystem "world_ref"'
    passRibContent += '\n    Attribute "visibility" "int transmission" [1] "int indirect" [1]'
    passRibContent += '\n    Bxdf "PxrDiffuse" "default" '
    passRibContent += '\n    Attribute "user" "int shader_bindingstrength" [0]'
    passRibContent += '\n    Attribute "trace" "int maxdiffusedepth" [%s] "int maxspeculardepth" [%s] "int samplemotion" [1] "float autobias" [1] "float bias" [0.001] "int displacements" [1]' %(maxdiffusedepth, maxspeculardepth)
    passRibContent += '\n    Attribute "dice" "string referencecamera" ["worldcamera"]'
    passRibContent += '\n    ShadingRate 1'
    passRibContent += '\n    Attribute "displacementbound" "string coordinatesystem" ["shader"] "float sphere" [0]'
    passRibContent += '\n    Attribute "photon" "string causticmap" [""] "string globalmap" [""]'
    # lights
    passRibContent += '\n'
    passRibContent += ribLight( args['data']['light'] )
    passRibContent += '\n'
    # geometry
    passRibContent += '\n'
    passRibContent += ribGeometry( args['data']['object'], args['rlf'] )
    passRibContent += '\n'
    passRibContent += '\n    WorldEnd '
    passRibContent += '\nFrameEnd '
    passRibContent += '\n'
    Forge.core.System.setFile( path=filePath, content=passRibContent)
def launchRender( args ):
    """launch a render with rib export and execution of an alf file"""
    '@parameter args (dict) All arguments for the render.'
    # args = {
    #         'globals' : {
    #                 'path':'',
    #                 'sceneName':'',
    #                 'passName':'',
    #                 'frames':[],
    #                 'displayType':0,
    #                 'settings':{
    #                         'display':{'driver':'openexr','channel':'rgba','filter':'gaussian','filterwidth':[2,2]},
    #                         'render':{'pass_camera_name':'perspShape','CropWindow':[0,1,0,1],'minsamples':0,'maxsamples':512,'PixelVariance':0.005,'maxPathLength':10,'numLightSamples':8,'numBxdfSamples':8,'numIndirectSamples':1,'allowCaustics':0,'maxdiffusedepth':1,'maxspeculardepth':2,'Format':[960,540,1],'order':'horizontal','minwidth':0.5,'texturememory':2097152,'geocachememory':2097152,'proceduralmemory':0,'opacitycachememory':1024000}
    #                 }
    #         },
    #         'rlf' : {
    #                 'shading':{ 'materialName':{'class':'PxrDiffuse', 'rule':'', 'value':'' } },
    #                 'attribute':[{'class':'primaryOff', 'rule':'', 'value':'' }],
    #         },
    #         'data' : {
    #                 'object':{'objectName':{'path':''}},
    #                 'light':{ 'lightName':{'class':'AeraLight', 'value':''} },
    #                 'camera':{ 'cameraName':{'value':''} },
    #         }
    # }
    varGlob = args['globals']['variables']
    # pre render
    createRenderHierarchy( varGlob['path'], varGlob['sceneName'], varGlob['frames'] )
    alfPath = writeAlf( varGlob['path'], varGlob['sceneName'], varGlob['passName'], varGlob['frames'], varGlob['displayType'] )
    # job.rib only needs to be written once (it was previously written twice)
    writeRibJob( varGlob['path'], varGlob['sceneName'] )
    for frame in varGlob['frames']:
        # Zero-pad the CURRENT frame. The previous code padded frames[0]
        # on every iteration, so all passes were written for the first
        # frame only.
        frame = str( int(frame) ).zfill(4)
        writeRibFrame( varGlob['path'], varGlob['sceneName'], varGlob['passName'], frame )
        writeRibPass( args, frame )
    # launch render
    Forge.core.Process.launchSoftware( Forge.core.Env().localqueue, [alfPath] )
|
Black-Cog/Forge
|
core/rendermanRib.py
|
Python
|
bsd-3-clause
| 25,735
|
[
"Gaussian"
] |
ea18ac718f6fc5c752f5577a7c823dc1df862616a781bcb39d4580f4e4d4afef
|
"""Test the PhyML executable.
https://github.com/biopython/biopython/blob/master/Tests/test_phyml_tool.py
"""
import sys
import os
import unittest
from Bio import Phylo
from Bio.Phylo.Applications import PhymlCommandline
from Bio import MissingExternalDependencyError
class PhymlTest(unittest.TestCase):
    """Test for application wrapper."""
    def setUp(self):
        """Locate the PhyML executable and the example alignment.

        The previous code did this in ``__init__`` -- overriding
        ``TestCase.__init__`` without calling ``super`` (and without the
        ``methodName`` argument) breaks unittest construction, and
        ``phyml_exe`` was a local that was never stored on ``self`` even
        though ``test_phyml`` reads ``self.phyml_exe``. ``setUp`` is the
        supported per-test fixture hook.
        """
        # Try to avoid problems when the OS is in another language
        os.environ['LANG'] = 'C'
        phyml_exe = None
        exename = "PhyML-3.1_win32.exe" if sys.platform == "win32" else "phyml"
        from Bio._py3k import getoutput
        try:
            output = getoutput(exename + " --version")
            # crude version sniff: any "20xx" release string counts
            if "not found" not in output and "20" in output:
                phyml_exe = exename
        except OSError:
            # Python 2.6 or 2.7 on Windows XP:
            # WindowsError: [Error 2] The system cannot find the file specified
            # Python 3.3 or 3.4 on Windows XP:
            # FileNotFoundError: [WinError 2] The system cannot find the file
            # specified
            pass
        if not phyml_exe:
            raise MissingExternalDependencyError(
                "Install PhyML 3.0 if you want to use the \
Bio.Phylo.Applications wrapper.")
        self.phyml_exe = phyml_exe
        # Example Phylip file with 13 aligned protein sequences
        self.EX_PHYLIP = 'HTR1E_aligned.phy'
    def test_phyml(self):
        """Run PhyML using the wrapper."""
        # NOTE(review): datatype='nt' while the alignment is described as
        # protein sequences -- confirm whether 'aa' was intended.
        cmd = PhymlCommandline(
            self.phyml_exe,
            input=self.EX_PHYLIP,
            datatype='nt')
        # Smoke test
        try:
            out, err = cmd()
            self.assertTrue(len(out) > 0)
            self.assertEqual(len(err), 0)
            # Check the output tree
            tree = Phylo.read(self.EX_PHYLIP + '_phyml_tree.txt', 'newick')
            self.assertEqual(tree.count_terminals(), 13)
        finally:
            # Clean up generated files
            for suffix in ['_phyml_tree.txt', '_phyml_stats.txt']:
                fname = self.EX_PHYLIP + suffix
                if os.path.isfile(fname):
                    os.remove(fname)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
datasnakes/Datasnakes-Scripts
|
OrthoEvol/Orthologs/Phylogenetics/PhyML/phyml_test/phyml_test.py
|
Python
|
mit
| 2,300
|
[
"Biopython"
] |
62187af817cfc5bd8155b3f430befb9e5debac53781003d7e7905602071541bc
|
import os
from dirac.lib.base import *
from DIRAC import gConfig, gLogger, rootPath
from dirac.lib.diset import getRPCClient
from dirac.lib.credentials import getUserDN, getUsername
from dirac.lib.credentials import getSelectedGroup, checkUserCredentials
from DIRAC.FrameworkSystem.Client.UserProfileClient import UserProfileClient
class ExternalController(BaseController):
    # Pylons-style controller (Python 2 syntax) serving the external /
    # registration pages of the DIRAC web portal.
    def index(self):
        # Default action: identical to display().
        return self.display()
    def display(self):
        """Render the registration flow depending on the user's credentials."""
        checkUserCredentials()
        dn = getUserDN()
        user = getUsername()
        group = getSelectedGroup()
        gLogger.always( "User: %s, Group: %s, DN: %s" % ( user , group , dn ) )
        # Check that every template involved in the flow exists on disk;
        # fall back to the generic page if anything is missing.
        templates = [ "reg_%s.mako" % i for i in [ "done" , "form" , "info" ] ]
        html = [ "reg_%s.html" % i for i in [ "footer" , "header" , "conditions" , "form" , "done" ] ]
        files = templates + html
        basedir = os.path.join( rootPath , "Web" , "dirac" , "templates" )
        for i in files:
            f = os.path.join( basedir , i )
            if not os.path.exists( f ):
                gLogger.error( "File does not exists: %s" % f )
                return render( "web/External.mako" )
        # Valid certificate but still anonymous: registration flow.
        if dn and user == "anonymous":
            upc = UserProfileClient( "Registration" , getRPCClient )
            result = upc.retrieveVar( dn )
            if result[ "OK" ]:
                c.sent = result[ "Value" ]
                return render( "/reg_done.mako" )
            else:
                return render("/reg_form.mako")
        # No certificate at all: explain how to obtain one.
        if not dn or dn == "":
            return render("/reg_info.mako")
        if "site" not in request.params:
            c.select = gConfig.getValue( "/Website/DefaultExternalURL", "http://diracgrid.org" )
            return render( "web/External.mako" )
        # No idea what this code should do...
        if request.params.has_key( "site" ) and len( request.params[ "site" ] ) > 0:
            if str( request.params[ "site" ] ) != "All":
                c.select = str( request.params[ "site" ] )
        gLogger.debug("Request's body:")
        # Forward every extra request parameter as a query-string suffix.
        for key in request.params.keys():
            if not key == "site" and len(request.params[key]) > 0:
                c.select = c.select + "&" + key + "=" + request.params[key]
                try:
                    gLogger.debug("%s - %s" % (key,request.params[key]))
                except Exception,x:
                    gLogger.error("Exception: %s" % str(x))
                    pass
        return render( "web/External.mako" )
|
DIRACGrid/DIRACWeb
|
dirac/controllers/web/External.py
|
Python
|
gpl-3.0
| 2,298
|
[
"DIRAC"
] |
9ae9ac66d5b94a044a10ffec6509106408a1ece6a3d9ad66e0e905e27982363f
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from flask import Blueprint, render_template, url_for
from inspire.modules.forms import fields
from invenio.base.i18n import _
from wtforms.validators import DataRequired
from .form import InspireForm
# Blueprint exposing the demo form endpoints under the /forms URL prefix.
blueprint = Blueprint(
    'inspire_forms',
    __name__,
    url_prefix='/forms',
    template_folder='templates',
    static_folder="static",
)
class DemoForm(InspireForm):
    """Demo sample Form."""
    # Required free-text field; `export_key` renames the key when the form
    # data is exported (e.g. through a DataExporter visitor).
    nickname = fields.StringField(
        _("Nickname"),
        validators=[DataRequired(message=_("Nickname not provided"))],
        placeholder="My placeholder",
        description="My description",
        export_key="custom_nickname_key"
    )
    password = fields.PasswordField(_("Password"))
    # Hidden bookkeeping fields carried through the form round-trip.
    referer = fields.HiddenField()
    login_method = fields.HiddenField()
    # Title shown when the form is rendered.
    _title = _("Demo INSPIRE form")
    # Field grouping used by the form template.
    groups = [
        ('Personal information',
         ['nickname', 'password']),
    ]
@blueprint.route('/demoform', methods=['GET', 'POST'])
def demoform():
    """Render and process the INSPIRE demo form."""
    # from inspire.modules.forms.utils import DataExporter
    form = DemoForm(data={"nickname": "John Doe"})
    template_ctx = {
        "action": url_for('.demoform'),
        "name": "inspireForm",
        "id": "inspireForm",
    }
    # Guard clause: anything but a valid submission just re-renders the form.
    if not form.validate_on_submit():
        return render_template('forms/form_demo.html', form=form, **template_ctx)
    # If the form keys need post-processing (e.g. to match the names in
    # JSONAlchemy), a DataExporter visitor can rename them using the
    # `export_key` field parameter:
    #     visitor = DataExporter(); visitor.visit(form); visitor.data
    from invenio_workflows.models import BibWorkflowObject
    from flask.ext.login import current_user
    workflow_object = BibWorkflowObject.create_object(id_user=current_user.get_id())
    workflow_object.set_data(form.data)
    # delayed=True executes the workflow in the background (e.g. via Celery).
    workflow_object.start_workflow("demoworkflow", delayed=True)
    return render_template('forms/form_demo_success.html', form=form)
|
Dziolas/inspire-next
|
inspire/modules/forms/views.py
|
Python
|
gpl-2.0
| 3,060
|
[
"VisIt"
] |
821401e77bb5d57b948525c7fb04ce195766eecbbf67cad84dfbb4edd6b88836
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from os import path
import numpy as np
import scipy.optimize as opt
try:
# scipy's factorial was moved to special in scipy 1.3.0+
from scipy.special import factorial
except ImportError:
from scipy.misc import factorial
import warnings
from . import equation_of_state as eos
from .. import constants as constants
from ..utils.chemistry import read_masses
from ..utils.math import bracket
atomic_masses=read_masses()
# energy_states should provide the energies and degeneracies of each electronic level in a variety of elements
class DKS_L(eos.EquationOfState):
"""
Base class for the finite strain liquid equation of state detailed
in :cite:`deKoker2013` (supplementary materials).
"""
"""
Ideal gas contributions (translational and electronic)
to thermodynamic properties
"""
def _ln_partition_function(self, mass, temperature):
"""
Calculates the natural log of the partition function
"""
return 3./2.*np.log(temperature) \
+ 3./2.*np.log(mass*constants.Boltzmann \
/(2*np.pi*constants.Dirac*constants.Dirac)) \
def _F_ig(self, temperature, volume, params):
"""
The ideal gas contribution to the helmholtz free energy
Eq. S6, see also eq. 16.72 of Callen., 1985; p. 373
"""
V = volume/constants.Avogadro
figoverRT=0.
for element, N in params['formula'].items(): # N is a.p.f.u
if N > 1.e-5:
mass = atomic_masses[element]/constants.Avogadro
figoverRT += -N*(np.log(V) + self._ln_partition_function(mass, temperature) \
+ 1.) + N*np.log(N)
return constants.gas_constant*temperature*figoverRT
def _S_ig(self, temperature, volume, params):
"""
The ideal gas contribution to the entropy
"""
V = volume/constants.Avogadro
entropy_sum=0.
for element, N in params['formula'].items(): # N is a.p.f.u
if N > 1.e-5:
mass = atomic_masses[element]/constants.Avogadro
entropy_sum -= -N*(np.log(V) + self._ln_partition_function(mass, temperature) \
+ 5./2.) + N*np.log(N)
return constants.gas_constant*entropy_sum
def _C_v_ig(self, temperature, volume, params):
"""
The ideal gas contribution to the heat capacity
"""
n_atoms=0
for element, N in params['formula'].items():
n_atoms += N
return 1.5*constants.gas_constant*n_atoms
def _P_ig(self, temperature, volume, params):
"""
The ideal gas contribution to the pressure
PV = nRT
"""
n_atoms=0
for element, N in params['formula'].items():
n_atoms += N
return n_atoms*constants.gas_constant*temperature / volume
def _K_T_ig(self, temperature, volume, params):
"""
The ideal gas contribution to the isothermal bulk modulus
V * d/dV(-nRT/V) = V*nRT/V^2
"""
n_atoms=0
for element, N in params['formula'].items():
n_atoms += N
return n_atoms*constants.gas_constant*temperature / volume
def _alphaK_T_ig(self, temperature, volume, params):
"""
The ideal gas contribution to the product of the
thermal expansivity and isothermal bulk modulus
d/dT(nRT/V) = nR/V
"""
n_atoms=0
for element, N in params['formula'].items():
n_atoms += N
return n_atoms*constants.gas_constant / volume
"""
Electronic contributions to thermodynamic properties
"""
def _zeta(self, temperature, volume, params): # eq. S5a, beta in deKoker thesis (3.34)
return params['zeta_0']*(np.power(volume/params['el_V_0'], params['xi']))
def _dzetadV(self, temperature, volume, params):
return params['zeta_0']*params['xi']*(np.power(volume/params['el_V_0'], params['xi']))/volume
def _d2zetadV2(self, temperature, volume, params):
return params['zeta_0'] \
* params['xi'] * (params['xi'] - 1.) \
* (np.power(volume/params['el_V_0'], params['xi'])) \
/ volume / volume
def _Tel(self, temperature, volume, params): # eq. S5b
return params['Tel_0']*(np.power(volume/params['el_V_0'], params['eta']))
def _dTeldV(self, temperature, volume, params):
return params['Tel_0'] * params['eta'] \
* (np.power(volume/params['el_V_0'], params['eta'])) \
/ volume
def _d2TeldV2(self, temperature, volume, params):
return params['Tel_0'] \
* params['eta'] * (params['eta'] - 1.) \
* (np.power(volume/params['el_V_0'], params['eta'])) \
/ volume / volume
def _gimel(self, temperature_el, temperature, volume, params): # -F_el/zeta, 3.30 in de Koker thesis
return 0.5*(temperature*temperature - temperature_el*temperature_el) \
- temperature*temperature_el*np.log(temperature/temperature_el)
def _dgimeldTel(self, temperature_el, temperature, volume, params):
return (temperature-temperature_el) - temperature*np.log(temperature/temperature_el)
def _dgimeldT(self, temperature_el, temperature, volume, params):
return (temperature-temperature_el) - temperature_el*np.log(temperature/temperature_el)
def _d2gimeldTdTel(self, temperature_el, temperature, volume, params):
return -np.log(temperature/temperature_el)
def _d2gimeldTel2(self, temperature_el, temperature, volume, params):
return (temperature/temperature_el) - 1.
def _F_el(self, temperature, volume, params): # F_el
temperature_el = self._Tel(temperature, volume, params)
if temperature < temperature_el:
F_el = 0
else:
F_el = -self._zeta(temperature, volume, params) \
* self._gimel(temperature_el, temperature, volume, params)
return F_el
def _S_el(self, temperature, volume, params): # S_el
temperature_el = self._Tel(temperature, volume, params)
if temperature < temperature_el:
S_el = 0
else:
S_el = self._zeta(temperature, volume, params) \
* self._dgimeldT(temperature_el, temperature, volume, params)
return S_el
    def _P_el(self, temperature, volume, params): # P_el
        """
        The electronic contribution to the pressure, P = -dF_el/dV.
        Zero below the electronic activation temperature T_el.
        """
        temperature_el = self._Tel(temperature, volume, params)
        if temperature < temperature_el:
            P_el = 0
        else:
            # Product rule on F_el = -zeta * gimel; T_el itself depends
            # on V, hence the second (chain-rule) term.
            P_el = self._dzetadV(temperature, volume, params) \
                * self._gimel(temperature_el, temperature, volume, params) \
                + self._zeta(temperature, volume, params) \
                * self._dTeldV(temperature, volume, params) \
                * self._dgimeldTel(temperature_el, temperature, volume, params)
        return P_el
    def _K_T_el(self, temperature, volume, params): # K_T_el
        """
        The electronic contribution to the isothermal bulk modulus,
        K_T = -V * d2F_el/dV2. Zero below the electronic activation
        temperature T_el.
        """
        temperature_el = self._Tel(temperature, volume, params)
        if temperature < temperature_el:
            K_T_el = 0
        else:
            # Full second derivative of F_el = -zeta(V) * gimel(Tel(V), T)
            # via product and chain rule (three groups of terms below).
            K_T_el = -volume \
                * ( self._d2zetadV2(temperature, volume, params) \
                * self._gimel(temperature_el, temperature, volume, params) \
                + 2. * self._dzetadV(temperature, volume, params) \
                * self._dgimeldTel(temperature_el, temperature, volume, params) \
                * self._dTeldV(temperature, volume, params) \
                + self._zeta(temperature, volume, params) \
                * ( self._d2TeldV2(temperature, volume, params) \
                * self._dgimeldTel(temperature_el, temperature, volume, params) \
                + self._dTeldV(temperature, volume, params) \
                * self._dTeldV(temperature, volume, params) \
                * self._d2gimeldTel2(temperature_el, temperature, volume, params)))
        return K_T_el
    def _alphaK_T_el(self, temperature, volume, params): # (alphaK_T)_el
        """
        The electronic contribution to the product of the thermal
        expansivity and isothermal bulk modulus, d2F_el/dVdT.
        Zero below the electronic activation temperature T_el.
        """
        temperature_el = self._Tel(temperature, volume, params)
        if temperature < temperature_el:
            alphaK_T_el = 0
        else:
            alphaK_T_el = self._dzetadV(temperature, volume, params) \
                * self._dgimeldT(temperature_el, temperature, volume, params) \
                + self._zeta(temperature, volume, params) \
                * self._d2gimeldTdTel(temperature_el, temperature, volume, params) \
                * self._dTeldV(temperature, volume, params)
        return alphaK_T_el
def _C_v_el(self, temperature, volume, params): # C_el, eq. 3.28 of de Koker thesis
temperature_el = self._Tel(temperature, volume, params)
zeta = self._zeta(temperature, volume, params)
if temperature > temperature_el:
Cv_el = zeta*(temperature - temperature_el)
else:
Cv_el = 0.
return Cv_el
"""
Excess (bonding) contributions to thermodynamic properties
"""
# Finite strain
def _finite_strain(self, temperature, volume, params): # f(V), eq. S3a
return (1./2.)*(np.power(params['V_0']/volume, 2./3.) - 1.0)
def _dfdV(self, temperature, volume, params): # f(V), eq. S3a
return (-1./3.)*np.power(params['V_0']/volume, 2./3.)/volume
def _d2fdV2(self,temperature, volume, params):
return (5./9.)*np.power(params['V_0']/volume, 2./3.)/volume/volume
# Temperature
def _theta(self, temperature, volume, params): # theta, eq. S3b
return np.power(temperature/params['T_0'], params['m']) - 1.
def _dthetadT(self, temperature, volume, params):
return params['m']*np.power(temperature/params['T_0'], params['m']) \
/ temperature
def _d2thetadT2(self, temperature, volume, params):
return params['m']*(params['m']-1.)*np.power(temperature/params['T_0'], params['m']) \
/ temperature / temperature
def _F_xs(self, temperature, volume, params): # F_xs, eq. S2
f = self._finite_strain(temperature, volume, params)
theta = self._theta(temperature, volume, params)
energy = 0.
for i in range(len(params['a'])):
ifact=factorial(i, exact=False)
for j in range(len(params['a'][0])):
jfact=factorial(j, exact=False)
energy += params['a'][i][j]*np.power(f, i)*np.power(theta, j)/ifact/jfact
return energy
    def _S_xs(self, temperature, volume, params): # S_xs, eq. 3.18
        """
        The excess (bonding) contribution to the entropy,
        S = -dF_xs/dT = -(dtheta/dT) * sum_ij j*a_ij*f^i*theta^(j-1)/(i!*j!).
        """
        f = self._finite_strain(temperature, volume, params)
        theta = self._theta(temperature, volume, params)
        entropy = 0.
        for i in range(len(params['a'])):
            ifact = factorial(i, exact=False)
            for j in range(len(params['a'][0])):
                # j = 0 terms vanish on differentiation w.r.t. theta
                if j > 0:
                    jfact = factorial(j, exact=False)
                    entropy += j*params['a'][i][j]*np.power(f, i)*np.power(theta, j-1.)/ifact/jfact
        return -self._dthetadT(temperature, volume, params)*entropy
    def _P_xs(self, temperature, volume, params): # P_xs, eq. 3.17 of de Koker thesis
        """
        The excess (bonding) contribution to the pressure,
        P = -dF_xs/dV = -(df/dV) * sum_ij i*a_ij*f^(i-1)*theta^j/(i!*j!).
        """
        f = self._finite_strain(temperature, volume, params)
        theta = self._theta(temperature, volume, params)
        pressure=0.
        for i in range(len(params['a'])):
            ifact=factorial(i, exact=False)
            # i = 0 terms vanish on differentiation w.r.t. f
            if i > 0:
                for j in range(len(params['a'][0])):
                    jfact=factorial(j, exact=False)
                    pressure += float(i)*params['a'][i][j]*np.power(f, float(i)-1.)*np.power(theta, float(j))/ifact/jfact
        return -self._dfdV(temperature, volume, params)*pressure
    def _K_T_xs(self, temperature, volume, params): # K_T_xs, eq. 3.20 of de Koker thesis
        """
        The excess (bonding) contribution to the isothermal bulk
        modulus, K_T = V * d2F_xs/dV2 (chain rule through f(V)).
        """
        f = self._finite_strain(temperature, volume, params)
        theta = self._theta(temperature, volume, params)
        K_ToverV=0.
        for i in range(len(params['a'])):
            ifact=factorial(i, exact=False)
            for j in range(len(params['a'][0])):
                if i > 0:
                    jfact=factorial(j, exact=False)
                    prefactor = float(i) * params['a'][i][j] \
                        * np.power(theta, float(j)) / ifact / jfact
                    # first chain-rule term: dF/df * d2f/dV2
                    K_ToverV += prefactor*self._d2fdV2(temperature, volume, params) \
                        * np.power(f, float(i-1))
                    if i > 1:
                        # second chain-rule term: d2F/df2 * (df/dV)^2
                        dfdV = self._dfdV(temperature, volume, params)
                        K_ToverV += prefactor * dfdV * dfdV \
                            * float(i-1) * np.power(f, float(i-2))
        return volume*K_ToverV
    def _alphaK_T_xs(self, temperature, volume, params): # eq. 3.21 of de Koker thesis
        """
        The excess (bonding) contribution to the product of the
        thermal expansivity and isothermal bulk modulus,
        -d2F_xs/dVdT (mixed derivative through f(V) and theta(T)).
        """
        f = self._finite_strain(temperature, volume, params)
        theta = self._theta(temperature, volume, params)
        sum_factors = 0.
        for i in range(len(params['a'])):
            ifact=factorial(i, exact=False)
            # only i > 0 and j > 0 terms survive the mixed derivative
            if i > 0:
                for j in range(len(params['a'][0])):
                    if j > 0:
                        jfact=factorial(j, exact=False)
                        sum_factors += float(i)*float(j)*params['a'][i][j] \
                            * np.power(f, float(i-1)) * np.power(theta, float(j-1)) \
                            / ifact / jfact
        return -self._dfdV(temperature, volume, params) \
            * self._dthetadT(temperature, volume, params) \
            * sum_factors
    def _C_v_xs(self, temperature, volume, params): # Cv_xs, eq. 3.22 of de Koker thesis
        """
        The excess (bonding) contribution to the heat capacity,
        C_v = -T * d2F_xs/dT2 (chain rule through theta(T)).
        """
        f = self._finite_strain(temperature, volume, params)
        theta = self._theta(temperature, volume, params)
        C_voverT=0.
        for i in range(len(params['a'])):
            ifact=factorial(i, exact=False)
            for j in range(len(params['a'][0])):
                if j > 0:
                    jfact=factorial(j, exact=False)
                    prefactor = float(j)*params['a'][i][j]*np.power(f, float(i))/ifact/jfact
                    # first chain-rule term: dF/dtheta * d2theta/dT2
                    C_voverT += prefactor * self._d2thetadT2(temperature, volume, params) \
                        * np.power(theta, float(j-1))
                    if j > 1:
                        # second chain-rule term: d2F/dtheta2 * (dtheta/dT)^2
                        dthetadT = self._dthetadT(temperature, volume, params)
                        C_voverT += prefactor * dthetadT * dthetadT \
                            * float(j-1) * np.power(theta, float(j-2))
        return -temperature*C_voverT
"""
Magnetic contributions to thermodynamic properties
(as found in Ramo and Stixrude, 2014)
"""
    def _spin(self, temperature, volume, params):
        """
        Helper for the magnetic (spin) contributions
        (Ramo and Stixrude, 2014). The spin S of the Fe atoms is
        modelled as S = S_a*T + S_b, with S_a linear and S_b polynomial
        in the relative volume V/V_0.

        Returns [S_a, S_b, d(2S+1)/dV, second V-derivative term,
        number of Fe atoms per formula unit]; everything is zero when
        'spin_a' is not supplied in params.
        """
        S_a = 0.
        S_b = 0.
        numerator = 0.
        numerator_2 = 0.
        n_atoms = 0.
        if 'spin_a' in params:
            # only Fe atoms carry a magnetic moment in this model
            for element, N in params['formula'].items():
                if element == 'Fe':
                    n_atoms += N
            VoverVx = volume/params['V_0']
            S_a = params['spin_a'][0] + params['spin_a'][1]*VoverVx
            S_b = (params['spin_b'][0]
                   + params['spin_b'][1]/VoverVx
                   + params['spin_b'][2]/(np.power(VoverVx, 2.))
                   + params['spin_b'][3]/(np.power(VoverVx, 3.)))
            # S = S_a*T + S_b
            # d(2S + 1)/dV
            numerator=-2.*(-params['spin_a'][1]*temperature
                           + params['spin_b'][1]/(np.power(VoverVx, 2.))
                           + 2.*params['spin_b'][2]/(np.power(VoverVx, 3.))
                           + 3.*params['spin_b'][3]/(np.power(VoverVx, 4.)))/params['V_0']
            # d2S/dV2 (NOTE(review): the leading factor of 2 suggests this
            # is actually d2(2S+1)/dV2 = 2*d2S/dV2 — confirm against the
            # usage in _K_T_mag)
            numerator_2 = 2.*((2.*params['spin_b'][1]/(np.power(VoverVx, 3.))
                               + 6.*params['spin_b'][2]/(np.power(VoverVx, 4.))
                               + 12.*params['spin_b'][3]/(np.power(VoverVx, 5.)))
                              /np.power(params['V_0'], 2.))
        return S_a, S_b, numerator, numerator_2, n_atoms
def _F_mag(self, temperature, volume, params):
S_a, S_b, numerator, numerator_2, n_atoms = self._spin(temperature, volume, params)
S = S_a*temperature + S_b
return -n_atoms*constants.gas_constant*temperature*np.log(2.*S + 1.)
def _S_mag(self, temperature, volume, params):
S_a, S_b, numerator, numerator_2, n_atoms = self._spin(temperature, volume, params)
S = S_a*temperature + S_b
return n_atoms*constants.gas_constant * ((2.*S_a*temperature/(2.*S + 1.)
+ np.log(2.*S + 1.)))
def _P_mag(self, temperature, volume, params):
S_a, S_b, numerator, numerator_2, n_atoms = self._spin(temperature, volume, params)
S = S_a*temperature + S_b
dFdV = -n_atoms*constants.gas_constant*temperature*numerator/(2.*S + 1.)
return -dFdV
    def _K_T_mag(self, temperature, volume, params):
        """
        Magnetic contribution to the isothermal bulk modulus,
        K_T = -V * d2F_mag/dV2 with F_mag = -n R T ln(2S + 1).
        """
        S_a, S_b, numerator, numerator_2, n_atoms = self._spin(temperature, volume, params)
        S = S_a*temperature + S_b
        # despite the names, these are derivatives of ln(2S + 1), not of
        # the full F; the -nRT prefactor is applied in the return line
        dFdV = numerator/(2.*S + 1.)
        d2FdV2 = numerator_2/(2.*S + 1.) - np.power(dFdV, 2.)
        return -volume*n_atoms*constants.gas_constant*temperature*d2FdV2
def _alphaK_T_mag(self, temperature, volume, params): # WARNING: numeric differentiation a.t.m.
return (self._P_mag(temperature + 0.5, volume, params)
- self._P_mag(temperature - 0.5, volume, params))
def _C_v_mag(self, temperature, volume, params):
S_a, S_b, numerator, numerator_2, n_atoms = self._spin(temperature, volume, params)
S = S_a*temperature + S_b
return n_atoms * constants.gas_constant * temperature * 4.*S_a*(S_a*temperature + 2.*S_b + 1.)/np.power(2.*S + 1., 2.)
def _aK_T(self, temperature, volume, params):
aK_T = (self._alphaK_T_ig(temperature, volume, params)
+ self._alphaK_T_el(temperature, volume, params)
+ self._alphaK_T_xs(temperature, volume, params)
+ self._alphaK_T_mag(temperature, volume, params))
return aK_T
# Pressure
def pressure(self, temperature, volume, params):
P = (self._P_ig(temperature, volume, params)
+ self._P_el(temperature, volume, params)
+ self._P_xs(temperature, volume, params)
+ self._P_mag(temperature, volume, params))
return P
    def volume(self, pressure, temperature, params):
        """
        Returns the molar volume at the given pressure and temperature
        by numerically inverting pressure(T, V): the root is first
        bracketed, then refined with Brent's method.
        """
        _delta_pressure = lambda x, pressure, temperature, params: pressure - self.pressure(temperature, x, params)
        # we need to have a sign change in [a,b] to find a zero. Let us start with a
        # conservative guess:
        args = (pressure, temperature, params)
        try:
            sol = bracket(_delta_pressure, params['V_0'],
                          1.e-2 * params['V_0'], args)
        except ValueError:
            raise Exception(
                'Cannot find a volume, perhaps you are outside of the range of validity for the equation of state?')
        return opt.brentq(_delta_pressure, sol[0], sol[1], args=args)
def isothermal_bulk_modulus(self, pressure,temperature, volume, params):
"""
Returns isothermal bulk modulus :math:`[Pa]`
"""
K_T = (self._K_T_ig(temperature, volume, params)
+ self._K_T_el(temperature, volume, params)
+ self._K_T_xs(temperature, volume, params)
+ self._K_T_mag(temperature, volume, params))
return K_T
def adiabatic_bulk_modulus(self, pressure, temperature, volume, params):
"""
Returns adiabatic bulk modulus. :math:`[Pa]`
"""
K_S = (self.isothermal_bulk_modulus(pressure,temperature, volume, params)
* ( 1. + temperature
* self.thermal_expansivity(pressure, temperature, volume, params)
* self.grueneisen_parameter(pressure, temperature, volume, params)))
return K_S
def grueneisen_parameter(self, pressure, temperature, volume, params):
"""
Returns grueneisen parameter. :math:`[unitless]`
"""
gamma = (self._aK_T(temperature, volume, params)
* volume
/ self.molar_heat_capacity_v(pressure, temperature, volume, params))
return gamma
def shear_modulus(self, pressure, temperature, volume, params):
"""
Returns shear modulus. :math:`[Pa]`
Zero for fluids
"""
return 0.
def molar_heat_capacity_v(self, pressure, temperature, volume, params):
"""
Returns heat capacity at constant volume. :math:`[J/K/mol]`
"""
C_v = (self._C_v_ig(temperature, volume, params)
+ self._C_v_el(temperature, volume, params)
+ self._C_v_xs(temperature, volume, params)
+ self._C_v_mag(temperature, volume, params))
return C_v
def molar_heat_capacity_p(self, pressure, temperature, volume, params):
"""
Returns heat capacity at constant pressure. :math:`[J/K/mol]`
"""
C_p = (self.molar_heat_capacity_v(pressure,temperature, volume, params)
* ( 1. + temperature
* self.thermal_expansivity(pressure, temperature, volume, params)
* self.grueneisen_parameter(pressure, temperature, volume, params) ))
return C_p
def thermal_expansivity(self, pressure, temperature, volume, params):
"""
Returns thermal expansivity. :math:`[1/K]`
"""
alpha = (self._aK_T(temperature, volume, params)
/ self.isothermal_bulk_modulus(0., temperature, volume, params))
return alpha
def gibbs_free_energy( self, pressure, temperature, volume, params):
"""
Returns the Gibbs free energy at the pressure and temperature of the mineral [J/mol]
"""
G = self.helmholtz_free_energy( pressure, temperature, volume, params) + pressure * volume
return G
def entropy( self, pressure, temperature, volume, params):
"""
Returns the entropy at the pressure and temperature of the mineral [J/K/mol]
"""
S = (self._S_ig(temperature, volume, params)
+ self._S_el(temperature, volume, params)
+ self._S_xs(temperature, volume, params)
+ self._S_mag(temperature, volume, params))
return S
def enthalpy( self, pressure, temperature, volume, params):
"""
Returns the enthalpy at the pressure and temperature of the mineral [J/mol]
"""
H = self.helmholtz_free_energy( pressure, temperature, volume, params) + \
temperature * self.entropy( pressure, temperature, volume, params) + \
pressure * self.volume( pressure, temperature, params)
return H
def helmholtz_free_energy( self, pressure, temperature, volume, params):
"""
Returns the Helmholtz free energy at the pressure and temperature of the mineral [J/mol]
"""
F = (self._F_ig(temperature, volume, params)
+ self._F_el(temperature, volume, params)
+ self._F_xs(temperature, volume, params)
+ self._F_mag(temperature, volume, params))
return F
def molar_internal_energy(self, pressure, temperature, volume, params):
E = self.helmholtz_free_energy(pressure, temperature, volume, params) + \
temperature*self.entropy(pressure, temperature, volume, params)
return E
def validate_parameters(self, params):
"""
Check for existence and validity of the parameters
"""
# Check that all the required keys are in the dictionary
expected_keys = ['V_0', 'T_0', 'O_theta', 'O_f', 'm', 'a', 'zeta_0', 'xi', 'Tel_0', 'eta']
for k in expected_keys:
if k not in params:
raise KeyError('params object missing parameter : ' + k)
# Sometimes the standard electronic volume is different to V_0.
# If not, make it the same.
if 'el_V_0' not in params:
params['el_V_0'] = params['V_0']
|
geodynamics/burnman
|
burnman/eos/dks_liquid.py
|
Python
|
gpl-2.0
| 24,441
|
[
"Avogadro",
"DIRAC"
] |
977e7b1886ab9d6a2d036936240c78ca208eb632c535c9f5fed4edba314073f4
|
#!/usr/bin/env python
'''
ooiservices.manage
Manage script for specific services related tasks
'''
from flask.ext.script import Manager
from ooiservices import app
from ooiservices.util.erddap_catalog import ERDDAPCatalog, ERDDAPCatalogEntry
import os
manager = Manager(app)
def add_to_catalog(catalog_path, dataset_id, netcdf_file):
    '''
    Adds a catalog entry for the dataset to the specified ERDDAP
    datasets.xml catalog, reading variable metadata from the sample
    netCDF file.
    '''
    catalog = ERDDAPCatalog(catalog_path, 'r+')
    dataset_dir = os.path.dirname(netcdf_file)
    new_entry = ERDDAPCatalogEntry(dataset_id, dataset_dir, netcdf_file)
    with new_entry:
        new_entry.read_vars()
    catalog.add_entry(new_entry)
if __name__ == '__main__':
    # Command-line entry point: parse the three positional arguments
    # and add the dataset to the catalog.
    import argparse
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('catalog_path', help='Path to the datasets.xml file')
    arg_parser.add_argument('dataset_id', help='Unique ID for the dataset')
    arg_parser.add_argument('netcdf_file', help='A sample netcdf file to parse for metadata')
    cli_args = arg_parser.parse_args()
    add_to_catalog(cli_args.catalog_path, cli_args.dataset_id, cli_args.netcdf_file)
|
ednad/ooi-ui-services
|
ooiservices/generate_catalog.py
|
Python
|
apache-2.0
| 1,064
|
[
"NetCDF"
] |
02385190330388b7ad9966c00f4e55ef2941188d0bc9c289ec85eddc7f9cede9
|
# Orca
#
# Copyright 2007-2009 Sun Microsystems Inc. and Joanmarie Diggs
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for acroread"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2007-2009 Sun Microsystems Inc. Joanmarie Diggs"
__license__ = "LGPL"
import pyatspi
import orca.debug as debug
import orca.scripts.default as default
import orca.input_event as input_event
import orca.orca as orca
import orca.rolenames as rolenames
import orca.orca_state as orca_state
import orca.settings as settings
import orca.speech as speech
from orca.orca_i18n import _ # for gettext support
from orca.orca_i18n import ngettext
########################################################################
# #
# The acroread script class. #
# #
########################################################################
class Script(default.Script):
ROLE_DOCUMENT = "Document"
ROLE_LINK = "Link"
    def __init__(self, app):
        """Creates a new script for the given application.

        Arguments:
        - app: the application to create a script for.
        """

        self.debugLevel = debug.LEVEL_FINEST

        default.Script.__init__(self, app)

        # Acroread documents are contained in an object whose rolename
        # is "Document". "Link" is also capitalized in acroread. We
        # need to make these known to Orca for speech and braille output.
        #
        rolenames.rolenames[self.ROLE_DOCUMENT] = \
            rolenames.Rolename(self.ROLE_DOCUMENT,
                               # Translators: short braille for the
                               # rolename of a document.
                               #
                               _("doc"),
                               # Translators: long braille for the
                               # rolename of a document.
                               #
                               _("Document"),
                               # Translators: spoken words for the
                               # rolename of a document.
                               #
                               _("document"))

        rolenames.rolenames[self.ROLE_LINK] = \
            rolenames.rolenames[pyatspi.ROLE_LINK]

        # To handle the multiple, identical object:text-caret-moved events
        # and possible focus events that result from a single key press
        #
        self.currentInputEvent = None

        # To handle the case when we get an object:text-caret-moved event
        # for some text we just left, but which is still showing on the
        # screen.
        #
        self.lastCaretMovedLine = None

        # To minimize chattiness related to focused events when the Find
        # toolbar is active. findToolbarName caches the Find toolbar's
        # frame name (set by isInFindToolbar) so it can be identified
        # independent of localization; preFindLine holds the caret line
        # before the search started.
        #
        self.findToolbarActive = False
        self.findToolbarName = None
        self.preFindLine = None
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for this script that can be
called by the key and braille bindings. In this particular case,
we just want to be able to define our own sayAll() method.
"""
default.Script.setupInputEventHandlers(self)
self.inputEventHandlers["sayAllHandler"] = \
input_event.InputEventHandler(
Script.sayAll,
_("Speaks entire document."))
def getListeners(self):
"""Sets up the AT-SPI event listeners for this script.
"""
listeners = default.Script.getListeners(self)
listeners["object:state-changed:checked"] = \
self.onStateChanged
listeners["object:state-changed:focused"] = \
self.onStateChanged
return listeners
    def getDocument(self, locusOfFocus):
        """ Obtains the Document object that contains the locusOfFocus.

        Arguments:
        - locusOfFocus: the locusOfFocus

        Returns: the Document object, if found.
        """

        document = None
        obj = locusOfFocus
        # NOTE(review): these walks assume an ancestor with the expected
        # role is always reached; a hierarchy without one would loop on
        # obj.parent until an AttributeError — confirm against acroread's
        # actual accessible tree.
        while obj.getRole() != pyatspi.ROLE_UNKNOWN:
            obj = obj.parent

        # This is probably it, but the parent of a text object
        # in a table also has a role of 'unknown' which in turn
        # has a parent with a role of 'unknown'.  The parent of
        # the Document object is a drawing area.
        #
        if obj.parent.getRole() == pyatspi.ROLE_DRAWING_AREA:
            document = obj
        else:
            while obj.getRole() != pyatspi.ROLE_TABLE:
                obj = obj.parent

            # For now, let's assume no nested tables! :-)
            #
            while obj.getRole() != pyatspi.ROLE_UNKNOWN:
                obj = obj.parent
            document = obj

        return document
def findNodeInDocument(self, obj):
""" Obtains the location of an object with respect to the
Document object.
Arguments:
- obj: the accessible whose location we're trying to obtain
Returns: a list that represents the object's position,
ordered from child to parent
"""
nodeList = []
document = self.getDocument(obj)
while obj != document:
nodeList.append(obj.getIndexInParent())
obj = obj.parent
return nodeList
    def getNextTextObject(self, obj, nodeList=None):
        """A generator of objects with text in the Document object.
        Acroread organizes document content into a collection of
        individual objects that contain (or have associated) text
        and drawing areas which contain such objects along with
        additional drawing areas.  The depth of the drawing areas
        in any given document or drawing area is unknown.

        Arguments:
        - obj: an Accessible that contains children
        - nodeList: a list reflecting the current object's position
          (child-to-parent order, as built by findNodeInDocument)
        """

        if nodeList:
            # pop() takes the last element, i.e. the index nearest the
            # Document root; the remaining (mutated) list is shared by
            # the recursive calls below
            index = nodeList.pop()
        else:
            index = 0

        # depth-first traversal: yield descendants before each child
        for i in range(index, obj.childCount):
            child = obj[i]
            for nextObject in self.getNextTextObject(child, nodeList):
                yield nextObject
            yield child
def getTableAndDimensions(self, obj):
"""Get the table that this text object is in, along with its
dimensions.
Arguments:
- obj: a text object within the Document object.
Returns the table that this text object cell is in, along with
the number of rows and columns.
"""
table = None
rows = 0
columns = 0
# HACK: Rows, columns, and cells are not labeled or assigned
# roles. However, the table structure and what can claim focus
# SEEM to be consistent. So let's punt until things get properly
# labeled.
#
rolesList = [pyatspi.ROLE_TEXT,
pyatspi.ROLE_UNKNOWN,
pyatspi.ROLE_UNKNOWN,
pyatspi.ROLE_TABLE]
if self.utilities.hasMatchingHierarchy(obj, rolesList):
table = obj.parent.parent.parent
rows = table.childCount
columns = table[0].childCount
return [table, rows, columns]
def getCellCoordinates(self, table, cell):
"""Get the coordinates of the specified text object with respect
to the table that contains it.
Arguments:
- obj: a text object within a table
Returns the row number and column number.
"""
# HACK: Again, these things are not labeled or assigned roles,
# so we're punting for now.
#
column = cell.parent.getIndexInParent() + 1
row = cell.parent.parent.getIndexInParent() + 1
return [row, column]
def checkForTableBoundary (self, oldFocus, newFocus):
"""Check to see if we've crossed any table boundaries,
speaking the appropriate details when we have.
Arguments:
- oldFocus: Accessible that is the old locus of focus
- newFocus: Accessible that is the new locus of focus
"""
if oldFocus == None or newFocus == None:
return
[oldFocusIsTable, oldFocusRows, oldFocusColumns] = \
self.getTableAndDimensions(oldFocus)
[newFocusIsTable, newFocusRows, newFocusColumns] = \
self.getTableAndDimensions(newFocus)
# [[[TODO: JD - It is possible to move focus into the object
# that contains the object that contains the text object. We
# need to detect this and adjust accordingly.]]]
if not oldFocusIsTable and newFocusIsTable:
# Translators: this represents the number of rows in a table.
#
rowString = ngettext("table with %d row",
"table with %d rows",
newFocusRows) % newFocusRows
# Translators: this represents the number of columns in a table.
#
colString = ngettext("%d column",
"%d columns",
newFocusColumns) % newFocusColumns
line = rowString + " " + colString
self.presentMessage(line)
elif oldFocusIsTable and not newFocusIsTable:
# We've left a table. Announce this fact.
#
self.presentMessage(_("leaving table."))
elif oldFocusIsTable and newFocusIsTable:
# See if we've crossed a cell boundary. If so, speak
# what has changed (per Mike).
#
[oldRow, oldCol] = \
self.getCellCoordinates(oldFocusIsTable, oldFocus)
[newRow, newCol] = \
self.getCellCoordinates(newFocusIsTable, newFocus)
# We can't count on being in the first/last cell
# of the new row -- only the first/last cell of
# the new row that contains data.
#
if newRow != oldRow:
# Translators: this represents the row and column we're
# on in a table.
#
line = _("row %(row)d, column %(column)d") \
% {"row": newRow, "column": newCol}
self.presentMessage(line)
elif newCol != oldCol:
# Translators: this represents the column we're
# on in a table.
#
line = _("column %d") % newCol
self.presentMessage(line)
def isInFindToolbar(self, obj):
"""Examines the current object to identify if it is in the Find
tool bar. If so, it also sets findToolbarName so that we can
identify this frame by name independent of localization.
Arguments:
- obj: an Accessible
Returns True if the object is in the Find tool bar.
"""
inFindToolbar = False
rolesList = [pyatspi.ROLE_DRAWING_AREA,
pyatspi.ROLE_DRAWING_AREA,
pyatspi.ROLE_DRAWING_AREA,
pyatspi.ROLE_TOOL_BAR,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_PANEL,
pyatspi.ROLE_FRAME]
try:
while obj.getRole() != pyatspi.ROLE_DRAWING_AREA:
obj = obj.parent
if self.utilities.hasMatchingHierarchy(obj, rolesList):
inFindToolbar = True
frame = self.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_FRAME], [])
self.findToolbarName = frame.name
except:
pass
return inFindToolbar
def onFocus(self, event):
"""Called whenever an object gets focus. Overridden in this script
because we sometimes get a focus event in addition to caret-moved
events when we change from one area in the document to another. We
want to minimize the repetition of text along with the unnecessary
speaking of object types (e.g. drawing area, text, etc.).
Arguments:
- event: the Event
"""
self.currentInputEvent = None
# We sometimes get focus events for items that don't --
# or don't yet) have focus. Ignore these.
#
if event.source.getRole() in [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_RADIO_BUTTON] \
and not event.source.getState().contains(pyatspi.STATE_FOCUSED):
return
if not event.source.getState().contains(pyatspi.STATE_SHOWING):
return
if not self.findToolbarActive \
and event.source.getRole() == pyatspi.ROLE_TEXT:
parent = event.source.parent
if parent and parent.getRole() in [pyatspi.ROLE_DRAWING_AREA,
pyatspi.ROLE_UNKNOWN]:
# We're going to get at least one (and likely several)
# caret-moved events which will cause this to get spoken,
# so skip it for now.
#
return
if event.source.getRole() == pyatspi.ROLE_DRAWING_AREA:
# A drawing area can claim focus when visually what has focus is
# a text object that is a child of the drawing area. When this
# occurs, Orca doesn't see the text. Therefore, try to figure out
# where we are based on where we were and what key we pressed.
# Then set the event.source accordingly before handing things off
# to the default script.
#
debug.println(self.debugLevel, "acroread: Drawing area bug")
lastKey, mods = self.utilities.lastKeyAndModifiers()
locusOfFocusIndex = orca_state.locusOfFocus.getIndexInParent()
childIndex = None
# [[[TODO: JD - These aren't all of the possibilities. This is
# very much a work in progress and of testing.]]]
#
if lastKey == "Up":
childIndex = locusOfFocusIndex - 1
elif lastKey == "Down":
childIndex = locusOfFocusIndex + 1
elif lastKey == "Right" or lastKey == "End":
childIndex = locusOfFocusIndex
elif lastKey == "Left" or lastKey == "Home":
childIndex = locusOfFocusIndex
if (childIndex >= 0):
child = event.source[childIndex]
event.source = child
default.Script.onFocus(self, event)
    def locusOfFocusChanged(self, event, oldLocusOfFocus, newLocusOfFocus):
        """Called when the visual object with focus changes.  Overridden
        in this script to minimize the repetition of text along with
        the unnecessary speaking of object types.

        Arguments:
        - event: if not None, the Event that caused the change
        - oldLocusOfFocus: Accessible that is the old locus of focus
        - newLocusOfFocus: Accessible that is the new locus of focus
        """

        if not newLocusOfFocus or (oldLocusOfFocus == newLocusOfFocus):
            return

        # Eliminate unnecessary chattiness related to the Find toolbar.
        #
        if self.findToolbarActive:
            # If the "new" text is the line we were on before the Find
            # started, stay put silently.
            if newLocusOfFocus.getRole() == pyatspi.ROLE_TEXT:
                newText = self.getTextLineAtCaret(newLocusOfFocus)
                if newText == self.preFindLine:
                    orca.setLocusOfFocus(
                        event, oldLocusOfFocus, notifyScript=False)
                    return

            if newLocusOfFocus.getRole() == pyatspi.ROLE_DRAWING_AREA:
                orca.setLocusOfFocus(event, oldLocusOfFocus, notifyScript=False)
                return

            # Otherwise present the new focus ourselves (speech plus
            # braille) without notifying the default script.
            utterances = \
                self.speechGenerator.generateSpeech(newLocusOfFocus)
            speech.speak(utterances)
            self.displayBrailleForObject(newLocusOfFocus)
            orca.setLocusOfFocus(event, newLocusOfFocus, notifyScript=False)
            return

        # Eliminate unnecessary chattiness in the Search panel.
        #
        if newLocusOfFocus.getRole() == pyatspi.ROLE_PUSH_BUTTON \
           and oldLocusOfFocus \
           and oldLocusOfFocus.getRole() == self.ROLE_LINK \
           and newLocusOfFocus.name == oldLocusOfFocus.name:
            return

        # Eliminate general document chattiness.
        #
        if newLocusOfFocus.getRole() in [self.ROLE_DOCUMENT,
                                         pyatspi.ROLE_DRAWING_AREA]:
            orca.setLocusOfFocus(event, newLocusOfFocus, notifyScript=False)
            return
        elif newLocusOfFocus.getRole() == self.ROLE_LINK:
            # It seems that this will be the only event we will get.  But
            # the default script's onFocus will result in unnecessary
            # verboseness:  reporting the drawing area(s) in which this link
            # is contained, speaking the periods in a table of contents, etc.
            #
            utterances = self.speechGenerator.generateSpeech(newLocusOfFocus)
            adjustedUtterances = []
            for utterance in utterances:
                adjustedUtterances.append(
                    self.utilities.adjustForRepeats(utterance))
            speech.speak(adjustedUtterances)
            self.displayBrailleForObject(newLocusOfFocus)
            orca.setLocusOfFocus(event, newLocusOfFocus, notifyScript=False)
            return

        default.Script.locusOfFocusChanged(self, event,
                                           oldLocusOfFocus, newLocusOfFocus)
    def onCaretMoved(self, event):
        """Called whenever the caret moves.  Overridden in this script
        because we want to minimize the repetition of text and the speaking
        of erroneous events.

        Arguments:
        - event: the Event
        """

        lastInputEvent = orca_state.lastInputEvent
        lastKey, mods = self.utilities.lastKeyAndModifiers()

        # A single keypress usually results in multiple, not necessarily
        # identical, caret-moved events. Check to see if the events are
        # identical or very closely timed (time chosen based on testing).
        #
        if self.currentInputEvent and lastInputEvent:
            timeDiff = abs(self.currentInputEvent.time - lastInputEvent.time)
            if self.currentInputEvent == lastInputEvent or timeDiff < 0.2:
                return

        # Changing pages sometimes results in a caret-moved event for
        # text that may or may NOT have had focus recently. Sometimes
        # we luck out and it's not showing.
        #
        if not event.source.getState().contains(pyatspi.STATE_SHOWING):
            return

        # Other times, it's showing, but happens to be the text we just
        # left. Since this SEEMS limited to page up/page down, let's be
        # conservative until we have evidence to the contrary.
        #
        textLine = self.getTextLineAtCaret(event.source)
        isOldLine = textLine == self.lastCaretMovedLine and \
                    (lastKey == "Page_Down" or lastKey == "Page_Up")
        if isOldLine:
            # Clear the stored line so that the next occurrence of the
            # same line is not suppressed as well.
            self.lastCaretMovedLine = None
            return
        else:
            self.lastCaretMovedLine = textLine

        # [[[TODO: JD - Sometimes it's showing AND we didn't just leave
        # it. This also seems to occur sometimes with the Find toolbar.]]]

        # Remember this event so the duplicate/closely-timed check above
        # can compare against it on the next caret-moved event.
        self.currentInputEvent = orca_state.lastInputEvent

        # NOTE(review): checkForTableBoundary is defined elsewhere in this
        # script; presumably it announces entering/leaving a table -- confirm.
        self.checkForTableBoundary(orca_state.locusOfFocus, event.source)
        default.Script.onCaretMoved(self, event)
    def onStateChanged(self, event):
        """Called whenever an object's state changes.

        Arguments:
        - event: the Event
        """

        if event.type.startswith("object:state-changed:checked") \
           and event.source.getRole() == pyatspi.ROLE_RADIO_BUTTON:
            # Radio buttons in the Search panel are not automatically
            # selected when you arrow to them. You have to press Space
            # to select the current radio button. Watch for this.
            #
            self.visualAppearanceChanged(event, event.source)
            return

        elif event.type.startswith("object:state-changed:focused") \
             and event.detail1 == 1:
            # detail1 == 1 means the object gained (rather than lost) focus.
            if event.source.getRole() == pyatspi.ROLE_PUSH_BUTTON:
                # Try to minimize chattiness in the Search panel: present
                # the button ourselves and suppress the default script's
                # focus handling.
                #
                utterances = \
                    self.speechGenerator.generateSpeech(event.source)
                speech.speak(utterances)
                self.displayBrailleForObject(event.source)
                orca.setLocusOfFocus(event, event.source, notifyScript=False)
                return

            elif event.source.getRole() == pyatspi.ROLE_TEXT:
                # There's an excellent chance that the Find toolbar just
                # gained focus. Check.
                #
                if self.isInFindToolbar(event.source):
                    self.findToolbarActive = True

        # Everything else falls through to the default handler.
        default.Script.onStateChanged(self, event)
def onWindowDeactivated(self, event):
"""Called whenever a toplevel window is deactivated. Overridden
in this script to deal with significant chattiness surrounding
the use of the Find toolbar.
Arguments:
- event: the Event
"""
locusOfFocus = orca_state.locusOfFocus
if event.source.name == self.findToolbarName:
self.findToolbarActive = False
else:
try:
locusOfFocus.queryText()
except NotImplementedError:
pass
else:
self.preFindLine = self.getTextLineAtCaret(locusOfFocus)
default.Script.onWindowDeactivated(self, event)
def textLinesFromNodeList(self, obj, nodeList=None):
"""A generator that can be used to iterate over each line of a
text object, starting at the caret offset. Overridden here
because we are not getting any RELATION_FLOWS_TO from acroread.
Arguments:
- obj: An Accessible that contains children. Initially, the
document itself.
- nodeList: A list reflecting the position of the current text object
"""
for textObj in self.getNextTextObject(obj, nodeList):
for [context, acss] in default.Script.textLines(self, textObj):
yield [context, acss]
def sayAll(self, inputEvent):
"""Speaks the contents of the document beginning with the present
location. Overridden in this script because the default sayAll
only speaks the current text object.
Arguments:
- inputEvent: if not None, the input event that caused this action.
"""
if orca_state.locusOfFocus:
nodeList = self.findNodeInDocument(orca_state.locusOfFocus)
document = self.getDocument(orca_state.locusOfFocus)
# Note: We get the correct progress callback, but acroread
# doesn't seem to respond to setCaretOffset, so we cannot
# update our location when sayAll is interrupted or finished.
#
speech.sayAll(self.textLinesFromNodeList(document, nodeList),
self.__sayAllProgressCallback)
else:
default.Script.sayAll(self, inputEvent)
return True
    def sayWord(self, obj):
        """Speaks the word at the caret.  Overridden here because we seem
        to be getting the details of the word we just left when moving
        forward with Control Right Arrow.  Control Left Arrow works as
        expected with the default script with the exception of crossing
        over a blank line (which sometimes causes the word with focus to
        be repeated).  Both problems are addressed here.

        Arguments:
        - obj: an Accessible object that implements the AccessibleText
          interface
        """

        # Only apply the workarounds inside drawing areas / unknown-role
        # parents (i.e. acroread document content); everything else gets
        # the default behavior.
        if not (obj.parent.getRole() in [pyatspi.ROLE_DRAWING_AREA,
                                         pyatspi.ROLE_UNKNOWN]):
            default.Script.sayWord(self, obj)
        else:
            text = obj.queryText()
            offset = text.caretOffset
            lastKey, mods = self.utilities.lastKeyAndModifiers()
            if lastKey == "Right":
                # Moving forward: the word at the caret may still be the
                # word we just left, so also fetch the word after it.
                penultimateWord = orca_state.lastWord
                [lastWord, startOffset, endOffset] = \
                    text.getTextAtOffset(offset,
                                         pyatspi.TEXT_BOUNDARY_WORD_START)
                [word, startOffset, endOffset] = \
                    text.getTextAfterOffset(endOffset+1,
                                            pyatspi.TEXT_BOUNDARY_WORD_START)
                if len(penultimateWord) > 0:
                    lastCharPW = penultimateWord[len(penultimateWord) - 1]
                    if lastCharPW == "\n":
                        # We crossed a blank line; announce it audibly.
                        voice = self.voices[settings.DEFAULT_VOICE]
                        speech.speakCharacter("\n", voice)
                if penultimateWord != lastWord:
                    word = lastWord
            if lastKey == "Left":
                lastWord = orca_state.lastWord
                [word, startOffset, endOffset] = \
                    text.getTextAtOffset(offset,
                                         pyatspi.TEXT_BOUNDARY_WORD_START)
                if len(word) > 0:
                    lastChar = word[len(word) - 1]
                    if lastChar == "\n" and lastWord != word:
                        voice = self.voices[settings.DEFAULT_VOICE]
                        speech.speakCharacter("\n", voice)
                if lastWord == word:
                    # Avoid repeating the same word when crossing a blank
                    # line moving backwards.
                    return
            # NOTE(review): if lastKey is neither "Right" nor "Left",
            # ``word``/``startOffset``/``endOffset`` are never assigned and
            # the lines below would raise; presumably this handler is only
            # reached for those keys -- confirm.
            if self.utilities.linkIndex(obj, offset) >= 0:
                voice = self.voices[settings.HYPERLINK_VOICE]
            elif word.decode("UTF-8").isupper():
                # str.decode() here implies Python 2 byte strings.
                voice = self.voices[settings.UPPERCASE_VOICE]
            else:
                voice = self.voices[settings.DEFAULT_VOICE]
            word = self.utilities.adjustForRepeats(word)
            orca_state.lastWord = word
            speech.speak(word, voice)
            self.speakTextSelectionState(obj, startOffset, endOffset)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/acroread/script.py
|
Python
|
gpl-3.0
| 27,288
|
[
"ORCA"
] |
0b3ee5895a824fedddf1b3e1a3508b12d0c45f0fbd42cacf25af15326aef2b40
|
"""
Utilities to execute a function with a given proxy.
executeWithUserProxy decorator example usage::
@executeWithUserProxy
def testFcn( x, i, kw = 'qwerty' ):
print "args", x, i
print "kwargs", kw
print os.environ.get( 'X509_USER_PROXY' )
return S_OK()
...
result = testFcn( 1.0, 1, kw = 'asdfghj', proxyUserName = 'atsareg', proxyUserGroup = 'biomed_user' )
"""
import os
from DIRAC import gConfig, gLogger, S_ERROR
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOMSAttributeForGroup, getDNForUsername
from DIRAC.Core.Utilities.LockRing import LockRing
__RCSID__ = "$Id$"
def executeWithUserProxy( fcn ):
  """
  Decorator function to execute with a temporary user proxy

  :param fcn: function to be decorated
  :return: the result of the fcn execution

  In order to be executed with a user proxy, the function must be called with the
  following parameters:

  :param str proxyUserName: the user name of the proxy to be used
  :param str proxyUserGroup: the user group of the proxy to be used
  :param str proxyUserDN: the user DN of the proxy to be used
  :param str proxyWithVOMS: optional flag to dress or not the user proxy with VOMS extension ( default True )
  :param str proxyFilePath: optional file location for the temporary proxy
  :param bool executionLock: flag to execute with a lock for the time of user proxy application ( default False )
  """
  def wrapped_fcn( *args, **kwargs ):
    # Pop the proxy-control keyword arguments so they are not forwarded
    # to the decorated function.
    userName = kwargs.pop( 'proxyUserName', '' )
    userDN = kwargs.pop( 'proxyUserDN', '' )
    userGroup = kwargs.pop( 'proxyUserGroup', '' )
    vomsFlag = kwargs.pop( 'proxyWithVOMS', True )
    proxyFilePath = kwargs.pop( 'proxyFilePath', False )
    executionLockFlag = kwargs.pop( 'executionLock', False )
    if executionLockFlag:
      executionLock = LockRing().getLock( '_UseUserProxy_', recursive = True )

    if ( userName or userDN ) and userGroup:
      # Setup user proxy: remember the current proxy so it can be
      # restored in the finally clause below.
      originalUserProxy = os.environ.get( 'X509_USER_PROXY' )
      if userDN:
        userDNs = [userDN]
      else:
        result = getDNForUsername( userName )
        if not result[ 'OK' ]:
          return result
        userDNs = result['Value'] # a same user may have more than one DN
      vomsAttr = ''
      if vomsFlag:
        vomsAttr = getVOMSAttributeForGroup( userGroup )
      result = getProxy( userDNs, userGroup, vomsAttr, proxyFilePath )
      if not result['OK']:
        return result
      if executionLockFlag:
        executionLock.acquire()
      # NOTE(review): if anything between acquire() and the try block
      # below raises, the lock is never released -- confirm acceptable.
      proxyFile = result['Value']
      os.environ['X509_USER_PROXY'] = proxyFile
      # Check if the caller is executing with the host certificate
      useServerCertificate = gConfig.useServerCertificate()
      if useServerCertificate:
        gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
      try:
        return fcn( *args, **kwargs )
      except Exception as lException:  # pylint: disable=broad-except
        # Convert any exception raised by fcn into a DIRAC S_ERROR.
        value = ','.join( [str( arg ) for arg in lException.args] )
        exceptType = lException.__class__.__name__
        return S_ERROR( "Exception - %s: %s" % ( exceptType, value ) )
      finally:
        # Restore the default host certificate usage if necessary
        if useServerCertificate:
          gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
        # Restore (or remove) the X509_USER_PROXY environment variable.
        if originalUserProxy:
          os.environ['X509_USER_PROXY'] = originalUserProxy
        else:
          os.environ.pop( 'X509_USER_PROXY' )
        if executionLockFlag:
          executionLock.release()
    else:
      # No proxy substitution requested
      return fcn( *args, **kwargs )
  return wrapped_fcn
def getProxy( userDNs, userGroup, vomsAttr, proxyFilePath ):
  """ Download a proxy to file, trying each of the given DNs in turn and
      returning on the first success.

  :param list userDNs: candidate DNs for the user ( a user may have several )
  :param str userGroup: DIRAC group of the proxy
  :param str vomsAttr: VOMS attribute; if non-empty, a VOMS-dressed proxy is requested
  :param proxyFilePath: file location for the downloaded proxy ( or False )
  :return: S_OK with the proxy file path, or S_ERROR if no DN worked
  """
  for dn in userDNs:
    if vomsAttr:
      result = gProxyManager.downloadVOMSProxyToFile( dn, userGroup,
                                                      requiredVOMSAttribute = vomsAttr,
                                                      filePath = proxyFilePath,
                                                      requiredTimeLeft = 3600,
                                                      cacheTime = 3600 )
    else:
      result = gProxyManager.downloadProxyToFile( dn, userGroup,
                                                  filePath = proxyFilePath,
                                                  requiredTimeLeft = 3600,
                                                  cacheTime = 3600 )
    if result['OK']:
      return result
    # Log the failure for this DN and move on to the next one.
    gLogger.error( "Can't download %sproxy " % ( 'VOMS' if vomsAttr else '' ),
                   "of '%s', group %s to file: " % ( dn, userGroup ) + result['Message'] )

  # If proxy not found for any DN, return an error
  return S_ERROR( "Can't download proxy" )
def executeWithoutServerCertificate( fcn ):
  """
  Decorator function to execute a call without the server certificate.

  This shows useful in Agents when we want to call a DIRAC service
  and use the shifter proxy (for example Write calls to the DFC).
  The method does not fetch any proxy, it assumes it is already
  set up in the environment.

  Note that because it modifies the configuration for all threads,
  it uses a lock (the same as executeWithUserProxy).

  Potential problem:
    * there is a lock for this particular method, but any other method
      changing the UseServerCertificate value can clash with this.

  :param fcn: function to be decorated
  :return: the result of the fcn execution
  """
  def wrapped_fcn( *args, **kwargs ):
    # Serialize access to the shared UseServerCertificate setting.
    lock = LockRing().getLock( '_UseUserProxy_', recursive = True )
    lock.acquire()

    # Remember whether the caller was using the host certificate so the
    # original setting can be restored afterwards.
    serverCertWasOn = gConfig.useServerCertificate()
    if serverCertWasOn:
      gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
    try:
      return fcn( *args, **kwargs )
    except Exception as exc:  # pylint: disable=broad-except
      # Convert any exception raised by fcn into a DIRAC S_ERROR.
      details = ','.join( str( a ) for a in exc.args )
      return S_ERROR( "Exception - %s: %s" % ( exc.__class__.__name__, details ) )
    finally:
      if serverCertWasOn:
        gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
      lock.release()

  return wrapped_fcn
|
hgiemza/DIRAC
|
Core/Utilities/Proxy.py
|
Python
|
gpl-3.0
| 6,847
|
[
"DIRAC"
] |
a45adcc1191feab9371b2c16bb93ffb40aa73e9e628276963bbcd5329811e299
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import numpy as np
import pandas as pd
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity._phylogenetic import _nodes_by_counts
def _validate_counts_vector(counts, suppress_cast=False):
"""Validate and convert input to an acceptable counts vector type.
Note: may not always return a copy of `counts`!
"""
counts = np.asarray(counts)
if not np.all(np.isreal(counts)):
raise ValueError("Counts vector must contain real-valued entries.")
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
elif (counts < 0).any():
raise ValueError("Counts vector cannot contain negative values.")
return counts
def _validate_counts_matrix(counts, ids=None, suppress_cast=False):
results = []
# handle case of where counts is a single vector by making it a matrix.
# this has to be done before forcing counts into an ndarray because we
# don't yet know that all of the entries are of equal length
if isinstance(counts, pd.core.frame.DataFrame):
if ids is not None and len(counts.index) != len(ids):
raise ValueError(
"Number of rows in ``counts``"
" must be equal to number of provided ``ids``."
)
return np.asarray(counts)
else:
if len(counts) == 0 or not isinstance(counts[0], collections.Iterable):
counts = [counts]
counts = np.asarray(counts)
if counts.ndim > 2:
raise ValueError(
"Only 1-D and 2-D array-like objects can be provided "
"as input. Provided object has %d dimensions." %
counts.ndim)
if ids is not None and len(counts) != len(ids):
raise ValueError(
"Number of rows in ``counts`` must be equal "
"to number of provided ``ids``."
)
lens = []
for v in counts:
results.append(_validate_counts_vector(v, suppress_cast))
lens.append(len(v))
if len(set(lens)) > 1:
raise ValueError(
"All rows in ``counts`` must be of equal length."
)
return np.asarray(results)
def _validate_otu_ids_and_tree(counts, otu_ids, tree):
    """Validate that ``counts``, ``otu_ids`` and ``tree`` are mutually
    consistent for phylogenetic diversity calculations.

    Raises ``ValueError`` for duplicated OTU ids, a counts/otu_ids length
    mismatch, a trivial or apparently unrooted tree, or missing branch
    lengths; raises ``DuplicateNodeError`` for duplicated tip names and
    ``MissingNodeError`` for OTU ids absent from the tree's tips.
    """
    len_otu_ids = len(otu_ids)
    set_otu_ids = set(otu_ids)
    if len_otu_ids != len(set_otu_ids):
        raise ValueError("``otu_ids`` cannot contain duplicated ids.")
    if len(counts) != len_otu_ids:
        raise ValueError("``otu_ids`` must be the same length as ``counts`` "
                         "vector(s).")
    if len(tree.root().children) == 0:
        raise ValueError("``tree`` must contain more than just a root node.")
    if len(tree.root().children) > 2:
        # this is an imperfect check for whether the tree is rooted or not.
        # can this be improved?
        raise ValueError("``tree`` must be rooted.")

    # Single traversal collects both checks:
    # all nodes (except the root node) have corresponding branch lengths
    # all tip names in tree are unique
    # all otu_ids correspond to tip names in tree
    branch_lengths = []
    tip_names = []
    for e in tree.traverse():
        if not e.is_root():
            branch_lengths.append(e.length)
        if e.is_tip():
            tip_names.append(e.name)
    set_tip_names = set(tip_names)
    if len(tip_names) != len(set_tip_names):
        raise DuplicateNodeError("All tip names must be unique.")
    # A branch length of None means the length is unset, which is invalid
    # for non-root nodes.
    if np.array([branch is None for branch in branch_lengths]).any():
        raise ValueError("All non-root nodes in ``tree`` must have a branch "
                         "length.")
    missing_tip_names = set_otu_ids - set_tip_names
    if missing_tip_names != set():
        n_missing_tip_names = len(missing_tip_names)
        raise MissingNodeError("All ``otu_ids`` must be present as tip names "
                               "in ``tree``. ``otu_ids`` not corresponding to "
                               "tip names (n=%d): %s" %
                               (n_missing_tip_names,
                                " ".join(missing_tip_names)))
def _vectorize_counts_and_tree(counts, otu_ids, tree):
    """ Index tree and convert counts to np.array in corresponding order

    Returns a tuple of (counts_by_node transposed, the tree index produced
    by ``tree.to_array``, and the per-node branch-length array from that
    index).  Missing branch lengths are treated as 0.0 via
    ``nan_length_value``.
    """
    tree_index = tree.to_array(nan_length_value=0.0)
    otu_ids = np.asarray(otu_ids)
    # Promote a single counts vector to a one-row matrix so the Cython
    # helper always sees 2-D input.
    counts = np.atleast_2d(counts)
    counts_by_node = _nodes_by_counts(counts, otu_ids, tree_index)
    branch_lengths = tree_index['length']

    # branch_lengths is just a reference to the array inside of tree_index,
    # but it's used so much that it's convenient to just pull it out here.
    return counts_by_node.T, tree_index, branch_lengths
def _get_phylogenetic_kwargs(counts, **kwargs):
try:
otu_ids = kwargs.pop('otu_ids')
except KeyError:
raise ValueError("``otu_ids`` is required for phylogenetic diversity "
"metrics.")
try:
tree = kwargs.pop('tree')
except KeyError:
raise ValueError("``tree`` is required for phylogenetic diversity "
"metrics.")
return otu_ids, tree, kwargs
|
gregcaporaso/scikit-bio
|
skbio/diversity/_util.py
|
Python
|
bsd-3-clause
| 5,494
|
[
"scikit-bio"
] |
35fc79021ff22404f2a3f1e56bad16da1338373b9967a4ebab5949b1299096a1
|
# coding: utf8
{
' (leave empty to detach account)': ' (leave empty to detach account)',
' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': ' Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
' by ': ' by ',
' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.': ' is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities.',
' on ': ' on ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'# of Houses Damaged': '# of Houses Damaged',
'# of Houses Destroyed': 'Số căn nhà bị phá hủy',
'# of International Staff': '# of International Staff',
'# of National Staff': '# of National Staff',
'# of People Affected': 'Số người bị ảnh hưởng',
'# of People Deceased': '# of People Deceased',
'# of People Injured': '# of People Injured',
'# of Vehicles': '# of Vehicles',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s rows deleted',
'%s rows updated': '%s rows updated',
'(Constraints Only)': '(Constraints Only)',
') & then click on the map below to adjust the Lat/Lon fields:': ') & then click on the map below to adjust the Lat/Lon fields:',
'* Required Fields': '* Required Fields',
'0-15 minutes': '0-15 minutes',
'1 Assessment': '1 Assessment',
'1 location, shorter time, can contain multiple Tasks': '1 location, shorter time, can contain multiple Tasks',
'1-3 days': '1-3 days',
'1. Fill the necessary fields in BLOCK letters.': '1. Fill the necessary fields in BLOCK letters.',
'15-30 minutes': '15-30 phút',
'2 different options are provided here currently:': '2 different options are provided here currently:',
'2. Always use one box per letter and leave one box space to seperate words.': '2. Always use one box per letter and leave one box space to seperate words.',
'2x4 Car': '2x4 Car',
'30-60 minutes': '30-60 minutes',
'4-7 days': '4-7 days',
'4x4 Car': '4x4 Car',
'8-14 days': '8-14 ngày',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'tài liệu tham khảo như file, URL hay thông tin liên hệ đế xác nhận dữ liệu này.Bạn có thể gõ một vài ký tự đầu của tên tài liệu để kết nối tới tài liệu có sẵn',
'A Warehouse is a physical place to store items.': 'A Warehouse is a physical place to store items.',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': 'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.',
'A brief description of the group (optional)': 'A brief description of the group (optional)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'A file downloaded from a GPS containing a series of geographic points in XML format.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.',
'A library of digital resources, such as photos, documents and reports': 'A library of digital resources, such as photos, documents and reports',
'A place within a Site like a Shelf, room, bin number etc.': 'Một nơi trên site như số ngăn ,số phòng,số thùng v.v',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'Upload ảnh chụp vị trí hoặc tài liệu bổ sung chứa thông tin bổ sung về trang web tại đây',
'A survey series with id %s does not exist. Please go back and create one.': 'A survey series with id %s does not exist. Please go back and create one.',
'ABOUT': 'ABOUT',
'ABOUT THIS MODULE': 'Giới thiệu Module này',
'ACCESS DATA': 'ACCESS DATA',
'ANY': 'ANY',
'API is documented here': 'API is documented here',
'Ability to Fill Out Surveys': 'Ability to Fill Out Surveys',
'Ability to customize the list of details tracked at a Shelter': 'Ability to customize the list of details tracked at a Shelter',
'Ability to customize the list of human resource tracked at a Shelter': 'Khả năng tùy chỉnh danh sách nguồn nhân lực theo dõi tại nơi cư trú',
'Ability to customize the list of important facilities needed at a Shelter': 'Khả năng tùy chỉnh danh sách các điều kiện quan trọng cần thiết tại một cơ sở cư trú',
'Ability to track partial fulfillment of the request': 'Ability to track partial fulfillment of the request',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Ability to view Results of Completed and/or partially filled out Surveys',
'About': 'About',
'About Sahana': 'About Sahana',
'About Sahana Eden': 'About Sahana Eden',
'About this module': 'About this module',
'Access denied': 'Access denied',
'Accessibility of Affected Location': 'Accessibility of Affected Location',
'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Account registered, however registration is still pending approval - please wait until confirmation received.',
'Acronym': 'Acronym',
"Acronym of the organization's name, eg. IFRC.": "Acronym of the organization's name, eg. IFRC.",
'Actionable': 'Actionable',
'Actionable by all targeted recipients': 'Actionable by all targeted recipients',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>',
'Actioned?': 'Actioned?',
'Active Problems': 'Active Problems',
'Activities': 'Activities',
'Activities matching Assessments:': 'Activities matching Assessments:',
'Activities of boys 13-17yrs before disaster': 'Activities of boys 13-17yrs before disaster',
'Activities of boys 13-17yrs now': 'Activities of boys 13-17yrs now',
'Activities of boys <12yrs before disaster': 'Activities of boys <12yrs before disaster',
'Activities of boys <12yrs now': 'Activities of boys <12yrs now',
'Activities of girls 13-17yrs before disaster': 'Activities of girls 13-17yrs before disaster',
'Activities of girls 13-17yrs now': 'Activities of girls 13-17yrs now',
'Activities of girls <12yrs before disaster': 'Activities of girls <12yrs before disaster',
'Activities of girls <12yrs now': 'Activities of girls <12yrs now',
'Activities:': 'Activities:',
'Activity': 'Activity',
'Activity Added': 'Activity Added',
'Activity Deleted': 'Activity Deleted',
'Activity Details': 'Chi tiết Hoạt động',
'Activity Report': 'Activity Report',
'Activity Reports': 'Activity Reports',
'Activity Type': 'Activity Type',
'Activity Updated': 'Activity Updated',
'Add': 'Thêm',
'Add Activity': 'Thêm hoạt động',
'Add Activity Report': 'Add Activity Report',
'Add Activity Type': 'Thêm loại hoạt động',
'Add Address': 'Add Address',
'Add Aid Request': 'Thêm yêu cầu cứu trợ',
'Add Assessment': 'Add Assessment',
'Add Assessment Summary': 'Add Assessment Summary',
'Add Baseline': 'Add Baseline',
'Add Baseline Type': 'Add Baseline Type',
'Add Bed Type': 'Add Bed Type',
'Add Bin Type': 'Add Bin Type',
'Add Bins': 'Thêm Bin',
'Add Budget': 'Add Budget',
'Add Bundle': 'Add Bundle',
'Add Catalog': 'Thêm Catalog',
'Add Catalog Item': 'Add Catalog Item',
'Add Catalog.': 'Add Catalog.',
'Add Category': 'Thêm nhóm',
'Add Category<>Sub-Category<>Catalog Relation': 'Add Category<>Sub-Category<>Catalog Relation',
'Add Cholera Treatment Capability Information': 'Add Cholera Treatment Capability Information',
'Add Cluster': 'Thêm cụm',
'Add Cluster Subsector': 'Add Cluster Subsector',
'Add Config': 'Add Config',
'Add Contact': 'Thêm thông tin liên lạc',
'Add Contact Information': 'Thêm thông tin liên lạc',
'Add Disaster Victims': 'Add Disaster Victims',
'Add Distribution': 'Add Distribution',
'Add Distribution.': 'Add Distribution.',
'Add Donor': 'Thêm tên người quyên góp vào danh sách',
'Add Feature Class': 'Add Feature Class',
'Add Feature Layer': 'Thêm lớp đặc tính',
'Add Flood Report': 'Add Flood Report',
'Add Group': 'Thêm nhóm',
'Add Group Member': 'Add Group Member',
'Add Hospital': 'Thêm Bệnh viện',
'Add Identification Report': 'Add Identification Report',
'Add Identity': 'Thêm thông tin định danh',
'Add Image': 'Thêm ảnh',
'Add Impact': 'Add Impact',
'Add Impact Type': 'Add Impact Type',
'Add Incident': 'Add Incident',
'Add Incident Report': 'Thêm Báo cáo sự việc',
'Add Inventory Item': 'Add Inventory Item',
'Add Inventory Store': 'Add Inventory Store',
'Add Item': 'Add Item',
'Add Item (s)': 'Add Item (s)',
'Add Item Catalog': 'Add Item Catalog',
'Add Item Catalog ': 'Add Item Catalog ',
'Add Item Catalog Category ': 'Thêm tiêu chí cho catalog hàng hóa',
'Add Item Category': 'Add Item Category',
'Add Item Packet': 'Add Item Packet',
'Add Item Sub-Category': 'Add Item Sub-Category',
'Add Item to Shipment': 'Add Item to Shipment',
'Add Key': 'Add Key',
'Add Kit': 'Thêm Kit',
'Add Layer': 'Thêm lớp',
'Add Location': 'Add Location',
'Add Locations': 'Thêm địa điểm mới',
'Add Log Entry': 'Add Log Entry',
'Add Member': 'Thêm thành viên',
'Add Membership': 'Add Membership',
'Add Message': 'Thêm Tin nhắn',
'Add Need': 'Add Need',
'Add Need Type': 'Add Need Type',
'Add New': 'Add New',
'Add New Activity': 'Add New Activity',
'Add New Address': 'Thêm Địa chỉ mới',
'Add New Aid Request': 'Thêm yêu cầu cứu trợ mới',
'Add New Assessment': 'Add New Assessment',
'Add New Assessment Summary': 'Add New Assessment Summary',
'Add New Baseline': 'Add New Baseline',
'Add New Baseline Type': 'Add New Baseline Type',
'Add New Bin': 'Add New Bin',
'Add New Bin Type': 'Add New Bin Type',
'Add New Budget': 'Add New Budget',
'Add New Bundle': 'Add New Bundle',
'Add New Catalog Item': 'Add New Catalog Item',
'Add New Cluster': 'Thêm cụm mới',
'Add New Cluster Subsector': 'Add New Cluster Subsector',
'Add New Config': 'Thêm cấu hình mới',
'Add New Contact': 'Thêm đầu mối liên lạc mới',
'Add New Distribution': 'Add New Distribution',
'Add New Distribution Item': 'Add New Distribution Item',
'Add New Document': 'Thêm Tài liệu mới',
'Add New Donor': 'Thêm Người quyên góp',
'Add New Entry': 'Add New Entry',
'Add New Feature Class': 'Add New Feature Class',
'Add New Feature Layer': 'Add New Feature Layer',
'Add New Flood Report': 'Thêm báo cáo lũ lụt mới',
'Add New Group': 'Thêm nhóm mới',
'Add New Hospital': 'Thêm Bệnh viện mới',
'Add New Identity': 'Thêm thông tin nhận dạng mới',
'Add New Image': 'Thêm ảnh mới',
'Add New Impact': 'Add New Impact',
'Add New Impact Type': 'Add New Impact Type',
'Add New Incident': 'Thêm sự kiện',
'Add New Incident Report': 'Add New Incident Report',
'Add New Inventory Item': 'Add New Inventory Item',
'Add New Inventory Store': 'Add New Inventory Store',
'Add New Item': 'Add New Item',
'Add New Item Catalog': 'Add New Item Catalog',
'Add New Item Catalog Category': 'Add New Item Catalog Category',
'Add New Item Category': 'Add New Item Category',
'Add New Item Packet': 'Add New Item Packet',
'Add New Item Sub-Category': 'Add New Item Sub-Category',
'Add New Item to Kit': 'Add New Item to Kit',
'Add New Key': 'Thêm Key mới ',
'Add New Kit': 'Thêm Kit mới',
'Add New Layer': 'Add New Layer',
'Add New Location': 'Add New Location',
'Add New Log Entry': 'Add New Log Entry',
'Add New Marker': 'Add New Marker',
'Add New Member': 'Thêm thành viên mới',
'Add New Membership': 'Add New Membership',
'Add New Metadata': 'Add New Metadata',
'Add New Need': 'Add New Need',
'Add New Need Type': 'Add New Need Type',
'Add New Note': 'Add New Note',
'Add New Office': 'Thêm văn phòng mới',
'Add New Organization': 'Thêm một tô chức mới',
'Add New Photo': 'Add New Photo',
'Add New Position': 'Add New Position',
'Add New Problem': 'Thêm vấn đề mới',
'Add New Project': 'Add New Project',
'Add New Projection': 'Add New Projection',
'Add New Rapid Assessment': 'Add New Rapid Assessment',
'Add New Received Item': 'Add New Received Item',
'Add New Record': 'Add New Record',
'Add New Report': 'Thêm báo cáo mới',
'Add New Request': 'Thêm yêu cầu mới',
'Add New Request Item': 'Thêm yêu cầu hàng hóa mới',
'Add New Resource': 'Thêm nguồn lực mới',
'Add New Response': 'Thêm phản hồi mới',
'Add New River': 'Add New River',
'Add New Role': 'Thêm vai trò mới',
'Add New Role to User': 'Gán vai trò mới cho người dùng',
'Add New Sector': 'Add New Sector',
'Add New Sent Item': 'Add New Sent Item',
'Add New Setting': 'Add New Setting',
'Add New Shelter': 'Thêm Nơi cư trú mới',
'Add New Shelter Service': 'Add New Shelter Service',
'Add New Shelter Type': 'Add New Shelter Type',
'Add New Shipment to Send': 'Add New Shipment to Send',
'Add New Site': 'Add New Site',
'Add New Skill': 'Thêm kỹ năng mới',
'Add New Skill Type': 'Add New Skill Type',
'Add New Solution': 'Add New Solution',
'Add New Staff': 'Add New Staff',
'Add New Staff Type': 'Add New Staff Type',
'Add New Storage Location': 'Thêm Vị trí kho lưu trữ mới',
'Add New Survey Answer': 'Add New Survey Answer',
'Add New Survey Question': 'Add New Survey Question',
'Add New Survey Section': 'Add New Survey Section',
'Add New Survey Series': 'Add New Survey Series',
'Add New Survey Template': 'Thêm mẫu khảo sát mới',
'Add New Task': 'Thêm một công việc mới',
'Add New Team': 'Thêm đội mới',
'Add New Theme': 'Add New Theme',
'Add New Ticket': 'Add New Ticket',
'Add New Track': 'Add New Track',
'Add New Unit': 'Thêm đơn vị mới',
'Add New User': 'Thêm người dùng mới',
'Add New User to Role': 'Add New User to Role',
'Add New Warehouse': 'Add New Warehouse',
'Add New Warehouse Item': 'Add New Warehouse Item',
'Add Note': 'Add Note',
'Add Office': 'Thêm thông tin văn phòng',
'Add Organization': 'Thêm Tổ chức',
'Add Peer': 'Add Peer',
'Add Person': 'Thêm cá nhân',
'Add Personal Effects': 'Add Personal Effects',
'Add Photo': 'Add Photo',
'Add Position': 'Add Position',
'Add Problem': 'Add Problem',
'Add Project': 'Thêm dự án',
'Add Projection': 'Add Projection',
'Add Question': 'Add Question',
'Add Rapid Assessment': 'Add Rapid Assessment',
'Add Recipient': 'Thêm người nhận viện trợ',
'Add Recipient Site': 'Add Recipient Site',
'Add Recipient Site.': 'Add Recipient Site.',
'Add Record': 'Add Record',
'Add Recovery Report': 'Add Recovery Report',
'Add Reference Document': 'Add Reference Document',
'Add Report': 'Add Report',
'Add Request': 'Thêm yêu cầu',
'Add Request Detail': 'thêm chi tiết yêu cầu',
'Add Request Item': 'Thêm yêu cầu hàng hóa',
'Add Resource': 'Thêm tài nguyên',
'Add Response': 'Add Response',
'Add River': 'Add River',
'Add Role': 'Add Role',
'Add Section': 'Add Section',
'Add Sector': 'Thêm lĩnh vực',
'Add Sender Organization': 'Thêm tổ chức gửi',
'Add Sender Site': 'Add Sender Site',
'Add Sender Site.': 'Add Sender Site.',
'Add Service Profile': 'Add Service Profile',
'Add Setting': 'Add Setting',
'Add Shelter': 'Add Shelter',
'Add Shelter Service': 'Add Shelter Service',
'Add Shelter Type': 'Add Shelter Type',
'Add Shipment Transit Log': 'Add Shipment Transit Log',
'Add Shipment/Way Bills': 'Add Shipment/Way Bills',
'Add Site': 'Add Site',
'Add Skill': 'Thêm kỹ năng',
'Add Skill Type': 'Thêm loại kỹ năng',
'Add Skill Types': 'Thêm loại kỹ năng',
'Add Solution': 'Add Solution',
'Add Staff': 'Add Staff',
'Add Staff Type': 'Add Staff Type',
'Add Status': 'Add Status',
'Add Storage Bin ': 'Add Storage Bin ',
'Add Storage Bin Type': 'Add Storage Bin Type',
'Add Storage Location': 'Add Storage Location',
'Add Storage Location ': 'Add Storage Location ',
'Add Sub-Category': 'Thêm danh mục cấp dưới',
'Add Subscription': 'Add Subscription',
'Add Survey Answer': 'Thêm trả lời khảo sát',
'Add Survey Question': 'Thêm câu hỏi khảo sát',
'Add Survey Section': 'Add Survey Section',
'Add Survey Series': 'Add Survey Series',
'Add Survey Template': 'Thêm mẫu khảo sát',
'Add Task': 'Add Task',
'Add Team': 'Thêm đội',
'Add Theme': 'Add Theme',
'Add Ticket': 'Add Ticket',
'Add Unit': 'Thêm đơn vị',
'Add User': 'Thêm người dùng',
'Add Volunteer': 'Add Volunteer',
'Add Volunteer Registration': 'Thêm Đăng ký tình nguyện viên',
'Add Warehouse': 'Add Warehouse',
'Add Warehouse Item': 'Add Warehouse Item',
'Add a Person': 'Add a Person',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.',
'Add a Volunteer': 'Add a Volunteer',
'Add a new Site from where the Item is being sent.': 'Thêm Site mới là nơi hàng hóa được gửi đi',
'Add a new Site where the Item is being sent to.': 'Add a new Site where the Item is being sent to.',
'Add an Photo.': 'Add an Photo.',
'Add main Item Category.': 'Add main Item Category.',
'Add main Item Sub-Category.': 'Add main Item Sub-Category.',
'Add new Group': 'Add new Group',
'Add new Individual': 'Add new Individual',
'Add new position.': 'Thêm địa điểm mới',
'Add new project.': 'Thêm dự án mới',
'Add new staff role.': 'Thêm vai trò nhân viên mới',
'Add the Storage Bin Type.': 'Add the Storage Bin Type.',
'Add the Storage Location where this bin is located.': 'Add the Storage Location where this bin is located.',
'Add the Storage Location where this this Bin belongs to.': 'Thêm vị trí kho lưu trữ chứa Bin này',
'Add the main Warehouse/Site information where this Bin belongs to.': 'Add the main Warehouse/Site information where this Bin belongs to.',
'Add the main Warehouse/Site information where this Item is to be added.': 'Thêm thông tin Nhà kho/Site chứa hàng hóa đã được nhập thông tin',
'Add the main Warehouse/Site information where this Storage location is.': 'Add the main Warehouse/Site information where this Storage location is.',
'Add the unit of measure if it doesnt exists already.': 'Add the unit of measure if it doesnt exists already.',
'Add to Bundle': 'Add to Bundle',
'Add to Catalog': 'Add to Catalog',
'Add to budget': 'Add to budget',
'Add/Edit/Remove Layers': 'Thêm/Sửa/Xóa các lớp',
'Additional Beds / 24hrs': 'Additional Beds / 24hrs',
'Additional Comments': 'Additional Comments',
"Additional quantity quantifier – e.g. '4x5'.": "Additional quantity quantifier – e.g. '4x5'.",
'Address': 'Địa chỉ',
'Address Details': 'Address Details',
'Address Type': 'Loại địa chỉ',
'Address added': 'Địa chỉ đã được thêm',
'Address deleted': 'Đã xóa địa chỉ',
'Address updated': 'Address updated',
'Addresses': 'Các địa chỉ',
'Adequate': 'Adequate',
'Adequate food and water available': 'Adequate food and water available',
'Adjust Item(s) Quantity': 'Adjust Item(s) Quantity',
'Adjust Items due to Theft/Loss': 'Adjust Items due to Theft/Loss',
'Admin': 'Quản trị viên',
'Admin Email': 'Email của quản trị viên',
'Admin Name': 'Tên quản trị viên',
'Admin Tel': 'Số điện thoại của Quản trị viên',
'Administration': 'Quản trị',
'Administrator': 'Quản trị viên',
'Admissions/24hrs': 'Admissions/24hrs',
'Adolescent (12-20)': 'Adolescent (12-20)',
'Adolescent participating in coping activities': 'Adolescent participating in coping activities',
'Adult (21-50)': 'Adult (21-50)',
'Adult ICU': 'Adult ICU',
'Adult Psychiatric': 'Bệnh nhân tâm thần',
'Adult female': 'Nữ giới',
'Adult male': 'Adult male',
'Adults in prisons': 'Adults in prisons',
'Advanced Bin Search': 'Advanced Bin Search',
'Advanced Catalog Search': 'Tìm kiếm danh mục nâng cao',
'Advanced Category Search': 'Tìm kiếm danh mục nâng cao',
'Advanced Item Search': 'Advanced Item Search',
'Advanced Location Search': 'Tìm kiếm vị trí nâng cao',
'Advanced Site Search': 'Advanced Site Search',
'Advanced Sub-Category Search': 'Advanced Sub-Category Search',
'Advanced Unit Search': 'Advanced Unit Search',
'Advanced:': 'Advanced:',
'Advisory': 'Advisory',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.',
'Age Group': 'Nhóm tuổi',
'Age group': 'Age group',
'Age group does not match actual age.': 'Nhóm tuổi không phù hợp với tuổi thật',
'Aggravating factors': 'Aggravating factors',
'Aggregate Items': 'Aggregate Items',
'Agriculture': 'Agriculture',
'Aid Request': 'Yêu cầu cứu trợ',
'Aid Request Details': 'Chi tiết yêu cầu cứu trợ',
'Aid Request added': 'Đã thêm yêu cầu viện trợ',
'Aid Request deleted': 'Đã xóa yêu cầu cứu trợ',
'Aid Request updated': 'Đã cập nhật Yêu cầu cứu trợ',
'Aid Requests': 'yêu cầu cứu trợ',
'Air Transport Service': 'Air Transport Service',
'Air tajin': 'Air tajin',
'Aircraft Crash': 'Aircraft Crash',
'Aircraft Hijacking': 'Aircraft Hijacking',
'Airport Closure': 'Đóng cửa sân bay',
'Airspace Closure': 'Airspace Closure',
'Alcohol': 'Alcohol',
'Alert': 'Alert',
'All': 'All',
'All Inbound & Outbound Messages are stored here': 'All Inbound & Outbound Messages are stored here',
'All Locations': 'All Locations',
'All Requested Items': 'Hàng hóa được yêu cầu',
'All Resources': 'All Resources',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.',
'Allowed to push': 'Cho phép bấm nút',
'Allows a Budget to be drawn up': 'Allows a Budget to be drawn up',
'Allows authorized users to control which layers are available to the situation map.': 'Cho phép người dùng đã đăng nhập kiểm soát layer nào phù hợp với bản đồ tình huống',
'Alternative infant nutrition in use': 'Alternative infant nutrition in use',
'Alternative places for studying': 'Alternative places for studying',
'Alternative places for studying available': 'Alternative places for studying available',
'Ambulance Service': 'Ambulance Service',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.',
'Analysis of Completed Surveys': 'Analysis of Completed Surveys',
'Animal Die Off': 'Animal Die Off',
'Animal Feed': 'Animal Feed',
'Animals': 'Animals',
'Answer Choices (One Per Line)': 'Chọn câu trả lời',
'Anthropolgy': 'Anthropolgy',
'Antibiotics available': 'Antibiotics available',
'Antibiotics needed per 24h': 'Antibiotics needed per 24h',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'Thông tin có sẵn trong file như Timestamp,Tác giả, Kinh độ, Vĩ độ sẽ được đọc tự động',
'Apparent Age': 'Apparent Age',
'Apparent Gender': 'Apparent Gender',
'Appropriate clothing available': 'Appropriate clothing available',
'Appropriate cooking equipment/materials in HH': 'Appropriate cooking equipment/materials in HH',
'Approx. number of cases/48h': 'Approx. number of cases/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': 'Approximately how many children under 5 with diarrhea in the past 48 hours?',
'Archive not Delete': 'Archive not Delete',
'Arctic Outflow': 'Arctic Outflow',
'Are basic medical supplies available for health services since the disaster?': 'Are basic medical supplies available for health services since the disaster?',
'Are breast milk substitutes being used here since the disaster?': 'Are breast milk substitutes being used here since the disaster?',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': 'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?',
'Are the chronically ill receiving sufficient care and assistance?': 'Are the chronically ill receiving sufficient care and assistance?',
'Are there adults living in prisons in this area?': 'Are there adults living in prisons in this area?',
'Are there alternative places for studying?': 'Are there alternative places for studying?',
'Are there cases of diarrhea among children under the age of 5?': 'Are there cases of diarrhea among children under the age of 5?',
'Are there children living in adult prisons in this area?': 'Are there children living in adult prisons in this area?',
'Are there children living in boarding schools in this area?': 'Are there children living in boarding schools in this area?',
'Are there children living in homes for disabled children in this area?': 'Are there children living in homes for disabled children in this area?',
'Are there children living in juvenile detention in this area?': 'Are there children living in juvenile detention in this area?',
'Are there children living in orphanages in this area?': 'Are there children living in orphanages in this area?',
'Are there children with chronical illnesses in your community?': 'Are there children with chronical illnesses in your community?',
'Are there health services functioning for the community since the disaster?': 'Are there health services functioning for the community since the disaster?',
'Are there older people living in care homes in this area?': 'Are there older people living in care homes in this area?',
'Are there older people with chronical illnesses in your community?': 'Are there older people with chronical illnesses in your community?',
'Are there people with chronical illnesses in your community?': 'Are there people with chronical illnesses in your community?',
'Are there separate latrines for women and men available?': 'Are there separate latrines for women and men available?',
'Are there staff present and caring for the residents in these institutions?': 'Are there staff present and caring for the residents in these institutions?',
'Area': 'Area',
'Assessment': 'Assessment',
'Assessment Details': 'Assessment Details',
'Assessment Reported': 'Assessment Reported',
'Assessment Summaries': 'Assessment Summaries',
'Assessment Summary Details': 'Assessment Summary Details',
'Assessment Summary added': 'Assessment Summary added',
'Assessment Summary deleted': 'Assessment Summary deleted',
'Assessment Summary updated': 'Assessment Summary updated',
'Assessment Type': 'Assessment Type',
'Assessment added': 'Assessment added',
'Assessment deleted': 'Assessment deleted',
'Assessment updated': 'Đã cập nhật đánh giá',
'Assessments': 'Assessments',
'Assessments Needs vs. Activities': 'Assessments Needs vs. Activities',
'Assessments and Activities': 'Assessments and Activities',
'Assessments are structured reports done by Professional Organizations': 'Assessments are structured reports done by Professional Organizations',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments',
'Assessments:': 'Assessments:',
'Assessor': 'Assessor',
'Assign Storage Location': 'Assign Storage Location',
'Assign to Org.': 'Assign to Org.',
'Assigned': 'Assigned',
'Assigned To': 'Assigned To',
'Assigned to': 'Assigned to',
'Assistance for immediate repair/reconstruction of houses': 'Assistance for immediate repair/reconstruction of houses',
'Assistant': 'Assistant',
'At/Visited Location (not virtual)': 'At/Visited Location (not virtual)',
'Attend to information sources as described in <instruction>': 'Attend to information sources as described in <instruction>',
'Attribution': 'Attribution',
'Audit Read': 'Audit Read',
'Audit Write': 'Audit Write',
"Authenticate system's Twitter account": "Authenticate system's Twitter account",
'Author': 'Author',
'Automotive': 'Automotive',
'Available Beds': 'Available Beds',
'Available Messages': 'Available Messages',
'Available Records': 'Available Records',
'Available databases and tables': 'Available databases and tables',
'Available from': 'Available from',
'Available in Viewer?': 'Available in Viewer?',
'Available until': 'Sẵn sàng cho đến khi',
'Availability': 'Availability',
'Avalanche': 'Avalanche',
'Avoid the subject event as per the <instruction>': 'Avoid the subject event as per the <instruction>',
'Babies who are not being breastfed, what are they being fed on?': 'Babies who are not being breastfed, what are they being fed on?',
'Baby And Child Care': 'Chăm sóc trẻ em',
'Background Colour': 'Background Colour',
'Background Colour for Text blocks': 'Background Colour for Text blocks',
'Bahai': 'Bahai',
'Baldness': 'Baldness',
'Balochi': 'Balochi',
'Banana': 'Banana',
'Bank/micro finance': 'Tài chính Ngân hàng',
'Base Layer?': 'Base Layer?',
'Base Unit': 'Đơn vị cơ sở',
'Baseline Number of Beds': 'Baseline Number of Beds',
'Baseline Type': 'Baseline Type',
'Baseline Type Details': 'Baseline Type Details',
'Baseline Type added': 'Baseline Type added',
'Baseline Type deleted': 'Baseline Type deleted',
'Baseline Type updated': 'Baseline Type updated',
'Baseline Types': 'Baseline Types',
'Baseline added': 'Baseline added',
'Baseline deleted': 'Baseline deleted',
'Baseline number of beds of that type in this unit.': 'Baseline number of beds of that type in this unit.',
'Baseline updated': 'Baseline updated',
'Baselines': 'Baselines',
'Baselines Details': 'Baselines Details',
'Basic Assessment': 'Basic Assessment',
'Basic Assessment Reported': 'Basic Assessment Reported',
'Basic Details': 'Basic Details',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': 'Thông tin cơ bản về các yêu cầu và quyên góp như thể loại, tên đơn vị, chi tiết liên lạc và tình trạng',
'Basic medical supplies available prior to disaster': 'Basic medical supplies available prior to disaster',
'Basic medical supplies available since disaster': 'Basic medical supplies available since disaster',
'Basic reports on the Shelter and drill-down by region': 'Báo cáo cơ bản về nơi cư trú và báo cáo chi tiết theo vùng',
'Baud': 'Bốt',
'Baud rate to use for your modem - The default is safe for most cases': 'Baud rate to use for your modem - The default is safe for most cases',
'Bed Capacity': 'Bed Capacity',
'Bed Capacity per Unit': 'Bed Capacity per Unit',
'Bed Type': 'Loại Giường',
'Bed type already registered': 'Bed type already registered',
'Bedding materials available': 'Bedding materials available',
'Beneficiary Type': 'Beneficiary Type',
'Biological Hazard': 'Biological Hazard',
'Biscuits': 'Biscuits',
'Blizzard': 'Blizzard',
'Blood Type (AB0)': 'Blood Type (AB0)',
'Blowing Snow': 'Gió tuyết đang thổi',
'Boat': 'Boat',
'Bodies found': 'Bodies found',
'Bodies recovered': 'Bodies recovered',
'Body': 'Body',
'Body Recovery Reports': 'Body Recovery Reports',
'Body Recovery Request': 'Body Recovery Request',
'Body Recovery Requests': 'Body Recovery Requests',
'Bomb': 'Bomb',
'Bomb Explosion': 'Nổ bom',
'Bomb Threat': 'Bomb Threat',
'Border Colour for Text blocks': 'Màu viền cho khối văn bản',
'Bounding Box Insets': 'Bounding Box Insets',
'Bounding Box Size': 'Bounding Box Size',
'Boys 13-18 yrs in affected area': 'Boys 13-18 yrs in affected area',
'Boys 13-18 yrs not attending school': 'Boys 13-18 yrs not attending school',
'Boys 6-12 yrs in affected area': 'Boys 6-12 yrs in affected area',
'Boys 6-12 yrs not attending school': 'Boys 6-12 yrs not attending school',
'Breast milk substitutes in use since disaster': 'Breast milk substitutes in use since disaster',
'Breast milk substitutes used prior to disaster': 'Breast milk substitutes used prior to disaster',
'Bricks': 'Bricks',
'Bridge Closed': 'Bridge Closed',
'Bucket': 'Bucket',
'Buddhist': 'Người theo đạo Phật',
'Budget': 'Ngân sách',
'Budget Details': 'Budget Details',
'Budget Updated': 'Budget Updated',
'Budget added': 'Budget added',
'Budget deleted': 'Budget deleted',
'Budget updated': 'Budget updated',
'Budgeting Module': 'Budgeting Module',
'Budgets': 'Ngân sách',
'Buffer': 'Buffer',
'Building Aide': 'Building Aide',
'Building Collapsed': 'Sập nhà',
'Built using the Template agreed by a group of NGOs working together as the': 'Built using the Template agreed by a group of NGOs working together as the',
'Bulk Uploader': 'Bulk Uploader',
'Bundle': 'Bundle',
'Bundle Contents': 'Bundle Contents',
'Bundle Details': 'Bundle Details',
'Bundle Updated': 'Cập nhật Bundle',
'Bundle added': 'Bundle added',
'Bundle deleted': 'Bundle deleted',
'Bundle updated': 'Bundle updated',
'Bundles': 'Bundles',
'Burn': 'Burn',
'Burn ICU': 'Burn ICU',
'Burned/charred': 'Burned/charred',
'Business damaged': 'Business damaged',
'By Warehouse': 'By Warehouse',
'CBA Women': 'CBA Women',
'CSS file %s not writable - unable to apply theme!': 'không viết được file CSS %s - không thể áp dụng chủ đề',
'Calculate': 'Tính toán',
'Camp': 'Camp',
'Camp Coordination/Management': 'Camp Coordination/Management',
'Can users register themselves for authenticated login access?': 'Can users register themselves for authenticated login access?',
"Can't import tweepy": "Can't import tweepy",
'Cancel': 'Cancel',
'Cancelled': 'Cancelled',
'Candidate Matches for Body %s': 'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'Không xóa được khi đang có bản thu liên quan.Hãy xóa bản thu trước',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capacity (W x D X H)': 'Capacity (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Nắm bắt thông tin của các nạn nhân chịu ảnh hưởng của thiên tai(Khách du lịch,Gia đình...)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturing organizational information of a relief organization and all the projects they have in the region',
'Capturing the essential services each Volunteer is providing and where': 'Capturing the essential services each Volunteer is providing and where',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organization is providing and where',
'Cardiology': 'Bệnh tim mạch',
'Cash available to restart business': 'Cash available to restart business',
'Cassava': 'Cassava',
'Casual Labor': 'Nhân công thời vụ',
'Catalog': 'Catalog',
'Catalog Item': 'Catalog Item',
'Catalog Item added': 'Catalog Item added',
'Catalog Item deleted': 'Catalog Item deleted',
'Catalog Item updated': 'Catalog Item updated',
'Catalog Items': 'Catalog Items',
'Catalog Name': 'Catalog Name',
'Category': 'Category',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog Relation',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog Relation added',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog Relation deleted',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog Relation updated',
'Central point to record details on People': 'Central point to record details on People',
'Change Password': 'Change Password',
'Check for errors in the URL, maybe the address was mistyped.': 'Kiểm tra lỗi URL, có lẽ địa chỉ URL bị gõ sai.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Kiểm tra URL trỏ về thư mục hay trang web',
'Check outbox for the message status': 'Check outbox for the message status',
'Check to delete': 'Check to delete',
'Check-in': 'Check-in',
'Check-out': 'Check-out',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child': 'Child',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Trẻ em (dưới 2 tuổi)',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Children with chronical illnesses': 'Children with chronical illnesses',
'Chinese (Taiwan)': 'Chinese (Taiwan)',
'Cholera Treatment': 'Cholera Treatment',
'Cholera Treatment Capability': 'Cholera Treatment Capability',
'Cholera Treatment Center': 'Cholera Treatment Center',
'Cholera-Treatment-Center': 'Cholera-Treatment-Center',
'Choosing Skill and Resources of Volunteers': 'Choosing Skill and Resources of Volunteers',
'Christian': 'Christian',
'Church': 'Church',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Hoàn cảnh mất tích, những nhân chứng nhìn thấy lần gần đây nhất nạn nhân còn sống',
'City': 'City',
'Civil Emergency': 'Civil Emergency',
'Clear Selection': 'Clear Selection',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.",
'Click on the link ': 'Click on the link ',
'Client IP': 'Client IP',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close map': 'Close map',
'Closed': 'Đã đóng',
'Closure': 'Closure',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Đã thêm cụm',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'Mã',
'Cold Wave': 'Cold Wave',
'Collective center': 'Collective center',
'Colour for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Colour of Buttons when hovering': 'Colour of Buttons when hovering',
'Colour of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Colour of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Colour of dropdown menus': 'Colour of dropdown menus',
'Colour of selected Input fields': 'Màu của trường đã được chọn',
'Colour of selected menu items': 'Colour of selected menu items',
'Column Choices (One Per Line': 'Chọn cột',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'Bình luận',
'Commiting a changed spreadsheet to the database': 'Commiting a changed spreadsheet to the database',
'Communication problems': 'Communication problems',
'Community Centre': 'Community Centre',
'Community Health Center': 'Trung tâm sức khỏe cộng đồng',
'Community Member': 'Thành viên cộng đồng',
'Complete Unit Label for e.g. meter for m.': 'hoàn thành các bản đơn vị, ví dụ đơn vị của mét là m',
'Completed': 'Completed',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Config': 'Tùy chỉnh',
'Config added': 'Cấu hình đã được thêm',
'Config deleted': 'Config deleted',
'Config updated': 'Cập nhật tùy chỉnh',
'Configs': 'Configs',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Confirmed': 'Confirmed',
'Confirmed Incidents': 'Confirmed Incidents',
'Conflict Details': 'Conflict Details',
'Conflict Resolution': 'Conflict Resolution',
'Consumable': 'Consumable',
'Contact': 'Contact',
'Contact Data': 'Dữ liệu liên lạc',
'Contact Details': 'Contact Details',
'Contact Information': 'Contact Information',
'Contact Method': 'Contact Method',
'Contact Person': 'Contact Person',
'Contact details': 'Contact details',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': 'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.',
'Contact us': 'Contact us',
'Contacts': 'Contacts',
'Contents': 'Contents',
'Contradictory values!': 'Contradictory values!',
'Contributor': 'Người đóng góp',
'Conversion Tool': 'Conversion Tool',
'Cooking NFIs': 'Cooking NFIs',
'Cooking Oil': 'Cooking Oil',
'Coordinate Conversion': 'Coordinate Conversion',
'Copy': 'Copy',
'Copy any data from the one to be deleted into the one to keep': 'Copy any data from the one to be deleted into the one to keep',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
"Couldn't import tweepy library": "Couldn't import tweepy library",
'Country': 'Country',
'Country of Residence': 'Country of Residence',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Import Job': 'Create Import Job',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create New Import Job': 'Create New Import Job',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Request': 'Khởi tạo yêu cầu',
'Create Task': 'Create Task',
'Create a group entry in the registry.': 'Create a group entry in the registry.',
'Create, enter, and manage surveys.': 'Create, enter, and manage surveys.',
'Creation of Surveys': 'Creation of Surveys',
'Crime': 'Tội phạm',
'Criteria': 'Criteria',
'Currency': 'Currency',
'Current Group Members': 'Nhóm thành viên hiện tại',
'Current Identities': 'Current Identities',
'Current Location': 'Current Location',
'Current Log Entries': 'Current Log Entries',
'Current Memberships': 'Thành viên hiện tại',
'Current Notes': 'Current Notes',
'Current Registrations': 'Current Registrations',
'Current Status': 'Current Status',
'Current Team Members': 'Current Team Members',
'Current Twitter account': 'Tài khoản Twitter hiện tại',
'Current greatest needs of vulnerable groups': 'Current greatest needs of vulnerable groups',
'Current main income sources': 'Current main income sources',
'Current major expenses': 'Current major expenses',
'Current number of patients': 'Current number of patients',
'Current problems, categories': 'Current problems, categories',
'Current problems, details': 'Current problems, details',
'Current request': 'Yêu cầu hiện tại',
'Current response': 'Current response',
'Current session': 'Current session',
'Current type of health problems, adults': 'Current type of health problems, adults',
'Current type of health problems, children': 'Current type of health problems, children',
'Current type of source for drinking water': 'Current type of source for drinking water',
'Current type of source for sanitary water': 'Current type of source for sanitary water',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Custom Database Resource (e.g., anything defined as a resource in Sahana)',
'Customisable category of aid': 'Các tiêu chí cứu trợ có thể tùy chỉnh',
'DECISION': 'DECISION',
'DNA Profile': 'DNA Profile',
'DNA Profiling': 'DNA Profiling',
'Daily': 'Hàng ngày',
'Dam Overflow': 'Tràn đập',
'Dangerous Person': 'Người nguy hiểm',
'Data uploaded': 'Đã cập nhật dữ liệu',
'Database': 'Database',
'Date': 'Date',
'Date & Time': 'Date & Time',
'Date Requested': 'Date Requested',
'Date Required': 'Date Required',
'Date and Time': 'Date and Time',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': 'Ngày giờ nhận hàng hóa. Hiển thị thời gian theo mặc định nhưng vẫn có thể chỉnh sửa',
'Date and time this report relates to.': 'Date and time this report relates to.',
'Date of Birth': 'Date of Birth',
'Date of Latest Information on Beneficiaries Reached': 'Date of Latest Information on Beneficiaries Reached',
'Date of Report': 'Ngày báo cáo',
'Date/Time': 'Ngày/Giờ',
'Date/Time of Find': 'Ngày giờ tìm kiếm',
'Date/Time of disappearance': 'Date/Time of disappearance',
'De-duplicator': 'De-duplicator',
'Dead Body Details': 'Dead Body Details',
'Dead Body Reports': 'Báo cáo thiệt hại về người',
'Deaths in the past 24h': 'Deaths in the past 24h',
'Deaths/24hrs': 'Số người chết/24h',
'Debug': 'Debug',
'Decimal Degrees': 'Độ thập phân',
'Decomposed': 'Decomposed',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default Marker': 'Default Marker',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.',
'Default synchronization policy': 'Chính sách đồng bộ hóa mặc định',
'Defaults': 'Defaults',
'Defaults updated': 'Defaults updated',
'Defecation area for animals': 'Defecation area for animals',
'Defines the icon used for display of features on handheld GPS.': 'Defines the icon used for display of features on handheld GPS.',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': 'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.',
'Defines the marker used for display & the attributes visible in the popup.': 'Defines the marker used for display & the attributes visible in the popup.',
'Degrees must be a number between -180 and 180': 'Degrees must be a number between -180 and 180',
'Dehydration': 'Dehydration',
'Delete': 'Delete',
'Delete Aid Request': 'Xóa yêu cầu cứu trợ',
'Delete Assessment': 'Delete Assessment',
'Delete Assessment Summary': 'Delete Assessment Summary',
'Delete Baseline': 'Delete Baseline',
'Delete Baseline Type': 'Delete Baseline Type',
'Delete Budget': 'Delete Budget',
'Delete Bundle': 'Delete Bundle',
'Delete Catalog Item': 'Delete Catalog Item',
'Delete Cluster Subsector': 'Delete Cluster Subsector',
'Delete Config': 'Delete Config',
'Delete Distribution': 'Delete Distribution',
'Delete Distribution Item': 'Delete Distribution Item',
'Delete Document': 'Delete Document',
'Delete Donor': 'Delete Donor',
'Delete Entry': 'Delete Entry',
'Delete Feature Class': 'Delete Feature Class',
'Delete Feature Layer': 'Delete Feature Layer',
'Delete Group': 'Delete Group',
'Delete Hospital': 'Xóa Bệnh viện',
'Delete Image': 'Delete Image',
'Delete Impact': 'Delete Impact',
'Delete Impact Type': 'Delete Impact Type',
'Delete Incident': 'Delete Incident',
'Delete Incident Report': 'Delete Incident Report',
'Delete Inventory Item': 'Delete Inventory Item',
'Delete Inventory Store': 'Xóa kho lưu trữ',
'Delete Item': 'Xóa mục',
'Delete Item Category': 'Delete Item Category',
'Delete Item Packet': 'Delete Item Packet',
'Delete Key': 'Delete Key',
'Delete Kit': 'Delete Kit',
'Delete Layer': 'Xóa Layer',
'Delete Location': 'Xóa Vị trí',
'Delete Marker': 'Delete Marker',
'Delete Membership': 'Delete Membership',
'Delete Message': 'Delete Message',
'Delete Metadata': 'Delete Metadata',
'Delete Need': 'Delete Need',
'Delete Need Type': 'Delete Need Type',
'Delete Office': 'Delete Office',
'Delete Old': 'Delete Old',
'Delete Organization': 'Delete Organization',
'Delete Peer': 'Delete Peer',
'Delete Person': 'Delete Person',
'Delete Photo': 'Delete Photo',
'Delete Project': 'Delete Project',
'Delete Projection': 'Delete Projection',
'Delete Rapid Assessment': 'Delete Rapid Assessment',
'Delete Received Item': 'Delete Received Item',
'Delete Received Shipment': 'Delete Received Shipment',
'Delete Record': 'Delete Record',
'Delete Recovery Report': 'Delete Recovery Report',
'Delete Report': 'Delete Report',
'Delete Request': 'Xóa yêu cầu',
'Delete Request Item': 'Xóa yêu cầu hàng hóa',
'Delete Resource': 'Delete Resource',
'Delete Section': 'Delete Section',
'Delete Sector': 'Delete Sector',
'Delete Sent Item': 'Delete Sent Item',
'Delete Sent Shipment': 'Delete Sent Shipment',
'Delete Service Profile': 'Delete Service Profile',
'Delete Setting': 'Delete Setting',
'Delete Skill': 'Delete Skill',
'Delete Skill Type': 'Delete Skill Type',
'Delete Staff Type': 'Delete Staff Type',
'Delete Status': 'Delete Status',
'Delete Subscription': 'Delete Subscription',
'Delete Survey Answer': 'Delete Survey Answer',
'Delete Survey Question': 'Xóa câu hỏi khảo sát',
'Delete Survey Section': 'Delete Survey Section',
'Delete Survey Series': 'Delete Survey Series',
'Delete Survey Template': 'Xóa mẫu khảo sát',
'Delete Unit': 'Xóa đơn vị',
'Delete User': 'Delete User',
'Delete Volunteer': 'Delete Volunteer',
'Delete Warehouse': 'Delete Warehouse',
'Delete Warehouse Item': 'Delete Warehouse Item',
'Delete from Server?': 'Delete from Server?',
'Delivered': 'Delivered',
'Delphi Decision Maker': 'Delphi Decision Maker',
'Demographic': 'Ngành nhân khẩu học',
'Demonstrations': 'Biểu tình',
'Dental Examination': 'Khám nha khoa',
'Dental Profile': 'Dental Profile',
'Department/Unit Name': 'Department/Unit Name',
'Deployment': 'Deployment',
'Describe the condition of the roads to your hospital.': 'Mô tả tình trạng các con đường tới bệnh viện.',
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Describe the procedure which this record relates to (e.g. "medical examination")',
'Description': 'Mô tả',
'Description of Bin Type': 'Description of Bin Type',
'Description of Contacts': 'Description of Contacts',
'Description of defecation area': 'Mô tả khu vực defecation',
'Description of drinking water source': 'Description of drinking water source',
'Description of sanitary water source': 'Description of sanitary water source',
'Description of water source before the disaster': 'Description of water source before the disaster',
'Descriptive Text (e.g., Prose, etc)': 'Descriptive Text (e.g., Prose, etc)',
'Designated for': 'Designated for',
'Desire to remain with family': 'Desire to remain with family',
'Destination': 'Điểm đích',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": "Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.",
'Details': 'Details',
'Dialysis': 'Dialysis',
'Diarrhea': 'Diarrhea',
'Diarrhea among children under 5': 'Diarrhea among children under 5',
'Dignitary Visit': 'Dignitary Visit',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': 'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.',
'Direction': 'Hướng',
'Disabled': 'Disabled',
'Disabled participating in coping activities': 'Disabled participating in coping activities',
'Disabled?': 'Disabled?',
'Disaster Victim Identification': 'Disaster Victim Identification',
'Disaster Victim Registry': 'Disaster Victim Registry',
'Disaster clean-up/repairs': 'Disaster clean-up/repairs',
'Discharge (cusecs)': 'Discharge (cusecs)',
'Discharges/24hrs': 'Discharges/24hrs',
'Discussion Forum': 'Discussion Forum',
'Discussion Forum on item': 'Discussion Forum on item',
'Disease vectors': 'Disease vectors',
'Dispatch': 'Gửi hàng cứu trợ',
'Dispatch Items': 'Dispatch Items',
'Dispensary': 'Y tế dự phòng',
'Displaced': 'Displaced',
'Displaced Populations': 'Displaced Populations',
'Display Polygons?': 'Display Polygons?',
'Display Routes?': 'Display Routes?',
'Display Tracks?': 'Display Tracks?',
'Display Waypoints?': 'Display Waypoints?',
'Dispose': 'Dispose',
'Dispose Expired/Unusable Items': 'Dispose Expired/Unusable Items',
'Distance between defecation area and water source': 'Distance between defecation area and water source',
'Distance between latrines and temporary shelter in meters': 'Distance between latrines and temporary shelter in meters',
'Distance between shelter and latrines': 'Distance between shelter and latrines',
'Distance(Kms)': 'Distance(Kms)',
'Distribution': 'Distribution',
'Distribution Details': 'Distribution Details',
'Distribution Item': 'Hàng hóa đóng góp',
'Distribution Item Details': 'Distribution Item Details',
'Distribution Item added': 'Distribution Item added',
'Distribution Item deleted': 'Distribution Item deleted',
'Distribution Item updated': 'Distribution Item updated',
'Distribution Items': 'Distribution Items',
'Distribution added': 'Distribution added',
'Distribution deleted': 'Distribution deleted',
'Distribution groups': 'Distribution groups',
'Distribution updated': 'Distribution updated',
'Distributions': 'Distributions',
'District': 'Quận',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': 'Do households each have at least 2 containers (10-20 litres each) to hold water?',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'Do households have bedding materials available (tarps, plastic mats, blankets)?',
'Do households have household water storage containers?': 'Do households have household water storage containers?',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': 'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': 'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do women and girls have easy access to sanitary materials?': 'Do women and girls have easy access to sanitary materials?',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)',
'Do you have access to cash to restart your business?': 'Do you have access to cash to restart your business?',
'Do you know of any incidents of violence?': 'Do you know of any incidents of violence?',
'Do you know of children living on their own (without adults)?': 'Do you know of children living on their own (without adults)?',
'Do you know of children separated from their parents or caregivers?': 'Do you know of children separated from their parents or caregivers?',
'Do you know of children that have been orphaned by the disaster?': 'Do you know of children that have been orphaned by the disaster?',
'Do you know of children that have been sent to safe places?': 'Do you know of children that have been sent to safe places?',
'Do you know of children that have disappeared without explanation in the period since the disaster?': 'Do you know of children that have disappeared without explanation in the period since the disaster?',
'Do you know of older people who are primary caregivers of children?': 'Do you know of older people who are primary caregivers of children?',
'Do you know of parents/caregivers missing children?': 'Do you know of parents/caregivers missing children?',
'Do you really want to delete these records?': 'Do you really want to delete these records?',
'Do you want to over-write the file metadata with new default values?': 'Bạn có muốn thay dữ liệu file bằng giá trị mặc định mới không?',
'Do you want to receive this shipment?': 'Do you want to receive this shipment?',
'Do you want to send this shipment?': 'Do you want to send this shipment?',
'Document': 'Document',
'Document Details': 'Chi tiết văn bản',
'Document Scan': 'Document Scan',
'Document added': 'Đã thêm tài liệu',
'Document deleted': 'Document deleted',
'Document updated': 'Document updated',
'Documents': 'Documents',
'Documents and Photos': 'Documents and Photos',
'Does this facility provide a cholera treatment center?': 'Does this facility provide a cholera treatment center?',
'Doing nothing (no structured activity)': 'Không làm gì (không có hoạt động theo kế hoạch)',
'Dollars': 'Dollars',
'Domestic chores': 'Công việc nội trợ',
'Donation Phone #': 'Donation Phone #',
'Donor': 'Donor',
'Donor Details': 'Donor Details',
'Donor added': 'Đã thêm người quyên góp',
'Donor deleted': 'Donor deleted',
'Donor updated': 'Đã cập nhật người quyên góp',
'Donors': 'Donors',
'Donors Report': 'Báo cáo về tình hình quyên góp',
'Door frame': 'Door frame',
'Draft': 'Bản nháp',
'Drainage': 'Drainage',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Drawing up a Budget for Staff & Equipment across various Locations.',
'Drill Down by Group': 'Drill Down by Group',
'Drill Down by Incident': 'Drill Down by Incident',
'Drill Down by Shelter': 'Drill Down by Shelter',
'Driving License': 'Driving License',
'Drought': 'Drought',
'Drugs': 'Drugs',
'Dug Well': 'Dug Well',
'Duplicate?': 'Duplicate?',
'Duration': 'Duration',
'Dust Storm': 'Dust Storm',
'Dwellings': 'Dwellings',
'E-mail': 'E-mail',
'EMS Reason': 'EMS Reason',
'EMS Status': 'Tình trạng EMS',
'EMS Status Reason': 'EMS Status Reason',
'EMS Traffic Status': 'EMS Traffic Status',
'ER Status': 'ER Status',
'ER Status Reason': 'ER Status Reason',
'Early Recovery': 'Early Recovery',
'Earthquake': 'Động đất',
'Easy access to sanitation items for women/girls': 'Easy access to sanitation items for women/girls',
'Edit': 'Edit',
'Edit Activity': 'Edit Activity',
'Edit Address': 'Edit Address',
'Edit Aid Request': 'Chỉnh sửa Yêu cầu cứu trợ',
'Edit Application': 'Edit Application',
'Edit Assessment': 'Chỉnh sửa Đánh giá',
'Edit Assessment Summary': 'Edit Assessment Summary',
'Edit Baseline': 'Edit Baseline',
'Edit Baseline Type': 'Edit Baseline Type',
'Edit Budget': 'Edit Budget',
'Edit Bundle': 'Edit Bundle',
'Edit Catalog Item': 'Edit Catalog Item',
'Edit Category<>Sub-Category<>Catalog Relation': 'Edit Category<>Sub-Category<>Catalog Relation',
'Edit Cluster': 'Edit Cluster',
'Edit Cluster Subsector': 'Edit Cluster Subsector',
'Edit Config': 'Edit Config',
'Edit Contact': 'Edit Contact',
'Edit Contact Information': 'Chỉnh sửa thông tin liên lạc',
'Edit Contents': 'Edit Contents',
'Edit Defaults': 'Edit Defaults',
'Edit Description': 'Edit Description',
'Edit Details': 'Chỉnh sửa chi tiết',
'Edit Disaster Victims': 'Edit Disaster Victims',
'Edit Distribution': 'Chỉnh sửa Quyên góp',
'Edit Distribution Item': 'Edit Distribution Item',
'Edit Document': 'Chỉnh sửa tài liệu',
'Edit Donor': 'Edit Donor',
'Edit Email Settings': 'Edit Email Settings',
'Edit Feature Class': 'Edit Feature Class',
'Edit Feature Layer': 'Edit Feature Layer',
'Edit Flood Report': 'Edit Flood Report',
'Edit Gateway Settings': 'Edit Gateway Settings',
'Edit Group': 'Edit Group',
'Edit Hospital': 'Chỉnh sửa Bệnh viện',
'Edit Identification Report': 'Chỉnh sửa báo cáo định dạng',
'Edit Identity': 'Edit Identity',
'Edit Image': 'Edit Image',
'Edit Image Details': 'Edit Image Details',
'Edit Impact': 'Edit Impact',
'Edit Impact Type': 'Edit Impact Type',
'Edit Incident': 'Chỉnh sửa Các sự việc xảy ra',
'Edit Incident Report': 'Edit Incident Report',
'Edit Inventory Item': 'Edit Inventory Item',
'Edit Inventory Store': 'Edit Inventory Store',
'Edit Item': 'Edit Item',
'Edit Item Catalog': 'Edit Item Catalog',
'Edit Item Catalog Categories': 'Chỉnh sửa danh mục hàng hóa',
'Edit Item Category': 'Edit Item Category',
'Edit Item Packet': 'Edit Item Packet',
'Edit Item Sub-Categories': 'Edit Item Sub-Categories',
'Edit Key': 'Chỉnh sửa Key',
'Edit Kit': 'Edit Kit',
'Edit Layer': 'Edit Layer',
'Edit Location': 'Edit Location',
'Edit Log Entry': 'Edit Log Entry',
'Edit Map Services': 'Chỉnh sửa dịch vụ bản đồ',
'Edit Marker': 'Edit Marker',
'Edit Membership': 'Edit Membership',
'Edit Message': 'Edit Message',
'Edit Messaging Settings': 'Edit Messaging Settings',
'Edit Metadata': 'Chỉnh sửa dữ liệu',
'Edit Modem Settings': 'Edit Modem Settings',
'Edit Need': 'Edit Need',
'Edit Need Type': 'Edit Need Type',
'Edit Note': 'Edit Note',
'Edit Office': 'Edit Office',
'Edit Options': 'Edit Options',
'Edit Organization': 'Edit Organization',
'Edit Parameters': 'Edit Parameters',
'Edit Peer Details': 'Chỉnh sửa chi tiết nhóm người',
'Edit Person Details': 'Edit Person Details',
'Edit Personal Effects Details': 'Edit Personal Effects Details',
'Edit Photo': 'Edit Photo',
'Edit Pledge': 'Edit Pledge',
'Edit Position': 'Edit Position',
'Edit Problem': 'Chỉnh sửa Vấn đề',
'Edit Project': 'Edit Project',
'Edit Projection': 'Edit Projection',
'Edit Rapid Assessment': 'Edit Rapid Assessment',
'Edit Received Item': 'Edit Received Item',
'Edit Received Shipment': 'Edit Received Shipment',
'Edit Record': 'Edit Record',
'Edit Recovery Details': 'Chỉnh sửa chi tiết khôi phục',
'Edit Registration': 'Edit Registration',
'Edit Registration Details': 'Edit Registration Details',
'Edit Report': 'Chỉnh sửa báo cáo',
'Edit Request': 'Edit Request',
'Edit Request Item': 'Chỉnh sửa yêu cầu hàng hóa',
'Edit Resource': 'Edit Resource',
'Edit Response': 'Chỉnh sửa phản hồi',
'Edit River': 'Edit River',
'Edit Role': 'Chỉnh sửa Vai trò',
'Edit Sector': 'Edit Sector',
'Edit Sent Item': 'Edit Sent Item',
'Edit Sent Shipment': 'Edit Sent Shipment',
'Edit Setting': 'Chỉnh sửa cài đặt',
'Edit Settings': 'Edit Settings',
'Edit Shelter': 'Chỉnh sửa thông tin cư trú',
'Edit Shelter Service': 'Chỉnh sửa dịch vụ cư trú',
'Edit Shelter Type': 'Edit Shelter Type',
'Edit Shipment Transit Log': 'Edit Shipment Transit Log',
'Edit Shipment to Send': 'Edit Shipment to Send',
'Edit Shipment/Way Bills': 'Edit Shipment/Way Bills',
'Edit Shipment<>Item Relation': 'Edit Shipment<>Item Relation',
'Edit Site': 'Edit Site',
'Edit Skill': 'Chỉnh sửa kỹ năng',
'Edit Skill Type': 'Edit Skill Type',
'Edit Solution': 'Edit Solution',
'Edit Staff': 'Edit Staff',
'Edit Staff Type': 'Edit Staff Type',
'Edit Storage Bin Type(s)': 'Edit Storage Bin Type(s)',
'Edit Storage Bins': 'Edit Storage Bins',
'Edit Storage Location': 'Edit Storage Location',
'Edit Subscription': 'Edit Subscription',
'Edit Survey Answer': 'Chỉnh sửa trả lời khảo sát',
'Edit Survey Question': 'Edit Survey Question',
'Edit Survey Section': 'Edit Survey Section',
'Edit Survey Series': 'Edit Survey Series',
'Edit Survey Template': 'Edit Survey Template',
'Edit Task': 'Edit Task',
'Edit Team': 'Edit Team',
'Edit Theme': 'Edit Theme',
'Edit Themes': 'Edit Themes',
'Edit Ticket': 'Edit Ticket',
'Edit Track': 'Edit Track',
'Edit Tropo Settings': 'Edit Tropo Settings',
'Edit Unit': 'Edit Unit',
'Edit User': 'Edit User',
'Edit Volunteer Details': 'Edit Volunteer Details',
'Edit Volunteer Registration': 'Chỉnh sửa đăng ký tình nguyện viên',
'Edit Warehouse': 'Edit Warehouse',
'Edit Warehouse Item': 'Edit Warehouse Item',
'Edit current record': 'Chỉnh sửa bản thu hiện tại',
'Edit message': 'Edit message',
'Edit the Application': 'Chỉnh sửa ứng dụng',
'Editable?': 'Editable?',
'Education': 'Giáo dục',
'Education materials received': 'Đã nhận được tài liệu, dụng cụ phục vụ học tập',
'Education materials, source': 'Dụng cụ học tập, nguồn',
'Effects Inventory': 'Effects Inventory',
'Eggs': 'Eggs',
'Either a shelter or a location must be specified': 'Either a shelter or a location must be specified',
'Either file upload or document URL required.': 'Either file upload or document URL required.',
'Either file upload or image URL required.': 'Yêu cầu upload file hoặc URL ảnh',
'Elderly person headed households (>60 yrs)': 'Elderly person headed households (>60 yrs)',
'Electrical': 'Electrical',
'Elevated': 'Nâng cao lên',
'Email': 'Email',
'Email Settings': 'Email Settings',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': 'Địa chỉ email đã được xác nhận, tuy nhiên đăng ký vẫn còn chờ duyệt - hãy đợi đến khi nhận được phê chuẩn',
'Email settings updated': 'Email settings updated',
'Embalming': 'Embalming',
'Embassy': 'Embassy',
'Emergency Capacity Building project': 'Emergency Capacity Building project',
'Emergency Department': 'Bộ phận cấp cứu',
'Emergency Shelter': 'Emergency Shelter',
'Emergency Support Facility': 'Emergency Support Facility',
'Emergency Support Service': 'Emergency Support Service',
'Emergency Telecommunications': 'Emergency Telecommunications',
'Enable/Disable Layers': 'Kích hoạt/Tắt Layer',
'Enabled': 'Enabled',
'End date': 'Ngày kết thúc',
'End date should be after start date': 'End date should be after start date',
'End of Period': 'End of Period',
'English': 'English',
'Enter Coordinates in Deg Min Sec': 'Nhập tọa độ ở dạng Độ,Phút,Giây',
'Enter Coordinates:': 'Enter Coordinates:',
'Enter a GPS Coord': 'Enter a GPS Coord',
'Enter a date before': 'Enter a date before',
'Enter a location': 'Enter a location',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Nhập tên cho bảng tính bạn đang tải lên(bắt buộc)',
'Enter a new support request.': 'Nhập một yêu cầu hỗ trợ mới',
'Enter a summary of the request here.': 'Nhập tóm tắt các yêu cầu ở đây',
'Enter a unique label!': 'Enter a unique label!',
'Enter a valid email': 'Enter a valid email',
'Enter tags separated by commas.': 'Enter tags separated by commas.',
'Enter the same password as above': 'Enter the same password as above',
'Enter your firstname': 'Nhập họ của bạn',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Không bắt buộc phải nhập số điện thoại nhưng nếu nhập, bạn sẽ nhận được tin nhắn từ hệ thống',
'Entry deleted': 'Entry deleted',
'Equipment': 'Equipment',
'Error encountered while applying the theme.': 'Error encountered while applying the theme.',
'Error in message': 'Error in message',
'Error logs for "%(app)s"': 'Error logs for "%(app)s"',
'Errors': 'Lỗi',
'Estimated # of households who are affected by the emergency': 'Ước tính # số hộ chịu ảnh hưởng từ thiên tai',
'Estimated # of people who are affected by the emergency': 'Estimated # of people who are affected by the emergency',
'Estimated total number of people in institutions': 'Estimated total number of people in institutions',
'Euros': 'Euro',
'Evacuating': 'Evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Đánh giá thông tin trong thư. (giá trị này KHÔNG NÊN sử dụng trong các ứng dụng cảnh báo công cộng)',
'Event Time': 'Event Time',
'Event Type': 'Loại Sự kiện',
'Event type': 'Event type',
'Example': 'Example',
'Exceeded': 'Exceeded',
'Excreta disposal': 'Excreta disposal',
'Execute a pre-planned activity identified in <instruction>': 'Execute a pre-planned activity identified in <instruction>',
'Existing food stocks, main dishes': 'Existing food stocks, main dishes',
'Existing food stocks, side dishes': 'Existing food stocks, side dishes',
'Expected In': 'Expected In',
'Expected Out': 'Theo dự kiến',
'Expiry Time': 'Expiry Time',
'Explosive Hazard': 'Explosive Hazard',
'Export': 'Export',
'Export Data': 'Export Data',
'Export Database as CSV': 'Export Database as CSV',
'Export in GPX format': 'Export in GPX format',
'Export in KML format': 'Export in KML format',
'Export in OSM format': 'Export in OSM format',
'Export in PDF format': 'Export in PDF format',
'Export in RSS format': 'Export in RSS format',
'Export in XLS format': 'Export in XLS format',
'Eye Color': 'Màu mắt',
'Facebook': 'Facebook',
'Facial hair, color': 'Facial hair, color',
'Facial hair, type': 'Facial hair, type',
'Facial hear, length': 'Facial hear, length',
'Facility Operations': 'Facility Operations',
'Facility Status': 'Facility Status',
'Facility Type': 'Facility Type',
'Factors affecting school attendance': 'Factors affecting school attendance',
'Failed!': 'Failed!',
'Falling Object Hazard': 'Falling Object Hazard',
'Families/HH': 'Families/HH',
'Family': 'Family',
'Family tarpaulins received': 'Family tarpaulins received',
'Family tarpaulins, source': 'Family tarpaulins, source',
'Family/friends': 'Gia đình/Bạn bè',
'Farmland/fishing material assistance, Rank': 'Farmland/fishing material assistance, Rank',
'Fax': 'Fax',
'Feature Class': 'Feature Class',
'Feature Class Details': 'Feature Class Details',
'Feature Class added': 'Feature Class added',
'Feature Class deleted': 'Feature Class deleted',
'Feature Class updated': 'Feature Class updated',
'Feature Classes': 'Các mức phân loại tính năng',
'Feature Classes are collections of Locations (Features) of the same type': 'Các mức phân loại tính năng là tập hợp các vị trí (tính năng) cùng loại',
'Feature Layer Details': 'Feature Layer Details',
'Feature Layer added': 'Lớp đặc tính đã được thêm',
'Feature Layer deleted': 'Feature Layer deleted',
'Feature Layer updated': 'Cập nhật Layer tính năng',
'Feature Layers': 'Feature Layers',
'Feature Namespace': 'Feature Namespace',
'Feature Type': 'Loại tính năng',
'Features Include': 'Features Include',
'Female': 'Female',
'Female headed households': 'Female headed households',
'Few': 'Một vài',
'Field Hospital': 'Field Hospital',
'File': 'File',
'Fill in Latitude': 'Fill in Latitude',
'Fill in Longitude': 'Fill in Longitude',
'Filter': 'Filter',
'Filter Field': 'Filter Field',
'Filter Value': 'Giá trị lọc',
'Filtered search of aid pledges and requests': 'Filtered search of aid pledges and requests',
'Find': 'Tìm',
'Find Dead Body Report': 'Find Dead Body Report',
'Find Recovery Report': 'Tìm Báo cáo phục hồi',
'Find Volunteers': 'Find Volunteers',
'Find by Name': 'Find by Name',
'Finder': 'Finder',
'Fingerprint': 'Fingerprint',
'Fingerprinting': 'Dấu vân tay',
'Fingerprints': 'Fingerprints',
'Finish': 'Finish',
'Finished Jobs': 'Finished Jobs',
'Fire': 'Fire',
'Fire suppression and rescue': 'Fire suppression and rescue',
'First Name': 'First Name',
'First name': 'Tên',
'Fishing': 'Fishing',
'Flash Flood': 'Flash Flood',
'Flash Freeze': 'Flash Freeze',
'Fleet Management': 'Fleet Management',
'Flexible Impact Assessments': 'Flexible Impact Assessments',
'Flood': 'Lũ lụt',
'Flood Alerts': 'Flood Alerts',
'Flood Alerts show water levels in various parts of the country': 'Flood Alerts show water levels in various parts of the country',
'Flood Report': 'Flood Report',
'Flood Report Details': 'Chi tiết báo cáo tình hình lũ lụt',
'Flood Report added': 'Báo cáo lũ lụt đã được thêm',
'Flood Report deleted': 'Flood Report deleted',
'Flood Report updated': 'Flood Report updated',
'Flood Reports': 'Flood Reports',
'Flow Status': 'Flow Status',
'Focal Point': 'Tiêu điểm',
'Fog': 'Fog',
'Food': 'Food',
'Food Supply': 'Food Supply',
'Food assistance available/expected': 'Food assistance available/expected',
'Footer': 'Footer',
'Footer file %s missing!': 'Footer file %s missing!',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Đối với mỗi đối tác đồng bộ, có một công việc đồng bộ mặc định chạy sau một khoảng thời gian nhất định. Bạn cũng có thể thiết lập thêm công việc đồng bộ hơn nữa để có thể tùy biến theo nhu cầu. Nhấp vào liên kết bên phải để bắt đầu',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners',
'For live help from the Sahana community on using this application, go to': 'For live help from the Sahana community on using this application, go to',
'For messages that support alert network internal functions': 'For messages that support alert network internal functions',
'For more details on the Sahana Eden system, see the': 'For more details on the Sahana Eden system, see the',
'For more information, see ': 'For more information, see ',
'For:': 'For:',
'Forest Fire': 'Forest Fire',
'Formal camp': 'Trại chính thức',
'Format': 'Định dạng',
'Forms': 'Forms',
'Found': 'Found',
'Freezing Drizzle': 'Freezing Drizzle',
'Freezing Rain': 'Freezing Rain',
'Freezing Spray': 'Freezing Spray',
'French': 'French',
'Friday': 'Friday',
'From Location': 'From Location',
'From Warehouse': 'From Warehouse',
'Frost': 'Frost',
'Full': 'Full',
'Full beard': 'Full beard',
'Fullscreen Map': 'Fullscreen Map',
'Functional Tests': 'Functional Tests',
'Functions available': 'Functions available',
'Funding Organization': 'Funding Organization',
'Funeral': 'Funeral',
'GIS Reports of Shelter': 'GIS Reports of Shelter',
'GIS integration to view location details of the Shelter': 'GIS integration to view location details of the Shelter',
'GPS': 'GPS',
'GPS Marker': 'Đánh dấu GPS',
'GPS Track': 'GPS Track',
'GPS Track File': 'GPS Track File',
'GPX Track': 'GPX Track',
'Gale Wind': 'Gale Wind',
'Gap Analysis': 'Gap Analysis',
'Gap Analysis Map': 'Gap Analysis Map',
'Gap Analysis Report': 'Gap Analysis Report',
'Gap Map': 'Gap Map',
'Gap Report': 'Gap Report',
'Gateway Settings': 'Gateway Settings',
'Gateway settings updated': 'Gateway settings updated',
'Gender': 'Gender',
'General Medical/Surgical': 'General Medical/Surgical',
'General emergency and public safety': 'General emergency and public safety',
'Generator': 'Máy phát điện',
'Geocoder Selection': 'Geocoder Selection',
'Geometry Name': 'Geometry Name',
'Geophysical (inc. landslide)': 'Geophysical (inc. landslide)',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo module not available within the running Python - this needs installing for PDF output!',
'Girls 13-18 yrs in affected area': 'Girls 13-18 yrs in affected area',
'Girls 13-18 yrs not attending school': 'Girls 13-18 yrs not attending school',
'Girls 6-12 yrs in affected area': 'Girls 6-12 yrs in affected area',
'Girls 6-12 yrs not attending school': 'Girls 6-12 yrs not attending school',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).',
'Give information about where and when you have seen the person': 'Give information about where and when you have seen the person',
'Give information about where and when you have seen them': 'Give information about where and when you have seen them',
'Global Messaging Settings': 'Cài đặt hộp thư tin nhắn toàn cầu',
'Goatee': 'Goatee',
'Government': 'Government',
'Government UID': 'Government UID',
'Government building': 'Government building',
'Grade': 'Grade',
'Greek': 'Greek',
'Group': 'Group',
'Group Details': 'Group Details',
'Group ID': 'Group ID',
'Group Member added': 'Group Member added',
'Group Members': 'Group Members',
'Group Memberships': 'Group Memberships',
'Group Title': 'Group Title',
'Group Type': 'Loại nhóm',
'Group added': 'Đã thêm nhóm',
'Group deleted': 'Group deleted',
'Group description': 'Mô tả nhóm',
'Group name': 'Group name',
'Group type': 'Loại nhóm',
'Group updated': 'Group updated',
'Groups': 'Groups',
'Groups removed': 'Groups removed',
'Guest': 'Guest',
'Hail': 'Hail',
'Hair Color': 'Hair Color',
'Hair Length': 'Hair Length',
'Hair Style': 'Kiểu tóc',
'Has data from this Reference Document been entered into Sahana?': 'Has data from this Reference Document been entered into Sahana?',
'Has the safety and security of women and children in your community changed since the emergency?': 'Has the safety and security of women and children in your community changed since the emergency?',
'Has your business been damaged in the course of the disaster?': 'Has your business been damaged in the course of the disaster?',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': 'Have households received any shelter/NFI assistance or is assistance expected in the coming days?',
'Have normal food sources been disrupted?': 'Have normal food sources been disrupted?',
'Have schools received or are expecting to receive any assistance?': 'Have schools received or are expecting to receive any assistance?',
'Have the people received or are you expecting any medical or food assistance in the coming days?': 'Have the people received or are you expecting any medical or food assistance in the coming days?',
'Hazard Pay': 'Hazard Pay',
'Hazardous Material': 'Hazardous Material',
'Hazardous Road Conditions': 'Hazardous Road Conditions',
'Header Background': 'Header Background',
'Header background file %s missing!': 'Header background file %s missing!',
'Headquarters': 'Headquarters',
'Health': 'Health',
'Health care assistance, Rank': 'Health care assistance, Rank',
'Health center': 'Trung tâm y tế',
'Health center with beds': 'Health center with beds',
'Health center without beds': 'Health center without beds',
'Health services functioning prior to disaster': 'Health services functioning prior to disaster',
'Health services functioning since disaster': 'Health services functioning since disaster',
'Healthcare Worker': 'Healthcare Worker',
'Heat Wave': 'Heat Wave',
'Heat and Humidity': 'Heat and Humidity',
'Height': 'Height',
'Height (cm)': 'Height (cm)',
'Help': 'Help',
'Helps to monitor status of hospitals': 'Hỗ trợ giám sát trạng thái các bệnh viện',
'Helps to report and search for Missing Persons': 'Hỗ trợ báo cáo và tìm kếm những người mất tích',
'Here are the solution items related to the problem.': 'Here are the solution items related to the problem.',
'High': 'High',
'High Water': 'High Water',
'Hindu': 'Hindu',
'History': 'Lịch sử',
'Hit the back button on your browser to try again.': 'Nhấn nút Back trên trình duyệt để thử lại.',
'Holiday Address': 'Holiday Address',
'Home': 'Trang chủ',
'Home Address': 'Địa chỉ nhà',
'Home Country': 'Quê quán',
'Home Crime': 'Home Crime',
'Hospital': 'Bệnh viện',
'Hospital Details': 'Chi tiết thông tin bệnh viện',
'Hospital Status Report': 'Báo cáo tình trạng bệnh viện',
'Hospital information added': 'Đã thêm thông tin Bệnh viện',
'Hospital information deleted': 'Đã xóa thông tin bệnh viện',
'Hospital information updated': 'Đã cập nhật thông tin bệnh viện',
'Hospital status assessment.': 'Đánh giá trạng thái bệnh viện',
'Hospitals': 'Bệnh viện',
'Hot Spot': 'Điểm nóng',
'Hourly': 'Hourly',
'Household kits received': 'Household kits received',
'Household kits, source': 'Household kits, source',
'How did boys 13-17yrs spend most of their time prior to the disaster?': 'How did boys 13-17yrs spend most of their time prior to the disaster?',
'How did boys <12yrs spend most of their time prior to the disaster?': 'How did boys <12yrs spend most of their time prior to the disaster?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': 'How did boys girls 13-17yrs spend most of their time prior to the disaster?',
'How did girls <12yrs spend most of their time prior to the disaster?': 'How did girls <12yrs spend most of their time prior to the disaster?',
'How do boys 13-17yrs spend most of their time now?': 'How do boys 13-17yrs spend most of their time now?',
'How do boys <12yrs spend most of their time now?': 'How do boys <12yrs spend most of their time now?',
'How do girls 13-17yrs spend most of their time now?': 'How do girls 13-17yrs spend most of their time now?',
'How do girls <12yrs spend most of their time now?': 'How do girls <12yrs spend most of their time now?',
'How does it work?': 'How does it work?',
'How is this person affected by the disaster? (Select all that apply)': 'How is this person affected by the disaster? (Select all that apply)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': 'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.',
'How long does it take you to walk to the health service?': 'How long does it take you to walk to the health service?',
'How long will the food last?': 'How long will the food last?',
'How long will this water resource last?': 'How long will this water resource last?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'How many Boys (0-17 yrs) are Dead due to the crisis',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'How many Boys (0-17 yrs) are Injured due to the crisis',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Có bao nhiêu bé trai (0 đến 17 tuổi) bị mất tích do thiên tai',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'How many Girls (0-17 yrs) are Dead due to the crisis',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'How many Girls (0-17 yrs) are Injured due to the crisis',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'How many Girls (0-17 yrs) are Missing due to the crisis',
'How many Men (18 yrs+) are Dead due to the crisis': 'Có bao nhiêu nam giới (trên 18 tuổi) thiệt mạng do thiên tai',
'How many Men (18 yrs+) are Injured due to the crisis': 'How many Men (18 yrs+) are Injured due to the crisis',
'How many Men (18 yrs+) are Missing due to the crisis': 'How many Men (18 yrs+) are Missing due to the crisis',
'How many Women (18 yrs+) are Dead due to the crisis': 'How many Women (18 yrs+) are Dead due to the crisis',
'How many Women (18 yrs+) are Injured due to the crisis': 'Có bao nhiêu phụ nữ (trên 18 tuổi) bị thương do thiên tai',
'How many Women (18 yrs+) are Missing due to the crisis': 'How many Women (18 yrs+) are Missing due to the crisis',
'How many days will the supplies last?': 'How many days will the supplies last?',
'How many doctors in the health centers are still actively working?': 'How many doctors in the health centers are still actively working?',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': 'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?',
'How many latrines are available in the village/IDP centre/Camp?': 'How many latrines are available in the village/IDP centre/Camp?',
'How many midwives in the health centers are still actively working?': 'How many midwives in the health centers are still actively working?',
'How many new cases have been admitted to this facility in the past 24h?': 'How many new cases have been admitted to this facility in the past 24h?',
'How many nurses in the health centers are still actively working?': 'How many nurses in the health centers are still actively working?',
'How many of the patients with the disease died in the past 24h at this facility?': 'How many of the patients with the disease died in the past 24h at this facility?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'How many of the primary school age boys (6-12) in the area are not attending school?',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'How many of the primary school age girls (6-12) in the area are not attending school?',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': 'How many of the primary/secondary schools are now open and running a regular schedule of class?',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'How many of the secondary school age boys (13-18) in the area are not attending school?',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'How many of the secondary school age girls (13-18) in the area are not attending school?',
'How many patients with the disease are currently hospitalized at this facility?': 'How many patients with the disease are currently hospitalized at this facility?',
'How many primary school age boys (6-12) are in the affected area?': 'How many primary school age boys (6-12) are in the affected area?',
'How many primary school age girls (6-12) are in the affected area?': 'How many primary school age girls (6-12) are in the affected area?',
'How many primary/secondary schools were opening prior to the disaster?': 'How many primary/secondary schools were opening prior to the disaster?',
'How many secondary school age boys (13-18) are in the affected area?': 'How many secondary school age boys (13-18) are in the affected area?',
'How many secondary school age girls (13-18) are in the affected area?': 'How many secondary school age girls (13-18) are in the affected area?',
'How many teachers have been affected by the disaster (affected = unable to work)?': 'How many teachers have been affected by the disaster (affected = unable to work)?',
'How many teachers worked in the schools prior to the disaster?': 'How many teachers worked in the schools prior to the disaster?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.',
'Humanitarian NGO': 'Humanitarian NGO',
'Hurricane': 'Hurricane',
'Hurricane Force Wind': 'Hurricane Force Wind',
'Hygiene': 'Hygiene',
'Hygiene NFIs': 'Hygiene NFIs',
'Hygiene kits received': 'Hygiene kits received',
'Hygiene kits, source': 'Dụng cụ vệ sinh, nguồn',
'Hygiene practice': 'Hygiene practice',
'Hygiene problems': 'Hygiene problems',
'ID Label': 'ID Label',
'ID Tag': 'ID Tag',
'ID Tag Number': 'ID Tag Number',
'ID type': 'ID type',
'Ice Pressure': 'Áp suất băng',
'Iceberg': 'Iceberg',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'Ideally a full URL to the source file, otherwise just a note on where data came from.',
'Identification': 'Identification',
'Identification Report': 'Identification Report',
'Identification Reports': 'Identification Reports',
'Identification Status': 'Identification Status',
'Identification label of the Storage bin.': 'Nhãn xác định Bin lưu trữ',
'Identified as': 'Identified as',
'Identified by': 'Identified by',
'Identity': 'Identity',
'Identity Details': 'Identity Details',
'Identity added': 'Identity added',
'Identity deleted': 'Identity deleted',
'Identity updated': 'Identity updated',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': 'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.',
'If no marker defined then the system default marker is used': 'If no marker defined then the system default marker is used',
'If no, specify why': 'If no, specify why',
'If the location is a geographic area, then state at what level here.': 'If the location is a geographic area, then state at what level here.',
'If this is set to True then mails will be deleted from the server after downloading.': 'If this is set to True then mails will be deleted from the server after downloading.',
'If this record should be restricted then select which role is required to access the record here.': 'If this record should be restricted then select which role is required to access the record here.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'If this record should be restricted then select which role(s) are permitted to access the record here.',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": "If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.",
'If yes, specify what and by whom': 'If yes, specify what and by whom',
'If yes, which and how': 'nếu có thì cái nào và như thế nào',
"If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to register as a volunteer, you can add them by clicking 'Add Person' below:",
"If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:": "If you cannot find the person you want to report missing, you can add them by clicking 'Add Person' below:",
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": "If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:",
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'If you know what the Geonames ID of this location is then you can enter it here.',
'If you know what the OSM ID of this location is then you can enter it here.': 'If you know what the OSM ID of this location is then you can enter it here.',
'If you need to add a new document then you can click here to attach one.': 'Nếu cần thêm một tài liệu mới, nhấn vào đây để đính kèm',
'If you would like to help, then please': 'If you would like to help, then please',
'Illegal Immigrant': 'Illegal Immigrant',
'Image': 'Image',
'Image Details': 'Hình ảnh chi tiết',
'Image Tags': 'Image Tags',
'Image Type': 'Image Type',
'Image Upload': 'Image Upload',
'Image added': 'Image added',
'Image deleted': 'Image deleted',
'Image updated': 'Image updated',
'Image/Attachment': 'Image/Attachment',
'Image/Other Attachment': 'Image/Other Attachment',
'Imagery': 'Imagery',
'Images': 'Images',
'Immediate reconstruction assistance, Rank': 'Immediate reconstruction assistance, Rank',
'Impact Assessments': 'Impact Assessments',
'Impact Details': 'Impact Details',
'Impact Type': 'Impact Type',
'Impact Type Details': 'Impact Type Details',
'Impact Type added': 'Impact Type added',
'Impact Type deleted': 'Impact Type deleted',
'Impact Type updated': 'Impact Type updated',
'Impact Types': 'Impact Types',
'Impact added': 'Impact added',
'Impact deleted': 'Impact deleted',
'Impact updated': 'Impact updated',
'Impacts': 'Impacts',
'Import': 'Import',
'Import & Export Data': 'Import & Export Data',
'Import Data': 'Import Data',
'Import Job': 'Import Job',
'Import Jobs': 'Công việc nhập dữ liệu',
'Import and Export': 'Import and Export',
'Import from Ushahidi Instance': 'Import from Ushahidi Instance',
'Import if Master': 'Import if Master',
'Import job created': 'Import job created',
'Import multiple tables as CSV': 'Nhập nhiều bảng dưới dạng CSV',
'Import/Export': 'Import/Export',
'Important': 'Quan trọng',
'Importantly where there are no aid services being provided': 'Importantly where there are no aid services being provided',
'Imported': 'Imported',
'Importing data from spreadsheets': 'Importing data from spreadsheets',
'Improper decontamination': 'Improper decontamination',
'Improper handling of dead bodies': 'Improper handling of dead bodies',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Trong GeoServer, đây là tên lớp. Trong WFS getCapabilities, đây là tên FeatureType, phần sau dấu hai chấm (:).',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
'In Inventories': 'In Inventories',
'In Process': 'In Process',
'In Progress': 'In Progress',
'In Transit': 'In Transit',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': 'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?',
'Inbound Mail Settings': 'Inbound Mail Settings',
'Incident': 'Incident',
'Incident Categories': 'Incident Categories',
'Incident Details': 'Incident Details',
'Incident Report': 'Incident Report',
'Incident Report Details': 'Incident Report Details',
'Incident Report added': 'Incident Report added',
'Incident Report deleted': 'Incident Report deleted',
'Incident Report updated': 'Incident Report updated',
'Incident Reporting': 'Incident Reporting',
'Incident Reporting System': 'Incident Reporting System',
'Incident Reports': 'Incident Reports',
'Incident added': 'Incident added',
'Incident deleted': 'Incident deleted',
'Incident updated': 'Incident updated',
'Incidents': 'Incidents',
'Incoming': 'Incoming',
'Incomplete': 'Incomplete',
'Individuals': 'Individuals',
'Industrial Crime': 'Industrial Crime',
'Industry Fire': 'Industry Fire',
'Industry close to village/camp': 'Industry close to village/camp',
'Infant (0-1)': 'Trẻ sơ sinh',
'Infectious Disease': 'Infectious Disease',
'Infectious Diseases': 'Infectious Diseases',
'Infestation': 'Infestation',
'Informal Leader': 'Informal Leader',
'Informal camp': 'Informal camp',
'Information gaps': 'Information gaps',
'Infusion catheters available': 'Infusion catheters available',
'Infusion catheters need per 24h': 'Infusion catheters need per 24h',
'Infusion catheters needed per 24h': 'Infusion catheters needed per 24h',
'Infusions available': 'Infusions available',
'Infusions needed per 24h': 'Infusions needed per 24h',
'Input Job': 'Input Job',
'Instant Porridge': 'Instant Porridge',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.",
'Institution': 'Institution',
'Insufficient': 'Insufficient',
'Insufficient vars: Need module, resource, jresource, instance': 'Insufficient vars: Need module, resource, jresource, instance',
'Intake Items': 'Intake Items',
'Intergovernmental Organisation': 'Intergovernmental Organisation',
'Internal State': 'Internal State',
'International NGO': 'Tổ chức phi chính phủ quốc tế',
'International Organization': 'International Organization',
'International Staff': 'International Staff',
'Intervention': 'Intervention',
'Interview taking place at': 'Interview taking place at',
'Invalid': 'Invalid',
'Invalid Query': 'Truy vấn không hợp lệ',
'Invalid email': 'Invalid email',
'Invalid request!': 'Yêu cầu không hợp lệ',
'Invalid ticket': 'Ticket không hợp lệ',
'Inventories with Item': 'Inventories with Item',
'Inventory': 'Inventory',
'Inventory Item Details': 'Chi tiết hàng hóa trong kho',
'Inventory Item added': 'Bổ sung hàng hóa vào kho lưu trữ.',
'Inventory Item deleted': 'Inventory Item deleted',
'Inventory Item updated': 'Inventory Item updated',
'Inventory Items': 'Inventory Items',
'Inventory Management': 'Inventory Management',
'Inventory Store': 'Inventory Store',
'Inventory Store Details': 'Chi tiết kho lưu trữ',
'Inventory Store added': 'Inventory Store added',
'Inventory Store deleted': 'Inventory Store deleted',
'Inventory Store updated': 'Inventory Store updated',
'Inventory Stores': 'Inventory Stores',
'Inventory of Effects': 'Kho dự phòng',
'Inventory/Ledger': 'Inventory/Ledger',
'Is adequate food and water available for these institutions?': 'Is adequate food and water available for these institutions?',
'Is it safe to collect water?': 'Is it safe to collect water?',
'Is there any industrial or agro-chemical production close to the affected area/village?': 'Is there any industrial or agro-chemical production close to the affected area/village?',
'Issuing Authority': 'Issuing Authority',
'It is built using the Template agreed by a group of NGOs working together as the': 'It is built using the Template agreed by a group of NGOs working together as the',
'Item': 'Item',
'Item Added to Shipment': 'Item Added to Shipment',
'Item Catalog Categories': 'Item Catalog Categories',
'Item Catalog Category': 'Item Catalog Category',
'Item Catalog Category Details': 'Item Catalog Category Details',
'Item Catalog Category added': 'Item Catalog Category added',
'Item Catalog Category deleted': 'Item Catalog Category deleted',
'Item Catalog Category updated': 'Item Catalog Category updated',
'Item Catalog Details': 'Item Catalog Details',
'Item Catalog added': 'Item Catalog added',
'Item Catalog deleted': 'Đã xóa danh mục hàng hóa',
'Item Catalog updated': 'Item Catalog updated',
'Item Catalogs': 'Item Catalogs',
'Item Categories': 'Item Categories',
'Item Category': 'Item Category',
'Item Category Details': 'Item Category Details',
'Item Category added': 'Item Category added',
'Item Category deleted': 'Đã xóa Tiêu chí hàng hóa',
'Item Category updated': 'Item Category updated',
'Item Details': 'Item Details',
'Item Packet Details': 'Item Packet Details',
'Item Packet added': 'Item Packet added',
'Item Packet deleted': 'Item Packet deleted',
'Item Packet updated': 'Item Packet updated',
'Item Packets': 'Item Packets',
'Item Sub-Categories': 'Item Sub-Categories',
'Item Sub-Category': 'Item Sub-Category',
'Item Sub-Category Details': 'Item Sub-Category Details',
'Item Sub-Category added': 'Item Sub-Category added',
'Item Sub-Category deleted': 'Item Sub-Category deleted',
'Item Sub-Category updated': 'Đã cập nhật tiêu chí phụ của hàng hóa',
'Item added': 'Item added',
'Item already in Bundle!': 'Hàng đã có trong Bundle!',
'Item already in Kit!': 'Item already in Kit!',
'Item already in budget!': 'Item already in budget!',
'Item deleted': 'Item deleted',
'Item updated': 'Item updated',
'Items': 'Hàng hóa',
'Items Sent from Warehouse': 'Items Sent from Warehouse',
'Japanese': 'Japanese',
'Jerry can': 'Jerry can',
'Jew': 'Jew',
'Job Title': 'Job Title',
'Jobs': 'Jobs',
'Just Once': 'Just Once',
'KPIs': 'KPIs',
'Key': 'Key',
'Key Details': 'Key Details',
'Key added': 'Key added',
'Key deleted': 'Key deleted',
'Key updated': 'Key updated',
'Keys': 'Keys',
'Kit': 'Kit',
'Kit Contents': 'Kit Contents',
'Kit Details': 'Chi tiết Kit',
'Kit Updated': 'Kit Updated',
'Kit added': 'Kit added',
'Kit deleted': 'Đã xóa Kit',
'Kit updated': 'Kit updated',
'Kits': 'Kits',
'Known Identities': 'Known Identities',
'Known incidents of violence against women/girls': 'Known incidents of violence against women/girls',
'Known incidents of violence since disaster': 'Known incidents of violence since disaster',
'LICENCE': 'bản quyền',
'LICENSE': 'LICENSE',
'LMS Administration': 'Quản trị LMS',
'Label': 'Nhãn',
'Lack of material': 'Lack of material',
'Lack of school uniform': 'Lack of school uniform',
'Lack of supplies at school': 'Lack of supplies at school',
'Lack of transport to school': 'Lack of transport to school',
'Lactating women': 'Lactating women',
'Lahar': 'Lahar',
'Landslide': 'Landslide',
'Language': 'Language',
'Last Name': 'Last Name',
'Last known location': 'Last known location',
'Last name': 'Last name',
'Last synchronization time': 'Last synchronization time',
'Last updated by': 'Last updated by',
'Last updated on': 'Last updated on',
'Latitude': 'Latitude',
'Latitude & Longitude': 'Latitude & Longitude',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.',
'Latitude should be between': 'Latitude should be between',
'Law enforcement, military, homeland and local/private security': 'Law enforcement, military, homeland and local/private security',
'Layer': 'Layer',
'Layer Details': 'Layer Details',
'Layer added': 'Layer added',
'Layer deleted': 'Đã xóa layer',
'Layer updated': 'Đã cập nhật Layer',
'Layers': 'Layers',
'Layers updated': 'Đã cập nhật Layer',
'Layout': 'Layout',
'Legend Format': 'Legend Format',
'Length': 'Độ dài',
'Level': 'Level',
"Level is higher than parent's": "Level is higher than parent's",
'Library support not available for OpenID': 'Library support not available for OpenID',
'Line': 'Line',
'Link Item & Shipment': 'Link Item & Shipment',
'Link an Item & Shipment': 'Link an Item & Shipment',
'Linked Records': 'Linked Records',
'Linked records': 'Linked records',
'List': 'List',
'List / Add Baseline Types': 'List / Add Baseline Types',
'List / Add Impact Types': 'List / Add Impact Types',
'List / Add Services': 'List / Add Services',
'List / Add Types': 'List / Add Types',
'List Activities': 'List Activities',
'List Aid Requests': 'Danh sách Yêu cầu cứu trợ',
'List All': 'List All',
'List All Entries': 'List All Entries',
'List All Memberships': 'Danh sách tất cả các thành viên',
'List Assessment Summaries': 'List Assessment Summaries',
'List Assessments': 'Danh sách Trị giá tính thuế',
'List Baseline Types': 'List Baseline Types',
'List Baselines': 'List Baselines',
'List Budgets': 'List Budgets',
'List Bundles': 'List Bundles',
'List Catalog Items': 'List Catalog Items',
'List Category<>Sub-Category<>Catalog Relation': 'List Category<>Sub-Category<>Catalog Relation',
'List Checklists': 'Danh sách Checklists ',
'List Cluster Subsectors': 'List Cluster Subsectors',
'List Clusters': 'List Clusters',
'List Configs': 'List Configs',
'List Conflicts': 'List Conflicts',
'List Contacts': 'List Contacts',
'List Distribution Items': 'List Distribution Items',
'List Distributions': 'Danh sách ủng hộ,quyên góp',
'List Documents': 'List Documents',
'List Donors': 'List Donors',
'List Feature Classes': 'List Feature Classes',
'List Feature Layers': 'List Feature Layers',
'List Flood Reports': 'List Flood Reports',
'List Groups': 'Danh sách Nhóm',
'List Groups/View Members': 'List Groups/View Members',
'List Hospitals': 'Danh sách Bệnh viện',
'List Identities': 'List Identities',
'List Images': 'List Images',
'List Impact Assessments': 'List Impact Assessments',
'List Impact Types': 'List Impact Types',
'List Impacts': 'List Impacts',
'List Incident Reports': 'List Incident Reports',
'List Incidents': 'List Incidents',
'List Inventory Items': 'List Inventory Items',
'List Inventory Stores': 'List Inventory Stores',
'List Item Catalog Categories': 'List Item Catalog Categories',
'List Item Catalogs': 'List Item Catalogs',
'List Item Categories': 'List Item Categories',
'List Item Packets': 'List Item Packets',
'List Item Sub-Categories': 'List Item Sub-Categories',
'List Items': 'List Items',
'List Keys': 'List Keys',
'List Kits': 'Danh sách Kit',
'List Layers': 'List Layers',
'List Locations': 'Danh sách Vị trí',
'List Log Entries': 'List Log Entries',
'List Markers': 'List Markers',
'List Members': 'List Members',
'List Memberships': 'Danh sách thành viên',
'List Messages': 'Danh sách tin nhắn',
'List Metadata': 'Danh sách dữ liệu',
'List Missing Persons': 'Danh sách những người mất tích',
'List Need Types': 'List Need Types',
'List Needs': 'List Needs',
'List Notes': 'List Notes',
'List Offices': 'List Offices',
'List Organizations': 'List Organizations',
'List Peers': 'List Peers',
'List Personal Effects': 'List Personal Effects',
'List Persons': 'List Persons',
'List Photos': 'List Photos',
'List Positions': 'List Positions',
'List Problems': 'List Problems',
'List Projections': 'List Projections',
'List Projects': 'List Projects',
'List Rapid Assessments': 'List Rapid Assessments',
'List Received Items': 'List Received Items',
'List Received Shipments': 'List Received Shipments',
'List Records': 'List Records',
'List Registrations': 'List Registrations',
'List Reports': 'List Reports',
'List Request Items': 'Danh sách Hang hóa yêu cầu',
'List Requests': 'Danh sách yêu cầu',
'List Resources': 'Danh sách tài nguyên',
'List Responses': 'List Responses',
'List Rivers': 'Danh sách sông',
'List Roles': 'Danh sách Vai trò',
'List Sections': 'List Sections',
'List Sector': 'List Sector',
'List Sent Items': 'List Sent Items',
'List Sent Shipments': 'List Sent Shipments',
'List Service Profiles': 'List Service Profiles',
'List Settings': 'List Settings',
'List Shelter Services': 'List Shelter Services',
'List Shelter Types': 'List Shelter Types',
'List Shelters': 'List Shelters',
'List Shipment Transit Logs': 'List Shipment Transit Logs',
'List Shipment/Way Bills': 'Danh sách Đơn hàng/Phí đường bộ',
'List Shipment<>Item Relation': 'List Shipment<>Item Relation',
'List Shipments': 'List Shipments',
'List Sites': 'List Sites',
'List Skill Types': 'List Skill Types',
'List Skills': 'Danh sách kỹ năng',
'List Solutions': 'List Solutions',
'List Staff': 'Danh sách Nhân viên',
'List Staff Types': 'List Staff Types',
'List Status': 'List Status',
'List Storage Bin Type(s)': 'List Storage Bin Type(s)',
'List Storage Bins': 'List Storage Bins',
'List Storage Location': 'Danh sách vị trí kho lưu trữ',
'List Subscriptions': 'Danh sách Đăng ký',
'List Survey Answers': 'List Survey Answers',
'List Survey Questions': 'Danh sách câu hỏi khảo sát',
'List Survey Sections': 'List Survey Sections',
'List Survey Series': 'List Survey Series',
'List Survey Templates': 'List Survey Templates',
'List Tasks': 'List Tasks',
'List Teams': 'List Teams',
'List Themes': 'List Themes',
'List Tickets': 'Danh sách Ticket',
'List Tracks': 'List Tracks',
'List Units': 'Danh sách đơn vị',
'List Users': 'Danh sách người dùng',
'List Volunteers': 'List Volunteers',
'List Warehouse Items': 'List Warehouse Items',
'List Warehouses': 'List Warehouses',
'List all': 'Hiển thị tất cả',
'List of Items': 'List of Items',
'List of Missing Persons': 'Danh sách những người mất tích',
'List of Peers': 'List of Peers',
'List of Reports': 'List of Reports',
'List of Requests': 'Danh sách yêu cầu',
'List of Spreadsheets': 'List of Spreadsheets',
'List of Spreadsheets uploaded': 'List of Spreadsheets uploaded',
'List of Volunteers for this skills set': 'List of Volunteers for this skills set',
'List of addresses': 'Danh sách các địa chỉ',
'List unidentified': 'List unidentified',
'List/Add': 'Danh sách/Thêm',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Danh sách "Ai làm gì, ở đâu". Cho phép các tổ chức cứu trợ điều phối hoạt động của mình',
'Live Help': 'Trợ giúp',
'Livelihood': 'Livelihood',
'Load Cleaned Data into Database': 'Load Cleaned Data into Database',
'Load Details': 'Load Details',
'Load Raw File into Grid': 'Load Raw File into Grid',
'Load the details to help decide which is the best one to keep out of the 2.': 'Load the details to help decide which is the best one to keep out of the 2.',
'Loading': 'Loading',
'Loading Locations...': 'Loading Locations...',
'Local Name': 'Tên địa phương',
'Local Names': 'Local Names',
'Location': 'Location',
'Location 1': 'Location 1',
'Location 2': 'Location 2',
'Location De-duplicated': 'Location De-duplicated',
'Location Details': 'Location Details',
'Location added': 'Location added',
'Location deleted': 'Đã xóa vị trí',
'Location details': 'Location details',
'Location updated': 'Location updated',
'Location: ': 'Location: ',
'Locations': 'Locations',
'Locations De-duplicator': 'Locations De-duplicator',
'Locations of this level need to have a parent of level': 'Locations of this level need to have a parent of level',
'Locations should be different!': 'Locations should be different!',
'Lockdown': 'Lockdown',
'Log': 'Log',
'Log Entry Details': 'Log Entry Details',
'Log entry added': 'Log entry added',
'Log entry deleted': 'Xóa theo dõi đăng nhập',
'Log entry updated': 'Cập nhật theo dõi đăng nhập',
'Login': 'Đăng nhập',
'Logistics': 'Logistics',
'Logistics Management': 'Logistics Management',
'Logistics Management System': 'Logistics Management System',
'Logo': 'Logo',
'Logo file %s missing!': 'Logo file %s missing!',
'Logout': 'Logout',
'Long Text': 'Long Text',
'Longitude': 'Longitude',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': 'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Kinh độ trải dài theo hướng Đông-Tây. Kinh độ bằng không tại kinh tuyến gốc (Greenwich Mean Time) và dương về phía đông, vắt ngang châu Âu và châu Á. Kinh độ âm về phía tây, qua Đại Tây Dương và châu Mỹ.',
'Longitude should be between': 'Longitude should be between',
'Looting': 'Looting',
'Lost Password': 'Lost Password',
'Low': 'Low',
'Magnetic Storm': 'Magnetic Storm',
'Main cash source': 'Main cash source',
'Main income sources before disaster': 'Main income sources before disaster',
'Major outward damage': 'Major outward damage',
'Make Pledge': 'Make Pledge',
'Make Request': 'Make Request',
'Make a Request': 'Tạo yêu cầu',
'Make a Request for Aid': 'Tạo yêu cầu cứu trợ',
'Make preparations per the <instruction>': 'Make preparations per the <instruction>',
'Male': 'Male',
'Malnutrition present prior to disaster': 'Malnutrition present prior to disaster',
'Manage': 'Manage',
'Manage Category': 'Manage Category',
'Manage Item catalog': 'Manage Item catalog',
'Manage Items Catalog': 'Manage Items Catalog',
'Manage Kits': 'Manage Kits',
'Manage Relief Item Catalogue': 'Manage Relief Item Catalogue',
'Manage Sub-Category': 'Quản lý Tiêu chí phụ',
'Manage Users & Roles': 'Manage Users & Roles',
'Manage Warehouses': 'Manage Warehouses',
'Manage Warehouses/Sites': 'Manage Warehouses/Sites',
'Manage requests of hospitals for assistance.': 'Manage requests of hospitals for assistance.',
'Manage volunteers by capturing their skills, availability and allocation': 'Nắm bắt kỹ năng, khả năng và khu vực hoạt động của tình nguyện viên để quản lý',
'Manager': 'Manager',
'Managing Office': 'Managing Office',
'Managing, Storing and Distributing Catalog Items.': 'Managing, Storing and Distributing Catalog Items.',
'Managing, Storing and Distributing Items.': 'Managing, Storing and Distributing Items.',
'Managing, Storing and Distributing Relief Items': 'Quản lý, Lưu trữ và Quyên góp hàng cứu trợ',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).',
'Mandatory. The URL to access the service.': 'Mandatory. The URL to access the service.',
'Manual': 'Manual',
'Manual Synchronization': 'Manual Synchronization',
'Many': 'Many',
'Map': 'Map',
'Map Height': 'Chiều cao bản đồ',
'Map Service Catalogue': 'Catalogue bản đồ dịch vụ',
'Map Settings': 'Cài đặt bản đồ',
'Map Viewing Client': 'Map Viewing Client',
'Map Width': 'Độ rộng bản đồ',
'Map of Hospitals': 'Bản đồ bệnh viện',
'Mapping': 'Mapping',
'Marine Security': 'Marine Security',
'Marital Status': 'Tình trạng hôn nhân',
'Marker': 'Marker',
'Marker Details': 'Chi tiết Đèn hiệu',
'Marker added': 'Marker added',
'Marker deleted': 'Marker deleted',
'Marker updated': 'Marker updated',
'Markers': 'Markers',
'Master Message Log': 'Master Message Log',
'Master Message Log to process incoming reports & requests': 'Kiểm soát log tin nhắn để xử lý báo cáo và yêu cầu gửi đến',
'Match Percentage': 'Match Percentage',
'Match percentage indicates the % match between these two records': 'Match percentage indicates the % match between these two records',
'Matching Records': 'Matching Records',
'Matrix of Choices (Multiple Answers)': 'Matrix of Choices (Multiple Answers)',
'Matrix of Choices (Only one answer)': 'Matrix of Choices (Only one answer)',
'Matrix of Text Fields': 'Matrix of Text Fields',
'Max Persons per Dwelling': 'Max Persons per Dwelling',
'Maximum Weight': 'Maximum Weight',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.',
'Medical and public health': 'Medical and public health',
'Medicine': 'Medicine',
'Medium': 'Medium',
'Megabytes per Month': 'Megabytes per Month',
'Members': 'Members',
'Membership': 'Membership',
'Membership Details': 'Membership Details',
'Membership added': 'Đã thêm thành viên',
'Membership deleted': 'Membership deleted',
'Membership updated': 'Cập nhật thông tin thành viên',
'Memberships': 'Memberships',
'Message': 'Message',
'Message Details': 'Message Details',
'Message Variable': 'Message Variable',
'Message added': 'Đã thêm tin nhắn',
'Message deleted': 'Message deleted',
'Message sent to outbox': 'Message sent to outbox',
'Message updated': 'Message updated',
'Message variable': 'Message variable',
'Messages': 'Messages',
'Messaging': 'Messaging',
'Messaging settings updated': 'Messaging settings updated',
'Metadata': 'Metadata',
'Metadata Details': 'Metadata Details',
'Metadata added': 'Đã thêm dữ liệu',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'Metadata can be supplied here to be applied to all uploaded photos, if desired.',
'Metadata deleted': 'Metadata deleted',
'Metadata updated': 'Metadata updated',
'Meteorite': 'Meteorite',
'Meteorological (inc. flood)': 'Meteorological (inc. flood)',
'Method used': 'Method used',
'Micronutrient malnutrition prior to disaster': 'Micronutrient malnutrition prior to disaster',
'Middle Name': 'Middle Name',
'Migrants or ethnic minorities': 'Dân di cư hoặc dân tộc thiểu số',
'Military': 'Military',
'Minimum Bounding Box': 'Minimum Bounding Box',
'Minorities participating in coping activities': 'Minorities participating in coping activities',
'Minutes must be a number between 0 and 60': 'Minutes must be a number between 0 and 60',
'Minutes per Month': 'Minutes per Month',
'Minutes should be a number greater than 0 and less than 60': 'Minutes should be a number greater than 0 and less than 60',
'Miscellaneous': 'Miscellaneous',
'Missing': 'Missing',
'Missing Person': 'Người mất tích',
'Missing Person Details': 'Chi tiết về người mất tích',
'Missing Person Reports': 'Báo cáo số người mất tích',
'Missing Persons': 'Người mất tích',
'Missing Persons Registry': 'Missing Persons Registry',
'Missing Persons Report': 'Báo cáo số người mất tích',
'Missing Report': 'Missing Report',
'Missing Senior Citizen': 'Missing Senior Citizen',
'Missing Vulnerable Person': 'Missing Vulnerable Person',
'Mobile': 'Mobile',
'Mobile Assess': 'Mobile Assess',
'Mobile Assess.': 'Mobile Assess.',
'Mobile Basic Assessment': 'Mobile Basic Assessment',
'Mobile Phone': 'Mobile Phone',
'Mode': 'Mode',
'Modem Settings': 'Modem Settings',
'Modem settings updated': 'Modem settings updated',
'Moderator': 'Moderator',
'Modify Information on groups and individuals': 'Modify Information on groups and individuals',
'Modifying data in spreadsheet before importing it to the database': 'Modifying data in spreadsheet before importing it to the database',
'Module Administration': 'Quản trị Mô-đun',
'Module disabled!': 'Module disabled!',
'Module provides access to information on current Flood Levels.': 'Module provides access to information on current Flood Levels.',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.',
'Monday': 'Thứ Hai',
'Monthly Cost': 'Monthly Cost',
'Monthly Salary': 'Monthly Salary',
'Months': 'Months',
'Morgue Status': 'Morgue Status',
'Morgue Units Available': 'Morgue Units Available',
'Mosque': 'Mosque',
'Motorcycle': 'Motorcycle',
'Moustache': 'Moustache',
'Movements (Filter In/Out/Lost)': 'Movements (Filter In/Out/Lost)',
'MultiPolygon': 'MultiPolygon',
'Multiple': 'Multiple',
'Multiple Choice (Multiple Answers)': 'Multiple Choice (Multiple Answers)',
'Multiple Choice (Only One Answer)': 'Multiple Choice (Only One Answer)',
'Multiple Text Fields': 'Multiple Text Fields',
'Multiplicator': 'Multiplicator',
'Muslim': 'Muslim',
'My Tasks': 'My Tasks',
'N/A': 'Không xác định',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.",
'Name': 'Tên',
'Name and/or ID': 'Name and/or ID',
'Name and/or ID Label': 'Name and/or ID Label',
'Name of Storage Bin Type.': 'Tên loại Bin lưu trữ',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name of the file (& optional sub-path) located in views which should be used for footer.',
'Name of the person in local language and script (optional).': 'Name of the person in local language and script (optional).',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.',
'Names can be added in multiple languages': 'Names can be added in multiple languages',
'National ID Card': 'Chứng minh thư',
'National NGO': 'Các tổ chức phi chính phủ ',
'National Staff': 'National Staff',
'Nationality': 'Nationality',
'Nationality of the person.': 'Nationality of the person.',
'Nautical Accident': 'Nautical Accident',
'Nautical Hijacking': 'Nautical Hijacking',
'Need Type': 'Need Type',
'Need Type Details': 'Need Type Details',
'Need Type added': 'Need Type added',
'Need Type deleted': 'Need Type deleted',
'Need Type updated': 'Need Type updated',
'Need Types': 'Need Types',
"Need a 'url' argument!": "Need a 'url' argument!",
'Need added': 'Need added',
'Need deleted': 'Need deleted',
'Need to configure Twitter Authentication': 'Need to configure Twitter Authentication',
'Need to select 2 Locations': 'Need to select 2 Locations',
'Need to specify a Budget!': 'Need to specify a Budget!',
'Need to specify a Kit!': 'Need to specify a Kit!',
'Need to specify a Resource!': 'Need to specify a Resource!',
'Need to specify a bundle!': 'Need to specify a bundle!',
'Need to specify a group!': 'Need to specify a group!',
'Need to specify a location to search for.': 'Cần chọn địa điểm tìm kiếm',
'Need to specify a role!': 'Yêu cầu xác định vai trò',
'Need to specify a table!': 'Need to specify a table!',
'Need to specify a user!': 'Need to specify a user!',
'Need updated': 'Need updated',
'Needs': 'Needs',
'Needs Details': 'Needs Details',
'Needs elaboration!!!': 'Needs elaboration!!!',
'Needs to reduce vulnerability to violence': 'Needs to reduce vulnerability to violence',
'Negative Flow Isolation': 'Negative Flow Isolation',
'Neighbourhood': 'Neighbourhood',
'Neonatal ICU': 'Neonatal ICU',
'Neonatology': 'Neonatology',
'Network': 'Network',
'Neurology': 'Neurology',
'New': 'New',
'New Assessment reported from': 'New Assessment reported from',
'New Checklist': 'Checklist mới',
'New Peer': 'New Peer',
'New Record': 'New Record',
'New Report': 'New Report',
'New Request': 'Yêu cầu mới',
'New Solution Choice': 'New Solution Choice',
'New Synchronization Peer': 'New Synchronization Peer',
'New cases in the past 24h': 'New cases in the past 24h',
'News': 'News',
'Next': 'Next',
'No': 'No',
'No Activities Found': 'No Activities Found',
'No Addresses currently registered': 'Hiện tại chưa đăng ký Địa chỉ',
'No Aid Requests have been made yet': 'Chưa có yêu cầu cứu trợ nào được tạo',
'No Assessment Summaries currently registered': 'No Assessment Summaries currently registered',
'No Assessments currently registered': 'Chưa đăng ký trị giá tính thuế',
'No Baseline Types currently registered': 'No Baseline Types currently registered',
'No Baselines currently registered': 'No Baselines currently registered',
'No Budgets currently registered': 'No Budgets currently registered',
'No Bundles currently registered': 'No Bundles currently registered',
'No Catalog Items currently registered': 'No Catalog Items currently registered',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Hiện tại chưa có Category<>Sub-Category<>Catalog Relation được đăng ký',
'No Checklist available': 'No Checklist available',
'No Cluster Subsectors currently registered': 'No Cluster Subsectors currently registered',
'No Clusters currently registered': 'No Clusters currently registered',
'No Configs currently defined': 'No Configs currently defined',
'No Details currently registered': 'No Details currently registered',
'No Distribution Items currently registered': 'Chưa đăng ký danh sách hàng hóa đóng góp',
'No Distributions currently registered': 'No Distributions currently registered',
'No Documents found': 'No Documents found',
'No Donors currently registered': 'No Donors currently registered',
'No Feature Classes currently defined': 'No Feature Classes currently defined',
'No Feature Layers currently defined': 'No Feature Layers currently defined',
'No Flood Reports currently registered': 'Chưa đăng ký báo cáo lũ lụt',
'No Groups currently defined': 'Hiện tại không xác định được nhóm',
'No Groups currently registered': 'No Groups currently registered',
'No Hospitals currently registered': 'Chưa có bệnh viện nào đăng ký',
'No Identification Report Available': 'No Identification Report Available',
'No Identities currently registered': 'No Identities currently registered',
'No Image': 'Không có ảnh',
'No Images currently registered': 'Hiện tại không có ảnh nào được đăng ký',
'No Impact Types currently registered': 'No Impact Types currently registered',
'No Impacts currently registered': 'No Impacts currently registered',
'No Incident Reports currently registered': 'No Incident Reports currently registered',
'No Incidents currently registered': 'Chưa sự việc nào được đưa lên',
'No Inventory Items currently registered': 'No Inventory Items currently registered',
'No Inventory Stores currently registered': 'No Inventory Stores currently registered',
'No Item Catalog Category currently registered': 'No Item Catalog Category currently registered',
'No Item Catalog currently registered': 'No Item Catalog currently registered',
'No Item Categories currently registered': 'No Item Categories currently registered',
'No Item Packets currently registered': 'No Item Packets currently registered',
'No Item Sub-Category currently registered': 'No Item Sub-Category currently registered',
'No Item currently registered': 'No Item currently registered',
'No Items currently registered': 'No Items currently registered',
'No Items currently requested': 'Hiện tại không có hàng hóa nào được yêu cầu',
'No Keys currently defined': 'No Keys currently defined',
'No Kits currently registered': 'No Kits currently registered',
'No Locations currently available': 'No Locations currently available',
'No Locations currently registered': 'No Locations currently registered',
'No Markers currently available': 'Chưa đăng ký marker ',
'No Members currently registered': 'Chưa đăng ký thành viên',
'No Memberships currently defined': 'Chưa xác nhận đăng ký thành viên',
'No Memberships currently registered': 'Chưa có thông tin đăng ký thành viên',
'No Messages currently in Outbox': 'No Messages currently in Outbox',
'No Metadata currently defined': 'No Metadata currently defined',
'No Need Types currently registered': 'No Need Types currently registered',
'No Needs currently registered': 'No Needs currently registered',
'No Offices currently registered': 'No Offices currently registered',
'No Offices found!': 'No Offices found!',
'No Organizations currently registered': 'No Organizations currently registered',
'No Packets for Item': 'No Packets for Item',
'No People currently registered in this shelter': 'No People currently registered in this shelter',
'No Persons currently registered': 'No Persons currently registered',
'No Persons currently reported missing': 'No Persons currently reported missing',
'No Persons found': 'No Persons found',
'No Photos found': 'Không tìm thấy ảnh nào',
'No Presence Log Entries currently registered': 'No Presence Log Entries currently registered',
'No Problems currently defined': 'No Problems currently defined',
'No Projections currently defined': 'Hiện tại chưa xác định được kế hoạch dự phòng',
'No Projects currently registered': 'Chưa đăng ký dự án',
'No Rapid Assessments currently registered': 'No Rapid Assessments currently registered',
'No Received Items currently registered': 'No Received Items currently registered',
'No Received Shipments': 'No Received Shipments',
'No Records currently available': 'No Records currently available',
'No Records matching the query': 'No Records matching the query',
'No Request Items currently registered': 'No Request Items currently registered',
'No Request Shipments': 'No Request Shipments',
'No Requests have been made yet': 'No Requests have been made yet',
'No Requests match this criteria': 'No Requests match this criteria',
'No Responses currently registered': 'No Responses currently registered',
'No Rivers currently registered': 'No Rivers currently registered',
'No Roles currently defined': 'No Roles currently defined',
'No Sections currently registered': 'No Sections currently registered',
'No Sectors currently registered': 'No Sectors currently registered',
'No Sent Items currently registered': 'No Sent Items currently registered',
'No Sent Shipments': 'No Sent Shipments',
'No Settings currently defined': 'No Settings currently defined',
'No Shelter Services currently registered': 'No Shelter Services currently registered',
'No Shelter Types currently registered': 'No Shelter Types currently registered',
'No Shelters currently registered': 'Hiện tại chưa đăng ký nơi cư trú',
'No Shipment Transit Logs currently registered': 'No Shipment Transit Logs currently registered',
'No Shipment/Way Bills currently registered': 'No Shipment/Way Bills currently registered',
'No Shipment<>Item Relation currently registered': 'No Shipment<>Item Relation currently registered',
'No Sites currently registered': 'No Sites currently registered',
'No Skill Types currently set': 'Chưa cài đặt loại kỹ năng',
'No Solutions currently defined': 'No Solutions currently defined',
'No Staff Types currently registered': 'No Staff Types currently registered',
'No Staff currently registered': 'No Staff currently registered',
'No Storage Bin Type currently registered': 'Chưa đăng ký Loại Bin lưu trữ',
'No Storage Bins currently registered': 'No Storage Bins currently registered',
'No Storage Locations currently registered': 'No Storage Locations currently registered',
'No Subscription available': 'No Subscription available',
'No Survey Answers currently registered': 'No Survey Answers currently registered',
'No Survey Questions currently registered': 'No Survey Questions currently registered',
'No Survey Sections currently registered': 'No Survey Sections currently registered',
'No Survey Series currently registered': 'No Survey Series currently registered',
'No Survey Template currently registered': 'No Survey Template currently registered',
'No Tasks with Location Data': 'No Tasks with Location Data',
'No Tasks with Location Data!': 'No Tasks with Location Data!',
'No Themes currently defined': 'No Themes currently defined',
'No Tickets currently registered': 'Hiện tại chưa đăng ký Ticket ',
'No Tracks currently available': 'No Tracks currently available',
'No Units currently registered': 'Chưa đăng ký tên đơn vị',
'No Users currently registered': 'Chưa đăng ký người dùng',
'No Volunteers currently registered': 'No Volunteers currently registered',
'No Warehouse Items currently registered': 'No Warehouse Items currently registered',
'No Warehouses currently registered': 'No Warehouses currently registered',
'No Warehouses match this criteria': 'No Warehouses match this criteria',
'No access at all': 'Không truy cập',
'No access to this record!': 'No access to this record!',
'No action recommended': 'No action recommended',
'No conflicts logged': 'No conflicts logged',
'No contact information available': 'No contact information available',
'No contacts currently registered': 'Chưa đăng ký thông tin liên lạc',
'No data in this table - cannot create PDF!': 'Không có dữ liệu trong bảng - không thể tạo file PDF',
'No databases in this application': 'No databases in this application',
'No entries found': 'No entries found',
'No entries matching the query': 'No entries matching the query',
'No import jobs': 'No import jobs',
'No linked records': 'Không có bản thu liên quan',
'No location found': 'No location found',
'No location known for this person': 'No location known for this person',
'No location known for this team': 'No location known for this team',
'No locations found for members of this team': 'No locations found for members of this team',
'No locations registered at this level': 'No locations registered at this level',
'No log entries matching the query': 'No log entries matching the query',
'No matching records found.': 'No matching records found.',
'No messages in the system': 'No messages in the system',
'No notes available': 'No notes available',
'No peers currently registered': 'No peers currently registered',
'No pending registrations found': 'Không tìm thấy đăng ký đang chờ',
'No pending registrations matching the query': 'No pending registrations matching the query',
'No person record found for current user.': 'No person record found for current user.',
'No positions currently registered': 'No positions currently registered',
'No problem group defined yet': 'No problem group defined yet',
'No records matching the query': 'No records matching the query',
'No records to delete': 'Không có bản thu để xóa',
'No recovery reports available': 'No recovery reports available',
'No report available.': 'Không có báo cáo',
'No reports available.': 'No reports available.',
'No reports currently available': 'No reports currently available',
'No requests found': 'Không tìm thấy yêu cầu',
'No resources currently registered': 'No resources currently registered',
'No resources currently reported': 'No resources currently reported',
'No service profile available': 'No service profile available',
'No skills currently set': 'No skills currently set',
'No status information available': 'No status information available',
'No synchronization': 'Chưa đồng bộ hóa',
'No tasks currently registered': 'No tasks currently registered',
'No template found!': 'Không tìm thấy mẫu',
'No units currently registered': 'No units currently registered',
'No volunteer information registered': 'Chưa đăng ký thông tin tình nguyện viên',
'None': 'None',
'None (no such record)': 'None (no such record)',
'Noodles': 'Mì',
'Normal': 'Normal',
'Normal food sources disrupted': 'Normal food sources disrupted',
'Not Applicable': 'Not Applicable',
'Not Authorised!': 'Không được phép!',
'Not Possible': 'Not Possible',
'Not Set': 'Not Set',
'Not authorised!': 'Not authorised!',
'Not installed or incorrectly configured.': 'Chưa cài đặt hoặc tùy chỉnh chưa đúng',
'Note': 'Note',
'Note Details': 'Note Details',
'Note Status': 'Note Status',
'Note Type': 'Note Type',
'Note added': 'Note added',
'Note deleted': 'Note deleted',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.': 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead.',
'Note updated': 'Note updated',
'Notes': 'Notes',
'Notice to Airmen': 'Lưu ý đối với các phi công',
'Number': 'Số',
'Number of Columns': 'Number of Columns',
'Number of Patients': 'Number of Patients',
'Number of Rows': 'Số hàng',
'Number of Vehicles': 'Number of Vehicles',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.',
'Number of alternative places for studying': 'Số địa điểm có thể dùng làm trường học tạm thời',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Number of available/vacant beds of that type in this unit at the time of reporting.',
'Number of deaths during the past 24 hours.': 'Number of deaths during the past 24 hours.',
'Number of discharged patients during the past 24 hours.': 'Number of discharged patients during the past 24 hours.',
'Number of doctors': 'Number of doctors',
'Number of doctors actively working': 'Number of doctors actively working',
'Number of houses damaged, but usable': 'Number of houses damaged, but usable',
'Number of houses destroyed/uninhabitable': 'Number of houses destroyed/uninhabitable',
'Number of in-patients at the time of reporting.': 'Number of in-patients at the time of reporting.',
'Number of latrines': 'Number of latrines',
'Number of midwives actively working': 'Number of midwives actively working',
'Number of newly admitted patients during the past 24 hours.': 'Số lượng bệnh nhân tiếp nhận trong 24h qua',
'Number of non-medical staff': 'Number of non-medical staff',
'Number of nurses': 'Number of nurses',
'Number of nurses actively working': 'Number of nurses actively working',
'Number of private schools': 'Số lượng trường tư',
'Number of public schools': 'Number of public schools',
'Number of religious schools': 'Number of religious schools',
'Number of schools damaged but usable': 'Number of schools damaged but usable',
'Number of schools destroyed/uninhabitable': 'Number of schools destroyed/uninhabitable',
'Number of schools open before disaster': 'Number of schools open before disaster',
'Number of schools open now': 'Number of schools open now',
'Number of teachers affected by disaster': 'Number of teachers affected by disaster',
'Number of teachers before disaster': 'Number of teachers before disaster',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Số các giường bệnh trống trong bệnh viện. Tự động cập nhật từ các báo cáo hàng ngày.',
'Number of vacant/available units to which victims can be transported immediately.': 'Number of vacant/available units to which victims can be transported immediately.',
'Number or Label on the identification tag this person is wearing (if any).': 'Number or Label on the identification tag this person is wearing (if any).',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Number/Percentage of affected population that is Female & Aged 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Number/Percentage of affected population that is Female & Aged 13-17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Number/Percentage of affected population that is Female & Aged 18-25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Number/Percentage of affected population that is Female & Aged 26-60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Number/Percentage of affected population that is Female & Aged 6-12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Number/Percentage of affected population that is Female & Aged 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Đối tượng nam trong độ tuổi 0-5 chịu ảnh hưởng từ thiên tai',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Number/Percentage of affected population that is Male & Aged 13-17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Number/Percentage of affected population that is Male & Aged 18-25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Đối tượng là Nam giới và trong độ tuổi từ 26-60 chịu ảnh hưởng lớn từ thiên tai',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Number/Percentage of affected population that is Male & Aged 6-12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Number/Percentage of affected population that is Male & Aged 61+',
'Numbers Only': 'Chỉ dùng số',
'Nursery Beds': 'Nursery Beds',
'Nutrition': 'Dinh dưỡng',
'OK': 'OK',
'OR Reason': 'OR Reason',
'OR Status': 'OR Status',
'OR Status Reason': 'OR Status Reason',
'Observer': 'Observer',
'Obstetrics/Gynecology': 'Sản khoa/Phụ khoa',
'Office': 'Office',
'Office Address': 'Địa chỉ văn phòng',
'Office Details': 'Office Details',
'Office added': 'Đã thêm Văn phòng',
'Office deleted': 'Đã xóa Văn phòng',
'Office updated': 'Office updated',
'Offices': 'Offices',
'Offline Sync': 'Offline Sync',
'Offline Sync (from USB/File Backup)': 'Offline Sync (from USB/File Backup)',
'Old': 'Old',
'Older people as primary caregivers of children': 'Older people as primary caregivers of children',
'Older people in care homes': 'Older people in care homes',
'Older people participating in coping activities': 'Older people participating in coping activities',
'Older people with chronical illnesses': 'Older people with chronical illnesses',
'Older person (>60 yrs)': 'Older person (>60 yrs)',
'On by default?': 'Bật theo mặc định',
'On by default? (only applicable to Overlays)': 'On by default? (only applicable to Overlays)',
'One Time Cost': 'One Time Cost',
'One time cost': 'One time cost',
'One-time': 'One-time',
'One-time costs': 'One-time costs',
'Oops! Something went wrong...': 'Oops! Something went wrong...',
'Oops! something went wrong on our side.': 'Oops! something went wrong on our side.',
'Open': 'Open',
'Open Assessment': 'Open Assessment',
'Open area': 'Open area',
'Open recent': 'Open recent',
'Operating Rooms': 'Operating Rooms',
'Optional link to an Incident which this Assessment was triggered by.': 'Optional link to an Incident which this Assessment was triggered by.',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.",
'Options': 'Tùy chọn',
'Organisation': 'Organisation',
'Organization': 'Tổ chức',
'Organization Details': 'Chi tiết Tổ chức',
'Organization Registry': 'Đăng ký tổ chức',
'Organization added': 'Organization added',
'Organization deleted': 'Organization deleted',
'Organization updated': 'Organization updated',
'Organizations': 'Tổ chức',
'Origin': 'Origin',
'Origin of the separated children': 'Origin of the separated children',
'Other': 'Other',
'Other (describe)': 'Other (describe)',
'Other (specify)': 'Other (specify)',
'Other Evidence': 'Bằng chứng khác',
'Other Faucet/Piped Water': 'Vòi nước/nước máy khác',
'Other Isolation': 'Những vùng bị cô lập khác',
'Other Name': 'Other Name',
'Other activities of boys 13-17yrs': 'Các hoạt động khác của nam thanh niên từ 13-17 tuổi',
'Other activities of boys 13-17yrs before disaster': 'Other activities of boys 13-17yrs before disaster',
'Other activities of boys <12yrs': 'Other activities of boys <12yrs',
'Other activities of boys <12yrs before disaster': 'Các hoạt động khác của bé trai dưới 12 tuổi trước khi xảy ra thiên tai',
'Other activities of girls 13-17yrs': 'Other activities of girls 13-17yrs',
'Other activities of girls 13-17yrs before disaster': 'Other activities of girls 13-17yrs before disaster',
'Other activities of girls<12yrs': 'Other activities of girls<12yrs',
'Other activities of girls<12yrs before disaster': 'Other activities of girls<12yrs before disaster',
'Other alternative infant nutrition in use': 'Other alternative infant nutrition in use',
'Other alternative places for study': 'Những nơi có thể dùng làm trường học tạm thời',
'Other assistance needed': 'Các hỗ trợ cần thiết',
'Other assistance, Rank': 'Những sự hỗ trợ khác,thứ hạng',
'Other current health problems, adults': 'Other current health problems, adults',
'Other current health problems, children': 'Other current health problems, children',
'Other events': 'Other events',
'Other factors affecting school attendance': 'Những yếu tố khác ảnh hưởng đến việc đến trường',
'Other major expenses': 'Other major expenses',
'Other school assistance received': 'Other school assistance received',
'Other school assistance, details': 'Other school assistance, details',
'Other school assistance, source': 'Other school assistance, source',
'Other side dishes in stock': 'Other side dishes in stock',
'Other types of water storage containers': 'Other types of water storage containers',
'Other ways to obtain food': 'Other ways to obtain food',
'Outbound Mail settings are configured in models/000_config.py.': 'Outbound Mail settings are configured in models/000_config.py.',
'Outbox': 'Outbox',
'Outgoing SMS Handler': 'Outgoing SMS Handler',
'Outgoing SMS handler': 'Outgoing SMS handler',
'Overland Flow Flood': 'Overland Flow Flood',
'Owned Resources': 'Owned Resources',
'PDAM': 'PDAM',
'PIN': 'PIN',
'PIN number ': 'PIN number ',
'PL Women': 'PL Women',
'Packet': 'Packet',
'Parameters': 'Parameters',
'Parent': 'Parent',
'Parent Office': 'Parent Office',
"Parent level should be higher than this record's level. Parent level is": "Parent level should be higher than this record's level. Parent level is",
'Parent needs to be of the correct level': 'Parent needs to be of the correct level',
'Parent needs to be set': 'Parent needs to be set',
'Parent needs to be set for locations of level': 'Parent needs to be set for locations of level',
'Parents/Caregivers missing children': 'Parents/Caregivers missing children',
'Participant': 'Participant',
'Pashto': 'Pashto',
'Passport': 'Passport',
'Password': 'Password',
"Password fields don't match": "Password fields don't match",
'Pathology': 'Pathology',
'Patients': 'Bệnh nhân',
'Pediatric ICU': 'Pediatric ICU',
'Pediatric Psychiatric': 'Pediatric Psychiatric',
'Pediatrics': 'Khoa Nhi',
'Peer': 'Peer',
'Peer Details': 'Peer Details',
'Peer Registration': 'Peer Registration',
'Peer Registration Details': 'Peer Registration Details',
'Peer Registration Request': 'Yêu cầu đăng ký',
'Peer Type': 'Peer Type',
'Peer UID': 'Peer UID',
'Peer added': 'Peer added',
'Peer deleted': 'Peer deleted',
'Peer not allowed to push': 'Peer not allowed to push',
'Peer registration request added': 'Đã thêm yêu cầu đăng ký',
'Peer registration request deleted': 'Peer registration request deleted',
'Peer registration request updated': 'Cập nhật yêu cầu đăng ký',
'Peer updated': 'Peer updated',
'Peers': 'Peers',
'Pending Requests': 'Yêu cầu đang chờ',
'People': 'People',
'People Needing Food': 'People Needing Food',
'People Needing Shelter': 'People Needing Shelter',
'People Needing Water': 'People Needing Water',
'People Trapped': 'People Trapped',
'People with chronical illnesses': 'People with chronical illnesses',
'Person': 'Cá nhân',
'Person 1': 'Person 1',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1, Person 2 are the potentially duplicate records',
'Person 2': 'Person 2',
'Person Data': 'Person Data',
'Person De-duplicator': 'Person De-duplicator',
'Person Details': 'Chi tiết cá nhân',
'Person Finder': 'Person Finder',
'Person Registry': 'Person Registry',
'Person added': 'Person added',
'Person deleted': 'Person deleted',
'Person details updated': 'Person details updated',
'Person interviewed': 'Person interviewed',
'Person missing': 'Person missing',
'Person reporting': 'Person reporting',
'Person who has actually seen the person/group.': 'Person who has actually seen the person/group.',
'Person who is reporting about the presence.': 'Person who is reporting about the presence.',
'Person who observed the presence (if different from reporter).': 'Người quan sát tình hình (nếu khác với phóng viên)',
'Person/Group': 'Person/Group',
'Personal Data': 'Personal Data',
'Personal Effects': 'Personal Effects',
'Personal Effects Details': 'Chi tiết ảnh hưởng cá nhân',
'Personal impact of disaster': 'Personal impact of disaster',
'Persons': 'Cá nhân',
'Persons with disability (mental)': 'Người tàn tật (về tinh thần)',
'Persons with disability (physical)': 'Người tàn tật (về thể chất)',
'Phone': 'Phone',
'Phone 1': 'Điện thoại 1',
'Phone 2': 'Điện thoại 2',
"Phone number to donate to this organization's relief efforts.": 'Số điện thoại để ủng hộ cho nỗ lực cứu trợ của tổ chức này',
'Phone/Business': 'Phone/Business',
'Phone/Emergency': 'Phone/Emergency',
'Phone/Exchange': 'Phone/Exchange',
'Photo': 'Photo',
'Photo Details': 'Chi tiết ảnh',
'Photo added': 'Photo added',
'Photo deleted': 'Photo deleted',
'Photo updated': 'Photo updated',
'Photograph': 'Photograph',
'Photos': 'Photos',
'Physical Description': 'Physical Description',
'Picture upload and finger print upload facility': 'Picture upload and finger print upload facility',
'Place for solid waste disposal': 'Place for solid waste disposal',
'Place of Recovery': 'Place of Recovery',
'Places the children have been sent to': 'Places the children have been sent to',
'Playing': 'Playing',
"Please come back after sometime if that doesn't help.": "Please come back after sometime if that doesn't help.",
'Please correct all errors.': 'Please correct all errors.',
'Please enter a First Name': 'Please enter a First Name',
'Please enter a valid email address': 'Please enter a valid email address',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Please enter the first few letters of the Person/Group for the autocomplete.',
'Please enter the recipient': 'Please enter the recipient',
'Please fill this!': 'Please fill this!',
'Please report here where you are:': 'Please report here where you are:',
'Please select another level': 'Please select another level',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Trường này được dùng để lưu các thông tin thêm, bao gồm lịch sử theo dõi của hồ sơ nếu nó được cập nhật.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.',
'Pledge': 'Pledge',
'Pledge Aid': 'Pledge Aid',
'Pledge Aid to match these Requests': 'Pledge Aid to match these Requests',
'Pledge Status': 'Pledge Status',
'Pledge Support': 'Pledge Support',
'Pledged': 'Pledged',
'Pledges': 'Pledges',
'Point': 'Point',
'Poisoning': 'Poisoning',
'Poisonous Gas': 'Poisonous Gas',
'Police': 'Police',
'Pollution and other environmental': 'Ô nhiễm và các vấn đề môi trường khác',
'Polygon': 'Polygon',
'Population': 'Population',
'Porridge': 'Cháo yến mạch',
'Port': 'Port',
'Port Closure': 'Port Closure',
'Position Details': 'Position Details',
'Position added': 'Position added',
'Position deleted': 'Position deleted',
'Position type': 'Position type',
'Position updated': 'Position updated',
'Positions': 'Positions',
'Postcode': 'Postcode',
'Poultry': 'Poultry',
'Poultry restocking, Rank': 'Thu mua gia cầm, thứ hạng',
'Pounds': 'Pounds',
'Power Failure': 'Power Failure',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Preferred Name': 'Preferred Name',
'Pregnant women': 'Pregnant women',
'Preliminary': 'Preliminary',
'Presence': 'Presence',
'Presence Condition': 'Presence Condition',
'Presence Log': 'Presence Log',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.",
'Previous': 'Previous',
'Primary Name': 'Primary Name',
'Priority': 'Ưu tiên',
'Priority Level': 'Priority Level',
'Private': 'Private',
'Problem': 'Problem',
'Problem Administration': 'Quản lý vấn đề',
'Problem Details': 'Problem Details',
'Problem Group': 'Problem Group',
'Problem Title': 'Problem Title',
'Problem added': 'Problem added',
'Problem deleted': 'Problem deleted',
'Problem updated': 'Đã cập nhật vấn đề',
'Problems': 'Vấn đề',
'Procedure': 'Procedure',
'Procurements': 'Procurements',
'Product Description': 'Product Description',
'Product Name': 'Product Name',
'Profile': 'Profile',
'Project': 'Project',
'Project Activities': 'Các hoạt động của dự án',
'Project Details': 'Project Details',
'Project Management': 'Project Management',
'Project Status': 'Project Status',
'Project Tracking': 'Project Tracking',
'Project added': 'Dự án đã được thêm',
'Project deleted': 'Project deleted',
'Project has no Lat/Lon': 'Project has no Lat/Lon',
'Project updated': 'Project updated',
'Projection': 'Projection',
'Projection Details': 'Projection Details',
'Projection added': 'Projection added',
'Projection deleted': 'Projection deleted',
'Projection updated': 'Đã cập nhật phép chiếu',
'Projections': 'Projections',
'Projects': 'Projects',
'Protected resource': 'Protected resource',
'Protection': 'Protection',
'Provide Metadata for your media files': 'Provide Metadata for your media files',
'Provide a password': 'Provide a password',
'Province': 'Tỉnh/thành',
'Proxy-server': 'Proxy-server',
'Psychiatrics/Adult': 'Psychiatrics/Adult',
'Psychiatrics/Pediatric': 'Khoa thần kinh/Khoa nhi',
'Public': 'Public',
'Public Event': 'Public Event',
'Public and private transportation': 'Phương tiện vận chuyển công cộng và cá nhân',
'Pull tickets from external feed': 'Pull tickets from external feed',
'Punjabi': 'Punjabi',
'Push tickets to external system': 'Push tickets to external system',
'Put a choice in the box': 'Put a choice in the box',
'Pyroclastic Flow': 'Pyroclastic Flow',
'Pyroclastic Surge': 'Núi lửa phun',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial module not available within the running Python - this needs installing to activate the Modem',
'Quantity': 'Quantity',
'Quarantine': 'Quarantine',
'Queries': 'Queries',
'Query': 'Query',
'Queryable?': 'Queryable?',
'RECORD A': 'RECORD A',
'RECORD B': 'RECORD B',
'RESPONSE': 'RESPONSE',
'Race': 'Race',
'Radiological Hazard': 'Radiological Hazard',
'Radiology': 'Radiology',
'Railway Accident': 'Tại nạn đường sắt',
'Railway Hijacking': 'Railway Hijacking',
'Rain Fall': 'Rain Fall',
'Rapid Assessment': 'Rapid Assessment',
'Rapid Assessment Details': 'Rapid Assessment Details',
'Rapid Assessment added': 'Rapid Assessment added',
'Rapid Assessment deleted': 'Rapid Assessment deleted',
'Rapid Assessment updated': 'Rapid Assessment updated',
'Rapid Assessments': 'Rapid Assessments',
'Rapid Assessments & Flexible Impact Assessments': 'Rapid Assessments & Flexible Impact Assessments',
'Rapid Close Lead': 'Rapid Close Lead',
'Rating Scale': 'Rating Scale',
'Raw Database access': 'Raw Database access',
'Real World Arbitrary Units': 'Real World Arbitrary Units',
'Receive': 'Receive',
'Receive Items': 'Receive Items',
'Receive Shipment': 'Receive Shipment',
'Received': 'Received',
'Received By': 'Received By',
'Received Item Details': 'Received Item Details',
'Received Item added': 'Received Item added',
'Received Item deleted': 'Received Item deleted',
'Received Item updated': 'Received Item updated',
'Received Items': 'Received Items',
'Received Items added to Warehouse Items': 'Received Items added to Warehouse Items',
'Received Shipment Details': 'Received Shipment Details',
'Received Shipment canceled': 'Received Shipment canceled',
'Received Shipment updated': 'Received Shipment updated',
'Received Shipments': 'Received Shipments',
'Recipient': 'Recipient',
'Recipients': 'Người nhận viện trợ',
'Record Details': 'Record Details',
'Record ID': 'Record ID',
'Record Saved': 'Record Saved',
'Record added': 'Hồ sơ đã được thêm',
'Record deleted': 'Record deleted',
'Record last updated': 'Record last updated',
'Record not found!': 'Record not found!',
'Record updated': 'Record updated',
'Records': 'Records',
'Recovery': 'Recovery',
'Recovery Request': 'Phục hồi yêu cầu',
'Recovery Request added': 'Đã thêm yêu cầu phục hồi',
'Recovery Request deleted': 'Đã xóa yêu cầu phục hồi',
'Recovery Request updated': 'Cập nhật Yêu cầu phục hồi',
'Recovery Requests': 'Phục hồi yêu cầu',
'Recovery report added': 'Recovery report added',
'Recovery report deleted': 'Recovery report deleted',
'Recovery report updated': 'Recovery report updated',
'Recurring': 'Định kỳ',
'Recurring Cost': 'Recurring Cost',
'Recurring cost': 'Recurring cost',
'Recurring costs': 'Chi phí định kỳ',
'Reference Document': 'Reference Document',
'Regional': 'Địa phương',
'Register': 'Register',
'Register Person': 'Đăng ký Cá nhân',
'Register Person into this Shelter': 'Register Person into this Shelter',
'Register them as a volunteer': 'Register them as a volunteer',
'Registered People': 'Registered People',
'Registered users can': 'Người dùng đã đăng ký có thể',
'Registering ad-hoc volunteers willing to contribute': 'Registering ad-hoc volunteers willing to contribute',
'Registration': 'Registration',
'Registration Details': 'Registration Details',
'Registration added': 'Bản đăng ký đã được thêm',
'Registration entry deleted': 'Registration entry deleted',
'Registration key': 'Registration key',
'Registration updated': 'Registration updated',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'Rehabilitation/Long Term Care': 'Rehabilitation/Long Term Care',
'Reliable access to sanitation/hygiene items': 'Reliable access to sanitation/hygiene items',
'Relief': 'Relief',
'Relief Item Catalog': 'Relief Item Catalog',
'Relief Team': 'Relief Team',
'Religion': 'Religion',
'Religious Leader': 'Religious Leader',
'Relocate as instructed in the <instruction>': 'Relocate as instructed in the <instruction>',
'Remove': 'Remove',
'Repeat your password': 'Repeat your password',
'Replace': 'Replace',
'Replace if Master': 'Replace if Master',
'Replace if Newer': 'Thay thế nếu mới hơn',
'Report': 'Report',
'Report Another Assessment...': 'Report Another Assessment...',
'Report Details': 'Report Details',
'Report Resource': 'Report Resource',
'Report Type': 'Loại báo cáo',
'Report Types Include': 'Report Types Include',
'Report a Problem with the Software': 'Báo cáo lỗi phần mềm',
'Report added': 'Đã thêm báo cáo',
'Report deleted': 'Đã xóa báo cáo',
'Report my location': 'Report my location',
'Report that person missing': 'Report that person missing',
'Report the contributing factors for the current EMS status.': 'Báo cáo các nhân tố đóng góp cho tình trạng EMS hiện tại.',
'Report the contributing factors for the current OR status.': 'Report the contributing factors for the current OR status.',
'Report the person as found': 'Report the person as found',
'Report them as found': 'Report them as found',
'Report them missing': 'Report them missing',
'Report updated': 'Report updated',
'ReportLab module not available within the running Python - this needs installing for PDF output!': 'ReportLab module not available within the running Python - this needs installing for PDF output!',
'Reporter': 'Reporter',
'Reporter Name': 'Reporter Name',
'Reporting on the projects in the region': 'Reporting on the projects in the region',
'Reports': 'Reports',
'Request': 'Yêu cầu',
'Request Added': 'Request Added',
'Request Canceled': 'Request Canceled',
'Request Details': 'Chi tiết yêu cầu',
'Request Item': 'Request Item',
'Request Item Details': 'Chi tiết yêu cầu hàng hóa',
'Request Item added': 'Đã thêm yêu cầu hàng hóa',
'Request Item deleted': 'Xóa yêu cầu hàng hóa',
'Request Item updated': 'Đã cập nhật hàng hóa yêu cầu',
'Request Items': 'Yêu cầu hàng hóa',
'Request Type': 'Loại yêu cầu',
'Request Updated': 'Request Updated',
'Request added': 'Request added',
'Request deleted': 'Request deleted',
'Request for Role Upgrade': 'Yêu cầu nâng cấp vai trò',
'Request updated': 'Request updated',
'Request, Response & Session': 'Yêu cầu, Phản hồi và Tương tác',
'Requested': 'Đã yêu cầu',
'Requested By Location': 'Requested By Location',
'Requested From Warehouse': 'Requested From Warehouse',
'Requested by': 'Yêu cầu bởi',
'Requested on': 'Requested on',
'Requester': 'Requester',
'Requestor': 'Người yêu cầu',
'Requests': 'Yêu cầu',
'Requests From': 'Requests From',
'Requests for Item': 'Yêu cầu hàng hóa',
'Requires Login!': 'Requires Login!',
'Requires login': 'Requires login',
'Rescue and recovery': 'Rescue and recovery',
'Reset': 'Reset',
'Reset Password': 'Đặt lại mật khẩu',
'Reset form': 'Đặt lại mẫu',
'Resolve': 'Resolve',
'Resolve Conflict': 'Resolve Conflict',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.',
'Resource': 'Resource',
'Resource Details': 'Resource Details',
'Resource added': 'Resource added',
'Resource deleted': 'Resource deleted',
'Resource updated': 'Resource updated',
'Resources': 'Tài nguyên',
'Respiratory Infections': 'Respiratory Infections',
'Response': 'Response',
'Response Details': 'Response Details',
'Response added': 'Response added',
'Response deleted': 'Xóa phản hồi',
'Response updated': 'Response updated',
'Responses': 'Responses',
'Restricted Access': 'Restricted Access',
'Restrictions': 'Restrictions',
'Results': 'Results',
'Retail Crime': 'Retail Crime',
'Retrieve Password': 'Retrieve Password',
'Rice': 'Rice',
'Riot': 'Riot',
'River': 'River',
'River Details': 'Chi tiết Sông',
'River added': 'River added',
'River deleted': 'River deleted',
'River updated': 'River updated',
'Rivers': 'Rivers',
'Road Accident': 'Tai nạn giao thông đường bộ',
'Road Closed': 'Road Closed',
'Road Conditions': 'Road Conditions',
'Road Delay': 'Road Delay',
'Road Hijacking': 'Road Hijacking',
'Road Usage Condition': 'Điều kiện lưu thông đường bộ',
'Role': 'Role',
'Role Details': 'Chi tiết vai trò',
'Role Manager': 'Role Manager',
'Role Required': 'Role Required',
'Role Updated': 'Role Updated',
'Role added': 'Role added',
'Role deleted': 'Role deleted',
'Role updated': 'Role updated',
'Role-based': 'Role-based',
'Roles': 'Roles',
'Roles Permitted': 'Roles Permitted',
'Roof tile': 'Roof tile',
'Row Choices (One Per Line)': 'Row Choices (One Per Line)',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Run Functional Tests': 'Kiểm thử chức năng',
'Run Interval': 'Run Interval',
'Running Cost': 'Running Cost',
'SITUATION': 'SITUATION',
'Safe environment for vulnerable groups': 'Safe environment for vulnerable groups',
'Safety of children and women affected by disaster': 'Safety of children and women affected by disaster',
'Sahana Administrator': 'Quản trị viên Sahana',
'Sahana Agasti': 'Sahana Agasti',
'Sahana Blue': 'Sahana Blue',
'Sahana Community Chat': 'Sahana Community Chat',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> Other',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)',
'Sahana Eden <=> Sahana Eden': 'Sahana Eden <=> Sahana Eden',
'Sahana Eden Disaster Management Platform': 'Sahana Eden Disaster Management Platform',
'Sahana Eden Open Source Disaster Management Platform': 'Sahana Eden Open Source Disaster Management Platform',
'Sahana Eden Website': 'Website Sahana Eden',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.',
'Sahana FOSS Disaster Management System': 'Sahana FOSS Disaster Management System',
'Sahana Green': 'Sahana Green',
'Sahana Login Approval Pending': 'Chờ chấp nhận đăng nhập vào Sahana',
'Sahana Steel': 'Thép Sahana',
'Sahana access granted': 'Sahana access granted',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: new request has been made. Please login to see if you can fulfil the request.',
'Salted Fish': 'Salted Fish',
'Salvage material usable from destroyed houses': 'Salvage material usable from destroyed houses',
'Salvage material usable from destroyed schools': 'Salvage material usable from destroyed schools',
'Sanitation problems': 'Sanitation problems',
'Satellite': 'Vệ tinh',
'Satellite Office': 'Satellite Office',
'Saturday': 'Saturday',
'Save': 'Lưu',
'Save any Changes in the one you wish to keep': 'Lưu mọi thay đổi ở bất kỳ nơi nào bạn muốn',
'Saved.': 'Saved.',
'Saving...': 'Saving...',
'Scale of Results': 'Phạm vi của kết quả',
'Schedule': 'Lịch trình',
'School': 'School',
'School Closure': 'School Closure',
'School Lockdown': 'School Lockdown',
'School Reports': 'School Reports',
'School Teacher': 'School Teacher',
'School assistance received/expected': 'School assistance received/expected',
'School destroyed': 'School destroyed',
'School heavily damaged': 'School heavily damaged',
'School tents received': 'School tents received',
'School tents, source': 'School tents, source',
'School used for other purpose': 'School used for other purpose',
'School/studying': 'School/studying',
'Schools': 'Schools',
'Search': 'Tìm kiếm',
'Search & List Bin Types': 'Search & List Bin Types',
'Search & List Bins': 'Search & List Bins',
'Search & List Catalog': 'Tìm kiếm và liệt kê các danh mục',
'Search & List Category': 'Tìm và liệt kê danh mục',
'Search & List Items': 'Tìm kiếm và hiển thị danh sách hàng hóa',
'Search & List Locations': 'Tìm và liệt kê các địa điểm',
'Search & List Site': 'Search & List Site',
'Search & List Sub-Category': 'Tìm kiếm và lên danh sách Tiêu chí phụ',
'Search & List Unit': 'Search & List Unit',
'Search Activities': 'Tìm kiếm các hoạt động',
'Search Activity Report': 'Search Activity Report',
'Search Addresses': 'Search Addresses',
'Search Aid Requests': 'Tìm kiếm Yêu cầu cứu trợ',
'Search Assessment Summaries': 'Search Assessment Summaries',
'Search Assessments': 'Tìm kiếm các đánh giá',
'Search Baseline Type': 'Search Baseline Type',
'Search Baselines': 'Search Baselines',
'Search Budgets': 'Tìm kiếm các ngân sách',
'Search Bundles': 'Search Bundles',
'Search Catalog Items': 'Search Catalog Items',
'Search Category<>Sub-Category<>Catalog Relation': 'Search Category<>Sub-Category<>Catalog Relation',
'Search Checklists': 'Tìm kiếm Checklist',
'Search Cluster Subsectors': 'Search Cluster Subsectors',
'Search Clusters': 'Search Clusters',
'Search Configs': 'Search Configs',
'Search Contact Information': 'Tìm thông tin liên lạc',
'Search Contacts': 'Tìm kiếm các đầu mối liên lạc',
'Search Distribution Items': 'Search Distribution Items',
'Search Distributions': 'Tìm kiếm Quyên góp',
'Search Documents': 'Tìm kiếm các tài liệu',
'Search Donors': 'Tìm kiếm những người ủng hộ',
'Search Feature Class': 'Search Feature Class',
'Search Feature Layers': 'Tìm kiếm Layer chức năng',
'Search Flood Reports': 'Tìm các báo cáo về lũ lụt',
'Search Groups': 'Search Groups',
'Search Hospitals': 'Tìm kếm các bệnh viện',
'Search Identity': 'Tìm kiếm thông tin nhận dạng',
'Search Images': 'Tìm kếm các ảnh',
'Search Impact Type': 'Search Impact Type',
'Search Impacts': 'Search Impacts',
'Search Incident Reports': 'Search Incident Reports',
'Search Incidents': 'Search Incidents',
'Search Inventory Items': 'Search Inventory Items',
'Search Inventory Stores': 'Search Inventory Stores',
'Search Item Catalog Category(s)': 'Search Item Catalog Category(s)',
'Search Item Catalog(s)': 'Tìm kiếm Catalog hàng hóa',
'Search Item Categories': 'Search Item Categories',
'Search Item Packets': 'Search Item Packets',
'Search Item Sub-Category(s)': 'Search Item Sub-Category(s)',
'Search Items': 'Search Items',
'Search Keys': 'Tìm kiếm mã',
'Search Kits': 'Search Kits',
'Search Layers': 'Tìm kiếm các lớp',
'Search Locations': 'Tìm kiếm các địa điểm',
'Search Log Entry': 'Search Log Entry',
'Search Markers': 'Search Markers',
'Search Member': 'Tìm thành viên',
'Search Membership': 'Tìm kiếm thành viên',
'Search Memberships': 'Tìm kiếm thành viên',
'Search Metadata': 'Tìm kiếm dữ liệu',
'Search Need Type': 'Search Need Type',
'Search Needs': 'Search Needs',
'Search Notes': 'Search Notes',
'Search Offices': 'Tìm các văn phòng',
'Search Organizations': 'Tìm kiếm các tổ chức',
'Search Peer': 'Search Peer',
'Search Personal Effects': 'Search Personal Effects',
'Search Persons': 'Tìm kiếm Cá nhân',
'Search Photos': 'Tìm kiếm ảnh',
'Search Positions': 'Search Positions',
'Search Problems': 'Search Problems',
'Search Projections': 'Search Projections',
'Search Projects': 'Tìm kiếm các dự án',
'Search Rapid Assessments': 'Search Rapid Assessments',
'Search Received Items': 'Search Received Items',
'Search Received Shipments': 'Search Received Shipments',
'Search Records': 'Tìm các hồ sơ',
'Search Recovery Reports': 'Search Recovery Reports',
'Search Registations': 'Tìm kiếm các đăng ký',
'Search Registration Request': 'Tìm kiếm Yêu cầu Đăng ký',
'Search Report': 'Tìm kiếm báo cáo',
'Search Reports': 'Tìm kiếm Báo cáo',
'Search Request': 'Tìm kiếm yêu cầu',
'Search Request Items': 'Tìm kiếm Yêu cầu hàng hóa',
'Search Requests': 'Search Requests',
'Search Resources': 'Tìm kiếm các nguồn lực',
'Search Responses': 'Search Responses',
'Search Rivers': 'Search Rivers',
'Search Roles': 'Tìm các vai trò',
'Search Sections': 'Search Sections',
'Search Sectors': 'Search Sectors',
'Search Sent Items': 'Search Sent Items',
'Search Sent Shipments': 'Search Sent Shipments',
'Search Service Profiles': 'Search Service Profiles',
'Search Settings': 'Search Settings',
'Search Shelter Services': 'Search Shelter Services',
'Search Shelter Types': 'Tìm kiếm Loại Cư trú',
'Search Shelters': 'Search Shelters',
'Search Shipment Transit Logs': 'Search Shipment Transit Logs',
'Search Shipment/Way Bills': 'Search Shipment/Way Bills',
'Search Shipment<>Item Relation': 'Search Shipment<>Item Relation',
'Search Site(s)': 'Search Site(s)',
'Search Skill Types': 'Search Skill Types',
'Search Skills': 'Search Skills',
'Search Solutions': 'Search Solutions',
'Search Staff': 'Search Staff',
'Search Staff Types': 'Search Staff Types',
'Search Status': 'Search Status',
'Search Storage Bin Type(s)': 'Search Storage Bin Type(s)',
'Search Storage Bin(s)': 'Search Storage Bin(s)',
'Search Storage Location(s)': 'Tìm kiếm kho lưu trữ',
'Search Subscriptions': 'Tìm kiếm danh sách, số tiền quyên góp',
'Search Tasks': 'Search Tasks',
'Search Teams': 'Tìm kiếm các đội',
'Search Themes': 'Tìm kiếm chủ đề',
'Search Tickets': 'Search Tickets',
'Search Tracks': 'Tìm kiếm dấu vết',
'Search Twitter Tags': 'Search Twitter Tags',
'Search Units': 'Search Units',
'Search Users': 'Search Users',
'Search Volunteer Registrations': 'Tìm kiếm Đăng ký tình nguyện viên',
'Search Volunteers': 'Search Volunteers',
'Search Warehouse Items': 'Search Warehouse Items',
'Search Warehouses': 'Search Warehouses',
'Search and Edit Group': 'Tìm và sửa thông tin nhóm',
'Search and Edit Individual': 'Tìm kiếm và chỉnh sửa cá nhân',
'Search by ID Tag': 'Search by ID Tag',
'Search by Skill Types': 'Search by Skill Types',
'Search for Items': 'Search for Items',
'Search for a Hospital': 'Tìm kiếm bệnh viện',
'Search for a Location': 'Tìm một địa điểm',
'Search for a Person': 'Tìm kiếm một người',
'Search for a Project': 'Tìm kiếm dự án',
'Search for a Request': 'Tìm kiếm một yêu cầu',
'Search here for a person in order to:': 'Search here for a person in order to:',
"Search here for a person's record in order to:": "Search here for a person's record in order to:",
'Search messages': 'Search messages',
'Searching for different groups and individuals': 'Searching for different groups and individuals',
'Secondary Server (Optional)': 'Secondary Server (Optional)',
'Seconds must be a number between 0 and 60': 'Giây phải là số từ 0 đến 60',
'Section Details': 'Chi tiết khu vực',
'Section deleted': 'Section deleted',
'Section updated': 'Section updated',
'Sections': 'Sections',
'Sector': 'Sector',
'Sector Details': 'Sector Details',
'Sector added': 'Sector added',
'Sector deleted': 'Sector deleted',
'Sector updated': 'Sector updated',
'Sectors': 'Sectors',
'Security Policy': 'Chính sách bảo mật',
'Security Status': 'Security Status',
'Security problems': 'Security problems',
'Seen': 'Seen',
'Select 2 potential locations from the dropdowns.': 'Select 2 potential locations from the dropdowns.',
'Select Items from this Warehouse': 'Select Items from this Warehouse',
'Select Photos': 'Select Photos',
'Select a location': 'Select a location',
"Select a person in charge for status 'assigned'": "Select a person in charge for status 'assigned'",
'Select a question from the list': 'Chọn một câu hỏi trong danh sách',
'Select all that apply': 'Chọn tất cả các áp dụng trên',
'Select an Organization to see a list of offices': 'Select an Organization to see a list of offices',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.',
'Select the person assigned to this role for this project.': 'Select the person assigned to this role for this project.',
'Select the person associated with this scenario.': 'Select the person associated with this scenario.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS',
'Self Registration': 'Self Registration',
'Self-registration': 'Self-registration',
'Send': 'Send',
'Send Alerts using Email &/or SMS': 'Send Alerts using Email &/or SMS',
'Send Mail': 'Send Mail',
'Send Notification': 'Send Notification',
'Send Shipment': 'Send Shipment',
'Send message': 'Gửi tin nhắn',
'Send new message': 'Gửi tin nhắn mới',
'Sends & Receives Alerts via Email & SMS': 'Sends & Receives Alerts via Email & SMS',
'Senior (50+)': 'Senior (50+)',
'Sensitivity': 'Mức độ nhạy cảm',
'Sent': 'Sent',
'Sent Item': 'Sent Item',
'Sent Item Details': 'Sent Item Details',
'Sent Item added': 'Sent Item added',
'Sent Item deleted': 'Sent Item deleted',
'Sent Item updated': 'Sent Item updated',
'Sent Items': 'Sent Items',
'Sent Shipment Details': 'Sent Shipment Details',
'Sent Shipment canceled': 'Sent Shipment canceled',
'Sent Shipment updated': 'Sent Shipment updated',
'Sent Shipments': 'Sent Shipments',
'Separate latrines for women and men': 'Separate latrines for women and men',
'Seraiki': 'Seraiki',
'Series': 'Series',
'Server': 'Server',
'Service': 'Service',
'Service Catalogue': 'Service Catalogue',
'Service or Facility': 'Dịch vụ hoặc phương tiện',
'Service profile added': 'Đã thêm thông tin dịch vụ',
'Service profile deleted': 'Service profile deleted',
'Service profile updated': 'Service profile updated',
'Services': 'Dịch vụ',
'Services Available': 'Các dịch vụ đang triển khai',
'Setting Details': 'Setting Details',
'Setting added': 'Đã thêm cài đặt',
'Setting deleted': 'Setting deleted',
'Setting updated': 'Setting updated',
'Settings': 'Cài đặt',
'Settings updated': 'Settings updated',
'Settings were reset because authenticating with Twitter failed': 'Settings were reset because authenticating with Twitter failed',
'Severity': 'Severity',
'Severity:': 'Severity:',
'Share a common Marker (unless over-ridden at the Feature level)': 'Chia sẻ Đèn hiệu chung(nếu không vượt mức tính năng)',
'Shelter': 'Cư trú',
'Shelter & Essential NFIs': 'Shelter & Essential NFIs',
'Shelter Details': 'Shelter Details',
'Shelter Name': 'Shelter Name',
'Shelter Registry': 'Đăng ký tạm trú',
'Shelter Service': 'Shelter Service',
'Shelter Service Details': 'Chi tiết dịch vụ cư trú',
'Shelter Service added': 'Shelter Service added',
'Shelter Service deleted': 'Shelter Service deleted',
'Shelter Service updated': 'Shelter Service updated',
'Shelter Services': 'Dịch vụ cư trú',
'Shelter Type': 'Shelter Type',
'Shelter Type Details': 'Shelter Type Details',
'Shelter Type added': 'Shelter Type added',
'Shelter Type deleted': 'Shelter Type deleted',
'Shelter Type updated': 'Shelter Type updated',
'Shelter Types': 'Shelter Types',
'Shelter Types and Services': 'Shelter Types and Services',
'Shelter added': 'Đã thêm Thông tin cư trú',
'Shelter deleted': 'Shelter deleted',
'Shelter updated': 'Shelter updated',
'Shelter/NFI assistance received/expected': 'Shelter/NFI assistance received/expected',
'Shelters': 'Shelters',
'Shipment Created': 'Shipment Created',
'Shipment Details': 'Shipment Details',
'Shipment Items': 'Shipment Items',
'Shipment Received': 'Shipment Received',
'Shipment Sent': 'Shipment Sent',
'Shipment Transit Log Details': 'Shipment Transit Log Details',
'Shipment Transit Log added': 'Shipment Transit Log added',
'Shipment Transit Log deleted': 'Shipment Transit Log deleted',
'Shipment Transit Log updated': 'Shipment Transit Log updated',
'Shipment Transit Logs': 'Shipment Transit Logs',
'Shipment/Way Bill added': 'Shipment/Way Bill added',
'Shipment/Way Bills': 'Shipment/Way Bills',
'Shipment/Way Bills Details': 'Shipment/Way Bills Details',
'Shipment/Way Bills deleted': 'Shipment/Way Bills deleted',
'Shipment/Way Bills updated': 'Shipment/Way Bills updated',
'Shipment<>Item Relation added': 'Shipment<>Item Relation added',
'Shipment<>Item Relation deleted': 'Shipment<>Item Relation deleted',
'Shipment<>Item Relation updated': 'Shipment<>Item Relation updated',
'Shipment<>Item Relations': 'Shipment<>Item Relations',
'Shipment<>Item Relations Details': 'Shipment<>Item Relations Details',
'Shipments': 'Shipments',
'Shipments To': 'Shipments To',
'Shooting': 'Shooting',
'Short Assessment': 'Short Assessment',
'Short Description': 'Short Description',
'Show Checklist': 'Show Checklist',
'Show on map': 'Hiển thị trên bản đồ',
'Sindhi': 'Sindhi',
'Site': 'Địa điểm',
'Site Address': 'Site Address',
'Site Administration': 'Quản trị Site',
'Site Description': 'Site Description',
'Site Details': 'Site Details',
'Site ID': 'Site ID',
'Site Location Description': 'Site Location Description',
'Site Location Name': 'Site Location Name',
'Site Manager': 'Site Manager',
'Site Name': 'Site Name',
'Site added': 'Site added',
'Site deleted': 'Site deleted',
'Site updated': 'Site updated',
'Site/Warehouse': 'Site/Warehouse',
'Sites': 'Trang web',
'Situation Awareness & Geospatial Analysis': 'Nhận biết tình huống và phân tích tọa độ địa lý',
'Sketch': 'Sketch',
'Skill': 'Skill',
'Skill Details': 'Chi tiết kỹ năng',
'Skill Status': 'Skill Status',
'Skill Type Details': 'Skill Type Details',
'Skill Type added': 'Skill Type added',
'Skill Type deleted': 'Skill Type deleted',
'Skill Type updated': 'Skill Type updated',
'Skill Types': 'Skill Types',
'Skill added': 'Đã thêm kỹ năng',
'Skill deleted': 'Skill deleted',
'Skill updated': 'Skill updated',
'Skills': 'Skills',
'Skype ID': 'Skype ID',
'Small Trade': 'Small Trade',
'Smoke': 'Smoke',
'Snow Fall': 'Snow Fall',
'Snow Squall': 'Snow Squall',
'Solid waste': 'Solid waste',
'Solution': 'Solution',
'Solution Details': 'Solution Details',
'Solution Item': 'Solution Item',
'Solution added': 'Solution added',
'Solution deleted': 'Đã xóa giải pháp',
'Solution updated': 'Solution updated',
'Solutions': 'Solutions',
'Some': 'Some',
'Sorry - the server has a problem, please try again later.': 'Sorry - the server has a problem, please try again later.',
'Sorry that location appears to be outside the area of the Parent.': 'Sorry that location appears to be outside the area of the Parent.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Sorry that location appears to be outside the area supported by this deployment.',
'Sorry, I could not understand your request': 'Xin lỗi, tôi không hiểu yêu cầu của bạn',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Sorry, only users with the MapAdmin role are allowed to edit these locations',
'Sorry, something went wrong.': 'Sorry, something went wrong.',
'Sorry, that page is forbidden for some reason.': 'Xin lỗi, trang này bị cấm vì một số lý do',
'Sorry, that service is temporary unavailable.': 'Xin lỗi, dịch vụ đó tạm thời không hoạt động',
'Sorry, there are no addresses to display': 'Sorry, there are no addresses to display',
"Sorry, things didn't get done on time.": "Sorry, things didn't get done on time.",
"Sorry, we couldn't find that page.": 'Xin lỗi, chúng tôi không tìm thấy trang đó',
'Source': 'Source',
'Source ID': 'Source ID',
'Source Time': 'Source Time',
'Source Type': 'Source Type',
'Space Debris': 'Space Debris',
'Spanish': 'Người Tây Ban Nha',
'Special Ice': 'Special Ice',
'Special Marine': 'Special Marine',
'Special needs': 'Nhu cầu đặc biệt',
'Specialized Hospital': 'Bệnh viện chuyên khoa',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.',
'Specific locations need to have a parent of level': 'Specific locations need to have a parent of level',
'Specify a descriptive title for the image.': 'Specify a descriptive title for the image.',
'Specify the bed type of this unit.': 'Specify the bed type of this unit.',
'Specify the minimum sustainability in weeks or days.': 'Specify the minimum sustainability in weeks or days.',
'Specify the number of available sets': 'Specify the number of available sets',
'Specify the number of available units (adult doses)': 'Specify the number of available units (adult doses)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions',
'Specify the number of sets needed per 24h': 'Specify the number of sets needed per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specify the number of units (adult doses) needed per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h',
'Spherical Mercator?': 'Spherical Mercator?',
'Spreadsheet Importer': 'Spreadsheet Importer',
'Spreadsheet uploaded': 'Spreadsheet uploaded',
'Spring': 'Spring',
'Squall': 'Squall',
'Staff': 'Staff',
'Staff 2': 'Staff 2',
'Staff Details': 'Staff Details',
'Staff Type Details': 'Staff Type Details',
'Staff Type added': 'Staff Type added',
'Staff Type deleted': 'Staff Type deleted',
'Staff Type updated': 'Staff Type updated',
'Staff Types': 'Staff Types',
'Staff added': 'Staff added',
'Staff deleted': 'Xóa tên nhân viên',
'Staff present and caring for residents': 'Staff present and caring for residents',
'Staff updated': 'Staff updated',
'Staffing': 'Staffing',
'Start date': 'Ngày bắt đầu',
'Start of Period': 'Start of Period',
'State': 'State',
'Stationery': 'Stationery',
'Status': 'Status',
'Status Report': 'Status Report',
'Status added': 'Status added',
'Status deleted': 'Status deleted',
'Status of clinical operation of the facility.': 'Status of clinical operation of the facility.',
'Status of general operation of the facility.': 'Status of general operation of the facility.',
'Status of morgue capacity.': 'Status of morgue capacity.',
'Status of operations of the emergency department of this hospital.': 'Tình trạng hoạt động của phòng cấp cứu tại bệnh viện này',
'Status of security procedures/access restrictions in the hospital.': 'Trạng thái của các giới hạn thủ tục/truy nhập an ninh trong bệnh viện',
'Status of the operating rooms of this hospital.': 'Trạng thái các phòng bệnh trong bệnh viện này',
'Status updated': 'Status updated',
'Storage Bin': 'Storage Bin',
'Storage Bin Details': 'Storage Bin Details',
'Storage Bin Number': 'Storage Bin Number',
'Storage Bin Type': 'Storage Bin Type',
'Storage Bin Type Details': 'Storage Bin Type Details',
'Storage Bin Type added': 'Storage Bin Type added',
'Storage Bin Type deleted': 'Storage Bin Type deleted',
'Storage Bin Type updated': 'Storage Bin Type updated',
'Storage Bin Types': 'Storage Bin Types',
'Storage Bin added': 'Storage Bin added',
'Storage Bin deleted': 'Storage Bin deleted',
'Storage Bin updated': 'Storage Bin updated',
'Storage Bins': 'Storage Bins',
'Storage Location': 'Storage Location',
'Storage Location Details': 'Storage Location Details',
'Storage Location ID': 'Storage Location ID',
'Storage Location Name': 'Storage Location Name',
'Storage Location added': 'Storage Location added',
'Storage Location deleted': 'Storage Location deleted',
'Storage Location updated': 'Storage Location updated',
'Storage Locations': 'Storage Locations',
'Store spreadsheets in the Eden database': 'Store spreadsheets in the Eden database',
'Storm Force Wind': 'Storm Force Wind',
'Storm Surge': 'Storm Surge',
'Stowaway': 'Stowaway',
'Street': 'Street',
'Street (continued)': 'Street (continued)',
'Street Address': 'Street Address',
'Strong Wind': 'Strong Wind',
'Sub Category': 'Sub Category',
'Sub-type': 'Sub-type',
'Subject': 'Subject',
'Submission successful - please wait': 'Submission successful - please wait',
'Submission successful - please wait...': 'Submission successful - please wait...',
'Subscription Details': 'Subscription Details',
'Subscription added': 'Subscription added',
'Subscription deleted': 'Subscription deleted',
'Subscription updated': 'Subscription updated',
'Subscriptions': 'Quyên góp',
'Subsistence Cost': 'Mức sống tối thiểu',
'Sufficient care/assistance for chronically ill': 'Sufficient care/assistance for chronically ill',
'Suggest not changing this field unless you know what you are doing.': 'Khuyến nghị bạn không thay đổi trường này khi chưa chắc chắn',
'Summary': 'Summary',
'Sunday': 'Sunday',
'Support Request': 'Hỗ trợ yêu cầu',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.',
'Sure you want to delete this object?': 'Sure you want to delete this object?',
'Surgery': 'Surgery',
'Survey Answer': 'Survey Answer',
'Survey Answer Details': 'Survey Answer Details',
'Survey Answer added': 'Trả lời khảo sát đã được thêm',
'Survey Answer deleted': 'Survey Answer deleted',
'Survey Answer updated': 'Survey Answer updated',
'Survey Module': 'Survey Module',
'Survey Name': 'Tên khảo sát',
'Survey Question': 'Survey Question',
'Survey Question Details': 'Survey Question Details',
'Survey Question Display Name': 'Tên trên bảng câu hỏi khảo sát',
'Survey Question added': 'Survey Question added',
'Survey Question deleted': 'Survey Question deleted',
'Survey Question updated': 'Survey Question updated',
'Survey Section': 'Survey Section',
'Survey Section Details': 'Survey Section Details',
'Survey Section Display Name': 'Survey Section Display Name',
'Survey Section added': 'Đã thêm khu vực khảo sát',
'Survey Section deleted': 'Survey Section deleted',
'Survey Section updated': 'Cập nhật khu vực khảo sát',
'Survey Series': 'Survey Series',
'Survey Series Details': 'Survey Series Details',
'Survey Series Name': 'Survey Series Name',
'Survey Series added': 'Survey Series added',
'Survey Series deleted': 'Survey Series deleted',
'Survey Series updated': 'Đã cập nhật serie khảo sát',
'Survey Template': 'Survey Template',
'Survey Template Details': 'Survey Template Details',
'Survey Template added': 'Thêm mẫu Khảo sát',
'Survey Template deleted': 'Survey Template deleted',
'Survey Template updated': 'Survey Template updated',
'Survey Templates': 'Survey Templates',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': 'Switch this on to use individual CSS/Javascript files for diagnostics during development.',
'Symbology': 'Symbology',
'Sync Conflicts': 'Sync Conflicts',
'Sync History': 'Sync History',
'Sync Now': 'Đồng bộ hóa ngay bây giờ',
'Sync Partners': 'Sync Partners',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.',
'Sync Pools': 'Sync Pools',
'Sync Schedule': 'Sync Schedule',
'Sync Settings': 'Sync Settings',
'Sync process already started on ': 'Sync process already started on ',
'Synchronisation': 'Synchronisation',
'Synchronization': 'Synchronization',
'Synchronization Conflicts': 'Synchronization Conflicts',
'Synchronization Details': 'Synchronization Details',
'Synchronization History': 'Synchronization History',
'Synchronization Peers': 'Synchronization Peers',
'Synchronization Settings': 'Synchronization Settings',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden',
'Synchronization not configured.': 'Synchronization not configured.',
'Synchronization settings updated': 'Synchronization settings updated',
'Syncronisation History': 'Lịch sử đồng bộ hóa',
'System allows the General Public to Report Incidents & have these Tracked.': 'System allows the General Public to Report Incidents & have these Tracked.',
'System allows the tracking & discovery of Items stored in Locations.': 'System allows the tracking & discovery of Items stored in Locations.',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'Hệ thống luôn theo sát quá trình làm việc của tất cả các tình nguyện viên trong khu vực bị thiên tai.Hệ thống nắm bắt không chỉ vị trí hoạt động của họ mà còn cả thông tin về các dịch vụ mà họ đang cung cấp ở mỗi khu vực.',
"System's Twitter account updated": 'Cập nhật tài khoản Twitter của hệ thống',
'Table name': 'Table name',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Take shelter in place or per <instruction>',
'Task Details': 'Task Details',
'Task List': 'Task List',
'Task Status': 'Task Status',
'Task added': 'Đã thêm Nhiệm vụ',
'Task deleted': 'Đã xóa Nhiệm vụ',
'Task status': 'Task status',
'Task updated': 'Đã cập nhật nhiệm vụ',
'Tasks': 'Tasks',
'Team': 'Team',
'Team Description': 'Team Description',
'Team Details': 'Team Details',
'Team Head': 'Team Head',
'Team Id': 'Team Id',
'Team Leader': 'Đội trưởng',
'Team Member added': 'Thành viên đội đã được thêm',
'Team Members': 'Team Members',
'Team Name': 'Team Name',
'Team Type': 'Loại Đội',
'Team added': 'Đội đã được thêm',
'Team deleted': 'Team deleted',
'Team updated': 'Team updated',
'Teams': 'Teams',
'Technical testing only, all recipients disregard': 'Technical testing only, all recipients disregard',
'Telecommunications': 'Telecommunications',
'Telephone': 'Telephone',
'Telephony': 'Đường điện thoại',
'Temp folder %s not writable - unable to apply theme!': 'Temp folder %s not writable - unable to apply theme!',
'Template file %s not readable - unable to apply theme!': 'Template file %s not readable - unable to apply theme!',
'Templates': 'Templates',
'Terrorism': 'Terrorism',
'Tertiary Server (Optional)': 'Tertiary Server (Optional)',
'Test Results': 'Test Results',
'Text': 'Văn bản',
'Text Colour for Text blocks': 'Màu vản bản cho khối văn bản',
'Text before each Text Field (One per line)': 'Text before each Text Field (One per line)',
'Text in Message': 'Text in Message',
'Text in Message: ': 'Text in Message: ',
'Thanks for your assistance': 'Thanks for your assistance',
'The': 'The',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.',
'The Area which this Site is located within.': 'Xác định khu vực site này định vị trong đó',
'The Assessments module allows field workers to send in assessments.': 'The Assessments module allows field workers to send in assessments.',
'The Author of this Document (optional)': 'The Author of this Document (optional)',
'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The District for this Report.': 'The District for this Report.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.",
'The Group whose members can edit data in this record.': 'The Group whose members can edit data in this record.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.',
'The Media Library provides a catalogue of digital media.': 'The Media Library provides a catalogue of digital media.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.',
'The Office this record is associated with.': 'The Office this record is associated with.',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.',
'The Organization this record is associated with.': 'The Organization this record is associated with.',
'The Organization which is funding this Activity.': 'The Organization which is funding this Activity.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': 'The Rapid Assessments Module stores structured reports done by Professional Organizations.',
'The Request this record is associated with.': 'The Request this record is associated with.',
'The Role this person plays within this Office/Project.': 'The Role this person plays within this Office/Project.',
'The Role this person plays within this hospital.': 'Vai trò của người này trong bệnh viện',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": "The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.",
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'The Shelter this Request is from (optional).': 'The Shelter this Request is from (optional).',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": "The URL of the image file. If you don't upload an image file, then you must specify its location here.",
'The URL of your web gateway without the post parameters': 'The URL of your web gateway without the post parameters',
'The URL to access the service.': 'The URL to access the service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'The Unique Identifier (UUID) as assigned to this facility by the government.',
'The attribute within the KML which is used for the title of popups.': 'The attribute within the KML which is used for the title of popups.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)',
'The body height (crown to heel) in cm.': 'The body height (crown to heel) in cm.',
'The category of the Item.': 'The category of the Item.',
'The contact person for this organization.': 'Người chịu trách nhiệm liên lạc cho tổ chức này',
'The country the person usually lives in.': 'The country the person usually lives in.',
'The duplicate record will be deleted': 'The duplicate record will be deleted',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': 'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.',
'The first or only name of the person (mandatory).': 'The first or only name of the person (mandatory).',
'The following modules are available': 'The following modules are available',
'The hospital this record is associated with.': 'Bệnh viện lưu hồ sơ này',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.',
'The language to use for notifications.': 'The language to use for notifications.',
'The last known location of the missing person before disappearance.': 'The last known location of the missing person before disappearance.',
'The list of Item categories are maintained by the Administrators.': 'Danh sách category hàng hóa được quản trị viên quản lý',
'The name to be used when calling for or directly addressing the person (optional).': 'The name to be used when calling for or directly addressing the person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'The next screen will allow you to detail the number of people here & their needs.',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.',
'The person at the location who is reporting this incident (optional)': 'The person at the location who is reporting this incident (optional)',
'The person reporting about the missing person.': 'Người báo cáo về người mất tích',
'The person reporting the missing person.': 'The person reporting the missing person.',
"The person's manager within this Office/Project.": 'Quản lý của một cá nhân trong Văn phòng/Dự án',
'The post variable containing the phone number': 'The post variable containing the phone number',
'The post variable on the URL used for sending messages': 'Bài viết thay đổi trên URL dùng để gửi tin nhắn',
'The post variables other than the ones containing the message and the phone number': 'The post variables other than the ones containing the message and the phone number',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'Chính sách đơn giản cho phép người dùng ẩn danh Đọc và người dùng đã đăng ký Chỉnh sửa. Chính sách bảo mật đầy đủ cho phép quản trị viên thiết lập phân quyền trên từng bảng hoặc bản ghi - xem models/zzz.py.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>',
'The title of the WMS Browser panel in the Tools panel.': 'The title of the WMS Browser panel in the Tools panel.',
'The token associated with this application on': 'The token associated with this application on',
'The unique identifier which identifies this instance to other instances.': 'The unique identifier which identifies this instance to other instances.',
'The weight in kg.': 'The weight in kg.',
'Theme': 'Chủ đề',
'Theme Details': 'Theme Details',
'Theme added': 'Theme added',
'Theme deleted': 'Theme deleted',
'Theme updated': 'Theme updated',
'Themes': 'Themes',
'There are errors': 'There are errors',
'There are multiple records at this location': 'There are multiple records at this location',
'There are not sufficient items in the store to send this shipment': 'There are not sufficient items in the store to send this shipment',
'There was a problem, sorry, please try again later.': 'There was a problem, sorry, please try again later.',
'These are settings for Inbound Mail.': 'Đây là những cài đặt cho thư gửi vào',
'These are the Incident Categories visible to normal End-Users': 'These are the Incident Categories visible to normal End-Users',
'These are the default settings for all users. To change settings just for you, click ': 'These are the default settings for all users. To change settings just for you, click ',
'They': 'Người ta',
'This appears to be a duplicate of ': 'This appears to be a duplicate of ',
'This file already exists on the server as': 'This file already exists on the server as',
'This form allows the administrator to remove a duplicate location.': 'Mẫu này cho phép quản trị viên xóa bỏ các địa điểm trùng',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Đây là cách truyền dữ liệu giữa các máy vì nó bảo toàn tham chiếu',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!',
'This might be due to a temporary overloading or maintenance of the server.': 'Vấn đề này có thể do tình trạng quá tải hoặc máy chủ đang trong thời gian bảo trì',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'This page shows you logs of past syncs. Click on the link below to go to this page.',
'This screen allows you to upload a collection of photos to the server.': 'Màn hình cho phép bạn upload bộ sưu ảnh lên server',
'Thunderstorm': 'Thunderstorm',
'Thursday': 'Thursday',
'Ticket': 'Ticket',
'Ticket Details': 'Chi tiết Ticket',
'Ticket added': 'Ticket added',
'Ticket deleted': 'Đã xóa Ticket',
'Ticket updated': 'Ticket updated',
'Ticketing Module': 'Ticketing Module',
'Tickets': 'Tickets',
'Time needed to collect water': 'Time needed to collect water',
'Time of Request': 'Thời gian yêu cầu',
'Timestamp': 'Timestamp',
'Title': 'Title',
'To Location': 'To Location',
'To begin the sync process, click the button on the right => ': 'Nhấp chuột vào nút bên phải để kích hoạt quá trình đồng bộ',
'To begin the sync process, click this button => ': 'To begin the sync process, click this button => ',
'To delete': 'To delete',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.",
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": 'Để tìm kiếm một bệnh viện, nhập một phần tên hoặc ID. Có thể sử dụng % như một ký tự thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin, sẽ hiển thị toàn bộ các bệnh viện.',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": 'Để tìm kiếm một địa điểm, nhập tên. Có thể sử dụng ký tự % để thay thế cho một nhóm ký tự. Nhấn "Tìm kiếm" mà không nhập thông tin sẽ hiển thị tất cả các địa điểm.',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'Để tìm kiếm một người,bạn có thể nhập tên, tên đệm hay họ và/hoặc số chứng minh thư của người đó viết cách nhau.Bạn có thể dùng % a làm ký tự đại diện',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.",
'To submit a new job, use the': 'To submit a new job, use the',
'To variable': 'Thay đổi',
'Tools': 'Tools',
'Tornado': 'Lốc xoáy',
'Total # of Beneficiaries Reached ': 'Total # of Beneficiaries Reached ',
'Total # of Target Beneficiaries': 'Tổng số # đối tượng hưởng lợi',
'Total # of households of site visited': 'Total # of households of site visited',
'Total Beds': 'Total Beds',
'Total Beneficiaries': 'Total Beneficiaries',
'Total Cost per Megabyte': 'Tổng chi phí cho mỗi Megabyte',
'Total Cost per Minute': 'Total Cost per Minute',
'Total Households': 'Total Households',
'Total Monthly': 'Total Monthly',
'Total Monthly Cost': 'Total Monthly Cost',
'Total Monthly Cost: ': 'Total Monthly Cost: ',
'Total One-time Costs': 'Total One-time Costs',
'Total Persons': 'Total Persons',
'Total Recurring Costs': 'Tổng chi phí định kỳ',
'Total Unit Cost': 'Total Unit Cost',
'Total Unit Cost: ': 'Total Unit Cost: ',
'Total Units': 'Total Units',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Tổng số giường bệnh trong bệnh viện này. Tự động cập nhật từ các báo cáo hàng ngày.',
'Total number of houses in the area': 'Tổng số nóc nhà trong khu vực',
'Total number of schools in affected area': 'Total number of schools in affected area',
'Total population of site visited': 'Total population of site visited',
'Totals for Budget:': 'Totals for Budget:',
'Totals for Bundle:': 'Totals for Bundle:',
'Totals for Kit:': 'Totals for Kit:',
'Tourist Group': 'Tourist Group',
'Town': 'Town',
'Traces internally displaced people (IDPs) and their needs': 'Traces internally displaced people (IDPs) and their needs',
'Tracing': 'Đang tìm kiếm',
'Track': 'Dấu vết',
'Track Details': 'Track Details',
'Track deleted': 'Track deleted',
'Track updated': 'Track updated',
'Track uploaded': 'Track uploaded',
'Tracking of Projects, Activities and Tasks': 'Tracking of Projects, Activities and Tasks',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracking of basic information on the location, facilities and size of the Shelters',
'Tracks': 'Tracks',
'Tracks requests for aid and matches them against donors who have pledged aid': 'Tracks requests for aid and matches them against donors who have pledged aid',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Tracks the location, distibution, capacity and breakdown of victims in Shelters',
'Traffic Report': 'Traffic Report',
'Transit': 'Transit',
'Transition Effect': 'Transition Effect',
'Transparent?': 'Transparent?',
'Transportation assistance, Rank': 'Transportation assistance, Rank',
'Trauma Center': 'Trauma Center',
'Travel Cost': 'Travel Cost',
'Tree': 'Tree',
'Tropical Storm': 'Tropical Storm',
'Tropo Messaging Token': 'Tropo Messaging Token',
'Tropo Settings': 'Tropo Settings',
'Tropo Voice Token': 'Tropo Voice Token',
'Tropo settings updated': 'Cập nhật cài đặt Tropo',
'Truck': 'Xe tải',
'Try checking the URL for errors, maybe it was mistyped.': 'Thử kiểm tra lỗi trên URL, có thể do gõ sai',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Thử bấm nút refresh/reload hoặc kiểm tra URL',
'Try refreshing the page or hitting the back button on your browser.': 'Try refreshing the page or hitting the back button on your browser.',
'Tsunami': 'Tsunami',
'Tuesday': 'Tuesday',
'Twitter': 'Twitter',
'Twitter ID or #hashtag': 'Twitter ID or #hashtag',
'Twitter Settings': 'Twitter Settings',
'Type': 'Type',
'Type of cause': 'Type of cause',
'Type of latrines': 'Type of latrines',
'Type of place for defecation': 'Type of place for defecation',
'Type of water source before the disaster': 'Type of water source before the disaster',
'Types of health services available': 'Types of health services available',
'Types of water storage containers available': 'Types of water storage containers available',
'UID': 'UID',
'URL': 'URL',
'UTC Offset': 'UTC Offset',
'Unable to parse CSV file!': 'Không thể đọc file CSV',
'Understaffed': 'Understaffed',
'Unidentified': 'Unidentified',
'Unit': 'Unit',
'Unit Bed Capacity': 'Unit Bed Capacity',
'Unit Cost': 'Unit Cost',
'Unit Details': 'Unit Details',
'Unit Name': 'Unit Name',
'Unit Set': 'Unit Set',
'Unit Short Code for e.g. m for meter.': 'Unit Short Code for e.g. m for meter.',
'Unit added': 'Đã thêm đơn vị',
'Unit deleted': 'Unit deleted',
'Unit updated': 'Đơn vị được cập nhật',
'Units': 'Units',
'Units of Measure': 'Units of Measure',
'Unknown': 'Unknown',
'Unknown Peer': 'Unknown Peer',
'Unknown type of facility': 'Unknown type of facility',
'Unresolved Conflicts': 'Unresolved Conflicts',
'Unselect to disable the modem': 'Unselect to disable the modem',
'Unsent': 'Unsent',
'Unsupported data format!': 'Unsupported data format!',
'Unsupported method!': 'Unsupported method!',
'Update': 'Update',
'Update Activity Report': 'Update Activity Report',
'Update Cholera Treatment Capability Information': 'Update Cholera Treatment Capability Information',
'Update Import Job': 'Update Import Job',
'Update Request': 'Cập nhật Yêu cầu',
'Update Service Profile': 'Update Service Profile',
'Update Task Status': 'Update Task Status',
'Update Unit': 'Update Unit',
'Update if Master': 'Update if Master',
'Update if Newer': 'Cập nhật nếu mới hơn',
'Update your current ordered list': 'Update your current ordered list',
'Upload': 'Upload',
'Upload Photos': 'Upload Photos',
'Upload Spreadsheet': 'Upload Spreadsheet',
'Upload Track': 'Upload Track',
'Upload a Spreadsheet': 'Tải một bảng tính lên',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.",
'Urban Fire': 'Urban Fire',
'Urban area': 'Urban area',
'Urdu': 'Urdu',
'Urgent': 'Urgent',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Dùng (...)&(...) thay cho VÀ(AND), (...)|(...) cho HOẶC(OR), and ~(...) cho KHÔNG (NOT) để .đưa ra những câu hỏi phúc tạp',
'Use default': 'Use default',
'Use these links to download data that is currently in the database.': 'Dùng liên kết này để tải dữ liệu hiện có trên cơ sở dữ liệu xuống',
'Use this space to add a description about the Bin Type.': 'Thêm thông tin mô tả loại Bin ở đây',
'Use this space to add a description about the site location.': 'Use this space to add a description about the site location.',
'Use this space to add a description about the warehouse/site.': 'Thêm mô tả nhà kho/site ở đây',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Viết bình luận và ghi chú về site/nhà kho ở đây',
'Used to import data from spreadsheets into the database': 'Used to import data from spreadsheets into the database',
'User': 'User',
'User Details': 'User Details',
'User ID': 'User ID',
'User Management': 'User Management',
'User Profile': 'User Profile',
'User Requests': 'Yêu cầu của người dùng',
'User Updated': 'Đã cập nhật người dùng',
'User added': 'User added',
'User already has this role': 'User already has this role',
'User deleted': 'Đã xóa người dùng',
'User updated': 'User updated',
'Username': 'Username',
'Users': 'Users',
'Users removed': 'Đã xóa người dùng',
'Ushahidi': 'Ushahidi',
'Usual food sources in the area': 'Usual food sources in the area',
'Utility, telecommunication, other non-transport infrastructure': 'Utility, telecommunication, other non-transport infrastructure',
'Various Reporting functionalities': 'Various Reporting functionalities',
'Vehicle': 'Vehicle',
'Vehicle Crime': 'Tai nạn giao thông',
'Vehicle Types': 'Loại phương tiện',
'Vendor': 'Vendor',
'Verified': 'Verified',
'Verified?': 'Đã xác nhận?',
'Verify password': 'Verify password',
'Version': 'Phiên bản',
'Very High': 'Very High',
'View Alerts received using either Email or SMS': 'Xem nhắc nhở gửi đến qua email hoặc sms',
'View Fullscreen Map': 'View Fullscreen Map',
'View Image': 'View Image',
'View On Map': 'Hiển thị trên bản đồ',
'View Outbox': 'View Outbox',
'View Requests for Aid': 'Xem Yêu cầu viện trợ',
'View Settings': 'View Settings',
'View Tickets': 'View Tickets',
"View and/or update details of the person's record": 'Xem và/hoặc cập nhật chi tiết mục ghi cá nhân',
'View and/or update their details': 'View and/or update their details',
'View or update the status of a hospital.': 'Xem hoặc cập nhật trạng thái của một bệnh viện',
'View pending requests and pledge support.': 'View pending requests and pledge support.',
'View the hospitals on a map.': 'Hiển thị bệnh viện trên bản đồ',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": "View/Edit the Database directly (caution: doesn't respect the framework rules!)",
'Village': 'Village',
'Village Leader': 'Village Leader',
'Visible?': 'Visible?',
'Visual Recognition': 'Visual Recognition',
'Volcanic Ash Cloud': 'Đám mây tro bụi từ núi lửa',
'Volcanic Event': 'Volcanic Event',
'Volume - Fluids': 'Volume - Fluids',
'Volume - Solids': 'Volume - Solids',
'Volume Capacity': 'Volume Capacity',
'Volume/Dimensions': 'Volume/Dimensions',
'Volunteer Data': 'Dữ liệu tình nguyện viên',
'Volunteer Details': 'Volunteer Details',
'Volunteer Management': 'Volunteer Management',
'Volunteer Project': 'Dự án tình nguyện',
'Volunteer Registration': 'Đăng ký tình nguyện viên',
'Volunteer Registrations': 'Đăng ký tình nguyện viên',
'Volunteer Request': 'Yêu cầu tình nguyện viên',
'Volunteer added': 'Volunteer added',
'Volunteer deleted': 'Volunteer deleted',
'Volunteer details updated': 'Volunteer details updated',
'Volunteer registration added': 'Đã thêm đăng ký tình nguyện viên',
'Volunteer registration deleted': 'Đã xóa đăng ký tình nguyện viên',
'Volunteer registration updated': 'Đã cập nhật đăng ký tình nguyện viên',
'Volunteers': 'Tình nguyện viên',
'Volunteers were notified!': 'Volunteers were notified!',
'Vote': 'Vote',
'Votes': 'Votes',
'WASH': 'WASH',
'WMS Browser Name': 'WMS Browser Name',
'WMS Browser URL': 'WMS Browser URL',
'Walking Only': 'Walking Only',
'Walking time to the health service': 'Walking time to the health service',
'Warehouse': 'Warehouse',
'Warehouse Details': 'Warehouse Details',
'Warehouse Item': 'Warehouse Item',
'Warehouse Item Details': 'Warehouse Item Details',
'Warehouse Item added': 'Warehouse Item added',
'Warehouse Item deleted': 'Warehouse Item deleted',
'Warehouse Item updated': 'Warehouse Item updated',
'Warehouse Items': 'Warehouse Items',
'Warehouse Management': 'Quản lý kho hàng',
'Warehouse added': 'Warehouse added',
'Warehouse deleted': 'Warehouse deleted',
'Warehouse updated': 'Warehouse updated',
'Warehouse/Sites Registry': 'Warehouse/Sites Registry',
'Warehouses': 'Warehouses',
'WatSan': 'WatSan',
'Water': 'Water',
'Water Sanitation Hygiene': 'Water Sanitation Hygiene',
'Water gallon': 'Ga-lông nước',
'Water storage containers available for HH': 'Water storage containers available for HH',
'Water storage containers sufficient per HH': 'Water storage containers sufficient per HH',
'Water supply': 'Water supply',
'Waterspout': 'Waterspout',
'Way Bill(s)': 'Hóa đơn thu phí đường bộ',
'We have tried': 'We have tried',
'Website': 'Website',
'Wednesday': 'Wednesday',
'Weekly': 'Weekly',
'Weight': 'Weight',
'Weight (kg)': 'Khối lượng (kg)',
'Welcome to the Sahana Eden Disaster Management System': 'Welcome to the Sahana Eden Disaster Management System',
'Welcome to the Sahana Portal at ': 'Welcome to the Sahana Portal at ',
'Well-Known Text': 'Well-Known Text',
'Were basic medical supplies available for health services prior to the disaster?': 'Were basic medical supplies available for health services prior to the disaster?',
'Were breast milk substitutes used prior to the disaster?': 'Were breast milk substitutes used prior to the disaster?',
'Were there cases of malnutrition in this area prior to the disaster?': 'Were there cases of malnutrition in this area prior to the disaster?',
'Were there health services functioning for the community prior to the disaster?': 'Were there health services functioning for the community prior to the disaster?',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': 'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?',
'What are the factors affecting school attendance?': 'What are the factors affecting school attendance?',
"What are the people's normal ways to obtain food in this area?": "What are the people's normal ways to obtain food in this area?",
'What are your main sources of cash to restart your business?': 'What are your main sources of cash to restart your business?',
'What are your main sources of income now?': 'What are your main sources of income now?',
'What do you spend most of your income on now?': 'What do you spend most of your income on now?',
'What food stocks exist? (main dishes)': 'What food stocks exist? (main dishes)',
'What food stocks exist? (side dishes)': 'What food stocks exist? (side dishes)',
'What is the estimated total number of people in all of these institutions?': 'What is the estimated total number of people in all of these institutions?',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?',
'What is your major source of drinking water?': 'What is your major source of drinking water?',
"What should be done to reduce women and children's vulnerability to violence?": "What should be done to reduce women and children's vulnerability to violence?",
'What type of latrines are available in the village/IDP centre/Camp?': 'What type of latrines are available in the village/IDP centre/Camp?',
'What type of salvage material can be used from destroyed houses?': 'What type of salvage material can be used from destroyed houses?',
'What type of salvage material can be used from destroyed schools?': 'What type of salvage material can be used from destroyed schools?',
'What types of health problems do children currently have?': 'What types of health problems do children currently have?',
'What types of health problems do people currently have?': 'What types of health problems do people currently have?',
'What types of health services are still functioning in the affected area?': 'What types of health services are still functioning in the affected area?',
'What types of household water storage containers are available?': 'What types of household water storage containers are available?',
'What were your main sources of income before the disaster?': 'What were your main sources of income before the disaster?',
'Wheat': 'Wheat',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.",
'Where are the alternative places for studying?': 'Where are the alternative places for studying?',
'Where are the separated children originally from?': 'Where are the separated children originally from?',
'Where do the majority of people defecate?': 'Where do the majority of people defecate?',
'Where have the children been sent?': 'Where have the children been sent?',
'Where is solid waste disposed in the village/camp?': 'Where is solid waste disposed in the village/camp?',
'Whiskers': 'Whiskers',
'Who is doing what and where': 'Who is doing what and where',
'Who usually collects water for the family?': 'Ai là người thường đi lấy nước cho cả gia đình',
'Width': 'Độ rộng',
'Wild Fire': 'Wild Fire',
'Wind Chill': 'Wind Chill',
'Window frame': 'Window frame',
'Winter Storm': 'Winter Storm',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': 'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?',
'Women of Child Bearing Age': 'Women of Child Bearing Age',
'Women participating in coping activities': 'Women participating in coping activities',
'Women who are Pregnant or in Labour': 'Women who are Pregnant or in Labour',
'Womens Focus Groups': 'Womens Focus Groups',
'Wooden plank': 'Wooden plank',
'Wooden poles': 'Wooden poles',
'Working hours end': 'Hết giờ làm việc',
'Working hours start': 'Bắt đầu giờ làm việc',
'Working or other to provide money/food': 'Working or other to provide money/food',
'Would you like to display the photos on the map?': 'Would you like to display the photos on the map?',
'X-Ray': 'X-Ray',
'XMPP': 'XMPP',
'Yes': 'Yes',
'You are attempting to delete your own account - are you sure you want to proceed?': 'You are attempting to delete your own account - are you sure you want to proceed?',
'You are currently reported missing!': 'You are currently reported missing!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.',
'You can click on the map below to select the Lat/Lon fields:': 'You can click on the map below to select the Lat/Lon fields:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': 'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.',
'You can select the Draw tool (': 'You can select the Draw tool (',
'You can set the modem settings for SMS here.': 'Bạn có thể thiết lập cài đặt modem cho SMS ở đây',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'Bạn đã thiết lập các cài đặt cá nhân, vì vậy bạn không xem được các thay đổi ở đây.Để thiết lập lại, nhấp chuột vào',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.",
"You haven't made any calculations": "You haven't made any calculations",
'You must be logged in to register volunteers.': 'You must be logged in to register volunteers.',
'You must be logged in to report persons missing or found.': 'You must be logged in to report persons missing or found.',
'You must provide a series id to proceed.': 'Bạn phải nhập số id của serie để thao tác tiếp',
'You should edit Twitter settings in models/000_config.py': 'Bạn có thể chỉnh sửa cài đặt Twitter tại models/000_config.py',
'Your action is required. Please approve user %s asap: ': 'Your action is required. Please approve user %s asap: ',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Your current ordered list of solution items is shown below. You can change it by voting again.',
'Your post was added successfully.': 'Bạn đã gửi thông tin thành công',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.',
'ZIP/Postcode': 'ZIP/Postcode',
'Zinc roof': 'Zinc roof',
'Zoom': 'Zoom',
'Zoom Levels': 'Zoom Levels',
'act': 'act',
'active': 'đang hoạt động',
'added': 'added',
'all records': 'all records',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'allows for creation and management of surveys to assess the damage following a natural disaster.',
'an individual/team to do in 1-2 days': 'an individual/team to do in 1-2 days',
'approved': 'approved',
'assigned': 'đã phân công',
'average': 'trung bình',
'black': 'màu đen',
'blond': 'blond',
'blue': 'blue',
'brown': 'brown',
'c/o Name': 'c/o Name',
'can be used to extract data from spreadsheets and put them into database tables.': 'có thể dùng để trích xuất dữ liệu từ bẳng tính đưa vào cơ sở dữ liệu',
'cancelled': 'cancelled',
'caucasoid': 'caucasoid',
'check all': 'check all',
'click for more details': 'click for more details',
'collateral event': 'collateral event',
'completed': 'completed',
'confirmed': 'confirmed',
'consider': 'consider',
'constraint_id': 'constraint_id',
'criminal intent': 'criminal intent',
'crud': 'crud',
'curly': 'curly',
'currently registered': 'currently registered',
'daily': 'hàng ngày',
'dark': 'dark',
'data uploaded': 'đã upload dữ liệu',
'database': 'database',
'database %s select': 'chọn cơ sở dữ liệu %s',
'db': 'db',
'delete all checked': 'delete all checked',
'deleted': 'deleted',
'denied': 'denied',
'description': 'description',
'design': 'design',
'diseased': 'diseased',
'displaced': 'displaced',
'divorced': 'divorced',
'done!': 'done!',
'edit': 'edit',
'editor': 'người biên tập',
'embedded': 'embedded',
'enclosed area': 'enclosed area',
'export as csv file': 'chuyển đổi file csv',
'fat': 'fat',
'feedback': 'phản hồi',
'female': 'female',
'final report': 'final report',
'flush latrine with septic tank': 'flush latrine with septic tank',
'follow-up assessment': 'follow-up assessment',
'forehead': 'forehead',
'form data': 'form data',
'from Twitter': 'from Twitter',
'from_id': 'from_id',
'full': 'full',
'getting': 'getting',
'green': 'green',
'grey': 'grey',
'here': 'ở đây',
'high': 'high',
'hourly': 'hourly',
'households': 'households',
'human error': 'human error',
'identified': 'identified',
'ignore': 'ignore',
'immediately': 'immediately',
'in Deg Min Sec format': 'in Deg Min Sec format',
'in GPS format': 'Ở định dạng GPS',
'inactive': 'inactive',
'initial assessment': 'initial assessment',
'injured': 'injured',
'insert new': 'chèn mới',
'insert new %s': 'insert new %s',
'invalid': 'invalid',
'invalid request': 'yêu cầu không hợp lệ',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'là trung tâm thông tin trực tuyến, nơi lưu trữ thông tin về các nạn nhân và gia đình chịu ảnh hưởng của thiên tai, đặc biệt là xác định con số thương vong và lượng người sơ tán.Thông tin như tên, tuổi, số điện thoại, số CMND, nơi sơ tán và các thông tin khác cũng được lưu lại.Ảnh và dấu vân tay cũng có thể tải lên hệ thống.Để hiệu quả và tiện lợi hơn có thể quản lý theo nhóm',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'theo dõi ticket gửi đến cho phép người ta phân loại và điều phương tiện cứu trợ tới các nơi hợp lý ',
'kilogram': 'kilogram',
'kit': 'kit',
'latrines': 'latrines',
'legend URL': 'legend URL',
'light': 'light',
'liter': 'liter',
'login': 'Đăng nhập',
'long': 'long',
'long>12cm': 'long>12cm',
'low': 'low',
'male': 'male',
'manual': 'manual',
'married': 'married',
'maxExtent': 'maxExtent',
'maxResolution': 'Độ phân giải tối đa',
'medium': 'medium',
'medium<12cm': 'trung bình dưới 12cm',
'menu item': 'menu item',
'message_id': 'message_id',
'meter': 'meter',
'meter cubed': 'meter cubed',
'meters': 'meters',
'module allows the site administrator to configure various options.': 'Mô-đun cho phép người quản trị site cấu hình các tùy chọn khác nhau',
'module helps monitoring the status of hospitals.': 'module giúp theo dõi tình trạng bệnh viện',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).',
'mongoloid': 'mongoloid',
'more': 'more',
'n/a': 'n/a',
'natural hazard': 'thảm họa thiên nhiên',
'negroid': 'negroid',
'never': 'không bao giờ',
'new': 'Mới',
'new record inserted': 'new record inserted',
'next 100 rows': 'next 100 rows',
'no': 'no',
'none': 'none',
'normal': 'bình thường',
'not needed': 'not needed',
'not specified': 'không xác định',
'num Zoom Levels': 'num Zoom Levels',
'once': 'once',
'open defecation': 'open defecation',
'operational intent': 'operational intent',
'or import from csv file': 'or import from csv file',
'other': 'other',
'over one hour': 'hơn một tiếng',
'pack of 10': 'pack of 10',
'pending': 'pending',
'people': 'con người',
'piece': 'piece',
'pit': 'pit',
'pit latrine': 'pit latrine',
'postponed': 'postponed',
'preliminary template or draft, not actionable in its current form': 'preliminary template or draft, not actionable in its current form',
'previous 100 rows': 'previous 100 rows',
'primary incident': 'primary incident',
'problem connecting to twitter.com - please refresh': 'problem connecting to twitter.com - please refresh',
'provides a catalogue of digital media.': 'cung cấp danh mục các phương tiện truyền thông kỹ thuật số',
'record does not exist': 'record does not exist',
'record id': 'record id',
'records deleted': 'records deleted',
'red': 'red',
'reported': 'reported',
'reports successfully imported.': 'import báo cáo thành công',
'retired': 'retired',
'retry': 'retry',
'river': 'river',
'sack 20kg': 'sack 20kg',
'sack 50kg': 'sack 50kg',
'secondary effect': 'secondary effect',
'see comment': 'see comment',
'selected': 'selected',
'separated': 'separated',
'separated from family': 'thất lạc gia đình',
'shaved': 'shaved',
'shift_end': 'shift_end',
'shift_start': 'shift_start',
'short': 'short',
'short<6cm': 'short<6cm',
'sides': 'sides',
'sign-up now': 'sign-up now',
'simple': 'simple',
'single': 'single',
'slim': 'slim',
'state': 'state',
'straight': 'straight',
'suffered financial losses': 'thiệt hại về tài chính',
'table': 'table',
'table_name': 'table_name',
'tall': 'chiều cao',
'technical failure': 'technical failure',
'this': 'this',
'times and it is still not working. We give in. Sorry.': 'times and it is still not working. We give in. Sorry.',
'to access the system': 'to access the system',
'to reset your password': 'to reset your password',
'to verify your email': 'to verify your email',
'to_id': 'to_id',
'ton': 'ton',
'tonsure': 'tonsure',
'total': 'total',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!',
'unable to parse csv file': 'unable to parse csv file',
'unapproved': 'unapproved',
'uncheck all': 'uncheck all',
'unidentified': 'unidentified',
'uninhabitable = foundation and structure destroyed': 'uninhabitable = foundation and structure destroyed',
'unknown': 'unknown',
'unspecified': 'unspecified',
'updated': 'đã cập nhật',
'updates only': 'updates only',
'urgent': 'khẩn cấp',
'vm_action': 'vm_action',
'wavy': 'wavy',
'weekly': 'weekly',
'white': 'white',
'wider area, longer term, usually contain multiple Activities': 'wider area, longer term, usually contain multiple Activities',
'widowed': 'widowed',
'window': 'window',
'windows broken, cracks in walls, roof slightly damaged': 'windows broken, cracks in walls, roof slightly damaged',
'within human habitat': 'trong khu dân cư',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt module not available within the running Python - this needs installing for XLS output!',
'yes': 'có',
}
|
ptressel/sahana-eden-madpub
|
languages/vi.py
|
Python
|
mit
| 259,181
|
[
"VisIt"
] |
7b50f734bae97857af072c19d6a18cf41891829ea403f368066c8e7596901a99
|
#!/usr/bin/env python
#**************************************************************************
# Tintwizard
#
# Copyright (C) 2009 Euan Freeman <euan04@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*************************************************************************/
# Last modified: 14th June 2010
import pygtk
pygtk.require('2.0')
import gtk
import os
import sys
import signal
import webbrowser
import math
import shutil
# Project information (shown in the About dialog).
NAME = "tintwizard"
AUTHORS = ["Euan Freeman <euan04@gmail.com>"]
VERSION = "0.3.4"
COMMENTS = "tintwizard generates config files for the lightweight panel replacement tint2"
WEBSITE = "http://code.google.com/p/tintwizard/"
# Default values for text entry fields
# All values are kept as strings because they seed gtk.Entry widgets
# and are written verbatim into the tint2 config file.
# -- Background style defaults (pixels) --
BG_ROUNDING = "0"
BG_BORDER = "0"
# -- Panel geometry / content defaults --
PANEL_SIZE_X = "0"
PANEL_SIZE_Y = "40"
PANEL_MARGIN_X = "0"
PANEL_MARGIN_Y = "0"
PANEL_PADDING_X = "0"
PANEL_PADDING_Y = "0"
PANEL_MONITOR = "all"
# Order of panel items: T=taskbar, S=systray, C=clock (tint2 syntax).
PANEL_ITEMS = "TSC"
# -- Panel autohide defaults (timeouts in seconds, height in pixels) --
PANEL_AUTOHIDE_SHOW = "0.0"
PANEL_AUTOHIDE_HIDE = "0.0"
PANEL_AUTOHIDE_HEIGHT = "0"
# -- Taskbar defaults --
TASKBAR_PADDING_X = "0"
TASKBAR_PADDING_Y = "0"
TASKBAR_SPACING = "0"
# -- Task defaults (blink count for urgent windows, sizes in pixels) --
TASK_BLINKS = "7"
TASK_MAXIMUM_SIZE_X = "200"
TASK_MAXIMUM_SIZE_Y = "32"
TASK_PADDING_X = "0"
TASK_PADDING_Y = "0"
TASK_MMC_PADDING_X = "2"
TASK_MMC_PADDING_Y = "2"
TASK_SPACING = "0"
# -- System tray defaults (alpha/saturation/brightness are percentages) --
TRAY_PADDING_X = "0"
TRAY_PADDING_Y = "0"
TRAY_SPACING = "0"
TRAY_MAX_ICON_SIZE = "0"
TRAY_ICON_ALPHA = "100"
TRAY_ICON_SAT = "0"
TRAY_ICON_BRI = "0"
# -- Task icon adjustments per state (normal/active/urgent/iconified) --
ICON_ALPHA = "100"
ICON_SAT = "0"
ICON_BRI = "0"
ACTIVE_ICON_ALPHA = "100"
ACTIVE_ICON_SAT = "0"
ACTIVE_ICON_BRI = "0"
URGENT_ICON_ALPHA = "100"
URGENT_ICON_SAT = "0"
URGENT_ICON_BRI = "0"
ICONIFIED_ICON_ALPHA = "100"
ICONIFIED_ICON_SAT = "0"
ICONIFIED_ICON_BRI = "0"
# -- Clock defaults (strftime formats; click actions are shell commands) --
CLOCK_FMT_1 = "%H:%M"
CLOCK_FMT_2 = "%a %d %b"
CLOCK_TOOLTIP = ""
CLOCK_TIME1_TIMEZONE = ""
CLOCK_TIME2_TIMEZONE = ""
CLOCK_TOOLTIP_TIMEZONE = ""
CLOCK_PADDING_X = "0"
CLOCK_PADDING_Y = "0"
CLOCK_LCLICK = ""
CLOCK_RCLICK = ""
# -- Tooltip defaults (timeouts in seconds) --
TOOLTIP_PADDING_X = "0"
TOOLTIP_PADDING_Y = "0"
TOOLTIP_SHOW_TIMEOUT = "0"
TOOLTIP_HIDE_TIMEOUT = "0"
# -- Battery applet defaults (percent thresholds; action is a shell command) --
BATTERY_LOW = "20"
BATTERY_HIDE = "90"
BATTERY_ACTION = 'notify-send "battery low"'
BATTERY_PADDING_X = "0"
BATTERY_PADDING_Y = "0"
class TintWizardPrefGUI(gtk.Window):
    """Dialog window which lets the user change the default preferences.

    Edits the owning TintWizardGUI's ``defaults`` dict (default font,
    background/foreground/border colors and background-style count) and
    asks the owner to rewrite its config file on save.
    """

    def __init__(self, tw):
        """Create and show the preferences window.

        tw -- the owning TintWizardGUI; its ``defaults`` dict supplies the
        initial widget values and receives the saved ones.
        """
        self.tw = tw
        # Create top-level window
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
        self.set_title("Preferences")
        self.connect("delete_event", self.quit)
        # layout holds the options table on top and Save/Cancel below.
        self.layout = gtk.Table(2, 2, False)
        self.table = gtk.Table(5, 2, False)
        self.table.set_row_spacings(5)
        self.table.set_col_spacings(5)
        createLabel(self.table, text="Default Font", gridX=0, gridY=0)
        self.font = gtk.FontButton(self.tw.defaults["font"])
        self.font.set_alignment(0, 0.5)
        self.table.attach(self.font, 1, 2, 0, 1, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
        createLabel(self.table, text="Default Background Color", gridX=0, gridY=1)
        self.bgColor = gtk.ColorButton(gtk.gdk.color_parse(self.tw.defaults["bgColor"]))
        self.bgColor.set_alignment(0, 0.5)
        self.table.attach(self.bgColor, 1, 2, 1, 2, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
        createLabel(self.table, text="Default Foreground Color", gridX=0, gridY=2)
        self.fgColor = gtk.ColorButton(gtk.gdk.color_parse(self.tw.defaults["fgColor"]))
        self.fgColor.set_alignment(0, 0.5)
        self.table.attach(self.fgColor, 1, 2, 2, 3, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
        createLabel(self.table, text="Default Border Color", gridX=0, gridY=3)
        self.borderColor = gtk.ColorButton(gtk.gdk.color_parse(self.tw.defaults["borderColor"]))
        self.borderColor.set_alignment(0, 0.5)
        self.table.attach(self.borderColor, 1, 2, 3, 4, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
        createLabel(self.table, text="Number of background styles", gridX=0, gridY=4)
        self.bgCount = createEntry(self.table, maxSize=6, width=8, text=str(self.tw.defaults["bgCount"]), gridX=1, gridY=4, xExpand=True, yExpand=True)
        self.layout.attach(self.table, 0, 2, 0, 1, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND, xpadding=20, ypadding=5)
        createButton(self.layout, text="Save", stock=gtk.STOCK_SAVE, name="save", gridX=0, gridY=1, xExpand=True, yExpand=True, handler=self.save)
        createButton(self.layout, text="Cancel", stock=gtk.STOCK_CANCEL, name="cancel", gridX=1, gridY=1, xExpand=True, yExpand=True, handler=self.quit)
        self.add(self.layout)
        self.show_all()

    def quit(self, widget=None, event=None):
        """Destroy the window (also serves as the delete_event handler)."""
        self.destroy()

    def save(self, action=None):
        """Handle a click on Save.

        After confirmation, copies the chosen font and colors back into
        ``self.tw.defaults``, validates the background count, rewrites the
        owner's config file and closes the dialog.
        """
        if confirmDialog(self, "Overwrite configuration file?") == gtk.RESPONSE_YES:
            self.tw.defaults["font"] = self.font.get_font_name()
            self.tw.defaults["bgColor"] = rgbToHex(self.bgColor.get_color().red, self.bgColor.get_color().green, self.bgColor.get_color().blue)
            self.tw.defaults["fgColor"] = rgbToHex(self.fgColor.get_color().red, self.fgColor.get_color().green, self.fgColor.get_color().blue)
            self.tw.defaults["borderColor"] = rgbToHex(self.borderColor.get_color().red, self.borderColor.get_color().green, self.borderColor.get_color().blue)
            try:
                self.tw.defaults["bgCount"] = int(self.bgCount.get_text())
            # BUGFIX: was a bare "except:" which would also swallow
            # KeyboardInterrupt/SystemExit; only int() of the entry text
            # can fail here, and that raises ValueError.
            except ValueError:
                errorDialog(self, "Invalid value for background count")
                return
            self.tw.writeConf()
            self.quit()
class TintWizardGUI(gtk.Window):
"""The main window for the application."""
def __init__(self):
    """Create and show the main tintwizard window (method of TintWizardGUI).

    Reads the tintwizard config, locates the tint2 binary, builds the
    menu/toolbar, all option tabs and the status bar, and — when a config
    filename was passed on the command line — loads that config.
    Exits the process if the tint2 config directory or binary is missing.
    """
    self.filename = None
    self.curDir = None
    self.toSave = False
    if len(sys.argv) > 1:
        # Launched as "tintwizard <configfile>": edit just that file.
        self.filename = sys.argv[1]
        self.oneConfigFile = True
    else:
        self.oneConfigFile = False
    # Read conf file and set default values
    self.readConf()
    if self.defaults["bgColor"] in [None, "None"]:
        self.defaults["bgColor"] = "#000000"
    if self.defaults["fgColor"] in [None, "None"]:
        self.defaults["fgColor"] = "#ffffff"
    if self.defaults["borderColor"] in [None, "None"]:
        self.defaults["borderColor"] = "#ffffff"
    if os.path.exists(os.path.expandvars("${HOME}") + "/.config/tint2"):
        self.curDir = os.path.expandvars("${HOME}") + "/.config/tint2"
    else:
        # BUGFIX: was errorDialog(<msg>) with no parent argument (every
        # other call site passes the window first) followed by
        # "Sys.exit(1)" — a NameError; the process never actually exited.
        errorDialog(self, "$HOME/.config/tint2/ directory not found! Is tint2 installed correctly?")
        sys.exit(1)
    try:
        self.defaults["bgCount"] = int(self.defaults["bgCount"])
    # BUGFIX: narrowed from a bare "except:"; the conversion can only
    # fail with a missing key or a non-numeric/None value.
    except (KeyError, TypeError, ValueError):
        self.defaults["bgCount"] = 2
    # Get the full location of the tint2 binary
    which = os.popen('which tint2')
    self.tint2Bin = which.readline().strip()
    which.close()
    if len(self.tint2Bin) == 0:
        errorDialog(self, "tint2 could not be found. Are you sure it is installed?")
        sys.exit(1)
    # Create top-level window
    gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
    self.set_title("tintwizard")
    self.connect("delete_event", self.quit)
    # self.table is our main layout manager
    self.table = gtk.Table(4, 1, False)
    # Set up the dictionary to hold all registered widgets
    self.propUI = {}
    # Create menus and toolbar items (gtk.UIManager XML definition).
    ui = """
<ui>
<menubar name="MenuBar">
<menu action="File">
<menuitem action="New" />
<menuitem action="Open" />
<separator />
<menuitem action="Save" />
<menuitem action="Save As..." />
<separator />
<menuitem action="Quit" />
</menu>
<menu action="Tint2">
<menuitem action="OpenDefault" />
<menuitem action="SaveDefault" />
<separator />
<menuitem action="Apply" />
</menu>
<menu action="Tools">
<menuitem action="FontChange" />
<separator />
<menuitem action="Defaults" />
</menu>
<menu action="HelpMenu">
<menuitem action="Help" />
<menuitem action="Report Bug" />
<separator />
<menuitem action="About" />
</menu>
</menubar>
<toolbar name="ToolBar">
<toolitem action="New" />
<toolitem action="Open" />
<toolitem action="Save" />
<separator />
<toolitem action="Apply" />
</toolbar>
</ui>
"""
    # Set up UI manager
    self.uiManager = gtk.UIManager()
    accelGroup = self.uiManager.get_accel_group()
    self.add_accel_group(accelGroup)
    self.ag = gtk.ActionGroup("File")
    self.ag.add_actions([("File", None, "_File"),
        ("New", gtk.STOCK_NEW, "_New", None, "Create a new config", self.new),
        ("Open", gtk.STOCK_OPEN, "_Open", None, "Open an existing config", self.openFile),
        ("Save", gtk.STOCK_SAVE, "_Save", None, "Save the current config", self.save),
        ("Save As...", gtk.STOCK_SAVE_AS, "Save As", None, "Save the current config as...", self.saveAs),
        ("SaveDefault", None, "Save As tint2 Default", None, "Save the current config as the tint2 default", self.saveAsDef),
        ("OpenDefault", None, "Open tint2 Default", None, "Open the current tint2 default config", self.openDef),
        ("Apply", gtk.STOCK_APPLY, "Apply Config", None, "Apply the current config to tint2", self.apply),
        ("Quit", gtk.STOCK_QUIT, "_Quit", None, "Quit the program", self.quit),
        ("Tools", None, "_Tools"),
        ("Tint2", None, "Tint_2"),
        ("HelpMenu", None, "_Help"),
        ("FontChange", gtk.STOCK_SELECT_FONT, "Change All Fonts", None, "Change all fonts at once.", self.changeAllFonts),
        ("Defaults", gtk.STOCK_PREFERENCES, "Change Defaults", None, "Change tintwizard defaults.", self.changeDefaults),
        ("Help", gtk.STOCK_HELP, "_Help", None, "Get help with tintwizard", self.help),
        ("Report Bug", None, "Report Bug", None, "Report a problem with tintwizard", self.reportBug),
        ("About", gtk.STOCK_ABOUT, "_About Tint Wizard", None, "Find out more about Tint Wizard", self.about)])
    # Add main UI
    self.uiManager.insert_action_group(self.ag, -1)
    self.uiManager.add_ui_from_string(ui)
    if not self.oneConfigFile:
        # Attach menubar and toolbar to main window
        self.table.attach(self.uiManager.get_widget("/MenuBar"), 0, 4, 0, 1)
        self.table.attach(self.uiManager.get_widget("/ToolBar"), 0, 4, 1, 2)
    # Create notebook
    self.notebook = gtk.Notebook()
    self.notebook.set_tab_pos(gtk.POS_TOP)
    # Create notebook pages
    # Background Options
    self.tableBgs = gtk.Table(rows=1, columns=1, homogeneous=False)
    self.tableBgs.set_row_spacings(5)
    self.tableBgs.set_col_spacings(5)
    self.bgNotebook = gtk.Notebook()
    self.bgNotebook.set_scrollable(True)
    self.tableBgs.attach(self.bgNotebook, 0, 2, 0, 1)
    self.bgs = []
    # Add buttons for adding/deleting background styles
    createButton(self.tableBgs, text="New Background", stock=gtk.STOCK_NEW, name="addBg", gridX=0, gridY=1, xExpand=True, yExpand=True, handler=self.addBgClick)
    createButton(self.tableBgs, text="Delete Background", stock=gtk.STOCK_DELETE, name="delBg", gridX=1, gridY=1, xExpand=True, yExpand=True, handler=self.delBgClick)
    # Panel
    self.createPanelDisplayWidgets()
    self.createPanelSettingsWidgets()
    self.createPanelAutohideWidgets()
    # Taskbar
    self.createTaskbarWidgets()
    # Tasks
    self.createTaskSettingsWidgets()
    self.createNormalTasksWidgets()
    self.createActiveTasksWidgets()
    self.createUrgentTasksWidgets()
    self.createIconifiedTasksWidgets()
    # System Tray
    self.createSystemTrayWidgets()
    # Clock
    self.createClockDisplayWidgets()
    self.createClockSettingsWidgets()
    # Mouse
    self.createMouseWidgets()
    # Tooltips
    self.createTooltipsWidgets()
    # Battery
    self.createBatteryWidgets()
    # View Config
    self.configArea = gtk.ScrolledWindow()
    self.configBuf = gtk.TextBuffer()
    self.configTextView = gtk.TextView(self.configBuf)
    self.configArea.add_with_viewport(self.configTextView)
    # Add backgrounds to notebooks
    for i in range(self.defaults["bgCount"]):
        self.addBgClick(None, init=True)
    self.bgNotebook.set_current_page(0)
    # Create sub-notebooks
    self.panelNotebook = gtk.Notebook()
    self.panelNotebook.set_tab_pos(gtk.POS_TOP)
    self.panelNotebook.set_current_page(0)
    self.panelNotebook.append_page(self.tablePanelDisplay, gtk.Label("Panel Display"))
    self.panelNotebook.append_page(self.tablePanelSettings, gtk.Label("Panel Settings"))
    self.panelNotebook.append_page(self.tablePanelAutohide, gtk.Label("Panel Autohide"))
    self.taskNotebook = gtk.Notebook()
    self.taskNotebook.set_tab_pos(gtk.POS_TOP)
    self.taskNotebook.set_current_page(0)
    self.taskNotebook.append_page(self.tableTask, gtk.Label("Task Settings"))
    self.taskNotebook.append_page(self.tableTaskDefault, gtk.Label("Normal Tasks"))
    self.taskNotebook.append_page(self.tableTaskActive, gtk.Label("Active Tasks"))
    self.taskNotebook.append_page(self.tableTaskUrgent, gtk.Label("Urgent Tasks"))
    self.taskNotebook.append_page(self.tableTaskIconified, gtk.Label("Iconified Tasks"))
    self.clockNotebook = gtk.Notebook()
    self.clockNotebook.set_tab_pos(gtk.POS_TOP)
    self.clockNotebook.set_current_page(0)
    self.clockNotebook.append_page(self.tableClockDisplays, gtk.Label("Clock Display"))
    self.clockNotebook.append_page(self.tableClockSettings, gtk.Label("Clock Settings"))
    # Add pages to notebook
    self.notebook.append_page(self.tableBgs, gtk.Label("Backgrounds"))
    self.notebook.append_page(self.panelNotebook, gtk.Label("Panel"))
    self.notebook.append_page(self.tableTaskbar, gtk.Label("Taskbar"))
    self.notebook.append_page(self.taskNotebook, gtk.Label("Tasks"))
    self.notebook.append_page(self.tableTray, gtk.Label("System Tray"))
    self.notebook.append_page(self.clockNotebook, gtk.Label("Clock"))
    self.notebook.append_page(self.tableMouse, gtk.Label("Mouse"))
    self.notebook.append_page(self.tableTooltip, gtk.Label("Tooltips"))
    self.notebook.append_page(self.tableBattery, gtk.Label("Battery"))
    self.notebook.append_page(self.configArea, gtk.Label("View Config"))
    self.notebook.connect("switch-page", self.switchPage)
    # Add notebook to window and show
    self.table.attach(self.notebook, 0, 4, 2, 3, xpadding=5, ypadding=5)
    if self.oneConfigFile:
        # Add button Apply and Close
        self.box1 = gtk.HBox(False, 20)
        self.table.attach(self.box1, 0, 4, 3, 4, xpadding=5, ypadding=5)
        temp = gtk.Button("Apply", gtk.STOCK_APPLY)
        temp.set_name("applyBg")
        temp.connect("clicked", self.apply)
        self.box1.pack_start(temp, True, True, 0)
        temp = gtk.Button("Close", gtk.STOCK_CLOSE)
        temp.set_name("closeBg")
        temp.connect("clicked", self.quit)
        self.box1.pack_start(temp, True, True, 0)
    # Create and add the status bar to the bottom of the main window
    self.statusBar = gtk.Statusbar()
    self.statusBar.set_has_resize_grip(True)
    self.updateStatusBar("New Config File [*]")
    self.table.attach(self.statusBar, 0, 4, 4, 5)
    self.add(self.table)
    self.show_all()
    # If tintwizard was launched with a tint2 config filename
    # as an argument, load that config.
    if self.oneConfigFile:
        self.readTint2Config()
        self.generateConfig()
def createPanelDisplayWidgets(self):
    """Create the Panel Display widgets (method of TintWizardGUI).

    Builds the table behind the "Panel Display" tab: position,
    orientation, items, size, margins, padding/spacing and the panel
    background id. Each widget (or tuple of widgets) is registered under
    its tint2 config key via registerComponent().
    """
    self.tablePanelDisplay = gtk.Table(rows=7, columns=3, homogeneous=False)
    self.tablePanelDisplay.set_row_spacings(5)
    self.tablePanelDisplay.set_col_spacings(5)
    createLabel(self.tablePanelDisplay, text="Position", gridX=0, gridY=0, xPadding=10)
    self.panelPosY = createComboBox(self.tablePanelDisplay, ["bottom", "top", "center"], gridX=1, gridY=0, handler=self.changeOccurred)
    self.panelPosX = createComboBox(self.tablePanelDisplay, ["left", "right", "center"], gridX=2, gridY=0, handler=self.changeOccurred)
    # Note: registered below together with the orientation widget, since
    # tint2's panel_position takes all three values on one line.
    createLabel(self.tablePanelDisplay, text="Panel Orientation", gridX=0, gridY=1, xPadding=10)
    self.panelOrientation = createComboBox(self.tablePanelDisplay, ["horizontal", "vertical"], gridX=1, gridY=1, handler=self.changeOccurred)
    self.registerComponent("panel_position", (self.panelPosY, self.panelPosX, self.panelOrientation))
    createLabel(self.tablePanelDisplay, text="Panel Items", gridX=0, gridY=2, xPadding=10)
    self.panelItems = createEntry(self.tablePanelDisplay, maxSize=7, width=8, text=PANEL_ITEMS, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.registerComponent("panel_items", self.panelItems)
    self.panelSizeLabel = createLabel(self.tablePanelDisplay, text="Size (width, height)", gridX=0, gridY=3, xPadding=10)
    self.panelSizeX = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_SIZE_X, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.panelSizeY = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_SIZE_Y, gridX=2, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.registerComponent("panel_size", (self.panelSizeX, self.panelSizeY))
    createLabel(self.tablePanelDisplay, text="Margin (x, y)", gridX=0, gridY=4, xPadding=10)
    self.panelMarginX = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_MARGIN_X, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.panelMarginY = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_MARGIN_Y, gridX=2, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.registerComponent("panel_margin", (self.panelMarginX, self.panelMarginY))
    createLabel(self.tablePanelDisplay, text="Padding (x, y)", gridX=0, gridY=5, xPadding=10)
    self.panelPadX = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_PADDING_X, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.panelPadY = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=PANEL_PADDING_Y, gridX=2, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
    # Note: registered below together with the spacing entry, since
    # tint2's panel_padding takes all three values on one line.
    createLabel(self.tablePanelDisplay, text="Horizontal Spacing", gridX=0, gridY=6, xPadding=10)
    self.panelSpacing = createEntry(self.tablePanelDisplay, maxSize=6, width=8, text=TASKBAR_SPACING, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.registerComponent("panel_padding", (self.panelPadX, self.panelPadY, self.panelSpacing))
    createLabel(self.tablePanelDisplay, text="Panel Background ID", gridX=0, gridY=7, xPadding=10)
    # BUGFIX: "list + range(...)" only works on Python 2, where range()
    # returned a list; wrapping in list() keeps Python 2 behavior
    # identical and avoids a TypeError under Python 3.
    self.panelBg = createComboBox(self.tablePanelDisplay, ["0 (fully transparent)"] + list(range(1, len(self.bgs))), gridX=1, gridY=7, handler=self.changeOccurred)
    self.registerComponent("panel_background_id", self.panelBg)
def createPanelSettingsWidgets(self):
    """Build the "Panel Settings" tab (method of TintWizardGUI).

    Rows: window-manager menu toggle, WM-dock toggle, panel layer,
    strut policy and panel monitor. Each widget is registered under its
    tint2 config key via registerComponent().
    """
    table = gtk.Table(rows=5, columns=3, homogeneous=False)
    table.set_row_spacings(5)
    table.set_col_spacings(5)
    self.tablePanelSettings = table
    on_change = self.changeOccurred
    # Row 0: show the window-manager menu on panel click.
    createLabel(table, text="Window Manager Menu", gridX=0, gridY=0, xPadding=10)
    self.panelMenu = createCheckButton(table, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=on_change)
    self.registerComponent("wm_menu", self.panelMenu)
    # Row 1: place the panel in the window manager's dock.
    createLabel(table, text="Place In Window Manager Dock", gridX=0, gridY=1, xPadding=10)
    self.panelDock = createCheckButton(table, active=False, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=on_change)
    self.registerComponent("panel_dock", self.panelDock)
    # Row 2: stacking layer of the panel.
    createLabel(table, text="Panel Layer", gridX=0, gridY=2, xPadding=10)
    self.panelLayer = createComboBox(table, ["bottom", "top", "normal"], gridX=1, gridY=2, handler=on_change)
    self.registerComponent("panel_layer", self.panelLayer)
    # Row 3: strut policy (how the panel reserves screen space).
    createLabel(table, text="Strut Policy", gridX=0, gridY=3, xPadding=10)
    self.panelAutohideStrut = createComboBox(table, ["none", "minimum", "follow_size"], gridX=1, gridY=3, handler=on_change)
    self.registerComponent("strut_policy", self.panelAutohideStrut)
    # Row 4: which monitor the panel appears on.
    createLabel(table, text="Panel Monitor (all, 1, 2, ...)", gridX=0, gridY=4, xPadding=10)
    self.panelMonitor = createEntry(table, maxSize=6, width=8, text=PANEL_MONITOR, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=on_change)
    self.registerComponent("panel_monitor", self.panelMonitor)
def createPanelAutohideWidgets(self):
    """Build the "Panel Autohide" tab (method of TintWizardGUI).

    Row 0 is the autohide on/off toggle; rows 1-3 are numeric entries
    for the show timeout, hide timeout and hidden height. Every widget
    is registered under its tint2 config key via registerComponent().
    """
    table = gtk.Table(rows=4, columns=3, homogeneous=False)
    table.set_row_spacings(5)
    table.set_col_spacings(5)
    self.tablePanelAutohide = table
    # Row 0: enable/disable autohide.
    createLabel(table, text="Autohide Panel", gridX=0, gridY=0, xPadding=10)
    self.panelAutohide = createCheckButton(table, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.registerComponent("autohide", self.panelAutohide)
    # Rows 1-3 all follow the same label + entry + register pattern.
    entry_rows = [
        ("Autohide Show Timeout (seconds)", "panelAutohideShow", "autohide_show_timeout", PANEL_AUTOHIDE_SHOW),
        ("Autohide Hide Timeout (seconds)", "panelAutohideHide", "autohide_hide_timeout", PANEL_AUTOHIDE_HIDE),
        ("Autohide Hidden Height", "panelAutohideHeight", "autohide_height", PANEL_AUTOHIDE_HEIGHT),
    ]
    for row, (caption, attr, conf_key, default) in enumerate(entry_rows, start=1):
        createLabel(table, text=caption, gridX=0, gridY=row, xPadding=10)
        widget = createEntry(table, maxSize=6, width=8, text=default, gridX=1, gridY=row, xExpand=True, yExpand=False, handler=self.changeOccurred)
        setattr(self, attr, widget)
        self.registerComponent(conf_key, widget)
def createTaskbarWidgets(self):
    """Create the Taskbar widgets (method of TintWizardGUI).

    Builds the "Taskbar" tab: taskbar mode, padding/spacing and the
    normal/active taskbar background ids. Each widget (or tuple) is
    registered under its tint2 config key via registerComponent().
    """
    self.tableTaskbar = gtk.Table(rows=5, columns=3, homogeneous=False)
    self.tableTaskbar.set_row_spacings(5)
    self.tableTaskbar.set_col_spacings(5)
    createLabel(self.tableTaskbar, text="Taskbar Mode", gridX=0, gridY=0, xPadding=10)
    self.taskbarMode = createComboBox(self.tableTaskbar, ["single_desktop", "multi_desktop"], gridX=1, gridY=0, handler=self.changeOccurred)
    self.registerComponent("taskbar_mode", self.taskbarMode)
    createLabel(self.tableTaskbar, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
    self.taskbarPadX = createEntry(self.tableTaskbar, maxSize=6, width=8, text=TASKBAR_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.taskbarPadY = createEntry(self.tableTaskbar, maxSize=6, width=8, text=TASKBAR_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
    # Note: registered below together with the spacing entry, since
    # tint2's taskbar_padding takes all three values on one line.
    createLabel(self.tableTaskbar, text="Horizontal Spacing", gridX=0, gridY=2, xPadding=10)
    self.taskbarSpacing = createEntry(self.tableTaskbar, maxSize=6, width=8, text=TASK_SPACING, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
    self.registerComponent("taskbar_padding", (self.taskbarPadX, self.taskbarPadY, self.taskbarSpacing))
    createLabel(self.tableTaskbar, text="Taskbar Background ID", gridX=0, gridY=3, xPadding=10)
    # BUGFIX: "list + range(...)" only works on Python 2, where range()
    # returned a list; wrapping in list() keeps Python 2 behavior
    # identical and avoids a TypeError under Python 3.
    self.taskbarBg = createComboBox(self.tableTaskbar, ["0 (fully transparent)"] + list(range(1, len(self.bgs))), gridX=1, gridY=3, handler=self.changeOccurred)
    self.registerComponent("taskbar_background_id", self.taskbarBg)
    createLabel(self.tableTaskbar, text="Active Taskbar Background ID", gridX=0, gridY=4, xPadding=10)
    self.taskbarActiveBg = createComboBox(self.tableTaskbar, ["0 (fully transparent)"] + list(range(1, len(self.bgs))), gridX=1, gridY=4, handler=self.changeOccurred)
    self.registerComponent("taskbar_active_background_id", self.taskbarActiveBg)
def createTaskSettingsWidgets(self):
"""Create the Task Settings widgets."""
self.tableTask = gtk.Table(rows=12, columns=3, homogeneous=False)
self.tableTask.set_row_spacings(5)
self.tableTask.set_col_spacings(5)
createLabel(self.tableTask, text="Number of 'Blinks' on Urgent Event", gridX=0, gridY=0, xPadding=10)
self.taskBlinks = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_BLINKS, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("urgent_nb_of_blink", self.taskBlinks)
createLabel(self.tableTask, text="Show Icons", gridX=0, gridY=1, xPadding=10)
self.taskIconCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_icon", self.taskIconCheckButton)
createLabel(self.tableTask, text="Show Text", gridX=0, gridY=2, xPadding=10)
self.taskTextCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_text", self.taskTextCheckButton)
createLabel(self.tableTask, text="Centre Text", gridX=0, gridY=3, xPadding=10)
self.taskCentreCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_centered", self.taskCentreCheckButton)
createLabel(self.tableTask, text="Font", gridX=0, gridY=4, xPadding=10)
self.fontButton = gtk.FontButton()
if self.defaults["font"] in [None, "None"]: # If there was no font specified in the config file
self.defaults["font"] = self.fontButton.get_font_name() # Use the gtk default
self.fontButton = createFontButton(self.tableTask, font=self.defaults["font"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("task_font", self.fontButton)
createLabel(self.tableTask, text="Show Font Shadow", gridX=0, gridY=5, xPadding=10)
self.fontShadowCheckButton = createCheckButton(self.tableTask, active=False, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("font_shadow", self.fontShadowCheckButton)
createLabel(self.tableTask, text="Maximum Size (x, y)", gridX=0, gridY=6, xPadding=10)
self.taskMaxSizeX = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_MAXIMUM_SIZE_X, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.taskMaxSizeY = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_MAXIMUM_SIZE_Y, gridX=2, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_maximum_size", (self.taskMaxSizeX, self.taskMaxSizeY))
createLabel(self.tableTask, text="Padding (x, y)", gridX=0, gridY=7, xPadding=10)
self.taskPadX = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_PADDING_X, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.taskPadY = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_PADDING_Y, gridX=2, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_padding", (self.taskPadX, self.taskPadY))
createLabel(self.tableTask, text="Show minimize/maximize/close buttons", gridX=0, gridY=8, xPadding=10)
self.taskMMCButtonsCheckButton = createCheckButton(self.tableTask, active=True, gridX=1, gridY=8, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_minimize_maximize_close_buttons", self.taskMMCButtonsCheckButton)
createLabel(self.tableTask, text="Button Padding (x, y)", gridX=0, gridY=9, xPadding=10)
self.taskMMCPadX = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_MMC_PADDING_X, gridX=1, gridY=9, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.taskMMCPadY = createEntry(self.tableTask, maxSize=6, width=8, text=TASK_MMC_PADDING_Y, gridX=2, gridY=9, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_buttons_padding", (self.taskMMCPadX, self.taskMMCPadY))
def createNormalTasksWidgets(self):
"""Create the Normal Tasks widgets."""
self.tableTaskDefault = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskDefault.set_row_spacings(5)
self.tableTaskDefault.set_col_spacings(5)
createLabel(self.tableTaskDefault, text="Normal Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskBg = createComboBox(self.tableTaskDefault, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_background_id", self.taskBg)
createLabel(self.tableTaskDefault, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskDefault, text="Normal Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.iconHue = createEntry(self.tableTaskDefault, maxSize=6, width=8, text=ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskDefault, text="Normal Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.iconSat = createEntry(self.tableTaskDefault, maxSize=6, width=8, text=ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskDefault, text="Normal Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.iconBri = createEntry(self.tableTaskDefault, maxSize=6, width=8, text=ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_icon_asb", (self.iconHue, self.iconSat, self.iconBri))
createLabel(self.tableTaskDefault, text="Normal Font Color", gridX=0, gridY=5, xPadding=10)
self.fontCol = createEntry(self.tableTaskDefault, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontCol")
self.fontCol.connect("activate", self.colorTyped)
self.fontColButton = createColorButton(self.tableTaskDefault, color=self.defaults["fgColor"], useAlpha=True, name="fontCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontCol.connect("changed", self.changeOccurred)
self.registerComponent("task_font_color", (self.fontCol, self.fontColButton))
def createActiveTasksWidgets(self):
"""Create the Active Tasks widgets."""
self.tableTaskActive = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskActive.set_row_spacings(5)
self.tableTaskActive.set_col_spacings(5)
createLabel(self.tableTaskActive, text="Active Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskActiveBg = createComboBox(self.tableTaskActive, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_active_background_id", self.taskActiveBg)
createLabel(self.tableTaskActive, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskActive, text="Active Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.activeIconHue = createEntry(self.tableTaskActive, maxSize=6, width=8, text=ACTIVE_ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskActive, text="Active Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.activeIconSat = createEntry(self.tableTaskActive, maxSize=6, width=8, text=ACTIVE_ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskActive, text="Active Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.activeIconBri = createEntry(self.tableTaskActive, maxSize=6, width=8, text=ACTIVE_ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_active_icon_asb", (self.activeIconHue, self.activeIconSat, self.activeIconBri))
createLabel(self.tableTaskActive, text="Active Font Color", gridX=0, gridY=5, xPadding=10)
self.fontActiveCol = createEntry(self.tableTaskActive, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontActiveCol")
self.fontActiveCol.connect("activate", self.colorTyped)
self.fontActiveColButton = createColorButton(self.tableTaskActive, color=self.defaults["fgColor"], useAlpha=True, name="fontActiveCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontActiveCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontActiveCol.connect("changed", self.changeOccurred)
self.registerComponent("task_active_font_color", (self.fontActiveCol, self.fontActiveColButton))
def createUrgentTasksWidgets(self):
"""Create the Urgent Tasks widgets."""
self.tableTaskUrgent = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskUrgent.set_row_spacings(5)
self.tableTaskUrgent.set_col_spacings(5)
createLabel(self.tableTaskUrgent, text="Urgent Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskUrgentBg = createComboBox(self.tableTaskUrgent, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_urgent_background_id", self.taskUrgentBg)
createLabel(self.tableTaskUrgent, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskUrgent, text="Urgent Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.urgentIconHue = createEntry(self.tableTaskUrgent, maxSize=6, width=8, text=URGENT_ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskUrgent, text="Urgent Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.urgentIconSat = createEntry(self.tableTaskUrgent, maxSize=6, width=8, text=URGENT_ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskUrgent, text="Urgent Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.urgentIconBri = createEntry(self.tableTaskUrgent, maxSize=6, width=8, text=URGENT_ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_urgent_icon_asb", (self.urgentIconHue, self.urgentIconSat, self.urgentIconBri))
createLabel(self.tableTaskUrgent, text="Urgent Font Color", gridX=0, gridY=5, xPadding=10)
self.fontUrgentCol = createEntry(self.tableTaskUrgent, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontUrgentCol")
self.fontUrgentCol.connect("activate", self.colorTyped)
self.fontUrgentColButton = createColorButton(self.tableTaskUrgent, color=self.defaults["fgColor"], useAlpha=True, name="fontUrgentCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontUrgentCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontUrgentCol.connect("changed", self.changeOccurred)
self.registerComponent("task_urgent_font_color", (self.fontUrgentCol, self.fontUrgentColButton))
def createIconifiedTasksWidgets(self):
"""Create the Iconified Tasks widgets."""
self.tableTaskIconified = gtk.Table(rows=6, columns=3, homogeneous=False)
self.tableTaskIconified.set_row_spacings(5)
self.tableTaskIconified.set_col_spacings(5)
createLabel(self.tableTaskIconified, text="Iconified Task Background ID", gridX=0, gridY=0, xPadding=10)
self.taskIconifiedBg = createComboBox(self.tableTaskIconified, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("task_iconified_background_id", self.taskIconifiedBg)
createLabel(self.tableTaskIconified, text="Note: Default values of 0 for each of these settings leaves icons unchanged!", gridX=0, gridY=1, sizeX=3, xPadding=10)
createLabel(self.tableTaskIconified, text="Iconified Icon Alpha (0 to 100)", gridX=0, gridY=2, xPadding=10)
self.iconifiedIconHue = createEntry(self.tableTaskIconified, maxSize=6, width=8, text=ICONIFIED_ICON_ALPHA, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskIconified, text="Iconified Icon Saturation (-100 to 100)", gridX=0, gridY=3, xPadding=10)
self.iconifiedIconSat = createEntry(self.tableTaskIconified, maxSize=6, width=8, text=ICONIFIED_ICON_SAT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTaskIconified, text="Iconified Icon Brightness (-100 to 100)", gridX=0, gridY=4, xPadding=10)
self.iconifiedIconBri = createEntry(self.tableTaskIconified, maxSize=6, width=8, text=ICONIFIED_ICON_BRI, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("task_iconified_icon_asb", (self.iconifiedIconHue, self.iconifiedIconSat, self.iconifiedIconBri))
createLabel(self.tableTaskIconified, text="Iconified Font Color", gridX=0, gridY=5, xPadding=10)
self.fontIconifiedCol = createEntry(self.tableTaskIconified, maxSize=7, width=9, text="", gridX=1, gridY=5, xExpand=True, yExpand=False, handler=None, name="fontIconifiedCol")
self.fontIconifiedCol.connect("activate", self.colorTyped)
self.fontIconifiedColButton = createColorButton(self.tableTaskIconified, color=self.defaults["fgColor"], useAlpha=True, name="fontIconifiedCol", gridX=2, gridY=5, handler=self.colorChange)
self.fontIconifiedCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.fontIconifiedCol.connect("changed", self.changeOccurred)
self.registerComponent("task_iconified_font_color", (self.fontIconifiedCol, self.fontIconifiedColButton))
def createSystemTrayWidgets(self):
"""Create the System Tray widgets."""
self.tableTray = gtk.Table(rows=9, columns=3, homogeneous=False)
self.tableTray.set_row_spacings(5)
self.tableTray.set_col_spacings(5)
createLabel(self.tableTray, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.trayPadX = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.trayPadY = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTray, text="Horizontal Spacing", gridX=0, gridY=2, xPadding=10)
self.traySpacing = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_SPACING, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray_padding", (self.trayPadX, self.trayPadY, self.traySpacing))
createLabel(self.tableTray, text="System Tray Background ID", gridX=0, gridY=3, xPadding=10)
self.trayBg = createComboBox(self.tableTray, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=3, handler=self.changeOccurred)
self.registerComponent("systray_background_id", self.trayBg)
createLabel(self.tableTray, text="Icon Ordering", gridX=0, gridY=4, xPadding=10)
self.trayOrder = createComboBox(self.tableTray, ["ascending", "descending", "left2right", "right2left"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("systray_sort", self.trayOrder)
createLabel(self.tableTray, text="Maximum Icon Size (0 for automatic size)", gridX=0, gridY=5, xPadding=10)
self.trayMaxIconSize = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_MAX_ICON_SIZE, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray_icon_size", self.trayMaxIconSize)
createLabel(self.tableTray, text="System Tray Icon Alpha (0 to 100)", gridX=0, gridY=6, xPadding=10)
self.trayIconHue = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_ICON_ALPHA, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTray, text="System Tray Icon Saturation (-100 to 100)", gridX=0, gridY=7, xPadding=10)
self.trayIconSat = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_ICON_SAT, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
# Note: added below
createLabel(self.tableTray, text="System Tray Icon Brightness (-100 to 100)", gridX=0, gridY=8, xPadding=10)
self.trayIconBri = createEntry(self.tableTray, maxSize=6, width=8, text=TRAY_ICON_BRI, gridX=1, gridY=8, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("systray_icon_asb", (self.trayIconHue, self.trayIconSat, self.trayIconBri))
def createClockDisplayWidgets(self):
"""Create the Clock Display widgets."""
self.tableClockDisplays = gtk.Table(rows=3, columns=3, homogeneous=False)
self.tableClockDisplays.set_row_spacings(5)
self.tableClockDisplays.set_col_spacings(5)
createLabel(self.tableClockDisplays, text="Time 1 Format", gridX=0, gridY=1, xPadding=10)
self.clock1Format = createEntry(self.tableClockDisplays, maxSize=50, width=20, text=CLOCK_FMT_1, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time1_format", self.clock1Format)
createLabel(self.tableClockDisplays, text="Time 1 Font", gridX=0, gridY=2, xPadding=10)
self.clock1FontButton = createFontButton(self.tableClockDisplays, font=self.defaults["font"], gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("time1_font", self.clock1FontButton)
createLabel(self.tableClockDisplays, text="Time 2 Format", gridX=0, gridY=3, xPadding=10)
self.clock2Format = createEntry(self.tableClockDisplays, maxSize=50, width=20, text=CLOCK_FMT_2, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time2_format", self.clock2Format)
createLabel(self.tableClockDisplays, text="Time 2 Font", gridX=0, gridY=4, xPadding=10)
self.clock2FontButton = createFontButton(self.tableClockDisplays, font=self.defaults["font"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("time2_font", self.clock2FontButton)
createLabel(self.tableClockDisplays, text="Tooltip Format", gridX=0, gridY=5, xPadding=10)
self.clockTooltipFormat = createEntry(self.tableClockDisplays, maxSize=50, width=20, text=CLOCK_TOOLTIP, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_tooltip", self.clockTooltipFormat)
self.clockArea = gtk.ScrolledWindow()
self.clockBuf = gtk.TextBuffer()
self.clockTextView = gtk.TextView(self.clockBuf)
self.clockBuf.insert_at_cursor("%H 00-23 (24-hour) %I 01-12 (12-hour) %l 1-12 (12-hour) %M 00-59 (minutes)\n%S 00-59 (seconds) %P am/pm %b Jan-Dec %B January-December\n%a Sun-Sat %A Sunday-Saturday %d 01-31 (day) %e 1-31 (day)\n%y 2 digit year, e.g. 09 %Y 4 digit year, e.g. 2009")
self.clockTextView.set_editable(False)
self.clockArea.add_with_viewport(self.clockTextView)
self.tableClockDisplays.attach(self.clockArea, 0, 3, 6, 7, xpadding=10)
def createClockSettingsWidgets(self):
"""Create the Clock Settings widgets."""
self.tableClockSettings = gtk.Table(rows=3, columns=3, homogeneous=False)
self.tableClockSettings.set_row_spacings(5)
self.tableClockSettings.set_col_spacings(5)
createLabel(self.tableClockSettings, text="Clock Font Color", gridX=0, gridY=0, xPadding=10)
self.clockFontCol = createEntry(self.tableClockSettings, maxSize=7, width=9, text="", gridX=1, gridY=0, xExpand=True, yExpand=False, handler=None, name="clockFontCol")
self.clockFontCol.connect("activate", self.colorTyped)
self.clockFontColButton = createColorButton(self.tableClockSettings, color=self.defaults["fgColor"], useAlpha=True, name="clockFontCol", gridX=2, gridY=0, handler=self.colorChange)
self.clockFontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.clockFontCol.connect("changed", self.changeOccurred)
self.registerComponent("clock_font_color", (self.clockFontCol, self.clockFontColButton))
createLabel(self.tableClockSettings, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.clockPadX = createEntry(self.tableClockSettings, maxSize=6, width=8, text=CLOCK_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.clockPadY = createEntry(self.tableClockSettings, maxSize=6, width=8, text=CLOCK_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_padding", (self.clockPadX, self.clockPadY))
createLabel(self.tableClockSettings, text="Clock Background ID", gridX=0, gridY=2, xPadding=10)
self.clockBg = createComboBox(self.tableClockSettings, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("clock_background_id", self.clockBg)
createLabel(self.tableClockSettings, text="Left Click Command", gridX=0, gridY=3, xPadding=10)
self.clockLClick = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_LCLICK, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_lclick_command", self.clockLClick)
createLabel(self.tableClockSettings, text="Right Click Command", gridX=0, gridY=4, xPadding=10)
self.clockRClick = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_RCLICK, gridX=1, gridY=4, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_rclick_command", self.clockRClick)
createLabel(self.tableClockSettings, text="Time 1 Timezone", gridX=0, gridY=5, xPadding=10)
self.clockTime1Timezone = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_TIME1_TIMEZONE, gridX=1, gridY=5, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time1_timezone", self.clockTime1Timezone)
createLabel(self.tableClockSettings, text="Time 2 Timezone", gridX=0, gridY=6, xPadding=10)
self.clockTime2Timezone = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_TIME2_TIMEZONE, gridX=1, gridY=6, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("time2_timezone", self.clockTime2Timezone)
createLabel(self.tableClockSettings, text="Tooltip Timezone", gridX=0, gridY=7, xPadding=10)
self.clockTooltipTimezone = createEntry(self.tableClockSettings, maxSize=50, width=20, text=CLOCK_TOOLTIP_TIMEZONE, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("clock_tooltip_timezone", self.clockTooltipTimezone)
def createMouseWidgets(self):
"""Creates the Mouse widgets."""
self.tableMouse = gtk.Table(rows=4, columns=3, homogeneous=False)
self.tableMouse.set_row_spacings(5)
self.tableMouse.set_col_spacings(5)
mouseCmds = ["none", "close", "toggle", "iconify", "shade", "toggle_iconify", "maximize_restore", "desktop_left", "desktop_right", "next_task", "prev_task"]
createLabel(self.tableMouse, text="Middle Mouse Click Action", gridX=0, gridY=0, xPadding=10)
self.mouseMiddle = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=0, handler=self.changeOccurred)
self.registerComponent("mouse_middle", self.mouseMiddle)
createLabel(self.tableMouse, text="Right Mouse Click Action", gridX=0, gridY=1, xPadding=10)
self.mouseRight = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=1, handler=self.changeOccurred)
self.registerComponent("mouse_right", self.mouseRight)
createLabel(self.tableMouse, text="Mouse Wheel Scroll Up Action", gridX=0, gridY=2, xPadding=10)
self.mouseUp = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=2, handler=self.changeOccurred)
self.registerComponent("mouse_scroll_up", self.mouseUp)
createLabel(self.tableMouse, text="Mouse Wheel Scroll Down Action", gridX=0, gridY=3, xPadding=10)
self.mouseDown = createComboBox(self.tableMouse, mouseCmds, gridX=1, gridY=3, handler=self.changeOccurred)
self.registerComponent("mouse_scroll_down", self.mouseDown)
def createTooltipsWidgets(self):
"""Creates the Tooltips widgets."""
self.tableTooltip = gtk.Table(rows=7, columns=3, homogeneous=False)
self.tableTooltip.set_row_spacings(5)
self.tableTooltip.set_col_spacings(5)
createLabel(self.tableTooltip, text="Show Tooltips", gridX=0, gridY=0, xPadding=10)
self.tooltipShow = createCheckButton(self.tableTooltip, active=False, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip", self.tooltipShow)
createLabel(self.tableTooltip, text="Padding (x, y)", gridX=0, gridY=1, xPadding=10)
self.tooltipPadX = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_PADDING_X, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.tooltipPadY = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_PADDING_Y, gridX=2, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip_padding", (self.tooltipPadX, self.tooltipPadY))
createLabel(self.tableTooltip, text="Tooltip Show Timeout (seconds)", gridX=0, gridY=2, xPadding=10)
self.tooltipShowTime = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_SHOW_TIMEOUT, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip_show_timeout", self.tooltipShowTime)
createLabel(self.tableTooltip, text="Tooltip Hide Timeout (seconds)", gridX=0, gridY=3, xPadding=10)
self.tooltipHideTime = createEntry(self.tableTooltip, maxSize=6, width=8, text=TOOLTIP_HIDE_TIMEOUT, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("tooltip_hide_timeout", self.tooltipHideTime)
createLabel(self.tableTooltip, text="Tooltip Background ID", gridX=0, gridY=4, xPadding=10)
self.tooltipBg = createComboBox(self.tableTooltip, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("tooltip_background_id", self.tooltipBg)
createLabel(self.tableTooltip, text="Tooltip Font", gridX=0, gridY=5, xPadding=10)
self.tooltipFont = createFontButton(self.tableTooltip, font=self.defaults["font"], gridX=1, gridY=5, handler=self.changeOccurred)
self.registerComponent("tooltip_font", self.tooltipFont)
createLabel(self.tableTooltip, text="Tooltip Font Color", gridX=0, gridY=6, xPadding=10)
self.tooltipFontCol = createEntry(self.tableTooltip, maxSize=7, width=9, text="", gridX=1, gridY=6, xExpand=True, yExpand=False, handler=None, name="tooltipFontCol")
self.tooltipFontCol.connect("activate", self.colorTyped)
self.tooltipFontColButton = createColorButton(self.tableTooltip, color=self.defaults["fgColor"], useAlpha=True, name="tooltipFontCol", gridX=2, gridY=6, handler=self.colorChange)
self.tooltipFontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.tooltipFontCol.connect("changed", self.changeOccurred)
self.registerComponent("tooltip_font_color", (self.tooltipFontCol, self.tooltipFontColButton))
def createBatteryWidgets(self):
"""Creates the Battery widgets."""
self.tableBattery = gtk.Table(rows=8, columns=3, homogeneous=False)
self.tableBattery.set_row_spacings(5)
self.tableBattery.set_col_spacings(5)
createLabel(self.tableBattery, text="Battery Low Status (%)", gridX=0, gridY=1, xPadding=10)
self.batteryLow = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_LOW, gridX=1, gridY=1, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_low_status", self.batteryLow)
createLabel(self.tableBattery, text="Battery Low Action", gridX=0, gridY=2, xPadding=10)
self.batteryLowAction = createEntry(self.tableBattery, maxSize=150, width=32, text=BATTERY_ACTION, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_low_cmd", self.batteryLowAction)
createLabel(self.tableBattery, text="Battery Hide (0 to 100)", gridX=0, gridY=3, xPadding=10)
self.batteryHide = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_HIDE, gridX=1, gridY=3, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_hide", self.batteryHide)
createLabel(self.tableBattery, text="Battery 1 Font", gridX=0, gridY=4, xPadding=10)
self.bat1FontButton = createFontButton(self.tableBattery, font=self.defaults["font"], gridX=1, gridY=4, handler=self.changeOccurred)
self.registerComponent("bat1_font", self.bat1FontButton)
createLabel(self.tableBattery, text="Battery 2 Font", gridX=0, gridY=5, xPadding=10)
self.bat2FontButton = createFontButton(self.tableBattery, font=self.defaults["font"], gridX=1, gridY=5, handler=self.changeOccurred)
self.registerComponent("bat2_font", self.bat2FontButton)
createLabel(self.tableBattery, text="Battery Font Color", gridX=0, gridY=6, xPadding=10)
self.batteryFontCol = createEntry(self.tableBattery, maxSize=7, width=9, text="", gridX=1, gridY=6, xExpand=True, yExpand=False, handler=None, name="batteryFontCol")
self.batteryFontCol.connect("activate", self.colorTyped)
self.batteryFontColButton = createColorButton(self.tableBattery, color=self.defaults["fgColor"], useAlpha=True, name="batteryFontCol", gridX=2, gridY=6, handler=self.colorChange)
self.batteryFontCol.set_text(self.defaults["fgColor"])
# Add this AFTER we set color to avoid "changed" event
self.batteryFontCol.connect("changed", self.changeOccurred)
self.registerComponent("battery_font_color", (self.batteryFontCol, self.batteryFontColButton))
createLabel(self.tableBattery, text="Padding (x, y)", gridX=0, gridY=7, xPadding=10)
self.batteryPadX = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_PADDING_X, gridX=1, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.batteryPadY = createEntry(self.tableBattery, maxSize=6, width=8, text=BATTERY_PADDING_Y, gridX=2, gridY=7, xExpand=True, yExpand=False, handler=self.changeOccurred)
self.registerComponent("battery_padding", (self.batteryPadX, self.batteryPadY))
createLabel(self.tableBattery, text="Battery Background ID", gridX=0, gridY=8, xPadding=10)
self.batteryBg = createComboBox(self.tableBattery, ["0 (fully transparent)"] + range(1, len(self.bgs)), gridX=1, gridY=8, handler=self.changeOccurred)
self.registerComponent("battery_background_id", self.batteryBg)
def registerComponent(self, configProperty, component):
"""Registers a component with a particular property from
a tint2 config. Note: a component may be a double or
triple if that property has more than one value associated
with it."""
self.propUI[configProperty] = component
def getComponent(self, configProperty):
"""Fetches the component associated with a tint2 property."""
return self.propUI[configProperty] if configProperty in self.propUI else None
def about(self, action=None):
"""Displays the About dialog."""
about = gtk.AboutDialog()
about.set_program_name(NAME)
about.set_version(VERSION)
about.set_authors(AUTHORS)
about.set_comments(COMMENTS)
about.set_website(WEBSITE)
gtk.about_dialog_set_url_hook(self.aboutLinkCallback)
about.run()
about.destroy()
def aboutLinkCallback(dialog, link, data=None):
"""Callback for when a URL is clicked in an About dialog."""
try:
webbrowser.open(link)
except:
errorDialog(self, "Your default web-browser could not be opened.\nPlease visit %s" % link)
def addBg(self):
    """Adds a new background to the list of backgrounds."""
    # Each background style is a 4x3 gtk.Table of widgets. Widget
    # *names* ("rounded", "bgCol", "borderColEntry", ...) are the
    # lookup keys other methods (generateConfig, getColorButton,
    # addBgDefs) use to find them later -- they must stay exact.
    self.bgs += [gtk.Table(4, 3, False)]
    createLabel(self.bgs[-1], text="Corner Rounding (px)", gridX=0, gridY=0, xPadding=10)
    createEntry(self.bgs[-1], maxSize=7, width=9, text=BG_ROUNDING, gridX=1, gridY=0, xExpand=True, yExpand=False, handler=self.changeOccurred, name="rounded")
    createLabel(self.bgs[-1], text="Background Color", gridX=0, gridY=1, xPadding=10)
    # Background color: hex text entry kept in sync with a color
    # button (colorTyped validates typed text; colorChange mirrors
    # button picks back into the entry).
    temp = gtk.Entry(7)
    temp.set_width_chars(9)
    temp.set_name("bgColEntry")
    temp.set_text(self.defaults["bgColor"])
    temp.connect("changed", self.changeOccurred)
    temp.connect("activate", self.colorTyped)
    self.bgs[-1].attach(temp, 1, 2, 1, 2, xoptions=gtk.EXPAND)
    temp = gtk.ColorButton(gtk.gdk.color_parse(self.defaults["bgColor"]))
    temp.set_use_alpha(True)
    temp.set_name("bgCol")
    temp.connect("color-set", self.colorChange)
    self.bgs[-1].attach(temp, 2, 3, 1, 2, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
    createLabel(self.bgs[-1], text="Border Width (px)", gridX=0, gridY=2, xPadding=10)
    createEntry(self.bgs[-1], maxSize=7, width=9, text=BG_BORDER, gridX=1, gridY=2, xExpand=True, yExpand=False, handler=self.changeOccurred, name="border")
    createLabel(self.bgs[-1], text="Border Color", gridX=0, gridY=3, xPadding=10)
    # Border color: same entry/button pairing as the background color.
    temp = gtk.Entry(7)
    temp.set_width_chars(9)
    temp.set_name("borderColEntry")
    temp.connect("activate", self.colorTyped)
    temp.set_text(self.defaults["borderColor"])
    temp.connect("changed", self.changeOccurred)
    self.bgs[-1].attach(temp, 1, 2, 3, 4, xoptions=gtk.EXPAND)
    temp = gtk.ColorButton(gtk.gdk.color_parse(self.defaults["borderColor"]))
    temp.set_use_alpha(True)
    temp.set_name("borderCol")
    temp.connect("color-set", self.colorChange)
    self.bgs[-1].attach(temp, 2, 3, 3, 4, xoptions=gtk.EXPAND, yoptions=gtk.EXPAND)
# Note: pass init=True only while building the initial set of
# background styles, so changeOccurred() is not fired spuriously.
def addBgClick(self, widget=None, init=False):
    """Appends a fresh background style as a new notebook tab."""
    pageCount = self.bgNotebook.get_n_pages()
    # Past a sensible number of styles, double-check with the user.
    if pageCount > (self.defaults["bgCount"] + 2):
        prompt = "You already have %d background styles. Are you sure you would like another?" % pageCount
        if confirmDialog(self, prompt) == gtk.RESPONSE_NO:
            return
    self.addBg()
    total = len(self.bgs)
    self.bgNotebook.append_page(self.bgs[total - 1], gtk.Label("Background ID %d" % (total)))
    self.bgNotebook.show_all()
    self.updateComboBoxes(pageCount, "add")
    self.bgNotebook.set_current_page(pageCount)
    if not init:
        self.changeOccurred()
def addBgDefs(self, bgDefs):
    """Add interface elements for a list of background style definitions.

    bgDefs is a list of dicts with keys: rounded, border_width,
    background_color, border_color. Color values may carry a trailing
    opacity percentage ("#rrggbb 80"); when absent, full opacity is
    assumed.

    Improvements: the local variable previously shadowed the builtin
    'list', and the identical opacity-parsing logic for the two color
    buttons is factored into one helper.
    """
    def toAlpha(value):
        # "#rrggbb [opacity%]" -> 16-bit alpha for gtk.ColorButton.
        parts = value.split(" ")
        if len(parts) > 1:
            return int(int(parts[1].strip()) * 65535 / 100.0)
        return 65535
    for d in bgDefs:
        self.addBg()
        # Fill the widgets of the background we just created.
        for child in self.bgs[-1].get_children():
            name = child.get_name()
            if name == "rounded":
                child.set_text(d["rounded"])
            elif name == "border":
                child.set_text(d["border_width"])
            elif name == "bgColEntry":
                child.set_text(d["background_color"].split(" ")[0].strip())
                child.activate()  # triggers colorTyped -> syncs the button
            elif name == "borderColEntry":
                child.set_text(d["border_color"].split(" ")[0].strip())
                child.activate()
            elif name == "bgCol":
                child.set_alpha(toAlpha(d["background_color"]))
            elif name == "borderCol":
                child.set_alpha(toAlpha(d["border_color"]))
    newId = len(self.bgs)
    self.bgNotebook.append_page(self.bgs[newId-1], gtk.Label("Background ID %d" % (newId)))
    self.bgNotebook.show_all()
    self.updateComboBoxes(newId-1, "add")
    self.bgNotebook.set_current_page(newId)
def apply(self, widget, event=None, confirmChange=True):
    """Applies the current config to tint2."""
    # Check if tint2 is running
    procs = os.popen('pgrep -x "tint2"') # Check list of active processes for tint2
    pids = [] # List of process ids for tint2
    for proc in procs.readlines():
        pids += [int(proc.strip().split(" ")[0])]
    procs.close()
    if self.oneConfigFile:
        # Save and copy as default
        self.save()
        tmpSrc = self.filename
        tmpDest = os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc"
        try:
            shutil.copyfile(tmpSrc, tmpDest)
        except shutil.Error:
            # copyfile raises shutil.Error when src and dest are the
            # same file -- i.e. we were already editing the default.
            pass
        # Ask tint2 to reload config
        # (tint2 re-reads its config on SIGUSR1 rather than restarting)
        for pid in pids:
            os.kill(pid, signal.SIGUSR1)
    else:
        if confirmDialog(self, "This will terminate all currently running instances of tint2 before applying config. Continue?") == gtk.RESPONSE_YES:
            if not self.save():
                # Save failed or was cancelled; leave tint2 untouched.
                return
            #shutil.copyfile(self.filename, self.filename+".backup") # Create backup
            # If it is - kill it
            for pid in pids:
                os.kill(pid, signal.SIGTERM)
            # Lastly, start it
            os.spawnv(os.P_NOWAIT, self.tint2Bin, [self.tint2Bin, "-c", self.filename])
            # Optionally promote this config to be the default tint2rc.
            if confirmChange and self.filename != (os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc") and confirmDialog(self, "Use this as default tint2 config?") == gtk.RESPONSE_YES:
                tmp = self.filename
                self.filename = os.path.expandvars("${HOME}") + "/.config/tint2/tint2rc"
                try:
                    shutil.copyfile(tmp, self.filename)
                except shutil.Error:
                    pass
    #if confirmChange and confirmDialog(self, "Keep this config?") == gtk.RESPONSE_NO:
    # shutil.copyfile(self.filename+".backup", self.filename) # Create backup
    # self.apply(widget, event, False)
def changeAllFonts(self, widget):
    """Applies one chosen font to every font button at once."""
    dialog = gtk.FontSelectionDialog("Select Font")
    dialog.set_font_name(self.defaults["font"])
    if dialog.run() == gtk.RESPONSE_OK:
        chosen = dialog.get_font_name()
        for button in (self.clock1FontButton, self.clock2FontButton,
                       self.bat1FontButton, self.bat2FontButton,
                       self.fontButton):
            button.set_font_name(chosen)
    dialog.destroy()
    self.generateConfig()
    self.changeOccurred()
def changeDefaults(self, widget=None):
    """Opens the style-preferences dialog window."""
    # The dialog registers itself against this window; no handle kept.
    TintWizardPrefGUI(self)
def changeOccurred(self, widget=None):
    """Marks the config dirty; also keeps the panel-size label in sync."""
    self.toSave = True
    self.updateStatusBar(change=True)
    # Only the orientation combo affects the size label's wording.
    if widget != self.panelOrientation:
        return
    if self.panelOrientation.get_active_text() == "horizontal":
        self.panelSizeLabel.set_text("Size (width, height)")
    else:
        self.panelSizeLabel.set_text("Size (height, width)")
def colorChange(self, widget):
    """Mirrors a color button's newly picked color into its text entry."""
    label = self.getColorLabel(widget)
    if not label:
        # Unrecognised button; nothing to synchronise.
        return
    color = widget.get_color()
    label.set_text(rgbToHex(color.red, color.green, color.blue))
    self.changeOccurred()
def colorTyped(self, widget):
    """Validates a color typed into an entry; updates the paired button.

    On an invalid value, shows an error dialog and resets the entry
    to the button's current #RRGGBB value.

    Improvement: the two identical failure paths (bad length, failed
    parse) are consolidated into one.
    """
    s = widget.get_text()
    # The color button associated with this widget.
    colorButton = self.getColorButton(widget)
    # Precautionary: every color entry should have a paired button.
    if not colorButton:
        return
    col = None
    # Accept only the exact "#RRGGBB" form (7 characters).
    if len(s) == 7:
        try:
            col = gtk.gdk.Color(s)
        except:
            col = None
    if col is None:
        errorDialog(self, "Invalid color specification: [%s]" % s)
        # Reset the entry to the button's current value.
        widget.set_text(self.getHexFromWidget(colorButton))
        return
    colorButton.set_color(col)
# Note: pass init=True only when clearing backgrounds for a new
# config, so changeOccurred() is not fired spuriously.
def delBgClick(self, widget=None, prompt=True, init=False):
    """Removes the currently selected background style tab."""
    selected = self.bgNotebook.get_current_page()
    if selected == -1:
        return  # No tabs left to remove.
    if prompt and confirmDialog(self, "Remove this background?") != gtk.RESPONSE_YES:
        return
    self.bgNotebook.remove_page(selected)
    self.bgs.pop(selected)
    # Renumber the remaining tabs so IDs stay 1..N.
    for page in range(self.bgNotebook.get_n_pages()):
        tab = self.bgNotebook.get_nth_page(page)
        self.bgNotebook.set_tab_label_text(tab, "Background ID %d" % (page + 1))
    self.bgNotebook.show_all()
    self.updateComboBoxes(len(self.bgs) + 1, "remove")
    if not init:
        self.changeOccurred()
def generateConfig(self):
    """Reads values from each widget and regenerates the config buffer.

    Empty entries fall back to their module-level default constants.

    Fix: the battery_padding x fallback previously used
    BATTERY_PADDING_Y, while the widget itself is initialised with
    BATTERY_PADDING_X. The repetitive buffer-insert/fallback/alpha
    boilerplate is factored into three local helpers; output is
    otherwise unchanged.
    """
    def emit(text):
        # Append at the buffer's *current* end (the iterator is
        # invalidated by each insert, so it must be re-fetched).
        self.configBuf.insert(self.configBuf.get_end_iter(), text)
    def textOr(entry, default):
        # Entry text, or the fallback constant when the entry is empty.
        return entry.get_text() if entry.get_text() else default
    def alphaPct(button):
        # GTK alpha (0-65535) -> config opacity percentage (0-100).
        return int(button.get_alpha() / 65535.0 * 100)
    self.configBuf.delete(self.configBuf.get_start_iter(), self.configBuf.get_end_iter())
    emit("# Tint2 config file\n")
    emit("# Generated by tintwizard (http://code.google.com/p/tintwizard/)\n")
    emit("# For information on manually configuring tint2 see http://code.google.com/p/tint2/wiki/Configure\n\n")
    if not self.oneConfigFile:
        emit("# To use this as default tint2 config: save as $HOME/.config/tint2/tint2rc\n\n")
    emit("# Background definitions\n")
    for i in range(len(self.bgs)):
        emit("# ID %d\n" % (i + 1))
        # Pull the four style values out of this background's widgets.
        for child in self.bgs[i].get_children():
            if child.get_name() == "rounded":
                rounded = child.get_text() if child.get_text() else BG_ROUNDING
            elif child.get_name() == "border":
                borderW = child.get_text() if child.get_text() else BG_BORDER
            elif child.get_name() == "bgCol":
                bgCol = self.getHexFromWidget(child)
                bgAlpha = alphaPct(child)
            elif child.get_name() == "borderCol":
                borderCol = self.getHexFromWidget(child)
                borderAlpha = alphaPct(child)
        emit("rounded = %s\n" % (rounded))
        emit("border_width = %s\n" % (borderW))
        emit("background_color = %s %d\n" % (bgCol, bgAlpha))
        emit("border_color = %s %d\n\n" % (borderCol, borderAlpha))
    emit("# Panel\n")
    emit("panel_monitor = %s\n" % textOr(self.panelMonitor, PANEL_MONITOR))
    emit("panel_position = %s %s %s\n" % (self.panelPosY.get_active_text(), self.panelPosX.get_active_text(), self.panelOrientation.get_active_text()))
    emit("panel_size = %s %s\n" % (textOr(self.panelSizeX, PANEL_SIZE_X), textOr(self.panelSizeY, PANEL_SIZE_Y)))
    emit("panel_margin = %s %s\n" % (textOr(self.panelMarginX, PANEL_MARGIN_X), textOr(self.panelMarginY, PANEL_MARGIN_Y)))
    emit("panel_padding = %s %s %s\n" % (textOr(self.panelPadX, PANEL_PADDING_X), textOr(self.panelPadY, PANEL_PADDING_Y), textOr(self.panelSpacing, TASKBAR_SPACING)))
    emit("panel_dock = %s\n" % int(self.panelDock.get_active()))
    emit("wm_menu = %s\n" % int(self.panelMenu.get_active()))
    emit("panel_layer = %s\n" % (self.panelLayer.get_active_text()))
    emit("panel_background_id = %s\n" % (self.panelBg.get_active()))
    emit("\n# Panel Autohide\n")
    emit("autohide = %s\n" % int(self.panelAutohide.get_active()))
    emit("autohide_show_timeout = %s\n" % textOr(self.panelAutohideShow, PANEL_AUTOHIDE_SHOW))
    emit("autohide_hide_timeout = %s\n" % textOr(self.panelAutohideHide, PANEL_AUTOHIDE_HIDE))
    emit("autohide_height = %s\n" % textOr(self.panelAutohideHeight, PANEL_AUTOHIDE_HEIGHT))
    emit("strut_policy = %s\n" % (self.panelAutohideStrut.get_active_text() if self.panelAutohideStrut.get_active_text() else PANEL_AUTOHIDE_STRUT))
    emit("\n# Taskbar\n")
    emit("taskbar_mode = %s\n" % (self.taskbarMode.get_active_text()))
    # NOTE(review): the y padding originally fell back to
    # TASKBAR_PADDING_X; preserved here -- confirm whether a
    # TASKBAR_PADDING_Y constant exists and was intended.
    emit("taskbar_padding = %s %s %s\n" % (textOr(self.taskbarPadX, TASKBAR_PADDING_X), textOr(self.taskbarPadY, TASKBAR_PADDING_X), textOr(self.taskbarSpacing, TASK_SPACING)))
    emit("taskbar_background_id = %s\n" % (self.taskbarBg.get_active()))
    emit("taskbar_active_background_id = %s\n" % (self.taskbarActiveBg.get_active()))
    emit("\n# Tasks\n")
    emit("urgent_nb_of_blink = %s\n" % textOr(self.taskBlinks, TASK_BLINKS))
    emit("task_icon = %s\n" % int(self.taskIconCheckButton.get_active()))
    emit("task_text = %s\n" % int(self.taskTextCheckButton.get_active()))
    emit("task_centered = %s\n" % int(self.taskCentreCheckButton.get_active()))
    emit("task_maximum_size = %s %s\n" % (textOr(self.taskMaxSizeX, TASK_MAXIMUM_SIZE_X), textOr(self.taskMaxSizeY, TASK_MAXIMUM_SIZE_Y)))
    emit("task_padding = %s %s\n" % (textOr(self.taskPadX, TASK_PADDING_X), textOr(self.taskPadY, TASK_PADDING_Y)))
    emit("task_minimize_maximize_close_buttons = %s\n" % int(self.taskMMCButtonsCheckButton.get_active()))
    emit("task_buttons_padding = %s %s\n" % (textOr(self.taskMMCPadX, TASK_MMC_PADDING_X), textOr(self.taskMMCPadY, TASK_MMC_PADDING_Y)))
    emit("task_background_id = %s\n" % (self.taskBg.get_active()))
    emit("task_active_background_id = %s\n" % (self.taskActiveBg.get_active()))
    emit("task_urgent_background_id = %s\n" % (self.taskUrgentBg.get_active()))
    emit("task_iconified_background_id = %s\n" % (self.taskIconifiedBg.get_active()))
    emit("\n# Task Icons\n")
    emit("task_icon_asb = %s %s %s\n" % (textOr(self.iconHue, ICON_ALPHA), textOr(self.iconSat, ICON_SAT), textOr(self.iconBri, ICON_BRI)))
    emit("task_active_icon_asb = %s %s %s\n" % (textOr(self.activeIconHue, ACTIVE_ICON_ALPHA), textOr(self.activeIconSat, ACTIVE_ICON_SAT), textOr(self.activeIconBri, ACTIVE_ICON_BRI)))
    emit("task_urgent_icon_asb = %s %s %s\n" % (textOr(self.urgentIconHue, URGENT_ICON_ALPHA), textOr(self.urgentIconSat, URGENT_ICON_SAT), textOr(self.urgentIconBri, URGENT_ICON_BRI)))
    emit("task_iconified_icon_asb = %s %s %s\n" % (textOr(self.iconifiedIconHue, ICONIFIED_ICON_ALPHA), textOr(self.iconifiedIconSat, ICONIFIED_ICON_SAT), textOr(self.iconifiedIconBri, ICONIFIED_ICON_BRI)))
    emit("\n# Fonts\n")
    emit("task_font = %s\n" % (self.fontButton.get_font_name()))
    emit("task_font_color = %s %s\n" % (self.getHexFromWidget(self.fontColButton), alphaPct(self.fontColButton)))
    emit("task_active_font_color = %s %s\n" % (self.getHexFromWidget(self.fontActiveColButton), alphaPct(self.fontActiveColButton)))
    emit("task_urgent_font_color = %s %s\n" % (self.getHexFromWidget(self.fontUrgentColButton), alphaPct(self.fontUrgentColButton)))
    emit("task_iconified_font_color = %s %s\n" % (self.getHexFromWidget(self.fontIconifiedColButton), alphaPct(self.fontIconifiedColButton)))
    emit("font_shadow = %s\n" % int(self.fontShadowCheckButton.get_active()))
    emit("\n# System Tray\n")
    emit("systray_padding = %s %s %s\n" % (textOr(self.trayPadX, TRAY_PADDING_X), textOr(self.trayPadY, TRAY_PADDING_Y), textOr(self.traySpacing, TRAY_SPACING)))
    emit("systray_sort = %s\n" % (self.trayOrder.get_active_text()))
    emit("systray_background_id = %s\n" % (self.trayBg.get_active()))
    emit("systray_icon_size = %s\n" % textOr(self.trayMaxIconSize, TRAY_MAX_ICON_SIZE))
    emit("systray_icon_asb = %s %s %s\n" % (textOr(self.trayIconHue, TRAY_ICON_ALPHA), textOr(self.trayIconSat, TRAY_ICON_SAT), textOr(self.trayIconBri, TRAY_ICON_BRI)))
    emit("\n# Clock\n")
    emit("time1_format = %s\n" % textOr(self.clock1Format, CLOCK_FMT_1))
    emit("time1_font = %s\n" % (self.clock1FontButton.get_font_name()))
    emit("time2_format = %s\n" % textOr(self.clock2Format, CLOCK_FMT_2))
    emit("time2_font = %s\n" % (self.clock2FontButton.get_font_name()))
    emit("clock_font_color = %s %s\n" % (self.getHexFromWidget(self.clockFontColButton), alphaPct(self.clockFontColButton)))
    emit("clock_tooltip = %s\n" % textOr(self.clockTooltipFormat, CLOCK_TOOLTIP))
    emit("clock_padding = %s %s\n" % (textOr(self.clockPadX, CLOCK_PADDING_X), textOr(self.clockPadY, CLOCK_PADDING_Y)))
    emit("clock_background_id = %s\n" % (self.clockBg.get_active()))
    # Click commands are only written when non-empty.
    if self.clockLClick.get_text():
        emit("clock_lclick_command = %s\n" % (self.clockLClick.get_text()))
    if self.clockRClick.get_text():
        emit("clock_rclick_command = %s\n" % (self.clockRClick.get_text()))
    emit("time1_timezone = %s\n" % textOr(self.clockTime1Timezone, CLOCK_TIME1_TIMEZONE))
    emit("time2_timezone = %s\n" % textOr(self.clockTime2Timezone, CLOCK_TIME2_TIMEZONE))
    emit("clock_tooltip_timezone = %s\n" % textOr(self.clockTooltipTimezone, CLOCK_TOOLTIP_TIMEZONE))
    emit("\n# Tooltips\n")
    emit("tooltip = %s\n" % int(self.tooltipShow.get_active()))
    # NOTE(review): the x padding originally fell back to
    # TOOLTIP_PADDING_Y; preserved here -- confirm whether a
    # TOOLTIP_PADDING_X constant exists and was intended.
    emit("tooltip_padding = %s %s\n" % (textOr(self.tooltipPadX, TOOLTIP_PADDING_Y), textOr(self.tooltipPadY, TOOLTIP_PADDING_Y)))
    emit("tooltip_show_timeout = %s\n" % textOr(self.tooltipShowTime, TOOLTIP_SHOW_TIMEOUT))
    emit("tooltip_hide_timeout = %s\n" % textOr(self.tooltipHideTime, TOOLTIP_HIDE_TIMEOUT))
    emit("tooltip_background_id = %s\n" % (self.tooltipBg.get_active()))
    emit("tooltip_font = %s\n" % (self.tooltipFont.get_font_name()))
    emit("tooltip_font_color = %s %s\n" % (self.getHexFromWidget(self.tooltipFontColButton), alphaPct(self.tooltipFontColButton)))
    emit("\n# Mouse\n")
    emit("mouse_middle = %s\n" % (self.mouseMiddle.get_active_text()))
    emit("mouse_right = %s\n" % (self.mouseRight.get_active_text()))
    emit("mouse_scroll_up = %s\n" % (self.mouseUp.get_active_text()))
    emit("mouse_scroll_down = %s\n" % (self.mouseDown.get_active_text()))
    emit("\n# Battery\n")
    emit("battery_low_status = %s\n" % textOr(self.batteryLow, BATTERY_LOW))
    emit("battery_low_cmd = %s\n" % textOr(self.batteryLowAction, BATTERY_ACTION))
    emit("battery_hide = %s\n" % textOr(self.batteryHide, BATTERY_HIDE))
    emit("bat1_font = %s\n" % (self.bat1FontButton.get_font_name()))
    emit("bat2_font = %s\n" % (self.bat2FontButton.get_font_name()))
    emit("battery_font_color = %s %s\n" % (self.getHexFromWidget(self.batteryFontColButton), alphaPct(self.batteryFontColButton)))
    # Fix: x fallback was BATTERY_PADDING_Y; the entry is created
    # with BATTERY_PADDING_X.
    emit("battery_padding = %s %s\n" % (textOr(self.batteryPadX, BATTERY_PADDING_X), textOr(self.batteryPadY, BATTERY_PADDING_Y)))
    emit("battery_background_id = %s\n" % (self.batteryBg.get_active()))
    emit("\n# End of config")
def getColorButton(self, widget):
    """Finds the gtk.ColorButton paired with the given entry widget."""
    name = widget.get_name()
    namedButtons = {
        "fontCol": self.fontColButton,
        "fontActiveCol": self.fontActiveColButton,
        "fontUrgentCol": self.fontUrgentColButton,
        "fontIconifiedCol": self.fontIconifiedColButton,
        "clockFontCol": self.clockFontColButton,
        "batteryFontCol": self.batteryFontColButton,
        "tooltipFontCol": self.tooltipFontColButton,
    }
    if name in namedButtons:
        return namedButtons[name]
    # Background entries map to the matching button on the
    # currently selected background tab.
    childNames = {"bgColEntry": "bgCol", "borderColEntry": "borderCol"}
    if name in childNames:
        bgID = self.bgNotebook.get_current_page()
        for child in self.bgs[bgID].get_children():
            if child.get_name() == childNames[name]:
                return child
    # No button found which matches the widget.
    return None
def getColorLabel(self, widget):
    """Finds the text entry paired with the given color button."""
    name = widget.get_name()
    namedLabels = {
        "fontCol": self.fontCol,
        "fontActiveCol": self.fontActiveCol,
        "fontUrgentCol": self.fontUrgentCol,
        "fontIconifiedCol": self.fontIconifiedCol,
        "clockFontCol": self.clockFontCol,
        "batteryFontCol": self.batteryFontCol,
        "tooltipFontCol": self.tooltipFontCol,
    }
    if name in namedLabels:
        return namedLabels[name]
    # Background buttons map to the matching entry on the
    # currently selected background tab.
    childNames = {"bgCol": "bgColEntry", "borderCol": "borderColEntry"}
    if name in childNames:
        bgID = self.bgNotebook.get_current_page()
        for child in self.bgs[bgID].get_children():
            if child.get_name() == childNames[name]:
                return child
    # No entry found which matches the color button.
    return None
def getHexFromWidget(self, widget):
    """Returns the widget's current color as an #RRGGBB string."""
    color = widget.get_color()
    return rgbToHex(color.red, color.green, color.blue)
def help(self, action=None):
    """Opens the Help wiki page in the default web browser."""
    url = "http://code.google.com/p/tintwizard/wiki/Help"
    try:
        webbrowser.open(url)
    except:
        # Degrade to a dialog naming the page when no browser opens.
        errorDialog(self, "Your default web-browser could not be opened.\nPlease visit %s" % url)
def main(self):
    """Hands control over to the GTK main loop."""
    gtk.main()
def new(self, action=None):
    """Starts a fresh, unsaved config."""
    # Give the user a chance to save the current config first.
    if self.toSave:
        self.savePrompt()
    self.toSave = True
    self.filename = None
    # Reset all widgets, then regenerate the preview buffer.
    self.resetConfig()
    self.generateConfig()
    self.updateStatusBar("New Config File [*]")
def openDef(self, widget=None):
    """Loads the default tint2 config file."""
    self.openFile(default=True)
def openFile(self, widget=None, default=False):
    """Reads from a config file; with default=True, the standard tint2rc."""
    self.new()
    if default:
        self.filename = os.path.expandvars("$HOME/.config/tint2/tint2rc")
        self.curDir = os.path.expandvars("$HOME/.config/tint2")
    else:
        chooser = gtk.FileChooserDialog("Open Config File", self, gtk.FILE_CHOOSER_ACTION_OPEN,
                                        (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        chooser.set_default_response(gtk.RESPONSE_OK)
        # Reopen in the directory the user last browsed.
        if self.curDir != None:
            chooser.set_current_folder(self.curDir)
        allFiles = gtk.FileFilter()
        allFiles.set_name("All files")
        allFiles.add_pattern("*")
        chooser.add_filter(allFiles)
        chooser.show()
        accepted = chooser.run() == gtk.RESPONSE_OK
        if accepted:
            self.filename = chooser.get_filename()
            self.curDir = os.path.dirname(self.filename)
        chooser.destroy()
        if not accepted:
            return
    self.readTint2Config()
    self.generateConfig()
    self.updateStatusBar()
def parseBgs(self, string):
    """Parses the background definitions out of a config string.

    Builds the UI for each definition via addBgDefs() and returns
    the remaining config text with the background lines removed, so
    the caller never has to scan them again."""
    bgDefs = []
    bgKeys = ["border_width", "background_color", "border_color"]
    leftovers = []
    for line in string.split("\n"):
        data = [token.strip() for token in line.split("=")]
        # tint2 itself assumes every style starts with 'rounded', so
        # that key opens a new definition and the other background
        # keys attach to the most recent one.
        if data[0] == "rounded":
            bgDefs.append({"rounded": data[1]})
        elif data[0] in bgKeys:
            bgDefs[-1][data[0]] = data[1]
        else:
            leftovers.append(line)
    self.addBgDefs(bgDefs)
    return "".join("%s\n" % line for line in leftovers)
def parseConfig(self, string):
    """Parses the contents of a config file into the UI widgets."""
    # Keys whose widgets need a propType hint passed to parseProp().
    specialProps = {
        "time1_format": "time1",
        "time2_format": "time2",
        "clock_tooltip": "clock_tooltip",
        "time1_timezone": "time1_timezone",
        "time2_timezone": "time2_timezone",
        "clock_tooltip_timezone": "tooltip_timezone",
        "systray_padding": "tray",
        "taskbar_active_background_id": "activeBg",
    }
    for line in string.split("\n"):
        parts = line.split("=")     # KEY and VALUE
        key = parts[0].strip()
        if key in specialProps:
            self.parseProp(self.getComponent(key), parts[1], True, specialProps[key])
        else:
            component = self.getComponent(key)
            if component != None:
                self.parseProp(component, parts[1])
def parseProp(self, prop, string, special=False, propType=""):
"""Parses a variable definition from the conf file and updates the correct UI widget."""
string = string.strip() # Remove whitespace from the VALUE
eType = type(prop) # Get widget type
if eType == gtk.Entry:
prop.set_text(string)
prop.activate()
elif eType == gtk.ComboBox:
# This allows us to select the correct combo-box value.
if string in ["bottom", "top", "left", "right", "center", "single_desktop", "multi_desktop", "single_monitor",
"none", "close", "shade", "iconify", "toggle", "toggle_iconify", "maximize_restore",
"desktop_left", "desktop_right", "horizontal", "vertical", "ascending", "descending",
"left2right", "right2left", "next_task", "prev_task", "minimum", "follow_size", "normal"]:
if string in ["bottom", "left", "single_desktop", "none", "horizontal", "ascending"]:
i = 0
elif string in ["top", "right", "multi_desktop", "close", "vertical", "descending", "minimum"]:
i = 1
elif string in ["center", "single_monitor", "toggle", "left2right", "follow_size", "normal"]:
i = 2
elif string in ["right2left"]:
i = 3
else:
i = ["none", "close", "toggle", "iconify", "shade", "toggle_iconify", "maximize_restore",
"desktop_left", "desktop_right", "next_task", "prev_task"].index(string)
prop.set_active(i)
else:
prop.set_active(int(string))
elif eType == gtk.CheckButton:
prop.set_active(bool(int(string)))
elif eType == gtk.FontButton:
prop.set_font_name(string)
elif eType == gtk.ColorButton:
prop.set_alpha(int(int(string) * 65535 / 100.0))
elif eType == tuple: # If a property has more than 1 value, for example the x and y co-ords
s = string.split(" ") # of the padding properties, then just we use recursion to set the
for i in range(len(prop)): # value of each associated widget.
if i >= len(s):
self.parseProp(prop[i], "0")
else:
self.parseProp(prop[i], s[i])
def quit(self, widget, event=None):
    """Asks if user would like to save file before quitting, then quits the program.

    Returns True to veto the quit when the user presses Cancel."""
    if not self.toSave:
        # Nothing modified - exit immediately.
        gtk.main_quit()
        return
    if self.oneConfigFile:
        # Single-config mode always saves without prompting.
        response = gtk.RESPONSE_YES
    else:
        dialog = gtk.Dialog("Save config?", self, gtk.DIALOG_MODAL,
                            (gtk.STOCK_YES, gtk.RESPONSE_YES,
                             gtk.STOCK_NO, gtk.RESPONSE_NO,
                             gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
        content = dialog.get_content_area()
        content.add(gtk.Label("Save config before quitting?"))
        content.set_size_request(300, 100)
        dialog.show_all()
        response = dialog.run()
        dialog.destroy()
    if response == gtk.RESPONSE_CANCEL:
        return True  # Return True to stop it quitting when we hit "Cancel"
    elif response == gtk.RESPONSE_NO:
        gtk.main_quit()
    elif response == gtk.RESPONSE_YES:
        self.save()
        gtk.main_quit()
def readConf(self):
    """Reads the tintwizard configuration file - NOT tint2 config files.

    Populates self.defaults with "key = value" pairs found in
    ~/.config/tint2/tintwizard.conf, creating the file first if it is
    missing. Unknown keys are ignored."""
    self.defaults = {"font": None, "bgColor": None, "fgColor": None, "borderColor": None, "bgCount": None}
    if self.oneConfigFile:
        # don't need tintwizard.conf
        return
    pathName = os.path.expandvars("${HOME}") + "/.config/tint2/"
    if not os.path.exists(pathName + "tintwizard.conf"):
        self.writeConf()
        return
    f = open(pathName + "tintwizard.conf", "r")
    try:
        for line in f:
            if "=" in line:
                # Split on the FIRST "=" only, so values containing "="
                # (the old split("=")[1] silently truncated them) survive.
                key, _, value = line.partition("=")
                key = key.strip()
                # `in` replaces the Python-2-only dict.has_key()
                if key in self.defaults:
                    self.defaults[key] = value.strip()
    finally:
        # The file handle was previously leaked (never closed).
        f.close()
def readTint2Config(self):
    """Reads in from a config file."""
    # Collect every non-comment, non-trivial line of the tint2 config.
    kept = []
    f = open(self.filename, "r")
    for line in f:
        if line[0] != "#" and len(line) > 2:
            kept.append(line)
    f.close()
    config = "".join(kept)
    # Remove all background styles so we can create new ones as we read them.
    for _ in range(len(self.bgs)):
        self.delBgClick(None, False)
    # parseBgs() consumes the background definitions and returns the
    # remainder, so parseConfig() never has to re-read background lines.
    self.parseConfig(self.parseBgs(config))
def reportBug(self, action=None):
    """Opens the bug report page in the default web browser.

    Falls back to an error dialog telling the user the URL when the
    browser cannot be launched."""
    try:
        webbrowser.open("http://code.google.com/p/tintwizard/issues/entry")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        errorDialog(self, "Your default web-browser could not be opened.\nPlease visit http://code.google.com/p/tintwizard/issues/entry")
def resetConfig(self):
    """Resets all the widgets to their default values.

    Rebuilds the background-style notebook, then restores every widget on
    every tab from the module-level default constants and self.defaults."""
    # Backgrounds
    for i in range(len(self.bgs)):
        self.delBgClick(prompt=False, init=True)
    for i in range(self.defaults["bgCount"]):
        self.addBgClick(init=True)
    self.bgNotebook.set_current_page(0)
    # Panel
    self.panelPosY.set_active(0)
    self.panelPosX.set_active(0)
    self.panelOrientation.set_active(0)
    self.panelItems.set_text(PANEL_ITEMS)
    self.panelSizeX.set_text(PANEL_SIZE_X)
    self.panelSizeY.set_text(PANEL_SIZE_Y)
    self.panelMarginX.set_text(PANEL_MARGIN_X)
    self.panelMarginY.set_text(PANEL_MARGIN_Y)
    # NOTE(review): the X pad is set from PANEL_PADDING_Y - looks like a
    # copy-paste slip; confirm whether PANEL_PADDING_X exists and was meant.
    self.panelPadX.set_text(PANEL_PADDING_Y)
    self.panelPadY.set_text(PANEL_PADDING_Y)
    self.panelSpacing.set_text(TASKBAR_SPACING)
    self.panelBg.set_active(0)
    self.panelMenu.set_active(0)
    self.panelDock.set_active(0)
    self.panelLayer.set_active(0)
    self.panelMonitor.set_text(PANEL_MONITOR)
    self.panelAutohide.set_active(0)
    self.panelAutohideShow.set_text(PANEL_AUTOHIDE_SHOW)
    self.panelAutohideHide.set_text(PANEL_AUTOHIDE_HIDE)
    self.panelAutohideHeight.set_text(PANEL_AUTOHIDE_HEIGHT)
    self.panelAutohideStrut.set_active(0)
    # Taskbar
    self.taskbarMode.set_active(0)
    self.taskbarPadX.set_text(TASKBAR_PADDING_X)
    self.taskbarPadY.set_text(TASKBAR_PADDING_Y)
    self.taskbarSpacing.set_text(TASK_SPACING)
    self.taskbarBg.set_active(0)
    self.taskbarActiveBg.set_active(0)
    # Tasks
    self.taskBlinks.set_text(TASK_BLINKS)
    self.taskCentreCheckButton.set_active(True)
    self.taskTextCheckButton.set_active(True)
    self.taskIconCheckButton.set_active(True)
    self.taskMaxSizeX.set_text(TASK_MAXIMUM_SIZE_X)
    self.taskMaxSizeY.set_text(TASK_MAXIMUM_SIZE_Y)
    self.taskPadX.set_text(TASK_PADDING_X)
    self.taskPadY.set_text(TASK_PADDING_Y)
    self.taskMMCButtonsCheckButton.set_active(True)
    self.taskMMCPadX.set_text(TASK_MMC_PADDING_X)
    self.taskMMCPadY.set_text(TASK_MMC_PADDING_Y)
    self.taskBg.set_active(0)
    self.taskActiveBg.set_active(0)
    self.taskUrgentBg.set_active(0)
    self.taskIconifiedBg.set_active(0)
    # Icons
    self.iconHue.set_text(ICON_ALPHA)
    self.iconSat.set_text(ICON_SAT)
    self.iconBri.set_text(ICON_BRI)
    self.activeIconHue.set_text(ACTIVE_ICON_ALPHA)
    self.activeIconSat.set_text(ACTIVE_ICON_SAT)
    self.activeIconBri.set_text(ACTIVE_ICON_BRI)
    self.urgentIconHue.set_text(URGENT_ICON_ALPHA)
    self.urgentIconSat.set_text(URGENT_ICON_SAT)
    self.urgentIconBri.set_text(URGENT_ICON_BRI)
    self.iconifiedIconHue.set_text(ICONIFIED_ICON_ALPHA)
    self.iconifiedIconSat.set_text(ICONIFIED_ICON_SAT)
    self.iconifiedIconBri.set_text(ICONIFIED_ICON_BRI)
    # Fonts
    self.fontButton.set_font_name(self.defaults["font"])
    self.fontColButton.set_alpha(65535)
    self.fontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.fontCol.set_text(self.defaults["fgColor"])
    self.fontActiveColButton.set_alpha(65535)
    self.fontActiveColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.fontActiveCol.set_text(self.defaults["fgColor"])
    self.fontUrgentColButton.set_alpha(65535)
    self.fontUrgentColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.fontUrgentCol.set_text(self.defaults["fgColor"])
    self.fontIconifiedColButton.set_alpha(65535)
    self.fontIconifiedColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.fontIconifiedCol.set_text(self.defaults["fgColor"])
    self.fontShadowCheckButton.set_active(False)
    # System Tray
    self.trayPadX.set_text(TRAY_PADDING_X)
    # NOTE(review): the Y pad is set from TRAY_PADDING_X - likely a
    # copy-paste slip; confirm whether TRAY_PADDING_Y was intended.
    self.trayPadY.set_text(TRAY_PADDING_X)
    self.traySpacing.set_text(TRAY_SPACING)
    self.trayOrder.set_active(0)
    self.trayBg.set_active(0)
    self.trayMaxIconSize.set_text(TRAY_MAX_ICON_SIZE)
    self.trayIconHue.set_text(TRAY_ICON_ALPHA)
    self.trayIconSat.set_text(TRAY_ICON_SAT)
    self.trayIconBri.set_text(TRAY_ICON_BRI)
    # Clock
    self.clock1Format.set_text(CLOCK_FMT_1)
    self.clock1FontButton.set_font_name(self.defaults["font"])
    self.clock2Format.set_text(CLOCK_FMT_2)
    self.clockTooltipFormat.set_text(CLOCK_TOOLTIP)
    self.clock2FontButton.set_font_name(self.defaults["font"])
    self.clockFontColButton.set_alpha(65535)
    self.clockFontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.clockFontCol.set_text(self.defaults["fgColor"])
    self.clockPadX.set_text(CLOCK_PADDING_X)
    self.clockPadY.set_text(CLOCK_PADDING_Y)
    self.clockBg.set_active(0)
    self.clockLClick.set_text(CLOCK_LCLICK)
    self.clockRClick.set_text(CLOCK_RCLICK)
    self.clockTime1Timezone.set_text(CLOCK_TIME1_TIMEZONE)
    self.clockTime2Timezone.set_text(CLOCK_TIME2_TIMEZONE)
    self.clockTooltipTimezone.set_text(CLOCK_TOOLTIP_TIMEZONE)
    # Tooltips
    self.tooltipShow.set_active(False)
    self.tooltipPadX.set_text(TOOLTIP_PADDING_X)
    self.tooltipPadY.set_text(TOOLTIP_PADDING_Y)
    self.tooltipShowTime.set_text(TOOLTIP_SHOW_TIMEOUT)
    self.tooltipHideTime.set_text(TOOLTIP_HIDE_TIMEOUT)
    self.tooltipBg.set_active(0)
    self.tooltipFont.set_font_name(self.defaults["font"])
    self.tooltipFontColButton.set_alpha(65535)
    self.tooltipFontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.tooltipFontCol.set_text(self.defaults["fgColor"])
    # Mouse
    self.mouseMiddle.set_active(0)
    self.mouseRight.set_active(0)
    self.mouseUp.set_active(0)
    self.mouseDown.set_active(0)
    # Battery
    self.batteryLow.set_text(BATTERY_LOW)
    self.batteryLowAction.set_text(BATTERY_ACTION)
    self.batteryHide.set_text(BATTERY_HIDE)
    self.bat1FontButton.set_font_name(self.defaults["font"])
    self.bat2FontButton.set_font_name(self.defaults["font"])
    self.batteryFontColButton.set_alpha(65535)
    self.batteryFontColButton.set_color(gtk.gdk.color_parse(self.defaults["fgColor"]))
    self.batteryFontCol.set_text(self.defaults["fgColor"])
    # NOTE(review): the X pad is set from BATTERY_PADDING_Y - likely a
    # copy-paste slip; confirm whether BATTERY_PADDING_X was intended.
    self.batteryPadX.set_text(BATTERY_PADDING_Y)
    self.batteryPadY.set_text(BATTERY_PADDING_Y)
    self.batteryBg.set_active(0)
def save(self, widget=None, event=None):
    """Saves the generated config file.

    Returns True when a file was actually written so apply() knows it
    should kill the tint2 process and apply the new config. When no file
    has yet been chosen, delegates to saveAs() and returns its result."""
    if self.filename is None:
        # Force the user through the "Save As..." dialog first.
        return self.saveAs()
    self.generateConfig()
    self.writeFile()
    return True
def saveAs(self, widget=None, event=None):
    """Prompts the user to select a file and then saves the generated config file.

    Returns True if a file was written, False if the user cancelled or
    declined to overwrite an existing file."""
    self.generateConfig()
    chooser = gtk.FileChooserDialog("Save Config File As...", self, gtk.FILE_CHOOSER_ACTION_SAVE,
                                    (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
    chooser.set_default_response(gtk.RESPONSE_OK)
    if self.curDir is not None:
        chooser.set_current_folder(self.curDir)
    allFiles = gtk.FileFilter()
    allFiles.set_name("All files")
    allFiles.add_pattern("*")
    chooser.add_filter(allFiles)
    chooser.show()
    if chooser.run() != gtk.RESPONSE_OK:
        # Dialog cancelled or closed.
        self.filename = None
        chooser.destroy()
        return False
    self.filename = chooser.get_filename()
    if os.path.exists(self.filename):
        if confirmDialog(self, "This file already exists. Overwrite this file?") != gtk.RESPONSE_YES:
            # User declined to overwrite.
            self.filename = None
            chooser.destroy()
            return False
    self.writeFile()
    chooser.destroy()
    return True
def saveAsDef(self, widget=None, event=None):
    """Saves the config as the default tint2 config.

    Returns True on success; returns None when the user declines."""
    if confirmDialog(self, "Overwrite current tint2 default config?") != gtk.RESPONSE_YES:
        return
    home = os.path.expandvars("${HOME}")
    self.filename = home + "/.config/tint2/tint2rc"
    self.curDir = home + "/.config/tint2"
    # If, for whatever reason, tint2 has no default config - create one.
    if not os.path.isfile(self.filename):
        f = open(self.filename, "w")
        f.write("# tint2rc")
        f.close()
    self.generateConfig()
    self.writeFile()
    return True
def savePrompt(self):
    """Prompt the user to save before creating a new file."""
    wantsSave = confirmDialog(self, "Save current config?") == gtk.RESPONSE_YES
    if wantsSave:
        self.save(None)
def switchPage(self, notebook, page, page_num):
    """Handles notebook page switch events."""
    # Regenerate the config text only when the user lands on the
    # 'View Config' tab, so its textarea is always up to date.
    label = notebook.get_tab_label_text(notebook.get_nth_page(page_num))
    if label == "View Config":
        self.generateConfig()
def updateComboBoxes(self, i, action="add"):
    """Updates the contents of a combo box when a background style has been added/removed."""
    boxes = (self.batteryBg, self.clockBg, self.taskbarBg, self.taskbarActiveBg,
             self.trayBg, self.taskActiveBg, self.taskBg, self.panelBg,
             self.tooltipBg, self.taskUrgentBg, self.taskIconifiedBg)
    if action == "add":
        for box in boxes:
            box.append_text(str(i + 1))
        return
    for box in boxes:
        # If the removed background is currently selected, fall back to
        # the first entry before removing it.
        if box.get_active() == i:
            box.set_active(0)
        box.remove_text(i)
def updateStatusBar(self, message="", change=False):
    """Updates the message on the statusbar. A message can be provided,
    and if change is set to True (i.e. something has been modified) then
    an appropriate symbol [*] is shown beside filename."""
    contextID = self.statusBar.get_context_id("")
    self.statusBar.pop(contextID)
    if not message:
        shownName = self.filename or "New Config File"
        marker = "[*]" if change else ""
        message = "%s %s" % (shownName, marker)
    self.statusBar.push(contextID, message)
def writeConf(self):
    """Writes the tintwizard configuration file."""
    # Assemble the whole file as a list of lines, then join once.
    lines = ["#Start", "[defaults]"]
    for key in self.defaults:
        lines.append("%s = %s" % (key, str(self.defaults[key])))
    lines.append("#End")
    pathName = os.path.expandvars("${HOME}") + "/.config/tint2/"
    f = open(pathName + "tintwizard.conf", "w")
    f.write("\n".join(lines) + "\n")
    f.close()
def writeFile(self):
    """Writes the contents of the config text buffer to file.

    On success, clears the dirty flag, remembers the directory and
    refreshes the statusbar; on IOError an error dialog is shown."""
    start = self.configBuf.get_start_iter()
    end = self.configBuf.get_end_iter()
    try:
        f = open(self.filename, "w")
        f.write(self.configBuf.get_text(start, end))
        f.close()
        self.toSave = False
        self.curDir = os.path.dirname(self.filename)
        self.updateStatusBar()
    except IOError:
        errorDialog(self, "Could not save file")
# General use functions
def createLabel(parent, text="", gridX=0, gridY=0, sizeX=1, sizeY=1, xPadding=0):
    """Creates a label and adds it to a parent widget."""
    label = gtk.Label(text)
    # Left-aligned, vertically centred.
    label.set_alignment(0, 0.5)
    parent.attach(label, gridX, gridX + sizeX, gridY, gridY + sizeY, xpadding=xPadding)
    return label
def createComboBox(parent, choices=("null",), active=0, gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
    """Creates a combo box with text choices and adds it to a parent widget.

    The `choices` default is now an immutable tuple rather than a shared
    mutable list (["null"]), so the default can never be mutated across
    calls; any iterable of strings is accepted as before.
    """
    temp = gtk.combo_box_new_text()
    for choice in choices:
        temp.append_text(choice)
    temp.set_active(active)
    if handler != None:
        temp.connect("changed", handler)
    parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
    return temp
def createEntry(parent, maxSize, width, text="", gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None, name=""):
    """Creates a text entry widget and adds it to a parent widget."""
    entry = gtk.Entry(maxSize)
    entry.set_width_chars(width)
    entry.set_text(text)
    entry.set_name(name)
    if handler is not None:
        entry.connect("changed", handler)
    xOpts = gtk.EXPAND if xExpand else 0
    yOpts = gtk.EXPAND if yExpand else 0
    parent.attach(entry, gridX, gridX + sizeX, gridY, gridY + sizeY, xoptions=xOpts, yoptions=yOpts)
    return entry
def createCheckButton(parent, text="", active=False, gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
    """Creates a checkbox widget and adds it to a parent widget."""
    temp = gtk.CheckButton(text if text != "" else None)
    temp.set_active(active)
    # Guard added: handler defaults to None, but connect() was previously
    # called unconditionally (unlike the other create* helpers), which
    # fails when no handler is supplied.
    if handler != None:
        temp.connect("toggled", handler)
    parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
    return temp
def createButton(parent, text="", stock=None, name="", gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
    """Creates a button widget and adds it to a parent widget."""
    if stock:
        temp = gtk.Button(text, stock)
    else:
        temp = gtk.Button(text)
    temp.set_name(name)
    # Guard added: handler defaults to None, but connect() was previously
    # called unconditionally, unlike the other create* helpers.
    if handler != None:
        temp.connect("clicked", handler)
    parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
    return temp
def createFontButton(parent, font, gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
    """Creates a font button widget and adds it to a parent widget."""
    temp = gtk.FontButton()
    temp.set_font_name(font)
    # Guard added: handler defaults to None, but connect() was previously
    # called unconditionally, unlike the other create* helpers.
    if handler != None:
        temp.connect("font-set", handler)
    parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
    return temp
def createColorButton(parent, color="#000000", useAlpha=True, name="", gridX=0, gridY=0, sizeX=1, sizeY=1, xExpand=True, yExpand=True, handler=None):
    """Creates a colour button widget and adds it to a parent widget."""
    temp = gtk.ColorButton(gtk.gdk.color_parse(color))
    temp.set_use_alpha(useAlpha)
    temp.set_name(name)
    # Guard added: handler defaults to None, but connect() was previously
    # called unconditionally, unlike the other create* helpers.
    if handler != None:
        temp.connect("color-set", handler)
    parent.attach(temp, gridX, gridX+sizeX, gridY, gridY+sizeY, xoptions=gtk.EXPAND if xExpand else 0, yoptions=gtk.EXPAND if yExpand else 0)
    return temp
def confirmDialog(parent, message):
    """Creates a modal Yes/No confirmation dialog and returns the response."""
    dlg = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
    dlg.show()
    answer = dlg.run()
    dlg.destroy()
    return answer
def errorDialog(parent=None, message="An error has occured!"):
    """Creates a modal error dialog with a single OK button."""
    dlg = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, message)
    dlg.show()
    dlg.run()
    dlg.destroy()
def numToHex(n):
    """Convert integer n in range [0, 15] to a single hex digit string.

    Returns -1 for any out-of-range value. The previous list-index
    implementation silently wrapped negative indices (numToHex(-1)
    returned "F"); the range is now checked explicitly."""
    if isinstance(n, int) and 0 <= n <= 15:
        return "0123456789ABCDEF"[n]
    return -1
def rgbToHex(r, g, b):
    """Constructs a 6 digit hex representation of color (r, g, b).

    r, g, b are 16-bit GTK channel values (0-65535); the result is an
    upper-case "#RRGGBB" string."""
    r2 = trunc(r / 65535.0 * 255)
    g2 = trunc(g / 65535.0 * 255)
    b2 = trunc(b / 65535.0 * 255)
    # "%02X" replaces the old per-nibble numToHex(x / 16) / numToHex(x % 16)
    # pair, which broke under true division (float list index).
    return "#%02X%02X%02X" % (r2, g2, b2)
def trunc(n):
    """Truncate a floating point number, rounding up or down appropriately."""
    # Round to the nearest integer; exact .5 ties go toward floor, which
    # reproduces the original |ceil-n| < |floor-n| comparison.
    lower = math.floor(n)
    return int(lower if n - lower <= 0.5 else lower + 1)
# Direct execution of application
# NOTE: this file is Python 2 (print statement below); "-version" prints
# the application name/version and exits without starting the GUI.
if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "-version":
        print NAME, VERSION
        exit()
    tw = TintWizardGUI()
    tw.main()
|
krnlyng/tint2
|
src/tint2conf/tintwizard.py
|
Python
|
gpl-2.0
| 107,754
|
[
"VisIt"
] |
b31c5de6d457e76a0af4d374e58218adbf6fea4f7b07e1e5961c4116243ad91e
|
"""
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
import numpy
from pyNN import recording
from pyNN.neuron import simulator
import re
from neuron import h
recordable_pattern = re.compile(r'((?P<section>\w+)(\((?P<location>[-+]?[0-9]*\.?[0-9]+)\))?\.)?(?P<var>\w+)')
class Recorder(recording.Recorder):
    """Encapsulates data and functions related to recording model variables."""
    _simulator = simulator

    def _record(self, variable, new_ids, sampling_interval=None):
        """Add the cells in `new_ids` to the set of recorded cells."""
        if variable == 'spikes':
            for id in new_ids:
                if id._cell.rec is not None:
                    id._cell.rec.record(id._cell.spike_times)
        else:
            self.sampling_interval = sampling_interval or self._simulator.state.dt
            for id in new_ids:
                self._record_state_variable(id._cell, variable)

    def _record_state_variable(self, cell, variable):
        """Attach a NEURON Vector to record `variable` from `cell`.

        Also starts recording the time base (once per cell) when no other
        variable is being recorded yet."""
        if hasattr(cell, 'recordable') and variable in cell.recordable:
            hoc_var = cell.recordable[variable]
        elif variable == 'v':
            hoc_var = cell.source_section(0.5)._ref_v  # or use "seg.v"?
        elif variable == 'gsyn_exc':
            hoc_var = cell.esyn._ref_g
        elif variable == 'gsyn_inh':
            hoc_var = cell.isyn._ref_g
        else:
            source, var_name = self._resolve_variable(cell, variable)
            hoc_var = getattr(source, "_ref_%s" % var_name)
        cell.traces[variable] = vec = h.Vector()
        if self.sampling_interval == self._simulator.state.dt:
            vec.record(hoc_var)
        else:
            vec.record(hoc_var, self.sampling_interval)
        if not cell.recording_time:
            cell.record_times = h.Vector()
            if self.sampling_interval == self._simulator.state.dt:
                cell.record_times.record(h._ref_t)
            else:
                cell.record_times.record(h._ref_t, self.sampling_interval)
        cell.recording_time += 1

    # could be staticmethod
    def _resolve_variable(self, cell, variable_path):
        """Map a "section(loc).var"-style path onto (source, var_name).

        Raises AttributeError when the path does not match the pattern."""
        match = recordable_pattern.match(variable_path)
        if match:
            parts = match.groupdict()
            if parts['section']:
                section = getattr(cell, parts['section'])
                if parts['location']:
                    source = section(float(parts['location']))
                else:
                    source = section
            else:
                source = cell.source
            return source, parts['var']
        else:
            raise AttributeError("Recording of %s not implemented." % variable_path)

    def _reset(self):
        """Reset the list of things to be recorded."""
        for id in set.union(*self.recorded.values()):
            id._cell.traces = {}
            id._cell.spike_times = h.Vector(0)
            # BUG FIX: this line previously read `recording_time == 0`
            # (a no-op comparison), so the counter was never reset and
            # stale time-recording state survived a reset.
            id._cell.recording_time = 0
            id._cell.record_times = None

    def _clear_simulator(self):
        """
        Should remove all recorded data held by the simulator and, ideally,
        free up the memory.
        """
        for id in set.union(*self.recorded.values()):
            if hasattr(id._cell, "traces"):
                for variable in id._cell.traces:
                    id._cell.traces[variable].resize(0)
            if id._cell.rec is not None:
                id._cell.spike_times.resize(0)
            else:
                id._cell.clear_past_spikes()

    def _get_spiketimes(self, id):
        """Return spike times for `id`, clipped to the current sim time."""
        spikes = numpy.array(id._cell.spike_times)
        return spikes[spikes <= simulator.state.t + 1e-9]

    def _get_all_signals(self, variable, ids, clear=False):
        # assuming not using cvode, otherwise need to get times as well and use IrregularlySampledAnalogSignal
        if len(ids) > 0:
            # Use a list comprehension, not a generator: passing a
            # generator to numpy.vstack is an error on modern NumPy.
            signals = numpy.vstack([id._cell.traces[variable] for id in ids]).T
            expected_length = int(simulator.state.tstop / self.sampling_interval) + 1
            if signals.shape[0] != expected_length:  # generally due to floating point/rounding issues
                signals = numpy.vstack((signals, signals[-1, :]))
        else:
            signals = numpy.array([])
        return signals

    def _local_count(self, variable, filter_ids=None):
        """Return {cell_id: spike_count}; only 'spikes' is supported."""
        N = {}
        if variable == 'spikes':
            for id in self.filter_recorded(variable, filter_ids):
                N[int(id)] = id._cell.spike_times.size()
        else:
            raise Exception("Only implemented for spikes")
        return N
|
anupkdas-nus/global_synapses
|
pyNN-dispackgaes/neuron/recording.py
|
Python
|
gpl-3.0
| 4,605
|
[
"NEURON"
] |
d59293a090560e79599f49ade84adcfb359c947944a37f83f89a9a467ae20274
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import os
import numpy as np
from pyfr.shapes import BaseShape
from pyfr.util import subclass_where
from pyfr.writers import BaseWriter
class VTKWriter(BaseWriter):
    """Converts PyFR mesh/solution pairs into VTK unstructured-grid files.

    Writes either a single .vtu file or, for .pvtu output, one .vtu per
    partition plus a parallel index file referencing the pieces."""
    # Supported file types and extensions
    name = 'vtk'
    extn = ['.vtu', '.pvtu']

    def __init__(self, args):
        super().__init__(args)
        # Output precision (np.float32 or np.float64) and the element
        # subdivision factor used for high-order visualisation.
        self.dtype = np.dtype(args.precision).type
        self.divisor = args.divisor or self.cfg.getint('solver', 'order')
        # Solutions need a separate processing pipeline to other data
        if self.dataprefix == 'soln':
            self._proc_fields = self._proc_fields_soln
            self._vtk_vars = self.elementscls.visvarmap[self.ndims]
        # Otherwise we're dealing with simple scalar data
        else:
            self._proc_fields = self._proc_fields_scal
            self._soln_fields = self.stats.get('data', 'fields').split(',')
            self._vtk_vars = {k: [k] for k in self._soln_fields}

    def _proc_fields_soln(self, vsol):
        """Convert a conservative-variable solution into visualisation fields."""
        # Primitive and visualisation variable maps
        privarmap = self.elementscls.privarmap[self.ndims]
        visvarmap = self.elementscls.visvarmap[self.ndims]
        # Convert from conservative to primitive variables
        vsol = np.array(self.elementscls.conv_to_pri(vsol, self.cfg))
        # Prepare the fields
        fields = []
        for vnames in visvarmap.values():
            ix = [privarmap.index(vn) for vn in vnames]
            fields.append(vsol[ix])
        return fields

    def _proc_fields_scal(self, vsol):
        """Pass scalar data fields through in _vtk_vars order."""
        return [vsol[self._soln_fields.index(vn)] for vn in self._vtk_vars]

    def _get_npts_ncells_nnodes(self, mk):
        """Return (vis points, sub-cells, connectivity nodes) for mesh key mk."""
        m_inf = self.mesh_inf[mk]
        # Get the shape and sub division classes
        shapecls = subclass_where(BaseShape, name=m_inf[0])
        subdvcls = subclass_where(BaseShapeSubDiv, name=m_inf[0])
        # Number of vis points
        npts = shapecls.nspts_from_order(self.divisor + 1)*m_inf[1][1]
        # Number of sub cells and nodes
        ncells = len(subdvcls.subcells(self.divisor))*m_inf[1][1]
        nnodes = len(subdvcls.subnodes(self.divisor))*m_inf[1][1]
        return npts, ncells, nnodes

    def _get_array_attrs(self, mk=None):
        """Names/types/component-counts (and, given mk, byte sizes) of the
        DataArrays written for each piece."""
        dtype = 'Float32' if self.dtype == np.float32 else 'Float64'
        dsize = np.dtype(self.dtype).itemsize
        vvars = self._vtk_vars
        # The first four arrays are the points plus the cell topology
        names = ['', 'connectivity', 'offsets', 'types']
        types = [dtype, 'Int32', 'Int32', 'UInt8']
        comps = ['3', '', '', '']
        for fname, varnames in vvars.items():
            names.append(fname.capitalize())
            types.append(dtype)
            comps.append(str(len(varnames)))
        # If a mesh has been given then compute the sizes
        if mk:
            npts, ncells, nnodes = self._get_npts_ncells_nnodes(mk)
            nb = npts*dsize
            sizes = [3*nb, 4*nnodes, 4*ncells, ncells]
            sizes.extend(len(varnames)*nb for varnames in vvars.values())
            return names, types, comps, sizes
        else:
            return names, types, comps

    def write_out(self):
        """Top-level driver: writes the .vtu piece(s) and, for .pvtu
        output, the parallel index file."""
        name, extn = os.path.splitext(self.outf)
        parallel = extn == '.pvtu'
        parts = defaultdict(list)
        for mk, sk in zip(self.mesh_inf, self.soln_inf):
            prt = mk.split('_')[-1]
            pfn = '{0}_{1}.vtu'.format(name, prt) if parallel else self.outf
            parts[pfn].append((mk, sk))
        # NOTE: fh is late-bound; the lambda is only ever called inside a
        # `with open(...) as fh` block below.
        write_s_to_fh = lambda s: fh.write(s.encode('utf-8'))
        for pfn, misil in parts.items():
            with open(pfn, 'wb') as fh:
                write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
                              'byte_order="LittleEndian" '
                              'type="UnstructuredGrid" '
                              'version="0.1">\n<UnstructuredGrid>\n')
                # Running byte-offset for appended data
                off = 0
                # Header
                for mk, sk in misil:
                    off = self._write_serial_header(fh, mk, off)
                write_s_to_fh('</UnstructuredGrid>\n'
                              '<AppendedData encoding="raw">\n_')
                # Data
                for mk, sk in misil:
                    self._write_data(fh, mk, sk)
                write_s_to_fh('\n</AppendedData>\n</VTKFile>')
        if parallel:
            with open(self.outf, 'wb') as fh:
                write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
                              'byte_order="LittleEndian" '
                              'type="PUnstructuredGrid" '
                              'version="0.1">\n<PUnstructuredGrid>\n')
                # Header
                self._write_parallel_header(fh)
                # Constitutent pieces
                for pfn in parts:
                    write_s_to_fh('<Piece Source="{0}"/>\n'
                                  .format(os.path.basename(pfn)))
                write_s_to_fh('</PUnstructuredGrid>\n</VTKFile>\n')

    def _write_darray(self, array, vtuf, dtype):
        """Write one appended-format array: a uint32 byte count, then raw data."""
        array = array.astype(dtype)
        np.uint32(array.nbytes).tofile(vtuf)
        array.tofile(vtuf)

    def _write_serial_header(self, vtuf, mk, off):
        """Write the <Piece> header for mesh key mk; returns the updated
        appended-data byte offset."""
        names, types, comps, sizes = self._get_array_attrs(mk)
        npts, ncells = self._get_npts_ncells_nnodes(mk)[:2]
        write_s = lambda s: vtuf.write(s.encode('utf-8'))
        write_s('<Piece NumberOfPoints="{0}" NumberOfCells="{1}">\n'
                .format(npts, ncells))
        write_s('<Points>\n')
        # Write vtk DataArray headers
        for i, (n, t, c, s) in enumerate(zip(names, types, comps, sizes)):
            write_s('<DataArray Name="{0}" type="{1}" '
                    'NumberOfComponents="{2}" '
                    'format="appended" offset="{3}"/>\n'
                    .format(n, t, c, off))
            # 4 extra bytes per array for its uint32 length prefix
            off += 4 + s
            # Write ends/starts of vtk file objects
            if i == 0:
                write_s('</Points>\n<Cells>\n')
            elif i == 3:
                write_s('</Cells>\n<PointData>\n')
        # Write end of vtk element data
        write_s('</PointData>\n</Piece>\n')
        # Return the current offset
        return off

    def _write_parallel_header(self, vtuf):
        """Write the PPoints/PCells/PPointData declarations of a .pvtu file."""
        names, types, comps = self._get_array_attrs()
        write_s = lambda s: vtuf.write(s.encode('utf-8'))
        write_s('<PPoints>\n')
        # Write vtk DataArray headers
        for i, (n, t, s) in enumerate(zip(names, types, comps)):
            write_s('<PDataArray Name="{0}" type="{1}" '
                    'NumberOfComponents="{2}"/>\n'.format(n, t, s))
            if i == 0:
                write_s('</PPoints>\n<PCells>\n')
            elif i == 3:
                write_s('</PCells>\n<PPointData>\n')
        write_s('</PPointData>\n')

    def _write_data(self, vtuf, mk, sk):
        """Interpolate mesh/solution to the vis points and write the
        appended binary payload for one piece."""
        name = self.mesh_inf[mk][0]
        mesh = self.mesh[mk]
        soln = self.soln[sk]
        # Get the shape and sub division classes
        shapecls = subclass_where(BaseShape, name=name)
        subdvcls = subclass_where(BaseShapeSubDiv, name=name)
        # Dimensions
        nspts, neles = mesh.shape[:2]
        # Sub divison points inside of a standard element
        svpts = shapecls.std_ele(self.divisor)
        nsvpts = len(svpts)
        # Shape
        soln_b = shapecls(nspts, self.cfg)
        # Generate the operator matrices
        mesh_vtu_op = soln_b.sbasis.nodal_basis_at(svpts)
        soln_vtu_op = soln_b.ubasis.nodal_basis_at(svpts)
        # Calculate node locations of vtu elements
        vpts = np.dot(mesh_vtu_op, mesh.reshape(nspts, -1))
        vpts = vpts.reshape(nsvpts, -1, self.ndims)
        # Calculate solution at node locations of vtu elements
        vsol = np.dot(soln_vtu_op, soln.reshape(-1, self.nvars*neles))
        vsol = vsol.reshape(nsvpts, self.nvars, -1).swapaxes(0, 1)
        # Append dummy z dimension for points in 2D
        if self.ndims == 2:
            vpts = np.pad(vpts, [(0, 0), (0, 0), (0, 1)], 'constant')
        # Write element node locations to file
        self._write_darray(vpts.swapaxes(0, 1), vtuf, self.dtype)
        # Perform the sub division
        nodes = subdvcls.subnodes(self.divisor)
        # Prepare vtu cell arrays
        vtu_con = np.tile(nodes, (neles, 1))
        vtu_con += (np.arange(neles)*nsvpts)[:, None]
        # Generate offset into the connectivity array
        vtu_off = np.tile(subdvcls.subcelloffs(self.divisor), (neles, 1))
        vtu_off += (np.arange(neles)*len(nodes))[:, None]
        # Tile vtu cell type numbers
        vtu_typ = np.tile(subdvcls.subcelltypes(self.divisor), neles)
        # Write vtu node connectivity, connectivity offsets and cell types
        self._write_darray(vtu_con, vtuf, np.int32)
        self._write_darray(vtu_off, vtuf, np.int32)
        self._write_darray(vtu_typ, vtuf, np.uint8)
        # Process and write out the various fields
        for arr in self._proc_fields(vsol):
            self._write_darray(arr.T, vtuf, self.dtype)
class BaseShapeSubDiv(object):
    """Maps PyFR element shapes onto VTK sub-cell types and connectivity."""
    # VTK cell-type ids and per-cell node counts, keyed by shape name.
    vtk_types = dict(tri=5, quad=9, tet=10, pyr=14, pri=13, hex=12)
    vtk_nodes = dict(tri=3, quad=4, tet=4, pyr=5, pri=6, hex=8)

    @classmethod
    def subcells(cls, n):
        """Sequence of sub-cell shape names for subdivision order n."""
        pass

    @classmethod
    def subnodes(cls, n):
        """Flat node connectivity for subdivision order n."""
        pass

    @classmethod
    def subcelloffs(cls, n):
        """Cumulative node offsets, one entry per sub-cell."""
        return np.cumsum([cls.vtk_nodes[t] for t in cls.subcells(n)])

    @classmethod
    def subcelltypes(cls, n):
        """Array of VTK cell-type ids, one entry per sub-cell."""
        return np.array([cls.vtk_types[t] for t in cls.subcells(n)])
class TensorProdShapeSubDiv(BaseShapeSubDiv):
    """Subdivision for tensor-product shapes (quad/hex via cls.ndim)."""
    @classmethod
    def subnodes(cls, n):
        """Flat connectivity for an n-times subdivided quad/hex."""
        conbase = np.array([0, 1, n + 2, n + 1])
        # Extend quad mapping to hex mapping
        if cls.ndim == 3:
            conbase = np.hstack((conbase, conbase + (1 + n)**2))
        # Calculate offset of each subdivided element's nodes.
        # dtype was np.int, an alias removed in NumPy 1.24; the builtin
        # int is the documented equivalent.
        nodeoff = np.zeros((n,)*cls.ndim, dtype=int)
        for dim, off in enumerate(np.ix_(*(range(n),)*cls.ndim)):
            nodeoff += off*(n + 1)**dim
        # Tile standard element node ordering mapping, then apply offsets
        internal_con = np.tile(conbase, (n**cls.ndim, 1))
        internal_con += nodeoff.T.flatten()[:, None]
        return np.hstack(internal_con)
class QuadShapeSubDiv(TensorProdShapeSubDiv):
    """Quadrilateral subdivision: n**2 sub-quads."""
    name = 'quad'
    ndim = 2

    @classmethod
    def subcells(cls, n):
        return ['quad'] * n**2
class HexShapeSubDiv(TensorProdShapeSubDiv):
    """Hexahedral subdivision: n**3 sub-hexes."""
    name = 'hex'
    ndim = 3

    @classmethod
    def subcells(cls, n):
        return ['hex'] * n**3
class TriShapeSubDiv(BaseShapeSubDiv):
    """Triangle subdivision into n**2 sub-triangles."""
    name = 'tri'

    @classmethod
    def subcells(cls, n):
        return ['tri'] * n**2

    @classmethod
    def subnodes(cls, n):
        """Flat connectivity for an n-times subdivided triangle."""
        rows = []
        for width in range(n, 0, -1):
            # First node index of this row and of the row above
            lo = (n - width)*(n + width + 3) // 2
            hi = lo + width + 1
            # Node-offset template for the two triangles sharing each edge
            offs = [lo, lo + 1, hi, hi + 1, lo + 1, hi]
            # Paired (up/down) triangles across the row's interior
            inner = np.ravel(np.arange(width - 1)[..., None] + offs)
            # Final upward-pointing triangle closing the row
            outer = [ix + width - 1 for ix in offs[:3]]
            rows.extend([inner, outer])
        return np.hstack(rows)
class TetShapeSubDiv(BaseShapeSubDiv):
    """Tetrahedron subdivision into nsubdiv**3 sub-tets."""
    name = 'tet'

    @classmethod
    def subcells(cls, nsubdiv):
        return ['tet']*(nsubdiv**3)

    @classmethod
    def subnodes(cls, nsubdiv):
        """Flat connectivity for an nsubdiv-times subdivided tetrahedron."""
        conlst = []
        jump = 0
        for n in range(nsubdiv, 0, -1):
            for row in range(n, 0, -1):
                # Lower and upper indices
                l = (n - row)*(n + row + 3) // 2 + jump
                u = l + row + 1
                # Lower and upper for one row up
                ln = (n + 1)*(n + 2) // 2 + l - n + row
                un = ln + row
                rowm1 = np.arange(row - 1)[..., None]
                # Base offsets
                offs = [(l, l + 1, u, ln), (l + 1, u, ln, ln + 1),
                        (u, u + 1, ln + 1, un), (u, ln, ln + 1, un),
                        (l + 1, u, u+1, ln + 1), (u + 1, ln + 1, un, un + 1)]
                # Current row
                conlst.extend(rowm1 + off for off in offs[:-1])
                conlst.append(rowm1[:-1] + offs[-1])
                conlst.append([ix + row - 1 for ix in offs[0]])
            jump += (n + 1)*(n + 2) // 2
        # A list comprehension, not a generator: np.hstack on a generator
        # is an error on modern NumPy.
        return np.hstack([np.ravel(c) for c in conlst])
class PriShapeSubDiv(BaseShapeSubDiv):
    """Prism subdivision into n**3 sub-prisms."""
    name = 'pri'

    @classmethod
    def subcells(cls, n):
        return ['pri']*(n**3)

    @classmethod
    def subnodes(cls, n):
        """Flat connectivity for an n-times subdivided prism."""
        # Triangle connectivity
        tcon = TriShapeSubDiv.subnodes(n).reshape(-1, 3)
        # Layer these rows of triangles to define prisms
        loff = (n + 1)*(n + 2) // 2
        lcon = [[tcon + i*loff, tcon + (i + 1)*loff] for i in range(n)]
        # A list comprehension, not a generator: np.hstack on a generator
        # is an error on modern NumPy.
        return np.hstack([np.hstack(l).flat for l in lcon])
class PyrShapeSubDiv(BaseShapeSubDiv):
    """Pyramid subdivision into pyramids plus filler tetrahedra."""
    name = 'pyr'

    @classmethod
    def subcells(cls, n):
        cells = []
        for i in range(n, 0, -1):
            cells += ['pyr']*(i**2 + (i - 1)**2)
            cells += ['tet']*(2*i*(i - 1))
        return cells

    @classmethod
    def subnodes(cls, nsubdiv):
        """Flat connectivity for an nsubdiv-times subdivided pyramid."""
        lcon = []
        # Quad connectivity
        qcon = [QuadShapeSubDiv.subnodes(n + 1).reshape(-1, 4)
                for n in range(nsubdiv)]
        # Simple functions
        def _row_in_quad(n, a=0, b=0):
            return np.array([(n*i + j, n*i + j + 1)
                             for i in range(a, n + b)
                             for j in range(n - 1)])
        def _col_in_quad(n, a=0, b=0):
            return np.array([(n*i + j, n*(i + 1) + j)
                             for i in range(n - 1)
                             for j in range(a, n + b)])
        u = 0
        for n in range(nsubdiv, 0, -1):
            l = u
            u += (n + 1)**2
            lower_quad = qcon[n - 1] + l
            upper_pts = np.arange(n**2) + u
            # First set of pyramids
            lcon.append([lower_quad, upper_pts])
            if n > 1:
                upper_quad = qcon[n - 2] + u
                # List comprehension, not a generator: np.hstack on a
                # generator is an error on modern NumPy.
                lower_pts = np.hstack([range(k*(n + 1)+1, (k + 1)*n + k)
                                       for k in range(1, n)]) + l
                # Second set of pyramids
                lcon.append([upper_quad[:, ::-1], lower_pts])
                lower_row = _row_in_quad(n + 1, 1, -1) + l
                lower_col = _col_in_quad(n + 1, 1, -1) + l
                upper_row = _row_in_quad(n) + u
                upper_col = _col_in_quad(n) + u
                # Tetrahedra
                lcon.append([lower_col, upper_row])
                lcon.append([lower_row[:, ::-1], upper_col])
        # Same generator-to-hstack fix as above.
        return np.hstack([np.column_stack(l).flat for l in lcon])
|
iyer-arvind/PyFR
|
pyfr/writers/vtk.py
|
Python
|
bsd-3-clause
| 14,930
|
[
"VTK"
] |
04d7448a8e712112d9bdd9c3384452db6a3c88336ab6b25dd492dffc49970b80
|
"""This module provides support for Fourier transforms. It calculates
the bilateral Fourier transform using:
S(f) = \int_{-\infty}^{\infty} s(t) e^{-j * 2 * \pi * t} dt
It also allows functions that strictly do not have a Fourier transform
by using Dirac deltas. For example, a, cos(a * t), sin(a * t), exp(j
* a * t).
Copyright 2016--2022 Michael Hayes, UCECE
"""
# TODO:
# Add DiracDelta(t, n)
# Simplify (-j * DiracDelta(f - 1) + j * DiracDelta(f + 1)).inverse_fourier()
# This should give 2 * sin(2 * pi * t)
from sympy.core.function import AppliedUndef
from sympy import sympify, pi, exp, I, oo, S, sign, sin, cos, sinh, cosh, tanh
from sympy import DiracDelta, Heaviside, FourierTransform, Integral
from sympy import fourier_transform as sympy_fourier_transform, Function
from .sym import symsimplify, j
from .transformer import BilateralForwardTransformer
from .utils import factor_const, similarity_shift, expand_functions
from .extrafunctions import rect, sincn, sincu, trap, tri
__all__ = ('FT', 'IFT')
class FourierTransformer(BilateralForwardTransformer):
    """Bilateral Fourier transform engine.

    Maps time-domain SymPy expressions in ``t`` to frequency-domain
    expressions in ``f`` by pattern-matching against a table of known
    transform pairs (using Dirac deltas for signals without a strict
    transform), falling back to SymPy's ``fourier_transform`` when no
    rule applies.  The rule order inside :meth:`term` is significant.
    """
    name = 'Fourier transform'

    def key(self, expr, t, f, **kwargs):
        # Cache key for memoised transforms.
        return expr, t, f

    def simplify_term(self, expr, var):
        return symsimplify(expr)

    def sympy(self, expr, t, f):
        """Fall back to SymPy's fourier_transform; reject unusable results."""
        result = sympy_fourier_transform(expr, t, f)
        if expr != 0 and result == 0:
            # There is a bug in SymPy where it returns 0.
            self.error()
        if isinstance(result, FourierTransform):
            # SymPy gave up and returned an unevaluated transform.
            self.error()
        return result

    def func(self, expr, t, f):
        """Transform an undefined function v(t) into V(f) (or V(f) -> v(t))."""
        if not isinstance(expr, AppliedUndef):
            self.error('Expecting function')
        # Convert v(t) to V(f), etc.
        name = expr.func.__name__
        if self.is_inverse:
            func = Function(name[0].lower() + name[1:])
        else:
            func = Function(name[0].upper() + name[1:])
        result = func(f)
        return result

    def integral(self, expr, t, f):
        """Transform a definite Integral, recognising convolution integrals."""
        const, expr = factor_const(expr, t)
        if len(expr.args) != 2:
            self.error()
        integrand = expr.args[0]
        if not isinstance(expr, Integral):
            self.error()
        if len(expr.args[1]) != 3:
            self.error('Require definite integral')
        var = expr.args[1][0]
        limits = expr.args[1][1:]
        const2, expr2 = factor_const(integrand, var)
        # Running integral of a single function: integrate f(t - tau) from 0.
        if (expr2.is_Function and
                expr2.args[0] == t - var and limits[0] == 0 and limits[1] == oo):
            return const2 * self.term(expr2.subs(t - var, t), t, f) / f
        # Look for convolution integral
        # TODO, handle convolution with causal functions.
        if (limits[0] != -oo) or (limits[1] != oo):
            self.error('Need indefinite limits')
        if ((len(expr.args) != 2) or not expr2.is_Mul or
                not expr2.args[0].is_Function or not expr2.args[1].is_Function):
            self.error('Need integral of product of two functions')
        f1 = expr2.args[0]
        f2 = expr2.args[1]
        # TODO: apply similarity theorem if have f(a * tau) etc.
        # Convolution theorem: (f1 * f2)(t) <--> F1(f) F2(f).
        if (f1.args[0] == var and f2.args[0] == t - var):
            F1 = self.term(f1, var, f)
            F2 = self.term(f2.subs(t - var, t), t, f)
        elif (f2.args[0] == var and f1.args[0] == t - var):
            F1 = self.term(f1.subs(t - var, t), t, f)
            F2 = self.term(f2, var, f)
        else:
            self.error('Cannot recognise convolution')
        return const2 * F1 * F2

    def function(self, expr, t, f):
        """Transform expressions containing undefined functions of t."""
        # Handle expressions with a function of FOO, e.g.,
        # v(t), v(t) * y(t), 3 * v(t) / t, v(4 * a * t), etc.,
        if not expr.has(AppliedUndef):
            self.error()
        const, expr = factor_const(expr, t)
        if isinstance(expr, AppliedUndef) and expr.args[0] == t:
            return self.func(expr, t, f) * const
        tsym = sympify(str(t))
        expr = expr.subs(tsym, t)
        rest = S.One
        undefs = []
        # Split into undefined-function factors and everything else.
        for factor in expr.as_ordered_factors():
            if isinstance(factor, AppliedUndef):
                if factor.args[0] != t:
                    self.error('Weird function %s not of %s' % (factor, t))
                undefs.append(factor)
            else:
                rest *= factor
        if rest.has(AppliedUndef):
            # Have something like 1/v(t)
            self.error()
        exprs = undefs
        if rest.has(t):
            exprs = exprs + [rest]
            rest = S.One
        result = self.term(exprs[0], t, f) * rest
        if len(exprs) == 1:
            return result * const
        # A time-domain product becomes a frequency-domain convolution,
        # built up pairwise with a fresh dummy variable per step.
        for m in range(len(exprs) - 1):
            nu = self.dummy_var(expr, 'tau' if self.is_inverse else 'nu',
                                level=m, real=True)
            expr2 = self.term(exprs[m + 1], t, f)
            result = Integral(result.subs(f, f - nu) * expr2.subs(f, nu),
                              (nu, -oo, oo))
        return result * const

    def term(self, expr, t, f):
        """Transform a single additive term of the expression."""
        const, expr = factor_const(expr, t)
        if expr.has(Integral):
            return self.integral(expr, t, f) * const
        if isinstance(expr, AppliedUndef) and expr.args[0] == t:
            return self.func(expr, t, f) * const
        # TODO add u(t) <--> delta(f) / 2 - j / (2 * pi * f)
        # NOTE(review): the args[0] == t guard means products such as
        # v(t) * y(t) fall through to the generic factor-scan below --
        # confirm this is intended.
        if expr.has(AppliedUndef) and expr.args[0] == t:
            # Handle v(t), v(t) * y(t), 3 * v(t) / t etc.
            return self.function(expr, t, f) * const
        # Check for constant.
        if not expr.has(t):
            return expr * DiracDelta(f) * const
        one = S.One
        const1 = const
        other = one
        exps = one
        # Split the term into a constant part, exponential factors, and
        # the remaining t-dependent factors.
        factors = expr.expand().as_ordered_factors()
        for factor in factors:
            if not factor.has(t):
                const1 *= factor
            else:
                if factor.is_Function and factor.func == exp:
                    exps *= factor
                else:
                    other *= factor
        # Sign of the frequency variable flips for the inverse transform.
        sf = -f if self.is_inverse else f
        if other != 1 and exps == 1:
            # Table of known transform pairs (no exponential factor).
            if other == t:
                return const1 * I / (2 * pi) * DiracDelta(sf, 1)
            elif other == t**2:
                return -const1 / (2 * pi)**2 * DiracDelta(sf, 2)
            # TODO check for other powers of t...
            elif other == sign(t):
                return const1 / (I * pi * sf)
            elif other == sign(t) * t:
                return -const1 * 2 / (2 * pi * f)**2
            elif other == Heaviside(t):
                return const1 / (I * 2 * pi * f) + const1 * DiracDelta(sf) / 2
            elif other == 1 / t:
                return -const1 * I * pi * sign(sf)
            elif other == 1 / t**2:
                return -const1 * 2 * pi**2 * sf * sign(sf)
            elif other.is_Function and other.func == Heaviside and other.args[0] == t:
                return (const1 / (I * 2 * pi * sf) + const1 * DiracDelta(sf) / 2)
            elif other == Heaviside(t) * t:
                return -const1 / (2 * pi * f)**2 + const1 * I * DiracDelta(sf, 1) / (4 * pi)
            # t * u(t - tau)
            elif (other.is_Mul and len(other.args) == 2 and
                  other.args[0] == t and other.args[1].is_Function and
                  other.args[1].func == Heaviside and other.args[1].args[0] == t):
                e = exp(I * 2 * pi * sf)
                return I * DiracDelta(sf, 1) / (4 * pi) * e - 1 / (4 * pi**2 * f**2) * e
            elif other.is_Function and other.func == sincn and other.args[0] == t:
                return const1 * rect(f)
            elif other.is_Function and other.func == sincu and other.args[0] == t:
                return const1 * pi * rect(f * pi)
            elif (other.is_Pow and other.args[1] == 2 and
                  other.args[0].is_Function and other.args[0].func == sincn and
                  other.args[0].args[0] == t):
                other = other.args[0]
                return const1 * tri(f)
            elif other.is_Function and other.func == rect and other.args[0] == t:
                return const1 * sincn(f)
            elif other.is_Function and other.func == tri and other.args[0] == t:
                return const1 * sincn(f)**2
            elif other.is_Function and other.func == trap and other.args[0] == t:
                alpha = other.args[1]
                # Check for rect
                if alpha == 0:
                    return const1 * sincn(f)
                return alpha * const1 * sincn(f) * sincn(alpha * f)
            #
            # factor = other.factor()
            # const2, factor2 = factor_const(factor, t)
            # if factor2.is_Pow and factor2.args[1] == -2:
            #     foo = factor2.args[0]
            #     a = foo.coeff(t, 1)
            #     b = foo.coeff(t, 0)
            #     if a != 0:
            #         return const1 * const2 * f * exp(-b * 2 *pi * f / a) * sign(f) / (2 * pi)
            # Sympy incorrectly gives exp(-a * t) instead of exp(-a * t) *
            # Heaviside(t)
            elif other.is_Pow and other.args[1] == -1 and other.args[0].has(t):
                foo = other.args[0]
                if foo.is_Add:
                    # Handle 1 / (b + j * c * t) style terms.
                    bar = foo.args[1] / t
                    if not bar.has(t) and bar.has(I):
                        a = -(foo.args[0] * 2 * pi * I) / bar
                        return const1 * exp(-a * sf) * Heaviside(sf * sign(a))
                elif foo.is_Function and foo.func == cosh and foo.args[0] == t:
                    return const * pi / cosh(pi**2 * sf)
                elif foo.is_Function and foo.func == sinh and foo.args[0] == t:
                    return -I * const * pi * tanh(pi**2 * sf)
            elif other.is_Function and other.func == tanh and other.args[0] == t:
                return -I * const * pi / sinh(pi**2 * sf)
            if expr == t * DiracDelta(t, 1):
                return const * sf / (-I * 2 * pi)
            # Apply similarity and shift theorems.
            expr2, scale, shift = similarity_shift(expr, t)
            if scale != 1 or shift != 0:
                result = self.term(expr2, t, f / scale) / abs(scale)
                if shift != 0:
                    result *= exp(-I * 2 * pi * f / scale * shift)
                return const * result
            if expr.is_Function and expr.args[0] == t:
                # Try to handle functions such as ramp, rampstep.
                expr2 = expand_functions(expr, t)
                if expr != expr2:
                    terms = expr2.as_ordered_terms()
                    result = 0
                    for term in terms:
                        result += self.term(term, t, f)
                    return result * const
            # Punt and use SymPy.  Should check for t**n, t**n * exp(-a * t), etc.
            return const * self.sympy(expr, t, sf)
        # From here on we have an exponential factor.
        args = exps.args[0]
        foo = args / t
        if foo.has(t):
            # Have exp(a * t**n), SymPy might be able to handle this
            return const * self.sympy(expr, t, sf)
        if exps != 1 and foo.has(I):
            # Modulation: exp(j * w * t) shifts the spectrum.
            if other == 1:
                return const1 * DiracDelta(sf - foo / (I * 2 * pi))
            Q = self.term(other, t, f)
            return const1 * Q.subs(f, (f - foo / (I * 2 * pi)))
        return const * self.sympy(expr, t, sf)

    def noevaluate(self, expr, t, f):
        """Return the transform as an unevaluated Integral."""
        return Integral(expr * exp(-j * 2 * pi * f * t), (t, -oo, oo))

    def check(self, expr, t, f):
        """Sanity-check the input expression before transforming."""
        if expr.has(f):
            self.error('Expression depends on f')
        if expr.is_Piecewise and expr.args[0].args[1].has(t >= 0):
            self.error('Expression is unknown for t < 0 (use causal=True)')

    def rewrite(self, expr, var):
        """Rewrite sin/cos in terms of exponentials prior to transforming."""
        # sym.rewrite(exp) can create exp(log...)
        if expr.has(sin):
            expr = expr.replace(lambda expr: expr.is_Function and expr.func == sin,
                                lambda expr: expr.rewrite(exp))
        if expr.has(cos):
            expr = expr.replace(lambda expr: expr.is_Function and expr.func == cos,
                                lambda expr: expr.rewrite(exp))
        expr = expr.expand()
        return expr
# Module-level singleton used by the FT/fourier_transform wrappers below.
fourier_transformer = FourierTransformer()
def FT(expr, t, f, **kwargs):
    """Compute the bilateral Fourier transform of *expr* with respect to
    *t*, returning an expression in *f*.

    Undefined functions such as v(t) are converted to V(f).

    Expressions that strictly lack a Fourier transform, such as a,
    cos(a * t), sin(a * t), exp(I * a * t), are handled via Dirac deltas."""

    return fourier_transformer.transform(expr, t, f, **kwargs)
def fourier_transform(expr, t, f, **kwargs):
    """Compute the bilateral Fourier transform of *expr* with respect to
    *t*, returning an expression in *f* (long-name alias of ``FT``).

    Undefined functions such as v(t) are converted to V(f).

    Expressions that strictly lack a Fourier transform, such as a,
    cos(a * t), sin(a * t), exp(I * a * t), are handled via Dirac deltas."""

    return fourier_transformer.transform(expr, t, f, **kwargs)
|
mph-/lcapy
|
lcapy/fourier.py
|
Python
|
lgpl-2.1
| 12,994
|
[
"DIRAC"
] |
607da2b330a47e8a65e7f803a0af2ea28dde2574f540cd70290767d955dcfd43
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 5.0.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname(os.path.realpath(__file__))
>>> datadir = os.path.realpath(os.path.join(filepath,
... '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
import warnings
from glob import glob
import numpy as np
import nibabel as nib
from ..fsl.base import FSLCommand, FSLCommandInputSpec, Info
from ..base import (traits, TraitedSpec, InputMultiPath, File,
isdefined, Undefined)
from ...utils.filemanip import (load_json, save_json, split_filename,
fname_presuffix)
warn = warnings.warn
class PrepareFieldmapInputSpec(FSLCommandInputSpec):
    """Inputs for the ``fsl_prepare_fieldmap`` script; each trait's
    ``argstr``/``position`` maps it onto the positional command line."""
    scanner = traits.String('SIEMENS', argstr='%s', position=1,
                            desc='must be SIEMENS', usedefault=True)
    in_phase = File(exists=True, argstr='%s', position=2, mandatory=True,
                    desc=('Phase difference map, in SIEMENS format range from '
                          '0-4096 or 0-8192)'))
    in_magnitude = File(exists=True, argstr='%s', position=3, mandatory=True,
                        desc='Magnitude difference map, brain extracted')
    delta_TE = traits.Float(2.46, usedefault=True, mandatory=True, argstr='%f',
                            position=-2,
                            desc=('echo time difference of the '
                                  'fieldmap sequence in ms. (usually 2.46ms in'
                                  ' Siemens)'))
    nocheck = traits.Bool(False, position=-1, argstr='--nocheck',
                          usedefault=True,
                          desc=('do not perform sanity checks for image '
                                'size/range/dimensions'))
    out_fieldmap = File(argstr='%s', position=4,
                        desc='output name for prepared fieldmap')
class PrepareFieldmapOutputSpec(TraitedSpec):
    """Outputs of ``fsl_prepare_fieldmap``."""
    out_fieldmap = File(exists=True, desc='output name for prepared fieldmap')
class PrepareFieldmap(FSLCommand):
    """
    Interface for the fsl_prepare_fieldmap script (FSL 5.0)

    Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in
    rad/s format (e.g. ```fsl_prepare_fieldmap SIEMENS
    images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65```).

    Examples
    --------

    >>> from nipype.interfaces.fsl import PrepareFieldmap
    >>> prepare = PrepareFieldmap()
    >>> prepare.inputs.in_phase = "phase.nii"
    >>> prepare.inputs.in_magnitude = "magnitude.nii"
    >>> prepare.inputs.output_type = "NIFTI_GZ"
    >>> prepare.cmdline #doctest: +ELLIPSIS
    'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii \
.../phase_fslprepared.nii.gz 2.460000'
    >>> res = prepare.run() # doctest: +SKIP
    """
    _cmd = 'fsl_prepare_fieldmap'
    input_spec = PrepareFieldmapInputSpec
    output_spec = PrepareFieldmapOutputSpec

    def _parse_inputs(self, skip=None):
        # Default the output filename from the phase image, and omit
        # --nocheck from the command line unless explicitly enabled.
        if skip is None:
            skip = []
        if not isdefined(self.inputs.out_fieldmap):
            self.inputs.out_fieldmap = self._gen_fname(
                self.inputs.in_phase, suffix='_fslprepared')
        if not isdefined(self.inputs.nocheck) or not self.inputs.nocheck:
            skip += ['nocheck']
        return super(PrepareFieldmap, self)._parse_inputs(skip=skip)

    def _list_outputs(self):
        outputs = self.output_spec().get()
        outputs['out_fieldmap'] = self.inputs.out_fieldmap
        return outputs

    def _run_interface(self, runtime):
        runtime = super(PrepareFieldmap, self)._run_interface(runtime)
        if runtime.returncode == 0:
            # On success, append an all-zero volume to the fieldmap,
            # making it a two-volume 4D image.
            # NOTE(review): presumably this is the layout FEAT expects --
            # confirm against downstream consumers.
            out_file = self.inputs.out_fieldmap
            im = nib.load(out_file)
            dumb_img = nib.Nifti1Image(np.zeros(im.shape), im.affine,
                                       im.header)
            out_nii = nib.funcs.concat_images((im, dumb_img))
            nib.save(out_nii, out_file)
        return runtime
class TOPUPInputSpec(FSLCommandInputSpec):
    """Inputs for FSL ``topup``; each trait maps onto a ``topup``
    command-line option through its ``argstr``."""
    in_file = File(exists=True, mandatory=True,
                   desc='name of 4D file with images', argstr='--imain=%s')
    encoding_file = File(exists=True, mandatory=True,
                         xor=['encoding_direction'],
                         desc='name of text file with PE directions/times',
                         argstr='--datain=%s')
    encoding_direction = traits.List(traits.Enum('y', 'x', 'z', 'x-', 'y-',
                                                 'z-'), mandatory=True,
                                     xor=['encoding_file'],
                                     requires=['readout_times'],
                                     argstr='--datain=%s',
                                     desc=('encoding direction for automatic '
                                           'generation of encoding_file'))
    readout_times = InputMultiPath(traits.Float,
                                   requires=['encoding_direction'],
                                   xor=['encoding_file'], mandatory=True,
                                   desc=('readout times (dwell times by # '
                                         'phase-encode steps minus 1)'))
    out_base = File(desc=('base-name of output files (spline '
                          'coefficients (Hz) and movement parameters)'),
                    name_source=['in_file'], name_template='%s_base',
                    argstr='--out=%s', hash_files=False)
    out_field = File(argstr='--fout=%s', hash_files=False,
                     name_source=['in_file'], name_template='%s_field',
                     desc='name of image file with field (Hz)')
    out_corrected = File(argstr='--iout=%s', hash_files=False,
                         name_source=['in_file'], name_template='%s_corrected',
                         desc='name of 4D image file with unwarped images')
    out_logfile = File(argstr='--logout=%s', desc='name of log-file',
                       name_source=['in_file'], name_template='%s_topup.log',
                       keep_extension=True, hash_files=False)
    # TODO: the following traits admit values separated by commas, one value
    # per registration level inside topup.
    warp_res = traits.Float(10.0, argstr='--warpres=%f',
                            desc=('(approximate) resolution (in mm) of warp '
                                  'basis for the different sub-sampling levels'
                                  '.'))
    subsamp = traits.Int(1, argstr='--subsamp=%d',
                         desc='sub-sampling scheme')
    fwhm = traits.Float(8.0, argstr='--fwhm=%f',
                        desc='FWHM (in mm) of gaussian smoothing kernel')
    config = traits.String('b02b0.cnf', argstr='--config=%s', usedefault=True,
                           desc=('Name of config file specifying command line '
                                 'arguments'))
    max_iter = traits.Int(5, argstr='--miter=%d',
                          desc='max # of non-linear iterations')
    # BUG FIX: previously used argstr='--miter=%0.f', a copy-paste of
    # max_iter's option, so the regularisation weight was passed to topup
    # as an iteration count.  The correct topup option is --lambda.
    reg_lambda = traits.Float(1.0, argstr='--lambda=%0.f',
                              desc=('lambda weighting value of the '
                                    'regularisation term'))
    ssqlambda = traits.Enum(1, 0, argstr='--ssqlambda=%d',
                            desc=('Weight lambda by the current value of the '
                                  'ssd. If used (=1), the effective weight of '
                                  'regularisation term becomes higher for the '
                                  'initial iterations, therefore initial steps'
                                  ' are a little smoother than they would '
                                  'without weighting. This reduces the '
                                  'risk of finding a local minimum.'))
    regmod = traits.Enum('bending_energy', 'membrane_energy',
                         argstr='--regmod=%s',
                         desc=('Regularisation term implementation. Defaults '
                               'to bending_energy. Note that the two functions'
                               ' have vastly different scales. The membrane '
                               'energy is based on the first derivatives and '
                               'the bending energy on the second derivatives. '
                               'The second derivatives will typically be much '
                               'smaller than the first derivatives, so input '
                               'lambda will have to be larger for '
                               'bending_energy to yield approximately the same'
                               ' level of regularisation.'))
    estmov = traits.Enum(1, 0, argstr='--estmov=%d',
                         desc='estimate movements if set')
    minmet = traits.Enum(0, 1, argstr='--minmet=%d',
                         desc=('Minimisation method 0=Levenberg-Marquardt, '
                               '1=Scaled Conjugate Gradient'))
    splineorder = traits.Int(3, argstr='--splineorder=%d',
                             desc=('order of spline, 2->Qadratic spline, '
                                   '3->Cubic spline'))
    numprec = traits.Enum('double', 'float', argstr='--numprec=%s',
                          desc=('Precision for representing Hessian, double '
                                'or float.'))
    interp = traits.Enum('spline', 'linear', argstr='--interp=%s',
                         desc='Image interpolation model, linear or spline.')
    scale = traits.Enum(0, 1, argstr='--scale=%d',
                        desc=('If set (=1), the images are individually scaled'
                              ' to a common mean'))
    regrid = traits.Enum(1, 0, argstr='--regrid=%d',
                         desc=('If set (=1), the calculations are done in a '
                               'different grid'))
class TOPUPOutputSpec(TraitedSpec):
    """Outputs of FSL ``topup``."""
    out_fieldcoef = File(exists=True,
                         desc='file containing the field coefficients')
    out_movpar = File(exists=True, desc='movpar.txt output file')
    out_enc_file = File(desc='encoding directions file output for applytopup')
    out_field = File(desc='name of image file with field (Hz)')
    out_corrected = File(desc='name of 4D image file with unwarped images')
    out_logfile = File(desc='name of log-file')
class TOPUP(FSLCommand):
    """
    Interface for FSL topup, a tool for estimating and correcting
    susceptibility induced distortions. See FSL documentation for
    `reference <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/TOPUP>`_,
    `usage examples
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/ExampleTopupFollowedByApplytopup>`_,
    and `exemplary config files
    <https://github.com/ahheckel/FSL-scripts/blob/master/rsc/fsl/fsl4/topup/b02b0.cnf>`_.

    Examples
    --------

    >>> from nipype.interfaces.fsl import TOPUP
    >>> topup = TOPUP()
    >>> topup.inputs.in_file = "b0_b0rev.nii"
    >>> topup.inputs.encoding_file = "topup_encoding.txt"
    >>> topup.inputs.output_type = "NIFTI_GZ"
    >>> topup.cmdline #doctest: +ELLIPSIS
    'topup --config=b02b0.cnf --datain=topup_encoding.txt \
--imain=b0_b0rev.nii --out=b0_b0rev_base --iout=b0_b0rev_corrected.nii.gz \
--fout=b0_b0rev_field.nii.gz --logout=b0_b0rev_topup.log'
    >>> res = topup.run() # doctest: +SKIP
    """
    _cmd = 'topup'
    input_spec = TOPUPInputSpec
    output_spec = TOPUPOutputSpec

    def _format_arg(self, name, trait_spec, value):
        # encoding_direction is converted to a generated encoding file;
        # out_base, when given a path component, must point at an
        # existing directory.
        if name == 'encoding_direction':
            return trait_spec.argstr % self._generate_encfile()
        if name == 'out_base':
            path, name, ext = split_filename(value)
            if path != '':
                if not os.path.exists(path):
                    raise ValueError('out_base path must exist if provided')
        return super(TOPUP, self)._format_arg(name, trait_spec, value)

    def _list_outputs(self):
        # Replace the generic out_base entry with the concrete
        # fieldcoef/movpar filenames topup writes next to it.
        outputs = super(TOPUP, self)._list_outputs()
        del outputs['out_base']
        base_path = None
        if isdefined(self.inputs.out_base):
            base_path, base, _ = split_filename(self.inputs.out_base)
            if base_path == '':
                base_path = None
        else:
            base = split_filename(self.inputs.in_file)[1] + '_base'
        outputs['out_fieldcoef'] = self._gen_fname(base, suffix='_fieldcoef',
                                                   cwd=base_path)
        outputs['out_movpar'] = self._gen_fname(base, suffix='_movpar',
                                                ext='.txt', cwd=base_path)
        if isdefined(self.inputs.encoding_direction):
            outputs['out_enc_file'] = self._get_encfilename()
        return outputs

    def _get_encfilename(self):
        # Encoding file is written into the working directory, named
        # after the input image.
        out_file = os.path.join(os.getcwd(),
                                ('%s_encfile.txt' %
                                 split_filename(self.inputs.in_file)[1]))
        return out_file

    def _generate_encfile(self):
        """Generate a topup compatible encoding file based on given directions
        """
        out_file = self._get_encfilename()
        durations = self.inputs.readout_times
        if len(self.inputs.encoding_direction) != len(durations):
            # A single readout time is broadcast to all directions.
            if len(self.inputs.readout_times) != 1:
                raise ValueError(('Readout time must be a float or match the'
                                  'length of encoding directions'))
            durations = durations * len(self.inputs.encoding_direction)
        lines = []
        for idx, encdir in enumerate(self.inputs.encoding_direction):
            # One row per volume: a signed unit vector along the PE axis
            # followed by the readout time.
            direction = 1.0
            if encdir.endswith('-'):
                direction = -1.0
            line = [float(val[0] == encdir[0]) * direction
                    for val in ['x', 'y', 'z']] + [durations[idx]]
            lines.append(line)
        np.savetxt(out_file, np.array(lines), fmt='%d %d %d %.8f')
        return out_file

    def _overload_extension(self, value, name=None):
        # out_base is a basename, not an image, so no extension handling.
        if name == 'out_base':
            return value
        return super(TOPUP, self)._overload_extension(value, name)
class ApplyTOPUPInputSpec(FSLCommandInputSpec):
    """Inputs for FSL ``applytopup``."""
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='name of 4D file with images',
                              argstr='--imain=%s', sep=',')
    encoding_file = File(exists=True, mandatory=True,
                         desc='name of text file with PE directions/times',
                         argstr='--datain=%s')
    in_index = traits.List(traits.Int, argstr='--inindex=%s', sep=',',
                           mandatory=True,
                           desc=('comma separated list of indicies into '
                                 '--datain of the input image (to be '
                                 'corrected)'))
    # fieldcoef/movpar are the paired outputs of a prior TOPUP run; the
    # command receives their shared basename (see ApplyTOPUP._format_arg).
    in_topup_fieldcoef = File(exists=True, argstr="--topup=%s", copyfile=False,
                              requires=['in_topup_movpar'],
                              desc=('topup file containing the field '
                                    'coefficients'))
    in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],
                           copyfile=False, desc='topup movpar.txt file')
    out_corrected = File(desc='output (warped) image',
                         name_source=['in_files'],
                         name_template='%s_corrected',
                         argstr='--out=%s')
    method = traits.Enum('jac', 'lsr', argstr='--method=%s',
                         desc=('use jacobian modulation (jac) or least-squares'
                               ' resampling (lsr)'))
    interp = traits.Enum('trilinear', 'spline', argstr='--interp=%s',
                         desc='interpolation method')
    datatype = traits.Enum('char', 'short', 'int', 'float', 'double',
                           argstr='-d=%s', desc='force output data type')
class ApplyTOPUPOutputSpec(TraitedSpec):
    """Outputs of FSL ``applytopup``."""
    out_corrected = File(exists=True, desc=('name of 4D image file with '
                                            'unwarped images'))
class ApplyTOPUP(FSLCommand):
    """Apply a previously estimated ``topup`` correction to one or more
    4D images (FSL ``applytopup``).

    `General reference
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/ApplytopupUsersGuide>`_
    and `use example
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/ExampleTopupFollowedByApplytopup>`_.

    Examples
    --------

    >>> from nipype.interfaces.fsl import ApplyTOPUP
    >>> applytopup = ApplyTOPUP()
    >>> applytopup.inputs.in_files = ["epi.nii", "epi_rev.nii"]
    >>> applytopup.inputs.encoding_file = "topup_encoding.txt"
    >>> applytopup.inputs.in_index = [1,2]
    >>> applytopup.inputs.in_topup_fieldcoef = "topup_fieldcoef.nii.gz"
    >>> applytopup.inputs.in_topup_movpar = "topup_movpar.txt"
    >>> applytopup.inputs.output_type = "NIFTI_GZ"
    >>> applytopup.cmdline #doctest: +ELLIPSIS
    'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii \
--inindex=1,2 --topup=topup --out=epi_corrected.nii.gz'
    >>> res = applytopup.run() # doctest: +SKIP
    """
    _cmd = 'applytopup'
    input_spec = ApplyTOPUPInputSpec
    output_spec = ApplyTOPUPOutputSpec

    def _format_arg(self, name, spec, value):
        # applytopup expects the basename shared by the fieldcoef/movpar
        # pair, so strip the '_fieldcoef' suffix (and anything after it).
        if name == 'in_topup_fieldcoef':
            basename = value.split('_fieldcoef')[0]
            return spec.argstr % basename
        return super(ApplyTOPUP, self)._format_arg(name, spec, value)
class EddyInputSpec(FSLCommandInputSpec):
    """Inputs for FSL ``eddy``."""
    in_file = File(exists=True, mandatory=True, argstr='--imain=%s',
                   desc=('File containing all the images to estimate '
                         'distortions for'))
    in_mask = File(exists=True, mandatory=True, argstr='--mask=%s',
                   desc='Mask to indicate brain')
    in_index = File(exists=True, mandatory=True, argstr='--index=%s',
                    desc=('File containing indices for all volumes in --imain '
                          'into --acqp and --topup'))
    in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s',
                   desc='File containing acquisition parameters')
    in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s',
                   desc=('File containing the b-vectors for all volumes in '
                         '--imain'))
    in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s',
                   desc=('File containing the b-values for all volumes in '
                         '--imain'))
    out_base = traits.Str('eddy_corrected', argstr='--out=%s',
                          usedefault=True,
                          desc=('basename for output (warped) image'))
    session = File(exists=True, argstr='--session=%s',
                   desc=('File containing session indices for all volumes in '
                         '--imain'))
    # fieldcoef/movpar are the paired outputs of a prior TOPUP run; the
    # command receives their shared basename (see Eddy._format_arg).
    in_topup_fieldcoef = File(exists=True, argstr="--topup=%s",
                              requires=['in_topup_movpar'],
                              desc=('topup file containing the field '
                                    'coefficients'))
    in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],
                           desc='topup movpar.txt file')
    flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',
                      desc='First level EC model')
    fwhm = traits.Float(desc=('FWHM for conditioning filter when estimating '
                              'the parameters'), argstr='--fwhm=%s')
    niter = traits.Int(5, argstr='--niter=%s', desc='Number of iterations')
    method = traits.Enum('jac', 'lsr', argstr='--resamp=%s',
                         desc=('Final resampling method (jacobian/least '
                               'squares)'))
    repol = traits.Bool(False, argstr='--repol',
                        desc='Detect and replace outlier slices')
    # num_threads is not passed on the command line; it drives the
    # OMP_NUM_THREADS environment variable (see Eddy._num_threads_update).
    num_threads = traits.Int(1, usedefault=True, nohash=True,
                             desc="Number of openmp threads to use")
class EddyOutputSpec(TraitedSpec):
    """Outputs of FSL ``eddy``."""
    out_corrected = File(exists=True,
                         desc=('4D image file containing all the corrected '
                               'volumes'))
    out_parameter = File(exists=True,
                         desc=('text file with parameters definining the '
                               'field and movement for each scan'))
class Eddy(FSLCommand):
    """
    Interface for FSL eddy, a tool for estimating and correcting eddy
    currents induced distortions. `User guide
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
    `more info regarding acqp file
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.

    Examples
    --------

    >>> from nipype.interfaces.fsl import Eddy
    >>> eddy = Eddy()
    >>> eddy.inputs.in_file = 'epi.nii'
    >>> eddy.inputs.in_mask = 'epi_mask.nii'
    >>> eddy.inputs.in_index = 'epi_index.txt'
    >>> eddy.inputs.in_acqp = 'epi_acqp.txt'
    >>> eddy.inputs.in_bvec = 'bvecs.scheme'
    >>> eddy.inputs.in_bval = 'bvals.scheme'
    >>> eddy.cmdline #doctest: +ELLIPSIS
    'eddy --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme \
--imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii \
--out=.../eddy_corrected'
    >>> res = eddy.run() # doctest: +SKIP
    """
    _cmd = 'eddy'
    input_spec = EddyInputSpec
    output_spec = EddyOutputSpec

    _num_threads = 1

    def __init__(self, **inputs):
        super(Eddy, self).__init__(**inputs)
        # Keep OMP_NUM_THREADS in sync whenever num_threads changes.
        self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
        if isdefined(self.inputs.num_threads):
            self._num_threads_update()
        else:
            self.inputs.num_threads = self._num_threads

    def _num_threads_update(self):
        # Mirror the trait into the subprocess environment; when it is
        # undefined, make sure no stale setting is left behind.
        self._num_threads = self.inputs.num_threads
        if isdefined(self.inputs.num_threads):
            self.inputs.environ['OMP_NUM_THREADS'] = str(
                self.inputs.num_threads)
        elif 'OMP_NUM_THREADS' in self.inputs.environ:
            del self.inputs.environ['OMP_NUM_THREADS']

    def _format_arg(self, name, spec, value):
        # --topup wants the basename shared by the fieldcoef/movpar pair;
        # --out is always passed as an absolute path.
        if name == 'in_topup_fieldcoef':
            return spec.argstr % value.split('_fieldcoef')[0]
        if name == 'out_base':
            return spec.argstr % os.path.abspath(value)
        return super(Eddy, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self.output_spec().get()
        base = self.inputs.out_base
        outputs['out_corrected'] = os.path.abspath(base + '.nii.gz')
        outputs['out_parameter'] = os.path.abspath(base + '.eddy_parameters')
        return outputs
class SigLossInputSpec(FSLCommandInputSpec):
    """Inputs for FSL ``sigloss``."""
    in_file = File(mandatory=True,
                   exists=True,
                   argstr='-i %s',
                   desc='b0 fieldmap file')
    out_file = File(argstr='-s %s',
                    desc='output signal loss estimate file',
                    genfile=True)
    mask_file = File(exists=True,
                     argstr='-m %s',
                     desc='brain mask file')
    echo_time = traits.Float(argstr='--te=%f',
                             desc='echo time in seconds')
    slice_direction = traits.Enum('x', 'y', 'z',
                                  argstr='-d %s',
                                  desc='slicing direction')
# NOTE(review): class name is missing a 't' ('Ouput'); renaming would
# break existing references, so it is kept as-is.
class SigLossOuputSpec(TraitedSpec):
    """Outputs of FSL ``sigloss``."""
    out_file = File(exists=True,
                    desc='signal loss estimate file')
class SigLoss(FSLCommand):
    """
    Estimates signal loss from a field map (in rad/s)

    Examples
    --------

    >>> from nipype.interfaces.fsl import SigLoss
    >>> sigloss = SigLoss()
    >>> sigloss.inputs.in_file = "phase.nii"
    >>> sigloss.inputs.echo_time = 0.03
    >>> sigloss.inputs.output_type = "NIFTI_GZ"
    >>> sigloss.cmdline #doctest: +ELLIPSIS
    'sigloss --te=0.030000 -i phase.nii -s .../phase_sigloss.nii.gz'
    >>> res = sigloss.run() # doctest: +SKIP
    """
    input_spec = SigLossInputSpec
    output_spec = SigLossOuputSpec
    _cmd = 'sigloss'

    def _list_outputs(self):
        # Default the output name from the input file when not given.
        outputs = self.output_spec().get()
        out_file = self.inputs.out_file
        if not isdefined(out_file) and isdefined(self.inputs.in_file):
            out_file = self._gen_fname(self.inputs.in_file,
                                       suffix='_sigloss')
        outputs['out_file'] = out_file
        return outputs

    def _gen_filename(self, name):
        if name != 'out_file':
            return None
        return self._list_outputs()['out_file']
class EpiRegInputSpec(FSLCommandInputSpec):
    """Inputs for the FSL ``epi_reg`` script."""
    epi = File(exists=True, argstr='--epi=%s', mandatory=True,
               position=-4, desc='EPI image')
    t1_head = File(exists=True, argstr='--t1=%s', mandatory=True,
                   position=-3, desc='wholehead T1 image')
    t1_brain = File(exists=True, argstr='--t1brain=%s', mandatory=True,
                    position=-2, desc='brain extracted T1 image')
    out_base = traits.String("epi2struct", desc='output base name', argstr='--out=%s',
                             position=-1, usedefault=True)
    fmap = File(exists=True, argstr='--fmap=%s',
                desc='fieldmap image (in rad/s)')
    fmapmag = File(exists=True, argstr='--fmapmag=%s',
                   desc='fieldmap magnitude image - wholehead')
    fmapmagbrain = File(exists=True, argstr='--fmapmagbrain=%s',
                        desc='fieldmap magnitude image - brain extracted')
    wmseg = File(exists=True, argstr='--wmseg=%s',
                 desc='white matter segmentation of T1 image, has to be named \
like the t1brain and end on _wmseg')
    echospacing = traits.Float(argstr='--echospacing=%f',
                               desc='Effective EPI echo spacing \
(sometimes called dwell time) - in seconds')
    pedir = traits.Enum('x', 'y', 'z', '-x', '-y', '-z', argstr='--pedir=%s',
                        desc='phase encoding direction, dir = x/y/z/-x/-y/-z')
    weight_image = File(exists=True, argstr='--weight=%s',
                        desc='weighting image (in T1 space)')
    no_fmapreg = traits.Bool(False, argstr='--nofmapreg',
                             desc='do not perform registration of fmap to T1 \
(use if fmap already registered)')
    no_clean = traits.Bool(False, argstr='--noclean',
                           desc='do not clean up intermediate files')
class EpiRegOutputSpec(TraitedSpec):
    """Outputs of the FSL ``epi_reg`` script."""
    out_file = File(exists=True,
                    desc='unwarped and coregistered epi input')
    out_1vol = File(exists=True,
                    desc='unwarped and coregistered single volume')
    fmap2str_mat = File(exists=True,
                        desc='rigid fieldmap-to-structural transform')
    fmap2epi_mat = File(exists=True,
                        desc='rigid fieldmap-to-epi transform')
    fmap_epi = File(exists=True, desc='fieldmap in epi space')
    fmap_str = File(exists=True, desc='fieldmap in structural space')
    fmapmag_str = File(exists=True,
                       desc='fieldmap magnitude image in structural space')
    epi2str_inv = File(exists=True,
                       desc='rigid structural-to-epi transform')
    epi2str_mat = File(exists=True,
                       desc='rigid epi-to-structural transform')
    shiftmap = File(exists=True, desc='shiftmap in epi space')
    fullwarp = File(exists=True,
                    desc='warpfield to unwarp epi and transform into \
structural space')
    wmseg = File(exists=True, desc='white matter segmentation used in flirt bbr')
    wmedge = File(exists=True, desc='white matter edges for visualization')
class EpiReg(FSLCommand):
    """
    Runs FSL epi_reg script for simultaneous coregistration and fieldmap
    unwarping.

    Examples
    --------

    >>> from nipype.interfaces.fsl import EpiReg
    >>> epireg = EpiReg()
    >>> epireg.inputs.epi='epi.nii'
    >>> epireg.inputs.t1_head='T1.nii'
    >>> epireg.inputs.t1_brain='T1_brain.nii'
    >>> epireg.inputs.out_base='epi2struct'
    >>> epireg.inputs.fmap='fieldmap_phase_fslprepared.nii'
    >>> epireg.inputs.fmapmag='fieldmap_mag.nii'
    >>> epireg.inputs.fmapmagbrain='fieldmap_mag_brain.nii'
    >>> epireg.inputs.echospacing=0.00067
    >>> epireg.inputs.pedir='y'
    >>> epireg.cmdline #doctest: +ELLIPSIS
    'epi_reg --echospacing=0.000670 --fmap=fieldmap_phase_fslprepared.nii \
--fmapmag=fieldmap_mag.nii --fmapmagbrain=fieldmap_mag_brain.nii --pedir=y \
--epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct'
    >>> epireg.run() # doctest: +SKIP
    """
    _cmd = 'epi_reg'
    input_spec = EpiRegInputSpec
    output_spec = EpiRegOutputSpec

    def _list_outputs(self):
        # All outputs live in the working directory and are derived from
        # out_base plus a fixed suffix.
        outputs = self.output_spec().get()
        cwd = os.getcwd()

        def _derived(suffix):
            return os.path.join(cwd, self.inputs.out_base + suffix)

        outputs['out_file'] = _derived('.nii.gz')
        # Fieldmap-derived outputs exist only when a fieldmap was supplied
        # and fieldmap registration was not disabled.
        fmapreg_done = (not (isdefined(self.inputs.no_fmapreg) and
                             self.inputs.no_fmapreg) and
                        isdefined(self.inputs.fmap))
        if fmapreg_done:
            outputs['out_1vol'] = _derived('_1vol.nii.gz')
            outputs['fmap2str_mat'] = _derived('_fieldmap2str.mat')
            outputs['fmap2epi_mat'] = _derived('_fieldmaprads2epi.mat')
            outputs['fmap_epi'] = _derived('_fieldmaprads2epi.nii.gz')
            outputs['fmap_str'] = _derived('_fieldmaprads2str.nii.gz')
            outputs['fmapmag_str'] = _derived('_fieldmap2str.nii.gz')
            outputs['shiftmap'] = _derived('_fieldmaprads2epi_shift.nii.gz')
            outputs['fullwarp'] = _derived('_warp.nii.gz')
            outputs['epi2str_inv'] = _derived('_inv.mat')
        outputs['epi2str_mat'] = _derived('.mat')
        outputs['wmedge'] = _derived('_fast_wmedge.nii.gz')
        outputs['wmseg'] = _derived('_fast_wmseg.nii.gz')
        return outputs
#######################################
# deprecated interfaces
#######################################
class EPIDeWarpInputSpec(FSLCommandInputSpec):
    """Input specification for the deprecated :class:`EPIDeWarp` interface.

    Maps each trait onto a command-line flag of ``epidewarp.fsl``.  The
    traits flagged ``genfile=True`` get default filenames from
    :meth:`EPIDeWarp._gen_filename` when left undefined.
    """
    mag_file = File(exists=True,
                    desc='Magnitude file',
                    argstr='--mag %s', position=0, mandatory=True)
    dph_file = File(exists=True,
                    desc='Phase file assumed to be scaled from 0 to 4095',
                    argstr='--dph %s', mandatory=True)
    exf_file = File(exists=True,
                    desc='example func volume (or use epi)',
                    argstr='--exf %s')
    epi_file = File(exists=True,
                    desc='EPI volume to unwarp',
                    argstr='--epi %s')
    tediff = traits.Float(2.46, usedefault=True,
                          desc='difference in B0 field map TEs',
                          argstr='--tediff %s')
    esp = traits.Float(0.58, desc='EPI echo spacing',
                       argstr='--esp %s', usedefault=True)
    sigma = traits.Int(2, usedefault=True, argstr='--sigma %s',
                       desc="2D spatial gaussing smoothing \
stdev (default = 2mm)")
    vsm = traits.String(genfile=True, desc='voxel shift map',
                        argstr='--vsm %s')
    exfdw = traits.String(desc='dewarped example func volume', genfile=True,
                          argstr='--exfdw %s')
    epidw = traits.String(desc='dewarped epi volume', genfile=False,
                          argstr='--epidw %s')
    tmpdir = traits.String(genfile=True, desc='tmpdir',
                           argstr='--tmpdir %s')
    nocleanup = traits.Bool(True, usedefault=True, desc='no cleanup',
                            argstr='--nocleanup')
    cleanup = traits.Bool(desc='cleanup',
                          argstr='--cleanup')
class EPIDeWarpOutputSpec(TraitedSpec):
    """Output specification for :class:`EPIDeWarp`; resolved by
    :meth:`EPIDeWarp._list_outputs`."""
    unwarped_file = File(desc="unwarped epi file")
    vsm_file = File(desc="voxel shift map")
    exfdw = File(desc="dewarped functional volume example")
    exf_mask = File(desc="Mask from example functional volume")
class EPIDeWarp(FSLCommand):
    """
    Wraps the unwarping script `epidewarp.fsl
    <http://surfer.nmr.mgh.harvard.edu/fswiki/epidewarp.fsl>`_.
    .. warning:: deprecated in FSL, please use
                 :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead.
    Examples
    --------
    >>> from nipype.interfaces.fsl import EPIDeWarp
    >>> dewarp = EPIDeWarp()
    >>> dewarp.inputs.epi_file = "functional.nii"
    >>> dewarp.inputs.mag_file = "magnitude.nii"
    >>> dewarp.inputs.dph_file = "phase.nii"
    >>> dewarp.inputs.output_type = "NIFTI_GZ"
    >>> dewarp.cmdline #doctest: +ELLIPSIS
    'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii \
--esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 \
--tmpdir .../temp --vsm .../vsm.nii.gz'
    >>> res = dewarp.run() # doctest: +SKIP
    """
    _cmd = 'epidewarp.fsl'
    input_spec = EPIDeWarpInputSpec
    output_spec = EPIDeWarpOutputSpec

    def __init__(self, **inputs):
        # Warn at construction time so every user of this deprecated wrapper
        # sees the notice once, before any command is run.
        warnings.warn(("Deprecated: Please use "
                       "nipype.workflows.dmri.preprocess.epi.sdc_fmb instead"),
                      DeprecationWarning)
        return super(EPIDeWarp, self).__init__(**inputs)

    def _run_interface(self, runtime):
        # Any stderr output from epidewarp.fsl is treated as a failure, since
        # the script's exit status alone is not relied upon here.
        runtime = super(EPIDeWarp, self)._run_interface(runtime)
        if runtime.stderr:
            self.raise_exception(runtime)
        return runtime

    def _gen_filename(self, name):
        # Provide default filenames for the genfile=True traits.
        # NOTE: for 'epidw' with no epi_file set, control intentionally falls
        # through the remaining checks and returns None.
        if name == 'exfdw':
            if isdefined(self.inputs.exf_file):
                return self._gen_fname(self.inputs.exf_file,
                                       suffix="_exfdw")
            else:
                return self._gen_fname("exfdw")
        if name == 'epidw':
            if isdefined(self.inputs.epi_file):
                return self._gen_fname(self.inputs.epi_file,
                                       suffix="_epidw")
        if name == 'vsm':
            return self._gen_fname('vsm')
        if name == 'tmpdir':
            return os.path.join(os.getcwd(), 'temp')
        return None

    def _list_outputs(self):
        # Resolve output paths, preferring explicit inputs and falling back
        # to the generated defaults used on the command line.
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.exfdw):
            outputs['exfdw'] = self._gen_filename('exfdw')
        else:
            outputs['exfdw'] = self.inputs.exfdw
        if isdefined(self.inputs.epi_file):
            if isdefined(self.inputs.epidw):
                outputs['unwarped_file'] = self.inputs.epidw
            else:
                outputs['unwarped_file'] = self._gen_filename('epidw')
        if not isdefined(self.inputs.vsm):
            outputs['vsm_file'] = self._gen_filename('vsm')
        else:
            outputs['vsm_file'] = self._gen_fname(self.inputs.vsm)
        # The example-functional mask is always written inside the temporary
        # directory used by the script.
        if not isdefined(self.inputs.tmpdir):
            outputs[
                'exf_mask'] = self._gen_fname(cwd=self._gen_filename('tmpdir'),
                                              basename='maskexf')
        else:
            outputs['exf_mask'] = self._gen_fname(cwd=self.inputs.tmpdir,
                                                  basename='maskexf')
        return outputs
class EddyCorrectInputSpec(FSLCommandInputSpec):
    """Input specification for the deprecated :class:`EddyCorrect` interface.

    Arguments are positional, matching ``eddy_correct <in> <out> <ref>``.
    """
    in_file = File(exists=True, desc='4D input file', argstr='%s', position=0,
                   mandatory=True)
    out_file = File(desc='4D output file', argstr='%s', position=1,
                    name_source=['in_file'], name_template='%s_edc',
                    output_name='eddy_corrected')
    ref_num = traits.Int(0, argstr='%d', position=2, desc='reference number',
                         mandatory=True, usedefault=True)
class EddyCorrectOutputSpec(TraitedSpec):
    """Output specification for :class:`EddyCorrect`."""
    eddy_corrected = File(exists=True,
                          desc='path/name of 4D eddy corrected output file')
class EddyCorrect(FSLCommand):
    """
    .. warning:: Deprecated in FSL. Please use
                 :class:`nipype.interfaces.fsl.epi.Eddy` instead
    Example
    -------
    >>> from nipype.interfaces.fsl import EddyCorrect
    >>> eddyc = EddyCorrect(in_file='diffusion.nii',
    ...                     out_file="diffusion_edc.nii", ref_num=0)
    >>> eddyc.cmdline
    'eddy_correct diffusion.nii diffusion_edc.nii 0'
    """
    _cmd = 'eddy_correct'
    input_spec = EddyCorrectInputSpec
    output_spec = EddyCorrectOutputSpec

    def __init__(self, **inputs):
        # Surface the deprecation notice once, at construction time, then
        # hand off to the regular FSLCommand initializer.
        deprecation_msg = ("Deprecated: Please use nipype.interfaces.fsl.epi.Eddy "
                           "instead")
        warnings.warn(deprecation_msg, DeprecationWarning)
        return super(EddyCorrect, self).__init__(**inputs)

    def _run_interface(self, runtime):
        # eddy_correct does not reliably report failure through its exit
        # status, so any output on stderr is escalated to an exception.
        runtime = super(EddyCorrect, self)._run_interface(runtime)
        if runtime.stderr:
            self.raise_exception(runtime)
        return runtime
|
iglpdc/nipype
|
nipype/interfaces/fsl/epi.py
|
Python
|
bsd-3-clause
| 38,169
|
[
"Gaussian"
] |
2d429a090dabe00191bdda8ce032d1c20306442cf83e56e07def7b969e1eb405
|
#!/usr/bin/env python
import platform
import time
import numpy
from pyscf import lib
from pyscf.dft import rks
from mpi4pyscf.lib import logger
from mpi4pyscf.scf import hf as mpi_hf
from mpi4pyscf.tools import mpi
comm = mpi.comm
rank = mpi.rank
@mpi.parallel_call(skip_args=[1, 2, 3, 4], skip_kwargs=['dm_last', 'vhf_last'])
def get_veff(mf, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
    '''Coulomb + XC potential for RKS, evaluated cooperatively by all MPI
    ranks.

    Args:
        mf: the distributed RKS object; its settings are re-broadcast from
            rank 0 at the start of every call.
        mol: ignored -- ``mf.mol`` is always used.
        dm: density matrix; built on rank 0 and broadcast when skipped.
        dm_last: density matrix of the previous SCF cycle (for the
            incremental J/K build).
        vhf_last: tagged veff of the previous cycle, carrying .vj/.vk.
        hermi: hermiticity flag; hermi == 2 means rho vanishes by symmetry.

    Returns:
        vxc as a tagged array with ecoul, exc, vj and vk attributes.
    '''
    # BUGFIX: time.clock() was removed in Python 3.8.  time.process_time()
    # provides the equivalent CPU time for the (cpu, wall) pair that
    # logger.timer expects.
    t0 = (time.process_time(), time.time())
    mf.unpack_(comm.bcast(mf.pack()))
    mol = mf.mol
    ni = mf._numint
    if mf.nlc != '':
        raise NotImplementedError
    omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
    # Broadcast the large input arrays here.  Each allgather makes the
    # decision collectively, so every rank takes the same branch.
    if any(comm.allgather(dm is mpi.Message.SkippedArg)):
        if rank == 0 and dm is None:
            dm = mf.make_rdm1()
        dm = mpi.bcast_tagged_array(dm)
    if any(comm.allgather(dm_last is mpi.Message.SkippedArg)):
        dm_last = mpi.bcast_tagged_array(dm_last)
    if any(comm.allgather(vhf_last is mpi.Message.SkippedArg)):
        vhf_last = mpi.bcast_tagged_array(vhf_last)
    ground_state = (isinstance(dm, numpy.ndarray) and dm.ndim == 2)
    if mf.grids.coords is None:
        _setup_grids_(mf, dm)
        t0 = logger.timer(mf, 'setting up grids', *t0)
    if hermi == 2:  # because rho = 0
        n, exc, vxc = 0, 0, 0
    else:
        # Each rank integrates its own slice of the grid; reduce to totals.
        n, exc, vxc = ni.nr_rks(mol, mf.grids, mf.xc, dm)
        n = comm.allreduce(n)
        exc = comm.allreduce(exc)
        vxc = mpi.reduce(vxc)
        logger.debug(mf, 'nelec by numeric integration = %s', n)
    t0 = logger.timer(mf, 'vxc', *t0)
    if abs(hyb) < 1e-10 and abs(alpha) < 1e-10:
        # Pure functional: only the Coulomb term is needed.  Reuse the
        # previous cycle's vj when available (incremental build).
        vk = None
        if getattr(vhf_last, 'vj', None) is not None:
            ddm = numpy.asarray(dm) - dm_last
            vj = mf.get_j(mol, ddm, hermi)
            vj += vhf_last.vj
        else:
            vj = mf.get_j(mol, dm, hermi)
        vxc += vj
    else:
        # Hybrid (possibly range-separated) functional: exact exchange with
        # fraction hyb, plus a long-range correction scaled by (alpha - hyb).
        if getattr(vhf_last, 'vk', None) is not None:
            ddm = numpy.asarray(dm) - dm_last
            vj, vk = mf.get_jk(mol, ddm, hermi)
            vk *= hyb
            if abs(omega) > 1e-10:
                vklr = mf.get_k(mol, ddm, hermi, omega=omega)
                vk += vklr * (alpha - hyb)
            ddm = None
            vj += vhf_last.vj
            vk += vhf_last.vk
        else:
            vj, vk = mf.get_jk(mol, dm, hermi)
            vk *= hyb
            if abs(omega) > 1e-10:
                vklr = mf.get_k(mol, dm, hermi, omega=omega)
                vk += vklr * (alpha - hyb)
        vxc += vj - vk * .5
        if ground_state:
            exc -= numpy.einsum('ij,ji', dm, vk) * .5 * .5
    if ground_state:
        ecoul = numpy.einsum('ij,ji', dm, vj) * .5
    else:
        ecoul = None
    vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
    return vxc
def _setup_grids_(mf, dm):
    # Build the DFT integration grid on rank 0 and scatter one slice of
    # coords/weights to each rank, optionally pruning low-density points.
    #
    # NOTE: comm.bcast and mpi.scatter are MPI collectives -- every rank must
    # reach them in the same order, so do not reorder these branches.
    mol = mf.mol
    grids = mf.grids
    if rank == 0:
        grids.build(with_non0tab=False)
        ngrids = comm.bcast(grids.weights.size)
        # Pre-split into one chunk per rank; the scatter below hands chunk i
        # to rank i.
        grids.coords = numpy.array_split(grids.coords, mpi.pool.size)
        grids.weights = numpy.array_split(grids.weights, mpi.pool.size)
    else:
        ngrids = comm.bcast(None)
    grids.coords = mpi.scatter(grids.coords)
    grids.weights = mpi.scatter(grids.weights)
    ground_state = (isinstance(dm, numpy.ndarray) and dm.ndim == 2)
    if mf.small_rho_cutoff > 1e-20 and ground_state:
        # Prune grid points with negligible density, but only if the pruned
        # grid still integrates to (approximately) the right electron count.
        rho = mf._numint.get_rho(mol, dm, grids, mf.max_memory)
        n = comm.allreduce(numpy.dot(rho, grids.weights))
        if abs(n-mol.nelectron) < rks.NELEC_ERROR_TOL*n:
            rw = rho * grids.weights
            idx = abs(rw) > mf.small_rho_cutoff / ngrids
            logger.alldebug1(mf, 'Drop grids %d',
                             grids.weights.size - numpy.count_nonzero(idx))
            grids.coords = numpy.asarray(grids.coords [idx], order='C')
            grids.weights = numpy.asarray(grids.weights[idx], order='C')
            grids.non0tab = grids.make_mask(mol, grids.coords)
    return grids
@mpi.register_class
class RKS(rks.RKS, mpi_hf.RHF):
    """Restricted Kohn-Sham with the J/K and XC work distributed over MPI."""

    # Reuse the MPI-aware integral builders from the parallel SCF module.
    get_jk = mpi_hf.SCF.get_jk
    get_j = mpi_hf.SCF.get_j
    get_k = mpi_hf.SCF.get_k

    @lib.with_doc(rks.RKS.get_veff.__doc__)
    def get_veff(self, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
        assert(mol is None or mol is self.mol)
        # The module-level worker re-broadcasts mf state itself, so mol is
        # always passed as None.
        return get_veff(self, None, dm, dm_last, vhf_last, hermi)

    def pack(self):
        # Settings that must be mirrored onto the worker processes.
        attrs = ('verbose', 'direct_scf_tol', 'xc', 'nlc', 'omega',
                 'small_rho_cutoff')
        return {name: getattr(self, name) for name in attrs}

    def dump_flags(self, verbose=None):
        # Collect platform info on every rank, but only rank 0 prints.
        mpi_info = mpi.platform_info()
        if rank == 0:
            rks.RKS.dump_flags(self, verbose)
            lib.logger.debug(self, 'MPI info (rank, host, pid) %s', mpi_info)
        return self
|
sunqm/mpi4pyscf
|
mpi4pyscf/dft/rks.py
|
Python
|
gpl-3.0
| 4,942
|
[
"PySCF"
] |
e8cf083238871cff48ca2b7985893459ae113db9f8e6ffa9b8004c7232fed1f9
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import time
import numpy as np
from psi4 import core
from psi4.driver.p4util.exceptions import *
from psi4.driver import p4util
from psi4.driver import psifiles as psif
from .sapt_util import print_sapt_var
def _symmetrize(mat):
tmp = 0.5 * (mat + mat.transpose())
return tmp
def _compute_fxc(PQrho, half_Saux, halfp_Saux, x_alpha, rho_thresh=1.e-8):
    """
    Computes the gridless (P|fxc|Q) ALDA tensor.

    Parameters
    ----------
    PQrho : density projected onto the auxiliary basis (square matrix)
    half_Saux : S^{-1/2} of the auxiliary overlap
    halfp_Saux : S^{+1/2} of the auxiliary overlap
    x_alpha : exact-exchange fraction; the LDA exchange kernel is scaled
        by (1 - x_alpha)
    rho_thresh : eigenvalues of the leveled density below this are clamped
        to avoid singularities, and their kernel entries zeroed afterwards
    """
    naux = PQrho.shape[0]
    # Level it out
    PQrho_lvl = core.triplet(half_Saux, PQrho, half_Saux, False, False, False)
    # Rotate into a diagonal basis
    rho = core.Vector("rho eigenvalues", naux)
    U = core.Matrix("rho eigenvectors", naux, naux)
    PQrho_lvl.diagonalize(U, rho, core.DiagonalizeOrder.Ascending)
    # "Gridless DFT"
    mask = rho.np < rho_thresh  # Values too small cause singularities
    rho.np[mask] = rho_thresh
    dft_size = rho.shape[0]
    inp = {"RHO_A": rho}
    out = {"V": core.Vector(dft_size), "V_RHO_A": core.Vector(dft_size), "V_RHO_A_RHO_A": core.Vector(dft_size)}
    # Exchange and correlation kernels accumulate into the same output
    # buffers; the exchange part is scaled before correlation is added.
    func_x = core.LibXCFunctional('XC_LDA_X', True)
    func_x.compute_functional(inp, out, dft_size, 2)
    out["V_RHO_A_RHO_A"].scale(1.0 - x_alpha)
    func_c = core.LibXCFunctional('XC_LDA_C_VWN', True)
    func_c.compute_functional(inp, out, dft_size, 2)
    # Zero the clamped eigenvalues so they do not contribute.
    out["V_RHO_A_RHO_A"].np[mask] = 0
    # Rotate back
    Ul = U.clone()
    Ul.np[:] *= out["V_RHO_A_RHO_A"].np
    tmp = core.doublet(Ul, U, False, True)
    # Undo the leveling
    return core.triplet(halfp_Saux, tmp, halfp_Saux, False, False, False)
def df_fdds_dispersion(primary, auxiliary, cache, is_hybrid, x_alpha, leg_points=10, leg_lambda=0.3, do_print=True):
    """Compute the E20 dispersion energy from frequency-dependent density
    susceptibilities, integrating over imaginary frequency by Gauss-Legendre
    quadrature.

    Parameters
    ----------
    primary, auxiliary : basis sets for the FDDS_Dispersion helper object
    cache : dict of monomer orbitals/eigenvalues/densities (keys like
        "Cocc_A", "eps_occ_A", "D_A", ...)
    is_hybrid : whether the functional carries exact exchange (adds the
        hybrid K-matrix corrections)
    x_alpha : exact-exchange mixing parameter
    leg_points : number of quadrature points
    leg_lambda : scale of the rational mapping from [-1, 1] to [0, inf)
    do_print : emit per-point progress to the Psi4 output

    Returns
    -------
    dict with the uncoupled ("Disp20,FDDS (unc)") and coupled ("Disp20")
    dispersion energies.
    """
    rho_thresh = core.get_option("SAPT", "SAPT_FDDS_V2_RHO_CUTOFF")
    if do_print:
        core.print_out("\n ==> E20 Dispersion (CHF FDDS) <== \n\n")
        core.print_out(" Legendre Points: % 10d\n" % leg_points)
        core.print_out(" Lambda Shift: % 10.3f\n" % leg_lambda)
        core.print_out(" Fxc Kernal: % 10s\n" % "ALDA")
        core.print_out(" (P|Fxc|Q) Thresh: % 8.3e\n" % rho_thresh)
    # Build object
    df_matrix_keys = ["Cocc_A", "Cvir_A", "Cocc_B", "Cvir_B"]
    fdds_matrix_cache = {key: cache[key] for key in df_matrix_keys}
    df_vector_keys = ["eps_occ_A", "eps_vir_A", "eps_occ_B", "eps_vir_B"]
    fdds_vector_cache = {key: cache[key] for key in df_vector_keys}
    fdds_obj = core.FDDS_Dispersion(primary, auxiliary, fdds_matrix_cache, fdds_vector_cache, is_hybrid)
    # Aux Densities
    D = fdds_obj.project_densities([cache["D_A"], cache["D_B"]])
    # Temps: S^{-1/2} and S^{+1/2} of the auxiliary overlap, used by the
    # gridless kernel construction.
    half_Saux = fdds_obj.aux_overlap().clone()
    half_Saux.power(-0.5, 1.e-12)
    halfp_Saux = fdds_obj.aux_overlap().clone()
    halfp_Saux.power(0.5, 1.e-12)
    # Builds potentials: W = metric + (P|fxc|Q) for each monomer
    W_A = fdds_obj.metric().clone()
    W_A.axpy(1.0, _compute_fxc(D[0], half_Saux, halfp_Saux, x_alpha, rho_thresh=rho_thresh))
    W_A = W_A.to_array()
    W_B = fdds_obj.metric().clone()
    W_B.axpy(1.0, _compute_fxc(D[1], half_Saux, halfp_Saux, x_alpha, rho_thresh=rho_thresh))
    W_B = W_B.to_array()
    # Nuke the densities
    del D
    metric = fdds_obj.metric().clone().to_array()
    metric_inv = fdds_obj.metric_inv().clone().to_array()
    # Integrate
    core.print_out("\n => Time Integration <= \n\n")
    val_pack = ("Omega", "Weight", "Disp20,u", "Disp20", "time [s]")
    core.print_out("% 12s % 12s % 14s % 14s % 10s\n" % val_pack)
    start_time = time.time()
    total_uc = 0
    total_c = 0
    # Read R
    if is_hybrid:
        R_A = fdds_obj.R_A().to_array()
        R_B = fdds_obj.R_B().to_array()
        Rtinv_A = np.linalg.pinv(R_A, rcond=1.e-13).transpose()
        Rtinv_B = np.linalg.pinv(R_B, rcond=1.e-13).transpose()
    # Quadrature loop: map each Legendre node to an imaginary frequency
    # omega and weight via the rational transformation with scale leg_lambda.
    for point, weight in zip(*np.polynomial.legendre.leggauss(leg_points)):
        omega = leg_lambda * (1.0 - point) / (1.0 + point)
        lambda_scale = ((2.0 * leg_lambda) / (point + 1.0)**2)
        # Monomer A
        if is_hybrid:
            aux_dict = fdds_obj.form_aux_matrices("A", omega)
            aux_dict = {k: v.to_array() for k, v in aux_dict.items()}
            X_A_uc = aux_dict["amp"].copy()
            X_A = X_A_uc - x_alpha * aux_dict["K2L"]
            # K matrices
            K_A = -x_alpha * aux_dict["K1LD"] - x_alpha * aux_dict["K2LD"] + x_alpha * x_alpha * aux_dict["K21L"]
            KRS_A = K_A.dot(Rtinv_A).dot(metric)
        else:
            X_A = fdds_obj.form_unc_amplitude("A", omega)
            X_A.scale(-1.0)
            X_A = X_A.to_array()
            X_A_uc = X_A.copy()
        # Coupled A
        XSW_A = X_A.dot(metric_inv).dot(W_A)
        if is_hybrid:
            XSW_A += 0.25 * KRS_A
        amplitude = np.linalg.pinv(metric - XSW_A, rcond=1.e-13)
        X_A_coupled = X_A + XSW_A.dot(amplitude).dot(X_A)
        # Free per-point temporaries promptly -- these are naux x naux.
        del X_A, XSW_A, amplitude
        if is_hybrid:
            del K_A, KRS_A, aux_dict
        # Monomer B
        if is_hybrid:
            aux_dict = fdds_obj.form_aux_matrices("B", omega)
            aux_dict = {k: v.to_array() for k, v in aux_dict.items()}
            X_B_uc = aux_dict["amp"].copy()
            X_B = X_B_uc - x_alpha * aux_dict["K2L"]
            # K matrices
            K_B = -x_alpha * aux_dict["K1LD"] - x_alpha * aux_dict["K2LD"] + x_alpha * x_alpha * aux_dict["K21L"]
            KRS_B = K_B.dot(Rtinv_B).dot(metric)
        else:
            X_B = fdds_obj.form_unc_amplitude("B", omega)
            X_B.scale(-1.0)
            X_B = X_B.to_array()
            X_B_uc = X_B.copy()
        # Coupled B
        XSW_B = X_B.dot(metric_inv).dot(W_B)
        if is_hybrid:
            XSW_B += 0.25 * KRS_B
        amplitude = np.linalg.pinv(metric - XSW_B, rcond=1.e-13)
        X_B_coupled = X_B + XSW_B.dot(amplitude).dot(X_B)
        del X_B, XSW_B, amplitude
        if is_hybrid:
            del K_B, KRS_B, aux_dict
        # Make sure the results are symmetrized
        X_A_uc = _symmetrize(X_A_uc)
        X_B_uc = _symmetrize(X_B_uc)
        X_A_coupled = _symmetrize(X_A_coupled)
        X_B_coupled = _symmetrize(X_B_coupled)
        # Combine: trace of S^-1 X_A S^-1 X_B for both flavors
        tmp_uc = metric_inv.dot(X_A_uc).dot(metric_inv)
        value_uc = np.dot(tmp_uc.flatten(), X_B_uc.flatten())
        del tmp_uc
        tmp_c = metric_inv.dot(X_A_coupled).dot(metric_inv)
        value_c = np.dot(tmp_c.flatten(), X_B_coupled.flatten())
        # Tally
        total_uc += value_uc * weight * lambda_scale
        total_c += value_c * weight * lambda_scale
        if do_print:
            tmp_disp_unc = value_uc * weight * lambda_scale
            tmp_disp = value_c * weight * lambda_scale
            fdds_time = time.time() - start_time
            val_pack = (omega, weight, tmp_disp_unc, tmp_disp, fdds_time)
            core.print_out("% 12.3e % 12.3e % 14.3e % 14.3e %10d\n" % val_pack)
    # -1/(2 pi) prefactor of the Casimir-Polder integral
    Disp20_uc = -1.0 / (2.0 * np.pi) * total_uc
    Disp20_c = -1.0 / (2.0 * np.pi) * total_c
    core.print_out("\n")
    core.print_out(print_sapt_var("Disp20,u", Disp20_uc, short=True) + "\n")
    core.print_out(print_sapt_var("Disp20", Disp20_c, short=True) + "\n")
    return {"Disp20,FDDS (unc)": Disp20_uc, "Disp20": Disp20_c}
def df_mp2_fisapt_dispersion(wfn, primary, auxiliary, cache, do_print=True):
    """Compute the uncoupled MP2-level E20 dispersion and exchange-dispersion
    through the F-ISAPT machinery.

    Parameters
    ----------
    wfn : wavefunction used to construct the core.FISAPT object
    primary : primary basis (unused here beyond the signature)
    auxiliary : auxiliary basis registered as DF_BASIS_SAPT
    cache : dict of SAPT matrices/vectors; keys are remapped to the
        F-ISAPT naming convention below
    do_print : print the resulting energies to the output file

    Returns
    -------
    dict with "Exch-Disp20,u" and "Disp20,u".
    """
    if do_print:
        core.print_out("\n ==> E20 Dispersion (MP2) <== \n\n")
    # Build object
    df_matrix_keys = ["Cocc_A", "Cvir_A", "Cocc_B", "Cvir_B"]
    df_mfisapt_keys = ["Caocc0A", "Cvir0A", "Caocc0B", "Cvir0B"]
    matrix_cache = {fkey: cache[ckey] for ckey, fkey in zip(df_matrix_keys, df_mfisapt_keys)}
    other_keys = ["S", "D_A", "P_A", "V_A", "J_A", "K_A", "D_B", "P_B", "V_B", "J_B", "K_B", "K_O"]
    for key in other_keys:
        matrix_cache[key] = cache[key]
    # matrix_cache["K_O"] = matrix_cache["K_O"].transpose()
    df_vector_keys = ["eps_occ_A", "eps_vir_A", "eps_occ_B", "eps_vir_B"]
    df_vfisapt_keys = ["eps_aocc0A", "eps_vir0A", "eps_aocc0B", "eps_vir0B"]
    vector_cache = {fkey: cache[ckey] for ckey, fkey in zip(df_vector_keys, df_vfisapt_keys)}
    wfn.set_basisset("DF_BASIS_SAPT", auxiliary)
    fisapt = core.FISAPT(wfn)
    # Compute!
    fisapt.disp(matrix_cache, vector_cache, False)
    scalars = fisapt.scalars()
    core.print_out("\n")
    core.print_out(print_sapt_var("Disp20 (MP2)", scalars["Disp20"], short=True) + "\n")
    core.print_out(print_sapt_var("Exch-Disp20,u", scalars["Exch-Disp20"], short=True) + "\n")
    ret = {}
    ret["Exch-Disp20,u"] = scalars["Exch-Disp20"]
    ret["Disp20,u"] = scalars["Disp20"]
    if core.get_option("SAPT", "DO_DISP_EXCH_SINF"):
        fisapt.sinf_disp(matrix_cache, vector_cache, False)
        # NOTE(review): `scalars` is re-read here but never used afterwards --
        # presumably sinf_disp publishes its results elsewhere, or a
        # ret[...] assignment is missing.  Confirm against upstream Psi4.
        scalars = fisapt.scalars()
    return ret
def df_mp2_sapt_dispersion(dimer_wfn, wfn_A, wfn_B, primary_basis, aux_basis, cache, do_print=True):
    """Compute the uncoupled MP2-level E20 dispersion terms by running the
    classic SAPT code with only the E20DISP branch enabled.

    Parameters
    ----------
    dimer_wfn : dimer wavefunction; receives the DF basis sets and carries
        the resulting SAPT variables
    wfn_A, wfn_B : monomer wavefunctions passed to core.sapt
    primary_basis : primary basis (unused here beyond the signature)
    aux_basis : auxiliary basis registered as DF_BASIS_SAPT/DF_BASIS_ELST
    do_print : print the resulting energies to the output file

    Returns
    -------
    dict with "Exch-Disp20,u" and "Disp20,u".
    """
    if do_print:
        core.print_out("\n ==> E20 Dispersion (MP2) <== \n\n")
    # Stash the SAPT options so the temporary E20DISP-only settings can be
    # restored after the call.
    optstash = p4util.OptionsState(['SAPT', 'SAPT0_E10'], ['SAPT', 'SAPT0_E20IND'], ['SAPT', 'SAPT0_E20DISP'],
                                   ['SAPT', 'SAPT_QUIET'])
    core.set_local_option("SAPT", "SAPT0_E10", False)
    core.set_local_option("SAPT", "SAPT0_E20IND", False)
    core.set_local_option("SAPT", "SAPT0_E20DISP", True)
    core.set_local_option("SAPT", "SAPT_QUIET", True)
    # The SAPT code reads the monomer integrals from files namespaced under
    # 'dimer', so remap the monomer file namespaces first.
    if core.get_option('SCF', 'REFERENCE') == 'RHF':
        core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
        core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
    core.IO.set_default_namespace('dimer')
    dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
    dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
    # Run for its side effects: the dispersion terms land in the wavefunction
    # variables read back below.
    e_sapt = core.sapt(dimer_wfn, wfn_A, wfn_B)
    optstash.restore()
    svars = dimer_wfn.variables()
    core.print_out("\n")
    core.print_out(print_sapt_var("Disp20 (MP2)", svars["E DISP20"], short=True) + "\n")
    core.print_out(print_sapt_var("Exch-Disp20,u", svars["E EXCH-DISP20"], short=True) + "\n")
    ret = {}
    ret["Exch-Disp20,u"] = svars["E EXCH-DISP20"]
    ret["Disp20,u"] = svars["E DISP20"]
    return ret
|
lothian/psi4
|
psi4/driver/procrouting/sapt/sapt_mp2_terms.py
|
Python
|
lgpl-3.0
| 11,036
|
[
"Psi4"
] |
0e02c318025b597df87db7ae302fcc60e8e7b67c6c42f7f5e3b5c077cc535181
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyqcreport(RPackage):
    """This package creates a QC report for an AffyBatch object.
    The report is intended to allow the user to quickly assess the
    quality of a set of arrays in an AffyBatch object."""

    homepage = "https://www.bioconductor.org/packages/affyQCReport/"
    url = "https://git.bioconductor.org/packages/affyQCReport"

    # Bioconductor distributes releases via git; the commit pins the exact
    # state of the 1.54.0 release.
    version('1.54.0', git='https://git.bioconductor.org/packages/affyQCReport', commit='5572e9981dc874b78b4adebf58080cac3fbb69e1')

    # Release 1.54.0 is built against the R 3.4 series.
    depends_on('r@3.4.0:3.4.9', when='@1.54.0')
    # R package dependencies, needed at both build and run time.
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-affy', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-affyplm', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-simpleaffy', type=('build', 'run'))
    depends_on('r-xtable', type=('build', 'run'))
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-affyqcreport/package.py
|
Python
|
lgpl-2.1
| 2,211
|
[
"Bioconductor"
] |
31958dda4c671c63b813fd40739b0b2ca6e76a90d03cd927b62c4a48a8deb37f
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
"""@file des_meds.py Module for generating DES Multi-Epoch Data Structures (MEDS) in GalSim.
This module defines the `MultiExposureObject` class for representing multiple exposure data for a single object, and the `WCSTransform` class used to store general, locally-linearized WCS information per exposure. The `write_meds` function
can be used to write a list of `MultiExposureObject` instances to a single MEDS file.
Importing this module also adds these data structures to the config framework, so that MEDS file output can subsequently be simulated directly using a config file.
"""
import numpy
import galsim
# these image stamp sizes are available in MEDS format
BOX_SIZES = [32,48,64,96,128,192,256]
# while creating the meds file, all the data is stored in memory, and then written to disc once all
# the necessary images have been created.
# You can control the amount of memory used to prevent jamming your system.
MAX_MEMORY = 1e9
# Maximum number of exposures allowed per galaxy (incl. coadd)
MAX_NCUTOUTS = 11
# flags for unavailable data
EMPTY_START_INDEX = 9999
EMPTY_JAC = 999
class WCSTransform(object):
    """
    Very simple container for a locally linearized WCS transformation: a
    2x2 Jacobian plus a shifted centroid.
    The (u,v) coordinate plane is the local tangent plane at the location of
    the galaxy, with North = +v and West = +u; the 'u' coordinate scales as
    cos(DEC) * dRA.  'row' and 'col' follow the C/python array convention
    arr[row,col].
    Available fields:
        self.dudrow - element 1,1 of the Jacobian matrix
        self.dudcol - element 1,2 of the Jacobian matrix
        self.dvdrow - element 2,1 of the Jacobian matrix
        self.dvdcol - element 2,2 of the Jacobian matrix
        self.row0   - horizontal position of the centroid as given by SExtractor, in pixel coordinates
        self.col0   - vertical position of the centroid as given by SExtractor, in pixel coordinates
    """
    def __init__(self, dudrow, dudcol, dvdrow, dvdcol, row0, col0):
        # Store the four Jacobian elements and the centroid as plain
        # attributes; this class carries no behavior of its own.
        (self.dudrow, self.dudcol,
         self.dvdrow, self.dvdcol) = (dudrow, dudcol, dvdrow, dvdcol)
        self.row0 = row0
        self.col0 = col0
class MultiExposureObject(object):
    """
    A class containing exposures for single object, along with other information.
    Available fields:
        self.images     list of images of the object (GalSim images)
        self.weights    list of weight maps (GalSim images)
        self.segs       list of segmentation masks (GalSim images)
        self.wcstrans   list of WCS transformations (WCSTransform objects)
        self.n_cutouts  number of exposures
        self.box_size   size of each exposure image
    Constructor parameters:
    @param images       list of images of the object (GalSim images)
    @param weights      list of weight maps (GalSim images)
    @param badpix       list of bad pixel masks (GalSim images)
    @param segs         list of segmentation maps (GalSim images)
    @param wcstrans     list of WCS transformations (WCSTransform objects)
    @param id           galaxy id
    Images, weights and segs have to be square numpy arrays with size in
    BOX_SIZES = [32,48,64,96,128,192,256].
    Number of exposures for all lists (images,weights,segs,wcstrans) have to be the same and
    smaller than MAX_NCUTOUTS (default 11).
    """
    def __init__(self, images, weights=None, badpix=None, segs=None, wcstrans=None, id=0):
        # assign the ID
        self.id = id
        # check if images is a list
        if not isinstance(images, list):
            raise TypeError('images should be a list')
        # get number of cutouts from image list
        self.images = images
        # get box size from the first image
        self.box_size = self.images[0].array.shape[0]
        self.n_cutouts = len(self.images)
        # see if there are cutouts
        if self.n_cutouts < 1:
            raise ValueError('no cutouts in this object')
        # check if the box size is one of the sizes allowed by the MEDS format
        if self.box_size not in BOX_SIZES:
            raise ValueError( 'box size should be in '+str(BOX_SIZES)+', is '+str(self.box_size) )
        # check if weights, segs and wcstrans were supplied. If not, create sensible values.
        # BUGFIX: use identity checks against None -- galsim images may
        # overload the (in)equality operators, so `weights != None` is not a
        # reliable presence test.
        if weights is not None:
            self.weights = weights
        else:
            self.weights = [galsim.ImageF(self.box_size, self.box_size, init_value=1)]*self.n_cutouts
        # check segmaps
        if segs is not None:
            self.segs = segs
            # I think eventually, the meds files will have some more sophisticated pixel map
            # where the segmentation info and bad pixel info are separately coded.
            # However, for now, we just set to 0 any bad pixels.
            # (Not that GalSim has any mechanism yet for generating bad pixels, so this is
            # usually a null op, but it's in place for when there is something to do.)
            if badpix is not None:
                if len(self.segs) != len(badpix):
                    raise ValueError("segs and badpix are different lengths")
                for i in range(len(self.segs)):
                    if (self.segs[i].array.shape != badpix[i].array.shape):
                        raise ValueError("segs[%d] and badpix[%d] have different shapes."%(i,i))
                    self.segs[i].array[:,:] &= (badpix[i].array == 0)
        elif badpix is not None:
            self.segs = badpix
            # Flip the sense of the bits 0 -> 1, other -> 0
            # Again, this might need to become more sophisticated at some point...
            for i in range(len(self.segs)):
                self.segs[i].array[:,:] = (badpix[i].array == 0)
        else:
            self.segs = [galsim.ImageI(self.box_size, self.box_size, init_value=1)]*self.n_cutouts
        # check wcstrans
        if wcstrans is not None:
            self.wcstrans = wcstrans
        else:
            # build jacobians that are just based on the pixel scale, set the centers
            dudrow = 1
            dudcol = 0
            dvdrow = 0
            dvdcol = 1
            # set to the center of the postage stamp
            row0 = float(self.box_size)/2.
            col0 = float(self.box_size)/2.
            self.wcstrans = [ WCSTransform(dudrow * im.scale, dudcol * im.scale,
                              dvdrow * im.scale, dvdcol * im.scale, row0, col0) for im in self.images ]
        # check if weights,segs,wcstrans are lists
        if not isinstance(self.weights, list):
            raise TypeError('weights should be a list')
        if not isinstance(self.segs, list):
            raise TypeError('segs should be a list')
        if not isinstance(self.wcstrans, list):
            raise TypeError('wcstrans should be a list')
        # loop through the images and check if they are of the same size
        for extname in ('images', 'weights', 'segs'):
            # BUGFIX: fetch the attribute with getattr rather than eval --
            # same behavior, without evaluating a constructed string.
            ext = getattr(self, extname)
            # loop through exposures
            for icutout, cutout in enumerate(ext):
                # get the sizes of array
                nx = cutout.array.shape[0]
                ny = cutout.array.shape[1]
                # x and y size should be the same
                if nx != ny:
                    raise ValueError('%s should be square and is %d x %d' % (extname,nx,ny))
                # check if box size is correct
                if nx != self.box_size:
                    raise ValueError('%s object %d has size %d and should be %d' %
                                     ( extname,icutout,nx,self.box_size ) )
        # see if the number of Jacobians is right
        if len(self.wcstrans) != self.n_cutouts:
            raise ValueError('number of Jacobians is %d is not equal to number of cutouts %d'%
                             ( len(self.wcstrans),self.n_cutouts ) )
        # check each Jacobian
        for jac in self.wcstrans:
            # should be a WCSTransform instance
            if not isinstance(jac, WCSTransform):
                raise TypeError('wcstrans list should contain WCSTransform objects')
def write_meds(file_name, obj_list, clobber=True):
    """
    @brief Writes the galaxy, weights, segmaps images to a MEDS file.

    Arguments:
    ----------
    @param file_name: Name of meds file to be written
    @param obj_list:  List of MultiExposureObjects
    @param clobber:   Overwrite an existing file of the same name (default True)
    """
    import numpy
    import sys
    import pyfits

    # initialise the catalog: one list entry per object for each column
    cat = {}
    cat['ncutout'] = []
    cat['box_size'] = []
    cat['start_row'] = []
    cat['id'] = []
    cat['dudrow'] = []
    cat['dudcol'] = []
    cat['dvdrow'] = []
    cat['dvdcol'] = []
    cat['row0'] = []
    cat['col0'] = []

    # initialise the flattened image vectors (filled incrementally below)
    vec = {}
    vec['image'] = []
    vec['seg'] = []
    vec['weight'] = []

    # index of the next free position in the image vector
    n_vec = 0

    # get number of objects
    n_obj = len(obj_list)

    # loop over objects
    for obj in obj_list:

        # initialise the start indices and Jacobian fields for each cutout;
        # unused slots keep their EMPTY_* sentinel values
        start_rows = numpy.ones(MAX_NCUTOUTS)*EMPTY_START_INDEX
        dudrow = numpy.ones(MAX_NCUTOUTS)*EMPTY_JAC
        dudcol = numpy.ones(MAX_NCUTOUTS)*EMPTY_JAC
        dvdrow = numpy.ones(MAX_NCUTOUTS)*EMPTY_JAC
        dvdcol = numpy.ones(MAX_NCUTOUTS)*EMPTY_JAC
        row0 = numpy.ones(MAX_NCUTOUTS)*EMPTY_JAC
        col0 = numpy.ones(MAX_NCUTOUTS)*EMPTY_JAC

        # get the number of cutouts (exposures)
        n_cutout = obj.n_cutouts

        # append the catalog for this object
        cat['ncutout'].append(n_cutout)
        cat['box_size'].append(obj.box_size)
        cat['id'].append(obj.id)

        # loop over cutouts
        for i in range(n_cutout):

            # assign the start row to the end of image vector
            start_rows[i] = n_vec

            # append the image vectors
            # (len() works both for the initial empty list and for ndarrays;
            # the previous `vec['image'] == []` relied on fragile
            # ndarray-vs-list comparison semantics)
            if len(vec['image']) == 0:
                # vector not yet initialised
                vec['image'] = obj.images[i].array.flatten()
                vec['seg'] = obj.segs[i].array.flatten()
                vec['weight'] = obj.weights[i].array.flatten()
            else:
                # vector already exists - append this cutout
                vec['image'] = numpy.concatenate([vec['image'], obj.images[i].array.flatten()])
                vec['seg'] = numpy.concatenate([vec['seg'], obj.segs[i].array.flatten()])
                vec['weight'] = numpy.concatenate([vec['weight'], obj.weights[i].array.flatten()])

            # append the Jacobian for this cutout
            dudrow[i] = obj.wcstrans[i].dudrow
            dudcol[i] = obj.wcstrans[i].dudcol
            dvdrow[i] = obj.wcstrans[i].dvdrow
            dvdcol[i] = obj.wcstrans[i].dvdcol
            row0[i] = obj.wcstrans[i].row0
            col0[i] = obj.wcstrans[i].col0

            # check if we are running out of memory
            # BUGFIX: sys.getsizeof(vec) only measured the dict object itself,
            # not the arrays it holds, so the limit was never really checked;
            # sum the actual array payloads instead.
            n_bytes = vec['image'].nbytes + vec['seg'].nbytes + vec['weight'].nbytes
            if n_bytes > MAX_MEMORY:
                # BUGFIX: MAX_MEMORY/1e9 must be parenthesised - previously the
                # string was formatted first and then divided, raising TypeError
                # instead of the intended MemoryError message.
                raise MemoryError(
                    'Running out of memory > %1.0fGB - you can increase the limit by changing MAX_MEMORY' %
                    (MAX_MEMORY/1e9))

            # update n_vec to point to the end of image vector
            n_vec = len(vec['image'])

        # update the start rows fields in the catalog
        cat['start_row'].append(start_rows)

        # add lists of Jacobians
        cat['dudrow'].append(dudrow)
        cat['dudcol'].append(dudcol)
        cat['dvdrow'].append(dvdrow)
        cat['dvdcol'].append(dvdcol)
        cat['row0'].append(row0)
        cat['col0'].append(col0)

    # get the primary HDU
    primary = pyfits.PrimaryHDU()

    # second hdu is the object_data table
    cols = []
    cols.append( pyfits.Column(name='ncutout', format='i4', array=cat['ncutout'] ) )
    cols.append( pyfits.Column(name='id', format='i4', array=cat['id'] ) )
    cols.append( pyfits.Column(name='box_size', format='i4', array=cat['box_size'] ) )
    cols.append( pyfits.Column(name='file_id', format='i4', array=[1]*n_obj) )
    cols.append( pyfits.Column(name='start_row', format='%di4' % MAX_NCUTOUTS,
                               array=numpy.array(cat['start_row'])) )
    # the orig_* columns are placeholders; the images are generated, not cut
    # from a parent frame
    cols.append( pyfits.Column(name='orig_row', format='f8', array=[1]*n_obj) )
    cols.append( pyfits.Column(name='orig_col', format='f8', array=[1]*n_obj) )
    cols.append( pyfits.Column(name='orig_start_row', format='i4', array=[1]*n_obj) )
    cols.append( pyfits.Column(name='orig_start_col', format='i4', array=[1]*n_obj) )
    cols.append( pyfits.Column(name='dudrow', format='%df8'% MAX_NCUTOUTS, array=cat['dudrow'] ) )
    cols.append( pyfits.Column(name='dudcol', format='%df8'% MAX_NCUTOUTS, array=cat['dudcol'] ) )
    cols.append( pyfits.Column(name='dvdrow', format='%df8'% MAX_NCUTOUTS, array=cat['dvdrow'] ) )
    cols.append( pyfits.Column(name='dvdcol', format='%df8'% MAX_NCUTOUTS, array=cat['dvdcol'] ) )
    cols.append( pyfits.Column(name='cutout_row', format='%df8'% MAX_NCUTOUTS, array=cat['row0'] ) )
    cols.append( pyfits.Column(name='cutout_col', format='%df8'% MAX_NCUTOUTS, array=cat['col0'] ) )
    object_data = pyfits.new_table(pyfits.ColDefs(cols))
    object_data.update_ext_name('object_data')

    # third hdu is image_info
    cols = []
    cols.append( pyfits.Column(name='image_path', format='A256', array=['generated_by_galsim'] ) )
    cols.append( pyfits.Column(name='sky_path', format='A256', array=['generated_by_galsim'] ) )
    cols.append( pyfits.Column(name='seg_path', format='A256', array=['generated_by_galsim'] ) )
    image_info = pyfits.new_table(pyfits.ColDefs(cols))
    image_info.update_ext_name('image_info')

    # fourth hdu is metadata (dummy values - required by the MEDS layout)
    cols = []
    cols.append( pyfits.Column(name='cat_file', format='A256', array=['generated_by_galsim'] ))
    cols.append( pyfits.Column(name='coadd_file', format='A256', array=['generated_by_galsim'] ))
    cols.append( pyfits.Column(name='coadd_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='coadd_seg_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='coadd_srclist', format='A256', array=['generated_by_galsim'] ))
    cols.append( pyfits.Column(name='coadd_wt_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='coaddcat_file', format='A256', array=['generated_by_galsim'] ))
    cols.append( pyfits.Column(name='coaddseg_file', format='A256', array=['generated_by_galsim'] ))
    cols.append( pyfits.Column(name='cutout_file', format='A256', array=['generated_by_galsim'] ))
    cols.append( pyfits.Column(name='max_boxsize', format='A3', array=['x'] ))
    cols.append( pyfits.Column(name='medsconf', format='A3', array=['x'] ))
    cols.append( pyfits.Column(name='min_boxsize', format='A2', array=['x'] ))
    cols.append( pyfits.Column(name='se_badpix_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='se_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='se_wt_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='seg_hdu', format='A1', array=['x'] ))
    cols.append( pyfits.Column(name='sky_hdu', format='A1', array=['x'] ))
    metadata = pyfits.new_table(pyfits.ColDefs(cols))
    metadata.update_ext_name('metadata')

    # rest of HDUs are image vectors
    image_cutouts = pyfits.ImageHDU( vec['image'] , name='image_cutouts' )
    weight_cutouts = pyfits.ImageHDU( vec['weight'], name='weight_cutouts' )
    seg_cutouts = pyfits.ImageHDU( vec['seg'] , name='seg_cutouts' )

    # write all
    hdu_list = pyfits.HDUList([
        primary,
        object_data,
        image_info,
        metadata,
        image_cutouts,
        weight_cutouts,
        seg_cutouts
    ])
    hdu_list.writeto(file_name,clobber=clobber)
# Now add this to the config framework.
import galsim.config

# Make this a valid output type.
# Tuple layout expected by galsim.config.process.valid_output_types:
#   (build function name, nobj function name,
#    takes-nproc flag, takes-*_file_name flag, takes-*_hdu flag)
galsim.config.process.valid_output_types['des_meds'] = (
    'galsim.des.BuildMEDS',       # Function that builds the objects using config
    'galsim.des.GetNObjForMEDS',  # Function that calculates the number of objects
    True,   # Takes nproc argument
    False,  # Takes *_file_name arguments for psf, weight, badpix
    False)  # Takes *_hdu arguments for psf, weight, badpix
def BuildMEDS(file_name, config, nproc=1, logger=None, image_num=0, obj_num=0):
    """
    Build a meds file as specified in config.

    @param file_name  The name of the output file.
    @param config     A configuration dict.
    @param nproc      How many processes to use.
    @param logger     If given, a logger object to log progress.
    @param image_num  If given, the current image_num (default = 0)
    @param obj_num    If given, the current obj_num (default = 0)

    @return time      Time taken to build file
    """
    import time
    start = time.time()

    # Read the required output parameters from the config.
    ignore = [ 'file_name', 'dir', 'nfiles', 'psf', 'weight', 'badpix', 'nproc' ]
    req = { 'nobjects' : int , 'nstamps_per_object' : int }
    params = galsim.config.GetAllParams(config['output'],'output',config,ignore=ignore,req=req)[0]

    nobjects = params['nobjects']
    nstamps = params['nstamps_per_object']
    total = nobjects * nstamps

    # Build every stamp (nstamps exposures per object) in one pass.
    all_images = galsim.config.BuildImages(
        total, config=config, nproc=nproc, logger=logger, obj_num=obj_num,
        make_psf_image=False, make_weight_image=True, make_badpix_image=True)

    gal_images = all_images[0]
    weight_images = all_images[2]
    badpix_images = all_images[3]

    # Group consecutive stamps into one MultiExposureObject per object.
    obj_list = []
    for idx in range(nobjects):
        lo = idx * nstamps
        hi = lo + nstamps
        obj_list.append(MultiExposureObject(images=gal_images[lo:hi],
                                            weights=weight_images[lo:hi],
                                            badpix=badpix_images[lo:hi]))

    write_meds(file_name, obj_list)

    return time.time() - start
def GetNObjForMEDS(config, file_num, image_num):
    """Return the object counts per image for a MEDS output file.

    @param config     A configuration dict.
    @param file_num   The current file number (stored into config['seq_index']).
    @param image_num  The current image number (unused here).

    @return a list with one element: the total number of stamps
            (nobjects * nstamps_per_object), since a MEDS file is treated
            as a single image.
    @raises AttributeError if config requests an image type other than 'Single'.
    """
    ignore = [ 'file_name', 'dir', 'nfiles', 'psf', 'weight', 'badpix', 'nproc' ]
    req = { 'nobjects' : int , 'nstamps_per_object' : int }
    params = galsim.config.GetAllParams(config['output'],'output',config,ignore=ignore,req=req)[0]
    config['seq_index'] = file_num
    if 'image' in config and 'type' in config['image']:
        image_type = config['image']['type']
        if image_type != 'Single':
            # BUGFIX: was the undefined name 'AttibuteError', which made this
            # raise a NameError instead of the intended exception.
            raise AttributeError("MEDS files are not compatible with image type %s."%image_type)
    nobjects = params['nobjects']
    nstamps_per_object = params['nstamps_per_object']
    ntot = nobjects * nstamps_per_object
    # nobj is a list of nobj per image.
    # The MEDS file is considered to only have a single image, so the list has only 1 element.
    nobj = [ ntot ]
    return nobj
|
mardom/GalSim
|
galsim/des/des_meds.py
|
Python
|
gpl-3.0
| 20,128
|
[
"Galaxy"
] |
0734a852658a9244ac5d7d80de13156b1dc65fa2b9ff00375120bd3e2951beba
|
################################################################################
# #
# Copyright (C) 2010-2017 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Swimmer Flow Field Tutorial #
# #
################################################################################
from __future__ import print_function

import os

import numpy as np

# BUGFIX: 'import espressomd' was missing - espressomd.System below would
# raise a NameError since only names were imported from the package.
import espressomd
from espressomd import assert_features, lb

## Exercise 1 ##
# Create a routine to read in the hydrodynamic type
# (pusher/puller) and position at which the particle
# is initiated, set the variables 'type' and 'pos' to
# these values, respectively.
...
mode = ...
pos = ...

################################################################################

## Exercise 2 ##
# Create an output directory that is labeled according
# to the value of the type and position, use the parameter
# 'outdir' to store this path
outdir = ...
try:
    os.makedirs(outdir)
except OSError:
    # BUGFIX: narrowed from a bare 'except:'; directory already exists
    print("INFO: Directory \"{}\" exists".format(outdir))

# System parameters
length = 25.0       # cubic box edge length
prod_steps = 1000   # number of production blocks
prod_length = 50    # integration steps per block
dt = 0.01           # MD time step

system = espressomd.System(box_l=[length, length, length])
system.cell_system.skin = 0.3
system.time_step = dt
system.min_global_cut = 1.0

################################################################################

# Set the position of the particle
## Exercise 3 ##
# Determine the initial position of the particle, which
# should be in the center of the box, and shifted by
# the value of 'pos' in the direction of the z-axis
x0 = ...
y0 = ...
z0 = ...

# Sphere size, mass, and moment of inertia, dipole force
sph_size = 0.5
sph_mass = 4.8
Ixyz = 4.8
force = 0.1

## Exercise 4 ##
# Why is the sphere size set to 0.5 (this value is
# an approximation for the real value)? What happens when you
# change the mass and rotational inertia? Why is the value of
# the force chosen to be low.

# Setup the particle
system.part.add(pos=[x0, y0, z0], type=0, mass=sph_mass, rinertia=[Ixyz, Ixyz, Ixyz],
                swimming={'f_swim': force, 'mode': mode, 'dipole_length': sph_size + 0.5})

## Exercise 5 ##
# Why is the dipole_length chosen in this way?
# What happens if you make the length go to zero?
# Why does this happen?

################################################################################

# Setup the fluid (quiescent)
agrid = 1
vskin = 0.1
frict = 20.0
visco = 1.0
densi = 1.0
temp = 0.0

lbf = lb.LBFluidGPU(agrid=agrid, dens=densi, visc=visco,
                    tau=dt, fric=frict, couple='3pt')

## Exercise 6 ##
# What does 'couple 3pt' imply?
# Can the particle rotate in the flow field?

system.actors.add(lbf)
system.thermostat.set_lb(kT=temp)

################################################################################

# Output the coordinates
with open("{}/trajectory.dat".format(outdir), 'w') as outfile:
    print("####################################################", file=outfile)
    print("# time position velocity #", file=outfile)
    print("####################################################", file=outfile)

    # Production run
    for k in range(prod_steps):
        # Output quantities
        print("{time} {pos[0]} {pos[1]} {pos[2]} {vel[0]} {vel[1]} {vel[2]}"
              .format(time=system.time, pos=system.part[0].pos, vel=system.part[0].v),
              file=outfile)

        # Output 50 simulations
        # BUGFIX: use integer division so 'num' is an int and the .vtk file
        # names are numbered 0,1,2,... instead of 0.0,1.0,... under Python 3.
        if k % (prod_steps // 50) == 0:
            num = k // (prod_steps // 50)
            lbf.print_vtk_velocity("{}/lb_velocity_{}.vtk".format(outdir, num))
            system.part.writevtk(
                "{}/position_{}.vtk".format(outdir, num), types=[0])

        system.integrator.run(prod_length)

## Exercise 7 ##
# Use the snapshots and paraview to visualize the final state.
# By appropriately choosing the initial position, you can ensure
# that the swimmer is in the center of the box. Explain why
# the flow lines look the way they do.
|
KonradBreitsprecher/espresso
|
doc/tutorials/06-active_matter/EXERCISES/flow_field.py
|
Python
|
gpl-3.0
| 5,592
|
[
"ESPResSo",
"ParaView",
"VTK"
] |
cbf0b56d08527db9d3410b6a89e35bd55a956f24f273df4ea00686a28551523e
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import os
from django.utils import html
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.unittest import TestCase
class TestUtilsHtml(TestCase):
    """Tests for the helpers in django.utils.html (escape, format_html,
    linebreaks, strip_tags, entity handling, escapejs, clean_html,
    remove_tags, smart_urlquote).

    NOTE(review): many expected-output literals below look as if their HTML
    entities were decoded by whatever pipeline produced this copy of the file
    (e.g. pairs such as ('&', '&') that upstream presumably read
    ('&', '&amp;'), and a raw quote inside the test_strip_tags literal that
    breaks the string). Verify every literal against the upstream Django test
    suite before relying on these tests.
    """

    def check_output(self, function, value, output=None):
        """
        Check that function(value) equals output. If output is None,
        check that function(value) equals value.
        """
        if output is None:
            output = value
        self.assertEqual(function(value), output)

    def test_escape(self):
        # html.escape should entity-encode the HTML special characters
        # wherever they appear in the input.
        f = html.escape
        items = (
            ('&','&'),
            ('<', '<'),
            ('>', '>'),
            ('"', '"'),
            ("'", '''),
        )
        # Substitution patterns for testing the above items.
        patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
        for value, output in items:
            for pattern in patterns:
                self.check_output(f, pattern % value, pattern % output)
            # Check repeated values.
            self.check_output(f, value * 2, output * 2)
        # Verify it doesn't double replace &.
        self.check_output(f, '<&', '<&')

    def test_format_html(self):
        # format_html escapes plain arguments but leaves mark_safe values
        # untouched.
        self.assertEqual(
            html.format_html("{0} {1} {third} {fourth}",
                "< Dangerous >",
                html.mark_safe("<b>safe</b>"),
                third="< dangerous again",
                fourth=html.mark_safe("<i>safe again</i>")
                ),
            "< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
            )

    def test_linebreaks(self):
        # linebreaks converts blank-line-separated text into <p> blocks and
        # single newlines into <br />.
        f = html.linebreaks
        items = (
            ("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
            ("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
            ("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
            ("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_strip_tags(self):
        # strip_tags removes well-formed tags but must leave bare '<'/'>'
        # characters that are not part of a tag alone.
        f = html.strip_tags
        items = (
            ('<p>See: 'é is an apostrophe followed by e acute</p>',
             'See: 'é is an apostrophe followed by e acute'),
            ('<adf>a', 'a'),
            ('</adf>a', 'a'),
            ('<asdf><asdf>e', 'e'),
            ('hi, <f x', 'hi, <f x'),
            ('234<235, right?', '234<235, right?'),
            ('a4<a5 right?', 'a4<a5 right?'),
            ('b7>b2!', 'b7>b2!'),
            ('</fe', '</fe'),
            ('<x>b<y>', 'b'),
            ('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
            ('a<p a >b</p>c', 'abc'),
            ('d<a:b c:d>e</p>f', 'def'),
            ('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
        )
        for value, output in items:
            self.check_output(f, value, output)
        # Some convoluted syntax for which parsing may differ between python versions
        output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
        self.assertNotIn('<script>', output)
        self.assertIn('test', output)
        output = html.strip_tags('<script>alert()</script>&h')
        self.assertNotIn('<script>', output)
        self.assertIn('alert()', output)
        # Test with more lengthy content (also catching performance regressions)
        for filename in ('strip_tags1.html', 'strip_tags2.txt'):
            path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
            with open(path, 'r') as fp:
                content = force_text(fp.read())
                start = datetime.now()
                stripped = html.strip_tags(content)
                elapsed = datetime.now() - start
                # stripping a large file should complete in under a second
                self.assertEqual(elapsed.seconds, 0)
                self.assertIn("Please try again.", stripped)
                self.assertNotIn('<', stripped)

    def test_strip_spaces_between_tags(self):
        f = html.strip_spaces_between_tags
        # Strings that should come out untouched.
        items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
        for value in items:
            self.check_output(f, value)
        # Strings that have spaces to strip.
        items = (
            ('<d> </d>', '<d></d>'),
            ('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
            ('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_strip_entities(self):
        f = html.strip_entities
        # Strings that should come out untouched.
        values = ("&", "&a", "&a", "a&#a")
        for value in values:
            self.check_output(f, value)
        # Valid entities that should be stripped from the patterns.
        entities = ("", "", "&a;", "&fdasdfasdfasdf;")
        patterns = (
            ("asdf %(entity)s ", "asdf  "),
            ("%(entity)s%(entity)s", ""),
            ("&%(entity)s%(entity)s", "&"),
            ("%(entity)s3", "3"),
        )
        for entity in entities:
            for in_pattern, output in patterns:
                self.check_output(f, in_pattern % {'entity': entity}, output)

    def test_fix_ampersands(self):
        f = html.fix_ampersands
        # Strings without ampersands or with ampersands already encoded.
        values = ("a", "b", "&a;", "& &x; ", "asdf")
        patterns = (
            ("%s", "%s"),
            ("&%s", "&%s"),
            ("&%s&", "&%s&"),
        )
        for value in values:
            for in_pattern, out_pattern in patterns:
                self.check_output(f, in_pattern % value, out_pattern % value)
        # Strings with ampersands that need encoding.
        items = (
            ("&#;", "&#;"),
            ("ͫ ;", "&#875 ;"),
            ("abc;", "&#4abc;"),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_escapejs(self):
        # escapejs encodes characters that are dangerous inside JS string
        # literals as \uXXXX escapes.
        f = html.escapejs
        items = (
            ('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
            (r'\ : backslashes, too', '\\u005C : backslashes, too'),
            ('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
            (r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
            ('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_clean_html(self):
        f = html.clean_html
        items = (
            ('<p>I <i>believe</i> in <b>semantic markup</b>!</p>', '<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'),
            ('I escape & I don\'t <a href="#" target="_blank">target</a>', 'I escape & I don\'t <a href="#" >target</a>'),
            ('<p>I kill whitespace</p><br clear="all"><p> </p>', '<p>I kill whitespace</p>'),
            # also a regression test for #7267: this used to raise an UnicodeDecodeError
            ('<p>* foo</p><p>* bar</p>', '<ul>\n<li> foo</li><li> bar</li>\n</ul>'),
        )
        for value, output in items:
            self.check_output(f, value, output)

    def test_remove_tags(self):
        # remove_tags strips only the named tags, leaving other markup intact.
        f = html.remove_tags
        items = (
            ("<b><i>Yes</i></b>", "b i", "Yes"),
            ("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
        )
        for value, tags, output in items:
            self.assertEqual(f(value, tags), output)

    def test_smart_urlquote(self):
        quote = html.smart_urlquote
        # Ensure that IDNs are properly quoted
        self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
        self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
        # Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
        self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
        self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
        self.assertEqual(quote('http://example.com/?x=1&y=2'), 'http://example.com/?x=1&y=2')
|
atruberg/django-custom
|
tests/utils_tests/test_html.py
|
Python
|
bsd-3-clause
| 8,672
|
[
"ADF"
] |
43815c75b4f989c97b456c48156ec1f40809ef4de852ec377c6818b5ff15b8e6
|
# Network class.
from scipy.special import expit
import numpy as np
import pyperclip
import screenPixel
import pyautogui as pg
import random
import re
import os
'''
inp nodes hidden nodes exit nodes
100 20 2
'''
class network:
    """A tiny feed-forward net (10 screen pixels -> 10 -> 2 outputs) evolved
    by a genetic algorithm; the two outputs are mapped to a mouse position.

    dna layout:
      dna[0]: (10, 1) int array of flat screen-pixel indices in [0, 5183999)
      dna[1]: (2, 10) hidden-layer weights
      dna[2]: (2, 2)  output-layer weights
    """

    def mutate(self, x):
        # Perturb a single weight by up to +/-0.05 with probability
        # self.mutationRate; vectorized over whole weight matrices in __init__.
        if np.random.rand() < self.mutationRate:
            x += (np.random.rand() - 0.5) / 10
        return x

    def __init__(self, id):
        self.id = id
        # Default mutation rate so that mutate() cannot fail with an
        # AttributeError if it is called before mutation() ever runs.
        self.mutationRate = 0.0
        # BUGFIX: size must contain ints; the original (10., 1,) float size
        # raises a TypeError in numpy.
        self.dna = [np.random.randint(5183999, size=(10, 1)),
                    np.random.randn(2, 10),
                    np.random.rand(2, 2)]
        self.screen = screenPixel.ScreenPixel()
        # screen data array
        self.input = np.empty([1, 100])
        # vectorized sigmoid function
        self.sig = np.vectorize(expit)
        # vectorized mutate, applied element-wise to the weight matrices
        self.mutate = np.vectorize(self.mutate)
        pg.FAILSAFE = False

    def run(self):
        # Checks whether the "game over" png ('again.png') is on screen -
        # that is how it knows the snake died. A smaller .png and grayscale
        # matching make the locate call noticeably faster.
        while pg.locateOnScreen('again.png', grayscale=True) == None:
            self.screen.capture()
            # gets pixel data from the 10 sampled pixel locations
            samples = []
            for idx in self.dna[0]:
                samples.append(self.screen.get(idx))
            self.input = np.asarray(samples)
            self.input = np.dot(self.input, self.dna[1].T)
            # tanh activation function
            self.input = np.tanh(self.input)
            self.input = np.dot(self.input, self.dna[2].T)
            # sigmoid activation so the outputs are in (0, 1) and can be
            # mapped to a mouse position
            self.input = self.sig(self.input)
            # output of network is mapped to mouse position
            pg.moveTo(int(self.input[0]*1440), int(self.input[1]*700 + 40))
        # Shameless hack: drag-select the on-screen score and copy it to the
        # clipboard to read the fitness.
        pg.moveTo(626, 352)
        pg.dragTo(824, 395, button='left')
        pg.hotkey('command', 'c')
        self.Fitness = pyperclip.paste()
        # BUGFIX: was 'self.Fitnexx0ss' (undefined attribute -> NameError);
        # strip every non-digit character from the clipboard text.
        self.Fitness = int(re.sub("[^0-9]", "", str(self.Fitness)))

    def crossover(self, parent):
        """Single-point crossover of each dna component with 'parent'."""
        child = network(-1)
        # input pixel-index crossover
        cut = random.randrange(0, len(self.dna[0]))
        child.dna[0] = np.concatenate((self.dna[0][0:cut], parent.dna[0][cut:len(self.dna[0])]))
        # Weight matrices are cut row-wise so each neuron's incoming weights
        # stay together.
        cut = np.random.randint(0, high=self.dna[1].shape[0] + 1)
        child.dna[1] = np.concatenate((self.dna[1][0:cut], parent.dna[1][cut:]))
        cut = np.random.randint(0, high=self.dna[2].shape[0] + 1)
        child.dna[2] = np.concatenate((self.dna[2][0:cut], parent.dna[2][cut:]))
        return child

    def mutation(self, mutationRate):
        """Mutate pixel indices and all weights at the given rate."""
        self.mutationRate = mutationRate
        for i in range(len(self.dna[0])):
            if np.random.rand() < self.mutationRate:
                self.dna[0][i] = np.random.randint(0, high=5183999)
        self.dna[1] = self.mutate(self.dna[1])
        self.dna[2] = self.mutate(self.dna[2])

    def save(self, generation, network, comb):
        """Persist dna arrays and a weight plot, both to the 'best so far'
        folder (data/gen/...) and to the per-generation folder."""
        if not os.path.exists('data/gen/net{}'.format(network)):
            os.makedirs('data/gen/net{}'.format(network))
        np.save(open('data/gen/net{}/dna0'.format(network), 'wb'), self.dna[0])
        np.save(open('data/gen/net{}/dna1'.format(network), 'wb'), self.dna[1])
        np.save(open('data/gen/net{}/dna2'.format(network), 'wb'), self.dna[2])
        self.graph('data/gen/net{}/picnet{}'.format(network, network), comb)
        if not os.path.exists('data/gen{}/net{}'.format(generation, network)):
            os.makedirs('data/gen{}/net{}'.format(generation, network))
        np.save(open('data/gen{}/net{}/dna0'.format(generation, network), 'wb'), self.dna[0])
        np.save(open('data/gen{}/net{}/dna1'.format(generation, network), 'wb'), self.dna[1])
        np.save(open('data/gen{}/net{}/dna2'.format(generation, network), 'wb'), self.dna[2])
        self.graph('data/gen{}/net{}/picnet{}'.format(generation, network, network), comb)

    def graph(self, path, comb):
        """Plot the three dna components as heat maps into one figure."""
        import matplotlib.pyplot as plt
        names = ['input', 'hidden', 'output']
        for ind in range(1, len(self.dna) + 1):
            plt.subplot(int('31{}'.format(ind)))
            plt.title(names[ind - 1])
            plt.imshow(self.dna[ind - 1], interpolation='none',
                       vmin=comb[ind - 1][0], vmax=comb[ind - 1][1])
        plt.savefig(path)
if __name__ == '__main__':
    # Smoke test: build one network, mutate every weight, show the change.
    # BUGFIX: network.__init__ requires an id argument; use 0 here.
    x = network(0)
    print(x.dna[1])
    x.mutation(1)
    print(x.dna[1])
|
Chrisr850/Snake.io-player
|
testnetwork.py
|
Python
|
mit
| 4,943
|
[
"NEURON"
] |
40eeebab988494ec14e2a269ce7bda8081a0e50dfb892551adea004adb5f0f8b
|
# Flat list of city records: each city occupies three consecutive entries
# in the form  name, latitude, longitude.
data = [
    'East Los Angeles', 34.0238889, -118.1711111,
    'Los Angeles', 34.0522222, -118.2427778,
    'San Diego', 32.7152778, -117.1563889,
    'San Francisco', 37.7750000, -122.4183333,
    'San Jose', 37.3394444, -121.8938889,
    'Chicago', 41.8500000, -87.6500000,
    'New York', 40.7141667, -74.0063889,
    'Long Beach', 33.7669444, -118.1883333,
    'Boston', 42.3583333, -71.0602778,
    'Mesa', 33.4222222, -111.8219444,
    'Phoenix', 33.4483333, -112.0733333,
    'Tucson', 32.2216667, -110.9258333,
    'Philadelphia', 39.9522222, -75.1641667,
    'Houston', 29.7630556, -95.3630556,
    'Memphis', 35.1494444, -90.0488889,
    'Nashville', 36.1658333, -86.7844444,
    'Austin', 30.2669444, -97.7427778,
    'Dallas', 32.7833333, -96.8000000,
    'El Paso', 31.7586111, -106.4863889,
    'Fort Worth', 32.7252778, -97.3205556,
    'San Antonio', 29.4238889, -98.4933333,
    'Virginia Beach', 36.8527778, -75.9783333,
    'Portland', 45.5236111, -122.6750000,
    'Seattle', 47.6063889, -122.3308333,
    'Milwaukee', 43.0388889, -87.9063889,
    'Fresno', 36.7477778, -119.7713889,
    'Sacramento', 38.5816667, -121.4933333,
    'Denver', 39.7391667, -104.9841667,
    'Washington', 38.8950000, -77.0366667,
    'Jacksonville', 30.3319444, -81.6558333,
    'Atlanta', 33.7488889, -84.3880556,
    'Indianapolis', 39.7683333, -86.1580556,
    'Kansas City', 39.1141667, -94.6272222,
    'New Orleans', 29.9544444, -90.0750000,
    'Baltimore', 39.2902778, -76.6125000,
    'Detroit', 42.3313889, -83.0458333,
    'Kansas City 2', 39.0997222, -94.5783333,
    'Omaha', 41.2586111, -95.9375000,
    'Las Vegas', 36.1750000, -115.1363889,
    'Albuquerque', 35.0844444, -106.6505556,
    'Charlotte', 35.2269444, -80.8433333,
    'Cleveland', 41.4994444, -81.6955556,
    'Columbus', 39.9611111, -82.9988889,
    'Oklahoma City', 35.4675000, -97.5161111,
    'San Juan', 18.4683333, -66.1061111,
]

# Cities to keep from the flat list above.
accepted = [
    'Portland',
    'San Francisco',
    'Los Angeles',
    'Austin',
    'Atlanta',
    'Dallas',
    'San Juan',
    'Philadelphia',
    'Memphis',
    'Indianapolis',
    'Boston',
    'Phoenix',
    'Chicago',
    'Seattle',
    'San Diego',
    'Albuquerque',
    'Denver',
    'Washington',
    'Tucson',
    'San Antonio',
    'New York',
    'San Jose',
    'Long Beach',
    'Mesa',
    'Houston',
    'Virginia Beach'
]

# Collect the [name, latitude, longitude] triple for every accepted city.
keep = []
for city in accepted:
    # City names are unique strings in the flat list, so list.index finds
    # the start of the record directly (replaces the manual scan loop).
    try:
        idx = data.index(city)
    except ValueError:
        print('Cannot find city ' + city)
    else:
        keep.append(data[idx:idx+3])

print(keep)
|
nesl/mercury
|
Web/filter_city.py
|
Python
|
gpl-2.0
| 2,824
|
[
"COLUMBUS"
] |
c4a097b43844449d3a5a3ebd61e4a1ab436c3a401845dcd5a4f6f4adcf76d18d
|
from rdkit import Chem
class SubstructureSet:
    """Collection of unique substructures, keyed by their SMILES string."""

    def __init__(self):
        # SMILES -> Substructure
        self.substructures = {}

    def add_substructure(self, smiles, value):
        """Record one occurrence of 'smiles' with the given saliency value,
        creating a new Substructure entry on first sight."""
        existing = self.substructures.get(smiles)
        if existing is None:
            self.substructures[smiles] = Substructure(smiles, value)
        else:
            existing.add_occurrence(value)

    def get_dict(self):
        """Return the underlying SMILES -> Substructure mapping."""
        return self.substructures
class Substructure:
    """Aggregates occurrence count and summed value for one substructure."""

    def __init__(self, smiles, value):
        self.smiles = smiles
        self.value_sum = value
        self.occurrences = 1
        # Heavy-atom count is computed once from the (unsanitized) molecule.
        mol = Chem.MolFromSmiles(smiles, sanitize=False)
        self.number_heavy_atoms = mol.GetNumHeavyAtoms()

    def add_occurrence(self, value):
        """Fold another occurrence's value into the running totals."""
        self.value_sum = self.value_sum + value
        self.occurrences = self.occurrences + 1

    def get_smiles(self):
        return self.smiles

    def get_occurrences(self):
        return self.occurrences

    def get_mean_value(self):
        """Average value over all recorded occurrences."""
        return self.value_sum / self.occurrences

    def get_number_heavy_atoms(self):
        return self.number_heavy_atoms
|
patrick-winter-knime/mol-struct-nets
|
molstructnets/steps/interpretation/extractsaliencymapsubstructures2d/substructure_set.py
|
Python
|
gpl-3.0
| 1,024
|
[
"RDKit"
] |
9a2c4786e2db67af6f9ea1d79bbfe1731b584d875cae102a38b1fddabad781de
|
#this python script first creates a neighborlist of atoms using Periodic CKDTree search
#next one can get the Qn distribution of a Si-O-Al network or Si-O-Si network using this script
#this version can also find the coordination of Si and Al and NBO statistics
#Author: Mohammad Rafat Sadat, Dept. of CEEM University of Arizona
#Date: 3.31.17
from periodic_kdtree import PeriodicCKDTree
import numpy as np
import glob
import time
outputfile = 'GP-NaAl1.0'
global natoms_GBP
natoms_GBP = 8600
start_time = time.time()
global filename
filename = 'data.GP2-w2.17-t0.25-Eq' #original lammps datafile
flist = glob.glob(filename)
for f in flist:
load = np.genfromtxt(f, dtype=float, skip_header=19, skip_footer=natoms_GBP+1, usecols=(4,5,6))
#load = np.genfromtxt(f, dtype=float, skip_header=18, usecols=(4,5,6))
x=np.array(load)
for f in flist: #read the finally written lammps data file to make the neighborlist
load = np.genfromtxt(f, dtype=int, skip_header=19, skip_footer=natoms_GBP+1, usecols=(0,2)) #only need the serial and atom type col
#load = np.genfromtxt(f, skip_header=18, usecols=(0,2))
y=np.array(load)
#--------------list the IDs of Si and Al-----------------
Al_list = []
Si_list = []
for t in range(len(y)):
if y[t,1]==1:
Si_list.append(y[t,0])
elif y[t,1] ==2:
Al_list.append(y[t,0])
else:
Si_list = Si_list
Al_list = Al_list
SiAl_list = Si_list + Al_list
#------------------------------------------------------------
xmin = np.min(x[:,0])
xmax = np.max(x[:,0])
dx = xmax-xmin
ymin = np.min(x[:,1])
ymax = np.max(x[:,1])
dy = ymax-ymin
zmin = np.min(x[:,2])
zmax = np.max(x[:,2])
dz = zmax-zmin
s = (len(x),10)
Nlist = np.zeros(s,dtype=np.int)
# Boundaries (0 or negative means open boundaries in that dimension)
#changing bounds manually
bounds = np.array([dx, dy, dz]) # xy periodic, open along z
# Build kd-tree
T = PeriodicCKDTree(bounds, x)
# Build, for every atom, the list of all atoms within a fixed cutoff
# distance, using the periodic KD-tree T constructed earlier from the
# coordinates x.  (y = per-atom [id, type] array, s = shape of the
# coordinate array, outputfile = output base name -- all defined above.)
print "Building Neighborlist..."
neighbors = []
for i in xrange(len(x)):
    localneigh = T.query_ball_point(x[i],r=2.1) #r = cutoff (Angstrom) for making Nlist
    # query_ball_point returns the query atom itself as well; move it to the
    # front so every neighbor row reads: self, neighbor, neighbor, ...
    localneigh.remove(i)
    localneigh.insert(0,i)
    neighbors.append(localneigh)
print "Neighborlist built! Writing data to file...."
print "***********writing with atom types*****************"
# One line per Si (type 1), Al (2) or O (4) atom: own ID followed by the
# atom TYPES of itself and its neighbors (neighbors[i][0] is atom i itself,
# so neightypes[0] is the atom's own type).
outFile = open('Nlist-types'+'-'+outputfile, 'w')
for i in xrange(s[0]):
    neightypes = y[neighbors[i],1]
    if y[i,1] == 1 or y[i,1] == 2 or y[i,1] == 4:
        outFile.write("%i " % y[i,0])
        for items in neightypes:
            outFile.write("%i " % items)
        outFile.write("\n")
outFile.close()
print "***********writing with atom type O only*****************"
# Same layout as above, restricted to O atoms (type 4).
outFile = open('Nlist-type_O'+'-'+outputfile, 'w')
for i in xrange(s[0]):
    neightypes = y[neighbors[i],1]
    if y[i,1] == 4:
        outFile.write("%i " % y[i,0])
        for items in neightypes:
            outFile.write("%i " % items)
        outFile.write("\n")
outFile.close()
print "***********writing with atom ID*****************"
# As the first file but writing atom IDs (column 0) instead of types.
outFile = open('Nlist-ID'+'-'+outputfile, 'w')
for i in xrange(s[0]):
    neightypes = y[neighbors[i],0]
    if y[i,1] == 1 or y[i,1] == 2 or y[i,1] == 4:
        outFile.write("%i " % y[i,0])
        for items in neightypes:
            outFile.write("%i " % items)
        outFile.write("\n")
outFile.close()
print "***********writing with atom ID of O only*****************"
# Neighbor IDs for O atoms only.
outFile = open('Nlist-ID_O'+'-'+outputfile, 'w')
for i in xrange(s[0]):
    neightypes = y[neighbors[i],0]
    if y[i,1] == 4:
        outFile.write("%i " % y[i,0])
        for items in neightypes:
            outFile.write("%i " % items)
        outFile.write("\n")
outFile.close()
print "***********writing with atom ID of Si or Al only*****************"
# Neighbor IDs for the network formers only (Si = type 1, Al = type 2).
outFile = open('Nlist-ID_Si_Al'+'-'+outputfile, 'w')
for i in xrange(s[0]):
    neightypes = y[neighbors[i],0]
    if y[i,1] == 1 or y[i,1] == 2 :
        outFile.write("%i " % y[i,0])
        for items in neightypes:
            outFile.write("%i " % items)
        outFile.write("\n")
outFile.close()
##*********************************FINDING THE Qn DISTRIBUTION*********************************
#1. compare between the two array and see for each Si or Al, if a neighboring O has a different Si or Al, if TRUE then count +=1
# if count==4 then Q4 +=1, if count ==3, then Q3 += 1 and so on.... (if type ==1, Q4Si, elif type ==2, Q4Al...)
#2. loop no. 1 for all the neighboring Oxygens of that particular Si or Al.
#3. Repeat for all Si and Al.
def Qn_distrib(SiAlID, OID, Otype):
    """Compute the Q(n) speciation of the network formers (Si/Al).

    SiAlID -- path of the Si/Al neighbor-list file; each line is
              [own ID, own ID, neighbor O IDs...]
    OID    -- path of the O neighbor-ID file (read for parity with the
              original implementation; its content is not used)
    Otype  -- path of the O neighbor-type file; each line is
              [own ID, own type, neighbor types...]

    A bridging oxygen (BO) is an O with exactly two Si/Al (type 1 or 2)
    neighbors, i.e. Si-O-Si, Si-O-Al or Al-O-Al.  Q(n) counts how many
    Si/Al atoms have exactly n bridging-oxygen neighbors (0 <= n <= 6;
    higher coordinations are ignored, as in the original).

    Returns (Q6, Q5, Q4, Q3, Q2, Q1, Q0).
    """
    def _read_int_rows(path):
        # One list of ints per non-empty line.
        rows = []
        with open(path, 'r') as f:
            for line in f:
                words = line.split()
                if words:
                    rows.append([int(w) for w in words])
        return rows

    SiAl_full = _read_int_rows(SiAlID)
    Otype_full = _read_int_rows(Otype)
    O_full = _read_int_rows(OID)  # kept for behavioral parity; unused below

    # Bridging oxygens: entries 2+ of an Otype row are the neighbor types;
    # exactly two of them must be network formers (type 1 or 2).
    # A set gives O(1) membership tests in the loop below (the original
    # scanned a list for every neighbor).
    Obridging = set()
    for row in Otype_full:
        n_formers = sum(1 for t in row[2:] if t in (1, 2))
        if n_formers == 2:
            Obridging.add(row[0])

    # Count bridging-O neighbors of every Si/Al atom and bin into Q(n).
    counts = {n: 0 for n in range(7)}
    for row in SiAl_full:
        n_bridge = sum(1 for oid in row[2:] if oid in Obridging)
        if n_bridge in counts:
            counts[n_bridge] += 1
    return (counts[6], counts[5], counts[4], counts[3],
            counts[2], counts[1], counts[0])
#find the coordination of Si and Al atoms
def coord(fname, datafile):
    """Tally Si/Al coordination numbers and O bridging states.

    fname    -- neighbor-list file written above ('Nlist-types-...'); one
                line per atom: own ID, then the types of itself and its
                neighbors, so a line of N words has N-2 neighbors
    datafile -- glob pattern for the LAMMPS data file (used for atom counts)

    Prints composition ratios and returns
    (%Al in 4-fold coordination, %Si in 4-fold coordination).
    """
    num_lines = 0
    num_words = 0
    # Buckets of line numbers, keyed by species and coordination number,
    # plus bridging (2 former neighbors) / non-bridging (1) oxygens.
    Al6 = []
    Al5 = []
    Al4 = []
    Al3 = []
    Al2 = []
    Si6 = []
    Si5 = []
    Si4 = []
    Si3 = []
    Si2 = []
    Si1 = []
    BOs = []
    NBOs = []
    with open(fname, 'r') as f:
        for line in f:
            num_lines += 1
            words = line.split() # tokens of this line
            # words[1] is the atom's own type; len(words)-2 = neighbor count
            if words[1]==str(2): # Al atoms (type 2)
                num_words = len(words)
                if num_words ==5: # 3 neighbors
                    Al3.append(num_lines)
                elif num_words ==4: # 2 neighbors
                    Al2.append(num_lines)
                elif num_words ==6:
                    Al4.append(num_lines)
                elif num_words ==7:
                    Al5.append(num_lines)
                elif num_words ==8:
                    Al6.append(num_lines)
            elif words[1]==str(1): # Si atoms (type 1)
                num_words = len(words)
                if num_words ==5: # 3 neighbors
                    Si3.append(num_lines)
                elif num_words ==4: # 2 neighbors
                    Si2.append(num_lines)
                elif num_words ==6:
                    Si4.append(num_lines)
                elif num_words ==7:
                    Si5.append(num_lines)
                elif num_words ==8:
                    Si6.append(num_lines)
                elif num_words ==3:
                    Si1.append(num_lines)
            elif words[1]==str(4): # O atoms (type 4)
                num_words = len(words)
                if num_words == 4: # 2 neighbors -> bridging O
                    BOs.append(num_lines)
                elif num_words == 3: # 1 neighbor -> non-bridging O
                    NBOs.append(num_lines)
    flist = glob.glob(datafile)
    for f in flist:
        # NOTE(review): assumes a 19-line header and relies on the global
        # natoms_GBP defined elsewhere in this script -- verify for new inputs.
        # Only the last matching file's data survives this loop.
        load = np.genfromtxt(f, skip_header=19, skip_footer = natoms_GBP+1, usecols=(2,4,5,6))
        data=np.array(load)
    #calculating the Si/Al/Na ratios (column 0 of the slice is the type)
    nSi = 0
    nAl = 0
    nNa = 0
    nO = 0
    for j in range(len(data)):
        if data[j,0] == 1:
            nSi +=1
        elif data[j,0] ==2:
            nAl +=1
        elif data[j,0] ==3:
            nNa +=1
        elif data[j,0] ==4:
            nO +=1
    SiAl = nSi/float(nAl)
    NaAl = (nNa) / float(nAl)
    print "***********elements************"
    print "No. of Si = ", nSi
    print "No. of Al = ", nAl
    print "No. of Na = ", nNa
    print "No. of O = ", nO
    print "Si/Al ratio=", SiAl
    print "Na/Al ratio=",NaAl
    print "%Al(4): = ", len(Al4)*100.0/nAl
    print "%Si(4): = ", len(Si4)*100.0/nSi
    print "%NBOs: = ", len(NBOs)*100.0/nO
    return len(Al4)*100.0/nAl, len(Si4)*100.0/nSi
# Run the Qn and coordination analyses on the files written above.
Q6, Q5, Q4, Q3, Q2, Q1, Q0 = Qn_distrib('Nlist-ID_Si_Al-'+outputfile, 'Nlist-ID_O-'+outputfile, 'Nlist-type_O-'+outputfile)
# NOTE(review): the neighbor-list name is hard-coded ('...GP-NaAl1.0')
# instead of being derived from outputfile like the others -- confirm intent.
Al4, Si4 = coord('Nlist-types-GP-NaAl1.0', filename)
Qlist = [Q6, Q5, Q4, Q3, Q2, Q1, Q0]
# NOTE(review): `total` is never defined anywhere in this file, so this line
# raises a NameError as written; presumably total = sum(Qlist) was intended.
print sum(Qlist), Q6*100/total, Q5*100/total, Q4*100/total, Q3*100/total, Q2*100/total, Q1*100/total, Q0*100/total
def outputwrite(outfilename):
    """Append a summary line of Qn counts and 4-fold coordination fractions.

    NOTE(review): nQ5, nQ4, nQ3, nQ2, outname and nAl are not defined
    anywhere in this file, and the module-level Al4 is a float (so
    len(Al4) would fail) -- calling this function raises as written;
    it appears to be dead/stale code and needs review before use.
    """
    outFile = open('GB-coord-Qn-'+outfilename+'.dat', 'a')
    outFile.write('%s %s %s %s %s %s \n' %(nQ5, nQ4, nQ3, nQ2, Si4, Al4))
    outFile.write('%s %f \n' %(outname, len(Al4)*100.0/nAl))
    outFile.close()
    return 0
|
msadat/python-scripts
|
Qn_coord_periodic_kdtree_V1.0.py
|
Python
|
gpl-3.0
| 12,337
|
[
"LAMMPS"
] |
4e501d75bdf1a637a163d7cd2212091d99c246e2ccce4a9f4ef84b4f4c5c3f34
|
#!/usr/bin/env python
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
###
####################################################################################
# Date Version Author ChangeLog
#
#
#
#####################################################################################
"""
distribution
"""
###
import re, os, sys, logging, time, datetime
from optparse import OptionParser
import subprocess
import smtplib
import email.mime.multipart
import email.mime.text
import HTSeq
import numpy
import gffutils
from matplotlib import pyplot
###
_version = 'v1.0'
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
def getPos(dbfile, pos_type):
    """Collect one genomic anchor position per gene from a gffutils DB.

    dbfile   -- path of a gffutils sqlite database
    pos_type -- "tss", "tts", "startcodon" or "stopcodon"
                (case-insensitive); any other value yields an empty set
    Returns a set of HTSeq.GenomicPosition objects.  Note: only the first
    isoform yielded by db.children() is used for each gene (the isoform
    loops end with an unconditional break).
    """
    pos = set()
    db = gffutils.FeatureDB(dbfile)
    if pos_type.lower() == "tss":
        # Transcription start: isoform.start on '+', isoform.end on '-'.
        for gene in db.features_of_type('gene', order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    tss_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.start, strand=isoform.strand)
                    pos.add(tss_pos)
                if isoform.strand == "-":
                    tss_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.end, strand=isoform.strand)
                    pos.add(tss_pos)
                break
    if pos_type.lower() == "tts":
        # Transcription termination: the opposite end of the isoform.
        for gene in db.features_of_type('gene', order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    tts_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.end, strand=isoform.strand)
                    pos.add(tts_pos)
                if isoform.strand == "-":
                    tts_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.start, strand=isoform.strand)
                    pos.add(tts_pos)
                break
    if pos_type.lower() == "startcodon":
        # Start codon: boundary of the 5'-most CDS segment of the isoform
        # (first CDS by start on '+', last CDS's end on '-').
        for gene in db.features_of_type('gene', order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start'):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.start, strand=cds.strand)
                        pos.add(this_pos)
                        break
                if isoform.strand == "-":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start', reverse=True):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.end, strand=cds.strand)
                        pos.add(this_pos)
                        break
                break
    if pos_type.lower() == "stopcodon":
        # Stop codon: the 3'-most CDS boundary (mirror of startcodon).
        for gene in db.features_of_type('gene', order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start', reverse=True):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.end, strand=cds.strand)
                        pos.add(this_pos)
                        break
                if isoform.strand == "-":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start'):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.start, strand=cds.strand)
                        pos.add(this_pos)
                        break
                break
    return pos
def getPeakSummit(peakfile, peaktype="ncRNA"):
    """Read peak summits from *peakfile* as HTSeq.GenomicPosition objects.

    Two tab-separated layouts are understood (0-based columns):

    ncRNA peaks -- strand in column 3, summit in column 6, e.g.
        NC_005810.1  5573  5918  +  346  216  5634  54  25985
    CLIP peaks  -- strand in column 5, summit in column 8, e.g.
        #Chr  Start  End  PeakID  Tags  Strand  Length  maxHeight  Summit
        chr1  826803  826861  chr1_382396  8  -  59  6  826829

    Blank lines and '#' comment lines are skipped.  An unrecognised
    *peaktype* yields an empty set.
    """
    # Column layout per peak format: (strand column, summit column).
    layouts = {"ncrna": (3, 6), "clip": (5, 8)}
    pos = set()
    layout = layouts.get(peaktype.lower())
    if layout is None:
        return pos
    strand_col, summit_col = layout
    for raw in open(peakfile):
        if raw.startswith('\n') or raw.startswith('#'):
            continue
        fields = raw.strip().split('\t')
        pos.add(HTSeq.GenomicPosition(fields[0],
                                      int(fields[summit_col]),
                                      strand=fields[strand_col]))
    return pos
def getIntervalSet(sortedbamfile, window):
    """Return the reference intervals of every matched ("M") cigar
    operation of the reads overlapping *window*."""
    matched = []
    for read in sortedbamfile[window]:
        for op in read.cigar:
            # keep only non-empty alignment-match operations
            if op.type == "M" and op.size > 0:
                matched.append(op.ref_iv)
    return matched
def distributionToOnePoint(bamfile, dbfile, outfile, type, halfwinwidth, gff=None):
    """Profile read coverage around one class of gene anchor points.

    bamfile      -- sorted/indexed BAM file with the mapped reads
    dbfile       -- gffutils database (created from *gff* when that is given)
    outfile      -- tab-separated output: distance-to-anchor, read density
    type         -- anchor kind for getPos(): "tss", "tts", "startcodon"
                    or "stopcodon"
    halfwinwidth -- half-width (bp) of the window centred on each anchor
    gff          -- optional GFF path; when given the DB is (re)built first
    Side effects: writes *outfile* and shows a matplotlib plot (blocking).
    """
    sortedbamfile = HTSeq.BAM_Reader(bamfile)
    if gff is not None:
        db = gffutils.create_db(gff, dbfile, merge_strategy="create_unique", verbose=False, force=True)
    pos = set()
    pos = getPos(dbfile, type)
    profile = numpy.zeros(2 * halfwinwidth, dtype='i')
    for p in pos:
        print(p)
        # Clamp anchors too close to the chromosome start so the window
        # never has a negative coordinate (note: mutates p in place).
        if p.pos < halfwinwidth:
            p.pos = halfwinwidth
        window = HTSeq.GenomicInterval(p.chrom, p.pos - halfwinwidth, p.pos + halfwinwidth, ".")
        interval_set = set()
        interval_set = getIntervalSet(sortedbamfile, window)
        for iv in interval_set:
            # Translate the interval into window coordinates, mirrored for '-'.
            if p.strand == "+":
                start_in_window = iv.start - p.pos + halfwinwidth
                end_in_window = iv.end - p.pos + halfwinwidth
            else:
                start_in_window = p.pos + halfwinwidth - iv.end
                end_in_window = p.pos + halfwinwidth - iv.start
            start_in_window = max(start_in_window, 0)
            end_in_window = min(end_in_window, 2 * halfwinwidth)
            if start_in_window >= 2 * halfwinwidth or end_in_window < 0:
                continue
            profile[start_in_window: end_in_window] += 1
    with open(outfile, 'w') as o:
        o.writelines("#distance\tdensity\n")
        i = 0 - halfwinwidth
        for k in profile:
            o.writelines(str(i) + '\t' + str(k) + '\n')
            i += 1
    pyplot.plot(numpy.arange(-halfwinwidth, halfwinwidth), profile)
    pyplot.show()
def peakDistributionToSummit(bamorbedfile, peakfile, outfile, halfwinwidth=200, type="ncRNA"):
    """Profile read/interval coverage around peak summits.

    bamorbedfile -- reads as .bam, or intervals as .bed (chosen by extension)
    peakfile     -- peak table; layout selected by *type* ("ncRNA" or "clip")
    outfile      -- tab-separated output: distance-to-summit, density
    halfwinwidth -- half-width (bp) of the window centred on each summit
    """
    # Detect the input type from the file extension.
    intype = "bam"
    match = re.search(r'\.bam$', bamorbedfile)
    if not match:
        intype = "bed"
    # Open/scan the input once according to its type.  (Bug fix: the
    # original unconditionally re-opened the file as BAM after this,
    # which broke BED input.)
    sortedbamfile = HTSeq.BAM_Reader(bamorbedfile) if intype == "bam" else None
    bedga, bed_dict = store_bed_iv(bamorbedfile) if intype == "bed" else ("", "")
    pos = getPeakSummit(peakfile, peaktype=type)
    profile = numpy.zeros(2 * halfwinwidth, dtype='i')
    for p in pos:
        print(p)
        # Clamp summits too close to the chromosome start so the window
        # never has a negative coordinate (mutates p in place).
        if p.pos < halfwinwidth:
            p.pos = halfwinwidth
        window = HTSeq.GenomicInterval(p.chrom, p.pos - halfwinwidth, p.pos + halfwinwidth, ".")
        interval_set = list()
        if intype == "bam":
            interval_set = getIntervalSet(sortedbamfile, window)
        elif intype == "bed":
            interval_set = get_bed_set_in_iv(bedga, bed_dict, window)
        for iv in interval_set:
            # Translate the interval into window coordinates, mirrored for '-'.
            if p.strand == "+":
                start_in_window = iv.start - p.pos + halfwinwidth
                end_in_window = iv.end - p.pos + halfwinwidth
            else:
                start_in_window = p.pos + halfwinwidth - iv.end
                end_in_window = p.pos + halfwinwidth - iv.start
            start_in_window = max(start_in_window, 0)
            end_in_window = min(end_in_window, 2 * halfwinwidth)
            if start_in_window >= 2 * halfwinwidth or end_in_window < 0:
                continue
            profile[start_in_window: end_in_window] += 1
    with open(outfile, 'w') as o:
        # Fix: header marker was '+distance'; use '#distance' like
        # distributionToOnePoint so downstream readers skip it as a comment.
        o.writelines("#distance\tdensity\n")
        i = 0 - halfwinwidth
        for k in profile:
            o.writelines(str(i) + '\t' + str(k) + '\n')
            i += 1
def getPosByChr(chr, dbfile, pos_type):
    """Like getPos(), restricted to chromosome *chr*, with intron anchors.

    chr      -- seqid to restrict the gene query to
    dbfile   -- path of a gffutils sqlite database
    pos_type -- "tss", "tts", "startcodon", "stopcodon", "intronstart" or
                "intronend" (case-insensitive); anything else -> empty set
    Returns a set of HTSeq.GenomicPosition objects.  Only the first isoform
    of each gene is used (unconditional break); for the intron anchors only
    'mRNA'/'transcript' isoforms are considered, and every intron boundary
    of that isoform is collected.
    """
    pos = set()
    db = gffutils.FeatureDB(dbfile)
    if pos_type.lower() == "tss":
        # Transcription start: isoform.start on '+', isoform.end on '-'.
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    tss_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.start, strand=isoform.strand)
                    pos.add(tss_pos)
                if isoform.strand == "-":
                    tss_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.end, strand=isoform.strand)
                    pos.add(tss_pos)
                break
    if pos_type.lower() == "tts":
        # Transcription termination: the opposite end of the isoform.
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    tts_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.end, strand=isoform.strand)
                    pos.add(tts_pos)
                if isoform.strand == "-":
                    tts_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.start, strand=isoform.strand)
                    pos.add(tts_pos)
                break
    if pos_type.lower() == "startcodon":
        # Start codon: boundary of the 5'-most CDS segment of the isoform.
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start'):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.start, strand=cds.strand)
                        pos.add(this_pos)
                        break
                if isoform.strand == "-":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start', reverse=True):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.end, strand=cds.strand)
                        pos.add(this_pos)
                        break
                break
    if pos_type.lower() == "stopcodon":
        # Stop codon: the 3'-most CDS boundary (mirror of startcodon).
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start', reverse=True):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.end, strand=cds.strand)
                        pos.add(this_pos)
                        break
                if isoform.strand == "-":
                    for cds in db.children(isoform.id, level=1, featuretype='CDS', order_by='start'):
                        this_pos = HTSeq.GenomicPosition(cds.seqid, cds.start, strand=cds.strand)
                        pos.add(this_pos)
                        break
                break
    if pos_type.lower() == "intronstart":
        # Intron starts: the downstream boundary of every exon except the
        # 3'-most one (the first exon yielded is skipped via `flag`).
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    flag = 0
                    for exon in db.children(isoform.id, level=1, featuretype='exon', order_by='start', reverse=True):
                        if flag == 0:
                            flag = 1
                            continue
                        this_pos = HTSeq.GenomicPosition(exon.seqid, exon.end, strand=exon.strand)
                        pos.add(this_pos)
                if isoform.strand == "-":
                    flag = 0
                    for exon in db.children(isoform.id, level=1, featuretype='exon', order_by='start'):
                        if flag == 0:
                            flag = 1
                            continue
                        this_pos = HTSeq.GenomicPosition(exon.seqid, exon.start, strand=exon.strand)
                        pos.add(this_pos)
                break
    if pos_type.lower() == "intronend":
        # Intron ends: the upstream boundary of every exon except the
        # 5'-most one (mirror of intronstart).
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    flag = 0
                    for exon in db.children(isoform.id, level=1, featuretype='exon', order_by='start'):
                        if flag == 0:
                            flag = 1
                            continue
                        this_pos = HTSeq.GenomicPosition(exon.seqid, exon.start, strand=exon.strand)
                        pos.add(this_pos)
                if isoform.strand == "-":
                    flag = 0
                    for exon in db.children(isoform.id, level=1, featuretype='exon', order_by='start', reverse=True):
                        if flag == 0:
                            flag = 1
                            continue
                        this_pos = HTSeq.GenomicPosition(exon.seqid, exon.end, strand=exon.strand)
                        pos.add(this_pos)
                break
    return pos
def store_bed_iv(bedFile):
    """Load a BED file into a GenomicArrayOfSets plus an id -> record dict.

    Each record is tagged with a 1-based string id; the array maps genomic
    positions to the set of ids covering them, and the dict resolves an id
    back to the full BED record (whose .name is set to its raw line).
    """
    reader = HTSeq.BED_Reader(bedFile)
    ga = HTSeq.GenomicArrayOfSets("auto", stranded=False)
    records = {}
    for idx, rec in enumerate(reader, start=1):
        rec.name = rec.line
        key = str(idx)
        records[key] = rec
        ga[rec.iv] += key
    return ga, records
def get_bed_set_in_iv(bedga, bed_dict, iv):
    """Collect the intervals of all stored BED records overlapping *iv*.

    bedga/bed_dict are the pair returned by store_bed_iv(); the result is
    a list of the .iv intervals of every record whose id appears in any
    step of bedga over *iv* (order follows set iteration).
    """
    hit_ids = set()
    for _step_iv, step_ids in bedga[iv].steps():
        hit_ids |= step_ids
    return [bed_dict[i].iv for i in hit_ids]
def distributionToOnePointByChr(chr, bamorbedfile, dbfile, outfile, pos_type, halfwinwidth, server_list, gff=None):
    """Per-chromosome worker version of distributionToOnePoint().

    Accepts reads as .bam or intervals as .bed (chosen by extension) and
    accumulates the coverage profile for chromosome *chr* into
    server_list[chr] instead of writing a file -- *server_list* is expected
    to be a shared mapping (presumably a multiprocessing Manager dict --
    TODO confirm against the caller).  *outfile* is accepted for signature
    parity but is unused here.
    """
    intype = "bam"
    match = re.search(r'\.bam$', bamorbedfile)
    if not match:
        intype = "bed"
    sortedbamfile = HTSeq.BAM_Reader(bamorbedfile) if intype == "bam" else None
    bedga, bed_dict = store_bed_iv(bamorbedfile) if intype == "bed" else ("", "")
    if gff is not None:
        db = gffutils.create_db(gff, dbfile, merge_strategy="create_unique", verbose=False, force=True)
    pos = set()
    pos = getPosByChr(chr, dbfile, pos_type)
    profile = numpy.zeros(2 * halfwinwidth, dtype='i')
    for p in pos:
        # Clamp anchors too close to the chromosome start (mutates p).
        if p.pos < halfwinwidth:
            p.pos = halfwinwidth
        window = HTSeq.GenomicInterval(p.chrom, p.pos - halfwinwidth, p.pos + halfwinwidth, ".")
        interval_set = list()
        if intype == "bam":
            interval_set = getIntervalSet(sortedbamfile, window)
        elif intype == "bed":
            interval_set = get_bed_set_in_iv(bedga, bed_dict, window)
        for iv in interval_set:
            # Translate the interval into window coordinates, mirrored for '-'.
            if p.strand == "+":
                start_in_window = iv.start - p.pos + halfwinwidth
                end_in_window = iv.end - p.pos + halfwinwidth
            else:
                start_in_window = p.pos + halfwinwidth - iv.end
                end_in_window = p.pos + halfwinwidth - iv.start
            start_in_window = max(start_in_window, 0)
            end_in_window = min(end_in_window, 2 * halfwinwidth)
            if start_in_window >= 2 * halfwinwidth or end_in_window < 0:
                continue
            profile[start_in_window:end_in_window] += 1
    # NOTE(review): for a numpy array, profile[:] is a view rather than a
    # copy; this is fine if server_list pickles on assignment -- confirm.
    server_list[chr] = profile[:]
def iv_distribution_around_gene_bychr(chr, bamorbedfile, dbfile, pos_type, halfwinwidth, gff=None):
    """Write per-interval distances to gene anchors for chromosome *chr*.

    For every isoform anchor produced by getIsoformandPosByChr(), each
    same-strand read/interval whose midpoint lies within *halfwinwidth* of
    the anchor is written to the file "_ivaround_gene_<chr>" as:
    seqid, isoform start, end, strand, isoform id, gene id, interval
    start, end, signed distance (positive = downstream of the anchor).
    """
    fout = open("_ivaround_gene_" + chr, 'w')
    intype = "bam"
    match = re.search(r'\.bam$', bamorbedfile)
    if not match:
        intype = "bed"
    sortedbamfile = HTSeq.BAM_Reader(bamorbedfile) if intype == "bam" else None
    bedga, bed_dict = store_bed_iv(bamorbedfile) if intype == "bed" else ("", "")
    if gff is not None:
        db = gffutils.create_db(gff, dbfile, merge_strategy="create_unique", verbose=False, force=True)
    for p, isoform, gene, window in getIsoformandPosByChr(chr, dbfile, pos_type, halfwinwidth):
        interval_set = list()
        if intype == "bam":
            interval_set = getIntervalSet(sortedbamfile, window)
        elif intype == "bed":
            interval_set = get_bed_set_in_iv(bedga, bed_dict, window)
        for iv in interval_set:
            # Only intervals on the anchor's strand contribute.
            if iv.strand != p.strand:
                continue
            # Distance is measured from the anchor to the interval midpoint,
            # sign-flipped on '-' so positive always means downstream.
            summit_pos = iv.start + int((iv.end - iv.start) / 2)
            distance = 0
            abs_distance = 0
            if p.strand == "+":
                distance = summit_pos - p.pos
                abs_distance = abs(distance)
            else:
                distance = p.pos - summit_pos
                abs_distance = abs(distance)
            if abs_distance > halfwinwidth:
                continue
            fout.writelines(isoform.seqid + "\t" + str(isoform.start) + "\t" + str(isoform.end) + "\t" + isoform.strand + "\t" + isoform.id + "\t" + gene.id + "\t" + str(iv.start) + "\t" + str(iv.end) + "\t" + str(distance) + "\n")
    fout.close()
def getIsoformandPosByChr(chr, dbfile, pos_type, halfwinwidth):
    """Return (anchor, isoform, gene, window) tuples for chromosome *chr*.

    pos_type -- "tss" or "tts" (case-insensitive; anything else yields an
                empty list)
    The window spans from *halfwinwidth* upstream of the anchor to the far
    end of the isoform (clipped at coordinate 0), on the isoform's strand.
    Unlike getPosByChr(), ALL isoforms of each gene are used (no break).
    """
    pos = list()
    db = gffutils.FeatureDB(dbfile)
    if pos_type.lower() == "tss":
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    tss_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.start, strand=isoform.strand)
                    window = HTSeq.GenomicInterval(tss_pos.chrom, tss_pos.pos - halfwinwidth, isoform.end, tss_pos.strand)
                    # Clip the upstream edge at the chromosome start.
                    if tss_pos.pos < halfwinwidth:
                        window = HTSeq.GenomicInterval(tss_pos.chrom, 0, isoform.end, tss_pos.strand)
                    pos.append((tss_pos, isoform, gene, window))
                if isoform.strand == "-":
                    tss_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.end, strand=isoform.strand)
                    window = HTSeq.GenomicInterval(tss_pos.chrom, isoform.start, tss_pos.pos + halfwinwidth, tss_pos.strand)
                    pos.append((tss_pos, isoform, gene, window))
    if pos_type.lower() == "tts":
        for gene in db.features_of_type('gene', seqid=chr, order_by='start'):
            gene_id = gene.id
            gene_strand = gene.strand
            gene_start = gene.start
            trans = ('mRNA', 'miRNA', 'mRNA_TE_gene', 'ncRNA', 'rRNA', 'snoRNA', 'snRNA', 'tRNA', 'transcript')
            for isoform in db.children(gene_id, level=1, featuretype=trans):
                if isoform.strand == "+":
                    tts_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.end, strand=isoform.strand)
                    window = HTSeq.GenomicInterval(tts_pos.chrom, isoform.start, tts_pos.pos + halfwinwidth, tts_pos.strand)
                    pos.append((tts_pos, isoform, gene, window))
                if isoform.strand == "-":
                    tts_pos = HTSeq.GenomicPosition(isoform.seqid, isoform.start, strand=isoform.strand)
                    window = HTSeq.GenomicInterval(tts_pos.chrom, tts_pos.pos - halfwinwidth, isoform.end, tts_pos.strand)
                    # Clip the upstream edge at the chromosome start.
                    if tts_pos.pos < halfwinwidth:
                        window = HTSeq.GenomicInterval(tts_pos.chrom, 0, isoform.end, tts_pos.strand)
                    pos.append((tts_pos, isoform, gene, window))
    return pos
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
def test():
    """Placeholder smoke test for this module; intentionally a no-op."""
    pass
# getCumulativeDataFromFile TEST
# infile = "cisnat_classification_rpkm.txt"
# outfile = "inflorescence_minor_ratio_Cumulative.txt"
# getCumulativeDataFromFile(infile, 19, outfile)
## distributionToOnePoint TEST
# bamfile = "accepted_hits.uniq.fix.bam"
# dbfile = "tair10.db"
# outfile = "startcodon"
# type = "stopcodon"
# halfwinwidth = 10000
# distributionToOnePoint(bamfile, dbfile, outfile, type, halfwinwidth)
#
# bamfile = "/data1/project1/chend/Y.pestis_paper/CLIP-Seq/CLIP-Seq_bowtie2_0807/HFQ_Flag_clip_clean.fq/HFQ_Flag_clip_clean.fq_mapped.uniq.bam"
# peakfile = "/data1/project1/chend/Y.pestis_paper/CLIP-Seq/CLIP_PeakCalling/Hfq_Flag/Hfq_Flag_filter.txt"
# outfile = "test_peak.txt"
# halfwinwidth = 100
# peakDistributionToSummit(bamfile, peakfile, outfile, halfwinwidth, type="ncRNA")
# Script entry point: run the (currently no-op) self-test when executed
# directly rather than imported as a library.
if __name__ == '__main__':
    test()
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
|
ablifedev/ABLIRC
|
ABLIRC/bin/ablib/utils/distribution.py
|
Python
|
mit
| 26,148
|
[
"HTSeq"
] |
866b4feaf02a6c481cad023c592d6200318ef9b7a546af7d557b797ce87750d1
|
"""
This module implements :class:`Group`, which represents a subset of the
channels in an :class:`AnalogSignal` or :class:`IrregularlySampledSignal`.
It replaces and extends the grouping function of the former :class:`ChannelIndex`
and :class:`Unit`.
"""
from os import close
from neo.core.container import Container
class Group(Container):
    """
    Can contain any of the data objects, views, or other groups,
    outside the hierarchy of the segment and block containers.
    A common use is to link the :class:`SpikeTrain` objects within a :class:`Block`,
    possibly across multiple Segments, that were emitted by the same neuron.

    *Required attributes/properties*:
        None

    *Recommended attributes/properties*:
        :objects: (Neo object) Objects with which to pre-populate the :class:`Group`
        :name: (str) A label for the group.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.

    *Optional arguments*:
        :allowed_types: (list or tuple) Types of Neo object that are allowed to be
          added to the Group. If not specified, any Neo object can be added.

    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.

    *Container of*:
        :class:`AnalogSignal`, :class:`IrregularlySampledSignal`, :class:`SpikeTrain`,
        :class:`Event`, :class:`Epoch`, :class:`ChannelView`, :class:`Group`
    """
    # Data object classes a Group may directly contain.
    _data_child_objects = (
        'AnalogSignal', 'IrregularlySampledSignal', 'SpikeTrain',
        'Event', 'Epoch', 'ChannelView', 'ImageSequence'
    )
    # Container classes a Group may directly contain (groups may nest).
    _container_child_objects = ('Segment', 'Group')
    # A Group belongs to a single parent Block.
    _single_parent_objects = ('Block',)

    def __init__(self, objects=None, name=None, description=None, file_origin=None,
                 allowed_types=None, **annotations):
        """Initialize a new :class:`Group`, optionally pre-populated with *objects*."""
        super().__init__(name=name, description=description,
                         file_origin=file_origin, **annotations)
        # None means "no restriction"; otherwise normalize to a tuple so
        # it can be passed straight to isinstance() in add().
        if allowed_types is None:
            self.allowed_types = None
        else:
            self.allowed_types = tuple(allowed_types)
        if objects:
            self.add(*objects)

    @property
    def _container_lookup(self):
        """Map each child class name to the container list that stores it."""
        return {
            cls_name: getattr(self, container_name)
            for cls_name, container_name in zip(self._child_objects, self._child_containers)
        }

    def _get_container(self, cls):
        """Return the container list for *cls*, unwrapping IO proxy classes."""
        if hasattr(cls, "proxy_for"):
            cls = cls.proxy_for
        return self._container_lookup[cls.__name__]

    def add(self, *objects):
        """Add new Neo objects to the Group, enforcing ``allowed_types`` if set."""
        for obj in objects:
            if self.allowed_types and not isinstance(obj, self.allowed_types):
                raise TypeError("This Group can only contain {}, but not {}"
                                "".format(self.allowed_types, type(obj)))
            container = self._get_container(obj.__class__)
            container.append(obj)
|
JuliaSprenger/python-neo
|
neo/core/group.py
|
Python
|
bsd-3-clause
| 3,012
|
[
"NEURON"
] |
335369a66bafa3f51ba4f7904a61a43a38320a14148238c15d05063c3437bede
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2002 Gary Shao
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2017 Jonathan Biegert <azrdev@qrdn.de>
# Copyright (C) 2017 Mindaugas Baranauskas
# Copyright (C) 2017 Paul Culley
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Graphviz adapter for Graphs """
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import os
from io import BytesIO
import tempfile
from subprocess import Popen, PIPE
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from ...utils.file import search_for, where_is
from . import BaseDoc
from ..menu import NumberOption, TextOption, EnumeratedListOption, \
BooleanOption
from ...constfunc import win
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
LOG = logging.getLogger(".graphdoc")
#-------------------------------------------------------------------------
#
# Private Constants
#
#-------------------------------------------------------------------------
# Option lists used to populate the EnumeratedListOption menus below.
# Each entry maps a translated display 'name' to the literal Graphviz
# 'value' that is written into the dot source.
_FONTS = [{'name' : _("Default"), 'value' : ""},
          {'name' : _("PostScript / Helvetica"), 'value' : "Helvetica"},
          {'name' : _("TrueType / FreeSans"), 'value' : "FreeSans"}]

# Graph layout direction (Graphviz "rankdir" attribute).
_RANKDIR = [{'name' : _("Vertical (↓)"), 'value' : "TB"},
            {'name' : _("Vertical (↑)"), 'value' : "BT"},
            {'name' : _("Horizontal (→)"), 'value' : "LR"},
            {'name' : _("Horizontal (←)"), 'value' : "RL"}]

# Edge attachment points (headport, tailport) for each rankdir value;
# used when the "node_ports" option is enabled.
_NODE_PORTS = {"TB" : ("n", "s"),
               "BT" : ("s", "n"),
               "LR" : ("w", "e"),
               "RL" : ("e", "w")}

# Page output order (Graphviz "pagedir" attribute).
_PAGEDIR = [{'name' : _("Bottom, left"), 'value' : "BL"},
            {'name' : _("Bottom, right"), 'value' : "BR"},
            {'name' : _("Top, left"), 'value' : "TL"},
            {'name' : _("Top, Right"), 'value' : "TR"},
            {'name' : _("Right, bottom"), 'value' : "RB"},
            {'name' : _("Right, top"), 'value' : "RT"},
            {'name' : _("Left, bottom"), 'value' : "LB"},
            {'name' : _("Left, top"), 'value' : "LT"}]

# Graph scaling behaviour (Graphviz "ratio" attribute).
_RATIO = [{'name' : _("Compress to minimal size"), 'value': "compress"},
          {'name' : _("Fill the given area"), 'value': "fill"},
          {'name' : _("Expand uniformly"), 'value': "expand"}]

# Placement of the user note (Graphviz "labelloc" attribute).
_NOTELOC = [{'name' : _("Top"), 'value' : "t"},
            {'name' : _("Bottom"), 'value' : "b"}]

# Edge drawing style (Graphviz "splines" attribute).
_SPLINE = [{'name' : _("Straight"), 'value' : "false"},
           {'name' : _("Curved"), 'value' : "true", },
           {'name' : _("Orthogonal"), 'value' : 'ortho'}]

# Locate the external tools: the Graphviz "dot" executable and, for the
# Ghostscript-based PDF backend, the "gs" command.  On Windows the two
# historical Ghostscript executable names are tried in turn.
if win():
    _DOT_FOUND = search_for("dot.exe")
    if search_for("gswin32c.exe") == 1:
        _GS_CMD = "gswin32c.exe"
    elif search_for("gswin32.exe") == 1:
        _GS_CMD = "gswin32.exe"
    else:
        _GS_CMD = ""
else:
    _DOT_FOUND = search_for("dot")
    _GS_CMD = where_is("gs")
#------------------------------------------------------------------------------
#
# GVOptions
#
#------------------------------------------------------------------------------
class GVOptions:
    """
    Defines all of the controls necessary
    to configure the graph reports.
    """
    def __init__(self):
        # Option objects remembered by add_menu_options() so that
        # pages_changed() can later toggle page_dir availability.
        self.h_pages = None
        self.v_pages = None
        self.page_dir = None
        self.dpi = None

    def add_menu_options(self, menu):
        """
        Add all graph related options to the menu.

        :param menu: The menu the options should be added to.
        :type menu: :class:`.Menu`
        :return: nothing
        """
        ################################
        category = _("Graphviz Layout")
        ################################
        font_family = EnumeratedListOption(_("Font family"), "")
        for item in _FONTS:
            font_family.add_item(item["value"], item["name"])
        font_family.set_help(_("Choose the font family. If international "
                               "characters don't show, use FreeSans font. "
                               "FreeSans is available from: "
                               "http://www.nongnu.org/freefont/"))
        menu.add_option(category, "font_family", font_family)

        font_size = NumberOption(_("Font size"), 14, 8, 128)
        font_size.set_help(_("The font size, in points."))
        menu.add_option(category, "font_size", font_size)

        rank_dir = EnumeratedListOption(_("Graph Direction"), "TB")
        for item in _RANKDIR:
            rank_dir.add_item(item["value"], item["name"])
        rank_dir.set_help(_("Whether graph goes from top to bottom "
                            "or left to right."))
        menu.add_option(category, "rank_dir", rank_dir)

        h_pages = NumberOption(_("Number of Horizontal Pages"), 1, 1, 25)
        h_pages.set_help(_("Graphviz can create very large graphs by "
                           "spreading the graph across a rectangular "
                           "array of pages. This controls the number "
                           "pages in the array horizontally. "
                           "Only valid for dot and pdf via Ghostscript."))
        menu.add_option(category, "h_pages", h_pages)

        v_pages = NumberOption(_("Number of Vertical Pages"), 1, 1, 25)
        v_pages.set_help(_("Graphviz can create very large graphs by "
                           "spreading the graph across a rectangular "
                           "array of pages. This controls the number "
                           "pages in the array vertically. "
                           "Only valid for dot and pdf via Ghostscript."))
        menu.add_option(category, "v_pages", v_pages)

        page_dir = EnumeratedListOption(_("Paging Direction"), "BL")
        for item in _PAGEDIR:
            page_dir.add_item(item["value"], item["name"])
        page_dir.set_help(_("The order in which the graph pages are output. "
                            "This option only applies if the horizontal pages "
                            "or vertical pages are greater than 1."))
        menu.add_option(category, "page_dir", page_dir)

        spline = EnumeratedListOption(_("Connecting lines"), "true")
        for item in _SPLINE:
            spline.add_item(item["value"], item["name"])
        spline.set_help(_("How the lines between objects will be drawn."))
        menu.add_option(category, "spline", spline)

        node_ports = BooleanOption(_("Alternate line attachment"), False)
        node_ports.set_help(_("Whether lines attach to nodes differently"))
        menu.add_option(category, "node_ports", node_ports)

        # the page direction option only makes sense when the
        # number of horizontal and/or vertical pages is > 1,
        # so we need to remember these 3 controls for later
        self.h_pages = h_pages
        self.v_pages = v_pages
        self.page_dir = page_dir

        # the page direction option only makes sense when the
        # number of horizontal and/or vertical pages is > 1
        self.h_pages.connect('value-changed', self.pages_changed)
        self.v_pages.connect('value-changed', self.pages_changed)

        ################################
        category = _("Graphviz Options")
        ################################
        aspect_ratio = EnumeratedListOption(_("Aspect ratio"), "fill")
        for item in _RATIO:
            aspect_ratio.add_item(item["value"], item["name"])
        help_text = _(
            'Affects node spacing and scaling of the graph.\n'
            'If the graph is smaller than the print area:\n'
            '  Compress will not change the node spacing. \n'
            '  Fill will increase the node spacing to fit the print area in '
            'both width and height.\n'
            '  Expand will increase the node spacing uniformly to preserve '
            'the aspect ratio.\n'
            'If the graph is larger than the print area:\n'
            '  Compress will shrink the graph to achieve tight packing at the '
            'expense of symmetry.\n'
            '  Fill will shrink the graph to fit the print area after first '
            'increasing the node spacing.\n'
            '  Expand will shrink the graph uniformly to fit the print area.')
        aspect_ratio.set_help(help_text)
        menu.add_option(category, "ratio", aspect_ratio)

        dpi = NumberOption(_("DPI"), 75, 20, 1200)
        dpi.set_help(_("Dots per inch. When creating images such as "
                       ".gif or .png files for the web, try numbers "
                       "such as 100 or 300 DPI. PostScript and PDF files "
                       "always use 72 DPI."))
        menu.add_option(category, "dpi", dpi)
        self.dpi = dpi

        nodesep = NumberOption(_("Node spacing"), 0.20, 0.01, 5.00, 0.01)
        nodesep.set_help(_("The minimum amount of free space, in inches, "
                           "between individual nodes. For vertical graphs, "
                           "this corresponds to spacing between columns. "
                           "For horizontal graphs, this corresponds to "
                           "spacing between rows."))
        menu.add_option(category, "nodesep", nodesep)

        ranksep = NumberOption(_("Rank spacing"), 0.20, 0.01, 5.00, 0.01)
        ranksep.set_help(_("The minimum amount of free space, in inches, "
                           "between ranks. For vertical graphs, this "
                           "corresponds to spacing between rows. For "
                           "horizontal graphs, this corresponds to spacing "
                           "between columns."))
        menu.add_option(category, "ranksep", ranksep)

        use_subgraphs = BooleanOption(_('Use subgraphs'), True)
        use_subgraphs.set_help(_("Subgraphs can help Graphviz position "
                                 "spouses together, but with non-trivial "
                                 "graphs will result in longer lines and "
                                 "larger graphs."))
        menu.add_option(category, "usesubgraphs", use_subgraphs)

        ################################
        category = _("Note")
        ################################
        note = TextOption(_("Note to add to the graph"),
                          [""])
        note.set_help(_("This text will be added to the graph."))
        menu.add_option(category, "note", note)

        noteloc = EnumeratedListOption(_("Note location"), 't')
        # Iterate the list directly instead of indexing with
        # range(len(...)), matching the other option loops above.
        for item in _NOTELOC:
            noteloc.add_item(item["value"], item["name"])
        noteloc.set_help(_("Whether note will appear on top "
                           "or bottom of the page."))
        menu.add_option(category, "noteloc", noteloc)

        notesize = NumberOption(_("Note size"), 32, 8, 128)
        notesize.set_help(_("The size of note text, in points."))
        menu.add_option(category, "notesize", notesize)

    def pages_changed(self):
        """
        This method gets called every time the v_pages or h_pages
        options are changed; when both vertical and horizontal
        pages are set to "1", then the page_dir control needs to
        be unavailable
        """
        if self.v_pages.get_value() > 1 or self.h_pages.get_value() > 1:
            self.page_dir.set_available(True)
        else:
            self.page_dir.set_available(False)
#------------------------------------------------------------------------------
#
# GVDoc
#
#------------------------------------------------------------------------------
class GVDoc(metaclass=ABCMeta):
    """
    Abstract Interface for Graphviz document generators. Output formats
    for Graphviz reports must implement this interface to be used by the
    report system.
    """

    @abstractmethod
    def add_node(self, node_id, label, shape="", color="",
                 style="", fillcolor="", url="", htmloutput=False):
        """
        Add a node to this graph. Nodes can be different shapes like boxes and
        circles.

        :param node_id: A unique identification value for this node.
            Example: "p55"
        :type node_id: string
        :param label: The text to be displayed in the node.
            Example: "John Smith"
        :type label: string
        :param shape: The shape for the node.
            Examples: "box", "ellipse", "circle"
        :type shape: string
        :param color: The color of the node line.
            Examples: "blue", "lightyellow"
        :type color: string
        :param style: The style of the node.
        :type style: string
        :param fillcolor: The fill color for the node.
            Examples: "blue", "lightyellow"
        :type fillcolor: string
        :param url: A URL for the node.
        :type url: string
        :param htmloutput: Whether the label contains HTML.
        :type htmloutput: boolean
        :return: nothing
        """

    @abstractmethod
    def add_link(self, id1, id2, style="", head="", tail="", comment=""):
        """
        Add a link between two nodes.

        :param id1: The unique identifier of the starting node.
            Example: "p55"
        :type id1: string
        :param id2: The unique identifier of the ending node.
            Example: "p55"
        :type id2: string
        :param style: The line style of the link (e.g. "solid", "dotted").
        :type style: string
        :param head: The arrowhead style at the ending node.
        :type head: string
        :param tail: The arrowtail style at the starting node.
        :type tail: string
        :param comment: A text string displayed at the end of the link line.
            Example: "person C is the son of person A and person B"
        :type comment: string
        :return: nothing
        """

    @abstractmethod
    def add_comment(self, comment):
        """
        Add a comment to the source file.

        :param comment: A text string to add as a comment.
            Example: "Next comes the individuals."
        :type comment: string
        :return: nothing
        """

    @abstractmethod
    def start_subgraph(self, graph_id):
        """
        Start a subgraph in this graph.

        :param graph_id: The unique identifier of the subgraph.
            Example: "p55"
        :type graph_id: string
        :return: nothing
        """

    @abstractmethod
    def end_subgraph(self):
        """
        End a subgraph that was previously started in this graph.

        :return: nothing
        """
#------------------------------------------------------------------------------
#
# GVDocBase
#
#------------------------------------------------------------------------------
class GVDocBase(BaseDoc, GVDoc):
    """
    Base document generator for all Graphviz document generators. Classes that
    inherit from this class will only need to implement the close function.
    The close function will generate the actual file of the appropriate type.
    """
    def __init__(self, options, paper_style, uistate=None):
        BaseDoc.__init__(self, None, paper_style, uistate=uistate)

        self._filename = None
        # The dot source is accumulated as UTF-8 bytes; subclasses read
        # it back with self._dot.getvalue() when they render the output.
        self._dot = BytesIO()
        self._paper = paper_style

        # Cache all graph-related menu option values on the instance.
        get_option = options.menu.get_option_by_name
        self.dpi = get_option('dpi').get_value()
        self.fontfamily = get_option('font_family').get_value()
        self.fontsize = get_option('font_size').get_value()
        self.hpages = get_option('h_pages').get_value()
        self.nodesep = get_option('nodesep').get_value()
        self.noteloc = get_option('noteloc').get_value()
        self.notesize = get_option('notesize').get_value()
        self.note = get_option('note').get_value()
        self.pagedir = get_option('page_dir').get_value()
        self.rankdir = get_option('rank_dir').get_value()
        self.ranksep = get_option('ranksep').get_value()
        self.ratio = get_option('ratio').get_value()
        self.vpages = get_option('v_pages').get_value()
        self.usesubgraphs = get_option('usesubgraphs').get_value()
        self.spline = get_option('spline').get_value()
        self.node_ports = get_option('node_ports').get_value()

        paper_size = paper_style.get_size()

        # Subtract 0.01" from the drawing area to make some room between
        # this area and the margin in order to compensate for different
        # rounding errors internally in dot
        # (the /2.54 divisions convert centimeters to inches)
        sizew = (paper_size.get_width() -
                 self._paper.get_left_margin() -
                 self._paper.get_right_margin()) / 2.54 - 0.01
        sizeh = (paper_size.get_height() -
                 self._paper.get_top_margin() -
                 self._paper.get_bottom_margin()) / 2.54 - 0.01

        pheight = paper_size.get_height_inches()
        pwidth = paper_size.get_width_inches()

        xmargin = self._paper.get_left_margin() / 2.54
        ymargin = self._paper.get_top_margin() / 2.54

        # The drawing area spans the full page grid.
        sizew *= self.hpages
        sizeh *= self.vpages

        # Emit the opening of the digraph with all graph-level attributes.
        self.write(
            'digraph GRAMPS_graph\n'
            '{\n'
            ' bgcolor=white;\n'
            ' center="true"; \n'
            ' charset="utf8";\n'
            ' concentrate="false";\n' +
            ' dpi="%d";\n' % self.dpi +
            ' graph [fontsize=%d];\n' % self.fontsize +
            ' margin="%3.2f,%3.2f"; \n' % (xmargin, ymargin) +
            ' mclimit="99";\n' +
            ' nodesep="%.2f";\n' % self.nodesep +
            ' outputorder="edgesfirst";\n' +
            ('#' if self.hpages == self.vpages == 1 else '') +
            # comment out "page=" if the graph is on 1 page (bug #2121)
            ' page="%3.2f,%3.2f";\n' % (pwidth, pheight) +
            ' pagedir="%s";\n' % self.pagedir +
            ' rankdir="%s";\n' % self.rankdir +
            ' ranksep="%.2f";\n' % self.ranksep +
            ' ratio="%s";\n' % self.ratio +
            ' searchsize="100";\n' +
            ' size="%3.2f,%3.2f"; \n' % (sizew, sizeh) +
            ' splines="%s";\n' % self.spline +
            '\n' +
            ' edge [len=0.5 style=solid fontsize=%d];\n' % self.fontsize)

        if self.node_ports:
            # Force edges to attach at fixed compass points per rankdir.
            self.write(' edge [headport=%s tailport=%s];\n'
                       % _NODE_PORTS[self.rankdir])

        if self.fontfamily:
            self.write(' node [style=filled fontname="%s" fontsize=%d];\n'
                       % (self.fontfamily, self.fontsize))
        else:
            self.write(' node [style=filled fontsize=%d];\n'
                       % self.fontsize)
        self.write('\n')

    def write(self, text):
        """ Write text to the dot file """
        # xmlcharrefreplace keeps characters outside UTF-8's reach (none
        # in practice) from raising while building the byte stream.
        self._dot.write(text.encode('utf8', 'xmlcharrefreplace'))

    def open(self, filename):
        """ Implement GVDocBase.open() """
        self._filename = os.path.normpath(os.path.abspath(filename))

    def close(self):
        """
        This isn't useful by itself. Other classes need to override this and
        actually generate a file.
        """
        if self.note:
            # build up the label
            label = ''
            for line in self.note:      # for every line in the note...
                line = line.strip()     # ...strip whitespace from this line...
                if line != '':          # ...and if we still have a line...
                    if label != '':     # ...see if we need to insert a newline...
                        label += '\\n'
                    label += line.replace('"', '\\\"')

            # after all that, see if we have a label to display
            if label != '':
                self.write(
                    '\n' +
                    ' label="%s";\n' % label +
                    ' labelloc="%s";\n' % self.noteloc +
                    ' fontsize="%d";\n' % self.notesize)

        # Close the digraph opened in __init__().
        self.write('}\n\n')

    def add_node(self, node_id, label, shape="", color="",
                 style="", fillcolor="", url="", htmloutput=False):
        """
        Add a node to this graph. Nodes can be different shapes like boxes and
        circles.

        Implements GVDocBase.add_node().
        """
        text = '['

        if shape:
            text += ' shape="%s"' % shape

        if color:
            text += ' color="%s"' % color

        if fillcolor:
            text += ' fillcolor="%s"' % fillcolor

        if style:
            text += ' style="%s"' % style

        # note that we always output a label -- even if an empty string --
        # otherwise Graphviz uses the node ID as the label which is unlikely
        # to be what the user wants to see in the graph
        if label.startswith("<") or htmloutput:
            # HTML-like labels use angle brackets instead of quotes.
            text += ' label=<%s>' % label
        else:
            text += ' label="%s"' % label

        if url:
            text += ' URL="%s"' % url

        text += " ]"
        self.write(' "%s" %s;\n' % (node_id, text))

    def add_link(self, id1, id2, style="", head="", tail="", comment=""):
        """
        Add a link between two nodes.

        Implements GVDocBase.add_link().
        """
        self.write(' "%s" -> "%s"' % (id1, id2))

        if style or head or tail:
            self.write(' [')

            if style:
                self.write(' style=%s' % style)
            if head:
                self.write(' arrowhead=%s' % head)
            if tail:
                self.write(' arrowtail=%s' % tail)
            # Derive the edge direction from which arrow ends are set.
            if head:
                if tail:
                    self.write(' dir=both')
                else:
                    self.write(' dir=forward')
            else:
                if tail:
                    self.write(' dir=back')
                else:
                    self.write(' dir=none')
            self.write(' ]')

        self.write(';')

        if comment:
            self.write(' // %s' % comment)

        self.write('\n')

    def add_comment(self, comment):
        """
        Add a comment.

        Implements GVDocBase.add_comment().
        """
        tmp = comment.split('\n')
        for line in tmp:
            text = line.strip()
            if text == "":
                # line with no text
                self.write('\n')
            elif text.startswith('#'):
                # already a comment
                self.write('%s\n' % text)
            else:
                # prefix the comment marker
                self.write('# %s\n' % text)

    def start_subgraph(self, graph_id):
        """ Implement GVDocBase.start_subgraph() """
        graph_id = graph_id.replace(' ', '_')   # for user-defined ID with space
        self.write(
            ' subgraph cluster_%s\n' % graph_id +
            ' {\n' +
            ' style="invis";\n')        # no border around subgraph (#0002176)

    def end_subgraph(self):
        """ Implement GVDocBase.end_subgraph() """
        self.write(' }\n')
#------------------------------------------------------------------------------
#
# GVDotDoc
#
#------------------------------------------------------------------------------
class GVDotDoc(GVDocBase):
    """ GVDoc implementation that generates a .gv text file. """

    def close(self):
        """ Implements GVDotDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".gv"):
            self._filename += ".gv"

        # Dump the accumulated dot source straight to the target file.
        with open(self._filename, "wb") as outfile:
            outfile.write(self._dot.getvalue())
#------------------------------------------------------------------------------
#
# GVPsDoc
#
#------------------------------------------------------------------------------
class GVPsDoc(GVDocBase):
    """ GVDoc implementation that generates a .ps file using Graphviz. """

    def __init__(self, options, paper_style):
        # DPI must always be 72 for PDF.
        # GV documentation says dpi is only for image formats.
        options.menu.get_option_by_name('dpi').set_value(72)
        # GV documentation allow multiple pages only for ps format,
        # But it does not work with -Tps:cairo in order to
        # show Non Latin-1 letters. Force to only 1 page.
        # See bug tracker issue 2815
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        # Initialize the base exactly once, after all option tweaks, so
        # the dot header is written with the final option values.  (The
        # original code called GVDocBase.__init__ twice, once before the
        # page options were forced to 1.)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVPsDoc.close() """
        GVDocBase.close(self)

        # Make sure the extension is correct
        if self._filename[-3:] != ".ps":
            self._filename += ".ps"

        # Create a temporary dot file
        (handle, tmp_dot) = tempfile.mkstemp(".gv")
        dotfile = os.fdopen(handle, "wb")
        dotfile.write(self._dot.getvalue())
        dotfile.close()
        # Generate the PS file.
        # Reason for using -Tps:cairo. Needed for Non Latin-1 letters
        # Some testing with Tps:cairo. Non Latin-1 letters are OK i all cases:
        # Output format:   ps        PDF-GostScript  PDF-Graphviz
        # Single page      OK        OK              OK
        # Multip page    1 page,     OK              1 page,
        #                corrupted                   set by gramps
        # If I take a correct multip page PDF and convert it with pdf2ps I get
        # multip pages, but the output is clipped, some margins have
        # disappeared. I used 1 inch margins always.
        # See bug tracker issue 2815
        # :cairo does not work with Graphviz 2.26.3 and later See issue 4164
        # recent versions of Graphviz doesn't even try, just puts out a single
        # large page.
        command = 'dot -Tps:cairo -o"%s" "%s"' % (self._filename, tmp_dot)
        dotversion = str(Popen(['dot', '-V'],
                               stderr=PIPE).communicate(input=None)[1])
        # Problem with dot 2.26.3 and later and multiple pages, which gives
        # "cairo: out of memory" If the :cairo is skipped for these cases it
        # gives bad result for non-Latin-1 characters (utf-8).
        # NOTE: str.find() returns -1 when the substring is absent, which is
        # truthy -- each result must be compared against -1 explicitly, or
        # the version test matches almost every dot version.
        if (dotversion.find('2.26.3') != -1
                or dotversion.find('2.28.0') != -1) and \
                (self.vpages * self.hpages) > 1:
            command = command.replace(':cairo', '')
        os.system(command)
        # Delete the temporary dot file
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVSvgDoc
#
#------------------------------------------------------------------------------
class GVSvgDoc(GVDocBase):
    """ GVDoc implementation that generates a .svg file using Graphviz. """

    def __init__(self, options, paper_style):
        # GV documentation allow multiple pages only for ps format,
        # which also includes pdf via ghostscript.
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVSvgDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".svg"):
            self._filename += ".svg"

        # Write the dot source to a temporary file for dot to consume.
        handle, tmp_dot = tempfile.mkstemp(".gv")
        with os.fdopen(handle, "wb") as tmp_file:
            tmp_file.write(self._dot.getvalue())

        # Render the SVG, then remove the temporary dot file.
        os.system('dot -Tsvg:cairo -o"%s" "%s"' % (self._filename, tmp_dot))
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVSvgzDoc
#
#------------------------------------------------------------------------------
class GVSvgzDoc(GVDocBase):
    """ GVDoc implementation that generates a compressed .svgz file
    using Graphviz. """

    def __init__(self, options, paper_style):
        # GV documentation allow multiple pages only for ps format,
        # which also includes pdf via ghostscript.
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVSvgzDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".svgz"):
            self._filename += ".svgz"

        # Write the dot source to a temporary file for dot to consume.
        handle, tmp_dot = tempfile.mkstemp(".gv")
        with os.fdopen(handle, "wb") as tmp_file:
            tmp_file.write(self._dot.getvalue())

        # Render the SVGZ, then remove the temporary dot file.
        os.system('dot -Tsvgz -o"%s" "%s"' % (self._filename, tmp_dot))
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVPngDoc
#
#------------------------------------------------------------------------------
class GVPngDoc(GVDocBase):
    """ GVDoc implementation that generates a .png file using Graphviz. """

    def __init__(self, options, paper_style):
        # GV documentation allow multiple pages only for ps format,
        # which also includes pdf via ghostscript.
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVPngDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".png"):
            self._filename += ".png"

        # Write the dot source to a temporary file for dot to consume.
        handle, tmp_dot = tempfile.mkstemp(".gv")
        with os.fdopen(handle, "wb") as tmp_file:
            tmp_file.write(self._dot.getvalue())

        # Render the PNG, then remove the temporary dot file.
        os.system('dot -Tpng -o"%s" "%s"' % (self._filename, tmp_dot))
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVJpegDoc
#
#------------------------------------------------------------------------------
class GVJpegDoc(GVDocBase):
    """ GVDoc implementation that generates a .jpg file using Graphviz. """

    def __init__(self, options, paper_style):
        # GV documentation allow multiple pages only for ps format,
        # which also includes pdf via ghostscript.
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVJpegDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".jpg"):
            self._filename += ".jpg"

        # Write the dot source to a temporary file for dot to consume.
        handle, tmp_dot = tempfile.mkstemp(".gv")
        with os.fdopen(handle, "wb") as tmp_file:
            tmp_file.write(self._dot.getvalue())

        # Render the JPEG, then remove the temporary dot file.
        os.system('dot -Tjpg -o"%s" "%s"' % (self._filename, tmp_dot))
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVGifDoc
#
#------------------------------------------------------------------------------
class GVGifDoc(GVDocBase):
    """ GVDoc implementation that generates a .gif file using Graphviz. """

    def __init__(self, options, paper_style):
        # GV documentation allow multiple pages only for ps format,
        # which also includes pdf via ghostscript.
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVGifDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".gif"):
            self._filename += ".gif"

        # Write the dot source to a temporary file for dot to consume.
        handle, tmp_dot = tempfile.mkstemp(".gv")
        with os.fdopen(handle, "wb") as tmp_file:
            tmp_file.write(self._dot.getvalue())

        # Render the GIF, then remove the temporary dot file.
        os.system('dot -Tgif -o"%s" "%s"' % (self._filename, tmp_dot))
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVPdfGvDoc
#
#------------------------------------------------------------------------------
class GVPdfGvDoc(GVDocBase):
    """ GVDoc implementation that generates a .pdf file using Graphviz. """

    def __init__(self, options, paper_style):
        # DPI must always be 72 for PDF.
        # GV documentation says dpi is only for image formats.
        options.menu.get_option_by_name('dpi').set_value(72)
        # GV documentation allow multiple pages only for ps format,
        # which also includes pdf via ghostscript.
        options.menu.get_option_by_name('v_pages').set_value(1)
        options.menu.get_option_by_name('h_pages').set_value(1)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVPdfGvDoc.close() """
        GVDocBase.close(self)

        # Append the expected extension when it is missing.
        if not self._filename.endswith(".pdf"):
            self._filename += ".pdf"

        # Write the dot source to a temporary file for dot to consume.
        handle, tmp_dot = tempfile.mkstemp(".gv")
        with os.fdopen(handle, "wb") as tmp_file:
            tmp_file.write(self._dot.getvalue())

        # Render the PDF, then remove the temporary dot file.
        os.system('dot -Tpdf -o"%s" "%s"' % (self._filename, tmp_dot))
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# GVPdfGsDoc
#
#------------------------------------------------------------------------------
class GVPdfGsDoc(GVDocBase):
    """ GVDoc implementation that generates a .pdf file using Ghostscript. """

    def __init__(self, options, paper_style):
        # DPI must always be 72 for PDF.
        # GV documentation says dpi is only for image formats.
        options.menu.get_option_by_name('dpi').set_value(72)
        GVDocBase.__init__(self, options, paper_style)

    def close(self):
        """ Implements GVPdfGsDoc.close() """
        GVDocBase.close(self)

        # Make sure the extension is correct
        if self._filename[-4:] != ".pdf":
            self._filename += ".pdf"

        # Create a temporary dot file
        (handle, tmp_dot) = tempfile.mkstemp(".gv")
        dotfile = os.fdopen(handle, "wb")
        dotfile.write(self._dot.getvalue())
        dotfile.close()

        # Create a temporary PostScript file
        (handle, tmp_ps) = tempfile.mkstemp(".ps")
        os.close(handle)

        # Generate PostScript using dot
        # Reason for using -Tps:cairo. Needed for Non Latin-1 letters
        # See bug tracker issue 2815
        # :cairo does not work with with multi-page See issue 4164
        # recent versions of Graphviz doesn't even try, just puts out a single
        # large page, so we use Ghostscript to split it up.
        command = 'dot -Tps:cairo -o"%s" "%s"' % (tmp_ps, tmp_dot)
        os.system(command)

        # Add .5 to remove rounding errors.
        paper_size = self._paper.get_size()
        width_pt = int((paper_size.get_width_inches() * 72) + .5)
        height_pt = int((paper_size.get_height_inches() * 72) + .5)

        # Single-page output: one straight PostScript-to-PDF conversion.
        if (self.vpages * self.hpages) == 1:
            # -dDEVICEWIDTHPOINTS=%d' -dDEVICEHEIGHTPOINTS=%d
            command = '%s -q -sDEVICE=pdfwrite -dNOPAUSE '\
                '-dDEVICEWIDTHPOINTS=%d -dDEVICEHEIGHTPOINTS=%d '\
                '-sOutputFile="%s" "%s" -c quit' % (
                    _GS_CMD, width_pt, height_pt, self._filename, tmp_ps)
            os.system(command)
            os.remove(tmp_ps)
            return

        # Margins (in centimeters) to pixels 72/2.54=28.345
        margin_t = int(28.345 * self._paper.get_top_margin())
        margin_b = int(28.345 * self._paper.get_bottom_margin())
        margin_r = int(28.345 * self._paper.get_right_margin())
        margin_l = int(28.345 * self._paper.get_left_margin())
        margin_x = margin_l + margin_r
        margin_y = margin_t + margin_b

        # Convert to PDF using ghostscript
        list_of_pieces = []
        # Walk the page grid in the order dictated by the pagedir option
        # ('L'/'B' select ascending column/row order respectively).
        x_rng = range(1, self.hpages + 1) if 'L' in self.pagedir \
            else range(self.hpages, 0, -1)
        y_rng = range(1, self.vpages + 1) if 'B' in self.pagedir \
            else range(self.vpages, 0, -1)
        if self.pagedir[0] in 'TB':
            # Row-major: vary x fastest.
            the_list = ((__x, __y) for __y in y_rng for __x in x_rng)
        else:
            # Column-major: vary y fastest.
            the_list = ((__x, __y) for __x in x_rng for __y in y_rng)

        for __x, __y in the_list:
            # Split the PS file into single-page PDF pieces by shifting
            # the page offset one tile at a time.
            page_offset_x = (__x - 1) * (margin_x - width_pt)
            page_offset_y = (__y - 1) * (margin_y - height_pt)
            tmp_pdf_piece = "%s_%d_%d.pdf" % (tmp_ps, __x, __y)
            list_of_pieces.append(tmp_pdf_piece)
            # Generate Ghostscript code
            command = '%s -q -dBATCH -dNOPAUSE -dSAFER -g%dx%d '\
                '-sOutputFile="%s" -r72 -sDEVICE=pdfwrite '\
                '-c "<</.HWMargins [%d %d %d %d] /PageOffset [%d %d]>> '\
                'setpagedevice" -f "%s"' % (
                    _GS_CMD, width_pt + 10, height_pt + 10, tmp_pdf_piece,
                    margin_l, margin_b, margin_r, margin_t,
                    page_offset_x + 5, page_offset_y + 5, tmp_ps)
            # Execute Ghostscript
            os.system(command)

        # Merge pieces to single multipage PDF ;
        # NOTE(review): this step spells the switch '-sOUTPUTFILE' while the
        # single-page path above uses '-sOutputFile'; Ghostscript documents
        # its parameter names as case-sensitive -- confirm the uppercase
        # form is intentional.
        command = '%s -q -dBATCH -dNOPAUSE '\
            '-sOUTPUTFILE="%s" -r72 -sDEVICE=pdfwrite %s '\
            % (_GS_CMD, self._filename, ' '.join(list_of_pieces))
        os.system(command)

        # Clean temporary files
        os.remove(tmp_ps)
        for tmp_pdf_piece in list_of_pieces:
            os.remove(tmp_pdf_piece)
        os.remove(tmp_dot)
#------------------------------------------------------------------------------
#
# Various Graphviz formats.
#
#------------------------------------------------------------------------------
# Registry of the available Graphviz output formats.  Each entry records
# the format's type key, file extension, translated description, MIME
# type, and the GVDoc subclass that implements it.  Formats requiring the
# external tools are added only when those tools were found above.
FORMATS = []

if _DOT_FOUND:
    if _GS_CMD != "":
        # Ghostscript-based PDF supports multi-page output.
        FORMATS += [{'type' : "gspdf",
                     'ext'  : "pdf",
                     'descr': _("PDF (Ghostscript)"),
                     'mime' : "application/pdf",
                     'class': GVPdfGsDoc}]

    FORMATS += [{'type' : "gvpdf",
                 'ext'  : "pdf",
                 'descr': _("PDF (Graphviz)"),
                 'mime' : "application/pdf",
                 'class': GVPdfGvDoc}]

    FORMATS += [{'type' : "ps",
                 'ext'  : "ps",
                 'descr': _("PostScript"),
                 'mime' : "application/postscript",
                 'class': GVPsDoc}]

    FORMATS += [{'type' : "svg",
                 'ext'  : "svg",
                 'descr': _("Structured Vector Graphics (SVG)"),
                 'mime' : "image/svg",
                 'class': GVSvgDoc}]

    FORMATS += [{'type' : "svgz",
                 'ext'  : "svgz",
                 'descr': _("Compressed Structured Vector Graphs (SVGZ)"),
                 'mime' : "image/svgz",
                 'class': GVSvgzDoc}]

    FORMATS += [{'type' : "jpg",
                 'ext'  : "jpg",
                 'descr': _("JPEG image"),
                 'mime' : "image/jpeg",
                 'class': GVJpegDoc}]

    FORMATS += [{'type' : "gif",
                 'ext'  : "gif",
                 'descr': _("GIF image"),
                 'mime' : "image/gif",
                 'class': GVGifDoc}]

    FORMATS += [{'type' : "png",
                 'ext'  : "png",
                 'descr': _("PNG image"),
                 'mime' : "image/png",
                 'class': GVPngDoc}]

# The raw dot file needs no external tool, so it is always available.
FORMATS += [{'type' : "dot",
             'ext'  : "gv",
             'descr': _("Graphviz File"),
             'mime' : "text/x-graphviz",
             'class': GVDotDoc}]
|
prculley/gramps
|
gramps/gen/plug/docgen/graphdoc.py
|
Python
|
gpl-2.0
| 41,093
|
[
"Brian"
] |
a2de670824f2c99c1785258eff3be84993e6a54f773606a72575c0422b4c69ac
|
#!/usr/bin/env python
"""Test script to check for required functionality.
Execute this code at the command line by typing:
python swc-installation-test-2.py
Run the script and follow the instructions it prints at the end.
This script requires at least Python 2.6. You can check the version
of Python that you have installed with 'swc-installation-test-1.py'.
By default, this script will test for all the dependencies your
instructor thinks you need. If you want to test for a different set
of packages, you can list them on the command line. For example:
python swc-installation-test-2.py git virtual-editor
This is useful if the original test told you to install a more recent
version of a particular dependency, and you just want to re-test that
dependency.
"""
from __future__ import print_function # for Python 2.6 compatibility
import distutils.ccompiler as _distutils_ccompiler
import fnmatch as _fnmatch
try:  # Python 2.7 and 3.x
    import importlib as _importlib
except ImportError:  # Python 2.6 and earlier
    class _Importlib (object):
        """Minimal workarounds for functions we need
        """
        @staticmethod
        def import_module(name):
            # __import__('a.b.c') returns the top-level package 'a';
            # walk the remaining dotted components to reach the leaf.
            module = __import__(name)
            for n in name.split('.')[1:]:
                module = getattr(module, n)
            return module
    # Expose the shim under the same name the real module would use.
    _importlib = _Importlib()
import logging as _logging
import os as _os
import platform as _platform
import re as _re
import shlex as _shlex
import subprocess as _subprocess
import sys as _sys
try:  # Python 3.x
    import urllib.parse as _urllib_parse
except ImportError:  # Python 2.x
    import urllib as _urllib_parse  # for quote()

if not hasattr(_shlex, 'quote'):  # Python versions older than 3.3
    # Use the undocumented pipes.quote()
    import pipes as _pipes
    _shlex.quote = _pipes.quote
__version__ = '0.1'
# Comment out any entries you don't need
CHECKS = [
# Shell
'virtual-shell',
# Editors
'virtual-editor',
# Browsers
'virtual-browser',
# Version control
'git',
'hg', # Command line tool
#'mercurial', # Python package
'EasyMercurial',
# Build tools and packaging
'make',
'virtual-pypi-installer',
'setuptools',
#'xcode',
# Testing
'nosetests', # Command line tool
'nose', # Python package
'py.test', # Command line tool
'pytest', # Python package
# SQL
'sqlite3', # Command line tool
'sqlite3-python', # Python package
# Python
'python',
'ipython', # Command line tool
'IPython', # Python package
'argparse', # Useful for utility scripts
'numpy',
'scipy',
'matplotlib',
'pandas',
'sympy',
'Cython',
'networkx',
'mayavi.mlab',
]
CHECKER = {}

# Filesystem root used below to locate application bundles (e.g.
# /Applications/... on OS X, c:\ on Windows).
# BUG FIX: the original compared ``_platform.system() == 'win32'``, but
# platform.system() returns 'Windows' on Windows; it is sys.platform that
# yields 'win32'.  The comparison therefore never matched and the Windows
# root was never used.
_ROOT_PATH = _os.sep
if _sys.platform == 'win32':
    _ROOT_PATH = 'c:\\'
class InvalidCheck (KeyError):
    """Raised when the user requests a check with no registered checker.

    Unlike a plain KeyError, ``str()`` yields just the unknown check name,
    so it can be echoed back to the user without quoting artifacts.
    """

    def __init__(self, check):
        KeyError.__init__(self, check)
        self.check = check

    def __str__(self):
        return self.check
class DependencyError (Exception):
    """A dependency check failed.

    Carries the failing ``checker``, a human-readable ``message``, and an
    optional list of underlying ``causes`` (other DependencyErrors).
    ``str()`` renders a multi-line report including a platform-specific
    installation URL looked up via :meth:`get_url`.
    """
    # Fallback URL when no (system, version, package) pattern matches.
    _default_url = 'http://software-carpentry.org/setup/'
    _setup_urls = { # (system, version, package) glob pairs
        ('*', '*', 'Cython'): 'http://docs.cython.org/src/quickstart/install.html',
        ('Linux', '*', 'EasyMercurial'): 'http://easyhg.org/download.html#download-linux',
        ('Darwin', '*', 'EasyMercurial'): 'http://easyhg.org/download.html#download-mac',
        ('Windows', '*', 'EasyMercurial'): 'http://easyhg.org/download.html#download-windows',
        ('*', '*', 'EasyMercurial'): 'http://easyhg.org/download.html',
        ('*', '*', 'argparse'): 'https://pypi.python.org/pypi/argparse#installation',
        ('*', '*', 'ash'): 'http://www.in-ulm.de/~mascheck/various/ash/',
        ('*', '*', 'bash'): 'http://www.gnu.org/software/bash/manual/html_node/Basic-Installation.html#Basic-Installation',
        ('Linux', '*', 'chromium'): 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions',
        ('Darwin', '*', 'chromium'): 'http://code.google.com/p/chromium/wiki/MacBuildInstructions',
        ('Windows', '*', 'chromium'): 'http://www.chromium.org/developers/how-tos/build-instructions-windows',
        ('*', '*', 'chromium'): 'http://www.chromium.org/developers/how-tos',
        ('Windows', '*', 'emacs'): 'http://www.gnu.org/software/emacs/windows/Installing-Emacs.html',
        ('*', '*', 'emacs'): 'http://www.gnu.org/software/emacs/#Obtaining',
        ('*', '*', 'firefox'): 'http://www.mozilla.org/en-US/firefox/new/',
        ('Linux', '*', 'gedit'): 'http://www.linuxfromscratch.org/blfs/view/svn/gnome/gedit.html',
        ('*', '*', 'git'): 'http://git-scm.com/downloads',
        ('*', '*', 'google-chrome'): 'https://www.google.com/intl/en/chrome/browser/',
        ('*', '*', 'hg'): 'http://mercurial.selenic.com/',
        ('*', '*', 'mercurial'): 'http://mercurial.selenic.com/',
        ('*', '*', 'IPython'): 'http://ipython.org/install.html',
        ('*', '*', 'ipython'): 'http://ipython.org/install.html',
        ('*', '*', 'jinja'): 'http://jinja.pocoo.org/docs/intro/#installation',
        ('*', '*', 'kate'): 'http://kate-editor.org/get-it/',
        ('*', '*', 'make'): 'http://www.gnu.org/software/make/',
        ('Darwin', '*', 'matplotlib'): 'http://matplotlib.org/users/installing.html#building-on-osx',
        ('Windows', '*', 'matplotlib'): 'http://matplotlib.org/users/installing.html#installing-on-windows',
        ('*', '*', 'matplotlib'): 'http://matplotlib.org/users/installing.html#installing',
        ('*', '*', 'mayavi.mlab'): 'http://docs.enthought.com/mayavi/mayavi/installation.html',
        ('*', '*', 'nano'): 'http://www.nano-editor.org/dist/latest/faq.html#3',
        ('*', '*', 'networkx'): 'http://networkx.github.com/documentation/latest/install.html#installing',
        ('*', '*', 'nose'): 'https://nose.readthedocs.org/en/latest/#installation-and-quick-start',
        ('*', '*', 'nosetests'): 'https://nose.readthedocs.org/en/latest/#installation-and-quick-start',
        ('*', '*', 'notepad++'): 'http://notepad-plus-plus.org/download/v6.3.html',
        ('*', '*', 'numpy'): 'http://docs.scipy.org/doc/numpy/user/install.html',
        ('*', '*', 'pandas'): 'http://pandas.pydata.org/pandas-docs/stable/install.html',
        ('*', '*', 'pip'): 'http://www.pip-installer.org/en/latest/installing.html',
        ('*', '*', 'pytest'): 'http://pytest.org/latest/getting-started.html',
        ('*', '*', 'python'): 'http://www.python.org/download/releases/2.7.3/#download',
        ('*', '*', 'pyzmq'): 'https://github.com/zeromq/pyzmq/wiki/Building-and-Installing-PyZMQ',
        ('*', '*', 'py.test'): 'http://pytest.org/latest/getting-started.html',
        ('Linux', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy/Linux',
        ('Darwin', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy/Mac_OS_X',
        ('Windows', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy/Windows',
        ('*', '*', 'scipy'): 'http://www.scipy.org/Installing_SciPy',
        ('*', '*', 'setuptools'): 'https://pypi.python.org/pypi/setuptools#installation-instructions',
        ('*', '*', 'sqlite3'): 'http://www.sqlite.org/download.html',
        ('*', '*', 'sublime-text'): 'http://www.sublimetext.com/2',
        ('*', '*', 'sympy'): 'http://docs.sympy.org/dev/install.html',
        ('Darwin', '*', 'textmate'): 'http://macromates.com/',
        ('Darwin', '*', 'textwrangler'): 'http://www.barebones.com/products/textwrangler/download.html',
        ('*', '*', 'tornado'): 'http://www.tornadoweb.org/',
        ('*', '*', 'vim'): 'http://www.vim.org/download.php',
        ('Darwin', '*', 'xcode'): 'https://developer.apple.com/xcode/',
        ('*', '*', 'xemacs'): 'http://www.us.xemacs.org/Install/',
        ('*', '*', 'zsh'): 'http://www.zsh.org/',
        }
    def _get_message(self):
        return self._message
    def _set_message(self, message):
        self._message = message
    # Exception.message was removed in Python 3; expose it as a property so
    # both __init__ below and callers can read/write ``.message`` uniformly.
    message = property(_get_message, _set_message)
    def __init__(self, checker, message, causes=None):
        super(DependencyError, self).__init__(message)
        self.checker = checker
        self.message = message
        if causes is None:
            causes = []
        self.causes = causes
    def get_url(self):
        """Return the most specific setup URL for this failure.

        Matches (system, version, package) against the glob patterns in
        ``_setup_urls``; returns ``_default_url`` when nothing matches.
        """
        system = _platform.system()
        version = None
        # Probe the platform-specific version functions; the first one that
        # returns a non-empty value wins.
        for pversion in (
                'linux_distribution',
                'mac_ver',
                'win32_ver',
                ):
            value = getattr(_platform, pversion)()
            if value[0]:
                version = value[0]
                break
        # NOTE(review): if no probe matched, ``version`` stays None and the
        # fnmatch call below would raise on it -- presumably one probe always
        # matches on supported platforms; confirm.
        package = self.checker.name
        for (s,v,p),url in self._setup_urls.items():
            if (_fnmatch.fnmatch(system, s) and
                    _fnmatch.fnmatch(version, v) and
                    _fnmatch.fnmatch(package, p)):
                return url
        return self._default_url
    def __str__(self):
        # Multi-line report: headline, message, setup URL, then any causes
        # indented beneath a 'causes:' label.
        url = self.get_url()
        lines = [
            'check for {0} failed:'.format(self.checker.full_name()),
            '  ' + self.message,
            '  For instructions on installing an up-to-date version, see',
            '  ' + url,
            ]
        if self.causes:
            lines.append('  causes:')
            for cause in self.causes:
                lines.extend('  ' + line for line in str(cause).splitlines())
        return '\n'.join(lines)
def check(checks=None):
    """Run the named checks and print a pass/fail report.

    checks -- iterable of check names (keys of CHECKER); defaults to the
              module-level CHECKS list when falsy.
    Returns True when every check passed, False otherwise.
    Raises InvalidCheck for an unrecognized check name.
    """
    successes = []
    failures = []
    if not checks:
        checks = CHECKS
    # NOTE: the loop variable deliberately shadows the function name; the
    # function object is not needed inside the loop.
    for check in checks:
        try:
            checker = CHECKER[check]
        except KeyError as e:
            raise InvalidCheck(check)# from e
        _sys.stdout.write('check {0}...\t'.format(checker.full_name()))
        try:
            version = checker.check()
        except DependencyError as e:
            failures.append(e)
            _sys.stdout.write('fail\n')
        else:
            _sys.stdout.write('pass\n')
            successes.append((checker, version))
    if successes:
        print('\nSuccesses:\n')
        for checker,version in successes:
            print('{0} {1}'.format(
                    checker.full_name(),
                    version or 'unknown'))
    if failures:
        print('\nFailures:')
        # Deduplicate: shared causes can surface the same error twice.
        printed = []
        for failure in failures:
            if failure not in printed:
                print()
                print(failure)
                printed.append(failure)
        return False
    return True
class Dependency (object):
    """Base class for a single installable dependency.

    Subclasses implement ``_get_version`` (and optionally
    ``_get_parsed_version``).  A dependency may require *all* of
    ``and_dependencies`` and *at least one* of ``or_dependencies``; both
    lists may mix Dependency instances and CHECKER key strings.
    """
    def __init__(self, name, long_name=None, minimum_version=None,
                 version_delimiter='.', and_dependencies=None,
                 or_dependencies=None):
        self.name = name
        self.long_name = long_name or name
        self.minimum_version = minimum_version
        self.version_delimiter = version_delimiter
        if not and_dependencies:
            and_dependencies = []
        self.and_dependencies = and_dependencies
        if not or_dependencies:
            or_dependencies = []
        self.or_dependencies = or_dependencies
        # Cached DependencyError from a previous failed check (see check()).
        self._check_error = None
    def __str__(self):
        return '<{0} {1}>'.format(type(self).__name__, self.name)
    def full_name(self):
        """Return 'long_name (name)' or just 'name' when they coincide."""
        if self.name == self.long_name:
            return self.name
        else:
            return '{0} ({1})'.format(self.long_name, self.name)
    def check(self):
        """Verify this dependency; return its version string or raise.

        A failure is cached so repeated checks re-raise the same
        DependencyError without re-running subprocesses/imports.
        """
        if self._check_error:
            raise self._check_error
        try:
            self._check_dependencies()
            return self._check()
        except DependencyError as e:
            self._check_error = e # cache for future calls
            raise
    def _check_dependencies(self):
        """Check all AND-dependencies and at least one OR-dependency."""
        for dependency in self.and_dependencies:
            # Strings are CHECKER keys; resolve them to checker objects.
            if not hasattr(dependency, 'check'):
                dependency = CHECKER[dependency]
            try:
                dependency.check()
            except DependencyError as e:
                raise DependencyError(
                    checker=self,
                    message=(
                        'some dependencies for {0} were not satisfied'
                        ).format(self.full_name()),
                    causes=[e])
        # Record which OR-dependency satisfied us (used by VirtualDependency).
        self.or_pass = None
        or_errors = []
        for dependency in self.or_dependencies:
            if not hasattr(dependency, 'check'):
                dependency = CHECKER[dependency]
            try:
                version = dependency.check()
            except DependencyError as e:
                or_errors.append(e)
            else:
                self.or_pass = {
                    'dependency': dependency,
                    'version': version,
                    }
                break # no need to test other dependencies
        if self.or_dependencies and not self.or_pass:
            raise DependencyError(
                checker=self,
                message=(
                    '{0} requires at least one of the following dependencies'
                    ).format(self.full_name()),
                causes=or_errors)
    def _check(self):
        """Fetch the version and enforce ``minimum_version`` if set."""
        version = self._get_version()
        parsed_version = None
        # Subclasses may provide a pre-parsed, comparable version object.
        if hasattr(self, '_get_parsed_version'):
            parsed_version = self._get_parsed_version()
        if self.minimum_version:
            self._check_version(version=version, parsed_version=parsed_version)
        return version
    def _get_version(self):
        # Abstract: subclasses must return a version string (or None).
        raise NotImplementedError(self)
    def _minimum_version_string(self):
        """Render the minimum-version tuple for error messages."""
        return self.version_delimiter.join(
            str(part) for part in self.minimum_version)
    def _check_version(self, version, parsed_version=None):
        """Raise DependencyError when the version is below the minimum."""
        if not parsed_version:
            parsed_version = self._parse_version(version=version)
        if not parsed_version or parsed_version < self.minimum_version:
            raise DependencyError(
                checker=self,
                message='outdated version of {0}: {1} (need >= {2})'.format(
                    self.full_name(), version, self._minimum_version_string()))
    def _parse_version(self, version):
        """Split a version string into a tuple of ints for comparison."""
        if not version:
            return None
        parsed_version = []
        for part in version.split(self.version_delimiter):
            try:
                parsed_version.append(int(part))
            except ValueError as e:
                raise DependencyError(
                    checker=self,
                    message=(
                        'unparsable {0!r} in version {1} of {2}, (need >= {3})'
                        ).format(
                        part, version, self.full_name(),
                        self._minimum_version_string()))# from e
        return tuple(parsed_version)
class PythonDependency (Dependency):
    """Check the version of the Python interpreter running this script."""

    def __init__(self, name='python', long_name='Python version',
                 minimum_version=(2, 6), **kwargs):
        # Fold our defaults into kwargs, then defer to the base class.
        kwargs.update(
            name=name, long_name=long_name, minimum_version=minimum_version)
        super(PythonDependency, self).__init__(**kwargs)

    def _get_version(self):
        # Full human-readable interpreter version string.
        return _sys.version

    def _get_parsed_version(self):
        # sys.version_info compares correctly against version tuples.
        return _sys.version_info

CHECKER['python'] = PythonDependency()
class CommandDependency (Dependency):
    """Check for a command-line program by running it and parsing its version.

    The command is invoked with ``version_options`` (``--version`` by
    default) and the version number is extracted from the stream named by
    ``version_stream`` using ``version_regexp``.  ``paths`` lists extra
    absolute paths to try when the bare command is not on PATH.
    """
    # Executable suffix for the host platform ('.exe' on Windows, None
    # elsewhere), as reported by distutils' compiler machinery.
    exe_extension = _distutils_ccompiler.new_compiler().exe_extension
    def __init__(self, command, paths=None, version_options=('--version',),
                 stdin=None, version_regexp=None, version_stream='stdout',
                 **kwargs):
        if 'name' not in kwargs:
            kwargs['name'] = command
        super(CommandDependency, self).__init__(**kwargs)
        self.command = command
        self.paths = paths
        self.version_options = version_options
        # BUG FIX: the original assigned ``self.stdin = None``, silently
        # discarding the ``stdin`` argument, so checkers that must feed the
        # command input (cf. MakeDependency) could never use it via __init__.
        self.stdin = stdin
        if not version_regexp:
            # Default: first run of digits/delimiters, e.g. '1.7.10'.
            regexp = r'([\d][\d{0}]*[\d])'.format(self.version_delimiter)
            version_regexp = _re.compile(regexp)
        self.version_regexp = version_regexp
        self.version_stream = version_stream
    def _get_command_version_stream(self, command=None, stdin=None,
                                    expect=(0,)):
        """Run ``command`` and return the configured output stream's text.

        Raises DependencyError when the executable is missing, exits with a
        status not in ``expect``, or produces no output on the configured
        ``version_stream``.
        """
        if command is None:
            command = self.command + (self.exe_extension or '')
        if not stdin:
            stdin = self.stdin
        # Only open a stdin pipe when there is data to feed the command.
        if stdin:
            popen_stdin = _subprocess.PIPE
        else:
            popen_stdin = None
        try:
            p = _subprocess.Popen(
                [command] + list(self.version_options), stdin=popen_stdin,
                stdout=_subprocess.PIPE, stderr=_subprocess.PIPE,
                universal_newlines=True)
        except OSError as e:
            raise DependencyError(
                checker=self,
                message="could not find '{0}' executable".format(command),
                )# from e
        stdout,stderr = p.communicate(stdin)
        status = p.wait()
        if status not in expect:
            lines = [
                "failed to execute: {0} {1}".format(
                    command,
                    ' '.join(_shlex.quote(arg)
                             for arg in self.version_options)),
                'status: {0}'.format(status),
                ]
            for name,string in [('stdout', stdout), ('stderr', stderr)]:
                if string:
                    lines.extend([name + ':', string])
            raise DependencyError(checker=self, message='\n'.join(lines))
        for name,string in [('stdout', stdout), ('stderr', stderr)]:
            if name == self.version_stream:
                if not string:
                    raise DependencyError(
                        checker=self,
                        message='empty version stream on {0} for {1}'.format(
                            self.version_stream, command))
                return string
        raise NotImplementedError(self.version_stream)
    def _get_version_stream(self, **kwargs):
        """Try each candidate location for the command; return first stream.

        Collects per-path DependencyErrors as causes when every candidate
        fails.
        """
        paths = [self.command + (self.exe_extension or '')]
        if self.exe_extension:
            paths.append(self.command) # also look at the extension-less path
        if self.paths:
            paths.extend(self.paths)
        or_errors = []
        for path in paths:
            try:
                return self._get_command_version_stream(command=path, **kwargs)
            except DependencyError as e:
                or_errors.append(e)
        raise DependencyError(
            checker=self,
            message='errors finding {0} version'.format(
                self.full_name()),
            causes=or_errors)
    def _get_version(self):
        """Extract the version number from the command's output."""
        version_stream = self._get_version_stream()
        match = self.version_regexp.search(version_stream)
        if not match:
            raise DependencyError(
                checker=self,
                message='no version string in output:\n{0}'.format(
                    version_stream))
        return match.group(1)
def _program_files_paths(*args):
    """Build candidate MS Windows 'Program Files' paths for *args*.

    Returns the path under %ProgramFiles% and, when it differs, the path
    under %ProgramFiles(x86)% as well.  '/usr/bin' is the fallback when the
    environment variables are absent (non-Windows hosts).
    """
    primary = _os.environ.get('ProgramFiles', '/usr/bin')
    secondary = _os.environ.get('ProgramFiles(x86)', primary)
    candidates = [_os.path.join(primary, *args)]
    if secondary != primary:
        candidates.append(_os.path.join(secondary, *args))
    return candidates
# Register one CommandDependency per external command-line tool.  Each tuple
# is (command, long_name, minimum_version, extra search paths); ``paths``
# supplies MS Windows install locations for GUI apps not on PATH.
for command,long_name,minimum_version,paths in [
        ('sh', 'Bourne Shell', None, None),
        ('ash', 'Almquist Shell', None, None),
        ('bash', 'Bourne Again Shell', None, None),
        ('csh', 'C Shell', None, None),
        ('ksh', 'KornShell', None, None),
        ('dash', 'Debian Almquist Shell', None, None),
        ('tcsh', 'TENEX C Shell', None, None),
        ('zsh', 'Z Shell', None, None),
        ('git', 'Git', (1, 7, 0), None),
        ('hg', 'Mercurial', (2, 0, 0), None),
        ('EasyMercurial', None, (1, 3), None),
        ('pip', None, None, None),
        ('sqlite3', 'SQLite 3', None, None),
        ('nosetests', 'Nose', (1, 0, 0), None),
        ('ipython', 'IPython script', (0, 13), None),
        ('emacs', 'Emacs', None, None),
        ('xemacs', 'XEmacs', None, None),
        ('vim', 'Vim', None, None),
        ('vi', None, None, None),
        ('nano', 'Nano', None, None),
        ('gedit', None, None, None),
        ('kate', 'Kate', None, None),
        ('notepad++', 'Notepad++', None,
         _program_files_paths('Notepad++', 'notepad++.exe')),
        ('firefox', 'Firefox', None,
         _program_files_paths('Mozilla Firefox', 'firefox.exe')),
        ('google-chrome', 'Google Chrome', None,
         _program_files_paths('Google', 'Chrome', 'Application', 'chrome.exe')
         ),
        ('chromium', 'Chromium', None, None),
        ]:
    if not long_name:
        long_name = command
    CHECKER[command] = CommandDependency(
        command=command, paths=paths, long_name=long_name,
        minimum_version=minimum_version)
del command, long_name, minimum_version, paths # cleanup namespace
class MakeDependency (CommandDependency):
    """Check for ``make``, falling back to a probe Makefile.

    Some make implementations do not support ``--version``; in that case we
    feed them a tiny Makefile on stdin that echoes MAKE_VERSION (GNU make)
    or at least MAKE, proving a working make exists.
    """
    # Probe Makefile fed on stdin by the fallback path below.
    makefile = '\n'.join([
            'all:',
            '\t@echo "MAKE_VERSION=$(MAKE_VERSION)"',
            '\t@echo "MAKE=$(MAKE)"',
            '',
            ])
    def _get_version(self):
        try:
            return super(MakeDependency, self)._get_version()
        except DependencyError as e:
            # --version failed; temporarily switch to reading the Makefile
            # from stdin ('-f -') and parse the KEY=value lines it echoes.
            version_options = self.version_options
            self.version_options = ['-f', '-']
            try:
                stream = self._get_version_stream(stdin=self.makefile)
                info = {}
                for line in stream.splitlines():
                    try:
                        key,value = line.split('=', 1)
                    except ValueError as ve:
                        # Unexpected output: surface the original failure.
                        raise e# from NotImplementedError(stream)
                    info[key] = value
                if info.get('MAKE_VERSION', None):
                    return info['MAKE_VERSION']
                elif info.get('MAKE', None):
                    # Non-GNU make: it works but reports no version.
                    return None
                raise e
            finally:
                # Always restore the normal version options.
                self.version_options = version_options
CHECKER['make'] = MakeDependency(command='make', minimum_version=None)
class EasyInstallDependency (CommandDependency):
    """Check for easy_install, tolerating its lack of ``--version``.

    Old Setuptools easy_install exits 1 and complains on stderr when asked
    for its version; treat that specific complaint as success.
    """
    def _get_version(self):
        try:
            return super(EasyInstallDependency, self)._get_version()
        except DependencyError as e:
            # Retry reading stderr and accepting exit status 1.
            version_stream = self.version_stream
            try:
                self.version_stream = 'stderr'
                stream = self._get_version_stream(expect=(1,))
                if 'option --version not recognized' in stream:
                    return 'unknown (possibly Setuptools?)'
                # NOTE(review): when the complaint does not match, this
                # falls through and implicitly returns None instead of
                # re-raising ``e`` -- confirm that is intended.
            finally:
                self.version_stream = version_stream
CHECKER['easy_install'] = EasyInstallDependency(
    command='easy_install', long_name='Setuptools easy_install',
    minimum_version=None)
# py.test prints its version on stderr, hence version_stream='stderr'.
CHECKER['py.test'] = CommandDependency(
    command='py.test', version_stream='stderr',
    minimum_version=None)
class PathCommandDependency (CommandDependency):
    """A command that doesn't support --version or equivalent options

    On some operating systems (e.g. OS X), a command's executable may
    be hard to find, or not exist in the PATH.  Work around that by
    just checking for the existence of a characteristic file or
    directory.  Since the characteristic path may depend on OS,
    installed version, etc., take a list of paths, and succeed if any
    of them exists.
    """
    def _get_command_version_stream(self, *args, **kwargs):
        # Not applicable: there is no command to run.
        raise NotImplementedError()
    def _get_version_stream(self, *args, **kwargs):
        # Not applicable: there is no output stream to parse.
        raise NotImplementedError()
    def _get_version(self):
        """Succeed (returning None) if any characteristic path exists."""
        for path in self.paths:
            if _os.path.exists(path):
                # Existence is all we can verify; no version is available.
                return None
        raise DependencyError(
            checker=self,
            message=(
                'nothing exists at any of the expected paths for {0}:\n    {1}'
                ).format(
                self.full_name(),
                '\n    '.join(p for p in self.paths)))
# Register PathCommandDependency checkers for GUI applications that are
# detected by the presence of their install directory (mostly OS X .app
# bundles under _ROOT_PATH).
for paths,name,long_name in [
        ([_os.path.join(_ROOT_PATH, 'Applications', 'Sublime Text 2.app')],
         'sublime-text', 'Sublime Text'),
        ([_os.path.join(_ROOT_PATH, 'Applications', 'TextMate.app')],
         'textmate', 'TextMate'),
        ([_os.path.join(_ROOT_PATH, 'Applications', 'TextWrangler.app')],
         'textwrangler', 'TextWrangler'),
        ([_os.path.join(_ROOT_PATH, 'Applications', 'Safari.app')],
         'safari', 'Safari'),
        ([_os.path.join(_ROOT_PATH, 'Applications', 'Xcode.app'), # OS X >=1.7
          _os.path.join(_ROOT_PATH, 'Developer', 'Applications', 'Xcode.app'
                        ) # OS X 1.6,
          ],
         'xcode', 'Xcode'),
        ]:
    if not long_name:
        long_name = name
    CHECKER[name] = PathCommandDependency(
        command=None, paths=paths, name=name, long_name=long_name)
del paths, name, long_name # cleanup namespace
class PythonPackageDependency (Dependency):
    """Check that a Python package can be imported (and read __version__).

    Automatically adds the 'python' checker as an AND-dependency so a
    package failure is never reported when the interpreter itself is the
    problem.
    """
    def __init__(self, package, **kwargs):
        if 'name' not in kwargs:
            kwargs['name'] = package
        if 'and_dependencies' not in kwargs:
            kwargs['and_dependencies'] = []
        if 'python' not in kwargs['and_dependencies']:
            kwargs['and_dependencies'].append('python')
        super(PythonPackageDependency, self).__init__(**kwargs)
        self.package = package
    def _get_version(self):
        package = self._get_package(self.package)
        return self._get_version_from_package(package)
    def _get_package(self, package):
        """Import and return the package, wrapping ImportError."""
        try:
            return _importlib.import_module(package)
        except ImportError as e:
            raise DependencyError(
                checker=self,
                message="could not import the '{0}' package for {1}".format(
                    package, self.full_name()),
                )# from e
    def _get_version_from_package(self, package):
        """Return package.__version__, or None when it is not defined."""
        try:
            version = package.__version__
        except AttributeError:
            version = None
        return version
# Register PythonPackageDependency checkers.  Each tuple is (import name,
# checker key, long name, minimum version, extra AND-dependencies).  Some
# minimum versions mirror the matching command-line checker so the two
# stay in sync.
for package,name,long_name,minimum_version,and_dependencies in [
        ('nose', None, 'Nose Python package',
         CHECKER['nosetests'].minimum_version, None),
        ('pytest', None, 'pytest Python package',
         CHECKER['py.test'].minimum_version, None),
        ('jinja2', 'jinja', 'Jinja', (2, 6), None),
        ('zmq', 'pyzmq', 'PyZMQ', (2, 1, 4), None),
        ('IPython', None, 'IPython Python package',
         CHECKER['ipython'].minimum_version, ['jinja', 'tornado', 'pyzmq']),
        ('argparse', None, 'Argparse', None, None),
        ('numpy', None, 'NumPy', None, None),
        ('scipy', None, 'SciPy', None, None),
        ('matplotlib', None, 'Matplotlib', None, None),
        ('pandas', None, 'Pandas', (0, 8), None),
        ('sympy', None, 'SymPy', None, None),
        ('Cython', None, None, None, None),
        ('networkx', None, 'NetworkX', None, None),
        ('mayavi.mlab', None, 'MayaVi', None, None),
        ('setuptools', None, 'Setuptools', None, None),
        ]:
    if not name:
        name = package
    if not long_name:
        long_name = name
    kwargs = {}
    if and_dependencies:
        kwargs['and_dependencies'] = and_dependencies
    CHECKER[name] = PythonPackageDependency(
        package=package, name=name, long_name=long_name,
        minimum_version=minimum_version, **kwargs)
# cleanup namespace
del package, name, long_name, minimum_version, and_dependencies, kwargs
class MercurialPythonPackage (PythonPackageDependency):
    """Version check for the Mercurial Python package.

    Mercurial moved its version API between releases, so probe both
    locations.
    """
    def _get_version(self):
        try: # mercurial >= 1.2
            package = _importlib.import_module('mercurial.util')
        except ImportError as e: # mercurial <= 1.1.2
            package = self._get_package('mercurial.version')
            return package.get_version()
        else:
            return package.version()
CHECKER['mercurial'] = MercurialPythonPackage(
    package='mercurial.util', name='mercurial',
    long_name='Mercurial Python package',
    minimum_version=CHECKER['hg'].minimum_version)
class TornadoPythonPackage (PythonPackageDependency):
    """Version check for Tornado, which exposes ``version`` rather than
    ``__version__`` and a pre-parsed ``version_info`` tuple."""
    def _get_version_from_package(self, package):
        return package.version
    def _get_parsed_version(self):
        package = self._get_package(self.package)
        return package.version_info
CHECKER['tornado'] = TornadoPythonPackage(
    package='tornado', name='tornado', long_name='Tornado', minimum_version=(2, 0))
class SQLitePythonPackage (PythonPackageDependency):
    """Check the sqlite3 stdlib module.

    sqlite3 ships with Python itself, so the interpreter's version stands
    in for the module's version.
    """
    def _get_version_from_package(self, package):
        return _sys.version
    def _get_parsed_version(self):
        return _sys.version_info
CHECKER['sqlite3-python'] = SQLitePythonPackage(
    package='sqlite3', name='sqlite3-python',
    long_name='SQLite Python package',
    minimum_version=CHECKER['sqlite3'].minimum_version)
class UserTaskDependency (Dependency):
    "Prompt the user to complete a task and check for success"
    def __init__(self, prompt, **kwargs):
        super(UserTaskDependency, self).__init__(**kwargs)
        # Text shown to the user before reading their response.
        self.prompt = prompt
    def _check(self):
        # input()/raw_input() differ between Python 3 and 2.
        if _sys.version_info >= (3, ):
            result = input(self.prompt)
        else: # Python 2.x
            result = raw_input(self.prompt)
        return self._check_result(result)
    def _check_result(self, result):
        # Abstract: subclasses decide whether the user's response/action
        # constitutes success.
        raise NotImplementedError()
class EditorTaskDependency (UserTaskDependency):
    """Verify the user has *some* working editor by having them create a
    file with known contents in their home directory."""
    def __init__(self, **kwargs):
        # File the user is asked to create, and the exact expected line.
        self.path = _os.path.expanduser(_os.path.join(
                '~', 'swc-installation-test.txt'))
        self.contents = 'Hello, world!'
        super(EditorTaskDependency, self).__init__(
            prompt=(
                'Open your favorite text editor and create the file\n'
                '  {0}\n'
                'containing the line:\n'
                '  {1}\n'
                'Press enter here after you have done this.\n'
                'You may remove the file after you have finished testing.'
                ).format(self.path, self.contents),
            **kwargs)
    def _check_result(self, result):
        """Succeed when the file exists and matches the expected contents."""
        message = None
        try:
            with open(self.path, 'r') as f:
                contents = f.read()
        except IOError as e:
            raise DependencyError(
                checker=self,
                message='could not open {0!r}: {1}'.format(self.path, e)
                )# from e
        # strip() tolerates trailing newlines/whitespace from the editor.
        if contents.strip() != self.contents:
            raise DependencyError(
                checker=self,
                message=(
                    'file contents ({0!r}) did not match the expected {1!r}'
                    ).format(contents, self.contents))
CHECKER['other-editor'] = EditorTaskDependency(
    name='other-editor', long_name='')
class VirtualDependency (Dependency):
    """A dependency satisfied by any one of its OR-dependencies.

    Reports which concrete dependency (set in ``or_pass`` by
    ``_check_dependencies``) satisfied the requirement, and its version.
    """
    def _check(self):
        return '{0} {1}'.format(
            self.or_pass['dependency'].full_name(),
            self.or_pass['version'])
# Register the virtual checkers: each requires at least one of the listed
# concrete checkers to pass (tried in order).
for name,long_name,dependencies in [
        ('virtual-shell', 'command line shell', (
            'bash',
            'dash',
            'ash',
            'zsh',
            'ksh',
            'csh',
            'tcsh',
            'sh',
            )),
        ('virtual-editor', 'text/code editor', (
            'emacs',
            'xemacs',
            'vim',
            'vi',
            'nano',
            'gedit',
            'kate',
            'notepad++',
            'sublime-text',
            'textmate',
            'textwrangler',
            'other-editor', # last because it requires user interaction
            )),
        ('virtual-browser', 'web browser', (
            'firefox',
            'google-chrome',
            'chromium',
            'safari',
            )),
        ('virtual-pypi-installer', 'PyPI installer', (
            'pip',
            'easy_install',
            )),
        ]:
    CHECKER[name] = VirtualDependency(
        name=name, long_name=long_name, or_dependencies=dependencies)
del name, long_name, dependencies # cleanup namespace
def _print_info(key, value, indent=19):
    """Print ``key`` and ``value`` with the colon aligned at column *indent*."""
    padding = ' ' * (indent - len(key))
    print('{0}{1}: {2}'.format(key, padding, value))
def print_system_info():
    """Print a diagnostic summary of the host platform and environment.

    Intended to be copied verbatim into an email to the instructor when
    checks fail for unclear reasons.
    """
    print("If you do not understand why the above failures occurred,")
    print("copy and send the *entire* output (all info above and summary")
    print("below) to the instructor for help.")
    print()
    print('==================')
    print('System information')
    print('==================')
    _print_info('os.name', _os.name)
    _print_info('os.uname', _platform.uname())
    _print_info('platform', _sys.platform)
    _print_info('platform+', _platform.platform())
    # Print whichever platform-specific version probe returns a value.
    for pversion in (
            'linux_distribution',
            'mac_ver',
            'win32_ver',
            ):
        value = getattr(_platform, pversion)()
        if value[0]:
            _print_info(pversion, value)
    _print_info('prefix', _sys.prefix)
    _print_info('exec_prefix', _sys.exec_prefix)
    _print_info('executable', _sys.executable)
    _print_info('version_info', _sys.version_info)
    _print_info('version', _sys.version)
    # Dump the whole environment: PATH and friends often explain failures.
    _print_info('environment', '')
    for key,value in sorted(_os.environ.items()):
        print('  {0}={1}'.format(key, value))
    print('==================')
def print_suggestions(instructor_fallback=True):
    """Print pointers for resolving failed checks.

    instructor_fallback -- when True, also tell the user to email the
                           script's output to their instructor.
    """
    print()
    print('For suggestions on installing missing packages, see')
    print('http://software-carpentry.org/setup/')
    print('')
    # Typo fix: the original printed 'instructings'.
    print('For instructions on installing a particular package,')
    print('see the failure message for that package printed above.')
    if instructor_fallback:
        print('')
        print('For help, email the *entire* output of this script to')
        print('your instructor.')
if __name__ == '__main__':
    # NOTE(review): optparse is deprecated in favor of argparse, but is kept
    # here -- presumably for compatibility with very old interpreters.
    import optparse as _optparse

    parser = _optparse.OptionParser(usage='%prog [options] [check...]')
    # Show the module docstring (usage notes) as the epilog, unwrapped.
    epilog = __doc__
    parser.format_epilog = lambda formatter: '\n' + epilog
    parser.add_option(
        '-v', '--verbose', action='store_true',
        help=('print additional information to help troubleshoot '
              'installation issues'))
    options,args = parser.parse_args()
    try:
        # Positional args select specific checks; empty means "run them all".
        passed = check(args)
    except InvalidCheck as e:
        # Unknown check name: list every registered checker and exit.
        print("I don't know how to check for {0!r}".format(e.check))
        print('I do know how to check for:')
        for key,checker in sorted(CHECKER.items()):
            if checker.long_name != checker.name:
                print('  {0} {1}({2})'.format(
                        key, ' '*(20-len(key)), checker.long_name))
            else:
                print('  {0}'.format(key))
        _sys.exit(1)
    if not passed:
        if options.verbose:
            print()
            print_system_info()
        print_suggestions(instructor_fallback=True)
        # Non-zero exit status signals failure to calling scripts.
        _sys.exit(1)
|
selimnairb/2014-02-25-swctest
|
setup/swc-installation-test-2.py
|
Python
|
bsd-2-clause
| 34,853
|
[
"Mayavi"
] |
1154dbe5c5e3a838fae8bfaf1a414abb7caad5a7dde31cb27634e1b4bc8fd1e6
|
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
####### Test replace hexa ###############
import hexablock

# Create a working document to hold all HEXABLOCK entities.
doc = hexablock.addDocument ("default")
# Hexa : Grid construction
# Build a 3x3x3 Cartesian grid of hexahedra from the origin along the unit
# axis vectors.
orig = doc.addVertex ( 0, 0, 0)
vx = doc.addVector ( 1 ,0, 0)
vy = doc.addVector ( 0, 1, 0)
vz = doc.addVector ( 0, 0, 1)
size_x = 3
size_y = 3
size_z = 3
grid = doc.makeCartesian (orig, vx, vy, vz, size_x, size_y, size_z)
# Three vertices on the top face of the grid (k = size_z) that the pattern
# below will be attached to.
c1 = grid.getVertexIJK (1, 2, size_z)
c2 = grid.getVertexIJK (1, 1, size_z)
c3 = grid.getVertexIJK (2, 1, size_z)
# Hexa : Hexa construction
# Inner square (pa1..pd1) and outer square (pa2..pd2) of the replacement
# pattern, both in the z = 0 plane.
pa1 = doc.addVertex (-1, -1, 0)
pb1 = doc.addVertex ( 1, -1, 0)
pc1 = doc.addVertex ( 1, 1, 0)
pd1 = doc.addVertex (-1, 1, 0)
pa2 = doc.addVertex (-2, -2, 0)
pb2 = doc.addVertex ( 2, -2, 0)
pc2 = doc.addVertex ( 2, 2, 0)
pd2 = doc.addVertex (-2, 2, 0)
# Edges of the inner square.
edab1 = doc.addEdge (pa1, pb1)
edbc1 = doc.addEdge (pb1, pc1)
edcd1 = doc.addEdge (pc1, pd1)
edda1 = doc.addEdge (pd1, pa1)
# Edges of the outer square.
edab2 = doc.addEdge (pa2, pb2)
edbc2 = doc.addEdge (pb2, pc2)
edcd2 = doc.addEdge (pc2, pd2)
edda2 = doc.addEdge (pd2, pa2)
# Spokes connecting inner corners to the matching outer corners.
edaa = doc.addEdge (pa1, pa2)
edbb = doc.addEdge (pb1, pb2)
edcc = doc.addEdge (pc1, pc2)
eddd = doc.addEdge (pd1, pd2)
# Quad pattern: the central quad plus the four ring quads between the
# inner and outer squares.
qpattern = []
qpattern.append (doc.addQuad (edab1, edbc1, edcd1, edda1))
qpattern.append (doc.addQuad (edab1, edbb, edab2, edaa))
qpattern.append (doc.addQuad (edbc1, edcc, edbc2, edbb))
qpattern.append (doc.addQuad (edcd1, eddd, edcd2, edcc))
qpattern.append (doc.addQuad (edda1, edaa, edda2, eddd))
##### doc.saveVtk ("replace0.vtk")
# Hexa replacement (on the grid)
# Map pattern vertices (pd2, pa2, pb2) onto grid vertices (c1, c2, c3),
# replacing the matching hexahedra of the grid with the pattern.
doc.replace (qpattern, pd2,c1, pa2,c2, pb2,c3)
##### doc.saveVtk ("replace1.vtk")
|
FedoraScientific/salome-hexablock
|
doc/test_doc/replace_hexa/replace_hexa.py
|
Python
|
lgpl-2.1
| 2,479
|
[
"VTK"
] |
dac358884476a746809c268a9edfa142447b3f192e40619df01af57e465aa3dc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Copyright 2015 Rasmus Scholer Sorensen, rasmusscholer@gmail.com
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=W0142,C0103,C0301,W0141
"""
Transformation libraries:
* http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
** Unofficial Github mirror: https://github.com/malcolmreynolds/transformations
** https://pythonhosted.org/MDAnalysis/documentation_pages/core/transformations.html
* https://code.google.com/p/gameobjects/
* Sage, http://www.sagemath.org/, https://vnoel.wordpress.com/2008/05/03/bye-matlab-hello-python-thanks-sage/
* math3d:
** http://git.automatics.dyndns.dk/?p=pymath3d.git
** Somewhat similar to Pyrr, uses numpy to provides intuitive wrappers for geometric computations.
* http://code.enthought.com/projects/mayavi/, http://docs.enthought.com/mayavi/mayavi/
* https://github.com/adamlwgriffiths/Pyrr This actually seems pretty intuitive!
** Basically just provides wrappers around numpy for geometric computations.
** https://pyrr.readthedocs.org/en/latest/api_geometry.html
* https://pypi.python.org/pypi/transforms3d
** https://github.com/matthew-brett/transforms3d
** Uses a lot of gohlke's transformations module.
* pypi.python.org/pypi/qmath (old, obsolete?)
** From 2012, but no home page or repository.
* pypi.python.org/pypi/se3
** https://github.com/ccorcos/se3
** New, from 2014. Also uses numpy, but much less mature than Pyrr/transforms3d/math3d
* http://cxc.harvard.edu/mta/ASPECT/tool_doc/pydocs/Quaternion.html
** Quaternion, from 2010.
* pypi.python.org/pypi/quaternion-algebra, bitbucket.org/sirex/quaternion
** From 2012.
* pypi.python.org/pypi/Pyternion
** github.com/philip-peterson/pyternion
** Uses standard python math module, no numpy.
* github.com/olsoneric/pedemath
* https://github.com/zonca/quaternionarray, pypi.python.org/pypi/quaternionarray
** From 2010,
* pypi.python.org/pypi/euclid, and pypi.python.org/pypi/euclid3 (py3k)
** Super old, from 2006.
2D only libs:
* Affine, https://pypi.python.org/pypi/affine, https://github.com/sgillies/affine (ONLY 2D!)
* http://toblerity.org/shapely/project.html
Stackoverflow:
* http://stackoverflow.com/questions/16083258/python-implementation-of-3d-rigid-body-translation-and-rotation
Educational:
* http://nghiaho.com/?page_id=671
* http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/Transformation3D.ipynb
** Very educational notebook with examples.
* http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MARBLE/high/pose/express.htm
* http://planning.cs.uiuc.edu/node99.html
* http://en.wikipedia.org/wiki/Transformation_matrix, http://en.wikipedia.org/wiki/Transformation_(function),
* http://demonstrations.wolfram.com/Understanding3DTranslation/
DNA python code:
* http://www.lfd.uci.edu/~gohlke/code/dnacurve.py.html
3D plotting:
* http://stackoverflow.com/questions/11140163/python-matplotlib-plotting-a-3d-cube-a-sphere-and-a-vector
** ()
Other code:
* ~/ipython-notebooks/3D_plot_test.ipynb
"""
import numpy as np
from numpy.core import pi
import pyrr
from pyrr import Vector3 #, Quaternion, Matrix44,
#from matplotlib import pyplot
# Units:
A = Angstrom = 0.1
nm = Nanometer = 1
degree = pi/180
import random
def make_operation_hash():
""" Cheap way to make a random hash. """
return random.randint(0, 2**32)
class Nucleotide(object):
"""
How to keep track of world position and orientation?
a) Using a 4x4 transformation matrix
b) Using a 3x3 transformation matrix plus a vector for the world position
A 3x3 transform includes rotation+shear+reflection, but no translation.
c) Using a position vector plus orientation quaternion.
http://en.wikibooks.org/wiki/GLSL_Programming/Applying_Matrix_Transformations
Considering the coordinate frame, you may want to refer to the current standards, e.g.
Olson, (...), Berman's "standard reference frame" from J.Mol.Bio 2001. (10.1006/jmbi.2001.4987)
* This seems to be focusing on the nucleobase and the base-pair. I'm not sure this standard's
standard frame origin aligns with the helix center.
NB: For B-DNA, shift and slide step parameters are both very close to zero: -0.02 Å and 0.23 Å
Local helical parameters x- and y-displacement are also small, 0.05 and 0.02.
Also:
* http://x3dna.org/articles/seeing-is-understanding-as-well-as-believing
"""
# Specify what is "up". I currently use z-up (to match the standard from [Berman 2001]).
# Previously, I used y-up.
helix_up = Vector3([0., 0., 1.])
# Defaults for how to go to the next nucleotide. Could also be part of the strand model...
params = {'rise': {'simple': 0.45 * Nanometer,
'ss': 0.42 * Nanometer,
'ds': 0.36 * Nanometer},
'twist': {'simple': 0 * degree,
'ss': 10 * degree,
'ds': 36 * degree},
}
#standard_ss_translation = helix_up * 0.42 * Nanometer
#standard_ds_translation = helix_up * 0.36 * Nanometer
#standard_ss_rotation = pyrr.Quaternion.from_z_rotation(np.pi*0/180)
#standard_ds_rotation = pyrr.Quaternion.from_z_rotation(np.pi*36/180)
# The transform between one nucleotide to the other.
# B-DNA has essentially no slide or roll, so just do a 180 rotation around the x-axis:
standard_bp_transform = pyrr.Quaternion.from_x_rotation(pi)
# [Berman 2001, Table 1, average of ATGC]
# http://www.sciencedirect.com/science/article/pii/S0022283601949873
atoms_coords = {'C1p': [-2.478, 5.375, 0.]}
standard_bpc_to_C1p_vector = Vector3([-2.478, 5.375, 0.]) * Angstrom
def standard_step_param(self, param, conformation=None, direction='3p'):
""" Return standard step parameter. """
if conformation is None:
conformation = 'simple'
if param in self.params:
return self.params[param][conformation]
if param == 'translation':
return (-1 if '5' in direction else 1) * self.params['rise'][conformation] * self.helix_up
if param == 'orientation':
# For now we just assume the rotation is a simple twist around the z-axis:
twist = (-1 if '5' in direction else 1) * self.params['twist'][conformation]
return pyrr.Quaternion.from_z_rotation(twist)
def __init__(self, position, orientation, strand=None, spec='N', conformation='simple'):
self.Strand = strand
self.Spec = spec
# Base position, direction and rotation of the nucleotide:
# Note: These could be calculated on-the-fly from local values.
self.Position = position # The position of the base (currently defined as the base-pair center, bpc)
# Currently, the base's direction is the same as it's vector.
#self.World_direction = direction # Vector of translation to the next bpc.
self.Orientation = orientation # Orientation quaternion.
# (x, y, z) Vector translation from the base of this nucleotide to the base of the next.
# Helix axis is z, unit is nm:
#if vector_trans is None:
# vector_trans = (0, 0, 0.36) if basepaired else (0, 0, 0.40)
#if vector_rot is None:
# vector_rot = 36 if basepaired else 25
#self._next_nuc_transform = next_nuc_transform
self.Conformation = conformation
self.Basepaired_with = None
# When moving parts (strands or nucleotides), move whichever
# has the lowest fixation level.
self.fixation_level = 0 # 0: Free to move. (Generally affects translation).
self.fixed_orientation = 0 # 0: Free to move
self.Locked = {} # dict of locked attributes
self.Locked_temp = {} # dict of temporarily locked attributes
#self.Pivot_local = None # Local pivot is always the bpc
#self.Pivot_world = None # Global pivot should be avoided.
self.Nuc3p = None
self.Nuc5p = None
self.Operation_hashes = set()
@property
def Is_basepaired(self):
""" Returns True if nucleotide has a (watson-crick) base-pair partner. """
return self.Basepaired_with is not None
@property
def World_direction(self):
""" Just the unit vector of vector_trans. """
return self.Orientation * self.helix_up
def next_nuc_translation_default(self, conformation=None, direction='3p'):
""" Translation from this nucleotide to the next. """
#if self._next_nuc_transform:
# # This should be the translation part of the matrix,
# # as long as it is a 4x4 Matrix.
# # If we have a 3x3 matrix, it only includes the rotation part.
# return self._next_nuc_transform[3,0:3]
return self.standard_step_param('translation', conformation=conformation, direction=direction)
def next_nuc_position_default(self, conformation=None, direction='3p'):
""" Translation from this nucleotide to the next. """
return self.Position + self.next_nuc_translation_default(conformation=conformation, direction=direction)
def next_nuc_orientation_default(self, conformation=None, direction='3p'):
""" Return default orientation for the next nucleotide. """
step_rotation = self.standard_step_param('orientation', conformation=conformation, direction=direction)
return step_rotation * self.Orientation
#
#def next_nuc_transform(self):
# """
# The full 4x4 transformation matrix for the next nucleotide.
# Q: Is this the transformation matrix between this and the next,
# or the 'world' tranform?
# World is generally 'position' + 'orientation'.
# """
# #if self._next_nuc_transform:
# # return self._next_nuc_transform
# # We have to generate it from our own parameters:
# pass
#
#def next_nuc_position(self):
# """
# World position after applying vector.
# "Next" is the 3p nucleotide.
# """
# pass
def get_neighbour(self, direction='3p'):
""" Return neighbourh in direction. """
if '5' in direction:
return self.Nuc5p
else:
return self.Nuc3p
def walk(self, direction, operation_hash=None, extend=None, follow_bp=True,
strand_blacklist=None, visited_set=None, include_self=True, bp_strand_blacklist=None):
"""
In theory, this should just be a generator over all nucleotides.
An alternative to having a operation_hash would be to add
all visited nucleotides to a temporary set.
Note: The order of returned nucleotides cannot currently be guaranteed.
Also, this might produce funny comparisons for pseudo-knot structures.
"""
# Break conditions:
if extend is not None:
if extend <= 0:
return
extend -= 1
if visited_set is not None:
if self in visited_set:
return
visited_set.add(self)
if strand_blacklist is not None and self.Strand in strand_blacklist:
return
if operation_hash is not None:
if operation_hash in self.Operation_hashes:
return
self.Operation_hashes.add(operation_hash)
# Yields
if include_self:
yield self
if direction in ('both', '3p'):
yield from self.Nuc3p.walk('3p', operation_hash=operation_hash, extend=extend,
follow_bp=follow_bp, strand_blacklist=strand_blacklist,
bp_strand_blacklist=bp_strand_blacklist,
visited_set=visited_set, include_self=True)
if direction in ('both', '5p'):
yield from self.Nuc5p.walk('5p', operation_hash=operation_hash, extend=extend,
follow_bp=follow_bp, strand_blacklist=strand_blacklist,
bp_strand_blacklist=bp_strand_blacklist,
visited_set=visited_set, include_self=True)
if follow_bp:
if follow_bp is True:
bp_walk_dir = 'both'
elif follow_bp == 'continue':
bp_walk_dir = '3p' if '3' in direction else '5p'
else:
bp_walk_dir = follow_bp
nuc = self.Basepaired_with
if nuc.Strand in strand_blacklist or nuc.Strand in bp_strand_blacklist:
return
yield from nuc.walk(bp_walk_dir, operation_hash=operation_hash, extend=extend,
follow_bp=follow_bp, strand_blacklist=strand_blacklist,
bp_strand_blacklist=bp_strand_blacklist,
visited_set=visited_set, include_self=True)
def gen_fixation_level(self, direction, **kwargs):
""" Generator of fixation level using walk. """
nucs = self.walk(direction, **kwargs)
return (nuc.fixation_level if nuc else 0 for nuc in nucs)
def max_fixation_level(self, direction, **kwargs):
"""
kwargs are passed on to self.walk(...)
"""
#nucs = self.walk(direction, **kwargs)
#max_level = 0
#for nuc in nucs:
# if nuc and nuc.fixation_level > max_level:
# max_level = nuc.fixation_level
# Alternatively:
#max_level = max(map(lambda x: x.fixation_level if x else 0, nucs))
max_level = max(self.gen_fixation_level(direction, **kwargs))
return max_level
def max_fixated_nuc(self, direction, **kwargs):
""" Return the most fixated nucleotide, using walk. """
nucs = self.walk(direction, **kwargs)
keyfunc = lambda x: x.fixation_level if x else 0
nuc = max(nucs, key=keyfunc)
return nuc
def cumulative_fixation_level(self, direction, **kwargs):
"""
kwargs are passed on to self.walk(...)
"""
#nucs = self.walk(direction, **kwargs)
#cum_level = np.cumsum(map(lambda x: x.fixation_level if x else 0, nucs))
cum_level = np.cumsum(self.gen_fixation_level(direction, **kwargs)) # pylint: disable=E1101
return cum_level
#def max_fixation_level(self, direction, operation_hash=None, extend=None, follow_bp=True,
# strand_blacklist=None):
# if operation_hash in self.Operation_hashes:
# if operation_hash is None:
# print("WARNING: %s.Operation_hashes contains None!!" % self)
# return 0
# if extend is not None and extend <= 0:
# return self.fixation_level
# if operation_hash is None:
# operation_hash = make_operation_hash()
# self.Operation_hashes.add(operation_hash)
# nuc = self.get_neighbour(direction)
# this_strand_fixation_level = max((self.fixation_level, nuc.max_fixation_level(direction)))
# if self.Is_basepaired and follow_bp:
# other_dir = '3p' if '5' in direction else '5p'
# other_strand_fixation_level = self.Basepaired_with.max_fixation_level(other_dir, operation_hash, extend)
# return max((this_strand_fixation_level, other_strand_fixation_level))
#def cumulative_fixation_level(self, direction, operation_hash=None, extend=None):
# if extend is not None and extend <= 0:
# return self.fixation_level
# if operation_hash in self.Operation_hashes:
# if operation_hash is None:
# print("WARNING: %s.Operation_hashes contains None!!" % self)
# return
# if operation_hash is None:
# operation_hash = make_operation_hash()
# self.Operation_hashes.add(operation_hash)
#
# cum_level = self.fixation_level
# #return self.fixation_level + nuc.cumulative_fixation_level(direction)
# #nuc = self.get_neighbour(direction)
# if '3' in direction or direction == 'both':
# cum_level = self.Nuc3p.cumulative_fixation_level(direction='3p',
# operation_hash=operation_hash,
# extend=extend)
# if '5' in direction or direction == 'both':
# cum_level = self.Nuc5p.cumulative_fixation_level(direction='5p',
# operation_hash=operation_hash,
# extend=extend)
def strand_fixation_level(self):
""" Return the fixation level of this strand. """
return self.cumulative_fixation_level(direction='both', include_self=True)
def basepair_with(self, nuc):
"""
To make a basepair:
1) Align up bpc (moving the nuc with least fixation, or both if equal fixation).
2) If either 5p or 3p is fixed, adjust helical rotation (twist) and translation (rise)
to dsDNA conformation.
3) Check that the base-pair rotation is correct. Move if required.
For each step, when moving this or bp partner nucleotide, you also have
to move the remaing nucleotides on the strand (on the least-fixed side).
For translation, this is simple and should match up.
For rotation, you probably have to rotate the remaining nucleotides
around this nucleotide's bpc.
Can this be done simply by modifying the orientation with the same rotation,
and then rotating the position (separately)?
Note: What do you do if this is already connected, e.g. you are making a pseudo-knot
(you ALWAYS make lots of pseudo-knots when you make DNA nanostructures...)
EFFECTS:
1) You cannot perform simple translations. You have to move by rotating the different parts.
Transformation by rotation:
0) Objective is to move self and nuc together. Two ways to solve this:
a) Start from current point and find rotations to get the two nucleotides together.
b) Move the two nucleotides together and relax the position from there.
1) Treat all helices as rigid rods.
2) Use regions with 1 or more un-paired nucleotides as hinges.
A good starting point might be to distribute the rotations equally on all un-paired nucleotides,
(perhaps straightening ss-stretches at the ends).
"""
this_strand_fixation = self.strand_fixation_level()
other_strand_fixation = nuc.strand_fixation_level()
# 1: Translate to aligh the bpc of this and the other nuc:
translation = nuc.Position - self.Position
# If we are already aligned, don't go through the process of translating the bpc:
if any(translation):
isequal = this_strand_fixation == other_strand_fixation
if this_strand_fixation >= 2**32 and other_strand_fixation >= 2**32:
# The strands are both fixed, either completely or at one point. Special case!
raise NotImplementedError("Base-pairing two completely fixed strands is not yet supported.")
elif isequal:
# Move both strands half way: (You could scale by the total mass or inertia)
self.translate(0.5 * translation)
nuc.translate(-0.5 * translation)
else:
# Move the strand with least fixation:
if self.strand_fixation_level() < nuc.strand_fixation_level():
# translate_nuc = self
self.translate(translation)
else:
# translate_nuc = nuc
nuc.translate(-translation)
# 2: Adjust helical parameters, transforming from ss to ds:
# http://www.nature.com/ncomms/journal/v3/n6/fig_tab/ncomms1903_T1.html
# Cases:
# - This nucleotide xor the other is fixed
# - None of the strands are fixed (yet).
#if not this_strand_fixation and not other_strand_fixation:
if not this_strand_fixation > self.fixation_level \
and not other_strand_fixation > self.fixation_level:
# None of the strands are fixed (yet).
# This bp forms the basis of the helix.
# Let's just say we transform the other nuc:
#new_orientation = self.standard_step_param['orientation']['ds'] * self.Orientation
if self.fixation_level > nuc.fixation_level:
new_orientation = self.standard_bp_transform * self.Orientation
nuc.rotate_to(new_orientation)
else:
new_orientation = nuc.standard_bp_transform * nuc.Orientation
self.rotate_to(new_orientation)
elif this_strand_fixation > other_strand_fixation:
# This strand is fixed.
fixation_3p = self.cumulative_fixation_level('3p')
fixation_5p = self.cumulative_fixation_level('5p')
if fixation_5p >= fixation_3p:
# Adjust this nucleotide according to the 5' neighbour.
new_position = self.Nuc5p.next_nuc_position_default(conformation='ds', direction='3p')
new_orientation = self.Nuc5p.next_nuc_orientation_default(conformation='ds', direction='3p')
self.translate_to(new_position, neighbours='3p')
self.rotate_to(new_orientation, neighbours='3p')
else:
# Adjust this nucleotide according to the 3' neighbour.
new_position = self.Nuc3p.next_nuc_position_default(conformation='ds', direction='5p')
new_orientation = self.Nuc3p.next_nuc_orientation_default(conformation='ds', direction='5p')
self.translate_to(new_position, neighbours='5p')
self.rotate_to(new_orientation, neighbours='5p')
# Adjust partner nucleotide:
new_orientation = self.standard_bp_transform * self.Orientation
nuc.rotate_to(new_orientation)
else:
# Other strand is fixed.
fixation_3p = nuc.cumulative_fixation_level('3p')
fixation_5p = nuc.cumulative_fixation_level('5p')
if fixation_5p >= fixation_3p:
# Adjust partner nucleotide according to its 5' neighbour.
new_position = self.Nuc5p.next_nuc_position_default(conformation='ds', direction='3p')
new_orientation = self.Nuc5p.next_nuc_orientation_default(conformation='ds', direction='3p')
nuc.translate_to(new_position, neighbours='3p')
nuc.rotate_to(new_orientation, neighbours='3p')
else:
# Adjust partner nucleotide according to its 3' neighbour.
new_position = nuc.Nuc3p.next_nuc_position_default(conformation='ds', direction='5p')
new_orientation = nuc.Nuc3p.next_nuc_orientation_default(conformation='ds', direction='5p')
nuc.translate_to(new_position, neighbours='5p')
nuc.rotate_to(new_orientation, neighbours='5p')
# Adjust this nucleotide according to partner:
new_orientation = nuc.standard_bp_transform * nuc.Orientation
self.rotate_to(new_orientation)
self.fixation_level = self.fixation_level or 1
nuc.fixation_level = nuc.fixation_level or 1
def translate(self, translation, neighbours='both',
move_bp_partner=True, operation_hash=None):
"""
Translate this and other strand nucleotide strands to accomodate.
Use an operation hash to ensure that the same operation is not applied multiple times
because they are connected by multiple connections.
The simplest case of this is moving a hair-pin: The base-pair partner
should be translated, but it will also eventually be translated because of the backbone
connection.
Returns the number of nucleotides translated by the operation.
"""
# Special case: If any nucleotides are completely fixed, we can attempt to translate by rotating
# around the fixed nucleotide.
if operation_hash in self.Operation_hashes:
if operation_hash is None:
print("WARNING: %s.Operation_hashes contains None!!" % self)
return
if operation_hash is None:
operation_hash = make_operation_hash()
self.Operation_hashes.add(operation_hash)
self.Position += translation
nmoved = 1
if neighbours and ('both' in neighbours or '3p' in neighbours):
if self.Nuc3p:
nmoved += self.Nuc3p.translate(translation, neighbours='3p', operation_hash=operation_hash)
if neighbours and ('both' in neighbours or '5p' in neighbours):
if self.Nuc5p:
nmoved += self.Nuc5p.translate(translation, neighbours='5p', operation_hash=operation_hash)
if move_bp_partner and self.Is_basepaired:
nmoved += self.Basepaired_with.translate(translation, neighbours='both', operation_hash=operation_hash)
return nmoved
def translate_to(self, position, neighbours='both'):
"""
Translate this nucleotide to this position and make equivalent
translation to remaining nucleotide's in strand.
"""
translation = position - self.Position
return self.translate(translation, neighbours)
def rotate(self, rotation, pivot=None, neighbours='both',
move_bp_partner=True, operation_hash=None):
"""
Rotate this nucleotide by transform, and perform equivalent rotation
to remaining nucleotides in the strand.
Rotation is applied to orientation and to self.Position by rotating around pivot.
If no pivot is given, bpc is used.
Note: rotation must be a quaternion, which has axis and rotation (radians).
"""
if operation_hash in self.Operation_hashes:
if operation_hash is None:
print("WARNING: %s.Operation_hashes contains None!!" % self)
return
if operation_hash is None:
operation_hash = make_operation_hash()
self.Operation_hashes.add(operation_hash)
self.Orientation += rotation * self.Orientation
# To rotate position around a pivot point: subtract the pivot point from position,
# perform rotation, and add pivot point again:
# http://www.euclideanspace.com/maths/geometry/affine/aroundPoint/
if pivot:
self.Position = rotation*(self.Position-pivot) + pivot
else:
self.Position = rotation * self.Position
pivot = self.Position # Later rotation use this nuc's bpc as pivot
nmoved = 1
if neighbours and ('both' in neighbours or '3p' in neighbours):
if self.Nuc3p:
nmoved += self.Nuc3p.rotate(rotation, pivot=pivot, neighbours='3p', operation_hash=operation_hash)
if neighbours and ('both' in neighbours or '5p' in neighbours):
if self.Nuc5p:
nmoved += self.Nuc5p.rotate(rotation, pivot=pivot, neighbours='5p', operation_hash=operation_hash)
if move_bp_partner and self.Is_basepaired:
nmoved += self.Basepaired_with.rotate(rotation, pivot=pivot, neighbours='both', operation_hash=operation_hash)
return nmoved
def rotate_to(self, orientation, neighbours='both'):
"""
Rotate this nucleotide to given orientation, and perform equivalent rotation
to remaining nucleotides in the strand.
"""
# new_orientation = rotation * orientation
# <=> rotation = new_orientation * orientation.inverse
rotation = orientation * self.Orientation.inverse
return self.rotate(rotation, neighbours)
def append_nuc(self, direction='3p', nuc=None, spec='N'):
"""
Append nucleotide to self.
"""
assert (self.Nuc5p if '5' in direction else self.Nuc3p) is None
position = self.standard_step_param('translation', direction=direction) + self.Position
orientation = self.standard_step_param('orientation', direction=direction) * self.Orientation
nuc = Nucleotide(position=position, orientation=orientation, spec=spec)
if '5' in direction:
self.Nuc5p = nuc
nuc.Nuc3p = self
else:
self.Nuc3p = nuc
nuc.Nuc5p = self
return nuc
def append_nucs_recursive(self, sequence, direction='3p'):
"""
Note: The order of the returned nucs may be [3'-5'] if direction is '5p'.
"""
#assert sequence
if not sequence:
return []
spec, rest = sequence[0], sequence[1:]
nuc = self.append_nuc(direction, spec=spec)
if rest:
return [nuc] + nuc.append_nucs_recursive(rest, direction)
else:
return [nuc]
def __repr__(self):
return "Nucleotide({}, {})".format(self.Position, self.Orientation)
|
scholer/nascent
|
nascent/nucleotides.py
|
Python
|
agpl-3.0
| 29,920
|
[
"MDAnalysis",
"Mayavi"
] |
d7ced376be93a815345390fba84a5f73427c25d2054fe652c13d9f36b6491f73
|
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
# /////////////// Corruption Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
import warnings
import os
from pkg_resources import resource_filename
warnings.simplefilter("ignore", UserWarning)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=256, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
Return square 2d array, side length 'mapsize', of floats in range 0-255.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / float(zoom_factor)))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
# /////////////// End Corruption Helpers ///////////////
# /////////////// Corruptions ///////////////
def gaussian_noise(x, severity=1):
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = np.array(x) / 255.
return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def shot_noise(x, severity=1):
c = [60, 25, 12, 5, 3][severity - 1]
x = np.array(x) / 255.
return np.clip(np.random.poisson(x * c) / float(c), 0, 1) * 255
def impulse_noise(x, severity=1):
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)
return np.clip(x, 0, 1) * 255
def speckle_noise(x, severity=1):
c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]
x = np.array(x) / 255.
return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def fgsm(x, source_net, severity=1):
c = [8, 16, 32, 64, 128][severity - 1]
x = V(x, requires_grad=True)
logits = source_net(x)
source_net.zero_grad()
loss = F.cross_entropy(logits, V(logits.data.max(1)[1].squeeze_()), size_average=False)
loss.backward()
return standardize(torch.clamp(unstandardize(x.data) + c / 255. * unstandardize(torch.sign(x.grad.data)), 0, 1))
def gaussian_blur(x, severity=1):
c = [1, 2, 3, 4, 6][severity - 1]
x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)
return np.clip(x, 0, 1) * 255
def glass_blur(x, severity=1):
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]
x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for i in range(c[2]):
for h in range(224 - c[1], c[1], -1):
for w in range(224 - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
def defocus_blur(x, severity=1):
c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3
return np.clip(channels, 0, 1) * 255
def motion_blur(x, severity=1):
c = [(10, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]
output = BytesIO()
x.save(output, format='PNG')
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))
x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED)
if x.shape != (224, 224):
return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, severity=1):
c = [np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, severity=1):
c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
x += c[0] * plasma_fractal(wibbledecay=c[1])[:224, :224][..., np.newaxis]
return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
def frost(x, severity=1):
c = [(1, 0.4),
(0.8, 0.6),
(0.7, 0.7),
(0.65, 0.7),
(0.6, 0.75)][severity - 1]
idx = np.random.randint(5)
filename = [resource_filename(__name__, 'frost/frost1.png'),
resource_filename(__name__, 'frost/frost2.png'),
resource_filename(__name__, 'frost/frost3.png'),
resource_filename(__name__, 'frost/frost4.jpg'),
resource_filename(__name__, 'frost/frost5.jpg'),
resource_filename(__name__, 'frost/frost6.jpg')][idx]
frost = cv2.imread(filename)
# randomly crop and convert to rgb
x_start, y_start = np.random.randint(0, frost.shape[0] - 224), np.random.randint(0, frost.shape[1] - 224)
frost = frost[x_start:x_start + 224, y_start:y_start + 224][..., [2, 1, 0]]
return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
def snow(x, severity=1):
c = [(0.1, 0.3, 3, 0.5, 10, 4, 0.8),
(0.2, 0.3, 2, 0.5, 12, 4, 0.7),
(0.55, 0.3, 4, 0.9, 12, 8, 0.7),
(0.55, 0.3, 4.5, 0.85, 12, 8, 0.65),
(0.55, 0.3, 2.5, 0.85, 12, 12, 0.55)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED) / 255.
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(224, 224, 1) * 1.5 + 0.5)
return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, severity=1):
c = [(0.65, 0.3, 4, 0.69, 0.6, 0),
(0.65, 0.3, 3, 0.68, 0.6, 0),
(0.65, 0.3, 2, 0.68, 0.5, 0),
(0.65, 0.3, 1, 0.65, 1.5, 1),
(0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
# water is pale turqouise
color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1])), axis=2)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
else:
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# mud brown
color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
42 / 255. * np.ones_like(x[..., :1]),
20 / 255. * np.ones_like(x[..., :1])), axis=2)
color *= m[..., np.newaxis]
x *= (1 - m[..., np.newaxis])
return np.clip(x + color, 0, 1) * 255
def contrast(x, severity=1):
c = [0.4, .3, .2, .1, .05][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
return np.clip((x - means) * c + means, 0, 1) * 255
def brightness(x, severity=1):
    """Brighten the image by shifting the HSV value channel upward.

    ``x`` has values in [0, 255]; ``severity`` in [1, 5] picks the
    additive shift. Returns a float array in [0, 255].
    """
    shift = (.1, .2, .3, .4, .5)[severity - 1]
    img = np.array(x) / 255.
    hsv = sk.color.rgb2hsv(img)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] + shift, 0, 1)
    rgb = sk.color.hsv2rgb(hsv)
    return np.clip(rgb, 0, 1) * 255
def saturate(x, severity=1):
    """Scale and shift the HSV saturation channel.

    Severities 1-2 desaturate, 3-5 oversaturate. ``x`` has values in
    [0, 255]; returns a float array in [0, 255].
    """
    scale, offset = ((0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2))[severity - 1]
    img = np.array(x) / 255.
    hsv = sk.color.rgb2hsv(img)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * scale + offset, 0, 1)
    rgb = sk.color.hsv2rgb(hsv)
    return np.clip(rgb, 0, 1) * 255
def jpeg_compression(x, severity=1):
    """Degrade a PIL image by round-tripping it through lossy JPEG.

    ``severity`` in [1, 5] selects the JPEG quality (lower = worse).
    Returns the re-decoded PIL image.
    """
    quality = (25, 18, 15, 10, 7)[severity - 1]
    buffer = BytesIO()
    x.save(buffer, 'JPEG', quality=quality)
    return PILImage.open(buffer)
def pixelate(x, severity=1):
    """Pixelate a 224x224 PIL image by box-downsampling then upsampling.

    ``severity`` in [1, 5] selects the downscale factor (smaller = blockier).
    """
    factor = (0.6, 0.5, 0.4, 0.3, 0.25)[severity - 1]
    small = x.resize((int(224 * factor), int(224 * factor)), PILImage.BOX)
    return small.resize((224, 224), PILImage.BOX)
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, severity=1):
    """Apply an elastic deformation: a small random affine warp followed
    by a smoothed random per-pixel displacement field.

    Parameters
    ----------
    image : array-like image with values in [0, 255].
    severity : int in [1, 5].

    Returns
    -------
    float array with values in [0, 255].
    """
    # (displacement magnitude, smoothing sigma, affine jitter) per severity.
    c = [(244 * 2, 244 * 0.7, 244 * 0.1), # 244 should have been 224, but ultimately nothing is incorrect
         (244 * 2, 244 * 0.08, 244 * 0.2),
         (244 * 0.05, 244 * 0.01, 244 * 0.02),
         (244 * 0.07, 244 * 0.01, 244 * 0.02),
         (244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
    image = np.array(image, dtype=np.float32) / 255.
    shape = image.shape
    shape_size = shape[:2]
    # random affine
    # Three reference points around the image center, jittered by up to
    # c[2] pixels, define the random affine transform.
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size,
                       [center_square[0] + square_size, center_square[1] - square_size],
                       center_square - square_size])
    pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    # Smoothed random displacement fields, scaled by c[0].
    dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
                   c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
    dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
                   c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
    dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
    # Resample the image at the displaced coordinates (channels unshifted).
    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
    return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
# /////////////// End Corruptions ///////////////
|
hendrycks/robustness
|
ImageNet-C/imagenet_c/imagenet_c/corruptions.py
|
Python
|
apache-2.0
| 14,857
|
[
"Gaussian"
] |
fb962115845e0a7ceaf5a6e3cda17f4aab07c66e3c367c219cafc7032c4a43fb
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.DistributionRequirement import (
DistributionRequirement)
@linter(executable='verilator',
        output_format='regex',
        use_stderr=True,
        # verilator emits diagnostics on stderr prefixed with '%'; the
        # named groups feed coala's standard regex output parser.
        output_regex=r'\%(?:(?P<severity>Error|Warning.*?).*?):'
                     r'.+?:(?P<line>.+?): (?P<message>.+)')
class VerilogLintBear:
    """
    Analyze Verilog code using ``verilator`` and checks for all lint
    related and code style related warning messages. It supports the
    synthesis subset of Verilog, plus initial statements, proper
    blocking/non-blocking assignments, functions, tasks.
    It also warns about unused code when a specified signal is never sinked,
    and unoptimized code due to some construct, with which the
    optimization of the specified signal or block is disabled.
    This is done using the ``--lint-only`` command. For more information visit
    <http://www.veripool.org/projects/verilator/wiki/Manual-verilator>.
    """
    # Bear metadata consumed by the coala framework.
    LANGUAGES = {"Verilog"}
    REQUIREMENTS = {DistributionRequirement(apt_get='verilator')}
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    ASCIINEMA_URL = 'https://asciinema.org/a/45275'
    CAN_DETECT = {'Formatting', 'Code Simplification', 'Syntax', 'Unused Code'}
    @staticmethod
    def create_arguments(filename, file, config_file):
        # --lint-only: parse and lint without generating output files.
        return '--lint-only', filename
|
SanketDG/coala-bears
|
bears/verilog/VerilogLintBear.py
|
Python
|
agpl-3.0
| 1,474
|
[
"VisIt"
] |
b840885b57da73425f0bd8db3a52d596ac1d6d2e1e9419fdf503bf5a47ed049d
|
"""
=================
Drop Shadow Frame
=================
A widget providing a drop shadow (gaussian blur effect) around another
widget.
"""
from AnyQt.QtWidgets import (
QWidget, QGraphicsScene, QGraphicsRectItem, QGraphicsDropShadowEffect,
QStyleOption, QAbstractScrollArea, QToolBar
)
from AnyQt.QtGui import QPainter, QPixmap, QColor, QPen, QPalette, QRegion
from AnyQt.QtCore import (
Qt, QPoint, QPointF, QRect, QRectF, QSize, QSizeF, QEvent
)
from AnyQt.QtCore import pyqtProperty as Property
CACHED_SHADOW_RECT_SIZE = (50, 50)
def render_drop_shadow_frame(pixmap, shadow_rect, shadow_color,
                             offset, radius, rect_fill_color):
    """Render a drop shadow cast by ``shadow_rect`` into ``pixmap``.

    A throwaway QGraphicsScene holds a filled rectangle with a
    QGraphicsDropShadowEffect; the scene is rendered into the (cleared)
    pixmap, which is then returned.
    """
    pixmap.fill(QColor(0, 0, 0, 0))
    scene = QGraphicsScene()
    casting_rect = QGraphicsRectItem(shadow_rect)
    casting_rect.setBrush(QColor(rect_fill_color))
    casting_rect.setPen(QPen(Qt.NoPen))
    scene.addItem(casting_rect)
    shadow = QGraphicsDropShadowEffect(color=shadow_color,
                                       blurRadius=radius,
                                       offset=offset)
    casting_rect.setGraphicsEffect(shadow)
    scene.setSceneRect(QRectF(QPointF(0, 0), QSizeF(pixmap.size())))
    painter = QPainter(pixmap)
    scene.render(painter)
    painter.end()
    # Dispose of the temporary scene.
    scene.clear()
    scene.deleteLater()
    return pixmap
class DropShadowFrame(QWidget):
    """
    A widget drawing a drop shadow effect around the geometry of
    another widget (works similar to :class:`QFocusFrame`).

    Parameters
    ----------
    parent : :class:`QObject`
        Parent object.
    color : :class:`QColor`
        The color of the drop shadow.
    radius : float
        Shadow radius.
    """
    def __init__(self, parent=None, color=None, radius=5,
                 **kwargs):
        QWidget.__init__(self, parent, **kwargs)
        # Never intercept input meant for the tracked widget.
        self.setAttribute(Qt.WA_TransparentForMouseEvents, True)
        self.setAttribute(Qt.WA_NoChildEventsForParent, True)
        self.setFocusPolicy(Qt.NoFocus)
        if color is None:
            color = self.palette().color(QPalette.Dark)
        self.__color = color
        self.__radius = radius
        # Widget being shadowed and the parent this frame reparents under.
        self.__widget = None
        self.__widgetParent = None
        self.__updatePixmap()
    def setColor(self, color):
        """
        Set the color of the shadow.
        """
        if not isinstance(color, QColor):
            color = QColor(color)
        if self.__color != color:
            self.__color = QColor(color)
            self.__updatePixmap()
    def color(self):
        """
        Return the color of the drop shadow.
        """
        return QColor(self.__color)
    color_ = Property(QColor, fget=color, fset=setColor, designable=True,
                      doc="Drop shadow color")
    def setRadius(self, radius):
        """
        Set the drop shadow's blur radius.
        """
        if self.__radius != radius:
            self.__radius = radius
            self.__updateGeometry()
            self.__updatePixmap()
    def radius(self):
        """
        Return the shadow blur radius.
        """
        return self.__radius
    radius_ = Property(int, fget=radius, fset=setRadius, designable=True,
                       doc="Drop shadow blur radius.")
    def setWidget(self, widget):
        """
        Set the widget around which to show the shadow.
        """
        if self.__widget:
            self.__widget.removeEventFilter(self)
        self.__widget = widget
        if self.__widget:
            self.__widget.installEventFilter(self)
            # Find the parent for the frame
            # This is the top level window a toolbar or a viewport
            # of a scroll area
            parent = widget.parentWidget()
            while not (isinstance(parent, (QAbstractScrollArea, QToolBar)) or \
                       parent.isWindow()):
                parent = parent.parentWidget()
            if isinstance(parent, QAbstractScrollArea):
                parent = parent.viewport()
            self.__widgetParent = parent
            self.setParent(parent)
            # Keep the frame painted beneath the widget it shadows.
            self.stackUnder(widget)
            self.__updateGeometry()
            self.setVisible(widget.isVisible())
    def widget(self):
        """
        Return the widget that was set by `setWidget`.
        """
        return self.__widget
    def paintEvent(self, event):
        # Tile the 8 border fragments of the cached shadow pixmap around
        # the tracked widget's rectangle.
        # TODO: Use QPainter.drawPixmapFragments on Qt 4.7
        opt = QStyleOption()
        opt.initFrom(self)
        pixmap = self.__shadowPixmap
        shadow_rect = QRectF(opt.rect)
        widget_rect = QRectF(self.widget().geometry())
        widget_rect.moveTo(self.radius_, self.radius_)
        left = top = right = bottom = self.radius_
        pixmap_rect = QRectF(QPointF(0, 0), QSizeF(pixmap.size()))
        # Shadow casting rectangle in the source pixmap.
        pixmap_shadow_rect = pixmap_rect.adjusted(left, top, -right, -bottom)
        source_rects = self.__shadowPixmapFragments(pixmap_rect,
                                                    pixmap_shadow_rect)
        target_rects = self.__shadowPixmapFragments(shadow_rect, widget_rect)
        painter = QPainter(self)
        for source, target in zip(source_rects, target_rects):
            painter.drawPixmap(target, pixmap, source)
        painter.end()
    def eventFilter(self, obj, event):
        # Follow the tracked widget's move/resize/show/hide events.
        etype = event.type()
        if etype == QEvent.Move or etype == QEvent.Resize:
            self.__updateGeometry()
        elif etype == QEvent.Show:
            self.__updateGeometry()
            self.show()
        elif etype == QEvent.Hide:
            self.hide()
        return QWidget.eventFilter(self, obj, event)
    def __updateGeometry(self):
        """
        Update the shadow geometry to fit the widget's changed
        geometry.
        """
        widget = self.__widget
        parent = self.__widgetParent
        radius = self.radius_
        pos = widget.pos()
        if parent != widget.parentWidget():
            pos = widget.parentWidget().mapTo(parent, pos)
        # Grow the widget's rect by the blur radius on every side.
        geom = QRect(pos, widget.size())
        geom.adjust(-radius, -radius, radius, radius)
        if geom != self.geometry():
            self.setGeometry(geom)
        # Set the widget mask (punch a hole through to the `widget` instance.
        rect = self.rect()
        mask = QRegion(rect)
        transparent = QRegion(rect.adjusted(radius, radius, -radius, -radius))
        mask = mask.subtracted(transparent)
        self.setMask(mask)
    def __updatePixmap(self):
        """
        Update the cached shadow pixmap.
        """
        # NOTE(review): 50x50 duplicates CACHED_SHADOW_RECT_SIZE — consider
        # reusing the module constant.
        rect_size = QSize(50, 50)
        left = top = right = bottom = self.radius_
        # Size of the pixmap.
        pixmap_size = QSize(rect_size.width() + left + right,
                            rect_size.height() + top + bottom)
        shadow_rect = QRect(QPoint(left, top), rect_size)
        pixmap = QPixmap(pixmap_size)
        pixmap.fill(QColor(0, 0, 0, 0))
        rect_fill_color = self.palette().color(QPalette.Window)
        pixmap = render_drop_shadow_frame(
            pixmap,
            QRectF(shadow_rect),
            shadow_color=self.color_,
            offset=QPointF(0, 0),
            radius=self.radius_,
            rect_fill_color=rect_fill_color
        )
        self.__shadowPixmap = pixmap
        self.update()
    def __shadowPixmapFragments(self, pixmap_rect, shadow_rect):
        """
        Return a list of 8 QRectF fragments for drawing a shadow.
        """
        s_left, s_top, s_right, s_bottom = \
            shadow_rect.left(), shadow_rect.top(), \
            shadow_rect.right(), shadow_rect.bottom()
        s_width, s_height = shadow_rect.width(), shadow_rect.height()
        # p_height is computed but unused below.
        p_width, p_height = pixmap_rect.width(), pixmap_rect.height()
        # Corners and edges, clockwise from the top-left corner.
        top_left = QRectF(0.0, 0.0, s_left, s_top)
        top = QRectF(s_left, 0.0, s_width, s_top)
        top_right = QRectF(s_right, 0.0, p_width - s_width, s_top)
        right = QRectF(s_right, s_top, p_width - s_right, s_height)
        right_bottom = QRectF(shadow_rect.bottomRight(),
                              pixmap_rect.bottomRight())
        bottom = QRectF(shadow_rect.bottomLeft(),
                        pixmap_rect.bottomRight() - \
                        QPointF(p_width - s_right, 0.0))
        bottom_left = QRectF(shadow_rect.bottomLeft() - QPointF(s_left, 0.0),
                             pixmap_rect.bottomLeft() + QPointF(s_left, 0.0))
        left = QRectF(pixmap_rect.topLeft() + QPointF(0.0, s_top),
                      shadow_rect.bottomLeft())
        return [top_left, top, top_right, right, right_bottom,
                bottom, bottom_left, left]
# A different obsolete implementation
class _DropShadowWidget(QWidget):
    """A frame widget drawing a drop shadow effect around its
    contents.

    Unlike :class:`DropShadowFrame`, the shadow is drawn inside the
    widget's own contents margins rather than around another widget.
    """
    def __init__(self, parent=None, offset=None, radius=None,
                 color=None, **kwargs):
        QWidget.__init__(self, parent, **kwargs)
        # Bypass the overloaded method to set the default margins.
        QWidget.setContentsMargins(self, 10, 10, 10, 10)
        if offset is None:
            offset = QPointF(0., 0.)
        if radius is None:
            radius = 20
        if color is None:
            color = QColor(Qt.black)
        self.offset = offset
        self.radius = radius
        self.color = color
        self._shadowPixmap = None
        self._updateShadowPixmap()
    def setOffset(self, offset):
        """Set the drop shadow offset (`QPoint`)
        """
        self.offset = offset
        self._updateShadowPixmap()
        self.update()
    def setRadius(self, radius):
        """Set the drop shadow blur radius (`float`).
        """
        self.radius = radius
        self._updateShadowPixmap()
        self.update()
    def setColor(self, color):
        """Set the drop shadow color (`QColor`).
        """
        self.color = color
        self._updateShadowPixmap()
        self.update()
    def setContentsMargins(self, *args, **kwargs):
        # The shadow pixmap depends on the margins, so re-render it.
        QWidget.setContentsMargins(self, *args, **kwargs)
        self._updateShadowPixmap()
    def _updateShadowPixmap(self):
        """Update the cached drop shadow pixmap.
        """
        # Rectangle casting the shadow
        rect_size = QSize(*CACHED_SHADOW_RECT_SIZE)
        left, top, right, bottom = self.getContentsMargins()
        # Size of the pixmap.
        pixmap_size = QSize(rect_size.width() + left + right,
                            rect_size.height() + top + bottom)
        shadow_rect = QRect(QPoint(left, top), rect_size)
        pixmap = QPixmap(pixmap_size)
        pixmap.fill(QColor(0, 0, 0, 0))
        rect_fill_color = self.palette().color(QPalette.Window)
        pixmap = render_drop_shadow_frame(pixmap, QRectF(shadow_rect),
                                          shadow_color=self.color,
                                          offset=self.offset,
                                          radius=self.radius,
                                          rect_fill_color=rect_fill_color)
        self._shadowPixmap = pixmap
    def paintEvent(self, event):
        # Tile the 8 border fragments of the cached pixmap into the
        # widget's margin area.
        pixmap = self._shadowPixmap
        widget_rect = QRectF(QPointF(0.0, 0.0), QSizeF(self.size()))
        frame_rect = QRectF(self.contentsRect())
        left, top, right, bottom = self.getContentsMargins()
        pixmap_rect = QRectF(QPointF(0, 0), QSizeF(pixmap.size()))
        # Shadow casting rectangle.
        pixmap_shadow_rect = pixmap_rect.adjusted(left, top, -right, -bottom)
        source_rects = self._shadowPixmapFragments(pixmap_rect,
                                                   pixmap_shadow_rect)
        target_rects = self._shadowPixmapFragments(widget_rect, frame_rect)
        painter = QPainter(self)
        for source, target in zip(source_rects, target_rects):
            painter.drawPixmap(target, pixmap, source)
        painter.end()
    def _shadowPixmapFragments(self, pixmap_rect, shadow_rect):
        """Return a list of 8 QRectF fragments for drawing a shadow.
        """
        s_left, s_top, s_right, s_bottom = \
            shadow_rect.left(), shadow_rect.top(), \
            shadow_rect.right(), shadow_rect.bottom()
        s_width, s_height = shadow_rect.width(), shadow_rect.height()
        # p_height is computed but unused below.
        p_width, p_height = pixmap_rect.width(), pixmap_rect.height()
        # Corners and edges, clockwise from the top-left corner.
        top_left = QRectF(0.0, 0.0, s_left, s_top)
        top = QRectF(s_left, 0.0, s_width, s_top)
        top_right = QRectF(s_right, 0.0, p_width - s_width, s_top)
        right = QRectF(s_right, s_top, p_width - s_right, s_height)
        right_bottom = QRectF(shadow_rect.bottomRight(),
                              pixmap_rect.bottomRight())
        bottom = QRectF(shadow_rect.bottomLeft(),
                        pixmap_rect.bottomRight() - \
                        QPointF(p_width - s_right, 0.0))
        bottom_left = QRectF(shadow_rect.bottomLeft() - QPointF(s_left, 0.0),
                             pixmap_rect.bottomLeft() + QPointF(s_left, 0.0))
        left = QRectF(pixmap_rect.topLeft() + QPointF(0.0, s_top),
                      shadow_rect.bottomLeft())
        return [top_left, top, top_right, right, right_bottom,
                bottom, bottom_left, left]
|
cheral/orange3
|
Orange/canvas/gui/dropshadow.py
|
Python
|
bsd-2-clause
| 13,319
|
[
"Gaussian"
] |
6f46fd315e038ab0bf859fc0adf6470e228bbceba1e6bf88a4aaac950f5ca2fc
|
# -*- coding: utf-8 -*-
{
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'この地域を地理的に指定するロケーション。これはロケーションの階層構造のうちの一つか、ロケーショングループの一つか、この地域の境界に面するロケーションです。',
"Acronym of the organization's name, eg. IFRC.": '団体の略称 (IFRCなど)',
"Authenticate system's Twitter account": '認証システムの Twitter アカウント',
"Can't import tweepy": 'tweepyをインポートできません',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "救援要請と寄付項目を関連付けるには、項目左の'寄付'ボタンを押してください。",
"Couldn't import tweepy library": 'tweepy libraryをインポートできません',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": 'サイトの所在地住所を詳細に記述します。情報伝達と物品搬送に使用します。このサイトに関する情報を、以下の「ロケーション」項目にGIS/地図データを挿入できることに注意してください。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'もしこの設定が地域メニューにある地域を指しているのであれば、メニューで使う名前を設定してください。個人用の地図設定の名前では、ユーザの名前で設定されます。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとしてアサインされるように指定することができます。ただし、ユーザーのドメインと団体のドメイン項目に差異がない場合のみ有効です。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'この項目の内容はユーザーの基本所在地となり、ユーザーが地図上に表示されるようになります。',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'この設定が有効の場合、削除されたレコードには削除済みフラグが付与されるだけで、実際のデータは消去されません。一般のユーザが閲覧することはできませんが、データベースを直接参照することでデータを確認できます。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '行方不明者の登録が存在しない場合、「人物情報を追加」ボタンを押して、新規登録を行ってください。',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'リストに病院が表示されない場合、「病院情報を追加」することで新規に登録が可能です。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": 'オフィスが一覧にない場合は、「オフィスを追加」をクリックすることで新規のオフィスを追加できます。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "もしあなたの団体の登録がない場合、'団体を追加'リンクをクリックすることで追加が可能です",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'データを同期する際には、ネットワークを経由してではなく、ファイルから行うことも可能です。ネットワークが存在しない場合に利用されます。ファイルからのデータインポート、およびファイルへのエクスポートはこのページから実行可能です。右部のリンクをクリックしてください。',
"Level is higher than parent's": '親情報よりも高いレベルです',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "注意: SMS は'アクション可能'のためリクエストがフィルターされます。一方、ツイートのリクエストはフィルターされません。よって、これは検索する手段となります",
"Need a 'url' argument!": "'url'引数が必要です。",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "オプション項目。ジオメトリカラムの名称です。PostGISでのデフォルト値は 'the_geom'となります。",
"Parent level should be higher than this record's level. Parent level is": '親レベルは、このレコードのレベルより上位でなければなりません。親レベルは',
"Password fields don't match": 'パスワードが一致しません。',
"Phone number to donate to this organization's relief efforts.": 'この団体の救援活動に対して寄付を行う際の連絡先となる電話番号を記載します。',
"Please come back after sometime if that doesn't help.": 'この方法で問題が解決しない場合は、しばらく時間を置いて再度アクセスしてください。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "'Delete Old'ボタンを押すことで、データを参照しているレコードは全て参照先を再指定され、古い方のレコードは削除されます。",
"Quantity in %s's Inventory": '%s 倉庫にある量',
"Search here for a person's record in order to:": '人物情報の検索を行い、以下の機能を実現します:',
"Select a person in charge for status 'assigned'": "状況が '割り当て済み' である担当者を選択します",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'もし全ての特定の場所が住所階層の最下層で親の場所を必要とするなら、これを選択して下さい。例えば、もし「地区」が階層の最小の地域なら、全ての特定の場所は親階層の地区を持っている必要が有るでしょう。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'もし全ての特定の場所が住所階層での親の場所を必要とするなら、これを選択して下さい。これは被災地の「地域」表示の設定に役立てられます。',
"Sorry, things didn't get done on time.": 'すいません、時間通りに行われていません。',
"Sorry, we couldn't find that page.": 'すいません、お探しのページは見つかりませんでした。',
"System's Twitter account updated": 'システムのTwitterアカウントを変更しました',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "この線、あるいは面の<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>具体的な説明</a>",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": 'このプロジェクトの資金提供組織を選択します。複数の項目を選択するには、Ctrlキーを押しながらクリックしてください。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": 'この団体の活動分野を選択します。複数の項目を選択するには、コントロールキーを押しながらクリックしてください。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '画像ファイルのURLです。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"The person's manager within this Office/Project.": 'このオフィス/プロジェクトのマネージャ。',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '遺体の検索を行うには、遺体のID番号を入力してください。検索時のワイルドカード文字として、%を使うことができます。入力せずに「検索」すると、全ての遺体が表示されます。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'ID情報を入力することで、遺体を検索します。ワイルドカードとして % が使用できます。何も指定せずに「検索」すると、全ての遺体が表示されます。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "病院を検索するには、名前、病院のID、団体名、省略名のいずれかをスペース(空白)で区切って入力してください。 % がワイルドカードとして使えます。全病院のリストを表示するにはなにも入力せずに '検索' ボタンを押してください。",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '探し出したい病院をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての病院を表示します。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '病院を検索するには、名称の一部かIDを入力してください。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」を押した場合、全ての病院を表示します。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "ロケーションを検索するには、名前を入力します。%をワイルドカード文字として使用することが出来ます。何も入力しないで '検索' をクリックするとすべてのロケーションが表示されます。",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '苗字、名前などを半角スペースで区切って入力し、人物検索して下さい。「%」を使うとファジー検索できます。何も入力せずに検索すれば、全ての情報を検索表示します。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '人を検索するためには、お名前(苗字、名前または両方)を入力してください。また姓名の間にはスペースをいれてください。ワイルドカードとして % が使えます。すべての人物情報をリストするには、検索ボタンをおしてください。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '探し出したい支援要請をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての支援要請を表示します。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'アセスメントを検索するには、アセスメントのチケット番号の一部を入力してください。ワイルドカードとして % が使えます。すべてのアセスメントをリストするには、なにも入力せず検索ボタンをおしてください。',
"Type the first few characters of one of the Person's names.": '検索したい人物の名前の先頭数文字を入力してください',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '画像ファイルのアップロードはここから行ってください。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"View and/or update details of the person's record": '人物情報を検索し、詳細の閲覧や更新を行ないます',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'データベースの直接閲覧/編集(注意:フレームワークの規則に反します)',
"What are the people's normal ways to obtain food in this area?": 'この地域で食料を調達するための手段を記載してください',
"What should be done to reduce women and children's vulnerability to violence?": '未成年や女性を暴力から守るために、どのような活動や設備が必要かを記載してください',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '他とデータを同期するとき、二つ(以上)の団体がそれぞれ更新した情報を同期するときにコンフリクトが発生することがあります。同期モジュールは、コンフリクトを自動解決しようと試みますが、解決できないことがあります。そのような場合、手作業でコンフリクトを解決するか、クリックして次のページに進んでください。',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'ユーザ固有の設定を行っている場合、ここで変更を行っても、目に見える変化がない場合があります。ユーザ固有の設定を行うには、以下をクリックしてください。 ',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": '変更が保存されていません。「キャンセル」をクリックした後、「保存」を押して保存してください。変更を破棄するには、OK をクリックしてください。',
"You haven't made any calculations": '計算が実行されていません',
"You haven't yet Verified your account - please check your email": '利用者登録はまだ有効ではありません。',
"couldn't be parsed so NetworkLinks not followed.": 'パースできなかったため、 NetworkLinksはフォローされません。',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'OpenLayersで未サポートの機能である GroundOverlayやScreenOverlayを含むため、不具合がある可能性があります。',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" は、"field1=\'newvalue\'" のようなオプションです。"JOIN の結果を更新または削除することはできません。',
'# Houses Damaged': '損傷した家屋の数',
'# Houses Flooded': '浸水した家屋数',
'# People Needing Food': '食料が必要な人の数',
'# People at Risk From Vector-Borne Diseases': '生物が媒介する疾病の危険性がある人の数',
'# People without Access to Safe Drinking-Water': '安全な飲料水が確保されていない人の数',
'# of Houses Damaged': '損壊した家屋数',
'# of Houses Destroyed': '全壊した家屋数',
'# of International Staff': '国外スタッフ人数',
'# of National Staff': '国内スタッフの人数',
'# of People Affected': '被災者数',
'# of People Deceased': '死亡者数',
'# of People Injured': '負傷者数',
'# of Vehicles': '車両数',
'%s Create a new site or ensure that you have permissions for an existing site.': '%s 新しいサイトを作成するか既存のサイトに対する権限を持っているかどうか確認して下さい',
'%s rows deleted': '%s 行を削除しました',
'%s rows updated': '%s 行を更新しました',
'& then click on the map below to adjust the Lat/Lon fields': 'そして下の地図をクリックして、緯度 / 経度フィールドを調節してください',
'* Required Fields': '* は必須項目です',
'0-15 minutes': '0-15 分間',
'1 Assessment': '1アセスメント',
'1 location, shorter time, can contain multiple Tasks': '1つの地域における短期間の活動を表し、1つの支援活動のなかで複数のタスクを実行します。',
'1-3 days': '1-3 日間',
'1. Fill the necessary fields in BLOCK letters.': '1. 太字の項目は必須項目です.',
'15-30 minutes': '15-30 分間',
'2 different options are provided here currently:': '現在は、2種類のオプションが提供されています。',
'2. Always use one box per letter and leave one box space to seperate words.': '2. 一マス一文字で、単語の間は一マス開けてください。',
'2x4 Car': '2x4 車両',
'30-60 minutes': '30-60 分間',
'4-7 days': '4-7 日間',
'4x4 Car': '四輪駆動車',
'8-14 days': '8-14 日間',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '機能クラスに設定したマーカーを上書きする必要があれば、個々のロケーションに設定したマーカーを設定します',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'このデータ内容を確認できるファイルやURL情報、連絡先担当者などのリファレンスデータを記載します。最初の何文字かを入力することで、既存の類似文書にリンクすることが可能です。',
'A Warehouse is a physical place which contains Relief Items available to be Distributed.': '倉庫とは、救援物資の配布を行うことができる物理的な地点を意味します。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫 / サイトとは、物資の保管場所のことであり、住所とGIS情報が付帯します。特定の建物や、市内の特定地域などがあげられます。',
'A brief description of the group (optional)': 'グループの詳細(オプション)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'GPSからダウンロードしたファイルには、その地点に関する様々な情報がXML形式で保存されています。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'GPSから取得したGPX形式のファイル。タイムスタンプは画像と関連づけられ、地図上に配置することができます。',
'A library of digital resources, such as photos, documents and reports': '写真や文書、レポートなど、電子化された資料',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'ロケーションを取りまとめた単位はロケーショングループと呼称されます(たいていは、一定範囲内の管理対象地域をさします)。このページから、ロケーションをグループに追加することができます。ロケーショングループ単位で地図上に表示させたり、検索結果として表示させることが可能となります。グループを使用することで、1つの管理地域に縛られない被災地域定義が可能となります。ロケーショングループは、地域メニューから定義できます。',
'A location group must have at least one member.': 'ロケーショングループには、メンバーが最低一人必要です。',
'A place within a Site like a Shelf, room, bin number etc.': 'Site内に存在する施設。例えば棚、部屋、Binの番号など',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'binのスナップショットや追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A survey series with id %s does not exist. Please go back and create one.': 'ID番号 %sに関するsurvey seriesは存在しません。「戻る」ボタンを押して、新規に作成してください。',
'ABOUT THIS MODULE': 'このモジュールについて',
'ABOUT': '概要',
'ACCESS DATA': 'アクセスデータ',
'ANY': '全て',
'API is documented here': 'APIに関する文書はこちら',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ニュージーランド向けに変更したATC-20(建物の簡易安全性評価プロセス)',
'ATC-20': 'ATC-20(建物の簡易安全性評価プロセス)',
'Abbreviation': '省略',
'Ability to Fill Out Surveys': '調査記入能力',
'Ability to customize the list of details tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズ可否',
'Ability to customize the list of human resource tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズの可否',
'Ability to customize the list of important facilities needed at a Shelter': '避難所で追跡する人的資源のリストのカスタマイズの可否',
'Ability to track partial fulfillment of the request': '支援要請の部分的な達成度の追跡可否',
'Ability to view Results of Completed and/or partially filled out Surveys': '完了または一部完了した聞き取り調査の結果をみる機能',
'About Sahana Eden': 'Sahana Edenについて',
'About Sahana': 'Sahanaについて',
'About this module': 'モジュールの詳細',
'About': '情報',
'Access denied': 'アクセスが拒否されました',
'Access to Shelter': '避難所へのアクセス',
'Access to education services': '学校へのアクセス',
'Accessibility of Affected Location': '被災地域へのアクセス方法',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '利用者登録の申請を受け付けました。所属団体またはサイト管理者による承認を待っています。',
'Acronym': '略称/イニシャル',
'Actionable by all targeted recipients': 'すべての対象受信者にとって実用的な',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '指定された参加者のみ実施可能です。<note>の中に行使するためのIDがあることが必要です。',
'Actionable': '対応可能',
'Actioned?': '実施済み?',
'Actions taken as a result of this request.': '要請に対して行われるアクション',
'Actions': 'アクション',
'Active Problems': '対処中の問題',
'Activities Map': '支援活動マップ',
'Activities are blue.': '支援活動(アクティビティ)は青色で表示されます。',
'Activities matching Assessments:': 'アセスメントに適合した支援活動',
'Activities of boys 13-17yrs before disaster': '災害発生前の13-17歳男子の活動状況',
'Activities of boys 13-17yrs now': '現在の13-17歳男子の活動状況',
'Activities of boys <12yrs before disaster': '災害発生前の12歳以下男子の活動状況',
'Activities of boys <12yrs now': '現在の12歳以下男子の活動状況',
'Activities of children': '子供たちの活動',
'Activities of girls 13-17yrs before disaster': '災害発生前の13-17歳女子の活動状況',
'Activities of girls 13-17yrs now': '現在の13-17歳女子の活動状況',
'Activities of girls <12yrs before disaster': '災害発生前の12歳以下女子の活動状況',
'Activities of girls <12yrs now': '現在の12歳以下女子の活動状況',
'Activities': '支援活動',
'Activity Added': '支援活動を追加しました',
'Activity Deleted': '支援活動を削除しました',
'Activity Details': '支援活動の詳細',
'Activity Report': '支援活動レポート',
'Activity Reports': '支援活動レポート',
'Activity Type': '支援活動タイプ',
'Activity Updated': '支援活動を更新しました',
'Activity': '支援活動',
'Add Address': 'アドレスを追加',
'Add Activity Type': '支援活動タイプを追加',
'Add Aid Request': '治療要請を追加',
'Add Alternative Item': '代わりの物資を追加',
'Add Assessment Summary': 'アセスメントの要約を追加',
'Add Assessment': 'アセスメントを追加',
'Add Baseline Type': '基準値タイプの追加',
'Add Baseline': '基準値の追加',
'Add Bin Type': 'Bin Typeを追加',
'Add Bins': 'Binを追加',
'Add Bundle': 'Bundleを追加',
'Add Catalog.': 'カタログを追加',
'Add Category': 'カテゴリを追加',
'Add Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係を追加',
'Add Config': '設定を追加',
'Add Contact': '連絡先を追加',
'Add Contact Information': '連絡先情報を追加',
'Add Credential': '証明書の追加',
'Add Credentials': '証明書の追加',
'Add Detailed Evaluation': '詳細な評価を追加',
'Add Disaster Victims': '被災者情報を追加',
'Add Distribution.': '配給所を追加',
'Add Donor': '資金提供組織を追加',
'Add Flood Report': '洪水レポートを追加',
'Add Group Member': 'グループメンバを追加',
'Add Identity': 'IDを追加',
'Add Image': '画像を追加',
'Add Impact Type': '災害影響のタイプを追加',
'Add Impact': '被災状況の追加',
'Add Inventory Item': '備蓄物資を追加します',
'Add Inventory Store': '物資集積地点を追加',
'Add Item (s)': '物資を追加',
'Add Item Catalog': '物資カタログを追加',
'Add Item Category': '救援物資カタログカテゴリを追加',
'Add Item Sub-Category': '救援物資サブカテゴリを追加',
'Add Item to Request': '要求する支援物資の登録',
'Add Item to Shipment': '輸送に物資を追加する',
'Add Item': '物資を追加',
'Add Key': 'Keyを追加',
'Add Kit': 'Kitを追加',
'Add Level 1 Assessment': 'レベル1アセスメントを追加',
'Add Level 2 Assessment': 'レベル2アセスメントを追加',
'Add Line': '行を追加',
'Add Location': 'ロケーションを追加',
'Add Location Group': 'ロケーショングループを追加',
'Add Locations': 'ロケーションを追加',
'Add Log Entry': 'ログエントリを追加',
'Add Member': 'メンバを追加',
'Add Membership': 'メンバシップを追加',
'Add Message': 'メッセージを追加',
'Add Need Type': '需要タイプを追加',
'Add Need': '要求を追加',
'Add New Aid Request': '援助要請を新規追加',
'Add New Assessment Summary': '新規アセスメントの要約を追加',
'Add New Baseline Type': '基準値タイプの新規追加',
'Add New Baseline': '新しい基準値を追加',
'Add New Bin Type': 'Bin Typeを新規追加',
'Add New Bin': 'Binを新規追加',
'Add New Budget': '予算を新規追加',
'Add New Bundle': 'Bundleを新規追加',
'Add New Cluster Subsector': 'クラスタのサブセクタを新規作成',
'Add New Cluster': 'クラスタを新規追加',
'Add New Commitment Item': '物資コミットメントを新規追加',
'Add New Config': '設定を新規追加',
'Add New Distribution Item': '配給物資を新規追加',
'Add New Distribution': '配給所を新規追加',
'Add New Document': '文書を新規追加',
'Add New Donor': '資金提供組織を新規追加',
'Add New Entry': 'エントリを新規追加',
'Add New Flood Report': '洪水情報を新規追加',
'Add New Image': '画像を新規追加',
'Add New Impact Type': '災害影響のタイプを新規追加',
'Add New Impact': '新規影響を追加',
'Add New Inventory Item': '備蓄物資を新規追加',
'Add New Inventory Store': '物資集積場所を新規追加',
'Add New Item Catalog Category': '物資カタログカテゴリを新規追加',
'Add New Item Catalog': '物資カタログを新規追加',
'Add New Item Sub-Category': '物資サブカテゴリを新規追加',
'Add New Item to Kit': 'キットに救援物資を新規追加',
'Add New Key': 'Keyを新規追加',
'Add New Level 1 Assessment': 'レベル1アセスメントを新規追加',
'Add New Level 2 Assessment': 'レベル2アセスメントを新規追加',
'Add New Member': 'メンバを新規追加',
'Add New Membership': 'メンバシップを新規追加',
'Add New Metadata': 'メタデータを新規追加',
'Add New Need Type': '需要タイプを新規追加',
'Add New Need': '新しい要求を登録する',
'Add New Note': '追加情報を新規追加',
'Add New Peer': 'データ同期先を新規追加',
'Add New Position': '場所を新規追加',
'Add New Problem': '問題を新規追加',
'Add New Rapid Assessment': '被災地の現況アセスメントを新規追加',
'Add New Received Item': '受領した物資を新規追加',
'Add New Record': 'レコードを新規追加',
'Add New Request Item': '特定物資の要請を新規追加',
'Add New Request': '支援要請を新規追加',
'Add New Response': '返答を新規追加',
'Add New River': '河川情報を新規追加',
'Add New Role to User': 'ユーザに役割を新規割り当て',
'Add New Sent Item': '送った物資の追加',
'Add New Setting': '設定を新規追加',
'Add New Shipment to Send': '発送する輸送物資を新規追加',
'Add New Site': 'Siteを新規追加',
'Add New Solution': '解決案を提示する',
'Add New Staff Type': 'スタッフタイプを新規追加',
'Add New Staff': 'スタッフを新規追加',
'Add New Storage Location': '備蓄場所を新規追加',
'Add New Survey Answer': '新しい調査の回答を追加',
'Add New Survey Question': '調査項目を新規追加',
'Add New Survey Section': '新しい調査セクションを追加',
'Add New Survey Series': '新しい一連の調査を追加します',
'Add New Survey Template': 'Survey Templateを新規追加',
'Add New Team': 'チームを新規追加',
'Add New Ticket': 'チケットを新規追加',
'Add New Track': '追跡情報を新規追加',
'Add New Unit': '単位を新規追加',
'Add New User to Role': '新規ユーザに役割を割り当て',
'Add New Warehouse Item': '倉庫物資を新規追加',
'Add New': '新規追加',
'Add Note': 'ノートを追加',
'Add Peer': 'データ同期先を追加',
'Add Performance Evaluation': 'パフォーマンス評価を追加',
'Add Person': '人物情報を追加',
'Add Photo': '写真を追加',
'Add Point': 'ポイントを追加',
'Add Polygon': 'Polygonを追加',
'Add Position': '場所を追加',
'Add Problem': '問題を追加',
'Add Projections': '地図投影法を追加',
'Add Question': '質問事項を追加',
'Add Rapid Assessment': '被災地の現況アセスメントを追加',
'Add Rapid Evaluation': '迅速評価を追加',
'Add Recipient Site': '受け取りSiteを追加',
'Add Recipient': '受け取り担当者を追加',
'Add Record': 'レコードを追加',
'Add Recovery Report': '遺体回収レポートを追加',
'Add Reference Document': 'リファレンス文書を追加',
'Add Report': 'レポートを追加',
'Add Request Detail': '支援要請の詳細を追加',
'Add Request Item': '物資の要請を追加します',
'Add Request': '支援要請を追加',
'Add Resource': 'リソースを追加',
'Add Response': '返答を追加',
'Add Section': 'Sectionを追加',
'Add Sender Organization': '送付元団体を追加',
'Add Sender Site': '送付元Siteを追加',
'Add Setting': '設定を追加',
'Add Shipment Transit Log': '輸送履歴を追加',
'Add Shipment/Way Bills': '輸送費/渡航費を追加',
'Add Site': 'サイトを追加',
'Add Skill Types': 'スキルタイプを追加',
'Add Solution': '解決案を追加',
'Add Staff Type': 'スタッフタイプを追加',
'Add Staff': 'スタッフを追加',
'Add Storage Bin ': 'Storage Binを追加 ',
'Add Storage Bin Type': 'Storage Bin Typeを追加',
'Add Storage Location': '備蓄地点を追加',
'Add Sub-Category': 'サブカテゴリを追加',
'Add Subscription': '寄付金情報を追加',
'Add Survey Answer': '調査の回答を追加',
'Add Survey Question': '聞き取り調査項目を追加',
'Add Survey Section': '調査セクションの追加',
'Add Survey Series': '一連の調査を追加',
'Add Survey Template': '調査テンプレートを追加',
'Add Team Member': 'メンバを追加',
'Add Team': 'チームを追加',
'Add Ticket': 'チケットを追加',
'Add Unit': '単位を追加',
'Add Volunteer Registration': 'ボランティア登録を追加',
'Add Warehouse Item': '倉庫物資を追加',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'ファイル、URL、あるいは、このデータの確認を行なう連絡先のような参照文書を追加します。参照文書を入力しない場合、代わりにあなたのメールが表示されます。',
'Add a Volunteer': 'ボランティアの追加',
'Add a new Relief Item.': '救援物資を新規追加',
'Add a new Site from where the Item is being sent.': 'この救援物資の送付先を新規サイトとして追加',
'Add a new Site where the Item is being sent to.': 'この物資の送付先サイトを新規追加',
'Add an Photo.': '写真を追加.',
'Add location': 'ロケーションを追加',
'Add main Item Category.': '主要なアイテムカテゴリを追加',
'Add main Item Sub-Category.': '主要な救援物資サブカテゴリを追加',
'Add new Group': 'グループを新規追加',
'Add new Individual': '個人を新規追加',
'Add new position.': '新しいポジションを追加してください。',
'Add new project.': 'プロジェクトを新規追加',
'Add new staff role.': 'スタッフの権限を新規追加',
'Add or Update': '追加、あるいは更新',
'Add the Storage Bin Type.': 'Storage Binタイプを追加します。',
'Add the Storage Location where this bin is located.': 'binが保存されている貯蔵場所を追加します。',
'Add the Storage Location where this this Bin belongs to.': 'このBinがある備蓄地点を追加します。',
'Add the main Warehouse/Site information where this Bin belongs to.': 'その物資の備蓄スペースとなっている倉庫/サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Item is to be added.': 'この物資が追加されることになっている主要な倉庫 / サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Storage location is.': 'その物資の備蓄場所となっている倉庫/サイトの情報を追加してください。',
'Add the unit of measure if it doesnt exists already.': '距離単位が未登録の場合、単位を追加します。',
'Add to Bundle': 'Bundleへの登録',
'Add to Catalog': 'カタログへ登録',
'Add to budget': '予算項目へ登録',
'Add': '追加',
'Add/Edit/Remove Layers': 'レイヤを追加/編集/削除',
'Added to Group': 'メンバシップを追加しました',
'Added to Team': 'メンバシップを追加しました',
'Additional Beds / 24hrs': '追加ベッド予測数 / 24h',
'Additional Comments': '追加コメント',
'Additional quantity quantifier – i.e. “4x5”.': '数量を表す追記(例 「4x5」)',
'Address Details': '住所情報の詳細',
'Address Type': '住所情報タイプ',
'Address added': '住所情報を追加しました',
'Address deleted': '住所情報を削除しました',
'Address updated': '住所情報を更新しました',
'Address': '住所情報',
'Addresses': '住所',
'Adequate food and water available': '適切な量の食料と水が供給されている',
'Adequate': '適正',
'Adjust Item(s) Quantity': 'アイテム量の修正',
'Adjust Items due to Theft/Loss': 'アイテム量の修正(盗難/紛失のため)',
'Admin Email': '管理者の電子メール',
'Admin Name': '管理者名',
'Admin Tel': '管理者の電話番号',
'Admin': '管理者',
'Administration': '管理',
'Administrator': '管理者',
'Admissions/24hrs': '患者増加数/24h',
'Adolescent (12-20)': '青年(12-20)',
'Adolescent participating in coping activities': '未成年が災害対応に従事',
'Adult (21-50)': '成人(21-50)',
'Adult ICU': '成人 ICU',
'Adult Psychiatric': '精神病の成人',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': '刑務所で服役中の成人がいる',
'Advanced Bin Search': 'Binの詳細検索',
'Advanced Catalog Search': 'カタログの詳細検索',
'Advanced Category Search': '詳細カテゴリー検索',
'Advanced Item Search': '詳細な物資検索',
'Advanced Location Search': '詳細な位置検索',
'Advanced Site Search': 'Siteの詳細検索',
'Advanced Sub-Category Search': 'サブカテゴリの詳細検索',
'Advanced Unit Search': '高度な単位検索',
'Advanced': '詳細',
'Advanced:': 'もっと正確に:',
'Advisory': '注意喚起',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'このボタンをクリックすると、解決法のペアが順に表示されます。各ペアから、最も適する項目を1つずつ選択してください。',
'Age Group': '年齢グループ',
'Age group does not match actual age.': '年齢グループが実際の年齢と一致しません。',
'Age group': '年齢グループ',
'Aggravating factors': '悪化要因',
'Aggregate Items': 'アイテムの集約',
'Agriculture': '農業',
'Aid Request Details': '援助要請の詳細',
'Aid Request added': '援助要請を追加しました',
'Aid Request deleted': '援助要請を削除しました',
'Aid Request updated': '援助要請を更新しました',
'Aid Request': '援助要請',
'Aid Requests': '援助要請',
'Air Transport Service': '物資空輸サービス',
'Aircraft Crash': '飛行機事故',
'Aircraft Hijacking': '航空機ハイジャック',
'Airport Closure': '空港閉鎖',
'Airspace Closure': '空域閉鎖',
'Alcohol': 'アルコール',
'Alert': 'アラート',
'All Inbound & Outbound Messages are stored here': '送受信した全てのメッセージはここに格納されます。',
'All Locations': '全てのロケーション',
'All Records': 'すべてのレコード',
'All Requested Items': '物資要請一覧',
'All Resources': 'すべての資源',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'このサイトのSahana Software Foundationで提供されるデータのライセンスは、CCA (Creative Commons Attribution licence)となります。しかし、すべてのデータの発生源が、このサイトであるとは限りません。詳細は、各エントリの情報ソースの項目に記載されています。',
'All': '全て',
'Allowed to push': 'プッシュが許可済みである',
'Allows a Budget to be drawn up': '予算の策定を行ないます',
'Allows authorized users to control which layers are available to the situation map.': '認証済みユーザーが「状況地図のどのレイヤが利用できるか」を制御することを許可します。',
'Alternative Item Details': '代わりの品物についての詳細',
'Alternative Item added': '代わりの物資を追加しました',
'Alternative Item deleted': '代わりの品物が削除されました',
'Alternative Item updated': '代わりの物資を更新しました',
'Alternative Item': '代わりの物資',
'Alternative Items': '代わりとなる物資',
'Alternative infant nutrition in use': '利用中の乳児用代替食',
'Alternative places for studying available': '学校以外の場所を学習に利用可能である',
'Alternative places for studying': '授業開設に利用可能な施設',
'Ambulance Service': '救急サービス',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '物資集積場所とは、救援物資の配給能力をもつ、物理的な場所を指します。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、調達、その他様々な資産やリソースの管理といった機能。',
'An item which can be used in place of another item': '他の物資の代わりに使う物資',
'Analysis of Completed Surveys': '完了したフィードバックの分析',
'Animal Die Off': '動物の死',
'Animal Feed': '動物のエサ',
'Animals': '動物',
'Answer Choices (One Per Line)': '選択肢(一行に一つ)',
'Anthropology': '人類学',
'Antibiotics available': '抗生物質が利用可能',
'Antibiotics needed per 24h': '24時間ごとに必要な抗生物質',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'ファイル内の利用可能なすべてのメタデータ(タイムスタンプ、作成者、緯度経度等)を自動的に読み込みます。',
'Any comments about this sync partner.': 'データの同期先に関するコメント',
'Apparent Age': '年齢(外見)',
'Apparent Gender': '性別(外見)',
'Application Permissions': 'アプリケーションに対する権限',
'Application': '申請',
'Applications': 'アプリケーション',
'Appropriate clothing available': '適切な衣料が利用可能である',
'Appropriate cooking equipment/materials in HH': '世帯内にて適切な調理器具/食材が利用可能である',
'Approved': '承認されました',
'Approver': '承認者',
'Approx. number of cases/48h': '事象の発生概数/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48時間以内に発生した、5歳未満小児の下痢症状発生件数を記載してください。概数でかまいません',
'Archive not Delete': 'Archiveを削除しない',
'Arctic Outflow': '北極気団の南下',
'Are basic medical supplies available for health services since the disaster?': '災害発生後、基本的な医療行為を行えるよう、ヘルスサービスに対して供給があったかどうかを記載します',
'Are breast milk substitutes being used here since the disaster?': '災害発生後、母乳代替品が使われているかどうかを記載します',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '日中時間帯、この地域での生活や遊び、通行によって、未成年や高齢者、障碍者に肉体的な危害が及ぶ可能性があるかを記載します',
'Are the chronically ill receiving sufficient care and assistance?': '慢性病の罹患者に対して、十分なケアと介護が行われているかを記載します',
'Are there adults living in prisons in this area?': 'この地域で刑務所に収容されている成人がいるかどうかを記載してください',
'Are there alternative places for studying?': '学校以外に学習を行える場所があるかどうかを記載してください',
'Are there cases of diarrhea among children under the age of 5?': '5歳未満の幼児に下痢症状が発生しているかどうかを記載してください',
'Are there children living in adult prisons in this area?': 'この地域で、成人用刑務所に収容されている未成年がいるかどうかを記載してください',
'Are there children living in boarding schools in this area?': 'この地域で、寄宿舎に居住している未成年がいるかどうかを記載してください',
'Are there children living in homes for disabled children in this area?': 'この地域で、障がいのある子供の世話をするために家にいる未成年がいるかどうかを記載してください',
'Are there children living in juvenile detention in this area?': 'この地域で、少年院に収容されている未成年がいるかどうかを記載してください',
'Are there children living in orphanages in this area?': 'この地域で、孤児となった子供は居ますか?',
'Are there children with chronical illnesses in your community?': '慢性疾患をもった子どもが共同体の中にいるかどうかを記載してください',
'Are there health services functioning for the community since the disaster?': '災害発生後、共同体で医療サービスが機能しているかどうかを記載してください',
'Are there older people living in care homes in this area?': 'この地域で、介護施設に居住している高齢者がいるかどうかを記載してください',
'Are there older people with chronical illnesses in your community?': 'この共同体のなかで、慢性疾患を患っている高齢者がいるかどうかを記載してください',
'Are there people with chronical illnesses in your community?': 'この共同体の中で、慢性疾患を患っている人物がいるかどうかを記載してください',
'Are there separate latrines for women and men available?': 'トイレが男女別になっているかどうかを記載してください',
'Are there staff present and caring for the residents in these institutions?': 'これら施設の居住者に対して、ケアと介護を行えるスタッフが存在するかどうかを記載してください',
'Area': 'エリア',
'Areas inspected': '調査済み地域',
'Assessment Details': 'アセスメントの詳細',
'Assessment Reported': 'アセスメントを報告しました',
'Assessment Summaries': 'アセスメントの要約',
'Assessment Summary Details': 'アセスメント要約の詳細',
'Assessment Summary added': 'アセスメントの要約を追加しました',
'Assessment Summary deleted': 'アセスメントの要約を削除しました',
'Assessment Summary updated': 'アセスメントの要約を更新しました',
'Assessment Type': 'アセスメントタイプ',
'Assessment added': 'アセスメントを追加しました',
'Assessment admin level': 'アセスメントの管理レベル',
'Assessment and Activities Gap Analysis Map': 'アセスメントと活動のギャップについての解析マップ',
'Assessment and Activities Gap Analysis Report': 'アセスメントと支援活動のギャップ解析レポート',
'Assessment deleted': 'アセスメントを削除しました',
'Assessment timeline': 'アセスメントタイムライン',
'Assessment updated': 'アセスメントを更新しました',
'Assessment': 'アセスメント',
'Assessments Needs vs. Activities': '需要アセスメントと支援活動のギャップ',
'Assessments and Activities': 'アセスメントと支援活動',
'Assessments are shown as green, yellow, orange, red.': 'アセスメントは、緑・黄・オレンジ・赤のいずれかの色で表されます。',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。データには、WFP(国連世界食糧計画)アセスメントも含まれます',
'Assessments are structured reports done by Professional Organizations': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。',
'Assessments': 'アセスメント',
'Assessments:': 'アセスメント:',
'Assessor': '査定実施者',
'Asset Assigned': '資産割り当て',
'Asset Assignment Details': '資産割り当ての詳細',
'Asset Assignments deleted': '資産の割り当てを削除しました',
'Asset Assignments updated': '物資割り当てを更新しました',
'Asset Assignments': '資産割り当て',
'Asset Details': '資産の詳細',
'Asset Management': '資産管理',
'Asset Number': '資産番号',
'Asset added': '資産を追加しました',
'Asset deleted': '資産を削除しました',
'Asset updated': '資産を更新しました',
'Asset': '資産',
'Assets': '資産',
'Assign Asset': '資産割り当て',
'Assign Storage Location': '蓄積地点の割り当て',
'Assign to Org.': '組織に割り当て',
'Assigned To': '担当者',
'Assigned to': '担当者',
'Assigned': '割り当てられた',
'Assignments': '割り当て',
'Assistance for immediate repair/reconstruction of houses': '緊急の修理/家屋復旧の手伝い',
'Assistant': 'アシスタント',
'At/Visited Location (not virtual)': '実際に訪問した/訪問中のロケーション',
'Attend to information sources as described in <instruction>': '<instruction>に記載されている情報ソースへの参加',
'Attribution': '属性',
'Audit Read': '監査報告書の読み込み',
'Audit Write': '監査報告書の書き込み',
'Author': '作者',
'Automotive': '車両',
'Availability': 'ボランティア期間',
'Available Alternative Inventory Items': '利用可能な他の物資',
'Available Beds': '利用可能なベッド数',
'Available Inventory Items': '利用可能な倉庫内の物資',
'Available Messages': '利用可能なメッセージ',
'Available Records': '利用可能なレコード',
'Available databases and tables': '利用可能なデータベースおよびテーブル',
'Available for Location': '活動可能な地域',
'Available from': 'ボランティア開始日',
'Available in Viewer?': 'ビューワ内で利用可能?',
'Available until': 'ボランティア終了日',
'Availablity': '活動期間',
'Avalanche': '雪崩',
'Avoid the subject event as per the <instruction>': '<instruction>に従って対象の事象を避ける',
'Babies who are not being breastfed, what are they being fed on?': '乳児に対して母乳が与えられない場合、どうやって乳幼児の食事を確保しますか?',
'Baby And Child Care': '乳幼児へのケア',
'Background Color for Text blocks': 'テキストブロックの背景色',
'Background Color': '背景色',
'Bahai': 'バハイ',
'Baldness': '禿部',
'Balochi': 'バロチ語',
'Banana': 'バナナ',
'Bank/micro finance': '銀行/マイクロファイナンス',
'Barricades are needed': 'バリケードが必要',
'Base Layer?': '基本レイヤ?',
'Base Layers': '基本レイヤ',
'Base Location': '基本となるロケーション',
'Base Unit': '基本単位',
'Baseline Number of Beds': '平常時のベッド設置数',
'Baseline Type Details': '基準値タイプの詳細',
'Baseline Type added': '基準値タイプを追加しました',
'Baseline Type deleted': '基準値のタイプを削除しました',
'Baseline Type updated': '基準値タイプを更新しました',
'Baseline Type': '基準値タイプ',
'Baseline Types': '基準値の種類',
'Baseline added': '基準値を追加しました',
'Baseline deleted': '基準値を削除しました',
'Baseline number of beds of that type in this unit.': 'この施設における、通常状態のベッド収容数です。',
'Baseline updated': '基準値を更新しました',
'Baselines Details': '基準値の詳細',
'Baselines': '基準値',
'Basic Assess.': '基本アセスメント',
'Basic Assessment Reported': 'ベーシック・アセスメントを報告しました',
'Basic Assessment': '基本アセスメント',
'Basic Details': '基本情報',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '支援要請と寄付に関する基本情報です。カテゴリ、単位、連絡先詳細および状態等が記載されています。',
'Basic medical supplies available prior to disaster': '災害発生以前 基本的な医療行為の提供',
'Basic medical supplies available since disaster': '災害発生後 基本的な医療行為の提供',
'Basic reports on the Shelter and drill-down by region': '避難所の基本レポートと、地域による絞り込み',
'Basic': '基本',
'Baud rate to use for your modem - The default is safe for most cases': 'モデムを使用するためのボーレートです。大抵の場合はデフォルトが安全です。',
'Baud': 'ボー値',
'Beam': '梁',
'Bed Capacity per Unit': '施設ごとのベッド最大収容数',
'Bed Capacity': 'ベッド最大収容数',
'Bed Type': 'ベッド種別',
'Bed type already registered': 'ベッドのタイプは既に登録済みです。',
'Bedding materials available': '寝具が利用可能である',
'Below ground level': '地下',
'Beneficiary Type': '受益者タイプ',
'Biological Hazard': '生物災害',
'Biscuits': 'ビスケット',
'Blizzard': '吹雪',
'Blood Type (AB0)': '血液型 (AB0式)',
'Blowing Snow': '地吹雪',
'Boat': 'ボート',
'Bodies found': '未回収の遺体',
'Bodies recovered': '回収済みの遺体',
'Body Recovery Reports': '遺体回収レポート',
'Body Recovery Request': '遺体回収の要請',
'Body Recovery Requests': '遺体回収の要請',
'Body': '本文',
'Bomb Explosion': '爆発が発生',
'Bomb Threat': '爆発の危険性',
'Bomb': '爆発物',
'Border Color for Text blocks': 'テキストブロックの枠色',
'Bounding Box Insets': '領域を指定した枠組みへ差し込む',
'Bounding Box Size': '領域を指定した枠組みのサイズ',
'Boys 13-18 yrs in affected area': '影響地域内の13-18歳の男子数',
'Boys 13-18 yrs not attending school': '学校に来ていなかった13-18歳の男子数',
'Boys 6-12 yrs in affected area': '影響地域内の6-12歳の男子数',
'Boys 6-12 yrs not attending school': '学校に来ていなかった6-12歳の男子数',
'Brand Details': '銘柄の詳細',
'Brand added': '銘柄を追加しました',
'Brand deleted': '銘柄が削除されました',
'Brand updated': '銘柄が更新されました',
'Brand': '銘柄',
'Brands': '銘柄',
'Breast milk substitutes in use since disaster': '災害発生後から母乳代替品を使用している',
'Breast milk substitutes used prior to disaster': '災害前から母乳代替品を使用していた',
'Bricks': 'レンガ',
'Bridge Closed': '橋梁(通行止め)',
'Bucket': 'バケツ',
'Buddhist': '仏教徒',
'Budget Details': '予算の詳細',
'Budget Updated': '予算を更新しました',
'Budget added': '予算を追加しました',
'Budget deleted': '予算を削除しました',
'Budget updated': '予算を更新しました',
'Budget': '予算',
'Budgeting Module': '予算編成モジュール',
'Budgets': '予算編成',
'Buffer': 'バッファ',
'Bug': 'バグ',
'Building Aide': '建設援助',
'Building Assessment': '建物のアセスメント',
'Building Assessments': '建築物アセスメント',
'Building Collapsed': '崩壊した建物',
'Building Name': '建物名',
'Building Safety Assessments': '建物の安全アセスメント',
'Building Short Name/Business Name': '建物の名前 / 会社名',
'Building or storey leaning': '建物または階層が傾いている',
'Built using the Template agreed by a group of NGOs working together as the': '例えばECB等、多くのNGOによって利用されている形式を使っての記録が可能です。',
'Bulk Uploader': 'まとめてアップロード',
'Bundle Contents': '小包の内容',
'Bundle Details': 'Bundleの詳細',
'Bundle Updated': 'バンドルを更新しました',
'Bundle added': 'バンドルを追加しました',
'Bundle deleted': 'バンドルを削除しました',
'Bundle updated': 'バンドル・セットを更新しました',
'Bundle': 'バンドル',
'Bundles': 'バンドル',
'Burn ICU': '熱傷 ICU',
'Burn': '火傷(やけど)',
'Burned/charred': '火傷/炭化',
'Business damaged': 'ビジネスへの損害が発生している',
'By Inventory': '物資の送付元',
'By Person': '人物ごと',
'By Site': 'サイト別',
'By Warehouse': '送付元倉庫',
'CBA Women': 'CBA 女性',
'CN': '貨物運送状',
'CSS file %s not writable - unable to apply theme!': 'CSS ファイル %s が書き込み不可になっているため、テーマを適用することができません。',
'Calculate': '計算',
'Camp Coordination/Management': '仮泊施設間の調整 / 管理',
'Camp': '仮泊施設',
'Can only disable 1 record at a time!': '一度に1つしか無効にできません!',
'Can users register themselves for authenticated login access?': '新規ユーザが、他者の承認なしに自分を新規ユーザとして登録できるか?',
'Cancel Add': '追加を取り消す',
'Cancel Shipment': '輸送をキャンセルする',
'Cancel': 'キャンセル',
'Canceled': 'キャンセル',
'Candidate Matches for Body %s': 'Bodyに適合した候補者は %s',
'Canned Fish': '魚の缶詰',
'Cannot be empty': '必ず入力してください。',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'リンクされたレコードがあるので削除できません。このレコードよりも先に、リンク先のレコードを削除してください。',
'Cannot disable your own account!': '自分自身のアカウントを無効にする事はできません',
'Capacity (Max Persons)': '収容可能数 (最大人数)',
'Capacity (W x D X H)': '収容可能面積 (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '被災者の個々のグループについて、情報を取得する (ツアー旅行者、滞在者、家族、など)',
'Capture Information on each disaster victim': '被災者情報を個別に把握する',
'Capturing organizational information of a relief organization and all the projects they have in the region': '個々の支援団体と、地域内で実行中の全てのプロジェクトを取得します',
'Capturing the essential services each Volunteer is providing and where': '各ボランティアの居場所と、提供している主要なサービスを取得する',
'Capturing the projects each organization is providing and where': '各団体の所在地と、提供している主要なサービスを取得します',
'Cardiology': '心臓病学',
'Cash available to restart business': '事業再開に必要な資金調達が可能',
'Cassava': 'キャッサバ',
'Casual Labor': '一般労働',
'Casualties': '犠牲者',
'Catalog Item added': '救援物資カタログにアイテムを追加しました',
'Catalog Item deleted': 'カタログアイテムを削除しました',
'Catalog Item updated': '救援物資カタログを更新しました',
'Catalog Item': '救援物資カタログ',
'Catalog Items': '物資カタログ',
'Catalog Name': 'カタログ名',
'Catalog': 'カタログ',
'Category': 'カテゴリ',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog 間の関係を追加しました',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog 関係を削除しました',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog 間の関係を更新しました',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 間の関係',
'Ceilings, light fixtures': '天井、照明あり',
'Central point to record details on People': '被災者や支援者など、関係者情報の集積を行ないます',
'Certificate Status': '認証状態',
'Certification': '有資格者',
'Change Password': 'パスワードの変更',
'Check for errors in the URL, maybe the address was mistyped.': '入力したURLに間違いがないか確認してください。',
'Check if the URL is pointing to a directory instead of a webpage.': 'URLがウェブページではなくディレクトリを指定しているか、確認してください。',
'Check outbox for the message status': '送信箱を調べてメッセージステータスを確認する',
'Check to delete': '削除項目にチェック',
'Check to delete:': '削除項目にチェック:',
'Check': '確認',
'Check-In': 'チェックイン',
'Check-Out': 'チェックアウト',
'Check-in': 'チェックイン',
'Check-out': 'チェックアウト',
'Checklist created': 'チェックリストを作成しました',
'Checklist deleted': 'チェックリストを削除しました',
'Checklist of Operations': '作業項目チェックリスト',
'Checklist updated': 'チェックリストを更新しました',
'Checklist': 'チェックリスト',
'Chemical Hazard': '化学災害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '兵器による攻撃、脅威(化学兵器、生物兵器、放射能汚染、核兵器、高威力の爆発)',
'Chicken': 'ニワトリ',
'Child (2-11)': '子供 (2-11歳)',
'Child (< 18 yrs)': '子供 (18歳未満)',
'Child Abduction Emergency': '未成年誘拐警報',
'Child headed households (<18 yrs)': '代表者が未成年 (18歳以下)の世帯数',
'Child': '子供',
'Children (2-5 years)': '子供たち (2-5歳)',
'Children (5-15 years)': '子供たち(5-15歳)',
'Children (< 2 years)': '子供たち (2歳未満)',
'Children in adult prisons': '成人用刑務所に未成年がいる',
'Children in boarding schools': '寄宿制学校の児童がいる',
'Children in homes for disabled children': '障がい児施設にいる子ども',
'Children in juvenile detention': '少年院収容者がいる',
'Children in orphanages': '孤児院で生活している子供がいる',
'Children living on their own (without adults)': '未成年のみで自活(成人無し)',
'Children not enrolled in new school': '新しい学校に入学していない子供',
'Children orphaned by the disaster': '被災のため孤児になった子供たち',
'Children separated from their parents/caregivers': '親(または親相当の後見人)とはぐれた子供の数',
'Children that have been sent to safe places': '安全な地域へ疎開済みの子供数',
'Children who have disappeared since the disaster': '災害発生後に行方不明の子供たち',
'Children with chronical illnesses': '慢性疾患をもつ子供がいる',
'Chinese (Taiwan)': '中国語 (台湾繁体字)',
'Cholera Treatment Capability': 'コレラ治療対応能力',
'Cholera Treatment Center': 'コレラ治療センター',
'Cholera Treatment': 'コレラの治療',
'Cholera-Treatment-Center': 'コレラ治療センター',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '新規の評価とチームの判定に基づいた新しいポスターを選択してください。建物全体が深刻な状態の場合「危険」を、一部は使える場合「制限あり」です。主要な出入口に「調査済み」プラカードを設置してください。全ての使用可能な出入口には他のプラカードを設置してください。',
'Choose': '選択',
'Choosing Skill and Resources of Volunteers': 'ボランティアのスキルとリソースを選択してください',
'Christian': 'キリスト教徒',
'Church': '教会',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '行方不明時の状況や、この人物の生存を最後に確認した人物についての情報を記載してください。',
'Civil Emergency': '市民緊急事態',
'Cladding, glazing': '被覆・外壁、ガラス板',
'Clear Selection': '選択をクリア',
'Click on the link %(url)s to reset your password': 'リンクをクリックしてください %(url)s パスワードのリセット',
'Click on the link %(url)s to verify your email': 'リンクをクリックしてください %(url)s 登録されたメールアドレスに間違いが無いことが確認されます',
'Client IP': 'クライアントIP',
'Clinical Laboratory': '臨床検査',
'Clinical Operations': '診療の人員数',
'Clinical Status': '診療状況',
'Close map': '地図を閉じる',
'Closed': '閉鎖中',
'Closure': '閉鎖・通行止め',
'Clothing': '衣服',
'Cluster Details': 'クラスタの詳細',
'Cluster Distance': 'クラスタ距離',
'Cluster Subsector Details': 'クラスタのサブクラスタの詳細',
'Cluster Subsector added': 'クラスタのサブセクタを追加しました',
'Cluster Subsector deleted': 'クラスタのサブセクタを削除しました',
'Cluster Subsector updated': 'クラスタのサブセクタを更新しました',
'Cluster Subsector': 'クラスタのサブクラスタ',
'Cluster Subsectors': 'クラスタのサブセクタ',
'Cluster Threshold': 'クラスタのしきい値',
'Cluster added': 'クラスタを追加しました',
'Cluster deleted': 'クラスタを削除しました',
'Cluster updated': 'クラスタを更新しました',
'Cluster': 'クラスタ',
'Cluster(s)': 'クラスタ',
'Clusters': 'クラスタ',
'Code': 'プロジェクトコード',
'Cold Wave': '寒波',
'Collapse, partial collapse, off foundation': '全壊、一部損壊、基礎からのずれ',
'Collective center': '収集センター',
'Color for Underline of Subheadings': 'サブヘッダのアンダーラインの色',
'Color of Buttons when hovering': 'ホバー時のボタンの色',
'Color of bottom of Buttons when not pressed': '押されなかった時のボタンの下部の色',
'Color of bottom of Buttons when pressed': 'ボタン押下時の下部の色',
'Color of dropdown menus': 'ドロップダウンメニューの色',
'Color of selected Input fields': '選択中の入力フィールドの色',
'Color of selected menu items': '選択中のメニューアイテムの色',
'Column Choices (One Per Line': 'カラム選択 (一行に一つ',
'Columns, pilasters, corbels': '円柱、付け柱、コーベル',
'Combined Method': '複数証跡の組み合わせ',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '復旧まで少々お待ちください。あなた以外の閲覧者にも、この表示がされています。',
'Come back later.': '復旧まで少々お待ちください',
'Comments': 'コメント',
'Commercial/Offices': '商業 / オフィス',
'Commit Date': '受け入れ日',
'Commit from %s': '%sからのコミット',
'Commit': 'コミット',
'Commit Status': '支援の引き受け状況',
'Commiting a changed spreadsheet to the database': '変更後のスプレッドシートをデータベースに反映します',
'Commitment Added': 'コミットメントを追加しました',
'Commitment Canceled': 'コミットをキャンセルしました',
'Commitment Details': 'コミットの詳細',
'Commitment Item Details': 'コミットされた救援物資の詳細',
'Commitment Item added': 'コミットの物資を追加しました',
'Commitment Item deleted': 'コミットされた救援物資を削除しました',
'Commitment Item updated': 'コミット物資を更新しました',
'Commitment Item': '物資のコミットメント',
'Commitment Items': 'コミットされた物資',
'Commitment Status': '支援の引き受け状況',
'Commitment Updated': 'コミットを更新しました',
'Commitment': 'コミットメント',
'Commitments': 'コミット',
'Committed By': '受け入れ団体/人',
'Committed': 'コミット済み',
'Committing Inventory': '引き受け中の倉庫',
'Communication problems': 'コミュニケーションの問題',
'Community Centre': 'コミュニティセンター',
'Community Health Center': '地域の医療センター',
'Community Member': 'コミュニティの構成員',
'Complete Unit Label for e.g. meter for m.': '単位を表すラベル。例えばメートルなら m など。',
'Complete': '完了',
'Completed': '完了',
'Complexion': '人種、肌色',
'Compose': 'メッセージ作成',
'Compromised': '易感染状態',
'Concrete frame': 'コンクリートのフレーム',
'Concrete shear wall': 'コンクリートせん断壁',
'Config added': '設定を追加しました',
'Config deleted': '設定を削除しました',
'Config updated': '設定を更新しました',
'Config': '設定',
'Configs': '設定',
'Configurations': '設定',
'Configure Run-time Settings': 'ランタイムの設定',
'Confirm Shipment Received': '配送物の受領を確認',
'Confirmed Incidents': '確認済みのインシデント',
'Confirmed': '確認済み',
'Conflict Details': 'コンフリクトの詳細',
'Conflict Resolution': 'データ競合の解決',
'Consignment Note': '出荷通知',
'Constraints Only': '制約のみ',
'Consumable': '消耗品',
'Contact Data': '連絡先データ',
'Contact Details': '連絡先の詳細',
'Contact Information Added': '連絡先情報を追加しました',
'Contact Information Deleted': '連絡先情報を削除しました',
'Contact Information Updated': '連絡先情報を更新しました',
'Contact Information': '連絡先情報',
'Contact Method': '問い合わせ方法',
'Contact Name': '連絡先名',
'Contact Person': '窓口担当者',
'Contact Phone': '連絡先電話番号',
'Contact details': '連絡先の詳細',
'Contact information added': '連絡先情報を追加しました',
'Contact information deleted': '連絡先情報を削除しました',
'Contact information updated': '連絡先情報を更新しました',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '詳細事項の質問や連絡を行なう際の連絡担当者を記載します(レポート報告者と異なる場合のみ)。電話番号、住所、電子メールなどを記載してください。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '情報伝達や追加質問を行う際の代表担当者(報告者と異なる場合のみ記載してください)。電話番号や住所、メールアドレスなどを指定できます。',
'Contact us': '問い合わせ',
'Contact': '連絡先',
'Contacts': '連絡先',
'Contents': '内容',
'Contradictory values!': '値が矛盾しています!',
'Contributor': '投稿者',
'Conversion Tool': '変換ツール',
'Cooking NFIs': '調理用器具',
'Cooking Oil': '調理油',
'Coordinate Conversion': '座標変換',
'Coping Activities': '一時対応活動',
'Copy any data from the one to be deleted into the one to keep': '削除する側の候補地から残す方の候補地へ、必要なデータを転載します。',
'Copy': 'コピー',
'Corn': 'とうもろこし',
'Cost Type': '料金種別',
'Cost per Megabyte': '1メガバイト毎に課金',
'Cost per Minute': '1分毎に課金',
'Country of Residence': '居住国',
'Country': '国',
'Create & manage Distribution groups to receive Alerts': 'アラートの送付先グループを作成・管理する',
'Create Activity Report': '支援活動レポートを追加',
'Create Activity Type': '支援活動タイプを追加',
'Create Activity': '支援活動を追加',
'Create Assessment': 'アセスメントを新規追加',
'Create Asset': '資産の追加',
'Create Bed Type': 'ベッドの種類を追加',
'Create Brand': '銘柄を追加',
'Create Budget': '予算を追加',
'Create Catalog Item': '物資カタログを追加',
'Create Catalog': 'カタログを追加',
'Create Checklist': 'チェックリストの作成',
'Create Cholera Treatment Capability Information': 'コレラ治療能力に関する情報の追加',
'Create Cluster Subsector': 'クラスタのサブセクタを追加',
'Create Cluster': 'クラスタを追加',
'Create Contact': '連絡先を追加',
'Create Dead Body Report': '遺体発見レポートを追加',
'Create Feature Layer': 'Feature Layerを追加',
'Create Group Entry': 'グループエントリの作成',
'Create Group': 'グループを追加',
'Create Hospital': '病院を新規追加',
'Create Identification Report': 'IDレポートを追加',
'Create Impact Assessment': '災害影響範囲アセスメントの作成',
'Create Import Job': 'Import Jobの作成',
'Create Incident Report': 'インシデントレポートを追加',
'Create Incident': 'インシデントを追加',
'Create Item Category': '物資カテゴリを追加',
'Create Item Pack': '救援物資パックの追加',
'Create Item': '救援物資を新規追加',
'Create Kit': 'キットを新規追加',
'Create Layer': 'レイヤを追加',
'Create Location': 'ロケーションを追加',
'Create Map Profile': '地図設定を追加',
'Create Marker': 'マーカーを追加',
'Create Member': 'メンバを追加',
'Create Mobile Impact Assessment': '災害影響範囲アセスメントをモバイル端末から作成',
'Create Office': 'オフィスを追加',
'Create Organization': '団体を追加',
'Create Personal Effects': 'Personal Effectsを追加',
'Create Project': 'プロジェクトを追加',
'Create Projection': '地図投影法を追加',
'Create Rapid Assessment': '被災地の現況アセスメントを作成',
'Create Report': 'レポートを新規追加',
'Create Request': '支援要請を作成',
'Create Resource': 'リソースを追加',
'Create River': '河川情報を追加',
'Create Role': '役割を追加',
'Create Sector': '活動分野を追加',
'Create Service Profile': 'サービスプロファイルを追加',
'Create Shelter Service': '避難所における提供サービスを追加',
'Create Shelter Type': '避難所タイプを追加',
'Create Shelter': '避難所を追加',
'Create Skill Type': 'スキルタイプを追加',
'Create Skill': 'スキルを追加',
'Create Status': '状況を追加',
'Create Task': 'タスクを追加',
'Create Theme': 'テーマを追加',
'Create User': 'ユーザを追加',
'Create Volunteer': 'ボランティアの追加',
'Create Warehouse': '倉庫を追加',
'Create a Person': '人物情報を追加',
'Create a group entry in the registry.': '登録にグループエントリを作成。',
'Create, enter, and manage surveys.': '調査の作成、入力、管理を実施',
'Creation of Surveys': '聞き取り調査の新規作成',
'Credential Details': '証明書の詳細',
'Credential added': '証明書を追加しました',
'Credential deleted': '証明書を削除しました',
'Credential updated': '証明書を更新しました',
'Credentials': '証明書',
'Crime': '犯罪',
'Criteria': '基準',
'Currency': '通貨',
'Current Group Members': '現在のグループメンバ',
'Current Identities': '現在のID',
'Current Location': '現在のロケーション',
'Current Log Entries': '現在のログエントリ',
'Current Memberships': '現在のメンバシップ',
'Current Notes': '現在選択中の追加情報',
'Current Registrations': '現在の登録',
'Current Status': '現在の状況',
'Current Team Members': '現在のチームメンバ',
'Current Twitter account': '現在のTwitterアカウント',
'Current community priorities': '現在のコミュニティの優先順位',
'Current general needs': '現在の需要',
'Current greatest needs of vulnerable groups': '現在、被災者が最も必要としている物資/サービス',
'Current health problems': '現在の健康問題',
'Current main income sources': '現在の主な収入源',
'Current major expenses': '現在の主な支出項目',
'Current number of patients': '現在の患者数',
'Current problems, categories': '現在の問題、カテゴリ',
'Current problems, details': '現在の問題の詳細',
'Current request': '現在の要求',
'Current response': '現在の対応状況',
'Current session': '現在のセッション',
'Current type of health problems, adults': '現在発生中の健康問題(成人)',
'Current type of health problems, children': '現在発生中の健康問題(小児)',
'Current type of source for drinking water': '現在の飲料水確保方法',
'Current type of source for sanitary water': '現在の生活用水確保方法',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'カストマイズされたデータベースのリソース (例:Sahana 内のリソースとして定義された物)',
'Customisable category of aid': 'カスタマイズ可能な支援カテゴリ',
'DC': '寄付の証明(Donation Certificate)',
'DECISION': '決定',
'DNA Profile': 'DNAプロファイル',
'DNA Profiling': 'DNAプロファイリング',
'DVI Navigator': '被災者の検索',
'Daily': '日次',
'Dam Overflow': 'ダム決壊',
'Damage': '損傷',
'Dangerous Person': '危険人物',
'Dashboard': 'ダッシュボード',
'Data import policy': 'データのインポートポリシー',
'Data uploaded': 'データがアップロードされました',
'Database': 'データベース',
'Date & Time': '日付と時刻',
'Date Avaialble': '利用可能な日付',
'Date Available': '可能な日付',
'Date Received': '物資受領日',
'Date Requested': '要請した日',
'Date Required': '物資が必要になる日',
'Date Sent': '送付日',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '物資を受領した日時を記録します。デフォルトでは現在の時間が入力されます。変更するには、ドロップダウンリストから選択してください。',
'Date and Time': '日付と時刻',
'Date and time this report relates to.': 'このレポートに関連する日付と時刻',
'Date of Birth': '生年月日',
'Date of Latest Information on Beneficiaries Reached': '恩恵を受ける人にたどり着いた最新の情報の日付',
'Date of Report': 'レポートの日付',
'Date': '日付',
'Date/Time of Find': '日付/発見日時',
'Date/Time of disappearance': '行方不明になった日付/時刻',
'Date/Time': '日付/時刻',
'De-duplicator': '重複解消機能',
'Dead Body Details': '遺体の詳細',
'Dead Body Reports': '遺体情報レポート',
'Dead Body': '遺体の管理',
'Dead body report added': '遺体発見レポートを追加しました',
'Dead body report deleted': '遺体報告を削除しました',
'Dead body report updated': '遺体レポートを更新しました',
'Deaths in the past 24h': '過去24時間の死者',
'Deaths/24hrs': '死亡者数/24h',
'Debug': 'デバッグ',
'Deceased': '死亡',
'Decimal Degrees': '十進角',
'Decomposed': '腐乱',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの縦高。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Height of the map window.': '地図ウィンドウの初期の高さ',
'Default Marker': 'デフォルトマーカー',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの幅。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Width of the map window.': '地図ウィンドウの幅の初期値',
'Default synchronization policy': 'データ同期ポリシーのデフォルト設定',
'Defaults updated': 'デフォルト値を更新しました',
'Defaults': 'デフォルト値',
'Defecation area for animals': '動物排便用の地域',
'Defines the icon used for display of features on handheld GPS.': 'ハンドヘルドGPSに表示するアイコンを決定します。',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '対話型地図および KML の出力上で Feature の表示に使用するアイコンを定義します。Feature Class に割り当てられたマーカーを上書きする必要がある場合、個々の場所に割り当てられたマーカーが設定されます。どちらも定義されていない場合は、デフォルトのマーカーが使用されます。',
'Defines the icon used for display of features on interactive map & KML exports.': 'インタラクティブマップとKMLエクスポートで建物などの表示に使われるアイコン定義',
'Defines the marker used for display & the attributes visible in the popup.': 'ポップアップ時と通常時に表示されるマーカーを指定してください。',
'Degrees must be a number between -180 and 180': '度数は -180 から 180 の間にしてください。',
'Dehydration': '脱水症状',
'Delete Aid Request': '援助要請を削除',
'Delete Alternative Item': '代わりの物資を削除する',
'Delete Assessment Summary': 'アセスメントの要約を削除',
'Delete Assessment': 'アセスメントを削除',
'Delete Asset Assignments': '資産割り当ての削除',
'Delete Asset': '資産の削除',
'Delete Baseline Type': '基準値タイプを削除',
'Delete Baseline': '基準値を削除',
'Delete Brand': 'ブランドを削除してください',
'Delete Budget': '予算を削除',
'Delete Bundle': 'Bundleを削除',
'Delete Catalog Item': '救援物資カタログを削除',
'Delete Cluster Subsector': 'クラスタのサブクラスタを削除',
'Delete Cluster': 'クラスタを削除',
'Delete Commitment Item': 'コミットした物資の削除',
'Delete Commitment': 'コミットメントの削除',
'Delete Config': '設定を削除',
'Delete Contact Information': '連絡先情報の削除',
'Delete Credential': '証明書の削除',
'Delete Distribution Item': '配給物資を削除',
'Delete Distribution': '配給所を削除',
'Delete Document': '文書を削除',
'Delete Donor': '資金提供組織を削除',
'Delete Entry': 'エントリを削除',
'Delete Feature Layer': '機能レイヤを削除',
'Delete Group': 'グループを削除',
'Delete Hospital': '病院を削除',
'Delete Image': '画像を削除',
'Delete Impact Type': '影響範囲のタイプを削除',
'Delete Impact': '影響範囲の削除',
'Delete Incident Report': 'インシデントレポートを削除',
'Delete Incident': 'インシデントを削除',
'Delete Inventory Item': '備蓄物資を削除',
'Delete Inventory Store': '物資集積地点を削除',
'Delete Item Category': 'アイテムカテゴリを削除',
'Delete Item Pack': '救援物資パックの削除',
'Delete Item': '救援物資を削除',
'Delete Key': 'Keyを削除',
'Delete Kit': 'Kitを削除',
'Delete Layer': 'レイヤーを削除',
'Delete Level 1 Assessment': 'レベル1アセスメントの削除',
'Delete Level 2 Assessment': 'レベル2アセスメントの削除',
'Delete Location': 'ロケーションを削除',
'Delete Map Profile': '地図設定を削除',
'Delete Marker': 'マーカーを削除',
'Delete Membership': 'メンバシップを削除',
'Delete Message': 'メッセージを削除',
'Delete Metadata': 'メタデータを削除',
'Delete Need Type': '需要タイプを削除',
'Delete Need': '要求を削除',
'Delete Office': 'オフィスを削除',
'Delete Old': '古いものを削除',
'Delete Organization': '団体情報を削除',
'Delete Peer': 'データ同期先の削除',
'Delete Person': '人物情報を削除',
'Delete Photo': '写真を削除',
'Delete Project': 'プロジェクトを削除',
'Delete Projection': '地図投影法を削除',
'Delete Rapid Assessment': '被災地の現況アセスメントを削除',
'Delete Received Item': '受け取った物資の削除',
'Delete Received Shipment': '受け取った輸送の削除',
'Delete Record': 'レコードを削除',
'Delete Recovery Report': '遺体回収レポートを削除',
'Delete Report': 'レポートを削除',
'Delete Request Item': '物資の要請を削除',
'Delete Request': '支援要請を削除',
'Delete Resource': 'リソースを削除',
'Delete Section': 'Sectionを削除',
'Delete Sector': '活動分野を削除',
'Delete Sent Item': '送付物資を削除',
'Delete Sent Shipment': '輸送物資を削除',
'Delete Service Profile': 'サービスプロファイルを削除',
'Delete Setting': '設定を削除',
'Delete Skill Type': 'スキルタイプを削除',
'Delete Skill': 'スキルを削除',
'Delete Staff Type': 'スタッフタイプを削除',
'Delete Status': '状況を削除しました',
'Delete Subscription': '寄付申し込みを削除',
'Delete Survey Answer': '調査回答削除',
'Delete Survey Question': 'Survey Questionを削除',
'Delete Survey Section': '調査項目を削除',
'Delete Survey Series': '一連の調査を削除',
'Delete Survey Template': '調査用テンプレートを削除',
'Delete Unit': '単位を削除',
'Delete User': 'ユーザを削除',
'Delete Volunteer': 'ボランティアを削除',
'Delete Warehouse Item': '倉庫物資の削除',
'Delete Warehouse': '倉庫を削除',
'Delete from Server?': 'サーバから削除しますか?',
'Delete': '削除',
'Delivered': '配信済み',
'Delphi Decision Maker': 'Delphi意思決定',
'Demographic': '人口情報',
'Demonstrations': 'デモ発生',
'Dental Examination': '歯科検査',
'Dental Profile': '歯の欠損/治療跡',
'Department/Unit Name': '所属部課名',
'Deployment': '展開',
'Describe the condition of the roads to your hospital.': '病院までの道路状況を記載してください',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'このレコードに関連する手続きを説明してください。(例えば "検診" です。)',
'Description of Bin Type': 'Binタイプを記載してください',
'Description of Contacts': '連絡先の説明',
'Description of defecation area': '排泄用地についての補足説明',
'Description of drinking water source': '飲料水に関する補足説明',
'Description of sanitary water source': '生活用水に関する説明',
'Description of water source before the disaster': '災害発生前の水の確保方法について補足説明',
'Description': '説明',
'Descriptive Text (e.g., Prose, etc)': '説明文 (例: 文学、等)',
'Designated for': '指定済み',
'Desire to remain with family': '家族との残留を希望',
'Destination': '目的地',
'Detail': '詳細',
'Details': '詳細',
'Dialysis': '透析',
'Diaphragms, horizontal bracing': '仕切り板、水平部材',
'Diarrhea among children under 5': '5歳未満の幼児に下痢が蔓延している',
'Diarrhea': '下痢',
'Dignitary Visit': '要人の訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Direction': '方向',
'Disable': '無効',
'Disabled participating in coping activities': '障害者が災害対応に従事',
'Disabled': '無効',
'Disabled?': '無効になっているか?',
'Disaster Victim Identification': '被災者の同定',
'Disaster Victim Registry': '被災者登録',
'Disaster clean-up/repairs': '災害の清掃活動や修復',
'Discharge (cusecs)': '流水量 (cusecs)',
'Discharges/24hrs': '退院者数/24h',
'Discussion Forum on item': 'フォーラム(物資について)',
'Discussion Forum': 'フォーラム',
'Disease vectors': '病原媒介者',
'Dispatch Items': 'アイテムの発送',
'Dispatch': '発送',
'Dispensary': '診療所',
'Displaced Populations': '避難者数',
'Displaced': '避難中',
'Display Polygons?': '多角形を表示しますか?',
'Display Routes?': 'ルートを表示しますか?',
'Display Tracks?': 'Tracksを表示しますか?',
'Display Waypoints?': 'ウェイポイントを表示しますか?',
'Dispose Expired/Unusable Items': '期限切れ / 使用できない物資の処分',
'Dispose': '処分',
'Distance between defecation area and water source': '水資源採取場所と排泄場所の間の距離',
'Distance between latrines and temporary shelter in meters': 'トイレと避難所の距離(m)',
'Distance between shelter and latrines': '簡易避難所と排泄場所との間の距離(メートル)',
'Distance(Kms)': '距離(Kms)',
'Distribution Details': '配給所の詳細',
'Distribution Item Details': '配給物資の詳細',
'Distribution Item added': '配給物資を追加しました',
'Distribution Item deleted': '配給物資を削除しました',
'Distribution Item updated': '配給物資を更新しました',
'Distribution Item': '配給物資',
'Distribution Items': '配給物資',
'Distribution added': '配給所を追加しました',
'Distribution deleted': '配給所を削除しました',
'Distribution groups': '配信グループ',
'Distribution updated': '配給所を更新しました',
'Distribution': '配給所',
'Distributions': '配給所',
'District': '地区(行政地区)',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の青年は、災害に対応するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '1つの世帯ごとに、少なくとも2つ以上の水貯蔵容器(10-20リットル/容器)があるかどうかを記載してください',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '調理や食事に必要となる道具や器材(コンロ、ポット、皿やプレート、マグカップ、飲料容器など)が世帯に存在するかを記載します',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'ベッド、あるいはベッド用部材(例:タープ、プラスチックマット、毛布)が世帯に存在するかを記載します',
'Do households have household water storage containers?': '水貯蔵容器が世帯に存在するかを記載します',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '地域にいるマイノリティ(社会的少数者)の人が、自助的な災害対処につながる活動に参加しているか記載してください。(例 打ち合わせ、宗教活動、地域の清掃ボランティアなど)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '災害復旧活動に従事している高齢者が、共同体の中にいるかどうかを記載してください(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '個人に対して、少なくとも2セット以上の衣服(シャツ、ズボン/腰巻、下着など)があるかどうか記載してください',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '十分な量のサニタリ / 衛生用品が、安定して供給されているかどうかを記載します(石鹸、シャンプー、歯ブラシ、洗濯用洗剤など)',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域で障害者と一緒にいる方は、災害に対処するための彼らの支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do women and girls have easy access to sanitary materials?': '女性用生理用品の入手が容易かどうかを記載してください',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の女性は、災害対応のための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do you have access to cash to restart your business?': 'ビジネス再開に必要な現金が入手可能かどうかを記載してください',
'Do you know of any incidents of violence?': '暴力事件が発生したかどうかを記載してください',
'Do you know of children living on their own (without adults)?': '成人がおらず、未成年のみで生活しているグループがあるかどうかを記載してください',
'Do you know of children separated from their parents or caregivers?': '親や養育者とはぐれた未成年がいるかどうかを記載してください',
'Do you know of children that have been orphaned by the disaster?': '災害によって孤児となった未成年がいるかどうかを記載してください',
'Do you know of children that have been sent to safe places?': '安全な場所に疎開した未成年がいるかどうかを記載してください',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '災害発生後、行き先の説明ないまま連絡が取れなくなった未成年がいるかどうかを記載してください',
'Do you know of older people who are primary caregivers of children?': '未成年に対する介護経験がある高齢者がいるかどうかを記載してください',
'Do you know of parents/caregivers missing children?': '子供と連絡が取れなくなった親や養育者がいるかどうかを記載してください',
'Do you really want to delete these records?': '本当にこれらのデータを削除しますか?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'この輸送の受領をキャンセルしますか?キャンセルするとこの物資は備蓄から削除されます。この操作は *取り消せません!*',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '出荷された物資をキャンセルしますか?この物資は、在庫に返されます。このアクションは、元に戻せません。',
'Do you want to over-write the file metadata with new default values?': 'ファイルのメタデータを、新しいデフォルト値で上書きしますか?',
'Do you want to receive this shipment?': 'この輸送物資を受け取られますか?',
'Do you want to send these Committed items?': 'これらコミットされた物資を送付してよいですか?',
'Do you want to send this shipment?': 'この発送情報を送信しますか?',
'Document Details': '文書の詳細',
'Document Scan': '文書のスキャン',
'Document added': '文書を追加しました',
'Document deleted': '文書を削除しました',
'Document updated': '文書を更新しました',
'Document': '文書',
'Documents and Photos': '文書と写真',
'Documents': '文書',
'Does this facility provide a cholera treatment center?': 'コレラ治療センターの機能を提供可能かどうか',
'Doing nothing (no structured activity)': '活動なし(組織立った行動なし)',
'Dollars': 'ドル',
'Domain': 'ドメイン',
'Domestic chores': '家事手伝い',
'Donation Certificate': '寄付証明書',
'Donation Phone #': '寄付受付電話番号',
'Donor Details': '資金提供組織の詳細',
'Donor added': '資金提供組織を追加しました',
'Donor deleted': '資金提供組織を削除しました',
'Donor updated': '資金提供組織を更新しました',
'Donor': '資金提供組織',
'Donors Report': '資金提供レポート',
'Donors': '資金提供組織',
'Door frame': 'ドア枠',
'Download PDF': 'PDFをダウンロード',
'Draft Features': '草案(ドラフト)',
'Draft': 'ドラフト',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'ロケーションに対する、スタッフと備品の予算を作成します。',
'Drill Down by Group': 'グループで絞り込み',
'Drill Down by Incident': 'インシデントで絞り込み',
'Drill Down by Shelter': '避難所で絞り込み',
'Driving License': '運転免許',
'Drought': '干ばつ',
'Drugs': '医薬品',
'Dug Well': '丸井戸',
'Duplicate?': '重複?',
'Duration': '活動実施期間',
'Dust Storm': '粉塵嵐',
'Dwelling': '居住施設',
'Dwellings': '住居数',
'EMS Reason': '緊急医療受け入れ状態',
'EMS Status Reason': '救急医療状況の理由',
'EMS Status': 'EMSステータス',
'EMS Traffic Status': '救急医療の混雑状況',
'ER Status Reason': 'ER医療状況の理由',
'ER Status': 'ER ステータス',
'Early Recovery': '早期復旧',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '女性用サニタリ用品の入手が容易である',
'Edit Activity': '支援活動を編集',
'Edit Address': '住所の編集',
'Edit Aid Request': '援助要請を編集',
'Edit Alternative Item': '代わりの物資を編集',
'Edit Application': 'アプリケーションの編集',
'Edit Assessment Summary': 'アセスメントの要約を編集',
'Edit Assessment': 'アセスメントを編集',
'Edit Asset Assignment': '資産割り当ての編集',
'Edit Asset': '資産を編集',
'Edit Baseline Type': '基準値のタイプを編集',
'Edit Baseline': 'Baselineの編集',
'Edit Brand': '銘柄の編集',
'Edit Budget': '予算の編集',
'Edit Bundle': 'Bundleの編集',
'Edit Catalog Item': '救援物資カタログの編集',
'Edit Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係の編集',
'Edit Cluster Subsector': 'クラスタのサブセクターの編集',
'Edit Cluster': 'クラスタを編集',
'Edit Commitment Item': 'コミットされた物資の検索',
'Edit Commitment': 'コミットを編集',
'Edit Config': '設定の編集',
'Edit Contact Information': '連絡先情報の編集',
'Edit Contact': '連絡先の編集',
'Edit Contents': '内容の編集',
'Edit Credential': '証明書の編集',
'Edit Dead Body Details': '遺体の詳細を編集',
'Edit Defaults': 'デフォルト値の編集',
'Edit Description': '説明の編集',
'Edit Details': '詳細の編集',
'Edit Disaster Victims': '被災者情報の編集',
'Edit Distribution Item': '配給物資の編集',
'Edit Distribution': '配給所の編集',
'Edit Document': '文書を編集',
'Edit Donor': '資金提供組織の編集',
'Edit Email Settings': '電子メール設定の編集',
'Edit Feature Layer': 'Feature Layerの編集',
'Edit Flood Report': '洪水レポートの編集',
'Edit Gateway Settings': 'ゲートウェイ設定の編集',
'Edit Group': 'グループの編集',
'Edit Hospital': '病院の編集',
'Edit Identification Report': 'IDレポートの編集',
'Edit Identity': 'IDの編集',
'Edit Image Details': '画像の詳細の編集',
'Edit Image': '画像の編集',
'Edit Impact Type': '災害影響のタイプを編集',
'Edit Impact': '被災影響の編集',
'Edit Incident Report': 'インシデントレポートの編集',
'Edit Incident': 'インシデントを編集',
'Edit Inventory Item': '備蓄物資の編集',
'Edit Inventory Store': '物資集積地点の編集',
'Edit Item Catalog Categories': '救援物資カタログのカテゴリを編集',
'Edit Item Catalog': '救援物資カタログの編集',
'Edit Item Category': '救援物資カテゴリの編集',
'Edit Item Pack': '物資パックを編集',
'Edit Item Sub-Categories': '救援物資サブカテゴリの編集',
'Edit Item': '物資の編集',
'Edit Key': 'Keyの編集',
'Edit Kit': 'Kitの編集',
'Edit Layer': 'レイヤの編集',
'Edit Level 1 Assessment': 'レベル1アセスメントを編集する',
'Edit Level 2 Assessment': 'レベル2アセスメントを編集',
'Edit Location': 'ロケーションの編集',
'Edit Log Entry': 'ログエントリの編集',
'Edit Map Profile': '地図設定を編集する',
'Edit Map Services': '地図サービスの編集',
'Edit Marker': 'マーカーの編集',
'Edit Membership': 'メンバシップの編集',
'Edit Message': 'メッセージの編集',
'Edit Messaging Settings': 'メッセージ設定の編集',
'Edit Metadata': 'メタデータの編集',
'Edit Modem Settings': 'モデム設定の編集',
'Edit Need Type': '需要タイプの編集',
'Edit Need': 'ニーズを編集',
'Edit Note': '追加情報を編集',
'Edit Office': 'オフィスの編集',
'Edit Options': 'オプション編集',
'Edit Organization': '団体の編集',
'Edit Parameters': 'パラメータの編集',
'Edit Peer Details': 'データ同期先の詳細を編集',
'Edit Peer': 'データ同期先の編集',
'Edit Person Details': '人物情報の詳細を編集',
'Edit Personal Effects Details': 'Personal Effectsの詳細の編集',
'Edit Photo': '写真の編集',
'Edit Pledge': '寄付の編集',
'Edit Position': '場所の編集',
'Edit Problem': '問題の編集',
'Edit Project': 'プロジェクトの編集',
'Edit Projection': '地図投影法の編集',
'Edit Rapid Assessment': '被災地の現況アセスメントの編集',
'Edit Received Item': '物資の受領を編集',
'Edit Received Shipment': '物資の輸送の受領報告を編集',
'Edit Record': 'レコードの編集',
'Edit Recovery Details': '遺体回収の詳細を編集',
'Edit Registration Details': '登録状況の詳細を編集',
'Edit Registration': '登録の編集',
'Edit Report': 'レポートの編集',
'Edit Request Item': '物資の要請を編集',
'Edit Request': '支援要請の編集',
'Edit Resource': 'リソースの編集',
'Edit Response': '返信を編集',
'Edit River': '河川の編集',
'Edit Role': '役割の編集',
'Edit Sector': '活動分野を編集',
'Edit Sent Item': '送付した物資の編集',
'Edit Setting': '設定の編集',
'Edit Settings': '設定の編集',
'Edit Shelter Service': '避難所提供サービスの編集',
'Edit Shelter Type': '避難所タイプの編集',
'Edit Shelter': '避難所の編集',
'Edit Shipment Transit Log': '輸送履歴の編集',
'Edit Shipment to Send': '送付する輸送を編集',
'Edit Shipment/Way Bills': '輸送費/移動費の編集',
'Edit Shipment<>Item Relation': '輸送<>物資の関係を編集',
'Edit Site': 'Siteを編集',
'Edit Skill Type': 'スキルタイプの編集',
'Edit Skill': 'スキルの編集',
'Edit Solution': '解決案の編集',
'Edit Staff Type': 'スタッフタイプの編集',
'Edit Staff': 'スタッフの編集',
'Edit Storage Bin Type(s)': 'Storage Binタイプを編集',
'Edit Storage Bins': 'Storage Binの編集',
'Edit Storage Location': '備蓄地点の編集',
'Edit Subscription': '寄付申し込みの編集',
'Edit Survey Answer': '調査回答の編集',
'Edit Survey Question': '調査の質問項目を編集',
'Edit Survey Section': 'フィードバック内容を編集します',
'Edit Survey Series': '一連の調査の編集',
'Edit Survey Template': '調査テンプレートを編集',
'Edit Task': 'タスクの編集',
'Edit Team': 'チームの編集',
'Edit Theme': 'テーマの編集',
'Edit Themes': 'テーマの編集',
'Edit Ticket': 'チケットの編集',
'Edit Track': '追跡情報の編集',
'Edit Tropo Settings': 'Tropo 設定の編集',
'Edit Unit': '単位の編集',
'Edit User': 'ユーザの編集',
'Edit Volunteer Details': 'ボランティアの詳細を編集する',
'Edit Volunteer Registration': 'ボランティア登録の編集',
'Edit Warehouse Item': '倉庫物資を編集',
'Edit Warehouse': '倉庫を編集',
'Edit current record': '現在のレコードの編集',
'Edit message': 'メッセージの編集',
'Edit the Application': 'アプリケーションの編集',
'Edit': '編集',
'Editable?': '編集可能?',
'Education materials received': '教育資材を受領した',
'Education materials, source': '教育資材の送付元',
'Education': '教育',
'Effects Inventory': '備蓄物資への影響',
'Eggs': '卵',
'Either a shelter or a location must be specified': '避難所かロケーションのどちらかを特定する必要があります',
'Either file upload or document URL required.': 'ファイルのアップロードと文書のURLの両方が必要です。',
'Either file upload or image URL required.': 'アップロードするファイルか、URLを指定してください。',
'Elderly person headed households (>60 yrs)': '代表者が60歳以上の世帯数',
'Electrical': '電動の',
'Electrical, gas, sewerage, water, hazmats': '電気、ガス、下水道、水、有害物',
'Elevated': '高まる',
'Elevators': 'エレベーター',
'Email Address': 'メールアドレス',
'Email Settings': '電子メール設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子メールの認証は完了しましたが、登録はまだ完了していません。確認が完了するまで少々お待ちください。',
'Email settings updated': '電子メールの設定を更新しました',
'Email verification': '利用者登録の確認',
'Email': '電子メール',
'Embalming': '遺体防腐処理',
'Embassy': '大使館',
'Emergency Capacity Building project': 'ECB (緊急時の被災者収容建築プロジェクト)',
'Emergency Department': '救急部門',
'Emergency Shelter': '緊急避難所',
'Emergency Support Facility': '緊急支援施設',
'Emergency Support Service': '緊急支援サービス',
'Emergency Telecommunications': '緊急時電話連絡先',
'Enable/Disable Layers': 'レイヤの有効化/無効化',
'Enabled': '有効',
'End date should be after start date': '終了日付は開始日付より後にしてください',
'End date': '終了日',
'End of Period': '終了期間',
'English': 'English 英語',
'Enter Coordinates': '緯度経度を入力',
'Enter Coordinates:': '座標入力:',
'Enter a GPS Coord': 'GPS Coordを入力',
'Enter a GPS Coordinate': 'GPS座標を入力してください',
'Enter a date before': '以前の日時を入力',
'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.': '最初の数文字を入力して既存の項目から選ぶか、あるいは新しいロケーション名を入力して、ロケーションを特定してください。',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'アップロードするスプレッドシートの名前を入力してください。(必須項目)',
'Enter a new support request.': '新規の支援要請を登録',
'Enter a summary of the request here.': '要求事項の概要を入力',
'Enter a unique label!': 'そのラベル名は使われています。一意のラベル名を入力してください。',
'Enter a valid date before': 'より前の正しい日付を入力してください',
'Enter a valid email': '正しいメールアドレスを入力してください',
'Enter a valid future date': '正しい未来の日付を入力してください',
'Enter some characters to bring up a list of possible matches': '文字を入力することで、候補の一覧が表示されます',
'Enter some characters to bring up a list of possible matches.': '検索文字列を入力してください',
'Enter tags separated by commas.': 'タグはカンマで区切って入力してください。',
'Enter the same password as above': '確認のため、パスワードを再入力',
'Enter your firstname': 'あなたの名前を入力',
'Entered': '入力された',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '電話番号の入力は任意です。入力すると、SMS メッセージの受け取り登録ができます。',
'Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions.': '選択リストに含まれる団体のメンバーであれば、所属する団体を選択してください。(団体の選択は必須ではありません)',
'Entry deleted': 'エントリを削除しました',
'Environment': '環境',
'Equipment': '備品',
'Error encountered while applying the theme.': 'テーマ適用時にエラーが発生しました。',
'Error in message': 'エラーメッセージ',
"Error logs for '%(app)s'": '"%(app)s" に関するエラーログ',
'Errors': 'エラー',
'Estimated # of households who are affected by the emergency': '非常事態の影響を受けた世帯の推定数',
'Estimated # of people who are affected by the emergency': '非常事態の影響を受けた住民の推定数',
'Estimated Overall Building Damage': '建物全体の被害見積り',
'Estimated total number of people in institutions': 'なんらかの施設に収容されている住民の推定数',
'Euros': 'ユーロ',
'Evacuating': '退避中',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'このメッセージの情報を評価します。(この値は、公開される警告アプリケーションで使用してはなりません)',
'Event Time': 'イベント発生時刻',
'Event Type': 'イベントタイプ',
'Event type': 'イベントタイプ',
'Example': '例',
'Exceeded': '超過',
'Exclude contents': 'コンテンツを除く',
'Excreta disposal': 'し尿処理',
'Execute a pre-planned activity identified in <instruction>': '事前に準備していた計画 <instruction>を実行する',
'Existing Placard Type': '設置されたポスターのタイプ',
'Existing food stocks': '食糧備蓄あり',
'Existing food stocks, main dishes': '備蓄中の食料(主皿)',
'Existing food stocks, side dishes': '備蓄中の食料(副皿)',
'Exits': '出口',
'Expected In': '予定期間',
'Expected Out': '予期される出力',
'Experience': '熟練者',
'Expiry Date': '有効期限',
'Expiry Time': '有効期限',
'Expiry_Date': '有効期限',
'Explosive Hazard': '爆発災害',
'Export Data': 'データのエクスポート',
'Export Database as CSV': 'データベースをCSV形式でエクスポート',
'Export in GPX format': 'GPXフォーマットでエクスポート',
'Export in KML format': 'KMLフォーマットでエクスポート',
'Export in OSM format': 'OSMフォーマットでエクスポート',
'Export in PDF format': 'PDFフォーマットでエクスポート',
'Export in RSS format': 'RSSフォーマットでエクスポート',
'Export in XLS format': 'XLSフォーマットでエクスポート',
'Export': 'エクスポート',
'Exterior Only': '外装のみ',
'Exterior and Interior': '外装と内装',
'External Features': '外部機能',
'Eye Color': '目の色',
'Facial hair, color': 'ヒゲ, 色',
'Facial hair, type': 'ヒゲ, 形状',
'Facial hear, length': 'ヒゲ, 長さ',
'Facility Operations': '施設の運用',
'Facility Status': '施設の状態',
'Facility Type': '施設タイプ',
'Factors affecting school attendance': '生徒の就学に影響する要因',
'Failed to send mail to Approver - see if you can notify them manually!': '承認依頼メールを送信できませんでした。利用者登録は完了していません。サイト管理者へ連絡してください。',
'Failed!': '失敗しました!',
'Falling Object Hazard': '落下/墜落による災害',
'Families/HH': '家族/世帯',
'Family tarpaulins received': 'タープ(家族用簡易テント)を受領した',
'Family tarpaulins, source': 'タープ(家族用簡易テント)の送付元',
'Family': '家族',
'Family/friends': '家族/友人',
'Farmland/fishing material assistance, Rank': '農業 / 漁業用物資の補助、ランク',
'Fatalities': '死亡者',
'Fax': 'ファックス',
'Feature Layer Details': '機能レイヤの詳細',
'Feature Layer added': '機能レイヤを追加しました',
'Feature Layer deleted': '機能レイヤを削除しました',
'Feature Layer updated': '機能レイヤを更新しました',
'Feature Layers': '機能レイヤ',
'Feature Namespace': 'Feature 名前空間',
'Feature Request': '機能の要求',
'Feature Type': 'Feature タイプ',
'Feature': '機能',
'Features Include': '含まれる機能',
'Female headed households': '代表者が女性の世帯数',
'Female': '女性',
'Few': '少数',
'Field Hospital': '野外病院',
'File': 'ファイル',
'Fill in Latitude': '緯度を記入',
'Fill in Longitude': '経度を記入',
'Fill out Rapid Evaluation Forms': '迅速評価フォームに記入します',
'Fill out detailed Evaluation Forms': '詳細な評価フォームに入力する',
'Filter Field': 'フィールドをフィルタする',
'Filter Value': '値をフィルタ',
'Filter': 'フィルタ',
'Filtered search of aid pledges and requests': '援助申出と要請の検索されたもの',
'Find All Matches': '完全一致',
'Find Dead Body Report': '遺体レポートの発見',
'Find Hospital': '病院を探す',
'Find Person Record': '人物情報を検索',
'Find Recovery Report': '遺体発見レポート',
'Find Volunteers': 'ボランティアを探す',
'Find a Person Record': '人物情報を検索する',
'Find by Name': '名前で検索',
'Find': '検索',
'Finder': '発見者',
'Fingerprint': '指紋',
'Fingerprinting': '指紋',
'Fingerprints': '指紋',
'Finish': '完了',
'Finished Jobs': '完了したジョブ',
'Fire suppression and rescue': '消火・救出活動',
'Fire': '火災',
'First Name': '名',
'First name': '名',
'Fishing': '漁業',
'Flash Flood': '鉄砲水',
'Flash Freeze': '瞬間凍結',
'Fleet Management': '船舶の管理',
'Flexible Impact Assessments': '災害影響範囲アセスメント',
'Flood Alerts show water levels in various parts of the country': '洪水警報では、国内各所の水位情報を確認することができます。',
'Flood Alerts': '洪水警報',
'Flood Report Details': '洪水レポートの詳細',
'Flood Report added': '洪水レポートを追加しました',
'Flood Report deleted': '洪水レポートを削除しました',
'Flood Report updated': '洪水レポートを更新しました',
'Flood Report': '洪水レポート',
'Flood Reports': '洪水レポート',
'Flood': '洪水',
'Flow Status': '流れの状況',
'Focal Point': '代表者',
'Fog': '濃霧',
'Food Supply': '食料の供給',
'Food assistance available/expected': '食糧援助が利用可能 / 期待できる',
'Food assistance': '食糧援助',
'Food': '食料',
'Footer file %s missing!': 'フッターファイル%sが見つかりません。',
'Footer': 'フッタ',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Eden の場合はベースURL(例えば http://sync.sahanfoundation.org/eden)、他のシステムの場合は同期インターフェースのURL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'POP-3では通常110 (SSLでは995)で、IMAPでは通常143 (IMAPSでは993)。',
'For Warehouse': '倉庫向け',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '国の場合は ISO2 コード、町の場合は 空港コード(Airport Locode)',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'それぞれの同期パートナーについて、指定した間隔で実行する同期ジョブがデフォルトで存在します。必要に応じて、さらなる同期ジョブを設定し、カスタマイズすることができます。開始するには、リンクをクリックしてください。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'セキュリティ向上のため、ユーザー名とパスワードを入力し、団体の他端末の管理者にユーザー名とパスワードを通知して「データ同期」 -> 「データ同期パートナー」であなたのUUIDに追加してもらうことを推奨します。',
'For live help from the Sahana community on using this application, go to': 'Sahanaの使い方について Sahanaコミュニティからライブヘルプを希望する際は、以下に進んでください。',
'For messages that support alert network internal functions': '警戒(alert)ネットワークの内部機能をサポートするメッセージの場合',
'For more details on the Sahana Eden system, see the': 'Sahana Edenに関する詳細は、以下をごらんください。',
'For more information, see ': '詳細は、以下を参照してください。',
'For other types, the next screen will allow you to enter the relevant details...': 'その他の種類については、次の画面で関連する詳細情報を入力できます…',
'For': ' ',
'For:': '対象:',
'Forest Fire': '森林火災',
'Formal camp': '指定避難所',
'Format': 'フォーマット',
'Forms': 'フォーム',
'Found': '発見された',
'Foundations': '構造基礎',
'Freezing Drizzle': '凍結霧雨',
'Freezing Rain': 'みぞれ',
'Freezing Spray': '冷却スプレー',
'French': 'フランス語',
'Friday': '金曜日',
'From Inventory': '送付元',
'From Location': '送付元ロケーション',
'From Organization': '送付元団体',
'From Person': '送付元の担当者',
'From Warehouse': '倉庫から',
'From': '輸送元',
'Frost': '凍結',
'Fulfil. Status': '確保量は十分か',
'Fulfillment Status': '充足状況',
'Full beard': 'もみあげまでのアゴヒゲ、口髭あり',
'Full': '満員',
'Fullscreen Map': 'フルスクリーン表示',
'Function Permissions': '機能に対する権限',
'Function': '機能',
'Functional Tests': '機能テスト',
'Functions available': '利用可能な機能',
'Funding Organization': '資金提供団体',
'Funeral': '葬儀',
'Further Action Recommended': '更なる対応が推奨されている',
'GIS Reports of Shelter': '避難所のGISレポート',
'GIS integration to view location details of the Shelter': '避難所のロケーション詳細を閲覧するGISインテグレーション',
'GPS Marker': 'GPSマーカー',
'GPS Track File': 'GPS Track ファイル',
'GPS Track': 'GPS トラック',
'GPX Layers': 'GPX レイヤ',
'GPX Track': 'GPX形式の追跡情報',
'GRN Status': 'GRNステータス',
'Gale Wind': '強風',
'Gantt Chart': 'ガントチャート',
'Gap Analysis Map': 'ギャップ解析マップ',
'Gap Analysis Report': 'ギャップ解析報告',
'Gap Analysis': 'ギャップ解析',
'Gap Map': '需給ギャップマップ',
'Gap Report': '需給ギャップの報告',
'Gateway Settings': 'ゲートウェイ設定',
'Gateway settings updated': 'ゲートウェイ設定を更新しました',
'Gender': '性別',
'General Comment': '包括コメント',
'General Medical/Surgical': '一般医学/外科',
'General emergency and public safety': '一般的緊急事態と公共の安全',
'General information on demographics': '人口統計の情報',
'Generator': '発電機',
'Geocoder Selection': 'Geocoder 選択',
'Geometry Name': 'Geometry名',
'Geonames.org search requires Internet connectivity!': 'Geonames.org の検索を行うには、インターネットに接続している必要があります。',
'Geophysical (inc. landslide)': '地球物理 (地滑りを含む)',
'Geotechnical Hazards': '地盤災害',
'Geotechnical': '地質工学',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '実行中のPythonでGeraldoモジュールが利用できません。PDF出力に必要です。',
'Geraldo not installed': 'Geraldoがインストールされていません',
'Get incoming recovery requests as RSS feed': '遺体回収要請をRSSフィードとして取得する',
'Girls 13-18 yrs in affected area': '影響地域内の13-18歳の女子数',
'Girls 13-18 yrs not attending school': '学校に来ていなかった13-18歳の女子数',
'Girls 6-12 yrs in affected area': '影響地域内の6-12歳の女子数',
'Girls 6-12 yrs not attending school': '学校に来ていなかった6-12歳の女子数',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '画像に関する説明。特に、写真のどの箇所に何が確認できるかを記載します (オプション)',
'Give information about where and when you have seen the person': '人物を見かけた場所や時間の情報を提供してください',
'Give information about where and when you have seen them': 'どこで、いつ、彼らを見かけたのか、情報をください',
'Global Messaging Settings': 'メッセージの全般設定',
'Glossary': '用語集',
'Go to Request': '支援要請に行く',
'Goatee': 'やぎヒゲ',
'Goods Received Note': '受諾した物資の注釈',
'Government UID': '政府UID',
'Government building': '政府所管の建物',
'Government': '政府・行政機関',
'Grade': '学年',
'Greek': 'ギリシャ語',
'Green': '緑',
'Ground movement, fissures': '地盤移動、亀裂',
'Ground movement, settlement, slips': '地盤移動、沈下、がけ崩れ',
'Group %(group_id)s created': 'グループ %(group_id)s を作成しました',
'Group Description': 'グループの説明',
'Group Details': 'グループの詳細',
'Group ID': 'グループID',
'Group Member added': 'グループメンバを追加しました',
'Group Members': 'グループメンバ',
'Group Memberships': 'グループメンバシップ',
'Group Name': 'グループ名',
'Group Title': 'グループのタイトル',
'Group Type': 'グループのタイプ',
'Group added': 'グループを追加しました',
'Group deleted': 'グループを削除しました',
'Group description': 'グループの説明',
'Group name': 'グループ名',
'Group type': 'グループタイプ',
'Group updated': 'グループを更新しました',
'Group': 'グループ',
'Groups removed': 'グループを削除しました',
'Groups': 'グループ',
'Guest': 'ゲスト',
'HR Data': '人的資源の情報',
'HR Manager': '人的資源マネージャー',
'Hail': 'あられ',
'Hair Color': '頭髪の色',
'Hair Length': '頭髪の長さ',
'Hair Style': 'ヘアスタイル',
'Has additional rights to modify records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを変更するための権限を追加します',
'Has data from this Reference Document been entered into Sahana?': 'リファレンス文書の内容が Sahanaに登録してあるかどうかを記載してください。',
'Has only read-only access to records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを閲覧のみに制限します',
'Has the safety and security of women and children in your community changed since the emergency?': '緊急事態以来、女性や未成年の生活の危険度が変化したかどうかを記載してください',
'Has your business been damaged in the course of the disaster?': '災害の過程で、ビジネス上の損害を受けているかどうかを記載してください',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '世帯に対して避難所用品や生活必需品が配布されている、あるいは数日以内に配布を実施できるかを記載してください',
'Have normal food sources been disrupted?': '平常時の食料調達源が利用不可能になったかどうかを記載してください',
'Have schools received or are expecting to receive any assistance?': '学校に対してなんらかの支援が行われた、あるいは行われる予定であるかどうかを記載してください',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '医療品や食糧支援を、被災者、あるいはあなたが受領したかどうか、あるいは数日以内に受領できそうかどうかを記載してください。',
'Hazard Pay': '災害補償金',
'Hazardous Material': '危険物',
'Hazardous Road Conditions': '災害発生後の道路状況',
'Header Background': 'ヘッダー背景',
'Header background file %s missing!': 'ヘッダー背景ファイル%sが存在しません。',
'Headquarters': '本部・本社',
'Health care assistance, Rank': '医療 / 介護支援、ランク',
'Health center with beds': '保健所(ベッドあり)',
'Health center without beds': '保健所(ベッドなし)',
'Health center': '保健所',
'Health services functioning prior to disaster': '災害発生以前 ヘルスサービスの提供',
'Health services functioning since disaster': '災害発生後 ヘルスサービスの提供',
'Health services status': '医療サービス状況',
'Health': '保健・介護',
'Healthcare Worker': 'ヘルスケア要員',
'Heat Wave': '熱波',
'Heat and Humidity': '熱と湿度',
'Height (cm)': '身長 (cm)',
'Height': '身長',
'Help': ' ヘルプ ',
'Helps to monitor status of hospitals': '病院の現状把握に役立つ情報を管理します',
'Helps to report and search for Missing Persons': '行方不明者の報告と検索を支援します。',
'Here are the solution items related to the problem.': '問題に関連する解決案です。',
'Heritage Listed': '遺産登録',
'Hide Details': '詳細を隠す',
'Hierarchy Level 0 Name (e.g. Country)': '階層レベル0の名前(例: 国)',
'Hierarchy Level 1 Name (e.g. Province)': '階層レベル1の名前 (例: 都道府県)',
'Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Hierarchy Level 3 Name': '階層レベル3の名前',
'Hierarchy Level 4 Name': '階層レベル4の名前',
'High Water': '最高水位',
'High': '高',
'Hindu': 'ヒンズー教徒',
'History': '履歴',
'Hit the back button on your browser to try again.': 'ブラウザの「戻る」ボタンを押して、やり直してください。',
'Holiday Address': '休日の住所',
'Home Address': '自宅住所',
'Home Country': '所属国',
'Home Crime': '住居犯罪',
'Home': 'ホーム',
'Hospital Details': '病院の詳細',
'Hospital Status Report': '病院ステータスレポート',
'Hospital information added': '病院情報を追加しました',
'Hospital information deleted': '病院情報を削除しました',
'Hospital information updated': '病院情報を更新しました',
'Hospital status assessment.': '病院ステータスアセスメント',
'Hospital': '病院',
'Hospitals': '病院情報',
'Hot Spot': 'ホットスポット',
'Hour': '時間',
'Hourly': '1時間毎',
'Household kits received': '家事用品を受領しました',
'Household kits, source': '家事用品の送付元',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の男子がよく集まっていた場所と活動は?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の男子がよく集まっていた場所と活動は?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の女子がよく集まっていた場所と活動は?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の女子がよく集まっていた場所と活動は?',
'How do boys 13-17yrs spend most of their time now?': '現在、13-17歳の男子は普段何をして過ごしていますか?',
'How do boys <12yrs spend most of their time now?': '現在、12歳以下の男子は普段何をして過ごしていますか?',
'How do girls 13-17yrs spend most of their time now?': '現在、13-17歳の女子は普段何をして過ごしていますか?',
'How do girls <12yrs spend most of their time now?': '現在、12歳以下の女子は普段何をして過ごしていますか?',
'How does it work?': 'どのように動きますか?',
'How is this person affected by the disaster? (Select all that apply)': 'この人物の被災状況を記載してください(該当する項目を全て選択)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '水資源を確保できる地点までの距離を記載します。徒歩で往復し、待ち時間も含めた時間を記載してください。',
'How long does it take you to walk to the health service?': '医療サービスが提供されている場所まで、徒歩で必要な時間を記載します。',
'How long will the food last?': '食料の残存予測期間',
'How long will this water resource last?': '水の供給が枯渇する時期',
'How many Boys (0-17 yrs) are Dead due to the crisis': '災害で死亡した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Injured due to the crisis': '災害で負傷した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Missing due to the crisis': '災害で行方不明となった少年の数(0-17歳)',
'How many Girls (0-17 yrs) are Dead due to the crisis': '災害で死亡した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Injured due to the crisis': '災害で負傷した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Missing due to the crisis': '災害で行方不明になった少女の数(0-17歳)',
'How many Men (18 yrs+) are Dead due to the crisis': '災害で死亡した男性の数(18歳以上)',
'How many Men (18 yrs+) are Injured due to the crisis': '災害で負傷した男性の数(18歳以上)',
'How many Men (18 yrs+) are Missing due to the crisis': '災害で行方不明となった男性の数(18歳以上)',
'How many Women (18 yrs+) are Dead due to the crisis': '災害で死亡した女性の数(18歳以上)',
'How many Women (18 yrs+) are Injured due to the crisis': '災害で負傷した女性の数(18歳以上)',
'How many Women (18 yrs+) are Missing due to the crisis': '災害で行方不明となった女性の数(18歳以上)',
'How many days will the supplies last?': '支援物資がなくなるまでの日数',
'How many doctors in the health centers are still actively working?': 'ヘルスセンター内の医師の人数を記載してください',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '居住不可になった家屋数を記載してください(居住不可 = 基礎構造や土台部分の破壊など)',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '災害によって破損したが、まだ利用が可能である住居の数を記載してください(利用可能 = 窓の破壊、壁のヒビ、屋根の軽微な破損など)',
'How many latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレの数を記載してください',
'How many midwives in the health centers are still actively working?': '医療センター内の助産師の人数を記載してください',
'How many new cases have been admitted to this facility in the past 24h?': '過去24時間でこの施設で受け入れたケースの数は?',
'How many nurses in the health centers are still actively working?': '保健所で活動可能な看護師は何人居ますか?',
'How many of the patients with the disease died in the past 24h at this facility?': 'この施設で過去24時間で何人の患者がこの病気で亡くなりましたか?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'この地域の、登校していない学童期男児(6-12歳)の数を記載してください。',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'この地域の、登校していない学童期女児(6-12歳)の数を記載してください。',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '平常通りの授業を実施できている小学校・中学校・高校の数を記入してください',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'この地域の、登校していない中高校生年齢男子(13-18歳)の数を記載してください。',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'この地域の、登校していない女子中高生(13-18歳)の数を記載してください。',
'How many patients with the disease are currently hospitalized at this facility?': 'この病気のためにこの施設に入院している患者は現在何人ですか?',
'How many primary school age boys (6-12) are in the affected area?': '被災地域内の学童期男児(6-12歳)の数を記載してください',
'How many primary school age girls (6-12) are in the affected area?': '被災地域内の学童期女児(6-12歳)の数を記載してください。',
'How many primary/secondary schools were opening prior to the disaster?': '災害発生前に授業が行われていた小学校・中学校・高校の数を記載してください',
'How many secondary school age boys (13-18) are in the affected area?': '被災地域内の男子中学生・男子高校生(13-18歳)の数を記載してください',
'How many secondary school age girls (13-18) are in the affected area?': '被災地域内の中高生年齢女子(13-18歳)の数を記載してください。',
'How many teachers have been affected by the disaster (affected = unable to work)?': '被災し、授業ができない状態の教師の人数を記載してください',
'How many teachers worked in the schools prior to the disaster?': '災害発生前の教師の人数を記載してください',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'どの程度詳細な情報が表示されるかを定義します。ズームすることで詳細が表示されるようになりますが、そのかわり、広域を見渡すことができなくなります。逆に、ズームしないことで広域を表示できますが、詳細情報の確認は行えなくなります。',
'Human Resource Management': '人的資源マネージメント',
'Human Resource': '人的資源',
'Human Resources Management': '人的資源管理',
'Human Resources': '人的資源',
'Humanitarian NGO': '人道支援NGO',
'Hurricane Force Wind': 'ハリケーンの風力',
'Hurricane': 'ハリケーン',
'Hygiene NFIs': '衛生用品',
'Hygiene kits received': '衛生用品を受領した',
'Hygiene kits, source': '衛生用品の送付元',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生上の問題',
'Hygiene': '衛生',
'I am available in the following area(s)': '以下の地域を担当できます',
'ID Label': 'IDラベル',
'ID Label: ': 'IDラベル: ',
'ID Tag Number': 'IDタグ番号',
'ID Tag': 'ID タグ',
'ID Type': 'IDタイプ',
'Ice Pressure': '氷結圧力',
'Iceberg': 'アイスバーグ',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'できればソースファイルの完全なURLを記載します。難しい場合はデータ入手元のメモでも構いません。',
'Identification Report': 'IDレポート',
'Identification Reports': 'IDレポート',
'Identification Status': 'IDステータス',
'Identification label of the Storage bin.': '備蓄コンテナの区別用ラベル番号。',
'Identification': 'ID',
'Identified as': '判明した身元',
'Identified by': 'によって識別された',
'Identity Details': '身元確認の詳細',
'Identity added': '身元情報を追加しました',
'Identity deleted': '身元確認を削除しました',
'Identity updated': '身元確認を更新しました',
'Identity': '身元確認',
'If Staff have login accounts then they are given access to edit the details of the': 'スタッフがログイン用アカウントを有している場合、以下項目の詳細を編集することができます:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '「Unit = m, Base Unit = Km」の場合、「1m = 0.001 km」なので乗数は0.0001 です。',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'このドメインの電子メールアドレスを所有するユーザーを認証する場合は、承認がさらに必要かどうか、必要なら誰が承認するか、を決めるのに承認者フィールドを使用します。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーがアクセスしたときに、全てのレコードがログに保存されます。無効にすると、モジュール毎に有効にすることができます。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーが編集したすべてのレコードを記録します。無効にすると、モジュール毎に有効にできます。',
'If neither are defined, then the Default Marker is used.': 'もし両方共定義されていない場合、デフォルトマーカーが使われます。',
'If no marker defined then the system default marker is used': 'マーカーが定義されていない場合は、システムのデフォルトマーカーを使用します。',
'If no, specify why': 'いいえ、の場合はその理由を記載してください',
'If none are selected, then all are searched.': 'もしなにも選択しなければ、全てを検索します',
'If the location is a geographic area, then state at what level here.': '場所が地理的に確定できる場所ならば、その場所のレベルを記載してくだい。',
'If the request is for type "Other", you should enter a summary of the request here.': '支援要請が"その他"の場合、概要をここに入力する必要があります',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとして登録されるように指定することができます',
'If this is set to True then mails will be deleted from the server after downloading.': 'Trueに設定されている場合は、メールはダウンロード後にサーバーから削除されます。',
'If this record should be restricted then select which role is required to access the record here.': 'このレコードへのアクセスを制限する際には、アクセスに必要となる権限を選択してください',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'このレコードを制限したい場合、アクセスを許可する権限を指定してください。',
'If yes, specify what and by whom': '「はい」の場合、供給される食料と供給元',
'If yes, which and how': '「はい」の場合、混乱している場所や原因を記載',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '参照文書を入力しない場合は、データ検証のために入力者の電子メールが表示されます。',
'If you know what the Geonames ID of this location is then you can enter it here.': 'このロケーションの Geonames ID がある場合、ここに入力してください。',
'If you know what the OSM ID of this location is then you can enter it here.': 'このロケーションの OSM ID がある場合、ここに入力してください。',
'If you need to add a new document then you can click here to attach one.': '文書の添付はこのページから可能です。',
'If you want several values, then separate with': '複数の値を入力したい場合、この文字で分割してください : ',
'If you would like to help, then please': 'ご協力いただける方は登録をお願いします',
'Illegal Immigrant': '不法移民',
'Image Details': '画像の詳細',
'Image Tags': '画像のタグ',
'Image Type': '画像のタイプ',
'Image Upload': '画像のアップロード',
'Image added': '画像を追加しました',
'Image deleted': '画像を削除しました',
'Image updated': '画像を更新しました',
'Image': '画像',
'Image/Attachment': '画像/添付資料',
'Image/Other Attachment': '画像/その他の添付ファイル',
'Imagery': '画像',
'Images': '画像',
'Immediate reconstruction assistance, Rank': '建築物の緊急修理 / 再建築支援、ランク',
'Impact Assessment Summaries': '災害影響範囲アセスメントの概要',
'Impact Assessments': '災害影響範囲アセスメント',
'Impact Baselines': '影響範囲の基準値',
'Impact Details': '被害の詳細',
'Impact Type Details': '災害影響のタイプ詳細',
'Impact Type added': '災害の影響タイプを追加しました',
'Impact Type deleted': '影響範囲タイプを削除しました',
'Impact Type updated': '災害影響のタイプを更新しました',
'Impact Type': '災害影響タイプ',
'Impact Types': '災害影響のタイプ',
'Impact added': '被災影響を追加しました',
'Impact deleted': '影響範囲を削除しました',
'Impact updated': '被災状況を更新しました',
'Impacts': '影響',
'Import & Export Data': 'データのインポートとエクスポート',
'Import Data': 'データのインポート',
'Import Job': 'Jobのインポート',
'Import Jobs': 'Jobsのインポート',
'Import and Export': 'インポートとエクスポート',
'Import from Ushahidi Instance': 'Ushahidi インスタンスから設定をインポート',
'Import if Master': 'マスターなら取り込む',
'Import job created': 'Import jobを作成しました',
'Import multiple tables as CSV': '複数のテーブルをCSVとしてインポート',
'Import': 'インポート',
'Import/Export': 'インポート/エクスポート',
'Important': '重要',
'Importantly where there are no aid services being provided': '救護サービスが提供されていない地域において重要となります',
'Imported': 'インポートしました',
'Importing data from spreadsheets': 'スプレッドシートからデータをインポートしています',
'Improper decontamination': '不適切な汚染の除去',
'Improper handling of dead bodies': '誤った扱いをされている遺体',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'In GeoServerでは、これはレイヤ名です。WFS getCapabilitiesでは、これはコロン(:)後のFeatureType名の部分です。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'GeoServer では、これはワークスペース名です。WFS getCapabilities では、これはコロン「:」の前の FeatureType の部分となります。',
'In Inventories': 'この物資の在処',
'In Process': '実行中',
'In Progress': '実行中',
'In Transit': '輸送中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'この地図のウィンドウレイアウトは、全体を覆い隠します。従って、ここで大きな値を入力する必要はありません',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般的に、コミュニティ内の高齢者、障がい者、子供、青年、女性たちが最も必要としている物資やサービスがなんであるかを記載してください',
'Inbound Mail Settings': '着信メール設定',
'Inbox': '受信箱',
'Incident Categories': 'インシデントカテゴリ',
'Incident Details': 'インシデントの詳細',
'Incident Report Details': 'インシデントレポートの詳細',
'Incident Report added': '災害影響範囲レポートを追加しました',
'Incident Report deleted': 'インシデントレポートを削除しました',
'Incident Report updated': 'インシデントレポートを更新しました',
'Incident Report': 'インシデントレポート',
'Incident Reporting System': 'インシデントの報告を行ないます',
'Incident Reporting': 'インシデントレポート',
'Incident Reports': 'インシデントレポート',
'Incident added': 'インシデントを追加しました',
'Incident deleted': 'インシデントを削除しました',
'Incident updated': 'インシデントを更新しました',
'Incident': 'インシデント',
'Incidents': 'インシデント',
'Incoming Shipment canceled': '到着する配送が取消しされました',
'Incoming Shipment updated': '入荷した物資が更新されました',
'Incoming': '入荷',
'Incomplete': '未完了',
'Individuals': '個人',
'Industrial Crime': '産業犯罪',
'Industrial': '産業',
'Industry Fire': '工場から出火',
'Industry close to village/camp': '村落/仮泊施設の周辺に工場が存在',
'Infant (0-1)': '乳児(0-1歳)',
'Infectious Disease': '感染症',
'Infectious Diseases': '感染症',
'Infestation': '感染',
'Informal Leader': '非公式なリーダー',
'Informal camp': '非指定避難所',
'Information gaps': '情報のギャップ',
'Infusion catheters available': '注入カテーテルが利用可能',
'Infusion catheters need per 24h': '24時間毎に必要な注入カテーテル数',
'Infusion catheters needed per 24h': '24時間ごとに、注入カテーテルが必要',
'Infusions available': '点滴が利用可能',
'Infusions needed per 24h': '24時間毎に必要な点滴の数',
'Input Job': 'Jobのインポート',
'Inspected': '調査済み',
'Inspection Date': '調査した日付',
'Inspection date and time': '調査日時',
'Inspection time': '調査した時刻',
'Inspector ID': '調査者ID',
'Instance Type': 'インスタンスタイプ',
'Instant Porridge': 'インスタント粥',
'Institution': 'その他の組織',
'Insufficient Privileges': '権限が足りません',
'Insufficient vars: Need module, resource, jresource, instance': '不十分な変数: module, resource, jresource, instance が必要です',
'Insufficient': '不足',
'Intake Items': 'アイテムの受け入れ',
'Intergovernmental Organization': '国際政府間組織',
'Interior walls, partitions': '室内の壁、仕切り',
'Internal Features': '内部機能',
'Internal State': '内部状態',
'International NGO': '国際NGO',
'International Organization': '国際機関',
'International Staff': '国外からのスタッフ',
'Intervention': '介入',
'Interview taking place at': 'インタビュー実施場所',
'Invalid Query': '無効なクエリ',
'Invalid email': '無効な電子メール',
'Invalid login': '無効なログイン',
'Invalid request!': 'リクエストは無効です。',
'Invalid ticket': '無効なチケット',
'Invalid': '無効な',
'Inventories with Item': '在庫アイテム',
'Inventories': '在庫管理',
'Inventory Item Details': '救援物資の在庫詳細',
'Inventory Item added': '救援物資の在庫を追加しました',
'Inventory Item deleted': '備蓄物資を削除しました',
'Inventory Item updated': '備蓄物資を更新しました',
'Inventory Item': '備蓄物資',
'Inventory Items Available for Request Item': '要求された物資に適合する、倉庫内の物資',
'Inventory Items': '備蓄物資',
'Inventory Management': '物資の管理',
'Inventory Store Details': '物資集積地点の詳細',
'Inventory Store added': '物資集積地点を追加しました',
'Inventory Store deleted': '物資集積地点を削除しました',
'Inventory Store updated': '物資集積地点を更新しました',
'Inventory Store': '物資集積地点',
'Inventory Stores': '物資集積地点',
'Inventory functionality is available for:': '備蓄機能を利用可能:',
'Inventory of Effects': '救援物資の影響',
'Inventory': '在庫',
'Inventory/Ledger': '在庫 / 元帳',
'Is adequate food and water available for these institutions?': '関係者に対して十分な水と食料が供給されていますか?',
'Is it safe to collect water?': '水の確保は安全に行えるか?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '村落/集落の近くに、工場あるいは農業化学プラントなどが存在しますか?',
'Is this a strict hierarchy?': 'これは厳密な階層構造ですか?',
'Issuing Authority': '発行機関',
'It is built using the Template agreed by a group of NGOs working together as the': '聞き取り項目のテンプレートは、以下リンクのNGO組織と協同で作成されています。',
'Item Added to Shipment': '輸送情報に物資を追加する',
'Item Catalog Categories': '物資カタログカテゴリ',
'Item Catalog Category Details': '救援物資カタログのカテゴリ詳細',
'Item Catalog Category added': '救援物資カタログのカテゴリを追加しました',
'Item Catalog Category deleted': '救援物資カタログのカテゴリを削除しました',
'Item Catalog Category updated': '物資カタログカテゴリを更新しました',
'Item Catalog Category': '救援物資カタログのカテゴリ',
'Item Catalog Details': '物資カタログの詳細',
'Item Catalog added': '救援物資カタログを追加しました',
'Item Catalog deleted': '物資カタログを削除しました',
'Item Catalog updated': '物資カタログを更新しました',
'Item Catalogs': '救援物資カタログ',
'Item Categories': '物資カテゴリ',
'Item Category Details': '物資カテゴリの詳細',
'Item Category added': '救援物資カテゴリを追加しました',
'Item Category deleted': '救援物資カテゴリを削除しました',
'Item Category updated': '物資カテゴリを更新しました',
'Item Category': '物資カテゴリ',
'Item Details': '救援物資の詳細',
'Item Pack Details': '救援物資パックの詳細',
'Item Pack added': '物資パックを追加しました',
'Item Pack deleted': '救援物資のパックを削除しました',
'Item Pack updated': '救援物資パックを更新しました',
'Item Packs': '物資パック',
'Item Sub-Categories': '救援物資のサブカテゴリ',
'Item Sub-Category Details': '物資サブカテゴリの詳細',
'Item Sub-Category added': '救援物資のサブカテゴリを追加しました',
'Item Sub-Category deleted': '物資サブカテゴリを削除しました',
'Item Sub-Category updated': '救援物資サブカテゴリを更新しました',
'Item Sub-Category': '物資サブカテゴリ',
'Item added to shipment': '物資が輸送に回りました',
'Item added': '救援物資を追加しました',
'Item already in Bundle!': '物資がすでにバンドルに存在しています。',
'Item already in Kit!': '救援物資は既にキットに存在しています',
'Item already in budget!': '物資は既に予算に登録されています',
'Item deleted': '物資を削除しました',
'Item updated': '救援物資を更新しました',
'Item': '物資',
'Items': '救援物資',
'Japan': '日本',
'Japanese': '日本語',
'Jerry can': 'ジェリ缶',
'Jew': 'ユダヤ教徒',
'Job Market': '求人',
'Job Title': '肩書き',
'Jobs': '職業',
'Just Once': '一度だけ',
'KPIs': 'KPI',
'Key Details': 'Keyの詳細',
'Key added': 'キーを追加しました',
'Key deleted': 'キーを削除しました',
'Key updated': 'キーを更新しました',
'Key': 'キー',
'Keys': 'キー',
'Kit Contents': 'Kitの内容',
'Kit Details': 'Kitの詳細',
'Kit Updated': 'キットを更新しました',
'Kit added': 'キットを追加しました',
'Kit deleted': 'キットを削除しました',
'Kit updated': 'キットを更新しました',
'Kit': 'キット',
'Kits': 'キット',
'Known Identities': '既知のID',
'Known incidents of violence against women/girls': '女性に対する暴力行為が発生した',
'Known incidents of violence since disaster': '災害発生後に暴力行為が発生した',
'LICENSE': 'ライセンス',
'LMS Administration': 'LMSの管理',
'Label': 'ラベル',
'Lack of material': '資材不足',
'Lack of school uniform': '学校制服が不足',
'Lack of supplies at school': '学校用物資の不足',
'Lack of transport to school': '学校への輸送手段の不足',
'Lactating women': '授乳中の女性の数',
'Lahar': 'ラハール',
'Landslide': '地すべり',
'Language': 'Language 言語',
'Last Name': '名前',
'Last known location': '最後に目撃された場所',
'Last name': '名前',
'Last synchronization time': 'データ同期の最終実施時刻',
'Last updated': '最終更新日',
'Last updated by': '最終更新者',
'Last updated on': '直近のアップデート実施時刻',
'Latitude & Longitude': '緯度&経度',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は南北方向(上下)を定義します。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。',
'Latitude is North-South (Up-Down).': '緯度は南北(上下)です',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は赤道では0、北半球ではプラス、南半球ではマイナスになります',
'Latitude should be between': '緯度の値として有効な値は',
'Latitude': '緯度',
'Latrines': 'トイレ',
'Law enforcement, military, homeland and local/private security': '法執行機関、自衛隊、警察および警備会社',
'Layer Details': 'レイヤの詳細',
'Layer added': 'レイヤを追加しました',
'Layer deleted': 'レイヤを削除しました',
'Layer updated': 'レイヤを更新しました',
'Layer': 'レイヤ',
'Layers updated': 'レイヤを更新しました',
'Layers': 'レイヤ',
'Layout': 'レイアウト',
'Legend Format': '凡例形式',
'Length': '長さ',
'Level 1 Assessment Details': 'レベル1アセスメントの詳細',
'Level 1 Assessment added': 'レベル1アセスメントを追加しました',
'Level 1 Assessment deleted': 'レベル1のアセスメントを削除しました',
'Level 1 Assessment updated': 'レベル1アセスメントを更新しました',
'Level 1 Assessments': 'レベル1 アセスメント',
'Level 1': 'レベル1',
'Level 2 Assessment Details': 'レベル2アセスメントの詳細',
'Level 2 Assessment added': 'レベル2アセスメントを追加しました',
'Level 2 Assessment deleted': 'レベル2アセスメントを削除しました',
'Level 2 Assessment updated': 'レベル2アセスメントを更新しました',
'Level 2 Assessments': 'レベル2アセスメント',
'Level 2 or detailed engineering evaluation recommended': 'レベル2あるいは詳細な技術的評価を行うことを推奨します',
'Level 2': 'レベル2',
'Level': 'レベル',
'Library support not available for OpenID': 'OpenIDのライブラリサポートが利用できません',
'License Plate': 'ナンバープレート',
'Line': '行',
'LineString': '折れ線',
'Link Item & Shipment': 'アイテムと輸送を紐付ける',
'Link an Item & Shipment': 'アイテムと出荷を結び付ける',
'Linked Records': '参照しているレコード',
'Linked records': '関連しているレコード',
'List / Add Baseline Types': '基準値タイプの一覧 / 追加',
'List / Add Impact Types': '災害影響のタイプを表示 / 追加',
'List / Add Services': 'サービスの一覧表示 / 追加',
'List / Add Types': 'タイプの一覧表示 / 追加',
'List Activities': '支援活動一覧',
'List Aid Requests': '援助要請の一覧',
'List All Entries': '全てのエントリ一覧',
'List All Memberships': '全てのメンバシップ一覧',
'List All Reports': '報告すべての一覧',
'List All': '全項目一覧',
'List Alternative Items': '代わりの物資一覧',
'List Assessment Summaries': 'アセスメント要約の一覧',
'List Assessments': 'アセスメント一覧',
'List Asset Assignments': '資産割り当ての一覧',
'List Assets': '資産一覧',
'List Baseline Types': '基準値タイプ一覧',
'List Baselines': '基準値一覧',
'List Brands': '銘柄の一覧',
'List Budgets': '予算の一覧',
'List Bundles': 'Bundleの一覧',
'List Catalog Items': '物資カタログの一覧',
'List Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係一覧',
'List Checklists': 'チェックリスト一覧',
'List Cluster Subsectors': 'クラスタのサブセクタ一覧',
'List Cluster': 'クラスタ一覧',
'List Clusters': 'クラスタ一覧',
'List Commitment Items': 'コミットされた救援物資の一覧',
'List Commitments': 'コミットメントの一覧',
'List Configs': '設定一覧',
'List Conflicts': 'データ競合一覧',
'List Contact Information': '連絡先情報の一覧',
'List Contacts': '連絡先一覧',
'List Credentials': '証明書一覧',
'List Current': '現在の一覧',
'List Distribution Items': '配給物資リスト',
'List Distributions': '配給所リスト',
'List Documents': '文書の一覧',
'List Donors': '資金提供組織一覧',
'List Feature Layers': 'Featureレイヤリスト',
'List Flood Reports': '洪水レポート一覧',
'List GPX Layers': 'GPXレイヤ一覧',
'List Groups': 'グループ一覧',
'List Groups/View Members': 'グループを一覧/メンバーを表示',
'List Hospitals': '病院の一覧',
'List Identities': 'ID一覧',
'List Images': '画像の一覧',
'List Impact Assessments': '災害影響範囲アセスメント一覧',
'List Impact Types': '災害影響のタイプ一覧',
'List Impacts': '被害一覧',
'List Incident Reports': 'インシデントレポート一覧',
'List Incidents': 'インシデント一覧',
'List Inventory Items': '備蓄物資リスト',
'List Inventory Stores': '物資集積地点リスト',
'List Item Catalog Categories': '救援物資カタログのカテゴリ一覧',
'List Item Catalogs': '救援物資カタログ一覧',
'List Item Categories': '物資カテゴリ一覧',
'List Item Packs': '物資パックの一覧',
'List Item Sub-Categories': '物資サブカテゴリ一覧',
'List Items': '救援物資一覧',
'List Keys': 'Keyの一覧',
'List Kits': 'Kit一覧',
'List Layers': 'レイヤ一覧',
'List Level 1 Assessments': 'レベル1アセスメントの一覧',
'List Level 1 assessments': 'レベル1アセスメント一覧',
'List Level 2 Assessments': 'レベル2のアセスメント一覧',
'List Level 2 assessments': 'レベル2アセスメント一覧',
'List Locations': 'ロケーション一覧',
'List Log Entries': 'ログエントリ一覧',
'List Map Profiles': '地図設定の一覧',
'List Markers': 'マーカー一覧',
'List Members': 'メンバ一覧',
'List Memberships': 'メンバシップ一覧',
'List Messages': 'メッセージ一覧',
'List Metadata': 'メタデータ一覧',
'List Missing Persons': '行方不明者リストを表示',
'List Need Types': '需要タイプ一覧',
'List Needs': 'ニーズ一覧',
'List Notes': '追加情報一覧',
'List Offices': 'オフィス一覧',
'List Organizations': '団体一覧',
'List Peers': 'データ同期先一覧',
'List Personal Effects': '携帯品のリスト',
'List Persons': '人物情報一覧',
'List Photos': '写真リスト',
'List Positions': '場所一覧',
'List Problems': '問題一覧',
'List Projections': '地図投影法リスト',
'List Projects': 'プロジェクト一覧',
'List Rapid Assessments': '被災地の現況アセスメント一覧',
'List Received Items': '受領された物資の一覧',
'List Received Shipments': '受領された輸送一覧',
'List Records': 'レコード一覧',
'List Registrations': '登録証明書の一覧',
'List Reports': 'レポート一覧',
'List Request Items': '物資要請リスト',
'List Requests': '支援要請の一覧',
'List Resources': 'リソース一覧',
'List Responses': '回答の一覧',
'List Rivers': '河川リスト',
'List Roles': '役割一覧',
'List Sections': 'Section一覧',
'List Sectors': '活動分野の一覧',
'List Sent Items': '送付した物資一覧',
'List Sent Shipments': '送付済み物資一覧',
'List Service Profiles': 'サービスプロファイル一覧',
'List Settings': '設定一覧',
'List Shelter Services': '避難所での提供サービス一覧',
'List Shelter Types': '避難所タイプ一覧',
'List Shelters': '避難所の一覧',
'List Shipment Transit Logs': '物資輸送履歴の一覧',
'List Shipment/Way Bills': '輸送費/渡航費の一覧',
'List Shipment<>Item Relation': '輸送と物資の関連性一覧',
'List Shipments': '配送の一覧',
'List Sites': 'Site一覧',
'List Skill Types': 'スキルタイプを一覧表示',
'List Skills': 'スキルを一覧表示',
'List Solutions': '解決案一覧',
'List Staff Types': 'スタッフタイプ一覧',
'List Staff': 'スタッフ一覧',
'List Status': '状況一覧',
'List Storage Bin Type(s)': 'Storage Binタイプ一覧',
'List Storage Bins': 'Storage Bin一覧',
'List Storage Location': '備蓄地点の一覧',
'List Subscriptions': '寄付申し込み一覧',
'List Support Requests': '支援要求のリスト',
'List Survey Answers': '調査の回答の一覧',
'List Survey Questions': 'Survey Question一覧',
'List Survey Sections': 'Survey Sectionsの一覧',
'List Survey Series': '一連の調査リスト',
'List Survey Templates': '調査テンプレートの一覧',
'List TMS Layers': 'TMS レイヤの一覧',
'List Tasks': 'タスク一覧',
'List Teams': 'チーム一覧',
'List Themes': 'テーマ一覧',
'List Tickets': 'チケット一覧',
'List Tracks': '追跡情報の一覧',
'List Units': '単位一覧',
'List Users': 'ユーザ一覧',
'List Volunteers': 'ボランティアの表示',
'List WMS Layers': 'WMSレイヤ一覧',
'List Warehouse Items': '倉庫に備蓄中の物資一覧',
'List Warehouses': '倉庫の一覧',
'List all': '全項目を表示',
'List of Items': '物資一覧',
'List of Missing Persons': '行方不明者リスト',
'List of Peers': 'データ同期先一覧',
'List of Reports': 'レポート一覧',
'List of Requests': '支援要請の一覧',
'List of Roles': '権限リスト',
'List of Spreadsheets uploaded': 'アップロード済スプレッドシート一覧',
'List of Spreadsheets': 'スプレッドシート一覧',
'List of Volunteers for this skill set': 'このスキルを所持するボランティアの一覧',
'List of addresses': '住所一覧',
'List unidentified': '身元不明者の一覧',
'List': '一覧',
'List/Add': '一覧/追加',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '救援団体は自身の支援活動の内容と場所を登録し、公開することで、他の組織との活動を調整することが可能となります。',
'Live Help': 'ライブヘルプ',
'Livelihood': '生計',
'Load Cleaned Data into Database': '整形したデータをデータベースへロード',
'Load Details': '詳細情報の読み込み',
'Load Raw File into Grid': 'Rawファイルをグリッドにロードしてください',
'Load the details to help decide which is the best one to keep out of the 2.': '2つのうちどちらを残すほうがよいか判断するため、詳細情報を確認します。',
'Loading Locations': 'ロケーションデータロード中',
'Loading Locations...': '位置を読込みしています ...',
'Loading': '読み込み中',
'Local Name': 'ローカル名',
'Local Names': 'ローカル名',
'Location 1': 'ロケーション 1',
'Location 2': 'ロケーション 2',
'Location De-duplicated': 'ロケーションの重複解消',
'Location Details': 'ロケーションの詳細',
'Location Hierarchy Level 0 Name': 'ロケーション階層レベル0の名前',
'Location Hierarchy Level 1 Name': 'ロケーション階層レベル1の名前',
'Location Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Location Hierarchy Level 3 Name': 'ロケーション階層レベル3の名前',
'Location Hierarchy Level 4 Name': 'ロケーション階層レベル4の名前',
'Location Hierarchy Level 5 Name': 'ロケーション階層レベル5の名前',
'Location added': 'ロケーションを追加しました',
'Location cannot be converted into a group.': 'ロケーションはグループに変換できません',
'Location deleted': 'ロケーションを削除しました',
'Location details': 'ロケーションの詳細',
'Location group cannot be a parent.': 'ロケーショングループは親にできません',
'Location group cannot have a parent.': 'ロケーショングループに親情報がありません。',
'Location updated': 'ロケーションを更新しました',
'Location': 'ロケーション',
'Location: ': 'ロケーション: ',
'Locations De-duplicator': 'ロケーションの重複解消',
'Locations of this level need to have a parent of level': 'このレベルのロケーションには、親属性となるレベルが必要です',
'Locations should be different!': '異なる位置を設定してください!',
'Locations': 'ロケーション',
'Lockdown': '厳重監禁',
'Log Entry Details': 'ログエントリの詳細',
'Log entry added': 'ログエントリを追加しました',
'Log entry deleted': 'ログエントリを削除しました',
'Log entry updated': 'ログエントリを更新しました',
'Log': 'ログ',
'Logged in': 'ログインしました',
'Logged out': 'ログアウトしました',
'Login': 'ログイン',
'Logistics Management System': '物流管理システム',
'Logistics Management': '物流管理',
'Logistics': '物流',
'Logo file %s missing!': 'ロゴファイル%sが見つかりません。',
'Logo': 'ロゴ',
'Logout': 'ログアウト',
'Long Text': '詳細テキスト',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度は東西(横)です。経度は子午線(グリニッジ標準時)でゼロ、東(ヨーロッパ、アジア)でプラスです。西(大西洋、アメリカ)でマイナスです。',
'Longitude is West - East (sideways).': '経度は東西です(横方向)',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度はグリニッジ子午線(グリニッジ標準時)上が0度です。東側に向かってヨーロッパやアジアの各地で正の値となります。西に向かって大西洋やアメリカの各地で負の値となります。',
'Longitude should be between': '経度の値の有効な範囲は',
'Longitude': '経度',
'Looking up Parents': '親を検索',
'Looting': '略奪',
'Lost Password': 'パスワードの紛失',
'Lost': '行方不明',
'Low': '低',
'Magnetic Storm': '磁気嵐',
'Main cash source': '主な現金収入源',
'Main income sources before disaster': '災害発生前の主な収入源',
'Major expenses': '主な費用',
'Major outward damage': '大きな損傷あり',
'Make Commitment': 'コミットの作成',
'Make Pledge': '寄付の作成',
'Make Request': '支援を要請する',
'Make a Request for Aid': '援助要請を登録',
'Make a Request': '支援要請を登録',
'Make preparations per the <instruction>': '<instruction>毎に準備作業を行う',
'Male': '男性',
'Malnutrition present prior to disaster': '災害前から栄養失調が発生していた',
'Manage Category': 'カテゴリ管理',
'Manage Item catalog': '物資カタログの管理',
'Manage Kits': 'Kitsの管理',
'Manage Relief Item Catalogue': '救援アイテムカタログの管理',
'Manage Sub-Category': 'サブカテゴリの管理',
'Manage Users & Roles': 'ユーザと役割の管理',
'Manage Warehouses/Sites': '倉庫/Sitesの管理',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '支援物資、資産、人員、その他のリソースに対する要求を管理します。支援物資が要求された時に在庫と照合します。',
'Manage requests of hospitals for assistance.': '病院からの支援要請の管理',
'Manage volunteers by capturing their skills, availability and allocation': 'ボランティアのスキル、稼働状況、割り当て状況を管理します',
'Manage': '管理',
'Manager': 'マネージャ',
'Managing Office': 'オフィスの管理',
'Managing, Storing and Distributing Relief Items': '救援物資の保管、流通、配布状況を管理します',
'Managing, Storing and Distributing Relief Items.': '救援物資の管理、保存、配布状況を管理します。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必須項目。GeoServerでのこの項目はレイヤー名となります。WFSの get Capabilitiesでは、コロン( : )の後に付与される FeatureTypeとして表示されます。',
'Mandatory. The URL to access the service.': '省略できません。サービスにアクセスするためのURLです。',
'Manual Synchronization': 'データ手動同期',
'Manual': 'マニュアル',
'Many': '多数',
'Map Profile added': '地図の設定を追加しました',
'Map Profile deleted': '地図設定を削除しました',
'Map Profile updated': '地図設定を更新しました',
'Map Profile': '地図の設定',
'Map Profiles': '地図の設定',
'Map Height': '地図の縦高',
'Map Service Catalog': '地図サービスカタログ',
'Map Settings': '地図の設定',
'Map Viewing Client': '地図閲覧クライアント',
'Map Width': '地図の横幅',
'Map of Hospitals': '病院の地図',
'Map': '地図',
'Mapping': 'マッピング',
'Marine Security': '海上保安',
'Marital Status': '婚姻状況',
'Marker Details': 'マーカーの詳細',
'Marker added': 'マーカーを追加しました',
'Marker deleted': 'マーカーを削除しました',
'Marker updated': 'マーカーを更新しました',
'Marker': 'マーカー',
'Markers': 'マーカー',
'Master Message Log to process incoming reports & requests': '受け取ったレポートと要求を処理するマスターメッセージログ',
'Master Message Log': 'マスターメッセージログ',
'Match Percentage': '一致率',
'Match Requests': '支援要請マッチ',
'Match percentage indicates the % match between these two records': 'マッチの割合は、2つのレコードの間のマッチ状況をあわらします',
'Matching Catalog Items': '適合する救援物資カタログ',
'Matching Records': '一致するレコード',
'Matrix of Choices (Multiple Answers)': '選択肢 (複数可)',
'Matrix of Choices (Only one answer)': '選択肢 (複数選択不可)',
'Matrix of Text Fields': 'テキストフィールドのマトリックス',
'Max Persons per Dwelling': '住居ごとの最大収容人数',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': '最大重量| ドロップダウンリストで単位を選択してから、備蓄地点の最大重量を指定します。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'storage binに収容することができるアイテムの最大重量を指定します。ドロップダウンリストから、単位を選択してください。',
'Measure Area: Click the points around the polygon & end with a double-click': '観測領域: 多角形の角をクリックし、ダブルクリックで終了',
'Measure Length: Click the points along the path & end with a double-click': '距離を計測: 経路上の中継点をクリックして、終点でダブルクリックしてください',
'Medical and public health': '医療、公衆衛生',
'Medicine': '薬品',
'Medium': '中',
'Megabytes per Month': '1月毎のメガバイト数',
'Member removed from Group': 'メンバシップを削除しました',
'Members': 'メンバ',
'Membership Details': 'メンバシップの詳細',
'Membership updated': 'メンバシップを更新しました',
'Membership': 'メンバシップ',
'Memberships': 'メンバシップ',
'Message Details': 'メッセージの詳細',
'Message Sent': 'メッセージが送信されました',
'Message Variable': 'メッセージ変数',
'Message added': 'メッセージを追加しました',
'Message deleted': 'メッセージを削除しました',
'Message field is required!': 'メッセージは必須です',
'Message sent to outbox': 'メッセージを送信箱に送りました',
'Message updated': 'メッセージを更新しました',
'Message variable': 'メッセージ変数',
'Message': 'メッセージ',
'Messages': 'メッセージ',
'Messaging settings updated': 'メッセージング設定を更新しました',
'Messaging': 'メッセージング',
'Metadata Details': 'メタデータの詳細',
'Metadata added': 'メタデータを追加しました',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': '必要に応じて、アップロードした全ての画像に適用されるメタデータをここで入力できます。',
'Metadata deleted': 'メタデータを削除しました',
'Metadata updated': 'メタデータを更新しました',
'Metadata': 'メタデータ',
'Meteorite': '隕石落下',
'Meteorological (inc. flood)': '気象 (洪水を含む)',
'Method used': '使用されるメソッド',
'Micronutrient malnutrition prior to disaster': '災害前から栄養失調傾向あり',
'Middle Name': 'ミドルネーム',
'Migrants or ethnic minorities': '移民、あるいは少数民族の数',
'Military': '軍隊',
'Minimum Bounding Box': '最小:領域を指定した枠組み',
'Minimum shift time is 6 hours': '最小シフト時間は6時間です。',
'Minor/None': '少数 / なし',
'Minorities participating in coping activities': '少数民族が災害対応に従事',
'Minute': '分',
'Minutes must be a number between 0 and 60': '分には0-60の間の数字を記入してください',
'Minutes must be a number greater than 0 and less than 60': '分数は0から60の間で入力してください',
'Minutes per Month': '1月毎の分数',
'Minutes should be a number greater than 0 and less than 60': '分は0から60の間で入力してください',
'Miscellaneous': 'その他',
'Missing Person Details': '行方不明者の詳細',
'Missing Person Reports': '行方不明者レポート',
'Missing Person': '行方不明者',
'Missing Persons Registry': '行方不明者の登録',
'Missing Persons Report': '行方不明者のレポート',
'Missing Persons': '行方不明者',
'Missing Report': '行方不明レポート',
'Missing Senior Citizen': '高齢者の行方不明',
'Missing Vulnerable Person': '被介護者の行方不明',
'Missing': '行方不明',
'Mobile Assess.': '移動端末アクセス',
'Mobile Basic Assessment': 'モバイルの基本アセスメント',
'Mobile Basic': 'モバイルの基礎',
'Mobile Phone': '携帯番号',
'Mobile': 'モバイル',
'Mode': 'モード',
'Modem Settings': 'モバイル機器の設定',
'Modem settings updated': 'モバイル機器の設定を更新しました',
'Moderate': 'モデレート',
'Moderator': 'モデレータ',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '地物の変更: 変形する地物を選択し、点の一つをドラッグすることで地物の形を修正可能です。',
'Modify Information on groups and individuals': 'グループと個人の情報更新',
'Modifying data in spreadsheet before importing it to the database': 'データベース登録前に、スプレッドシート内のデータ項目を修正',
'Module Administration': 'モジュール管理',
'Module disabled!': 'モジュールが無効です',
'Module provides access to information on current Flood Levels.': 'このモジュールにより、洪水の現在の水位情報にアクセス可能です',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'モジュールでは、専門団体によって作成された調査文書を管理します。データには、WFP(国連世界食糧計画)アセスメントも含まれます。',
'Monday': '月曜日',
'Monthly Cost': '月額費用',
'Monthly Salary': '給与(月額)',
'Months': '月',
'Morgue Status': '死体安置所のステータス',
'Morgue Units Available': '死体公示所の収容可能数',
'Mosque': 'モスク',
'Motorcycle': 'オートバイ',
'Moustache': '口ひげ',
'Move Feature: Drag feature to desired location': 'Featureの移動: Feature を希望するロケーションにドラッグしてください',
'Movements (Filter In/Out/Lost)': '活動 (フィルター イン/アウト/ロスト)',
'MultiPolygon': 'マルチポリゴン',
'Multiple Choice (Multiple Answers)': '複数選択(複数回答)',
'Multiple Choice (Only One Answer)': '複数選択(1つだけ回答)',
'Multiple Matches': '複数の結果が適合しました',
'Multiple Text Fields': '複数の入力項目',
'Multiple': '複数',
'Multiplicator': '乗数',
'Muslim': 'イスラム教徒',
'Must a location have a parent location?': 'ある場所にはその親の場所が無ければならないですか?',
'My Current function': '現在登録している機能',
'My Tasks': '自分のタスク',
'N/A': '該当なし',
'NZSEE Level 1': 'NZSEE レベル1',
'NZSEE Level 2': 'NZSEE レベル 2',
'Name and/or ID Label': '名前および/またはIDラベル',
'Name and/or ID': '名前および/またはID',
'Name of Storage Bin Type.': '物資保管タイプの名前です。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'ヘッダーの背景に使用される、static にあるファイルの名前 (オプションでサブパス)。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '左上の画像で静的位置を表すファイル名(サブパス名はオプション)',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'フッターに使われるビューにあるファイル名 (オプションとしてサブパス)。',
'Name of the person in local language and script (optional).': '現地言語での名前と表記(オプション)',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'このレポートに関連する組織や部署の名前。部署をもたない病院の場合は空欄にしてください。',
'Name or Job Title': '名前あるいは役職名',
'Name': '名前',
'Name, Org and/or ID': '名前、組織、IDなど',
'Name/Model/Type': '名前/ モデル/タイプ',
'Name: ': '名前: ',
'Names can be added in multiple languages': '名前は、複数の言語で記述することができます。',
'National ID Card': 'ナショナルIDカード',
'National NGO': '国内NPO',
'National Staff': '現地スタッフ',
'Nationality of the person.': 'この人物の国籍です。',
'Nationality': '国籍',
'Nautical Accident': '船舶事故',
'Nautical Hijacking': '船舶ハイジャック',
'Need Type Details': '需要タイプの詳細',
'Need Type added': '需要タイプを追加しました',
'Need Type deleted': '需要タイプを削除しました',
'Need Type updated': '需要タイプを更新しました',
'Need Type': '需要タイプ',
'Need Types': '需要タイプ',
'Need added': 'ニーズを追加しました',
'Need deleted': 'ニーズを削除しました',
'Need to be logged-in to be able to submit assessments': '評価を確定させるには、ログインが必要です',
'Need to configure Twitter Authentication': 'Twitterの認証を設定する必要があります',
'Need to select 2 Locations': 'ロケーションを2つ指定してください',
'Need to specify a Budget!': '予算を指定する必要があります。',
'Need to specify a Kit!': 'Kitを指定する必要があります。',
'Need to specify a Resource!': 'リソースを指定する必要があります。',
'Need to specify a bundle!': 'bundleを指定する必要があります。',
'Need to specify a group!': 'グループを指定する必要があります。',
'Need to specify a location to search for.': '検索対象となるロケーションを指定する必要があります。',
'Need to specify a role!': '役割を指定する必要があります。',
'Need to specify a service!': 'サービスを指定してください!',
'Need to specify a table!': 'テーブルを指定する必要があります。',
'Need to specify a user!': 'ユーザを指定する必要があります。',
'Need updated': 'ニーズを更新しました',
'Needs Details': '需要の詳細',
'Needs to reduce vulnerability to violence': '暴力行為の対策として必要な物資 / サービス',
'Needs': '要求',
'Negative Flow Isolation': '逆流の分離',
'Neighbourhood': '近隣',
'Neighbouring building hazard': '隣接ビルが危険な状態',
'Neonatal ICU': '新生児ICU',
'Neonatology': '新生児科',
'Network': 'ネットワーク',
'Neurology': '神経科',
'New Assessment reported from': '新規アセスメントの報告元',
'New Checklist': '新規チェックリスト',
'New Peer': '新しいデータ同期先',
'New Record': '新規レコード',
'New Report': '新規レポート',
'New Request': '新規の支援要請',
'New Solution Choice': '新しい解決案を選択',
'New Support Request': '新しい支援要請',
'New Synchronization Peer': '新しい同期先',
'New cases in the past 24h': '過去24時間の新規ケース数',
'New': '新規',
'News': 'ニュース',
'Next View': '次を表示',
'Next': '次へ',
'No Activities Found': '支援活動が見つかりませんでした',
'No Addresses currently registered': '住所は、まだ登録がありません。',
'No Aid Requests have been made yet': '援助要請がまだ作成されていません',
'No Alternative Items currently registered': '代替物資は現在登録されていません',
'No Assessment Summaries currently registered': 'アセスメントの要約が登録されていません',
'No Assessments currently registered': '登録済みのアセスメントがありません',
'No Asset Assignments currently registered': '現在のところ資産割り当ては登録されていません',
'No Assets currently registered': '登録されている資産は現在ありません。',
'No Baseline Types currently registered': '登録済みのBaseline Typesはありません',
'No Baselines currently registered': '登録されている基準値はありません',
'No Brands currently registered': '登録されている銘柄がありません',
'No Budgets currently registered': '予算は、まだ登録がありません。',
'No Bundles currently registered': 'Bundleは、まだ登録がありません。',
'No Catalog Items currently registered': '登録済みのカタログアイテムがありません',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-Category<>Catalog間の関係は、まだ登録がありません。',
'No Checklist available': '利用可能なチェックリストがありません',
'No Cluster Subsectors currently registered': 'クラスタのサブセクタはまだ登録がありません',
'No Clusters currently registered': '登録済みのクラスタはありません',
'No Commitment Items currently registered': '現在のところコミット済み物資は登録されていません',
'No Commitments': 'コミットメントがありません',
'No Configs currently defined': '設定は、まだ定義されていません',
'No Credentials currently set': '現在のところ証明書が設定されていません',
'No Details currently registered': '詳細は、まだ登録されていません',
'No Distribution Items currently registered': '配給物資の登録がありません',
'No Distributions currently registered': '配給所の登録がありません',
'No Documents found': '文書が見つかりませんでした。',
'No Donors currently registered': '資金提供組織はまだ登録されていません',
'No Feature Layers currently defined': 'Feature Layersはまだ定義されていません',
'No Flood Reports currently registered': '登録済みの洪水情報はありません',
'No GPX Layers currently defined': 'GPXレイヤはまだ定義されていません',
'No Groups currently defined': 'グループはまだ定義されていません',
'No Groups currently registered': 'グループはまだ登録されていません',
'No Hospitals currently registered': '病院はまだ登録されていません',
'No Identification Report Available': '利用可能なIDレポートはありません',
'No Identities currently registered': '登録されているIDはありません',
'No Image': '画像なし',
'No Images currently registered': '画像の登録はありません',
'No Impact Types currently registered': '被害の種類は未登録です',
'No Impacts currently registered': 'これまでに登録されたImpactはありません',
'No Incident Reports currently registered': '登録されているインシデントレポートはありません',
'No Incidents currently registered': '登録済みのインシデントはありません。',
'No Incoming Shipments': '到着予定の輸送物資はありません',
'No Inventory Items currently registered': '備蓄物資の登録がありません',
'No Inventory Stores currently registered': '現在登録されている物資集積地点はありません',
'No Item Catalog Category currently registered': '救援物資カタログのカテゴリはまだ登録がありません',
'No Item Catalog currently registered': 'アイテムカタログはまだ登録されていません',
'No Item Categories currently registered': '救援物資カテゴリの登録がありません',
'No Item Packs currently registered': '救援物資のパックは、まだ登録がありません',
'No Item Sub-Category currently registered': '救援物資のサブカテゴリはまだ登録されていません',
'No Item currently registered': 'アイテムはまだ登録されていません',
'No Items currently registered': '物資はまだ登録されていません',
'No Items currently requested': '要求されている物資はありません',
'No Keys currently defined': 'Keyはまだ定義されていません',
'No Kits currently registered': 'Kitはまだ登録されていません',
'No Level 1 Assessments currently registered': '現在のところ、レベル1アセスメントは登録されていません',
'No Level 2 Assessments currently registered': '現在のところ、レベル2アセスメントは登録されていません',
'No Locations currently available': '現在利用可能なロケーションはありません',
'No Locations currently registered': 'ロケーションはまだ登録されていません',
'No Map Profiles currently defined': '地図の設定が定義されていません',
'No Markers currently available': '現在利用可能なマーカーはありません',
'No Match': '合致する結果がありません',
'No Matching Catalog Items': '適合する救援物資はありませんでした',
'No Matching Records': '適合する検索結果がありませんでした',
'No Members currently registered': 'メンバはまだ登録されていません',
'No Memberships currently defined': 'メンバシップはまだ登録されていません',
'No Messages currently in Outbox': '送信箱にメッセージがありません',
'No Metadata currently defined': 'メタデータはまだ定義されていません',
'No Need Types currently registered': '現在登録されている需要タイプはありません',
'No Needs currently registered': '現在要求は登録されていません',
'No Offices currently registered': 'オフィスはまだ登録されていません',
'No Offices found!': 'オフィスが見つかりませんでした',
'No Organizations currently registered': '団体はまだ登録されていません',
'No Packs for Item': 'この物資に対する救援物資パックはありません',
'No Peers currently registered': '登録済みのデータ同期先はありません',
'No People currently registered in this shelter': 'この避難所に登録されている人物情報はありません',
'No Persons currently registered': '人物情報はまだ登録されていません',
'No Persons currently reported missing': '現在、行方不明者の登録はありません',
'No Persons found': '該当する人物はいませんでした',
'No Photos found': '写真の登録がありません',
'No Picture': '写真がありません',
'No Presence Log Entries currently registered': '所在地履歴の登録がありません',
'No Problems currently defined': '定義済みの問題がありません',
'No Projections currently defined': '地図投影法は、まだ定義されていません。',
'No Projects currently registered': '定義済みのプロジェクトはありません',
'No Rapid Assessments currently registered': '被災地の現況アセスメントはまだ登録されていません',
'No Received Items currently registered': '受領された救援物資の登録はありません',
'No Received Shipments': '受け取った輸送はありません',
'No Records currently available': '利用可能なレコードはありません',
'No Records matching the query': '条件に当てはまるレコードが存在しません',
'No Request Items currently registered': '物資要請の登録がありません',
'No Requests have been made yet': '支援要請は、まだ行われていません',
'No Requests match this criteria': 'この条件に一致する支援要請はありません',
'No Requests': '支援要請がありません',
'No Responses currently registered': '返答はまだ登録されていません',
'No Rivers currently registered': '河川情報の登録がありません',
'No Roles currently defined': '役割はまだ定義されていません',
'No Sections currently registered': 'このセクションの登録情報がありません',
'No Sectors currently registered': '登録済みの活動分野がありません',
'No Sent Items currently registered': '送付した物資の登録がありません',
'No Sent Shipments': '送付が行われた輸送がありません',
'No Settings currently defined': '設定は、まだ定義されていません',
'No Shelter Services currently registered': '登録されている避難所サービスがありません',
'No Shelter Types currently registered': '登録済みの避難所タイプがありません',
'No Shelters currently registered': '避難所はまだ登録されていません',
'No Shipment Transit Logs currently registered': '物資輸送履歴の登録がありません',
'No Shipment/Way Bills currently registered': '輸送費/Way Billsはまだ登録されていません',
'No Shipment<>Item Relation currently registered': '輸送とアイテムの関連付けはまだ登録されていません',
'No Sites currently registered': '登録されているサイトはありません',
'No Skill Types currently set': '設定済みのスキルタイプはありません',
'No Solutions currently defined': '解決案はまだ定義されていません',
'No Staff Types currently registered': 'スタッフタイプはまだ登録されていません',
'No Staff currently registered': 'スタッフはまだ登録されていません',
'No Storage Bin Type currently registered': '登録済みのStorage Binタイプがありません',
'No Storage Bins currently registered': 'Storage Binはまだ登録されていません',
'No Storage Locations currently registered': '登録されている備蓄地点がありません',
'No Subscription available': '寄付の申し込みがありません',
'No Support Requests currently registered': '現在のところ、支援要請は登録されていません',
'No Survey Answers currently registered': 'これまでに登録されたフィードバックの回答はありません',
'No Survey Questions currently registered': '登録済みのSurvey Questionsはありません',
'No Survey Sections currently registered': '登録済みのSurvey Sectionはありません',
'No Survey Series currently registered': '現在、調査報告は登録されていません',
'No Survey Template currently registered': '登録されている調査テンプレートがありません',
'No TMS Layers currently defined': 'TMS レイヤーがまだ定義されていません',
'No Tasks with Location Data': 'ロケーション情報を持っているタスクがありません',
'No Themes currently defined': 'テーマはまだ定義されていません',
'No Tickets currently registered': 'チケットはまだ定義されていません',
'No Tracks currently available': '利用可能な追跡情報はありません',
'No Units currently registered': '単位はまだ登録されていません',
'No Users currently registered': '登録済みのユーザがありません',
'No Volunteers currently registered': 'ボランティアの登録がありません',
'No Warehouse Items currently registered': '現在登録済みの倉庫物資はありません',
'No Warehouses currently registered': '倉庫が登録されていません',
'No Warehouses match this criteria': '条件に合致する倉庫がありません',
'No access at all': '完全に孤立中',
'No access to this record!': 'このレコードにはアクセスできません',
'No action recommended': 'アクション無しを推奨',
'No calculations made': '見積が作成されていません',
'No conflicts logged': 'コンフリクトのログはありません。',
'No contact information available': '利用可能な連絡先情報はありません',
'No contacts currently registered': '連絡先が登録されていません',
'No data in this table - cannot create PDF!': 'テーブルにデータがありません。PDF を作成できません。',
'No databases in this application': 'このアプリケーションにデータベースはありません',
'No dead body reports available': '遺体情報のレポートはありません',
'No entries found': 'エントリが見つかりません',
'No entries matching the query': 'クエリに一致するエントリはありませんでした。',
'No import jobs': 'インポートされたJobがありません',
'No linked records': 'リンクされているレコードはありません',
'No location known for this person': 'この人物の消息が不明です',
'No locations found for members of this team': 'このチームのメンバーの場所が見つかりませんでした',
'No locations registered at this level': 'この階層に登録されているロケーションはありません',
'No log entries matching the query': '検索に合致するログエントリがありません',
'No matching items for this request': 'この支援要請に適合する物資はありません',
'No matching records found.': '一致するレコードがありませんでした。',
'No messages in the system': 'システム上にメッセージが存在しません',
'No notes available': '追加情報はありません',
'No peers currently registered': '現在登録されているデータ同期先はありません',
'No pending registrations found': '処理保留中の登録申請はありません',
'No pending registrations matching the query': '検索に合致する処理保留登録申請がありません。',
'No person record found for current user.': '現在のユーザの人物情報レコードが見つかりませんでした。',
'No positions currently registered': '登録されているpositionがありません',
'No problem group defined yet': '定義済みの問題グループがありません。',
'No records matching the query': '条件に当てはまるレコードが存在しません',
'No records to delete': '削除するレコードがありません',
'No recovery reports available': '利用可能な遺体回収レポートはありません',
'No report available.': '利用可能なレポートはありません。',
'No reports available.': '利用可能なレポートがありません。',
'No reports currently available': '利用可能なレポートはありません',
'No requests found': '支援要請は見つかりませんでした',
'No resources currently registered': 'リソースはまだ登録されていません',
'No resources currently reported': 'レポート済みのリソースはありません',
'No service profile available': '利用可能なサービスプロファイルはありません',
'No skills currently set': 'スキルが登録されていません',
'No status information available': '状況に関する情報はありません',
'No synchronization': '同期なし',
'No tasks currently registered': 'タスクはまだ登録されていません',
'No template found!': 'テンプレートが見つかりません。',
'No units currently registered': '単位はまだ登録されていません',
'No volunteer information registered': 'ボランティア情報はまだ登録されていません',
'No': 'いいえ',
'Non-structural Hazards': 'その他の災害',
'None (no such record)': 'なし(記録がありません)',
'None': 'なし',
'Noodles': '麺',
'Normal food sources disrupted': '普段の食料供給源が混乱している',
'Normal': '通常どおり',
'Not Applicable': '該当なし',
'Not Authorised!': '認証されていません',
'Not Possible': '対応不可',
'Not Set': '設定されていません',
'Not Authorized': '認証されていません',
'Not installed or incorrectly configured.': 'インストールされていないか、適切な設定がされていません',
'Not yet a Member of any Group': 'メンバシップはまだ登録されていません',
'Note Details': '追加情報の詳細',
'Note Status': '状態を記録',
'Note Type': '追加情報の種類',
'Note added': '追加情報を追加しました',
'Note deleted': '追加情報を削除しました',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '注意:このリストは、活動中のボランティアのみ表示しています。システムに登録しているすべての人をみるには、ホーム・スクリーンから検索してください。',
'Note updated': '追加情報を更新しました',
'Note': '追加情報',
'Notes': '追加情報',
'Notice to Airmen': 'NOTAM (航空従事者用)',
'Number of Columns': '列数',
'Number of Patients': '患者数',
'Number of Rows': '行数',
'Number of Vehicles': '車両数',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'この施設において、今後24時間以内に利用可能になると予測されている、このタイプの追加ベッド数。',
'Number of alternative places for studying': '授業用に確保できる場所の数',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'このタイプの利用可能/空きベッド数(報告時点)',
'Number of deaths during the past 24 hours.': '過去24時間以内の死亡者数',
'Number of discharged patients during the past 24 hours.': '退院患者数(過去24時間以内)',
'Number of doctors actively working': '現在活動中の医師の数',
'Number of doctors': '医者の人数',
'Number of houses damaged, but usable': '破損しているが利用可能な家屋の数',
'Number of houses destroyed/uninhabitable': '全壊/居住不可になった家屋数',
'Number of in-patients at the time of reporting.': 'レポート時の患者数です。',
'Number of latrines': 'トイレ総数',
'Number of midwives actively working': '現在活動中の助産師の数',
'Number of newly admitted patients during the past 24 hours.': '入院患者数(過去24時間以内)',
'Number of non-medical staff': '医療従事以外のスタッフ数',
'Number of nurses actively working': '現在活動中の看護師の数',
'Number of nurses': '看護師の人数',
'Number of private schools': '私立学校の数',
'Number of public schools': '公立学校の数',
'Number of religious schools': '宗教学校の数',
'Number of residential units not habitable': '住めなくなった住居の数',
'Number of residential units': '居住施設の数',
'Number of schools damaged but usable': '破損しているが利用可能な校舎の数',
'Number of schools destroyed/uninhabitable': '全壊 / 利用不可能な校舎の数',
'Number of schools open before disaster': '災害前に開校していた学校数',
'Number of schools open now': '現在開校している学校の数',
'Number of teachers affected by disaster': '被災した教師の数',
'Number of teachers before disaster': '災害発生前の教師の数',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '病院に設置されている、現在利用可能なベッドの数。日時レポートにより、自動的に更新されます。',
'Number of vacant/available units to which victims can be transported immediately.': '現在利用可能なユニット数。犠牲者を即座に安置できる数。',
'Number or Label on the identification tag this person is wearing (if any).': 'この人物の衣服につけられているタグの番号、あるいはラベル名(ある場合のみ).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'この場所をあとで検索するための番号かコード 例: フラグ番号、グリッドの位置、サイトの参照番号など',
'Number': '番号',
'Number/Percentage of affected population that is Female & Aged 0-5': '女性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 13-17': '女性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 18-25': '女性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 26-60': '女性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 6-12': '女性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 61+': '女性(61歳以上)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 0-5': '男性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 13-17': '男性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 18-25': '男性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 26-60': '男性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 6-12': '男性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 61+': '男性(61歳以上)の被災者数 / 割合',
'Numbers Only': '数値のみ',
'Nursery Beds': '看護ベッド',
'Nutrition problems': '栄養問題',
'Nutrition': '食料・栄養',
'OR Reason': '手術室の理由',
'OR Status Reason': '手術室の状態理由',
'OR Status': '手術室の状態',
'Observer': 'オブザーバ',
'Obsolete': '廃止済み',
'Obstetrics/Gynecology': '産婦人科',
'Office Address': 'オフィスの住所',
'Office Details': 'オフィスの詳細',
'Office added': 'オフィスを追加しました',
'Office deleted': 'オフィスを削除しました',
'Office updated': 'オフィスを更新しました',
'Office': 'オフィス',
'Offices': 'オフィス',
'Offline Sync (from USB/File Backup)': 'データのオフライン同期(USB/バックアップファイル利用)',
'Offline Sync': 'データのオフライン同期',
'Old': '古い',
'Older people as primary caregivers of children': '子供の介護を、高齢者が担当',
'Older people in care homes': '介護施設で生活する高齢者がいる',
'Older people participating in coping activities': '高齢者が災害対応に従事',
'Older people with chronical illnesses': '慢性疾患をもつ高齢者がいる',
'Older person (>60 yrs)': '高齢者(60歳以上)',
'On by default? (only applicable to Overlays)': 'デフォルトでオン(オーバーレイにのみ有効)',
'On by default?': 'デフォルトでON?',
'One Time Cost': '1回毎の費用',
'One time cost': '一回毎の費用',
'One-time costs': '一回毎の費用',
'One-time': '1回毎',
'Oops! Something went wrong...': '申し訳ありません、何か問題が発生しています。',
'Oops! something went wrong on our side.': '申し訳ありません、システム側に問題が発生しています。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1は不透明、0は完全に透明)',
'Open Assessment': '未解決のアセスメント',
'Open Map': '地図を開く',
'Open area': '空き地',
'Open recent': '最近使用したものを開く',
'Open': '開く',
'OpenStreetMap Editor': 'OpenStreetMap エディタ',
'Operating Rooms': '手術室',
'Optional link to an Incident which this Assessment was triggered by.': 'このアセスメントの端緒となった事故へのオプション・リンク',
'Optional': '任意',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'オプション。GeoServerでは、ワークスペース名前空間のURIです。WFS getCapabilitiesでは、FeatureType名のコロンの前の部分です。',
'Options': 'オプション',
'Organization Details': '団体の詳細',
'Organization Registry': '団体情報の登録',
'Organization added': '団体を追加しました',
'Organization deleted': '団体を削除しました',
'Organization updated': '団体を更新しました',
'Organization': '団体',
'Organizations': '団体',
'Origin of the separated children': '離別した子供たちの出身地',
'Origin': '出身地',
'Other (describe)': 'その他 (要記述)',
'Other (specify)': 'その他(具体的に)',
'Other Evidence': 'その他の証跡',
'Other Faucet/Piped Water': 'その他 蛇口/パイプによる水源',
'Other Isolation': 'その他の孤立',
'Other Name': 'その他の名前',
'Other activities of boys 13-17yrs before disaster': 'その他、災害発生前の13-17歳男子の活動状況',
'Other activities of boys 13-17yrs': 'その他、13-17歳男子の活動状況',
'Other activities of boys <12yrs before disaster': 'その他、災害発生前の12歳以下男子の活動状況',
'Other activities of boys <12yrs': 'その他、12歳以下男子の活動状況',
'Other activities of girls 13-17yrs before disaster': 'その他、災害発生前の13-17歳女子の活動状況',
'Other activities of girls 13-17yrs': 'その他、13-17歳女子の活動状況',
'Other activities of girls<12yrs before disaster': 'その他、災害発生前の12歳以下女子の活動状況',
'Other activities of girls<12yrs': 'その他、12歳以下女子の活動状況',
'Other alternative infant nutrition in use': 'その他、使用されている乳児用代替食',
'Other alternative places for study': 'その他、授業開設に利用可能な施設',
'Other assistance needed': 'その他に必要な援助活動',
'Other assistance, Rank': 'その他の援助、ランク',
'Other current health problems, adults': 'その他の健康問題(成人)',
'Other current health problems, children': 'その他の健康問題(小児)',
'Other events': '他のイベント',
'Other factors affecting school attendance': 'その他、生徒の就学に影響する要因',
'Other major expenses': 'その他の主な支出',
'Other non-food items': '食料以外の救援物資',
'Other recommendations': '他の推薦',
'Other residential': '住宅その他',
'Other school assistance received': 'その他の学校用品を受領した',
'Other school assistance, details': '受領した学校用品の内訳',
'Other school assistance, source': 'その他の学校用品の送付元',
'Other side dishes in stock': '在庫のあるその他食材',
'Other types of water storage containers': 'それ以外の水貯蔵容器タイプ',
'Other ways to obtain food': 'それ以外の食料調達方法',
'Other': 'その他',
'Outbound Mail settings are configured in models/000_config.py.': '送信メール設定は、models/000_config.py で定義されています。',
'Outbox': '送信箱',
'Outgoing SMS Handler': 'SMS 送信ハンドラ',
'Outgoing SMS handler': 'SMS送信ハンドラ',
'Overall Hazards': 'すべての危険',
'Overhead falling hazard': '頭上落下物の危険',
'Overland Flow Flood': '陸上の洪水流量',
'Overlays': 'オーバーレイ',
'Owned Records': '自身のレコード',
'Owned Resources': '保持しているリソース',
'PDAM': '水道会社(PDAM)',
'PIN number ': 'PIN 番号',
'PIN': '暗証番号',
'PL Women': 'PL 女性',
'Pack': 'パック',
'Packs': 'パック',
'Pan Map: keep the left mouse button pressed and drag the map': 'マップをパン: マウスの左ボタンを押したまま、地図をドラッグしてください',
'Parameters': 'パラメータ',
'Parapets, ornamentation': '欄干、オーナメント',
'Parent Office': '親組織のオフィス',
'Parent needs to be of the correct level': '適切なレベルの親属性を指定してください',
'Parent needs to be set for locations of level': 'ロケーションのレベルには親属性が必要です',
'Parent needs to be set': '親情報が設定される必要があります',
'Parent': '親',
'Parents/Caregivers missing children': '親/介護者とはぐれた子供たち',
'Partial': '一部 / 不足',
'Participant': '参加者',
'Pashto': 'パシュトー語',
'Passport': 'パスポート',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Password for authentication at the peer. HTTPベーシック認証のみサポートしています。',
'Password': 'パスワード',
'Path': 'パス',
'Pathology': '病理学',
'Patients': '患者数',
'Pediatric ICU': '小児ICU',
'Pediatric Psychiatric': '小児精神科',
'Pediatrics': '小児科医',
'Peer Details': 'データ同期先の詳細',
'Peer Registration Details': 'データ同期先登録の詳細',
'Peer Registration Request': 'データ同期先の登録要求',
'Peer Registration': 'データ同期先登録',
'Peer Type': '同期先タイプ',
'Peer UID': '同期先UID',
'Peer added': 'データ同期先を追加しました',
'Peer deleted': 'データ同期先を削除しました',
'Peer not allowed to push': '同期先がデータのプッシュを許可していません',
'Peer registration request added': 'データ同期先の登録要求を追加しました',
'Peer registration request deleted': 'データ同期先の登録要求を削除しました',
'Peer registration request updated': 'データ同期先の登録要求を更新しました',
'Peer updated': '同期先を更新しました',
'Peer': 'データ同期先',
'Peers': '同期先',
'Pending Requests': '保留中の支援要請',
'Pending': '中断',
'People Needing Food': '食料不足',
'People Needing Shelter': '避難所が必要',
'People Needing Water': '水が必要',
'People Trapped': '救難者',
'People with chronical illnesses': '慢性疾患をもつ成人がいる',
'People': '人物情報',
'Person 1': '人物 1',
'Person 1, Person 2 are the potentially duplicate records': '人物情報1と人物情報2は重複したレコードの可能性があります。',
'Person 2': '人物 2',
'Person Data': '人物データ',
'Person De-duplicator': '人物情報の重複削除',
'Person Details': '人物情報の詳細',
'Person Finder': '消息情報',
'Person Registry': '人物情報の登録',
'Person added to Group': 'グループメンバを追加しました',
'Person added to Team': 'グループメンバを追加しました',
'Person added': '人物情報を追加しました',
'Person deleted': '人物情報を削除しました',
'Person details updated': '人物情報を更新しました',
'Person interviewed': 'インタビュー担当者',
'Person missing': '行方不明中',
'Person must be specified!': '登録がありません',
'Person reporting': 'レポート報告者',
'Person who has actually seen the person/group.': '人物/グループで実際に目撃された人物情報',
'Person who is reporting about the presence.': 'この所在報告を行った人物です。',
'Person who observed the presence (if different from reporter).': '人物の所在を確認したひとの情報(報告者と異なる場合のみ記入)。',
'Person': '人物情報',
'Person/Group': '人物/グループ',
'Personal Data': '個人情報',
'Personal Effects Details': '個人の影響の詳細',
'Personal Effects': '所持品',
'Personal impact of disaster': 'この人物の被災状況',
'Personal': '個人',
'Persons in institutions': '施設居住中の住人',
'Persons with disability (mental)': '障がい者数(精神的障がい者を含む)',
'Persons with disability (physical)': '肉体的な障がい者の数',
'Persons': '人物情報',
'Phone 1': '電話番号',
'Phone 2': '電話番号(予備)',
'Phone': '電話番号',
'Phone/Business': '電話番号/仕事',
'Phone/Emergency': '電話番号/緊急連絡先',
'Phone/Exchange': '電話/とりつぎ',
'Photo Details': '写真の詳細',
'Photo Taken?': '写真撮影済み?',
'Photo added': '写真を追加しました',
'Photo deleted': '写真を削除しました',
'Photo updated': '写真を更新しました',
'Photo': '写真',
'Photograph': '写真',
'Photos': '写真',
'Physical Description': '身体外見の説明',
'Physical Safety': '身体的安全',
'Picture upload and finger print upload facility': '指紋や写真のアップロード機能',
'Picture': '写真',
'Place for solid waste disposal': '廃棄物の処理を行う場所を記載してください',
'Place of Recovery': '遺体回収場所',
'Place on Map': '地図上の場所',
'Places for defecation': 'トイレ',
'Places the children have been sent to': '子供たちの避難先',
'Planner': '立案者',
'Playing': '家庭内/外で遊ぶ',
'Please correct all errors.': 'すべてのエラーを修正してください。',
'Please enter a First Name': '苗字を入力してください',
'Please enter a valid email address': '有効な電子メールアドレスを入力してください。',
'Please enter the first few letters of the Person/Group for the autocomplete.': '自動入力するには人物あるいはグループの最初の数文字を入力してください',
'Please enter the recipient': '受取担当者を入力してください',
'Please fill this!': 'ここに入力してください',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '言及先のURLを明示し、期待する結果と実際に発生した結果を記述してください。不具合チケットが発行された場合は、そのチケットIDも記載してください。',
'Please report here where you are:': 'いまあなたが居る場所を入力してください。',
'Please select another level': '別のレベルを選択してください',
'Please select': '選んでください',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '携帯電話番号でサインアップし、Sahanaからのテキストメッセージを受け取れるようにします。国際電話コードまで含めた形式で入力してください',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '病気の治療に当たって問題となる事象の詳細を記載します。状況を改善するための提案も、もしあれば記載してください。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '追加情報はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Please use this field to record any additional information, including any Special Needs.': '特別な要求など、どんな追加情報でも構いませんので、この部分に記録してください',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'UshahidiのインスタンスIDなど、追加情報がある場合はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Pledge Aid to match these Requests': 'これらの要求に一致する支援に寄付する',
'Pledge Aid': '寄付する',
'Pledge Status': '寄付のステータス',
'Pledge Support': '寄付サポート',
'Pledge': '寄付',
'Pledged': '寄付済み',
'Pledges': '寄付',
'Point': 'ポイント',
'Poisoning': '中毒',
'Poisonous Gas': '有毒ガス',
'Police': '警察',
'Pollution and other environmental': '汚染、あるいはその他の環境要因',
'Polygon reference of the rating unit': 'その評価単位への参照ポリゴン',
'Polygon': 'ポリゴン',
'Population and number of households': '人口と世帯数',
'Population': '利用者数',
'Porridge': 'おかゆ',
'Port Closure': '港湾閉鎖',
'Port': 'ポート',
'Position Details': 'ポジションの詳細',
'Position added': 'Position を追加しました',
'Position deleted': 'ポジションを削除しました',
'Position type': '場所のタイプ',
'Position updated': 'ポジションを更新しました',
'Positions': 'ポジション',
'Postcode': '郵便番号',
'Poultry restocking, Rank': '家禽の補充、ランク',
'Poultry': '家禽(ニワトリ)',
'Pounds': 'ポンド',
'Power Failure': '停電',
'Pre-cast connections': 'プレキャスト連結',
'Preferred Name': '呼び名',
'Pregnant women': '妊婦の数',
'Preliminary': '予備',
'Presence Condition': '所在情報',
'Presence Log': '所在履歴',
'Presence': '所在',
'Previous View': '前を表示',
'Previous': '前へ',
'Primary Name': '基本名',
'Primary Occupancy': '主要な従事者',
'Priority Level': '優先度レベル',
'Priority': '優先度',
'Private': '企業',
'Problem Administration': '問題管理',
'Problem Details': '問題の詳細',
'Problem Group': '問題グループ',
'Problem Title': '問題の名称',
'Problem added': '問題を追加しました',
'Problem connecting to twitter.com - please refresh': 'twitter.comに接続できません。更新ボタンを押してください',
'Problem deleted': '問題を削除しました',
'Problem updated': '問題を更新しました',
'Problem': '問題',
'Problems': '問題',
'Procedure': '手続き',
'Procurements': '物資の調達',
'Product Description': '製品の説明',
'Product Name': '製品名',
'Profile': 'プロファイル',
'Project Activities': 'プロジェクト活動状況',
'Project Details': 'プロジェクトの詳細',
'Project Management': 'プロジェクト管理',
'Project Status': 'プロジェクトのステータス',
'Project Tracking': 'プロジェクト追跡',
'Project added': 'プロジェクトを追加しました',
'Project deleted': 'プロジェクトを削除しました',
'Project has no Lat/Lon': 'プロジェクトの緯度/経度情報はありません',
'Project updated': 'プロジェクトを更新しました',
'Project': 'プロジェクト',
'Projection Details': '地図投影法の詳細',
'Projection added': '地図投影法を追加しました',
'Projection deleted': '地図投影法を削除しました',
'Projection updated': '地図投影法を更新しました',
'Projection': '地図投影法',
'Projections': '地図投影法',
'Projects': 'プロジェクト',
'Property reference in the council system': '評議システムで使用されるプロパティリファレンス',
'Protected resource': '保護されたリソース',
'Protection': '被災者保護',
'Provide Metadata for your media files': 'メディアファイルにメタデータを提供',
'Provide a password': 'パスワードを入力',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '建物全体か損傷箇所のスケッチを提供し、損傷箇所を明示してください。',
'Province': '都道府県',
'Proxy-server': 'プロキシサーバ',
'Psychiatrics/Adult': '精神病/成人',
'Psychiatrics/Pediatric': '精神病/小児',
'Public Event': '公開イベント',
'Public and private transportation': '公共および民営の交通機関',
'Public assembly': '公会堂',
'Public': '公開',
'Pull tickets from external feed': '外部フィードからのticketの取得',
'Punjabi': 'パンジャブ',
'Push tickets to external system': '外部システムにチケットの発信',
'Put a choice in the box': '箱の中から選んで取る',
'Pyroclastic Flow': '火砕流',
'Pyroclastic Surge': '火砕サージ',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'PythonでPython Serial moduleが利用できません。モデムの有効化に必要です。',
'Python needs the ReportLab module installed for PDF export': '実行中のPythonでReportLabモジュールが利用できません。PDF出力に必要です。',
'Quantity Committed': '引き受けた量',
'Quantity Fulfilled': '十分な量がある',
'Quantity in Transit': '運送中の数量',
'Quantity': '数量',
'Quarantine': '隔離施設',
'Queries': 'クエリ',
'Query Feature': '問合せ機能',
'Query': 'クエリ',
'Queryable?': '検索可能?',
'RC frame with masonry infill': '鉄骨入りコンクリートブロック',
'RECORD A': 'レコード A',
'RECORD B': 'レコード B',
'RESPONSE': '対応',
'Race': '人種',
'Radiological Hazard': '放射能災害',
'Radiology': '放射線科',
'Railway Accident': '鉄道事故',
'Railway Hijacking': '鉄道ハイジャック',
'Rain Fall': '降雨',
'Rapid Assessment Details': '被災地の現況アセスメントの詳細',
'Rapid Assessment added': '被災地の現況アセスメントを追加しました',
'Rapid Assessment deleted': '被災地の現況アセスメントを削除しました',
'Rapid Assessment updated': '被災地の現況アセスメントを更新しました',
'Rapid Assessment': '被災地の現況アセスメント',
'Rapid Assessments & Flexible Impact Assessments': '被災地の現況アセスメントと、災害影響範囲アセスメント',
'Rapid Assessments': '被災地の現況アセスメント',
'Rapid Close Lead': '急いで閉め、先導してください。',
'Rapid Data Entry': 'データ入力簡易版',
'Rating Scale': '評価尺度',
'Raw Database access': 'データベースへの直接アクセス',
'Read-Only': '読み込み専用',
'Read-only': '登録内容の編集を禁止',
'Real World Arbitrary Units': '実在の任意単位',
'Receive Items': '物資を受領',
'Receive Shipment': '輸送を受け取る',
'Receive this shipment?': 'この物資送付を受領しますか?',
'Receive': '物資受領',
'Received By': '物資受領責任者',
'Received Item Details': '配送済み物資の詳細',
'Received Item deleted': '受領した物資を削除しました',
'Received Item updated': '受領された物資を更新しました',
'Received Shipment Details': '受け取った輸送の詳細',
'Received Shipment canceled and items removed from Inventory': '受領した輸送をキャンセルしました。物資は備蓄から削除されます',
'Received Shipment canceled': '受け取った輸送をキャンセルしました',
'Received Shipment updated': '受領済みの配送物の情報が更新されました',
'Received Shipments': '受諾した輸送物資',
'Received': '受領済み',
'Receiving and Sending Items': '送付 / 受領した救援物資',
'Recipient': '受け取り担当者',
'Recipients': '受信者',
'Recommendations for Repair and Reconstruction or Demolition': '再築や取り壊し、修繕を推奨',
'Record %(id)s created': 'レコード %(id)s が作成されました',
'Record Created': '作成されたレコード',
'Record Details': 'レコードの詳細',
'Record ID': 'レコードID',
'Record Saved': 'レコードが保存されました',
'Record added': 'レコードを追加しました',
'Record any restriction on use or entry': '利用や入力に当たっての制限事項を記載',
'Record deleted': 'レコードを削除しました',
'Record last updated': '最近更新されたレコード',
'Record not found!': 'レコードが見つかりませんでした',
'Record updated': 'レコードを更新しました',
'Record': 'レコード',
'Recording and Assigning Assets': '物資の割り当てと記録',
'Records': 'レコード',
'Recovery Request added': '遺体の回収要請を追加しました',
'Recovery Request deleted': '遺体回収要請を削除しました',
'Recovery Request updated': '遺体回収要請を更新しました',
'Recovery Request': '遺体回収の要請',
'Recovery Requests': '遺体回収要請',
'Recovery report added': '遺体回収レポートを追加しました',
'Recovery report deleted': '遺体回収レポートを削除しました',
'Recovery report updated': '遺体回収レポートを更新しました',
'Recovery': '遺体回収',
'Recruitment': '人材募集',
'Recurring Cost': '経常費用',
'Recurring cost': '経常費用',
'Recurring costs': '経常費用',
'Recurring': '採用活動',
'Red': '赤',
'Reference Document': '関連文書',
'Region Location': '地域のロケーション',
'Regional': '国際支部',
'Register Person into this Shelter': 'この避難所に人物情報を登録',
'Register Person': '人物情報を登録',
'Register them as a volunteer': 'ボランティアとして登録',
'Register': '登録',
'Registered People': '登録した人物情報',
'Registered users can': '登録済みのユーザは',
'Registering ad-hoc volunteers willing to contribute': '貢献を希望する臨時ボランティアを登録',
'Registration Details': '登録情報詳細',
'Registration Disabled!': '現在アカウント登録は受け付けていません。',
'Registration added': '登録を追加しました',
'Registration entry deleted': '登録を削除しました',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登録はまだ承認されていません (承認者:(%s)) -- 確認メールが届くまでもうしばらくお待ちください。',
'Registration key': '登録key',
'Registration successful': '登録に成功しました',
'Registration updated': '登録を更新しました',
'Registration': '登録',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '地域内で活動する全ての支援団体を追跡し、情報を保持します。これにより、各団体が活動している地域の情報だけでなく、それぞれの地域でどのような活動が行われているかも掌握することができます。',
'Rehabilitation/Long Term Care': 'リハビリ/長期介護',
'Reinforced masonry': 'コンクリートブロック壁',
'Rejected': '拒否されました',
'Reliable access to sanitation/hygiene items': 'サニタリ / 衛生用品の安定供給がある',
'Relief Item Catalog': '救援物資カタログ',
'Relief Item': '救援物資',
'Relief Items': '救援物資',
'Relief Team': '救援チーム',
'Relief': '救援',
'Religion': '宗教',
'Religious Leader': '宗教指導者',
'Religious': '宗教',
'Relocate as instructed in the <instruction>': '<instruction>の内容に従って再配置',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Featureの削除: 削除したいfeatureを選択し、削除キーを押下してください',
'Remove Person from Group': 'メンバシップを削除',
'Remove Person from Team': 'メンバシップを削除',
'Remove': '削除',
'Removed from Group': 'メンバシップを削除しました',
'Removed from Team': 'メンバシップを削除しました',
'Repeat your password': 'パスワードをもう一度入力してください',
'Replace if Master': 'マスターなら置換',
'Replace if Newer': '新しいものがあれば置き換える',
'Replace': '置換',
'Report Another Assessment...': '別のアセスメントをレポートする',
'Report Details': 'レポートの詳細',
'Report Resource': 'レポートリソース',
'Report Type': 'レポートタイプ',
'Report Types Include': 'レポートタイプを含む',
'Report a Problem with the Software': 'ソフトウェアの不具合を報告',
'Report added': 'レポートを追加しました',
'Report deleted': 'レポートを削除しました',
'Report my location': '自分の現在地を報告',
'Report that person missing': '行方不明者の情報を報告',
'Report the contributing factors for the current EMS status.': '現在の緊急受け入れ状態に影響している事由を記載',
'Report the contributing factors for the current OR status.': '現在の手術室の状況報告',
'Report the person as found': '人物の所在情報を報告',
'Report them as found': '発見として報告',
'Report them missing': '行方不明として報告',
'Report updated': 'レポートを更新しました',
'Report': 'レポート',
'Reporter Name': 'レポーターの氏名',
'Reporter': 'レポーター',
'Reporting on the projects in the region': 'この地域で展開しているプロジェクトのレポート',
'Reports': 'レポート',
'Request Added': '支援要請を追加しました',
'Request Canceled': '支援要請をキャンセルしました',
'Request Details': '支援要請の詳細',
'Request Item Details': '救援物資要請の詳細',
'Request Item added': '救援物資の要請を追加しました',
'Request Item deleted': '救援物資の要請を削除しました',
'Request Item updated': '救援物資の要請を更新しました',
'Request Item': '物資を要請',
'Request Items': '物資の要請',
'Request Status': '支援要請の状況',
'Request Type': '支援要請のタイプ',
'Request Updated': '支援要請を更新しました',
'Request added': '支援要請を追加しました',
'Request deleted': '支援要請を削除しました',
'Request for Role Upgrade': '上位権限の取得要求',
'Request updated': '支援要請を更新しました',
'Request': '支援要請',
'Request, Response & Session': '要求、応答、およびセッション',
'Requested By Site': '支援要請を行ったサイト',
'Requested By Warehouse': '倉庫からの要請',
'Requested By': '支援要求元',
'Requested Items': '支援要請が行われた物資',
'Requested by': '要求元',
'Requested on': 'に関する要請',
'Requested': '要求済み',
'Requester': '要請の実施者',
'Requestor': '要請者',
'Requests From': '支援要請フォーム',
'Requests for Item': '物資に関する要請',
'Requests': '支援要請',
'Requires Login!': 'ログインしてください。',
'Requires login': 'ログインが必要です',
'Rescue and recovery': '救出、あるいは遺体回収作業',
'Reset Password': 'パスワードのリセット',
'Reset form': 'フォームをクリア',
'Reset': 'リセット',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Featureのリサイズ: リサイズしたいfeatureを選択し、適切なサイズになるようドラッグしてください',
'Resolve Conflict': '競合の解決',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '"解決"リンクでは、新しい画面を開き、重複している情報を解決してデータベースを更新します',
'Resolve': '解決済みか',
'Resource Details': 'リソースの詳細',
'Resource added': 'リソースを追加しました',
'Resource deleted': 'リソースを削除しました',
'Resource updated': 'リソースを更新しました',
'Resource': 'リソース',
'Resources': 'リソース',
'Respiratory Infections': '呼吸器感染症',
'Response Details': '応答の詳細',
'Response added': '返答を追加しました',
'Response deleted': 'Responseを削除しました',
'Response updated': '返答を更新しました',
'Response': '対応',
'Responses': '対応',
'Restricted Access': 'アクセス制限中',
'Restricted Use': '制限された目的での使用',
'Restrictions': '制限',
'Results': '結果',
'Retail Crime': '小売犯罪',
'Retrieve Password': 'パスワードの取得',
'Rice': '米穀',
'Riot': '暴動',
'River Details': '河川の詳細',
'River added': '河川を追加しました',
'River deleted': '河川を削除しました',
'River updated': '河川を更新しました',
'River': '河川',
'Rivers': '河川',
'Road Accident': '道路障害',
'Road Closed': '道路(通行止め)',
'Road Conditions': '路面の状況',
'Road Delay': '道路遅延',
'Road Hijacking': '道路ハイジャック',
'Road Usage Condition': '道路の路面状況',
'Role Details': '権限の詳細',
'Role Name': '権限の名称',
'Role Required': '権限が必要',
'Role Updated': '権限を更新しました',
'Role added': '権限を追加しました',
'Role deleted': '権限を削除しました',
'Role updated': '権限を更新しました',
'Role': '権限',
'Role-based': '権限に基づいた',
'Roles Permitted': '許可された権限',
'Roles': '権限',
'Roof tile': '屋根瓦',
'Roofs, floors (vertical load)': '屋根、床板 (vertical load)',
'Roster': '名簿',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '地物の回転: 回転させたい地物を選択し、目的の位置に回転させるために関連付けられた点をドラッグします。',
'Row Choices (One Per Line)': '行の選択 (One Per Line)',
'Rows in table': 'テーブルの行',
'Rows selected': '行が選択されました',
'Run Functional Tests': '動作テストの実行',
'Run Interval': '実行間隔',
'Running Cost': 'ランニングコスト',
'SITUATION': '状況',
'Safe environment for vulnerable groups': '被災者にとって安全な環境である',
'Safety Assessment Form': '安全性アセスメントフォーム',
'Safety of children and women affected by disaster': '被災した女性と未成年が保護されている',
'Sahana Administrator': 'Sahana管理者',
'Sahana Blue': 'Sahana ブルー',
'Sahana Community Chat': 'Sahanaコミュニティチャット',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> その他 (Sahana Agasti, Ushahidi 等.)',
'Sahana Eden <=> Other': 'Sahana Eden <=> 他のシステム',
'Sahana Eden Disaster Management Platform': 'Sahana Eden 被災地支援情報共有プラットフォーム',
'Sahana Eden Website': 'Sahana Eden公式ページ',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organizations working in disaster management.': 'Sahana Edenは、災害復旧に関わる様々な支援団体が、お互いに協力しあうために存在します。',
'Sahana FOSS Disaster Management System': 'Sahana オープンソース 被災地情報共有システム',
'Sahana Green': 'Sahana グリーン',
'Sahana Login Approval Pending': 'Sahana ログインは承認待ちです',
'Sahana access granted': 'Sahanaへのアクセス権を付与',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: 新しい支援要請が行われました。ログインして、支援要請を実現できるか確認してください。',
'Salted Fish': '塩漬けの魚',
'Salvage material usable from destroyed houses': '全壊した家屋から回収した物品(使用可能)',
'Salvage material usable from destroyed schools': '全壊した校舎から回収した物品(使用可能)',
'Sanitation problems': '衛生設備に問題',
'Satellite Office': '現地活動拠点',
'Satellite': '衛星',
'Saturday': '土曜日',
'Save any Changes in the one you wish to keep': '残す方の候補地へ行った変更を保存します。',
'Save': '保存',
'Save: Default Lat, Lon & Zoom for the Viewport': 'デフォルト表示範囲の緯度,経度,ズームレベルを保存',
'Saved.': '保存しました',
'Saving...': '保存しています...',
'Scale of Results': '結果の規模',
'Schedule': 'スケジュール',
'School Closure': '学校閉鎖',
'School Lockdown': '学校の厳重封鎖',
'School Reports': '学校のレポート',
'School Teacher': '学校教師',
'School activities': '学校の活動',
'School assistance received/expected': '学校用支援品を受領済み/受領予定',
'School assistance': '学校の援助',
'School attendance': '学校へ出席者',
'School destroyed': '校舎全壊',
'School heavily damaged': '校舎の深刻な損壊',
'School tents received': '仮校舎用テントを受領',
'School tents, source': '仮校舎用テント、送付元',
'School used for other purpose': '校舎を他目的で利用中',
'School': '学校',
'School/studying': '学校/勉強',
'Schools': '学校',
'Search & List Bin Types': 'Bin Typeを検索して一覧表示',
'Search & List Bins': 'Binsを検索して一覧表示',
'Search & List Catalog': 'カタログを検索して一覧表示',
'Search & List Category': 'カテゴリを検索して一覧表示',
'Search & List Items': '救援物資を検索して一覧表示',
'Search & List Locations': 'ロケーションを検索して一覧表示',
'Search & List Site': 'Siteを検索して一覧表示',
'Search & List Sub-Category': 'サブカテゴリを検索して一覧表示',
'Search & List Unit': '単位を検索して一覧表示',
'Search Activities': '支援活動の検索',
'Search Activity Report': '支援活動レポートの検索',
'Search Addresses': '住所を検索',
'Search Aid Requests': '援助要請を検索',
'Search Alternative Items': 'その他のアイテムを検索',
'Search Assessment Summaries': 'アセスメントの要約を検索',
'Search Assessments': 'アセスメントを検索',
'Search Asset Assignments': '資産割り当ての検索',
'Search Assets': '資産の検索',
'Search Baseline Type': 'Baseline Typeを検索',
'Search Baselines': '基準値の検索',
'Search Brands': '銘柄を検索',
'Search Budgets': '予算を検索',
'Search Bundles': 'Bundleを検索',
'Search Catalog Items': '救援物資カタログを検索',
'Search Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog関係の検索',
'Search Checklists': 'チェックリストを検索',
'Search Cluster Subsectors': 'クラスタのサブセクタを検索',
'Search Clusters': 'クラスタを検索',
'Search Commitment Items': 'コミットされた救援物資の検索',
'Search Commitments': 'コミットの検索',
'Search Configs': '設定を検索',
'Search Contact Information': '連絡先情報を検索',
'Search Contacts': '連絡先を検索',
'Search Credentials': '証明書の検索',
'Search Distribution Items': '配給物資を検索',
'Search Distributions': '配給所を検索',
'Search Documents': 'ドキュメントを検索',
'Search Donors': '資金提供組織の検索',
'Search Existing Locations': '既存のロケーションを検索する',
'Search Feature Layers': 'Feature Layersの検索',
'Search Flood Reports': '洪水レポートの検索',
'Search Geonames': 'Geonamesの検索',
'Search Groups': 'グループの検索',
'Search Hospitals': '病院情報の検索',
'Search Identity': 'ID情報の検索',
'Search Images': '画像の検索',
'Search Impact Type': '被害の種類を検索',
'Search Impacts': '影響の検索',
'Search Incident Reports': 'インシデントレポートを検索',
'Search Incidents': 'インシデントの検索',
'Search Inventory Items': '備蓄物資を検索',
'Search Inventory Stores': '物資集積地点の検索',
'Search Item Catalog Category(s)': 'アイテムカタログカテゴリの検索',
'Search Item Catalog(s)': '救援物資カタログの検索',
'Search Item Categories': '救援物資カテゴリを検索',
'Search Item Packs': '物資のパックを検索',
'Search Item Sub-Category(s)': 'アイテムサブカテゴリの検索',
'Search Items': 'アイテムの検索',
'Search Keys': 'Keyの検索',
'Search Kits': 'Kitsの検索',
'Search Layers': 'レイヤの検索',
'Search Level 1 Assessments': 'レベル1アセスメントの検索',
'Search Level 2 Assessments': 'レベル2のアセスメントを検索',
'Search Locations': 'ロケーションの検索',
'Search Log Entry': 'ログエントリの検索',
'Search Map Profiles': '地図設定の検索',
'Search Markers': 'マーカーの検索',
'Search Members': 'メンバーの検索',
'Search Membership': 'メンバシップの検索',
'Search Memberships': 'メンバシップの検索',
'Search Metadata': 'メタデータの検索',
'Search Need Type': '需要タイプの検索',
'Search Needs': '必要な物資を検索',
'Search Notes': '追加情報を検索',
'Search Offices': 'オフィスの検索',
'Search Organizations': '団体の検索',
'Search Peer': '同期先を検索',
'Search Peers': 'データ同期先を検索',
'Search Personal Effects': 'Personal Effectsの検索',
'Search Persons': '人物情報の検索',
'Search Photos': '写真の検索',
'Search Positions': 'Positionsの検索',
'Search Problems': '問題の検索',
'Search Projections': '地図投影法の検索',
'Search Projects': 'プロジェクトの検索',
'Search Rapid Assessments': '被災地の現況アセスメントを検索',
'Search Received Items': '受領済み救援物資の検索',
'Search Received Shipments': '受信済みの出荷の検索',
'Search Records': 'レコードの検索',
'Search Recovery Reports': '遺体回収レポートを検索',
'Search Registations': '登録情報の検索',
'Search Registration Request': '登録要請を検索',
'Search Report': 'レポートの検索',
'Search Reports': 'レポートの検索',
'Search Request Items': '物資の要請を検索',
'Search Request': '支援要請の検索',
'Search Requested Items': '支援要請されている物資を検索',
'Search Requests': '支援要請の検索',
'Search Resources': 'リソースの検索',
'Search Responses': '検索の応答',
'Search Rivers': '河川を検索',
'Search Roles': '役割の検索',
'Search Sections': 'セクションの検索',
'Search Sectors': '活動分野を検索',
'Search Sent Items': '送付した物資を検索',
'Search Sent Shipments': '送信した出荷の検索',
'Search Service Profiles': 'サービスプロファイルの検索',
'Search Settings': '設定の検索',
'Search Shelter Services': '避難所での提供サービスを検索',
'Search Shelter Types': '避難所タイプの検索',
'Search Shelters': '避難所の検索',
'Search Shipment Transit Logs': '輸送履歴の検索',
'Search Shipment/Way Bills': '輸送費/渡航費の検索',
'Search Shipment<>Item Relation': '輸送と救援物資の関係性の検索',
'Search Site(s)': 'Siteの検索',
'Search Skill Types': 'スキルタイプの検索',
'Search Skills': 'スキルを検索',
'Search Solutions': '解決案の検索',
'Search Staff Types': 'スタッフタイプの検索',
'Search Staff': 'スタッフの検索',
'Search Status': '状態の検索',
'Search Storage Bin Type(s)': 'Storage Bin Typeの検索',
'Search Storage Bin(s)': 'Storage Bin(s)の検索',
'Search Storage Location(s)': '備蓄地点の検索',
'Search Subscriptions': '寄付申し込みを検索',
'Search Support Requests': '支援要求の検索',
'Search Tasks': 'タスクの検索',
'Search Teams': 'チームの検索',
'Search Themes': 'テーマの検索',
'Search Tickets': 'チケットの検索',
'Search Tracks': '追跡情報の検索',
'Search Twitter Tags': 'Twitterのタグを検索',
'Search Units': '単位の検索',
'Search Users': 'ユーザの検索',
'Search Volunteer Registrations': 'ボランティア登録の検索',
'Search Volunteers': 'ボランティアの検索',
'Search Warehouse Items': '倉庫の物資を検索',
'Search Warehouses': 'Warehousesの検索',
'Search and Edit Group': 'グループを検索して編集',
'Search and Edit Individual': '人物情報を検索して個別に編集',
'Search by ID Tag': 'IDタグで検索',
'Search for Items': '物資の検索',
'Search for a Hospital': '病院を探す',
'Search for a Location': '検索地域を指定します',
'Search for a Person': '人物を探す',
'Search for a Project': 'プロジェクトを探す',
'Search for a Request': '支援要請の検索',
'Search for a shipment received between these dates': 'ある期間内に受け取られた輸送を検索する',
'Search for an item by category.': 'カテゴリで物資を検索',
'Search for an item by text.': 'テキストで項目を検索',
'Search here for a person record in order to:': '人物情報を検索することで、以下の事柄を行うことができます。',
'Search messages': 'メッセージの検索',
'Search': '検索',
'Searching for different groups and individuals': '他のグループと個人を探す',
'Secondary Server (Optional)': 'セカンダリサーバ(オプション)',
'Seconds must be a number between 0 and 60': '秒には0-60の間の数字を記入してください',
'Seconds must be a number greater than 0 and less than 60': '秒は0から60の間で入力してください',
'Section Details': 'Sectionの詳細',
'Section deleted': 'Sectionを削除しました',
'Section updated': 'セクションを更新しました',
'Sections': 'セクション',
'Sector Details': '活動分野の詳細',
'Sector added': '活動分野を追加しました',
'Sector deleted': '活動分野を削除しました',
'Sector updated': '活動分野を更新しました',
'Sector': '活動分野',
'Sectors': '活動分野',
'Security Policy': 'セキュリティポリシー',
'Security Status': 'セキュリティステータス',
'Security problems': 'セキュリティーの問題',
'See unassigned recovery requests': 'まだ割り当てられていない遺体回収要請を見る',
'Seen': '発見情報あり',
'Select 2 potential locations from the dropdowns.': '候補地を2つ、ドロップダウンから選択します。',
'Select Items from the Request': '支援要請を基にアイテムを選択する',
'Select Items from this Inventory': '備蓄中の物資から選択',
'Select Language': '言語選択',
'Select Organization': '団体の選択',
'Select Photos': '写真の選択',
'Select a location': 'ロケーションを選択',
'Select a question from the list': 'リストから質問を選択してください',
'Select a range for the number of total beds': 'ベッド総数の範囲を選択',
'Select all that apply': '該当する項目を全て選択',
'Select an Organization to see a list of offices': '団体を選択すると、所属するオフィスが表示されます',
'Select an existing Location': '既に登録してあるロケーションを選択してください',
'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:': 'アセスメントと支援活動のギャップを解析するクラスタの層を選択:',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'オーバーレイを指定し、適切なアセスメントと支援活動を表示させてニーズを明確にします。',
'Select the person assigned to this role for this project.': 'この人物に、プロジェクト内の権限を担当させます。',
'Select the person associated with this scenario.': 'このタスクに関連する人物を選択してください。',
'Select to see a list of subdivisions.': '項目を選択すると、より細かい分類を選択できます。',
'Select to show this configuration in the Regions menu.': '範囲メニューで表示する構成を選択して下さい',
'Select': '選択',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'SMS送信時に、モデム、Tropoまたはゲートウェイのどちらを使用するかを選択',
'Selects whether to use the gateway or the Modem for sending out SMS': 'SMS送信時、モデムとゲートウェイのどちらを使用するか選択',
'Self Registration': '本人による登録',
'Self-registration': '本人による登録',
'Send Alerts using Email &/or SMS': '電子メールまたはSMSを使用してアラートを送信',
'Send Items': '物資を送付',
'Send Mail': 'メール送信',
'Send Message': 'メッセージを送る',
'Send Notification': '通知を送信',
'Send Shipment': '輸送を開始する',
'Send from %s': '依頼主( %s )',
'Send message': 'メッセージ送信',
'Send new message': '新規メッセージ送信',
'Send': '物資送付',
'Sends & Receives Alerts via Email & SMS': '電子メール/SMS 経由でアラート送信/受信',
'Senior (50+)': '高齢者 (50+)',
'Sensitivity': '感度',
'Sent Item Details': '送付した物資の詳細',
'Sent Item deleted': '輸送済み物資を削除しました',
'Sent Item updated': '送付した救援物資を更新しました',
'Sent Shipment Details': '送付物資の詳細',
'Sent Shipment canceled and items returned to Inventory': '送付処理した輸送がキャンセルされ、物資は倉庫に戻りました',
'Sent Shipment canceled': '輸送開始をキャンセルしました',
'Sent Shipment updated': '送信した物資が更新されました',
'Sent Shipments': '物資を送付しました',
'Sent': '送信',
'Separate latrines for women and men': 'トイレは男女別である',
'Separated children, caregiving arrangements': '親と離れた子供だちのための保育手配',
'Seraiki': 'セライキ',
'Serial Number': 'シリアルナンバー',
'Series': 'シリーズ',
'Server': 'サーバ',
'Service Catalog': 'サービスカタログ',
'Service or Facility': 'サービス、または施設',
'Service profile added': 'サービスプロファイルを追加しました',
'Service profile deleted': 'サービスプロファイルを削除しました',
'Service profile updated': 'サービスプロファイルを更新しました',
'Service': 'サービス',
'Services Available': '利用可能なサービス',
'Services': 'サービス',
'Setting Details': '設定の詳細',
'Setting added': '設定を追加しました',
'Setting deleted': '設定を削除しました',
'Setting updated': '設定を更新しました',
'Settings updated': '設定を更新しました',
'Settings were reset because authenticating with Twitter failed': 'Twitterの認証に失敗したため、設定をクリアします',
'Settings': '設定',
'Severe': '深刻',
'Severity': '深刻度',
'Severity:': '深刻度:',
'Share a common Marker (unless over-ridden at the Feature level)': 'マーカーの共有 (機能レイヤで上書きされない限り)',
'Shelter & Essential NFIs': '避難所/生活用品',
'Shelter Details': '避難所の詳細',
'Shelter Name': '避難所名称',
'Shelter Registry': '避難所登録',
'Shelter Service Details': '避難所サービスの詳細',
'Shelter Service added': '避難所サービスを追加しました',
'Shelter Service deleted': '避難所サービスを削除しました',
'Shelter Service updated': '避難所サービスを更新しました',
'Shelter Service': '避難所サービス',
'Shelter Services': '避難所サービス',
'Shelter Type Details': '避難所タイプの詳細',
'Shelter Type added': '避難所タイプを追加しました',
'Shelter Type deleted': '避難所タイプを削除しました',
'Shelter Type updated': '避難所サービスを更新しました',
'Shelter Type': '避難所タイプ',
'Shelter Types and Services': '避難所のタイプとサービス',
'Shelter Types': '避難所タイプ',
'Shelter added': '避難所を追加しました',
'Shelter deleted': '避難所を削除しました',
'Shelter updated': '避難所を更新しました',
'Shelter': '避難所',
'Shelter/NFI Assistance': '避難所 / 生活用品支援',
'Shelter/NFI assistance received/expected': '避難所 / 生活必需品の支援を受領済み、あるいは受領予定',
'Shelters': '避難所',
'Shipment Created': '輸送が作成されました',
'Shipment Details': '輸送の詳細',
'Shipment Items received by Inventory': '物資備蓄地点から送付された救援物資',
'Shipment Items sent from Inventory': '備蓄物資から輸送を行いました',
'Shipment Items': '救援物資の輸送',
'Shipment Transit Log Details': '輸送履歴の詳細',
'Shipment Transit Log added': '輸送履歴を追加しました',
'Shipment Transit Log deleted': '輸送履歴を削除しました',
'Shipment Transit Log updated': '輸送履歴を更新しました',
'Shipment Transit Logs': '輸送履歴',
'Shipment/Way Bill added': '輸送/移動費を追加しました',
'Shipment/Way Bills Details': '輸送/移動費の詳細',
'Shipment/Way Bills deleted': '輸送/移動費を削除しました',
'Shipment/Way Bills updated': '輸送/移動費を更新しました',
'Shipment/Way Bills': '輸送/移動費',
'Shipment<>Item Relation added': '輸送<>物資間の関係を追加しました',
'Shipment<>Item Relation deleted': '輸送<>アイテム間の関係を削除しました',
'Shipment<>Item Relation updated': '輸送<>物資間の関係を更新しました',
'Shipment<>Item Relations Details': '輸送<>物資間の関係詳細',
'Shipment<>Item Relations': '輸送<>物資間の関係',
'Shipments To': '輸送先',
'Shipments': '輸送',
'Shooting': '銃撃',
'Short Assessment': '簡易評価',
'Short Description': '概要',
'Show Checklist': 'チェックリストを表示',
'Show Details': '詳細を表示',
'Show Map': '地図の表示',
'Show Region in Menu?': '地域をメニューで表示しますか?',
'Show on map': '地図上に表示',
'Sign-up as a volunteer': 'ボランティアとして登録する',
'Sign-up for Account': 'アカウント登録',
'Sign-up succesful - you should hear from us soon!': '登録できました。すぐに連絡が送られます。',
'Sindhi': 'シンド語',
'Site Address': 'サイトの住所',
'Site Administration': 'このサイト自体の管理',
'Site Description': 'サイトの説明',
'Site Details': 'Siteの詳細',
'Site ID': 'サイトID',
'Site Location Description': 'サイト ロケーションの説明',
'Site Location Name': 'サイトロケーション名',
'Site Manager': 'Site 管理者',
'Site Name': 'Site の名前',
'Site added': 'サイトを追加しました',
'Site deleted': 'サイトを削除しました',
'Site updated': 'サイトを更新しました',
'Site': 'サイト',
'Site/Warehouse': 'サイト/倉庫',
'Sites': 'サイト',
'Situation Awareness & Geospatial Analysis': '広域情報の取得や、地理情報の分析を行ないます',
'Sketch': 'スケッチ',
'Skill Details': 'スキルの詳細',
'Skill Status': 'スキル状況',
'Skill Type Details': 'スキルタイプの詳細',
'Skill Type added': 'スキルタイプを追加しました',
'Skill Type deleted': 'スキルタイプを削除しました',
'Skill Type updated': 'スキルタイプを更新しました',
'Skill Types': 'スキルタイプ',
'Skill added': 'スキルを追加しました',
'Skill deleted': 'スキルを削除しました',
'Skill updated': 'スキルを更新しました',
'Skill': 'スキル',
'Skills': 'スキル',
'Slope failure, debris': '斜面崩壊・崩壊堆積物',
'Small Trade': '小規模取引',
'Smoke': '煙',
'Snapshot Report': 'スナップショットレポート',
'Snapshot': 'スナップショット',
'Snow Fall': '降雪',
'Snow Squall': '豪雪',
'Soil bulging, liquefaction': '土壌隆起・液状化',
'Solid waste': '固形廃棄物',
'Solution Details': '解決案の詳細',
'Solution Item': '解決案項目',
'Solution added': '解決案を追加しました',
'Solution deleted': '解決案を削除しました',
'Solution updated': '解決案を更新しました',
'Solution': '解決案',
'Solutions': '解決案',
'Some': '散見',
'Sorry - the server has a problem, please try again later.': 'すみません、サーバーに問題が発生しています。時間を置いてやり直してください。',
'Sorry that location appears to be outside the area of the Parent.': 'このロケーションは親属性のエリアの外に表示されます。',
'Sorry that location appears to be outside the area supported by this deployment.': 'すいません、この位置は、このデプロイメントでサポートされている領域の外です。',
'Sorry, I could not understand your request': '残念ながら、リクエストが理解できませんでした。',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '申し訳ありませんが、 MapAdmin 権限を持つユーザだけがロケーションのグループを作れます',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '申し訳ありませんが、ロケーションの編集を行うにはMapAdmin権限を持ったユーザである必要があります。',
'Sorry, something went wrong.': 'すいません、何か問題が発生しています。',
'Sorry, that page is forbidden for some reason.': 'すいません、都合により、このページは閲覧禁止です。',
'Sorry, that service is temporary unavailable.': 'すいません、このサービスは一時的に利用不可となっています。',
'Sorry, there are no addresses to display': 'すいません、表示する住所がありません',
'Source ID': '情報元ID',
'Source Time': '情報ソース入手時刻',
'Source Type': '情報ソース種別',
'Source': '情報元',
'Sources of income': '収入源',
'Space Debris': '宇宙廃棄物',
'Spanish': 'スペイン語',
'Special Ice': '特別な氷',
'Special Marine': '特別海上',
'Special needs': '特別な要求',
'Specialized Hospital': '専門病院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'ある人々やグループが見られるロケーションの中の特別な場所 (建物、部屋等)',
'Specific Location': '特定のロケーション',
'Specific locations need to have a parent of level': 'ロケーションを指定するには、そのロケーションの親属性指定が必要です',
'Specify a descriptive title for the image.': '画像の説明として一言タイトルをつけてください。',
'Specify the bed type of this unit.': 'この施設にある寝具の種別を指定してください',
'Specify the minimum sustainability in weeks or days.': '最短で何週間、あるいは何日以内に枯渇の可能性があるかを記載してください',
'Specify the number of available sets': '利用可能なセットの個数を入力してください',
'Specify the number of available units (adult doses)': '(成人が使用するとして)使用可能な個数を入力してください',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '使用可能な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Specify the number of sets needed per 24h': '24時間ごとに必要なセットの数を指定する',
'Specify the number of units (adult doses) needed per 24h': '(成人が使用するとして)24時間ごとに必要な個数を入力してください',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '24時間ごとに必要な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Spherical Mercator?': '球面メルカトル?',
'Spreadsheet Importer': 'スプレッドシートの取り込み',
'Spreadsheet uploaded': 'スプレッドシートがアップロードされました',
'Spring': '湧き水',
'Squall': 'スコール',
'Staff 2': 'スタッフ 2',
'Staff Details': 'スタッフの詳細',
'Staff Type Details': 'スタッフタイプの詳細',
'Staff Type added': 'スタッフタイプを追加しました',
'Staff Type deleted': 'スタッフタイプを削除しました',
'Staff Type updated': 'スタッフタイプを更新しました',
'Staff Types': 'スタッフ分類',
'Staff added': 'スタッフを追加しました',
'Staff deleted': 'スタッフを削除しました',
'Staff present and caring for residents': '上記施設にスタッフが配置され、ケアを行っている',
'Staff updated': 'スタッフを更新しました',
'Staff': 'スタッフ',
'Staffing': 'スタッフ配備',
'Stairs': '階段',
'Start date and end date should have valid date values': '開始日と終了日は正しい値である必要があります',
'Start date': '開始日',
'Start of Period': '開始期間',
'Stationery': '文房具',
'Status Report': 'ステータスレポート',
'Status added': '状況が追加されました',
'Status deleted': 'ステータスを削除しました',
'Status of clinical operation of the facility.': '施設で行われている診療の状況を記載してください。',
'Status of general operation of the facility.': '施設の運用状況情報を記載してください。',
'Status of morgue capacity.': '死体安置所の収容状況です。',
'Status of operations of the emergency department of this hospital.': 'この病院の緊急手術室の状態です。',
'Status of security procedures/access restrictions in the hospital.': '病院のアクセス制限/セキュリティ手順の状態。',
'Status of the operating rooms of this hospital.': 'この病院の手術室の状態。',
'Status updated': '状況を更新しました',
'Status': 'ステータス',
'Steel frame': '鉄骨',
'Storage Bin Details': '物資保管場所の詳細',
'Storage Bin Number': 'Storage Bin番号',
'Storage Bin Type Details': '物資保管タイプの詳細',
'Storage Bin Type added': '物資保管タイプを追加しました',
'Storage Bin Type deleted': 'Storage Binタイプを削除しました',
'Storage Bin Type updated': 'Storage Binタイプを更新しました',
'Storage Bin Type': 'Storage Binタイプ',
'Storage Bin Types': '収納箱のタイプ',
'Storage Bin added': 'Storage Binを追加しました',
'Storage Bin deleted': 'Storage Bin を削除しました',
'Storage Bin updated': 'Storage Bin を更新しました',
'Storage Bin': '物資貯蔵容器',
'Storage Bins': '物資保管場所',
'Storage Location Details': '備蓄地点の詳細',
'Storage Location ID': '備蓄地点ID',
'Storage Location Name': '備蓄地点名称',
'Storage Location added': '備蓄地点を追加しました',
'Storage Location deleted': '備蓄地点を削除しました',
'Storage Location updated': '備蓄地点を更新しました',
'Storage Location': '備蓄地点',
'Storage Locations': '備蓄地点',
'Store spreadsheets in the Eden database': 'Edenのデータベースにスプレッドシートを格納',
'Storeys at and above ground level': '階層、あるいは地面より上部',
'Storm Force Wind': '嵐の風の強さ',
'Storm Surge': '高潮',
'Stowaway': '密航者',
'Street (continued)': '住所 (続き)',
'Street Address': '住所',
'Street': 'ストリート',
'Strong Wind': '強風',
'Structural Hazards': '構造破壊',
'Structural': '構造的な',
'Sub Category': 'サブカテゴリ',
'Sub-type': 'サブタイプ',
'Subject': '件名',
'Submission successful - please wait': '送信に成功しました。しばらくお待ちください',
'Submission successful - please wait...': '送信に成功しました。しばらくお待ちください',
'Submit New (full form)': '(完全なフォームで)新しく投稿する',
'Submit New (triage)': '新しい (トリアージ) を追加',
'Submit New': '新規登録',
'Submit a request for recovery': '遺体回収要請を作成する',
'Submit new Level 1 assessment (full form)': 'レベル1のアセスメントを投稿する(完全なフォーム)',
'Submit new Level 1 assessment (triage)': '新しいレベル1アセスメント(トリアージ)を追加',
'Submit new Level 2 assessment': '新しいレベル2アセスメントの登録',
'Submit': '送信',
'Subscription Details': '寄付申し込みの詳細',
'Subscription added': '寄付申し込みを追加しました',
'Subscription deleted': '寄付申し込みを削除しました',
'Subscription updated': '寄付申し込みを更新しました',
'Subscriptions': '寄付申し込み',
'Subsistence Cost': '生存コスト',
'Suburb': '郊外',
'Sufficient care/assistance for chronically ill': '慢性疾患罹患者への十分なケア / 介護がある',
'Suggest not changing this field unless you know what you are doing.': 'よくわからない場合は、この項目を変更しないでください。',
'Summary by Administration Level': '管理レベルの概要',
'Summary': '要約',
'Sunday': '日曜',
'Supervisor': '監督者',
'Supplies': '支給品',
'Support Request': '支援要請',
'Support Requests': '支援の要請',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '危機管理の専門グループの助言を取り入れることで、救援活動の優先順位を作成しやすくします。',
'Sure you want to delete this object?': 'このオブジェクトを削除してもよろしいですか?',
'Surgery': '外科',
'Survey Answer Details': '調査回答詳細',
'Survey Answer added': '調査の回答を追加しました',
'Survey Answer deleted': '調査の回答を削除しました',
'Survey Answer updated': '調査回答を更新しました',
'Survey Answer': '調査回答',
'Survey Module': '調査モジュール',
'Survey Name': 'Survey 名',
'Survey Question Details': '調査項目の詳細',
'Survey Question Display Name': 'フィードバックの質問の表示名',
'Survey Question added': '調査の質問を追加しました',
'Survey Question deleted': '調査の質問を削除しました',
'Survey Question updated': 'Survey Questionを更新しました',
'Survey Question': '調査の質問',
'Survey Section Details': 'フィードバック項目の詳細',
'Survey Section Display Name': '調査項目の表示名',
'Survey Section added': '調査項目を追加しました',
'Survey Section deleted': 'フィードバック項目を削除しました',
'Survey Section updated': 'サーベイセクションを更新しました',
'Survey Section': '調査項目',
'Survey Series Details': 'Survey Seriesの詳細',
'Survey Series Name': 'フィードバックシリーズ名',
'Survey Series added': '一連の調査を追加しました',
'Survey Series deleted': '一連の調査を削除しました',
'Survey Series updated': '連続調査を更新しました',
'Survey Series': '一連の調査',
'Survey Template Details': '調査テンプレートの詳細',
'Survey Template added': 'Surveyテンプレートを追加しました',
'Survey Template deleted': '調査テンプレートを削除しました',
'Survey Template updated': '調査のテンプレートを更新しました',
'Survey Template': '調査テンプレート',
'Survey Templates': '調査のテンプレート',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開発時にこのスイッチをONにすることで、CSS/Javascriptファイルの診断を行なえます。',
'Symbology': 'コード',
'Sync Conflicts': 'データ同期中に競合が発生しました',
'Sync History': 'データ同期履歴',
'Sync Now': '今すぐ同期',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'データ同期先とは、情報の同期を行うインスタンスやピアのことを指します。(Sahana EdenやSahanaAgasti、Ushahidiなどと同期可能です) 同期先の登録や検索、登録情報の変更を行う際は、リンクをクリックしてページを表示してください。',
'Sync Partners': 'データ同期パートナー',
'Sync Pools': 'プールの同期',
'Sync Schedule': 'データ同期スケジュール',
'Sync Settings': 'データ同期設定',
'Sync process already started on ': 'データ同期プロセスは既に開始しています',
'Synchronisation History': 'データ同期履歴',
'Synchronisation': '同期',
'Synchronization Conflicts': '同期のコンフリクト',
'Synchronization Details': 'データ同期の詳細',
'Synchronization History': 'データ同期履歴',
'Synchronization Peers': 'データ同期先',
'Synchronization Settings': 'データ同期設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'データ同期を使用すると、他の端末とデータを共有し、自身のデータを最新の状態に更新することができます。このページには、SahanaEdenにおいてデータ同期を行う方法が記載されています。',
'Synchronization not configured.': 'データ同期が設定されていません',
'Synchronization settings updated': 'データ同期設定を更新しました',
'Synchronization': 'データ同期',
'Syncronisation History': 'データ同期履歴',
'Syncronisation Schedules': 'データ同期スケジュール',
'System allows the General Public to Report Incidents & have these Tracked.': 'システムを使うことで、一般市民によるインシデントの報告、および報告されたインシデントの追跡を行うことができます。',
'System allows the tracking & discovery of Items stored in Locations.': 'システムにより、物資がどこで保持されているかを追跡、明確化することができます。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'このシステムは、支援団体、個々の支援者、政府職員、そして避難所に移動した人々の間で、援助の需要と供給の調整を図るための、オンラインの中央データベースです。このシステムを用いて、利用可能な資源を、需要を満たすように、有効かつ効率的に割り当てることができます。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'この仕組みでは、災害地域の全てのボランティア情報を提供します。ボランティアの活動場所に加え、そこで提供する支援内容も提供します。',
'TMS Layers': 'TMSレイヤ',
'Table name': 'テーブル名',
'Tags': 'タグ',
'Take shelter in place or per <instruction>': '場所や<instruction>ごとに避難してください',
'Task Details': 'タスクの詳細',
'Task List': 'タスク一覧',
'Task Status': 'タスクの状況',
'Task added': 'タスクを追加しました',
'Task deleted': 'タスクを削除しました',
'Task status': 'タスク状況',
'Task updated': 'タスクを更新しました',
'Tasks': 'タスク',
'Team Description': 'チーム概要',
'Team Details': 'チームの詳細',
'Team Head': 'チーム代表者',
'Team Id': 'チームID',
'Team Leader': 'チームリーダー',
'Team Member added': 'チームメンバーを追加しました',
'Team Members': 'チームメンバー',
'Team Name': 'チーム名',
'Team Type': 'チームタイプ',
'Team added': 'チームを追加しました',
'Team deleted': 'チームを削除しました',
'Team updated': 'チームを更新しました',
'Team': 'チーム',
'Teams': 'チーム',
'Technical testing only, all recipients disregard': '技術検証のみです。受信者は全員無視してください',
'Telecommunications': '通信・情報',
'Telephone': '電話',
'Telephony': '電話',
'Temp folder %s not writable - unable to apply theme!': '一時フォルダ%sが書き込み不可になっています。テーマを適用できません。',
'Template file %s not readable - unable to apply theme!': 'テンプレートファイル %s が読み込み不可になっています。テーマを適用できません。',
'Templates': 'テンプレート',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '国内における第五段階管理部門を示す用語(例: 郵便番号の下位部分)。このレベルは通常使われません。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '国内で第4の行政区域を示す用語 (例えば村、地区)',
'Term for the primary within-country administrative division (e.g. State or Province).': '国内で最大の行政区域を示す用語 (例えば州や都道府県)',
'Term for the secondary within-country administrative division (e.g. District).': '国内で二番目の管理部門の用語 (例: 区)',
'Term for the third-level within-country administrative division (e.g. City or Town).': '国内で三番目の管理部門を示す用語 (例: 市や町)',
'Term for the top-level administrative division (typically Country).': '最上位の統制区域を示す用語 (一般的には国)',
'Territorial Authority': '地方機関',
'Terrorism': 'テロリズム',
'Tertiary Server (Optional)': '三番目のサーバ(オプション)',
'Test Results': 'テスト結果',
'Text Color for Text blocks': 'テキストブロックのテキスト色',
'Text before each Text Field (One per line)': 'テキストフィールドの前のテキスト (一行に一つ)',
'Text in Message': 'メッセージのテキスト',
'Text in Message: ': 'メッセージのテキスト: ',
'Text': 'テキスト',
'Thanks for your assistance': 'ご協力ありがとうございます',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"query"は"db.table1.field1==\'value\'"のような条件です。SQL JOINの"db.table1.field1 == db.table2.field2"結果のようなものです。',
'The Area which this Site is located within.': 'このサイトが含まれる地域',
'The Assessments module allows field workers to send in assessments. 2 different options are provided here currently:': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。現在は、2種類のオプションが提供されています。',
'The Assessments module allows field workers to send in assessments.': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。',
'The Author of this Document (optional)': 'この文書の作成者氏名(オプション)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'ビルアセスメントモジュールではビルの安全性評価を行います (例:地震の後など)',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物/グループの現在地は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The District for this Report.': 'このレポートが関連する地区。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '承認依頼が送信されるメールアドレス(通常は個人のメールアドレスではなく、グループのメールアドレス)。この欄が空白の場合、ドメインが一致すれば依頼は自動的に承認されます',
'The Group whose members can edit data in this record.': 'このグループのメンバーは、レコード上のデータを修正することができます。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '一般ユーザは、インシデント・レポートシステムからインシデントを報告し、その結果を表示させることができます。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Siteのロケーション、(レポート用で)おおまかな場合と、(地図表示用で)正確な場合とがあります。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物がやって来たロケーションで、報告のためのだいたいの場所、あるいは地図で表示するための正確な緯度経度です。使用可能なロケーションを検索するには最初の数文字を入力してください',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物が向かう場所は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The Media Library provides a catalog of digital media.': 'メディア・ライブラリーは、デジタル・メディアの一覧を提供します。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'メッセージング・モジュールは、SAHANAシステムのコミュニケーション中心となります。災害の前、災害中または災害の後に様々なグループや個人にSMSとeメールで警報やメッセージを送ります。',
'The Office this record is associated with.': 'このレコードに関連するオフィス',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '団体情報を登録することで、被災地域で活動するすべての団体の活動を追跡します。また、それぞれの地域において、彼らがどこで活動しているかという情報だけでなく、彼らが各地で提供しているプロジェクトの範囲についての情報も提供します。',
'The Organization this record is associated with.': 'このレコードに関連する団体',
'The Organization which is funding this Activity.': 'この支援活動に資金を提供する団体',
'The Person currently filling this Role.': '現在この役割に属している人物',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'プロジェクト追跡モジュールでは、支援活動(アクティビティ)を作成し、必要な物資 / サービスのギャップを満たすことを目的とします。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '被災地の現況アセスメントには、専門団体によって行われたレポートの結果が格納されます。',
'The Request this record is associated with.': 'このレコードに関連する支援要請',
'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '支援要請管理システムは、全ての支援団体、救援者、政府職員、および避難所に暮らす避難者たち自身が、要求に応じて援助の供給を調整できる中央のオンラインデータベースです。支援要請管理システムは効果的かつ効率的に要求を満たすことができる利用可能な資源の割り当てを可能にします。',
'The Role this person plays within this Office/Project.': 'オフィス/プロジェクトにおける役割',
'The Role this person plays within this hospital.': '病院内における役割',
'The Role to which this Role reports.': 'この権限の報告先となる権限',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所登録は、避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'The Shelter this Request is from (optional).': '要請を行った避難所(オプション)',
'The Shelter this person is checking into.': 'この人物がチェックインした避難所',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '地図を用いてレイヤを利用できる WMS サービスの GetCapabilities の URL。',
'The URL of your web gateway without the post parameters': 'ポストパラメータを指定しないWebゲートウェイのURL',
'The URL to access the service.': 'サービスにアクセスするためのURL',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府UUID|政府がこの施設に割り当てている汎用一意識別子(UUID)。',
'The area is ': 'この地域は',
'The attribute within the KML which is used for the title of popups.': 'このKML属性はポップアップのタイトルに使われます。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KMLで定義されている属性はポップアップの本文に使用されます。(各属性ごとに半角スペースで分割して記載してください)',
'The body height (crown to heel) in cm.': '頭頂からかかとまでの身長(単位はcm)',
'The category of the Item.': 'この救援物資のカテゴリです',
'The contact person for this organization.': '団体の代表窓口',
'The country the person usually lives in.': 'この人物が普段の生活を営む国',
'The default policy for data import from this peer.': 'このデータ同期先からデータをインポートする際のデフォルト設定。',
'The descriptive name of the peer.': 'データ同期先のわかりやすい名称',
'The duplicate record will be deleted': '重複したレコードは削除されます',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '入力した単位をこのユニットにリンクします。例えば、mをメートルとする場合、(存在するなら) kilometer を選択して、乗数に値 0.001 を入力します。',
'The first or only name of the person (mandatory).': '人物の苗字(必須)。 外国籍の方等については避難所等での管理上の主たる表記/順に従ってください。',
'The following modules are available': '利用可能なモジュールは以下のとおりです。',
'The hospital this record is associated with.': 'このレコードに関連のある病院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'ある特定のプロジェクトや、人々、市町村への物資または、交付コード等のついた特定区域への寄付等のは物資は、送付されることになっています。',
'The language to use for notifications.': '通知に使用する言語',
'The language you wish the site to be displayed in.': 'このサイトを表示するための言語',
'The last known location of the missing person before disappearance.': '行方不明者が最後に目撃された場所',
'The length is ': '長さは',
'The list of Brands are maintained by the Administrators.': '銘柄一覧の整備は、管理者によって可能です',
'The list of Item categories are maintained by the Administrators.': '供給物資カテゴリの一覧は、管理者によってメンテナンスされています。',
'The name to be used when calling for or directly addressing the person (optional).': '電話をかける際など、直接連絡をとりたい場合に使われる名前(オプション)',
'The next screen will allow you to detail the number of people here & their needs.': '次の画面では、人数および必要な物資/サービスの詳細を確認できます。',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '次のスクリーンで、項目の詳細なリストと量を入力できる場合があります。',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '元の物資一つと同じだけの、代替品の測定単位での数量',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '表示している地図の周辺タイルをダウンロードする数。0は最初のページの読み込みがより早い事を意味し、数字を大きくすると視点をパンした際に表示がより早くなります。',
'The person at the location who is reporting this incident (optional)': '現地からこのインシデントを報告した人物(オプション)',
'The person reporting about the missing person.': '行方不明者情報の提供者。',
'The person reporting the missing person.': '行方不明者を報告した人',
'The post variable containing the phone number': '電話番号を含む post 変数',
'The post variable on the URL used for sending messages': 'メッセージ送信に使用するURLのPOST変数',
'The post variables other than the ones containing the message and the phone number': 'メッセージや電話番号以外を含むpost変数',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'モデムが接続されているシリアルポート - Linuxでは /dev/ttyUSB0 等、Windowsでは com1, com2 等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーからの応答がありませんでした。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーから不正な応答が返ってきました。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'シンプルポリシーでは、匿名ユーザーによるデータの閲覧、および、登録ユーザーによる編集が許可されます。完全版ポリシーでは、個々のテーブルやレコードに対して管理権限を設定することができます。詳細はmodels/zzz.pyを参照してください。',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '対象のイベントはもはや脅威や懸念を引き起こしません。今後実施すべきアクションは<instruction>に記述されています。',
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'あなたのタイムゾーンとUTCとの差を、東では+HHMMで、西では-HHMMで指定してください',
'The title of the WMS Browser panel in the Tools panel.': '[ツール]パネルのWMS Browserパネルのタイトル',
'The token associated with this application on': 'このアプリケーションが関連づけられているトークン',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '一意のデータ同期先識別子です。データ同期先がSahana Edenシステムではない場合は、空白にしておくことで自動的に割り当てが行われます。',
'The unique identifier which identifies this instance to other instances.': 'このインスタンスを他のインスタンスと区別するための固有識別子',
'The way in which an item is normally distributed': '物資が配給される際の通常経路',
'The weight in kg.': '重量(単位:kg)',
'The': ' ',
'Theme Details': 'テーマの詳細',
'Theme added': 'テーマを追加しました',
'Theme deleted': 'テーマを削除しました',
'Theme updated': 'テーマを更新しました',
'Theme': 'テーマ',
'Themes': 'テーマ',
'There are errors': 'エラーが発生しました',
'There are multiple records at this location': 'このロケーションに複数のレコードが存在します',
'There are not sufficient items in the Inventory to send this shipment': 'この輸送を開始するために十分な量の物資が備蓄されていません',
'There is no address for this person yet. Add new address.': 'この人物の住所がまだありません。新しい住所を入力してください',
'There was a problem, sorry, please try again later.': '問題が発生しています。すみませんが、時間を置いてからやり直してください。',
'These are settings for Inbound Mail.': '電子メール受信箱の設定です',
'These are the Incident Categories visible to normal End-Users': '普通のユーザーが見ることができるインシデント一覧です',
'These are the default settings for all users. To change settings just for you, click ': 'これらは、全てのユーザーのデフォルト設定です。個人用の設定を変更するには、以下をクリックしてください。',
'These need to be added in Decimal Degrees.': 'これらは、十進角で追加する必要があります。',
'They': 'それら',
'This Group has no Members yet': 'メンバはまだ登録されていません',
'This Team has no Members yet': 'メンバはまだ登録されていません',
'This appears to be a duplicate of ': 'これは、以下のものと重複しているようです。',
'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).': '住所か、あるいは簡単な記述(ガソリンスタンドの隣、など)を記載しています。',
'This email address is already in use': 'このメールアドレスは使用されています',
'This file already exists on the server as': 'このファイルは別の名前でサーバに既に存在しています : ',
'This form allows the administrator to remove a duplicate location.': '管理者はこのフォームを使うことで、重複したロケーションデータを削除できます。',
'This is the way to transfer data between machines as it maintains referential integrity.': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。...重複したデータは最初に手動で削除する必要があります。',
'This might be due to a temporary overloading or maintenance of the server.': 'サーバーが一時的に過負荷状態になっているか、あるいはメンテナンスを行っています。',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '過去に行ったデータ同期履歴を表示します。以下のリンクをクリックしてください。',
'This screen allows you to upload a collection of photos to the server.': 'この画面では、複数の画像をサーバーにアップロードすることができます。',
'This shipment has already been received.': '輸送が開始され、物資が受領されました',
'This shipment has already been sent.': '輸送が開始され、送付されました',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'この輸送は受領されていません。 - まだ編集可能であり、キャンセルされてはいません',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': '輸送はまだ開始されていませんが、キャンセルされてはいません。編集可能です。',
'This shipment will be confirmed as received.': 'この輸送された物資は、受信済み扱いになります',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'この値はその地点の外側までの距離の小さなマウントを追加します。この値が無い場合は、一番外側の地点が境界ボックスになり、表示されない可能性があります。',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'この値はこの地域を表示する時に使う最小の幅と高さを示します。この値がない場合、ある単一の地点を表示するときにその周辺の範囲は表示されません。地図が表示された後では、好きな大きさに拡大・縮小できます。',
'Thunderstorm': '雷雨',
'Thursday': '木曜日',
'Ticket Details': 'チケットの詳細',
'Ticket ID': 'チケットID',
'Ticket added': 'チケットを追加しました',
'Ticket deleted': 'チケットを削除しました',
'Ticket updated': 'チケットを更新しました',
'Ticket': 'チケット',
'Ticketing Module': 'チケット発行モジュール',
'Tickets': 'チケット',
'Tilt-up concrete': 'ティルトアップ式コンクリート',
'Timber frame': '木造',
'Time needed to collect water': '水の確保に必要な時間',
'Time of Request': '要求発生時刻',
'Timeline Report': 'タイムラインレポート',
'Timeline': 'タイムライン',
'Timestamp': 'タイムスタンプ',
'Title': 'タイトル',
'To Location': '送付先ロケーション',
'To Organization': '送付先団体',
'To Person': '送付先人物情報',
'To Site': '送付先サイト',
'To begin the sync process, click the button on the right => ': '右のボタンを押すと、データ同期が開始されます。',
'To begin the sync process, click this button => ': 'このボタンを押すと、データ同期が開始されます。=>',
'To create a personal map configuration, click ': '個人用の地図設定を作成するにはクリックしてください',
'To delete': '削除する側',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'OpenStreetMapを編集する際は、models/000_config.pyで定義されている設定を編集してください',
'To submit a new job, use the': 'jobを新規送信するには、以下を使用してください。',
'To variable': '変数に',
'To': ' ',
'Tools': 'ツール',
'Tornado': '竜巻',
'Total # of Beneficiaries Reached ': '支援が到達した受益者の合計数 ',
'Total # of Target Beneficiaries': '受益対象者の合計人数',
'Total # of households of site visited': '訪問した世帯数',
'Total Beds': '合計ベッド数',
'Total Beneficiaries': '受益者の総数',
'Total Cost per Megabyte': 'メガバイト毎の合計費用',
'Total Cost per Minute': '一分毎の合計費用',
'Total Households': '総世帯数',
'Total Monthly Cost': '月額総計',
'Total Monthly Cost: ': '月毎の費用の合計: ',
'Total Monthly': '月ごとの合計',
'Total One-time Costs': '1回毎の費用総計',
'Total Persons': '合計者数',
'Total Recurring Costs': '経常費用総計',
'Total Unit Cost': '単価合計',
'Total Unit Cost: ': '単価合計: ',
'Total Units': '総数',
'Total gross floor area (square meters)': '延面積(平方メートル)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'この病院のベッド数総計。日時レポートにより、自動的に更新されます。',
'Total number of houses in the area': 'この地域の家屋総数',
'Total number of schools in affected area': '被災地内の学校総数',
'Total population of site visited': '訪問地域の総人口数',
'Total': '合計数',
'Totals for Budget:': '予算の合計:',
'Totals for Bundle:': 'Bundleの合計:',
'Totals for Kit:': 'Kitの合計:',
'Tourist Group': '旅行者グループ',
'Town': '町',
'Traces internally displaced people (IDPs) and their needs': '国内の避難している人(IDP)と彼らの必要としている物資/サービスの追跡',
'Tracing': '履歴の追跡',
'Track Details': '追跡情報の詳細',
'Track deleted': '追跡情報を削除しました',
'Track updated': '追跡情報を更新しました',
'Track uploaded': '追跡情報をアップデートしました',
'Track': '追跡情報',
'Tracking of Projects, Activities and Tasks': 'プロジェクトや支援活動、タスクの追跡',
'Tracking of basic information on the location, facilities and size of the Shelters': '避難所の基本情報(場所、施設、規模等)を追跡',
'Tracks requests for aid and matches them against donors who have pledged aid': '支援要請を管理し、救援物資の提供者とマッチングします。',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '避難所のロケーション、配置、収容能力と被災者の状態を追跡します。',
'Tracks': 'トラック',
'Traffic Report': 'トラフィックレポート',
'Transfer': '輸送',
'Transit Status': '輸送状態',
'Transit': '移動中の立ち寄り',
'Transit. Status': '輸送状態',
'Transition Effect': '推移への影響',
'Transparent?': '透明ですか?',
'Transportation assistance, Rank': '移動 / 輸送支援、ランク',
'Trauma Center': '心的外傷センター',
'Travel Cost': '移動費',
'Tree': '樹木',
'Tropical Storm': '熱帯低気圧',
'Tropo Messaging Token': 'Tropo メッセージのトークン',
'Tropo Settings': 'Tropo 設定',
'Tropo Voice Token': 'Tropo 音声トークン',
'Tropo settings updated': 'Tropo 設定を更新しました',
'Truck': 'トラック',
'Try checking the URL for errors, maybe it was mistyped.': '入力したURLに間違いがないか確認してください。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'ページの再読み込みを行うか、あるいはアドレスバーに直接URLを入力してみてください。',
'Try refreshing the page or hitting the back button on your browser.': 'ページを再読込するか、ブラウザの[戻る]ボタンを押してください。',
'Tsunami': '津波',
'Tuesday': '火曜日',
'Twitter ID or #hashtag': 'Twitter ID あるいは #ハッシュタグ',
'Twitter Settings': 'Twitter設定',
'Type of Construction': '建物の種類',
'Type of cause': '原因のタイプ',
'Type of latrines': 'トイレの種類',
'Type of place for defecation': '排泄用地の種類',
'Type of water source before the disaster': '災害発生前の水の確保方法',
'Type': 'タイプ',
'Types of health services available': '利用可能な健康サービスの種別',
'Types of water storage containers available': '利用可能な水貯蔵容器の種別',
'UID': 'ユニークID',
'UN': '国連',
'UTC Offset': 'UTC(世界標準時刻)との差',
'Unable to parse CSV file!': 'CSVファイルをパースできません。',
'Understaffed': '人員不足',
'Unidentified': '詳細不明',
'Unit Bed Capacity': 'ベッド収容数',
'Unit Cost': '単価',
'Unit Details': '単位の詳細',
'Unit Name': '単位名',
'Unit Set': '単位の設定',
'Unit Short Code for e.g. m for meter.': '単位の略称、例えばメートルはmと表記。',
'Unit added': '単位を追加しました',
'Unit deleted': '単位を削除しました',
'Unit of Measure': '1個口の内訳',
'Unit updated': '単位を更新しました',
'Unit': '単位',
'Units of Measure': '測定単位',
'Units': '単位',
'Unknown Peer': '登録に無いデータ同期先',
'Unknown type of facility': '施設規模不明',
'Unknown': '不明',
'Unreinforced masonry': '補強されていない石造建築物',
'Unresolved Conflicts': '未解決のデータ競合',
'Unsafe': '危険な',
'Unselect to disable the modem': 'モデムを無効化するにはチェックを外す',
'Unsent': '未送信',
'Unsupported data format!': 'サポートされていないデータフォーマットです。',
'Unsupported method!': 'サポートされていないメソッドです。',
'Unsupported method': 'サポートされていないメソッドです',
'Update Activity Report': '支援活動レポートの更新',
'Update Cholera Treatment Capability Information': 'コレラ対策能力情報を更新',
'Update Import Job': 'Import Jobの更新',
'Update Request': '支援要請を更新',
'Update Service Profile': 'サービスプロファイルの更新',
'Update Task Status': 'タスク状況の更新',
'Update Unit': '単位の更新',
'Update if Master': 'マスターサイトなら更新する',
'Update if Newer': '新しいものがあれば更新する',
'Update your current ordered list': '現在の順序付きリストの更新',
'Update': '更新',
'Upload Photos': '写真のアップロード',
'Upload Spreadsheet': 'スプレッドシートのアップロード',
'Upload Track': '追跡情報のアップロード',
'Upload a Spreadsheet': 'スプレッドシートをアップロード',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '画像ファイルをアップロード(bmp,gif,jpeg,png) 最大300x300ピクセル',
'Upload an image file here.': '画像ファイルをここにアップロードしてください',
'Upload an image, such as a photo': '写真などのイメージをアップロードしてください',
'Upload': 'アップロード',
'Urban Fire': '都市火災',
'Urban area': '市街地',
'Urdu': 'ウルドゥー語',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '複雑なクエリを構築するには、ANDは (...)&(...) を、ORは (...)|(...) を、NOTは ~(...) を使用してください。',
'Use default': 'デフォルト値を使用',
'Use these links to download data that is currently in the database.': 'これらのリンクを使用して、現在データベースにあるデータをダウンロードします。',
'Use this space to add a description about the Bin Type.': 'Bin Typeに関する説明は、このスペースに記載してください。',
'Use this space to add a description about the site location.': 'このスペースを使って、サイトの位置の説明を追加してください。',
'Use this space to add a description about the warehouse/site.': '倉庫/Siteに関する説明は、このスペースに記載してください。',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Site/倉庫に関する追加情報を記載するには、このスペースを使用してください。',
'Used to import data from spreadsheets into the database': 'スプレッドシートからデータベースにデータをインポートするために使われます',
'User %(first_name)s %(last_name)s Approved': '%(first_name)s %(last_name)s のユーザー登録が承認されました',
'User %(id)s Logged-in': 'ユーザー %(id)s がログインしています',
'User %(id)s Logged-out': 'ユーザー %(id)s がログアウトしました',
'User %(id)s Profile updated': 'ユーザ %(id)s のプロファイルを更新しました',
'User %(id)s Registered': 'ユーザー%(id)sを登録しました',
'User Account has been Disabled': 'ユーザアカウントが無効になっています',
'User Details': 'ユーザーの詳細',
'User ID': 'ユーザーID',
'User Management': 'ユーザー管理',
'User Profile': 'ユーザープロファイル',
'User Requests': 'ユーザー要求',
'User Updated': 'ユーザーを更新しました',
'User added': 'ユーザーを追加しました',
'User already has this role': 'この権限のあるユーザー',
'User deleted': 'ユーザーを削除しました',
'User updated': 'ユーザーを更新しました',
'User': 'ユーザー',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'データ同期先との認証に使うユーザ名。HTTPベーシック認証のみサポートしています。',
'Username': 'ユーザー名',
'Users removed': 'ユーザーを削除しました',
'Users': 'ユーザー',
'Usual food sources in the area': 'この地域の普段の食料調達方法',
'Utilities': 'ユーティリティ',
'Utility, telecommunication, other non-transport infrastructure': 'ユーティリティ、通信、その他のインフラ設備(交通以外)',
'Vacancies': '欠員',
'Value': '値',
'Various Reporting functionalities': '多種多様な報告を行う機能',
'Vehicle Crime': '車両犯罪',
'Vehicle Types': '車両の種別',
'Vehicle': '車両',
'Vendor': 'ベンダー',
'Verification Email sent - please check your email to validate. If you do not receive this email please check you junk email or spam filters': 'メールアドレス確認用のメールを送信しました。メールに記載された確認用URLにアクセスしてください。もしメールが届かない場合迷惑メールフォルダに入ってしまっている可能性がありますのでご確認ください。',
'Verification Status': '認証ステータス',
'Verified': '認証済み',
'Verified?': '認証(ログイン)できません.メールアドレス・パスワードを確認してください.',
'Verify Password': 'パスワード再確認',
'Verify password': 'パスワードの確認',
'Version': 'バージョン',
'Very High': '非常に高い',
'View Alerts received using either Email or SMS': '電子メールまたはSMSで受信したアラートの閲覧',
'View Fullscreen Map': '地図をフルスクリーン表示',
'View Image': '画像の閲覧',
'View On Map': '地図上で閲覧',
'View Outbox': '送信箱の表示',
'View Picture': '写真の表示',
'View Requests for Aid': '援助要請を閲覧',
'View Settings': '設定の確認',
'View Tickets': 'チケットの閲覧',
'View and/or update their details': '詳細の閲覧および更新',
'View or update the status of a hospital.': '病院のステータスの閲覧と更新',
'View pending requests and pledge support.': '処理中の要求と寄付サポートの閲覧',
'View the hospitals on a map.': '病院の場所を地図上で表示します。',
'Village Leader': '村長',
'Village': '村落',
'Visible?': '表示しますか?',
'Visual Recognition': '画像認識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山活動',
'Volume - Fluids': '流量 - 液状物',
'Volume - Solids': '流量 - 固形物',
'Volume Capacity': '容量',
'Volume/Dimensions': '容量/外形寸法',
'Volunteer Data': 'ボランティアデータ',
'Volunteer Details': 'ボランティアの詳細',
'Volunteer Management': 'ボランティアの管理',
'Volunteer Project': 'ボランティアプロジェクト',
'Volunteer Registration': 'ボランティア登録',
'Volunteer Registrations': 'ボランティア登録',
'Volunteer Request': 'ボランティア要請',
'Volunteer added': 'ボランティアを追加しました',
'Volunteer deleted': 'ボランティアを削除しました',
'Volunteer details updated': 'ボランティアの詳細を更新しました',
'Volunteer registration added': 'ボランティア登録を追加しました',
'Volunteer registration deleted': 'ボランティア登録を削除しました',
'Volunteer registration updated': 'ボランティア登録を更新しました',
'Volunteers were notified!': 'ボランティアに通知されました',
'Volunteers': 'ボランティア',
'Vote': '投票',
'Votes': '投票',
'WASH': '除染',
'WMS Browser Name': 'WMSブラウザ名',
'WMS Browser URL': 'WMSブラウザのURL',
'Walking Only': '徒歩のみ',
'Walking time to the health service': '医療サービス提供所までの徒歩時間',
'Wall or other structural damage': '壁やその他の構造の損傷',
'Warehouse Details': '倉庫の詳細',
'Warehouse Item Details': '倉庫物資の詳細',
'Warehouse Item added': '倉庫物資を追加しました',
'Warehouse Item deleted': '倉庫内物資を削除しました',
'Warehouse Item updated': '倉庫物資を更新しました',
'Warehouse Items': '倉庫に備蓄中の物資',
'Warehouse Management': '倉庫管理',
'Warehouse added': '倉庫を追加しました',
'Warehouse deleted': '倉庫を削除しました',
'Warehouse updated': '倉庫を更新しました',
'Warehouse': '倉庫',
'Warehouse/Sites Registry': '倉庫/Siteの登録',
'Warehouses': '倉庫',
'WatSan': '給水と衛生',
'Water Level still high?': '水位はまだ高いままですか?',
'Water Sanitation Hygiene': '水質衛生',
'Water collection': '給水',
'Water gallon': 'ガロン容器',
'Water storage containers available for HH': '世帯用の水貯蔵容器が利用可能である',
'Water storage containers in households': '世帯の水貯蔵容器',
'Water storage containers sufficient per HH': '世帯毎に1つ以上の水貯蔵容器が利用可能である',
'Water supply': '水の供給',
'Water': '水',
'Waterspout': '水上竜巻',
'Way Bill(s)': '移動費',
'We have tried': '私達は試行しました',
'Website': 'ウェブサイト',
'Wednesday': '水曜日',
'Weekly': '週次',
'Weight (kg)': '体重 (kg)',
'Weight': '体重',
'Welcome to the Sahana Eden Disaster Management Platform': 'Sahana Eden -災害情報管理プラットフォームへようこそ',
'Welcome to the Sahana Eden Disaster Management System': 'Sahana Eden -災害情報管理システムへようこそ',
'Welcome to the Sahana Portal at ': 'Sahana ポータルへようこそ: ',
'Welcome to the Sahana Portal at': 'Sahanaポータルにようこそ',
'Well-Known Text': '既知の文章',
'Were basic medical supplies available for health services prior to the disaster?': '災害前に、基本的な医療サービスが機能していたかどうかを記載してください',
'Were breast milk substitutes used prior to the disaster?': '災害前に利用していた母乳代用品の入手源を記載してください',
'Were there cases of malnutrition in this area prior to the disaster?': 'この地域で、災害前に栄養失調が発生していたかどうかを記載してください',
'Were there health services functioning for the community prior to the disaster?': '災害前、共同体でヘルスサービスが機能していたかどうかを記載してください',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '災害発生前から栄養失調の報告があった、あるいはその証跡があったかどうかを記載します',
'What are the factors affecting school attendance?': '生徒の就学状況に影響する要因を記載してください',
'What are your main sources of cash to restart your business?': 'ビジネス再開に必要な現金の、主な調達源を記載してください',
'What are your main sources of income now?': '現在の主な収入源を記載してください',
'What do you spend most of your income on now?': '現在の主な支出要因を記載してください',
'What food stocks exist? (main dishes)': '備蓄食料の種類(主皿)',
'What food stocks exist? (side dishes)': '備蓄食料の種類(副皿)',
'What is the estimated total number of people in all of these institutions?': '上記施設内の居住者を総計すると、おおよそどの程度になるかを記載してください',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '洗濯、料理、入浴など、日常生活で必要となる清潔な水の、主な入手源を記載してください',
'What is your major source of drinking water?': '飲料水の主な入手源を記載してください',
'What type of latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレのタイプは?',
'What type of salvage material can be used from destroyed houses?': '全壊した家屋から回収した部材が流用可能な用途を記載します',
'What type of salvage material can be used from destroyed schools?': '倒壊した校舎において、再利用できる部材は何ですか?',
'What types of health problems do children currently have?': '小児が現在抱えている健康問題のタイプを記載してください',
'What types of health problems do people currently have?': '住人たちが現在抱えている健康問題のタイプを記載してください',
'What types of health services are still functioning in the affected area?': '現在、被災地で機能しているヘルスサービスの種類を選択してください',
'What types of household water storage containers are available?': '世帯で使っている水貯蔵容器のタイプを選択してください',
'What were your main sources of income before the disaster?': '災害発生前の主な収入源を選択してください',
'Wheat': '小麦',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、それらポイントの外に余白を付与します。指定しない場合、表示領域とポイントが重なり、表示範囲から外れてしまう可能性があります。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、地域を表示する際の横幅と縦高の最小値となります。指定しない場合、対象の一点のみ表示され、その周辺は表示されません。一度表示された後であれば、縮尺の変更が可能です。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': '地点の集合にフォーカスを合わせた地図を表示すると、この地図はそれら地点の集合を表示できる範囲に拡大・縮小します',
'When reports were entered': 'いつ報告が入力されたか',
'Where are the alternative places for studying?': '学校以外で、学習が可能な施設の種類を選択してください',
'Where are the separated children originally from?': '保護者が居ない児童の住居地はどこですか?',
'Where do the majority of people defecate?': 'トイレはどこで済ませますか?',
'Where have the children been sent?': '疎開先の情報がある場合は記載してください',
'Where is solid waste disposed in the village/camp?': '村落/仮泊施設内での、固形廃棄物処理場所を記載してください',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Sahana Eden, Sahana Agasti, Ushahidi あるいは他のシステムの場合も',
'Whiskers': 'ほおひげ',
'Who is doing what and where': '誰がどこで何をしているか',
'Who usually collects water for the family?': '日頃、家族のために水を採取しているのは誰か?',
'Width': '横幅',
'Wild Fire': '野火',
'Wind Chill': '風速冷却',
'Window frame': 'ウィンドウ枠',
'Winter Storm': '吹雪',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '災害発生後、女性や少女に対する暴力事件が発生したかどうかを記載してください。具体的な人名や場所を記載する必要はありません',
'Women of Child Bearing Age': '出産年齢の女性',
'Women participating in coping activities': '女性が災害対応に従事',
'Women who are Pregnant or in Labour': '妊娠中、あるいは労働中の女性',
'Womens Focus Groups': '女性のフォーカスグループ(Womens Focus Groups)',
'Wooden plank': '木製板',
'Wooden poles': '木製の柱',
'Working hours end': '作業終了時刻',
'Working hours start': '作業開始時刻',
'Working or other to provide money/food': '金銭/食料調達のため就労、あるいは活動を実施',
'Would you like to display the photos on the map?': '地図上に写真を表示しますか?',
'X-Ray': 'X線',
'Year built': '建築年',
'Year of Manufacture': '製造年',
'Yellow': '黄色',
'Yes': 'はい',
'You are a recovery team?': 'あなたが遺体回収チームの場合',
'You are attempting to delete your own account - are you sure you want to proceed?': '自分のアカウントを削除しようとしています。本当に削除しますか?',
'You are currently reported missing!': 'あなたが行方不明者として登録されています!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '同期に関する設定は、「設定」セクションで行うことができます。設定には、UUID(unique identification number)、同期スケジュール、ビーコンサービス等が含まれます。同期設定は以下のリンクから変更可能です。',
'You can click on the map below to select the Lat/Lon fields': '下の地図をクリックすることで、緯度経度情報を入力できます',
'You can click on the map below to select the Lat/Lon fields:': '緯度と経度の設定は、以下の地図をクリックすることでも可能です:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '経度/緯度の項目は、地図を選択することでも登録可能です。経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'You can select the Draw tool (': '選択可能な描画ツール (',
'You can select the Draw tool': 'ドローツールを選択できます',
'You can set the modem settings for SMS here.': 'SMS用モデムの設定をすることができます。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '変換ツールを使うことで、GPS、あるいはDegrees/Minutes/Seconds形式からデータを変換できます。',
'You do no have permission to cancel this received shipment.': '輸送の受け取りをキャンセルする権限がありません',
'You do no have permission to cancel this sent shipment.': '輸送の送付をキャンセルする権限がありません',
'You do no have permission to make this commitment.': 'このコミットを作成する権限がありません',
'You do no have permission to receive this shipment.': 'この輸送を受け取る権限がありません',
'You do no have permission to send this shipment.': 'この輸送を開始する権限がありません',
'You do not have permission for any site to add an inventory item.': 'あなたには他の場所から在庫アイテムを追加する権限はありません',
'You do not have permission for any site to make a commitment.': 'どの場所にも受け入れを示す権限が有りません。',
'You do not have permission for any site to make a request.': '支援要請を作成する権限がありません',
'You do not have permission for any site to perform this action.': 'この操作をするための権限がありません',
'You do not have permission for any site to receive a shipment.': '物資の輸送を受け取る権限がありません',
'You do not have permission for any site to send a shipment.': '物資の輸送をする権限がありません',
'You do not have permission to send a shipment from this site.': 'あなたはこのサイトから物資を送る権限はありません',
'You have a personal map configuration. To change your personal configuration, click ': '個人用地図設定があります。あなたの個人用地図設定を編集するにはクリックしてください',
'You have found a dead body?': '遺体を発見しましたか?',
'You must be logged in to register volunteers.': 'ボランティアとして登録するには、ログインする必要があります',
'You must be logged in to report persons missing or found.': '行方不明者の発見状況を登録するには、ログインする必要があります。',
'You must provide a series id to proceed.': '処理を行うにはシリーズIDを指定する必要があります。',
'You should edit OpenStreetMap settings in models/000_config.py': 'OpenStreetMapの設定を変更するには、models/000_config.pyを編集してください',
'You should edit Twitter settings in models/000_config.py': 'Twitter設定を変更するには、models/000_config.pyを編集してください。',
'Your Account is Approved - you can now login\n %s%s/': '利用者登録が完了しました。リンク先のログインページで あなたが登録したユーザー名とパスワードを入力してログインしてください。\n %s%s/',
'Your Account is Approved': '利用者登録が完了しました',
'Your action is required. Please approve user %s asap: ': 'あなたの行動が要求されています。ただちにユーザー %s を承認してください。',
'Your action is required. Please approve user': 'ユーザーから承認の依頼が届いています。承諾お願いします',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '解決項目の順番付きリストは以下です。再度投票することによって変更可能です。',
'Your post was added successfully.': '投稿が成功しました',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'あなたがお使いのシステムには、ユニークID (UUID) が割り当てられており、このIDを用いて他のコンピュータがあなたのシステムを同定します。あなたの UUID を閲覧するには、同期 -> 同期設定と進んでください。そのページでは、他の設定を閲覧することもできます。',
'ZIP/Postcode': '郵便番号',
'Zinc roof': 'トタン屋根',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'ズームイン: マップをクリックするか、拡大したい場所をドラッグで選択してください',
'Zoom Levels': 'ズームレベル',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'ズームアウト: マップをクリックするか、拡大したい地点をマウスの左ボタンでドラッグしてください',
'Zoom to Current Location': '現在の場所を拡大',
'Zoom to maximum map extent': 'マップの最大範囲までズーム',
'Zoom': 'ズーム',
'act': '活動',
'active': 'アクティブ',
'added': '追加しました',
'all records': '全てのレコード',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'では、スタッフや設備、それらの管理コストまで含めた予算編成を行ないます。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '自然災害による被災影響調査の作成、および管理を許可する',
'an individual/team to do in 1-2 days': '個人やチーム単位で、1-2日中に実施するべき事柄をさします。',
'approved': '承認された',
'assigned': '担当者・部門が確定',
'average': '平均的',
'black': '黒',
'blond': 'ブロンド',
'blue': '青',
'brown': '茶色',
'business_damaged': 'ビジネスへの損害',
'by': ' ',
'c/o Name': 'c/o 名前',
'can be used to extract data from spreadsheets and put them into database tables.': 'スプレッドシートからデータを抽出して、データベーステーブルに挿入できます。',
'can use this to identify the Location': 'ここからロケーションの特定が可能です',
'caucasoid': '白人',
'check all': '全てチェック',
'click for more details': '詳細はクリック',
'collateral event': '付帯イベント',
'completed': '完了',
'confirmed': '確認済',
'consider': '考慮',
'criminal intent': '犯罪目的',
'crud': '性病',
'curly': '縮れ毛',
'currently registered': '登録済み',
'daily': '日次',
'dark': '濃い',
'data uploaded': 'データがアップロードされました',
'database %s select': 'データベース%sの選択',
'database': 'データベース',
'db': 'データベース',
'delete all checked': 'チェックされた項目を全て削除',
'deleted': '削除されました',
'denied': '拒否されました',
'description': '説明',
'design': 'デザイン',
'diseased': '罹患中',
'displaced': '避難中',
'divorced': '離別',
'done!': '完了!',
'duplicate': '重複',
'edit': '編集',
'editor': '編集者',
'eg. gas, electricity, water': 'ガス、電気、水道など',
'embedded': '埋め込まれた',
'enclosed area': '専用地',
'export as csv file': 'csvファイルとしてエクスポート',
'fat': '肥満',
'feedback': '現地からの要望',
'female': '女性',
'final report': '最終報告書',
'flush latrine with septic tank': '浄化槽つき水洗トイレ',
'follow-up assessment': 'アセスメントのフォローアップ',
'food_sources': '食糧供給源',
'forehead': 'ひたい',
'form data': 'フォームデータ',
'from Twitter': 'Twitter経由',
'full': '完全',
'getting': '取得中',
'green': '緑',
'grey': '灰色',
'here': 'ここ',
'high': '高い',
'hourly': '1時間毎',
'households': '世帯情報',
'human error': 'ヒューマンエラー',
'identified': '身元確認済み',
'ignore': '無視する',
'immediately': '即応',
'in Deg Min Sec format': 'Deg Min Sec フォーマットで',
'in GPS format': 'GPS フォーマットで',
'in Inv.': '個',
'inactive': '休止中',
'initial assessment': '初期アセスメント',
'injured': '負傷中',
'insert new %s': '%sの新規挿入',
'insert new': '新規挿入',
'invalid request': '無効な要求',
'invalid': '無効',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'は、災害犠牲者とその家族、特に身元の判明した遺体、避難者、難民など、全ての情報を集約可能な中央オンラインレポジトリです。名前、年齢、連絡先番号、IDカード番号、避難した場所、その他の詳細が記録されます。人物の写真や指紋をアップロードすることができます。効率性と利便性のため、人物をグループ分けすることができます。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'は、支援団体による救援活動や復興プロジェクトの作業を管理するために、複数のサブモジュールを組み合わせて高度な機能を実現しようと考えており、物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、輸送管理、調達、財務記録、その他様々な資産やリソースの管理といった機能を備えています',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '全ての入荷伝票を追跡することで、カテゴリー分けや適切な実行場所への配分を行う',
'kilogram': 'キログラム',
'kit': 'キット',
'latrines': 'トイレ',
'leave empty to detach account': 'アカウントを取り外すには空欄のままにしてください',
'legend URL': '凡例の URL',
'light': '淡い',
'liter': 'リットル',
'locations': 'ロケーション',
'login': 'ログイン',
'long': '長い',
'long>12cm': '12cm以上',
'low': '低い',
'male': '男性',
'manual': 'マニュアル',
'married': '既婚',
'max': '最大',
'maxExtent': '最大範囲',
'maxResolution': '最高分解能',
'medium': '中',
'medium<12cm': '12cm未満',
'menu item': 'メニューアイテム',
'message_id': 'メッセージID',
'meter cubed': '立方メートル',
'meter': 'メートル',
'meters': 'メートル',
'min': '最小',
'module allows the an inspector to fill information for buildings.': 'モジュールでは、建築物の調査情報を記録できます。',
'module allows the site administrator to configure various options.': 'モジュールを使うことで、サイト管理者が様々な項目を設定する際の手間を省くことができます。',
'module helps monitoring the status of hospitals.': 'モジュールでは、病院の状態をモニタできます。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'モジュールでは、オンラインマッピング(GIS)を使用して、現在の災害地域の状態を俯瞰することができます。',
'mongoloid': '黄色人種',
'more': 'その他の項目 ',
'n/a': 'データなし',
'natural hazard': '自然災害',
'negroid': '黒人',
'never': 'まだ',
'new ACL': '新規ACL',
'new record inserted': '新規レコードを挿入しました',
'new': '新規登録',
'next 100 rows': '次の100行',
'no': ' ',
'none': 'なし',
'normal': '通常',
'not accessible - no cached version available!': 'アクセスできません - キャッシュされたバージョンがありません!',
'not accessible - using cached version from': 'アクセス不可 - キャッシュ版を使用しています',
'not specified': '未指定',
'num Zoom Levels': 'ズーム倍率',
'obsolete': '孤立中',
'on': ' ',
'once': '一度',
'open defecation': '野外',
'operational intent': '運用目的',
'or import from csv file': 'またはcsvファイルからインポート',
'other': 'その他',
'over one hour': '1時間以上',
'pack of 10': '10のパック',
'people': '居住者情報',
'piece': 'ピース(単位)',
'pit latrine': '穴掘りトイレ',
'pit': '堀穴',
'postponed': '実施を延期',
'preliminary template or draft, not actionable in its current form': '現行フォーム内で実用的でない予備テンプレートまたはドラフト',
'previous 100 rows': '前の100行',
'primary incident': '優先すべきインシデント',
'problem connecting to twitter.com - please refresh': 'twitter.comへの接続に問題が発生しました。再読込を行ってください',
'provides a catalogue of digital media.': 'デジタルメディアのカタログを提供します',
'record does not exist': 'レコードが存在しません',
'record id': 'レコードID',
'records deleted': 'レコードを削除しました',
'red': '赤い',
'reported': '報告済み',
'reports successfully imported.': 'レポートは正しくインポートできました',
'representation of the Polygon/Line.': 'Polygon/Lineの表現',
'retired': '終了',
'retry': '再試行',
'river': '河川',
'sack 20kg': '袋 20kg',
'sack 50kg': '袋 50kg',
'secondary effect': '副次効果',
'see comment': 'コメント参照',
'selected': '選択された',
'separated from family': '家族とはぐれた',
'separated': '別居',
'shaved': '坊主',
'shift_start': 'シフト開始',
'short': '小柄',
'short<6cm': '6cm未満',
'sides': '側面',
'sign-up now': '今すぐ登録',
'simple': '単純な',
'single': '独身',
'slim': 'やせ型',
'specify': '明記してください',
'staff': 'スタッフ',
'state location': 'ステートロケーション',
'state': '状態',
'straight': '直毛',
'suffered financial losses': '経済的損失',
'table': 'テーブル',
'table_name': 'テーブル名',
'tall': '大柄',
'technical failure': '技術的な原因',
'this': 'この',
'times and it is still not working. We give in. Sorry.': '回繰り返しましたが、処理を完了できません。ご迷惑をおかけしますが、処理を中止します。',
'to access the system': 'してシステムにアクセスしてください',
'ton': 'トン',
'tonsure': '剃髪',
'total': '合計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': '実行中のPythonで tweepyモジュールが利用できません。Tropo以外でのTwitter機能利用で必要です',
'unable to parse csv file': 'csvファイルをパースできません。',
'unapproved': '承認されていない',
'uncheck all': 'チェックをすべて外す',
'unidentified': '詳細不明',
'uninhabitable = foundation and structure destroyed': '利用不可能 = 基礎構造や土台部分の破壊など',
'unknown': '不明',
'unspecified': 'その他',
'unverified': '未検証',
'updated': '更新しました',
'updates only': '更新のみ',
'urgent': '緊急',
'using default': '標準値を使用',
'verified': '確認済み',
'volunteer': 'ボランティア',
'wavy': '波状',
'weekly': '週次',
'white': '白',
'wider area, longer term, usually contain multiple Activities': '活動範囲が広く、長期的目標を有しており、複数の支援活動を包括します。',
'widowed': '死別',
'window': '窓',
'windows broken, cracks in walls, roof slightly damaged': '窓破損、壁にひび割れ、屋根の一部損傷',
'within human habitat': '人間の居住地域内',
'xlwt module not available within the running Python - this needs installing for XLS output!': '実行中のPythonでxlwtモジュールが利用できません。XLS出力に必要です。',
'yes': 'はい',
}
|
flavour/ifrc_qa
|
languages/ja.py
|
Python
|
mit
| 353,257
|
[
"VisIt"
] |
9eee2afc37a69e8dae8267d942501ac235ff64e799c8d9ecbcd7e09af61ab487
|
"""
@name: Modules.Entertainment.sharp
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2017 by D. Brian Kimmel
@note: Created on Jul 12, 2016
@license: MIT License
@summary:
192.168.1.92
pink
poppy
10002
"""
__updated__ = '2019-12-30'
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/House/Entertainment/Sharp/sharp.py
|
Python
|
mit
| 304
|
[
"Brian"
] |
1813f624b349d55fe1627ee86a454e355b40ca364d936dd32a67c76e76e04340
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
SVG document generator.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from io import StringIO
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import DOCGEN_OPTIONS
from gramps.gen.errors import ReportError
from gramps.gen.plug.docgen import BaseDoc, DrawDoc, SOLID, FONT_SANS_SERIF
from gramps.gen.plug.menu import EnumeratedListOption
from gramps.gen.plug.report import DocOptions
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# SvgDrawDoc
#
#-------------------------------------------------------------------------
class SvgDrawDoc(BaseDoc, DrawDoc):
    """Draw-document backend that renders Gramps drawing primitives to SVG.

    Each page is written to its own file: ``<root>.svg`` for the first
    page and ``<root>-<n>.svg`` for later pages (see start_page).  Graphic
    elements (lines, boxes, polygons) are written straight to the file,
    while all text is collected in a StringIO buffer and flushed in
    end_page() so it renders on top of the graphics.
    """
    def __init__(self, styles, type, options=None, uistate=None):
        """Initialize the document.

        :param styles: style sheet collection for the document
        :param type: paper/page description forwarded to BaseDoc
        :param options: optional DocOptions whose menu carries the
            'svg_background' option (see SvgDrawDocOptions)
        :param uistate: optional UI state forwarded to BaseDoc
        """
        BaseDoc.__init__(self, styles, type, uistate=uistate)
        self.file = None               # file object of the page being written
        self.filename = None
        self.level = 0
        self.time = "0000-00-00T00:00:00"
        self.page = 0                  # page counter, incremented in start_page
        self._bg = 'none' # SVG background, in case options are ignored
        if options:
            menu = options.menu
            self._bg = menu.get_option_by_name('svg_background').get_value()
            if self._bg == 'transparent':
                # SVG spells a transparent fill as 'none'
                self._bg = 'none'
    def open(self, filename):
        """Remember the output base name, stripping a trailing '.svg'."""
        if filename[-4:] != ".svg":
            self.root = filename
        else:
            self.root = filename[:-4]
    def close(self):
        """No-op: each page's file is closed individually in end_page()."""
        pass
    def start_page(self):
        """Open the file for the next page and emit the SVG document header."""
        self.page += 1
        # First page gets '<root>.svg', subsequent pages '<root>-<n>.svg'.
        if self.page != 1:
            name = "%s-%d.svg" % (self.root, self.page)
        else:
            name = "%s.svg" % self.root
        try:
            self.file = open(name, "w", encoding="utf-8")
        except IOError as msg:
            raise ReportError(_("Could not create %s") % name, msg)
        except:
            # Catch-all fallback for any other error while opening the file.
            raise ReportError(_("Could not create %s") % name)
        # Text is buffered here and written out in end_page() so that it is
        # rendered on top of the graphic elements.
        self.buffer = StringIO()
        width = self.paper.get_size().get_width()
        height = self.paper.get_size().get_height()
        # SVG 1.0 header plus a full-page background rectangle filled with
        # the configured background color (or 'none').
        self.file.write(
            '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n'
            '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
            '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">\n'
            '<svg width="%4.2fcm" height="%4.2fcm" '
            'xmlns="http://www.w3.org/2000/svg">\n'
            '<rect width="%4.2fcm" height="%4.2fcm" '
            'style="fill: %s;"/>\n'
            % (width, height, width, height, self._bg)
            )
    def rotate_text(self, style, text, x, y, angle, mark=None):
        """Draw multi-line *text* centered on (x, y) (cm), rotated by *angle*
        degrees around that center.

        @param mark: IndexMark to use for indexing (not supported)
        """
        style_sheet = self.get_style_sheet()
        stype = style_sheet.get_draw_style(style)
        pname = stype.get_paragraph_style()
        para = style_sheet.get_paragraph_style(pname)
        font = para.get_font()
        size = font.get_size()
        width = height = 0
        # Extent of the text block: widest rendered line, one 'size' per line.
        for line in text:
            width = max(width, self.string_width(font, line))
            height += size
        centerx, centery = units((x+self.paper.get_left_margin(),
                                  y+self.paper.get_top_margin()))
        xpos = (centerx - (width/2.0))
        ypos = (centery - (height/2.0))
        self.buffer.write(
            '<text ' +
            'x="%4.2f" y="%4.2f" ' % (xpos, ypos) +
            'transform="rotate(%d %4.2f %4.2f)" ' % (angle, centerx, centery) +
            'style="fill:#%02x%02x%02x; '% font.get_color()
            )
        if font.get_bold():
            self.buffer.write('font-weight:bold;')
        if font.get_italic():
            self.buffer.write('font-style:italic;')
        self.buffer.write('font-size:%dpt; ' % size)
        if font.get_type_face() == FONT_SANS_SERIF:
            self.buffer.write('font-family:sans-serif;')
        else:
            self.buffer.write('font-family:serif;')
        self.buffer.write('">')
        for line in text:
            # Center this line relative to the rest of the text
            # (dy="size" advances one line height per tspan, including the
            # first, so the first baseline sits one line below ypos).
            linex = xpos + (width - self.string_width(font, line)) / 2
            self.buffer.write(
                '<tspan x="%4.2f" dy="%d">' % (linex, size) +
                line +
                '</tspan>'
                )
        self.buffer.write('</text>\n')
    def end_page(self):
        """Flush buffered text, close the SVG element and the page file."""
        # Print the text last for each page so that it is rendered on top of
        # other graphic elements.
        self.file.write(self.buffer.getvalue())
        self.buffer.close()
        self.file.write('</svg>\n')
        self.file.close()
    def draw_line(self, style, x1, y1, x2, y2):
        """Draw a line from (x1, y1) to (x2, y2), coordinates in cm relative
        to the page margins."""
        x1 += self.paper.get_left_margin()
        x2 += self.paper.get_left_margin()
        y1 += self.paper.get_top_margin()
        y2 += self.paper.get_top_margin()
        style_sheet = self.get_style_sheet()
        draw_style = style_sheet.get_draw_style(style)
        line_out = '<line x1="%4.2fcm" y1="%4.2fcm" ' % (x1, y1)
        line_out += 'x2="%4.2fcm" y2="%4.2fcm" ' % (x2, y2)
        line_out += 'style="stroke:#%02x%02x%02x; ' % draw_style.get_color()
        if draw_style.get_line_style() != SOLID:
            # Non-solid line styles become an SVG dash pattern.
            line_out += 'stroke-dasharray: %s; ' % (
                ",".join(map(str, draw_style.get_dash_style()))
                )
        line_out += 'stroke-width:%.2fpt;"/>\n' % draw_style.get_line_width()
        self.file.write(line_out)
    def draw_path(self, style, path):
        """Draw a closed polygon through the (x, y) points of *path* (cm)."""
        style_sheet = self.get_style_sheet()
        stype = style_sheet.get_draw_style(style)
        point = path[0]
        line_out = '<polygon fill="#%02x%02x%02x"' % stype.get_fill_color()
        line_out += ' style="stroke:#%02x%02x%02x; ' % stype.get_color()
        if stype.get_line_style() != SOLID:
            line_out += 'stroke-dasharray: %s; ' % (
                ",".join(map(str, stype.get_dash_style()))
                )
        line_out += ' stroke-width:%.2fpt;"' % stype.get_line_width()
        # First point opens the 'points' attribute; remaining points are
        # appended below.  units() converts cm to SVG user units.
        line_out += ' points="%.2f,%.2f' % units(
            (point[0]+self.paper.get_left_margin(),
             point[1]+self.paper.get_top_margin()))
        self.file.write(line_out)
        for point in path[1:]:
            self.file.write(
                ' %.2f,%.2f'
                % units((point[0]+self.paper.get_left_margin(),
                         point[1]+self.paper.get_top_margin()))
                )
        self.file.write('"/>\n')
    def draw_box(self, style, text, x, y, w, h, mark=None):
        """Draw a w-by-h box at (x, y) (cm), optionally with centered text.

        @param mark: IndexMark to use for indexing (not supported)
        """
        x += self.paper.get_left_margin()
        y += self.paper.get_top_margin()
        style_sheet = self.get_style_sheet()
        box_style = style_sheet.get_draw_style(style)
        shadow_width = box_style.get_shadow_space()
        if box_style.get_shadow() and shadow_width > 0:
            # Shadow: a grey rectangle offset down and right, drawn first so
            # the real box covers it.
            self.file.write(
                '<rect ' +
                'x="%4.2fcm" ' % (x + shadow_width) +
                'y="%4.2fcm" ' % (y + shadow_width) +
                'width="%4.2fcm" ' % w +
                'height="%4.2fcm" ' % h +
                'style="fill:#808080; stroke:#808080; stroke-width:1;"/>\n'
                )
        line_out = '<rect '
        line_out += 'x="%4.2fcm" ' % x
        line_out += 'y="%4.2fcm" ' % y
        line_out += 'width="%4.2fcm" ' % w
        line_out += 'height="%4.2fcm" ' % h
        line_out += 'style="fill:#%02x%02x%02x; ' % box_style.get_fill_color()
        line_out += 'stroke:#%02x%02x%02x; ' % box_style.get_color()
        if box_style.get_line_style() != SOLID:
            line_out += 'stroke-dasharray: %s; ' % (
                ",".join(map(str, box_style.get_dash_style()))
                )
        line_out += 'stroke-width:%f;"/>\n' % box_style.get_line_width()
        self.file.write(line_out)
        if text:
            para_name = box_style.get_paragraph_style()
            assert para_name != ''
            para = style_sheet.get_paragraph_style(para_name)
            font = para.get_font()
            font_size = font.get_size()
            lines = text.split('\n')
            # 28.35 pt per cm; 1.2 is the line-spacing factor.
            mar = 10/28.35
            fsize = (font_size/28.35) * 1.2
            # Vertical centering of the line block inside the box; the
            # fsize*0.2 term nudges the baseline down (empirical tweak).
            center = y + (h + fsize)/2.0 + (fsize*0.2)
            ystart = center - (fsize/2.0) * len(lines)
            for i, line in enumerate(lines):
                ypos = ystart + (i * fsize)
                # Text goes to the buffer so end_page() renders it on top
                # of the rectangles written above.
                self.buffer.write(
                    '<text ' +
                    'x="%4.2fcm" ' % (x+mar) +
                    'y="%4.2fcm" ' % ypos +
                    'style="fill:#%02x%02x%02x; '% font.get_color()
                    )
                if font.get_bold():
                    self.buffer.write(' font-weight:bold;')
                if font.get_italic():
                    self.buffer.write(' font-style:italic;')
                self.buffer.write(' font-size:%dpt;' % font_size)
                if font.get_type_face() == FONT_SANS_SERIF:
                    self.buffer.write(' font-family:sans-serif;')
                else:
                    self.buffer.write(' font-family:serif;')
                self.buffer.write(
                    '">' +
                    line +
                    '</text>\n'
                    )
    def draw_text(self, style, text, x, y, mark=None):
        """Draw a single line of *text* with its top-left at (x, y) (cm).

        @param mark: IndexMark to use for indexing (not supported)
        """
        x += self.paper.get_left_margin()
        y += self.paper.get_top_margin()
        style_sheet = self.get_style_sheet()
        box_style = style_sheet.get_draw_style(style)
        para_name = box_style.get_paragraph_style()
        para = style_sheet.get_paragraph_style(para_name)
        font = para.get_font()
        font_size = font.get_size()
        # y is the top of the text, so move down one line height (in cm)
        # to get the SVG baseline position.
        fsize = (font_size/28.35) * 1.2
        self.buffer.write(
            '<text ' +
            'x="%4.2fcm" ' % x +
            'y="%4.2fcm" ' % (y+fsize) +
            'style="fill:#%02x%02x%02x;'% font.get_color()
            )
        if font.get_bold():
            self.buffer.write('font-weight:bold;')
        if font.get_italic():
            self.buffer.write('font-style:italic;')
        self.buffer.write('font-size:%dpt; ' % font_size)
        if font.get_type_face() == FONT_SANS_SERIF:
            self.buffer.write('font-family:sans-serif;')
        else:
            self.buffer.write('font-family:serif;')
        self.buffer.write(
            '">' +
            text +
            '</text>\n'
            )
    def center_text(self, style, text, x, y, mark=None):
        """Draw a single line of *text* positioned relative to x.

        @param mark: IndexMark to use for indexing (not supported)
        """
        style_sheet = self.get_style_sheet()
        box_style = style_sheet.get_draw_style(style)
        para_name = box_style.get_paragraph_style()
        para = style_sheet.get_paragraph_style(para_name)
        font = para.get_font()
        width = self.string_width(font, text) / 72
        # NOTE(review): the FULL width (not width/2) is subtracted, so the
        # text ends at x (right-aligned) rather than being centered on it --
        # confirm whether this is the intended behavior of this backend.
        x -= width
        self.draw_text(style, text, x, y)
def units(val):
    """Scale a 2-tuple by 35.433 and return the result as a tuple.

    NOTE(review): 35.433 is approximately 90 dpi / 2.54 cm-per-inch —
    presumably a cm-to-pixel conversion; confirm with callers.
    """
    scale = 35.433
    first, second = val[0], val[1]
    return (first * scale, second * scale)
#------------------------------------------------------------------------
#
# SvgDrawDocOptions class
#
#------------------------------------------------------------------------
class SvgDrawDocOptions(DocOptions):
    """
    Defines options and provides handling interface.
    """

    def __init__(self, name, dbase):
        # dbase is accepted for interface compatibility but not used here.
        DocOptions.__init__(self, name)

    def add_menu_options(self, menu):
        """
        Add options to the document menu for the docgen.
        """
        background = EnumeratedListOption(_('SVG background color'),
                                          'transparent')
        # First entry is the special transparent choice, then plain colors.
        choices = [('transparent', _('transparent background'))]
        for color in ('white', 'black', 'red', 'green',
                      'blue', 'cyan', 'magenta', 'yellow'):
            choices.append((color, _(color)))
        background.set_items(choices)
        background.set_help(_('The color, if any, of the SVG background'))
        menu.add_option(DOCGEN_OPTIONS, 'svg_background', background)
|
jralls/gramps
|
gramps/plugins/docgen/svgdrawdoc.py
|
Python
|
gpl-2.0
| 13,465
|
[
"Brian"
] |
5bb8dd57492ad9291644afc4195517975dc3c1f890bc09866a49711c650e62e7
|
#!/usr/bin/env python
'''
This example shows how to use pseudo spectral integrals in SCF calculation.
'''
from pyscf import gto
from pyscf import scf
from pyscf import sgx
# Water molecule, cc-pVDZ basis (coordinates presumably in Angstrom —
# pyscf's default unit; confirm if reusing).
mol = gto.M(
atom='''O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587
''',
basis = 'ccpvdz',
)
# Wrap a plain RHF object so both J and K matrices are built with the
# seminumerical SGX scheme, then run the SCF.
mf = sgx.sgx_fit(scf.RHF(mol))
mf.kernel()
# Using RI for Coulomb matrix while K-matrix is constructed with COS-X method
# (dfj=True switches only the Coulomb part to density fitting; the second
# kernel() re-runs the SCF with that setting).
mf.with_df.dfj = True
mf.kernel()
|
gkc1000/pyscf
|
examples/sgx/00-simple_sgx.py
|
Python
|
apache-2.0
| 484
|
[
"PySCF"
] |
5b506c556d11808ec266ce32fbc59a14611eb679270dfc0b7069f01b053f0ddc
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2016-2019 The Atlite Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
Management of data retrieval and structure.
"""
import pandas as pd
import xarray as xr
import os
from numpy import atleast_1d
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from functools import wraps
from dask import delayed, compute
from dask.utils import SerializableLock
from dask.diagnostics import ProgressBar
import logging
logger = logging.getLogger(__name__)
from .datasets import modules as datamodules
def get_features(cutout, module, features, tmpdir=None):
    """
    Load the feature data for a given module.

    This get the data for a set of features from a module. All modules in
    `atlite.datasets` are allowed. Retrieval tasks are built lazily with
    dask and computed in one go, then merged into a single dataset.
    """
    parameters = cutout.data.attrs
    lock = SerializableLock()
    fetch = datamodules[module].get_data
    # One delayed retrieval task per requested feature.
    tasks = [
        delayed(fetch)(cutout, feature, tmpdir=tmpdir, lock=lock, **parameters)
        for feature in features
    ]
    ds = xr.merge(compute(*tasks), compat='equals')
    feature_map = datamodules[module].features.items()
    for name in ds:
        ds[name].attrs['module'] = module
        # Record which feature of the module provides this variable.
        ds[name].attrs['feature'] = [k for k, vs in feature_map if name in vs].pop()
    return ds
def available_features(module=None):
    """
    Inspect the available features of all or a selection of modules.

    Parameters
    ----------
    module : str/list, optional
        Module name(s) which to inspect. The default None will result in all
        modules

    Returns
    -------
    pd.Series
        A Series of all variables. The MultiIndex indicated which module
        provides the variable and with which feature name the variable can be
        obtained.
    """
    per_module = {name: mod.features for name, mod in datamodules.items()}
    series = (pd.DataFrame(per_module)
              .unstack()
              .dropna()
              .rename_axis(index=['module', 'feature'])
              .rename('variables'))
    if module is not None:
        # Restrict to the requested module(s) only.
        series = series.reindex(atleast_1d(module), level='module')
    return series.explode()
def non_bool_dict(d):
    """Convert bool to int for netCDF4 storing"""
    converted = {}
    for key, value in d.items():
        if isinstance(value, bool):
            value = int(value)
        converted[key] = value
    return converted
def maybe_remove_tmpdir(func):
    """Use this wrapper to make tempfile deletion compatible with windows machines.

    If the caller supplies a truthy ``tmpdir`` keyword it is left untouched;
    otherwise a fresh temporary directory is created for the call and removed
    afterwards, even if the wrapped function raises.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs.get('tmpdir', None):
            # Caller manages the directory itself.
            return func(*args, **kwargs)
        kwargs['tmpdir'] = mkdtemp()
        try:
            return func(*args, **kwargs)
        finally:
            rmtree(kwargs['tmpdir'])
    return wrapper
@maybe_remove_tmpdir
def cutout_prepare(cutout, features=None, tmpdir=None, overwrite=False):
    """
    Prepare all or a selection of features in a cutout.
    This function loads the feature data of a cutout, e.g. influx or runoff.
    When not specifying the `feature` argument, all available features will be
    loaded. The function compares the variables which are already included in
    the cutout with the available variables of the modules specified by the
    cutout. It detects missing variables and stores them into the netcdf file
    of the cutout.
    Parameters
    ----------
    cutout : atlite.Cutout
    features : str/list, optional
        Feature(s) to be prepared. The default slice(None) results in all
        available features.
    tmpdir : str/Path, optional
        Directory in which temporary files (for example retrieved ERA5 netcdf
        files) are stored. If set, the directory will not be deleted and the
        intermediate files can be examined.
    overwrite : bool, optional
        Whether to overwrite variables which are already included in the
        cutout. The default is False.
    Returns
    -------
    cutout : atlite.Cutout
        Cutout with prepared data. The variables are stored in `cutout.data`.
    """
    # Nothing to do unless the caller explicitly asks for a rebuild.
    if cutout.prepared and not overwrite:
        logger.info('Cutout already prepared.')
        return cutout
    logger.info(f'Storing temporary files in {tmpdir}')
    modules = atleast_1d(cutout.module)
    # NOTE: a falsy `features` (None, [] or '') selects ALL features via
    # slice(None).
    features = atleast_1d(features) if features else slice(None)
    prepared = set(atleast_1d(cutout.data.attrs['prepared_features']))
    # target is series of all available variables for given module and features
    target = available_features(modules).loc[:, features].drop_duplicates()
    for module in target.index.unique('module'):
        missing_vars = target[module]
        if not overwrite:
            # Keep only variables not yet present in the cutout dataset.
            missing_vars = missing_vars[lambda v: ~v.isin(cutout.data)]
        if missing_vars.empty:
            continue
        logger.info(f'Calculating and writing with module {module}:')
        missing_features = missing_vars.index.unique('feature')
        ds = get_features(cutout, module, missing_features, tmpdir=tmpdir)
        prepared |= set(missing_features)
        # Track what has been prepared in the dataset attributes, converting
        # bools to ints so the attrs remain netCDF4-storable.
        cutout.data.attrs.update(dict(prepared_features=list(prepared)))
        attrs = non_bool_dict(cutout.data.attrs)
        attrs.update(ds.attrs)
        ds = (cutout.data.merge(ds[missing_vars.values]).assign_attrs(**attrs))
        # write data to tmp file, copy it to original data, this is much safer
        # than appending variables
        directory, filename = os.path.split(str(cutout.path))
        # mkstemp returns an open fd; close it immediately, only the path is
        # needed for to_netcdf below.
        fd, tmp = mkstemp(suffix=filename, dir=directory)
        os.close(fd)
        with ProgressBar():
            ds.to_netcdf(tmp)
        # Close and remove the old file before the atomic-ish rename so the
        # handle does not block replacement (relevant on Windows).
        if cutout.path.exists():
            cutout.data.close()
            cutout.path.unlink()
        os.rename(tmp, cutout.path)
        # Re-open lazily with the cutout's chunking so later modules see the
        # freshly written variables.
        cutout.data = xr.open_dataset(cutout.path, chunks=cutout.chunks)
    return cutout
|
FRESNA/atlite
|
atlite/data.py
|
Python
|
gpl-3.0
| 5,883
|
[
"NetCDF"
] |
de4116aa05e7b0486a758a479c3ff6cc318f561ce83bfbf307ad1b4d09b48950
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.