repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Jeff-Tian/mybnb | Python27/Lib/test/test_commands.py | 13 | 2714 | '''
Tests for commands module
Nick Mathewson
'''
import unittest
import os, tempfile, re
from test.test_support import run_unittest, reap_children, import_module, \
check_warnings
# Silence Py3k warning
commands = import_module('commands', deprecated=True)
# The module says:
# "NB This only works (and is only relevant) for UNIX."
#
# Actually, getoutput should work on any platform with an os.popen, but
# I'll take the comment as given, and skip this suite.
if os.name != 'posix':
raise unittest.SkipTest('Not posix; skipping test_commands')
class CommandTests(unittest.TestCase):
    """Exercise the Python 2 ``commands`` module helpers.

    Relies on POSIX shell tools (``echo``, ``cat``, ``ls``); the module-level
    guard above skips this suite on non-posix platforms.
    """

    def test_getoutput(self):
        # getoutput returns stdout only; getstatusoutput returns (status, output).
        self.assertEqual(commands.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(commands.getstatusoutput('echo xyzzy'), (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        dir = None
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = commands.getstatusoutput('cat ' + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test_getstatus(self):
        # This pattern should match 'ls -ld /.' on any posix
        # system, however perversely configured.  Even on systems
        # (e.g., Cygwin) where user and group names can have spaces:
        #     drwxr-xr-x   15 Administ Domain U     4096 Aug 12 12:50 /
        #     drwxr-xr-x   15 Joe User My Group     4096 Aug 12 12:50 /
        # Note that the first case above has a space in the group name
        # while the second one has a space in both names.
        # Special attributes supported:
        #   + = has ACLs
        #   @ = has Mac OS X extended attributes
        #   . = has a SELinux security context
        pat = r'''d.........  # It is a directory.
                  [.+@]?      # It may have special attributes.
                  \s+\d+      # It has some number of links.
                  [^/]*       # Skip user, group, size, and date.
                  /\.         # and end with the name of the file.
               '''
        # getstatus() itself is deprecated; silence the warning it emits.
        with check_warnings((".*commands.getstatus.. is deprecated",
                             DeprecationWarning)):
            self.assertTrue(re.match(pat, commands.getstatus("/."), re.VERBOSE))
def test_main():
    # Entry point used by Python 2's regrtest driver; reap_children avoids
    # leaving zombie subprocesses spawned by the shell commands above.
    run_unittest(CommandTests)
    reap_children()


if __name__ == "__main__":
    test_main()
| apache-2.0 |
zielmicha/hera | hera/models.py | 1 | 4979 | import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'hera.settings'
from django.db import models
from django.db import transaction
from django.contrib.auth import models as auth_models
from django.core.exceptions import PermissionDenied
import datetime
import os
import jsonfield
import binascii
import json
def MoneyField(*args, **kwargs):
    """Factory for a money column: a DecimalField with fixed precision
    (20 digits total, 10 of them after the decimal point)."""
    field = models.DecimalField(*args, decimal_places=10, max_digits=20, **kwargs)
    return field
class VM(models.Model):
    """A running (or recorded) virtual machine tied to its creating Account."""
    creator = models.ForeignKey('Account')
    stats = jsonfield.JSONField(blank=True, null=True)
    vm_id = models.CharField(max_length=120)
    address = models.CharField(max_length=120)

    @property
    def stats_parsed(self):
        # NOTE(review): `stats` is already a JSONField; json.loads here assumes
        # it actually holds a JSON *string* rather than a decoded object --
        # confirm against whatever writes `stats`.
        return json.loads(self.stats)

    def get_privileged_account(self, user):
        # Returns the creator account, or raises PermissionDenied when `user`
        # is not privileged on it.
        if not self.creator.is_privileged(user):
            raise PermissionDenied()
        return self.creator

    def is_user_privileged(self, user):
        # NOTE(review): despite the "is_" name this returns None on success and
        # raises PermissionDenied otherwise -- callers must rely on the
        # exception, not on the (always falsy) return value.
        self.get_privileged_account(user)
class QueuedCreation(models.Model):
    """A queued request to create a VM for a DerivativeResource."""
    created = models.DateTimeField(auto_now_add=True)
    # Creation parameters as supplied by the requester.
    params = jsonfield.JSONField()
    resource = models.ForeignKey('DerivativeResource')
    # Filled in once the VM has actually been created.
    vm = models.ForeignKey('VM', null=True, blank=True)
    stats = jsonfield.JSONField()
    owner = models.ForeignKey('Account')
class DerivativeResource(models.Model):
    """A billable resource with a lifetime bounded by `expiry` and an
    optional early close time (`closed_at`)."""
    owner = models.ForeignKey('Account')
    created = models.DateTimeField(auto_now_add=True)
    expiry = models.DateTimeField()
    closed_at = models.DateTimeField(null=True)
    base_price_per_second = MoneyField()
    custom = jsonfield.JSONField(blank=True, null=True)
    user_type = models.CharField(max_length=100)
    user_id = models.CharField(max_length=100)

    @property
    def expired(self):
        # Past the expiry timestamp (naive local time, like the rest of
        # this module).
        return self.expiry < datetime.datetime.now()

    @property
    def running_time(self):
        # Closed resources ran until closed_at; open ones are billed until
        # their scheduled expiry.
        end = self.closed_at or self.expiry
        return end - self.created

    def close(self):
        # Record the close time once, and only for rows already saved to
        # the database; closing twice is a no-op.
        if self.id and not self.closed_at:
            self.closed_at = datetime.datetime.now()
            self.save()

    def __str__(self):
        return 'DerivativeResource (%s, %s)' % (self.user_type, self.user_id)
class DerivativeResourceUsed(models.Model):
    """A billed usage interval recorded against a DerivativeResource."""
    resource = models.ForeignKey(DerivativeResource)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()
    # Amount charged for this interval.
    price = MoneyField()
class Account(models.Model):
    """A named billing account owned by a Django auth user.

    Each user gets one lazily-created "main" account (see
    ``get_main_for_user``); additional accounts may share the same
    ``billing_owner``.
    """
    billing_owner = models.ForeignKey(auth_models.User,
                                      related_name='accounts')
    is_main = models.BooleanField()
    name = models.CharField(max_length=100, unique=True)
    api_key = models.CharField(max_length=50)
    price_per_second_limit = models.FloatField(default=1e100)
    price_used = MoneyField(default=0.0)
    price_transferred_to = MoneyField(default=0.0)

    @property
    def price_balance(self):
        # Funds transferred in minus funds consumed.
        return self.price_transferred_to - self.price_used

    def __str__(self):
        return 'Account ' + self.name

    def get_api_key(self):
        # Lazily generate and persist a key on first access.
        # NOTE(review): returns bytes (str.encode()), not str -- confirm
        # callers expect a bytes key.
        if not self.api_key:
            self.regen_api_key()
            self.save()
        return self.api_key.encode()

    def regen_api_key(self):
        # 16 random bytes -> 32 hex characters; does not call save() itself.
        self.api_key = binascii.hexlify(os.urandom(16)).decode()

    def is_privileged(self, user):
        # Only the billing owner is privileged on the account.
        return user == self.billing_owner

    @property
    def api_auth(self):
        # (name, key) pair, e.g. for HTTP basic-auth style clients.
        return (self.name, self.api_key)

    @classmethod
    def get_account(self, name):
        # NOTE: classmethods in this class name their first argument `self`
        # (unconventionally); it is the class object here.
        try:
            return Account.objects.get(name=name)
        except Account.DoesNotExist as err:
            # maybe main account not yet created for user?
            try:
                user = auth_models.User.objects.get(username=name)
            except auth_models.User.DoesNotExist:
                raise err
            else:
                return Account.get_main_for_user(user)

    @classmethod
    @transaction.atomic
    def get_main_for_user(self, user):
        # Atomically fetch-or-create the user's main account, named after
        # the username.
        account, _ = self.objects.get_or_create(billing_owner=user, is_main=True,
                                                defaults=dict(name=user.username))
        return account
class Disk(models.Model):
    """A reference-counted disk image, optionally layered on a backing disk."""
    # owner may be NULL -- presumably for system-owned/shared disks; confirm.
    owner = models.ForeignKey('Account', null=True, blank=True)
    refcount = models.IntegerField(default=0)
    backing = models.ForeignKey('Disk', null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    timeout = models.FloatField(default=1e100)

    def is_privileged(self, account):
        # Only the owning account is privileged on a disk.
        return account == self.owner
class Template(models.Model):
    """A named, optionally public VM template backed by a disk image."""
    owner = models.ForeignKey('Account', null=True, blank=True, related_name='templates')
    public = models.BooleanField(default=False)
    disk = models.ForeignKey('Disk')
    name = models.CharField(max_length=300, null=True, blank=True)

    def is_privileged(self, account, operation):
        # Anyone may read a public template; every other operation is
        # restricted to the owner.
        if self.public and operation == 'read':
            return True
        return account == self.owner
| agpl-3.0 |
asm-products/movie-database-service | ani/lib/python2.7/site-packages/pymongo/ssl_match_hostname.py | 74 | 3599 | # Backport of the match_hostname logic introduced in python 3.2
# http://hg.python.org/releasing/3.3.5/file/993955b807b3/Lib/ssl.py
import re
class CertificateError(ValueError):
    """Raised when a certificate does not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    # Names tried and rejected, collected for the error message below.
    dnsnames = []
    # subjectAltName DNS entries take precedence; first match wins.
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # Nothing matched: report every candidate name that was tried.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
| agpl-3.0 |
kaushik94/sympy | sympy/physics/tests/test_secondquant.py | 1 | 48111 | from sympy.physics.secondquant import (
Dagger, Bd, VarBosonicBasis, BBra, B, BKet, FixedBosonicBasis,
matrix_rep, apply_operators, InnerProduct, Commutator, KroneckerDelta,
AnnihilateBoson, CreateBoson, BosonicOperator,
F, Fd, FKet, BosonState, CreateFermion, AnnihilateFermion,
evaluate_deltas, AntiSymmetricTensor, contraction, NO, wicks,
PermutationOperator, simplify_index_permutations,
_sort_anticommuting_fermions, _get_ordered_dummies,
substitute_dummies, FockStateBosonKet,
ContractionAppliesOnlyToFermions
)
from sympy import (Dummy, expand, Function, I, S, simplify, sqrt, Sum,
Symbol, symbols, srepr, Rational)
from sympy.core.compatibility import range
from sympy.utilities.pytest import XFAIL, slow, raises
from sympy.printing.latex import latex
def test_PermutationOperator():
    """P(p, q) swaps its two indices and flips the sign of the permuted term."""
    p, q, r, s = symbols('p,q,r,s')
    f, g, h, i = map(Function, 'fghi')
    P = PermutationOperator
    assert P(p, q).get_permuted(f(p)*g(q)) == -f(q)*g(p)
    assert P(p, q).get_permuted(f(p, q)) == -f(q, p)
    # A function of a single index is unaffected by the permutation.
    assert P(p, q).get_permuted(f(p)) == f(p)
    expr = (f(p)*g(q)*h(r)*i(s)
        - f(q)*g(p)*h(r)*i(s)
        - f(p)*g(q)*h(s)*i(r)
        + f(q)*g(p)*h(s)*i(r))
    perms = [P(p, q), P(r, s)]
    assert (simplify_index_permutations(expr, perms) ==
        P(p, q)*P(r, s)*f(p)*g(q)*h(r)*i(s))
    assert latex(P(p, q)) == 'P(pq)'


def test_index_permutations_with_dummies():
    """simplify_index_permutations must also cope with Dummy indices."""
    a, b, c, d = symbols('a b c d')
    p, q, r, s = symbols('p q r s', cls=Dummy)
    f, g = map(Function, 'fg')
    P = PermutationOperator

    # No dummy substitution necessary
    expr = f(a, b, p, q) - f(b, a, p, q)
    assert simplify_index_permutations(
        expr, [P(a, b)]) == P(a, b)*f(a, b, p, q)

    # Cases where dummy substitution is needed
    expected = P(a, b)*substitute_dummies(f(a, b, p, q))

    expr = f(a, b, p, q) - f(b, a, q, p)
    result = simplify_index_permutations(expr, [P(a, b)])
    assert expected == substitute_dummies(result)

    expr = f(a, b, q, p) - f(b, a, p, q)
    result = simplify_index_permutations(expr, [P(a, b)])
    assert expected == substitute_dummies(result)

    # A case where nothing can be done
    expr = f(a, b, q, p) - g(b, a, p, q)
    result = simplify_index_permutations(expr, [P(a, b)])
    assert expr == result
def test_dagger():
    """Dagger(): hermitian conjugation of numbers, operators, states, products."""
    i, j, n, m = symbols('i,j,n,m')
    assert Dagger(1) == 1
    assert Dagger(1.0) == 1.0
    assert Dagger(2*I) == -2*I
    assert Dagger(S.Half*I/3.0) == I*Rational(-1, 2)/3.0
    assert Dagger(BKet([n])) == BBra([n])
    assert Dagger(B(0)) == Bd(0)
    assert Dagger(Bd(0)) == B(0)
    assert Dagger(B(n)) == Bd(n)
    assert Dagger(Bd(n)) == B(n)
    assert Dagger(B(0) + B(1)) == Bd(0) + Bd(1)
    assert Dagger(n*m) == Dagger(n)*Dagger(m)  # n, m commute
    # Conjugating a product of operators reverses the factor order.
    assert Dagger(B(n)*B(m)) == Bd(m)*Bd(n)
    assert Dagger(B(n)**10) == Dagger(B(n))**10
    assert Dagger('a') == Dagger(Symbol('a'))
    assert Dagger(Dagger('a')) == Symbol('a')


def test_operator():
    """BosonicOperator keeps its state label and knows if it is symbolic."""
    i, j = symbols('i,j')
    o = BosonicOperator(i)
    assert o.state == i
    assert o.is_symbolic
    o = BosonicOperator(1)
    assert o.state == 1
    assert not o.is_symbolic


def test_create():
    """Bosonic creation operator Bd."""
    i, j, n, m = symbols('i,j,n,m')
    o = Bd(i)
    assert latex(o) == "b^\\dagger_{i}"
    assert isinstance(o, CreateBoson)
    o = o.subs(i, j)
    assert o.atoms(Symbol) == {j}
    o = Bd(0)
    assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
    o = Bd(n)
    # With a symbolic mode index the product stays unevaluated.
    assert o.apply_operator(BKet([n])) == o*BKet([n])


def test_annihilate():
    """Bosonic annihilation operator B."""
    i, j, n, m = symbols('i,j,n,m')
    o = B(i)
    assert latex(o) == "b_{i}"
    assert isinstance(o, AnnihilateBoson)
    o = o.subs(i, j)
    assert o.atoms(Symbol) == {j}
    o = B(0)
    assert o.apply_operator(BKet([n])) == sqrt(n)*BKet([n - 1])
    o = B(n)
    assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_basic_state():
    """BosonState: indexing plus occupation raising (up) and lowering (down)."""
    i, j, n, m = symbols('i,j,n,m')
    s = BosonState([0, 1, 2, 3, 4])
    assert len(s) == 5
    assert s.args[0] == tuple(range(5))
    assert s.up(0) == BosonState([1, 1, 2, 3, 4])
    assert s.down(4) == BosonState([0, 1, 2, 3, 3])
    for i in range(5):
        assert s.up(i).down(i) == s
    # Lowering an empty mode annihilates the state.
    assert s.down(0) == 0
    for i in range(5):
        assert s[i] == i
    s = BosonState([n, m])
    assert s.down(0) == BosonState([n - 1, m])
    assert s.up(0) == BosonState([n + 1, m])


def test_basic_apply():
    n = symbols("n")
    e = B(0)*BKet([n])
    assert apply_operators(e) == sqrt(n)*BKet([n - 1])
    e = Bd(0)*BKet([n])
    assert apply_operators(e) == sqrt(n + 1)*BKet([n + 1])


def test_complex_apply():
    n, m = symbols("n,m")
    o = Bd(0)*B(0)*Bd(1)*B(0)
    e = apply_operators(o*BKet([n, m]))
    answer = sqrt(n)*sqrt(m + 1)*(-1 + n)*BKet([-1 + n, 1 + m])
    assert expand(e) == expand(answer)


def test_number_operator():
    """Bd(0)*B(0) acts as the number operator for mode 0."""
    n = symbols("n")
    o = Bd(0)*B(0)
    e = apply_operators(o*BKet([n]))
    assert e == n*BKet([n])


def test_inner_product():
    i, j, k, l = symbols('i,j,k,l')
    s1 = BBra([0])
    s2 = BKet([1])
    assert InnerProduct(s1, Dagger(s1)) == 1
    assert InnerProduct(s1, s2) == 0
    s1 = BBra([i, j])
    s2 = BKet([k, l])
    r = InnerProduct(s1, s2)
    assert r == KroneckerDelta(i, k)*KroneckerDelta(j, l)
def test_symbolic_matrix_elements():
    n, m = symbols('n,m')
    s1 = BBra([n])
    s2 = BKet([m])
    o = B(0)
    e = apply_operators(s1*o*s2)
    assert e == sqrt(m)*KroneckerDelta(n, m - 1)


def test_matrix_elements():
    b = VarBosonicBasis(5)
    o = B(0)
    m = matrix_rep(o, b)
    # Annihilator: non-zero entries on the first superdiagonal.
    for i in range(4):
        assert m[i, i + 1] == sqrt(i + 1)
    o = Bd(0)
    m = matrix_rep(o, b)
    # Creator: transposed pattern, first subdiagonal.
    for i in range(4):
        assert m[i + 1, i] == sqrt(i + 1)


def test_fixed_bosonic_basis():
    b = FixedBosonicBasis(2, 2)
    # assert b == [FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]
    state = b.state(1)
    assert state == FockStateBosonKet((1, 1))
    assert b.index(state) == 1
    assert b.state(1) == b[1]
    assert len(b) == 3
    assert str(b) == '[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]'
    assert repr(b) == '[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]'
    assert srepr(b) == '[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]'


@slow
def test_sho():
    """Diagonal of a truncated harmonic-oscillator Hamiltonian."""
    n, m = symbols('n,m')
    h_n = Bd(n)*B(n)*(n + S.Half)
    H = Sum(h_n, (n, 0, 5))
    o = H.doit(deep=False)
    b = FixedBosonicBasis(2, 6)
    m = matrix_rep(o, b)
    # We need to double check these energy values to make sure that they
    # are correct and have the proper degeneracies!
    diag = [1, 2, 3, 3, 4, 5, 4, 5, 6, 7, 5, 6, 7, 8, 9, 6, 7, 8, 9, 10, 11]
    for i in range(len(diag)):
        assert diag[i] == m[i, i]
def test_commutation():
    """Commutators of bosonic and fermionic operators and their printing."""
    n, m = symbols("n,m", above_fermi=True)
    c = Commutator(B(0), Bd(0))
    assert c == 1
    c = Commutator(Bd(0), B(0))
    assert c == -1
    c = Commutator(B(n), Bd(0))
    assert c == KroneckerDelta(n, 0)
    c = Commutator(B(0), B(0))
    assert c == 0
    c = Commutator(B(0), Bd(0))
    e = simplify(apply_operators(c*BKet([n])))
    assert e == BKet([n])
    c = Commutator(B(0), B(1))
    e = simplify(apply_operators(c*BKet([n, m])))
    assert e == 0

    # Fermionic commutators expand in terms of normal-ordered products.
    c = Commutator(F(m), Fd(m))
    assert c == +1 - 2*NO(Fd(m)*F(m))
    c = Commutator(Fd(m), F(m))
    assert c.expand() == -1 + 2*NO(Fd(m)*F(m))

    C = Commutator
    X, Y, Z = symbols('X,Y,Z', commutative=False)
    assert C(C(X, Y), Z) != 0
    assert C(C(X, Z), Y) != 0
    assert C(Y, C(X, Z)) != 0

    i, j, k, l = symbols('i,j,k,l', below_fermi=True)
    a, b, c, d = symbols('a,b,c,d', above_fermi=True)
    p, q, r, s = symbols('p,q,r,s')
    D = KroneckerDelta

    assert C(Fd(a), F(i)) == -2*NO(F(i)*Fd(a))
    assert C(Fd(j), NO(Fd(a)*F(i))).doit(wicks=True) == -D(j, i)*Fd(a)
    assert C(Fd(a)*F(i), Fd(b)*F(j)).doit(wicks=True) == 0

    c1 = Commutator(F(a), Fd(a))
    assert Commutator.eval(c1, c1) == 0
    c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
    assert latex(c) == r'\left[a^\dagger_{a} a_{i},a^\dagger_{b} a_{j}\right]'
    assert repr(c) == 'Commutator(CreateFermion(a)*AnnihilateFermion(i),CreateFermion(b)*AnnihilateFermion(j))'
    assert str(c) == '[CreateFermion(a)*AnnihilateFermion(i),CreateFermion(b)*AnnihilateFermion(j)]'
def test_create_f():
    """Fermionic creation operator Fd acting on Fock states."""
    i, j, n, m = symbols('i,j,n,m')
    o = Fd(i)
    assert isinstance(o, CreateFermion)
    o = o.subs(i, j)
    assert o.atoms(Symbol) == {j}
    o = Fd(1)
    assert o.apply_operator(FKet([n])) == FKet([1, n])
    # Antisymmetry: the same ket with swapped labels carries a minus sign.
    assert o.apply_operator(FKet([n])) == -FKet([n, 1])
    o = Fd(n)
    assert o.apply_operator(FKet([])) == FKet([n])

    vacuum = FKet([], fermi_level=4)
    assert vacuum == FKet([], fermi_level=4)

    i, j, k, l = symbols('i,j,k,l', below_fermi=True)
    a, b, c, d = symbols('a,b,c,d', above_fermi=True)
    p, q, r, s = symbols('p,q,r,s')

    # With a Fermi level, creating a hole index removes it from the ket.
    assert Fd(i).apply_operator(FKet([i, j, k], 4)) == FKet([j, k], 4)
    assert Fd(a).apply_operator(FKet([i, b, k], 4)) == FKet([a, i, b, k], 4)

    assert Dagger(B(p)).apply_operator(q) == q*CreateBoson(p)
    assert repr(Fd(p)) == 'CreateFermion(p)'
    assert srepr(Fd(p)) == "CreateFermion(Symbol('p'))"
    assert latex(Fd(p)) == r'a^\dagger_{p}'


def test_annihilate_f():
    """Fermionic annihilation operator F acting on Fock states."""
    i, j, n, m = symbols('i,j,n,m')
    o = F(i)
    assert isinstance(o, AnnihilateFermion)
    o = o.subs(i, j)
    assert o.atoms(Symbol) == {j}
    o = F(1)
    assert o.apply_operator(FKet([1, n])) == FKet([n])
    assert o.apply_operator(FKet([n, 1])) == -FKet([n])
    o = F(n)
    assert o.apply_operator(FKet([n])) == FKet([])

    i, j, k, l = symbols('i,j,k,l', below_fermi=True)
    a, b, c, d = symbols('a,b,c,d', above_fermi=True)
    p, q, r, s = symbols('p,q,r,s')
    assert F(i).apply_operator(FKet([i, j, k], 4)) == 0
    assert F(a).apply_operator(FKet([i, b, k], 4)) == 0
    assert F(l).apply_operator(FKet([i, j, k], 3)) == 0
    assert F(l).apply_operator(FKet([i, j, k], 4)) == FKet([l, i, j, k], 4)
    assert str(F(p)) == 'f(p)'
    assert repr(F(p)) == 'AnnihilateFermion(p)'
    assert srepr(F(p)) == "AnnihilateFermion(Symbol('p'))"
    assert latex(F(p)) == 'a_{p}'


def test_create_b():
    """Bosonic creation operator Bd (overlaps with test_create above)."""
    i, j, n, m = symbols('i,j,n,m')
    o = Bd(i)
    assert isinstance(o, CreateBoson)
    o = o.subs(i, j)
    assert o.atoms(Symbol) == {j}
    o = Bd(0)
    assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
    o = Bd(n)
    assert o.apply_operator(BKet([n])) == o*BKet([n])


def test_annihilate_b():
    # NOTE(review): this test appears truncated in this copy -- it constructs
    # B(0) at the end but asserts nothing about it.
    i, j, n, m = symbols('i,j,n,m')
    o = B(i)
    assert isinstance(o, AnnihilateBoson)
    o = o.subs(i, j)
    assert o.atoms(Symbol) == {j}
    o = B(0)
def test_wicks():
    """Wick's theorem: expansion into normal-ordered products + contractions."""
    p, q, r, s = symbols('p,q,r,s', above_fermi=True)

    # Testing for particles only

    str = F(p)*Fd(q)  # NOTE(review): shadows the builtin `str` inside this test
    assert wicks(str) == NO(F(p)*Fd(q)) + KroneckerDelta(p, q)
    str = Fd(p)*F(q)
    assert wicks(str) == NO(Fd(p)*F(q))

    str = F(p)*Fd(q)*F(r)*Fd(s)
    nstr = wicks(str)
    fasit = NO(
        KroneckerDelta(p, q)*KroneckerDelta(r, s)
        + KroneckerDelta(p, q)*AnnihilateFermion(r)*CreateFermion(s)
        + KroneckerDelta(r, s)*AnnihilateFermion(p)*CreateFermion(q)
        - KroneckerDelta(p, s)*AnnihilateFermion(r)*CreateFermion(q)
        - AnnihilateFermion(p)*AnnihilateFermion(r)*CreateFermion(q)*CreateFermion(s))
    assert nstr == fasit

    # Commuting prefactors can be pulled through wicks().
    assert (p*q*nstr).expand() == wicks(p*q*str)
    assert (nstr*p*q*2).expand() == wicks(str*p*q*2)

    # Testing CC equations particles and holes
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
    p, q, r, s = symbols('p q r s', cls=Dummy)

    assert (wicks(F(a)*NO(F(i)*F(j))*Fd(b)) ==
        NO(F(a)*F(i)*F(j)*Fd(b)) +
        KroneckerDelta(a, b)*NO(F(i)*F(j)))
    assert (wicks(F(a)*NO(F(i)*F(j)*F(k))*Fd(b)) ==
        NO(F(a)*F(i)*F(j)*F(k)*Fd(b)) -
        KroneckerDelta(a, b)*NO(F(i)*F(j)*F(k)))

    expr = wicks(Fd(i)*NO(Fd(j)*F(k))*F(l))
    assert (expr ==
        -KroneckerDelta(i, k)*NO(Fd(j)*F(l)) -
        KroneckerDelta(j, l)*NO(Fd(i)*F(k)) -
        KroneckerDelta(i, k)*KroneckerDelta(j, l) +
        KroneckerDelta(i, l)*NO(Fd(j)*F(k)) +
        NO(Fd(i)*Fd(j)*F(k)*F(l)))
    expr = wicks(F(a)*NO(F(b)*Fd(c))*Fd(d))
    assert (expr ==
        -KroneckerDelta(a, c)*NO(F(b)*Fd(d)) -
        KroneckerDelta(b, d)*NO(F(a)*Fd(c)) -
        KroneckerDelta(a, c)*KroneckerDelta(b, d) +
        KroneckerDelta(a, d)*NO(F(b)*Fd(c)) +
        NO(F(a)*F(b)*Fd(c)*Fd(d)))
def test_NO():
    """Normal-ordering operator NO: linearity, nesting, sign rules, printing."""
    i, j, k, l = symbols('i j k l', below_fermi=True)
    a, b, c, d = symbols('a b c d', above_fermi=True)
    p, q, r, s = symbols('p q r s', cls=Dummy)

    assert (NO(Fd(p)*F(q) + Fd(a)*F(b)) ==
        NO(Fd(p)*F(q)) + NO(Fd(a)*F(b)))
    # Nested NOs flatten.
    assert (NO(Fd(i)*NO(F(j)*Fd(a))) ==
        NO(Fd(i)*F(j)*Fd(a)))
    assert NO(1) == 1
    assert NO(i) == i
    assert (NO(Fd(a)*Fd(b)*(F(c) + F(d))) ==
        NO(Fd(a)*Fd(b)*F(c)) +
        NO(Fd(a)*Fd(b)*F(d)))

    assert NO(Fd(a)*F(b))._remove_brackets() == Fd(a)*F(b)
    assert NO(F(j)*Fd(i))._remove_brackets() == F(j)*Fd(i)

    assert (NO(Fd(p)*F(q)).subs(Fd(p), Fd(a) + Fd(i)) ==
        NO(Fd(a)*F(q)) + NO(Fd(i)*F(q)))
    assert (NO(Fd(p)*F(q)).subs(F(q), F(a) + F(i)) ==
        NO(Fd(p)*F(a)) + NO(Fd(p)*F(i)))

    expr = NO(Fd(p)*F(q))._remove_brackets()
    assert wicks(expr) == NO(expr)

    # Reordering inside NO flips the sign (fermions anticommute).
    assert NO(Fd(a)*F(b)) == - NO(F(b)*Fd(a))

    no = NO(Fd(a)*F(i)*F(b)*Fd(j))
    l1 = [ ind for ind in no.iter_q_creators() ]
    assert l1 == [0, 1]
    l2 = [ ind for ind in no.iter_q_annihilators() ]
    assert l2 == [3, 2]
    no = NO(Fd(a)*Fd(i))
    assert no.has_q_creators == 1
    assert no.has_q_annihilators == -1
    assert str(no) == ':CreateFermion(a)*CreateFermion(i):'
    assert repr(no) == 'NO(CreateFermion(a)*CreateFermion(i))'
    assert latex(no) == r'\left\{a^\dagger_{a} a^\dagger_{i}\right\}'
    raises(NotImplementedError, lambda: NO(Bd(p)*F(q)))
def test_sorting():
    """_sort_anticommuting_fermions -> (sorted operators, swap count parity)."""
    i, j = symbols('i,j', below_fermi=True)
    a, b = symbols('a,b', above_fermi=True)
    p, q = symbols('p,q')

    # p, q
    assert _sort_anticommuting_fermions([Fd(p), F(q)]) == ([Fd(p), F(q)], 0)
    assert _sort_anticommuting_fermions([F(p), Fd(q)]) == ([Fd(q), F(p)], 1)

    # i, p
    assert _sort_anticommuting_fermions([F(p), Fd(i)]) == ([F(p), Fd(i)], 0)
    assert _sort_anticommuting_fermions([Fd(i), F(p)]) == ([F(p), Fd(i)], 1)
    assert _sort_anticommuting_fermions([Fd(p), Fd(i)]) == ([Fd(p), Fd(i)], 0)
    assert _sort_anticommuting_fermions([Fd(i), Fd(p)]) == ([Fd(p), Fd(i)], 1)
    assert _sort_anticommuting_fermions([F(p), F(i)]) == ([F(i), F(p)], 1)
    assert _sort_anticommuting_fermions([F(i), F(p)]) == ([F(i), F(p)], 0)
    assert _sort_anticommuting_fermions([Fd(p), F(i)]) == ([F(i), Fd(p)], 1)
    assert _sort_anticommuting_fermions([F(i), Fd(p)]) == ([F(i), Fd(p)], 0)

    # a, p
    assert _sort_anticommuting_fermions([F(p), Fd(a)]) == ([Fd(a), F(p)], 1)
    assert _sort_anticommuting_fermions([Fd(a), F(p)]) == ([Fd(a), F(p)], 0)
    assert _sort_anticommuting_fermions([Fd(p), Fd(a)]) == ([Fd(a), Fd(p)], 1)
    assert _sort_anticommuting_fermions([Fd(a), Fd(p)]) == ([Fd(a), Fd(p)], 0)
    assert _sort_anticommuting_fermions([F(p), F(a)]) == ([F(p), F(a)], 0)
    assert _sort_anticommuting_fermions([F(a), F(p)]) == ([F(p), F(a)], 1)
    assert _sort_anticommuting_fermions([Fd(p), F(a)]) == ([Fd(p), F(a)], 0)
    assert _sort_anticommuting_fermions([F(a), Fd(p)]) == ([Fd(p), F(a)], 1)

    # i, a
    assert _sort_anticommuting_fermions([F(i), Fd(j)]) == ([F(i), Fd(j)], 0)
    assert _sort_anticommuting_fermions([Fd(j), F(i)]) == ([F(i), Fd(j)], 1)
    assert _sort_anticommuting_fermions([Fd(a), Fd(i)]) == ([Fd(a), Fd(i)], 0)
    assert _sort_anticommuting_fermions([Fd(i), Fd(a)]) == ([Fd(a), Fd(i)], 1)
    assert _sort_anticommuting_fermions([F(a), F(i)]) == ([F(i), F(a)], 1)
    assert _sort_anticommuting_fermions([F(i), F(a)]) == ([F(i), F(a)], 0)
def test_contraction():
    """Pairwise Wick contractions between fermionic operators."""
    i, j, k, l = symbols('i,j,k,l', below_fermi=True)
    a, b, c, d = symbols('a,b,c,d', above_fermi=True)
    p, q, r, s = symbols('p,q,r,s')
    assert contraction(Fd(i), F(j)) == KroneckerDelta(i, j)
    assert contraction(F(a), Fd(b)) == KroneckerDelta(a, b)
    # Hole/particle cross contractions vanish.
    assert contraction(F(a), Fd(i)) == 0
    assert contraction(Fd(a), F(i)) == 0
    assert contraction(F(i), Fd(a)) == 0
    assert contraction(Fd(i), F(a)) == 0
    assert contraction(Fd(i), F(p)) == KroneckerDelta(i, p)
    restr = evaluate_deltas(contraction(Fd(p), F(q)))
    assert restr.is_only_below_fermi
    restr = evaluate_deltas(contraction(F(p), Fd(q)))
    assert restr.is_only_above_fermi
    raises(ContractionAppliesOnlyToFermions, lambda: contraction(B(a), Fd(b)))


def test_evaluate_deltas():
    """KroneckerDelta chains collapse; numeric/preferred indices survive."""
    i, j, k = symbols('i,j,k')

    r = KroneckerDelta(i, j) * KroneckerDelta(j, k)
    assert evaluate_deltas(r) == KroneckerDelta(i, k)

    r = KroneckerDelta(i, 0) * KroneckerDelta(j, k)
    assert evaluate_deltas(r) == KroneckerDelta(i, 0) * KroneckerDelta(j, k)

    r = KroneckerDelta(1, j) * KroneckerDelta(j, k)
    assert evaluate_deltas(r) == KroneckerDelta(1, k)

    r = KroneckerDelta(j, 2) * KroneckerDelta(k, j)
    assert evaluate_deltas(r) == KroneckerDelta(2, k)

    # Contradictory fixed values make the whole product vanish.
    r = KroneckerDelta(i, 0) * KroneckerDelta(i, j) * KroneckerDelta(j, 1)
    assert evaluate_deltas(r) == 0

    r = (KroneckerDelta(0, i) * KroneckerDelta(0, j)
        * KroneckerDelta(1, j) * KroneckerDelta(1, j))
    assert evaluate_deltas(r) == 0
def test_Tensors():
    """AntiSymmetricTensor: antisymmetry within upper and lower index groups."""
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
    p, q, r, s = symbols('p q r s')

    AT = AntiSymmetricTensor
    assert AT('t', (a, b), (i, j)) == -AT('t', (b, a), (i, j))
    assert AT('t', (a, b), (i, j)) == AT('t', (b, a), (j, i))
    assert AT('t', (a, b), (i, j)) == -AT('t', (a, b), (j, i))
    # A repeated index within one group makes the tensor vanish.
    assert AT('t', (a, a), (i, j)) == 0
    assert AT('t', (a, b), (i, i)) == 0
    assert AT('t', (a, b, c), (i, j)) == -AT('t', (b, a, c), (i, j))
    assert AT('t', (a, b, c), (i, j, k)) == AT('t', (b, a, c), (i, k, j))
    tabij = AT('t', (a, b), (i, j))
    assert tabij.has(a)
    assert tabij.has(b)
    assert tabij.has(i)
    assert tabij.has(j)
    assert tabij.subs(b, c) == AT('t', (a, c), (i, j))
    assert (2*tabij).subs(i, c) == 2*AT('t', (a, b), (c, j))
    assert tabij.symbol == Symbol('t')
    assert latex(tabij) == 't^{ab}_{ij}'
    assert str(tabij) == 't((_a, _b),(_i, _j))'

    assert AT('t', (a, a), (i, j)).subs(a, b) == AT('t', (b, b), (i, j))
    assert AT('t', (a, i), (a, j)).subs(a, b) == AT('t', (b, i), (b, j))


def test_fully_contracted():
    """Fully contracted Wick expansions of one- and two-body operators."""
    i, j, k, l = symbols('i j k l', below_fermi=True)
    a, b, c, d = symbols('a b c d', above_fermi=True)
    p, q, r, s = symbols('p q r s', cls=Dummy)

    Fock = (AntiSymmetricTensor('f', (p,), (q,))*
            NO(Fd(p)*F(q)))
    V = (AntiSymmetricTensor('v', (p, q), (r, s))*
         NO(Fd(p)*Fd(q)*F(s)*F(r)))/4

    Fai = wicks(NO(Fd(i)*F(a))*Fock,
            keep_only_fully_contracted=True,
            simplify_kronecker_deltas=True)
    assert Fai == AntiSymmetricTensor('f', (a,), (i,))
    Vabij = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*V,
            keep_only_fully_contracted=True,
            simplify_kronecker_deltas=True)
    assert Vabij == AntiSymmetricTensor('v', (a, b), (i, j))
def test_substitute_dummies_without_dummies():
    # NOTE(review): `att` is presumably a module-level helper defined elsewhere
    # in this file (outside this chunk) -- confirm before moving these tests.
    i, j = symbols('i,j')
    assert substitute_dummies(att(i, j) + 2) == att(i, j) + 2
    assert substitute_dummies(att(i, j) + 1) == att(i, j) + 1


def test_substitute_dummies_NO_operator():
    i, j = symbols('i j', cls=Dummy)
    assert substitute_dummies(att(i, j)*NO(Fd(i)*F(j))
        - att(j, i)*NO(Fd(j)*F(i))) == 0


def test_substitute_dummies_SQ_operator():
    i, j = symbols('i j', cls=Dummy)
    assert substitute_dummies(att(i, j)*Fd(i)*F(j)
        - att(j, i)*Fd(j)*F(i)) == 0


def test_substitute_dummies_new_indices():
    i, j = symbols('i j', below_fermi=True, cls=Dummy)
    a, b = symbols('a b', above_fermi=True, cls=Dummy)
    p, q = symbols('p q', cls=Dummy)
    f = Function('f')
    assert substitute_dummies(f(i, a, p) - f(j, b, q), new_indices=True) == 0


def test_substitute_dummies_substitution_order():
    # The canonical substitution must be independent of the argument order.
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    f = Function('f')
    from sympy.utilities.iterables import variations
    for permut in variations([i, j, k, l], 4):
        assert substitute_dummies(f(*permut) - f(i, j, k, l)) == 0
def test_dummy_order_inner_outer_lines_VT1T1T1():
    """Canonical dummy ordering for Coupled-Cluster T1 diagrams V*T1*T1*T1."""
    ii = symbols('i', below_fermi=True)
    aa = symbols('a', above_fermi=True)
    k, l = symbols('k l', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)

    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    # Coupled-Cluster T1 terms with V*T1*T1*T1
    # t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
    exprs = [
        # permut v and t <=> swapping internal lines, equivalent
        # irrespective of symmetries in v
        v(k, l, c, d)*t(c, ii)*t(d, l)*t(aa, k),
        v(l, k, c, d)*t(c, ii)*t(d, k)*t(aa, l),
        v(k, l, d, c)*t(d, ii)*t(c, l)*t(aa, k),
        v(l, k, d, c)*t(d, ii)*t(c, k)*t(aa, l),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)


def test_dummy_order_inner_outer_lines_VT1T1T1T1():
    """Canonical dummy ordering for Coupled-Cluster T2 diagrams V*T1*T1*T1*T1."""
    ii, jj = symbols('i j', below_fermi=True)
    aa, bb = symbols('a b', above_fermi=True)
    k, l = symbols('k l', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)

    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    # Coupled-Cluster T2 terms with V*T1*T1*T1*T1
    exprs = [
        # permut t <=> swapping external lines, not equivalent
        # except if v has certain symmetries.
        v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
        v(k, l, c, d)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
        v(k, l, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
        v(k, l, c, d)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [
        # permut v <=> swapping external lines, not equivalent
        # except if v has certain symmetries.
        #
        # Note that in contrast to above, these permutations have identical
        # dummy order.  That is because the proximity to external indices
        # has higher influence on the canonical dummy ordering than the
        # position of a dummy on the factors.  In fact, the terms here are
        # similar in structure as the result of the dummy substitutions above.
        v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
        v(l, k, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
        v(k, l, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
        v(l, k, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) == dums(permut)
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [
        # permut t and v <=> swapping internal lines, equivalent.
        # Canonical dummy order is different, and a consistent
        # substitution reveals the equivalence.
        v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
        v(k, l, d, c)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
        v(l, k, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
        v(l, k, d, c)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_get_subNO():
p, q, r = symbols('p,q,r')
assert NO(F(p)*F(q)*F(r)).get_subNO(1) == NO(F(p)*F(r))
assert NO(F(p)*F(q)*F(r)).get_subNO(0) == NO(F(q)*F(r))
assert NO(F(p)*F(q)*F(r)).get_subNO(2) == NO(F(p)*F(q))
def test_equivalent_internal_lines_VT1T1():
    """Dummy-substitution behaviour of V*T1*T1 terms under index permutation.

    Each ``exprs`` group pairs a reference term with permuted variants and
    pins down two properties: whether the canonical dummy ordering
    (``_get_ordered_dummies``) distinguishes the terms, and whether
    ``substitute_dummies`` identifies them as equal.  Swapping internal
    lines (permuting v and t together) is an equivalence; permuting only
    one factor is not.
    """
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    exprs = [  # permute v. Different dummy order. Not equivalent.
        v(i, j, a, b)*t(a, i)*t(b, j),
        v(j, i, a, b)*t(a, i)*t(b, j),
        v(i, j, b, a)*t(a, i)*t(b, j),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [  # permute v. Different dummy order. Equivalent
        v(i, j, a, b)*t(a, i)*t(b, j),
        v(j, i, b, a)*t(a, i)*t(b, j),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)

    exprs = [  # permute t. Same dummy order, not equivalent.
        v(i, j, a, b)*t(a, i)*t(b, j),
        v(i, j, a, b)*t(b, i)*t(a, j),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) == dums(permut)
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [  # permute v and t. Different dummy order, equivalent
        v(i, j, a, b)*t(a, i)*t(b, j),
        v(j, i, a, b)*t(a, j)*t(b, i),
        v(i, j, b, a)*t(b, i)*t(a, j),
        v(j, i, b, a)*t(b, j)*t(a, i),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2():
    """All particle-index permutations of V*T2*T2' terms must be equivalent.

    For each template, every permutation of the four above-fermi dummies
    must produce a different canonical dummy order but an identical result
    after ``substitute_dummies``.
    """
    # this diagram requires special handling in TCE
    i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
    a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
    p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
    h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)

    from sympy.utilities.iterables import variations

    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    # v(abcd)t(abij)t(ijcd)
    template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(i, j, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert dums(base) != dums(expr)
        assert substitute_dummies(expr) == substitute_dummies(base)
    template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(j, i, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert dums(base) != dums(expr)
        assert substitute_dummies(expr) == substitute_dummies(base)

    # v(abcd)t(abij)t(jicd)
    template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(j, i, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert dums(base) != dums(expr)
        assert substitute_dummies(expr) == substitute_dummies(base)
    template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(i, j, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert dums(base) != dums(expr)
        assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order():
    """Permutation equivalence for terms that need ambiguity resolution.

    These diagrams exercise the ``_determine_ambiguous()`` code path:
    the dummies cannot be ordered unambiguously by the sort key alone.
    """
    # These diagrams invokes _determine_ambiguous() because the
    # dummies can not be ordered unambiguously by the key alone
    i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
    a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
    p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
    h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)

    from sympy.utilities.iterables import variations

    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    # v(abcd)t(abij)t(cdij)
    template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(p3, p4, i, j)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert dums(base) != dums(expr)
        assert substitute_dummies(expr) == substitute_dummies(base)
    template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(p3, p4, i, j)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert dums(base) != dums(expr)
        assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2():
    """Dummy-substitution behaviour of V*T2 terms under index permutation.

    Only permuting v and t together (a relabelling of dummies) is an
    equivalence; permuting a single factor is not, and some permutations
    even share the same canonical dummy order without being equivalent.
    """
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies
    exprs = [
        # permute v. Same dummy order, not equivalent.
        #
        # This test show that the dummy order may not be sensitive to all
        # index permutations.  The following expressions have identical
        # structure as the resulting terms from of the dummy substitutions
        # in the test above.  Here, all expressions have the same dummy
        # order, so they cannot be simplified by means of dummy
        # substitution.  In order to simplify further, it is necessary to
        # exploit symmetries in the objects, for instance if t or v is
        # antisymmetric.
        v(i, j, a, b)*t(a, b, i, j),
        v(j, i, a, b)*t(a, b, i, j),
        v(i, j, b, a)*t(a, b, i, j),
        v(j, i, b, a)*t(a, b, i, j),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) == dums(permut)
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [
        # permute t.
        v(i, j, a, b)*t(a, b, i, j),
        v(i, j, a, b)*t(b, a, i, j),
        v(i, j, a, b)*t(a, b, j, i),
        v(i, j, a, b)*t(b, a, j, i),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [  # permute v and t.  Relabelling of dummies should be equivalent.
        v(i, j, a, b)*t(a, b, i, j),
        v(j, i, a, b)*t(a, b, j, i),
        v(i, j, b, a)*t(b, a, i, j),
        v(j, i, b, a)*t(b, a, j, i),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2():
    """Internal-line swaps in V*T2*T2 terms with external indices present.

    ``ii, jj, aa, bb`` are external (non-dummy) indices; ``k, l, c, d``
    are internal dummies.  Each group permutes v together with the t
    amplitudes (an internal-line swap), so all variants must be
    recognized as equivalent by ``substitute_dummies``.
    """
    ii, jj = symbols('i j', below_fermi=True)
    aa, bb = symbols('a b', above_fermi=True)
    k, l = symbols('k l', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)
    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    exprs = [
        v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
        v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
        v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
        v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
    exprs = [
        v(k, l, c, d)*t(aa, c, ii, k)*t(d, bb, jj, l),
        v(l, k, c, d)*t(aa, c, ii, l)*t(d, bb, jj, k),
        v(k, l, d, c)*t(aa, d, ii, k)*t(c, bb, jj, l),
        v(l, k, d, c)*t(aa, d, ii, l)*t(c, bb, jj, k),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
    exprs = [
        v(k, l, c, d)*t(c, aa, ii, k)*t(bb, d, jj, l),
        v(l, k, c, d)*t(c, aa, ii, l)*t(bb, d, jj, k),
        v(k, l, d, c)*t(d, aa, ii, k)*t(bb, c, jj, l),
        v(l, k, d, c)*t(d, aa, ii, l)*t(bb, c, jj, k),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs():
    """Same internal-line swap check, but with general (pqrs) indices.

    Unlike ``test_internal_external_VT2T2``, no index carries an
    above/below-fermi restriction here.
    """
    ii, jj = symbols('i j')
    aa, bb = symbols('a b')
    k, l = symbols('k l', cls=Dummy)
    c, d = symbols('c d', cls=Dummy)
    v = Function('v')
    t = Function('t')
    dums = _get_ordered_dummies

    exprs = [
        v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
        v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
        v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
        v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
    ]
    for permut in exprs[1:]:
        assert dums(exprs[0]) != dums(permut)
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_well_defined():
    """Canonical dummy ordering for fully orderable expressions.

    Exercises each component of the ordering key in turn, in order of
    increasing priority: position within the first factor, which factors
    involve the index, position determined by non-dummy indices, and
    finally the index range (below-fermi < above-fermi < general).
    """
    aa, bb = symbols('a b', above_fermi=True)
    k, l, m = symbols('k l m', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)
    p, q = symbols('p q', cls=Dummy)

    A = Function('A')
    B = Function('B')
    C = Function('C')
    dums = _get_ordered_dummies

    # We go through all key components in the order of increasing priority,
    # and consider only fully orderable expressions.  Non-orderable expressions
    # are tested elsewhere.

    # pos in first factor determines sort order
    assert dums(A(k, l)*B(l, k)) == [k, l]
    assert dums(A(l, k)*B(l, k)) == [l, k]
    assert dums(A(k, l)*B(k, l)) == [k, l]
    assert dums(A(l, k)*B(k, l)) == [l, k]

    # factors involving the index
    assert dums(A(k, l)*B(l, m)*C(k, m)) == [l, k, m]
    assert dums(A(k, l)*B(l, m)*C(m, k)) == [l, k, m]
    assert dums(A(l, k)*B(l, m)*C(k, m)) == [l, k, m]
    assert dums(A(l, k)*B(l, m)*C(m, k)) == [l, k, m]
    assert dums(A(k, l)*B(m, l)*C(k, m)) == [l, k, m]
    assert dums(A(k, l)*B(m, l)*C(m, k)) == [l, k, m]
    assert dums(A(l, k)*B(m, l)*C(k, m)) == [l, k, m]
    assert dums(A(l, k)*B(m, l)*C(m, k)) == [l, k, m]

    # same, but with factor order determined by non-dummies
    assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
    assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
    assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
    assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
    assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
    assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
    assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
    assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]

    # index range
    assert dums(A(p, c, k)*B(p, c, k)) == [k, c, p]
    assert dums(A(p, k, c)*B(p, c, k)) == [k, c, p]
    assert dums(A(c, k, p)*B(p, c, k)) == [k, c, p]
    assert dums(A(c, p, k)*B(p, c, k)) == [k, c, p]
    assert dums(A(k, c, p)*B(p, c, k)) == [k, c, p]
    assert dums(A(k, p, c)*B(p, c, k)) == [k, c, p]
    assert dums(B(p, c, k)*A(p, c, k)) == [k, c, p]
    assert dums(B(p, k, c)*A(p, c, k)) == [k, c, p]
    assert dums(B(c, k, p)*A(p, c, k)) == [k, c, p]
    assert dums(B(c, p, k)*A(p, c, k)) == [k, c, p]
    assert dums(B(k, c, p)*A(p, c, k)) == [k, c, p]
    assert dums(B(k, p, c)*A(p, c, k)) == [k, c, p]
def test_dummy_order_ambiguous():
    """Substitution must be stable even when the dummy order is ambiguous.

    For templates where the sort key alone cannot order all dummies, all
    permutations of the substituted symbols must still collapse to the
    same expression after ``substitute_dummies``.
    """
    aa, bb = symbols('a b', above_fermi=True)
    i, j, k, l, m = symbols('i j k l m', below_fermi=True, cls=Dummy)
    a, b, c, d, e = symbols('a b c d e', above_fermi=True, cls=Dummy)
    p, q = symbols('p q', cls=Dummy)
    p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
    p5, p6, p7, p8 = symbols('p5 p6 p7 p8', above_fermi=True, cls=Dummy)
    h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
    h5, h6, h7, h8 = symbols('h5 h6 h7 h8', below_fermi=True, cls=Dummy)

    A = Function('A')
    B = Function('B')

    from sympy.utilities.iterables import variations

    # A*A*A*A*B -- ordering of p5 and p4 is used to figure out the rest
    template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*B(p5, p4)
    permutator = variations([a, b, c, d, e], 5)
    base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4, p5], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)

    # A*A*A*A*A -- an arbitrary index is assigned and the rest are figured out
    template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*A(p5, p4)
    permutator = variations([a, b, c, d, e], 5)
    base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4, p5], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)

    # A*A*A -- ordering of p5 and p4 is used to figure out the rest
    template = A(p1, p2, p4, p1)*A(p2, p3, p3, p5)*A(p5, p4)
    permutator = variations([a, b, c, d, e], 5)
    base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4, p5], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)
def atv(*args):
    """Shorthand for an antisymmetric 'v' tensor.

    The first two indices become the upper indices, the remainder the
    lower ones.
    """
    upper, lower = args[:2], args[2:]
    return AntiSymmetricTensor('v', upper, lower)
def att(*args):
    """Shorthand for an antisymmetric 't' amplitude tensor.

    Accepts either four indices (two upper, two lower -- a T2 amplitude)
    or two indices (one upper, one lower -- a T1 amplitude).

    Raises:
        ValueError: for any other number of indices.  The original code
            fell through and silently returned ``None``, which surfaced
            only much later as a confusing error in the caller.
    """
    if len(args) == 4:
        return AntiSymmetricTensor('t', args[:2], args[2:])
    elif len(args) == 2:
        return AntiSymmetricTensor('t', (args[0],), (args[1],))
    raise ValueError("att() expects 2 or 4 indices, got %d" % len(args))
def test_dummy_order_inner_outer_lines_VT1T1T1_AT():
    """Internal-line swaps of antisymmetric V*T1*T1*T1 terms are equivalent.

    Uses the ``atv``/``att`` AntiSymmetricTensor shorthands, so the
    equivalence holds irrespective of any extra symmetry assumptions on v.
    """
    ii = symbols('i', below_fermi=True)
    aa = symbols('a', above_fermi=True)
    k, l = symbols('k l', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)

    # Coupled-Cluster T1 terms with V*T1*T1*T1
    # t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
    exprs = [
        # permut v and t <=> swapping internal lines, equivalent
        # irrespective of symmetries in v
        atv(k, l, c, d)*att(c, ii)*att(d, l)*att(aa, k),
        atv(l, k, c, d)*att(c, ii)*att(d, k)*att(aa, l),
        atv(k, l, d, c)*att(d, ii)*att(c, l)*att(aa, k),
        atv(l, k, d, c)*att(d, ii)*att(c, k)*att(aa, l),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1_AT():
    """External-line swaps of antisymmetric V*T1*T1*T1*T1 terms.

    With antisymmetric tensors, swapping one pair of external lines flips
    the sign; swapping both pairs restores equality.
    """
    ii, jj = symbols('i j', below_fermi=True)
    aa, bb = symbols('a b', above_fermi=True)
    k, l = symbols('k l', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)

    # Coupled-Cluster T2 terms with V*T1*T1*T1*T1
    # non-equivalent substitutions (change of sign)
    exprs = [
        # permut t <=> swapping external lines
        atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
        atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(aa, k)*att(bb, l),
        atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(bb, k)*att(aa, l),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == -substitute_dummies(permut)

    # equivalent substitutions
    exprs = [
        atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
        # permut t <=> swapping external lines
        atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(bb, k)*att(aa, l),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1_AT():
    """Antisymmetric-tensor analogue of ``test_equivalent_internal_lines_VT1T1``.

    Same permutation groups, but built from AntiSymmetricTensor objects;
    only the ``substitute_dummies`` outcome is asserted.
    """
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)

    exprs = [  # permute v. Different dummy order. Not equivalent.
        atv(i, j, a, b)*att(a, i)*att(b, j),
        atv(j, i, a, b)*att(a, i)*att(b, j),
        atv(i, j, b, a)*att(a, i)*att(b, j),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [  # permute v. Different dummy order. Equivalent
        atv(i, j, a, b)*att(a, i)*att(b, j),
        atv(j, i, b, a)*att(a, i)*att(b, j),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)

    exprs = [  # permute t. Same dummy order, not equivalent.
        atv(i, j, a, b)*att(a, i)*att(b, j),
        atv(i, j, a, b)*att(b, i)*att(a, j),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [  # permute v and t. Different dummy order, equivalent
        atv(i, j, a, b)*att(a, i)*att(b, j),
        atv(j, i, a, b)*att(a, j)*att(b, i),
        atv(i, j, b, a)*att(b, i)*att(a, j),
        atv(j, i, b, a)*att(b, j)*att(a, i),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2_AT():
    """Antisymmetric-tensor analogue of ``test_equivalent_internal_lines_VT2conjT2``.

    All particle-index permutations of each template must collapse to the
    same expression after ``substitute_dummies``.
    """
    # this diagram requires special handling in TCE
    i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
    a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
    p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
    h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)

    from sympy.utilities.iterables import variations

    # atv(abcd)att(abij)att(ijcd)
    template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(i, j, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)
    template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(j, i, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)

    # atv(abcd)att(abij)att(jicd)
    template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(j, i, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)
    template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(i, j, p3, p4)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order_AT():
    """Ambiguous-order analogue of the VT2conjT2 test, with antisymmetric tensors.

    Exercises the ``_determine_ambiguous()`` path: the dummies cannot be
    ordered unambiguously by the sort key alone.
    """
    # These diagrams invokes _determine_ambiguous() because the
    # dummies can not be ordered unambiguously by the key alone
    i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
    a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
    p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
    h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)

    from sympy.utilities.iterables import variations

    # atv(abcd)att(abij)att(cdij)
    template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(p3, p4, i, j)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)
    template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(p3, p4, i, j)
    permutator = variations([a, b, c, d], 4)
    base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
    for permut in permutator:
        subslist = zip([p1, p2, p3, p4], permut)
        expr = template.subs(subslist)
        assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2_AT():
    """Antisymmetric-tensor analogue of ``test_equivalent_internal_lines_VT2``.

    Only a joint permutation of v and t (dummy relabelling) is an
    equivalence; permuting a single factor is not.
    """
    i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
    a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)

    exprs = [
        # permute v. Same dummy order, not equivalent.
        atv(i, j, a, b)*att(a, b, i, j),
        atv(j, i, a, b)*att(a, b, i, j),
        atv(i, j, b, a)*att(a, b, i, j),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [
        # permute t.
        atv(i, j, a, b)*att(a, b, i, j),
        atv(i, j, a, b)*att(b, a, i, j),
        atv(i, j, a, b)*att(a, b, j, i),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) != substitute_dummies(permut)

    exprs = [  # permute v and t. Relabelling of dummies should be equivalent.
        atv(i, j, a, b)*att(a, b, i, j),
        atv(j, i, a, b)*att(a, b, j, i),
        atv(i, j, b, a)*att(b, a, i, j),
        atv(j, i, b, a)*att(b, a, j, i),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2_AT():
    """Antisymmetric-tensor analogue of ``test_internal_external_VT2T2``.

    Each group permutes v together with the t amplitudes (internal-line
    swaps) in the presence of external indices; all variants must be
    equivalent under ``substitute_dummies``.
    """
    ii, jj = symbols('i j', below_fermi=True)
    aa, bb = symbols('a b', above_fermi=True)
    k, l = symbols('k l', below_fermi=True, cls=Dummy)
    c, d = symbols('c d', above_fermi=True, cls=Dummy)

    exprs = [
        atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
        atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
        atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
        atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
    exprs = [
        atv(k, l, c, d)*att(aa, c, ii, k)*att(d, bb, jj, l),
        atv(l, k, c, d)*att(aa, c, ii, l)*att(d, bb, jj, k),
        atv(k, l, d, c)*att(aa, d, ii, k)*att(c, bb, jj, l),
        atv(l, k, d, c)*att(aa, d, ii, l)*att(c, bb, jj, k),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
    exprs = [
        atv(k, l, c, d)*att(c, aa, ii, k)*att(bb, d, jj, l),
        atv(l, k, c, d)*att(c, aa, ii, l)*att(bb, d, jj, k),
        atv(k, l, d, c)*att(d, aa, ii, k)*att(bb, c, jj, l),
        atv(l, k, d, c)*att(d, aa, ii, l)*att(bb, c, jj, k),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs_AT():
    """Same internal-line swap check with general (pqrs) indices and
    antisymmetric tensors -- no above/below-fermi restrictions."""
    ii, jj = symbols('i j')
    aa, bb = symbols('a b')
    k, l = symbols('k l', cls=Dummy)
    c, d = symbols('c d', cls=Dummy)

    exprs = [
        atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
        atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
        atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
        atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
    ]
    for permut in exprs[1:]:
        assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_canonical_ordering_AntiSymmetricTensor():
    """AntiSymmetricTensor must canonicalize index order deterministically.

    Swapping one pair of (lower) indices flips the sign; the constructor
    must always pick the same canonical representative.
    """
    v = symbols("v")

    c, d = symbols(('c', 'd'), above_fermi=True,
                   cls=Dummy)
    k, l = symbols(('k', 'l'), below_fermi=True,
                   cls=Dummy)

    # formerly, the left gave either the left or the right
    assert AntiSymmetricTensor(v, (k, l), (d, c)
                               ) == -AntiSymmetricTensor(v, (l, k), (d, c))
| bsd-3-clause |
shishaochen/TensorFlow-0.8-Win | third_party/eigen-eigen-50812b426b7c/scripts/relicense.py | 315 | 2368 | # This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <mierle@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: mierle@gmail.com (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
# Exact LGPL3 comment block to search for.  This must match the headers in
# the source tree byte-for-byte, so do not reflow or edit the text below.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''

# Replacement MPL2 header, likewise verbatim.
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""

import os
import sys

# Files that must never be rewritten (this script itself).
exclusions = set(['relicense.py'])


def update(text):
    """Return ``(new_text, changed)``: swap the LGPL3 header for the MPL2 one.

    If the LGPL3 header is not present, ``text`` is returned unchanged with
    ``changed`` set to False.
    """
    if text.find(lgpl3_header) == -1:
        return text, False
    return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
for basename in files:
if basename in exclusions:
print 'SKIPPED', filename
continue
filename = os.path.join(root, basename)
fo = file(filename)
text = fo.read()
fo.close()
text, updated = update(text)
if updated:
fo = file(filename, "w")
fo.write(text)
fo.close()
print 'UPDATED', filename
else:
print ' ', filename
| apache-2.0 |
ViDA-NYU/reprozip | reprounzip-qt/reprounzip_qt/main.py | 1 | 2565 | # Copyright (C) 2014-2017 New York University
# This file is part of ReproZip which is released under the Revised BSD License
# See file LICENSE for full license details.
from __future__ import division, print_function, unicode_literals
import argparse
import locale
import logging
import sys
from reprounzip.common import setup_logging
from reprounzip_qt import __version__
from reprounzip_qt.usage import record_usage, submit_usage_report
logger = logging.getLogger('reprounzip_qt')


def main():
    """Entry point when called on the command-line.
    """
    # Locale
    locale.setlocale(locale.LC_ALL, '')

    parser = argparse.ArgumentParser(
        description="Graphical user interface for reprounzip",
        epilog="Please report issues to reprozip-users@vgc.poly.edu")
    parser.add_argument('--version', action='version',
                        version="reprounzip-qt version %s" % __version__)
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        dest='verbosity', help="augments verbosity level")
    parser.add_argument('package', nargs=argparse.OPTIONAL)
    parser.add_argument('--unpacked', action='append', default=[])

    # Strip the '-psnXXXX' argument that macOS adds when launching a bundled
    # application (process serial number) before argparse sees it.
    argv = sys.argv[1:]
    i = 0
    while i < len(argv):
        if argv[i].startswith('-psn'):
            del argv[i]
        else:
            i += 1
    args = parser.parse_args(argv)
    setup_logging('REPROUNZIP-QT', args.verbosity)

    # Imported here (after logging is configured) rather than at module level;
    # importing the GUI pulls in Qt.
    from reprounzip_qt.gui import Application, ReprounzipUi

    app = Application(sys.argv)

    window_args = {}
    if args.package and args.unpacked:
        sys.stderr.write("You can't pass both a package and a unpacked "
                         "directory\n")
        sys.exit(2)
    elif args.package:
        # Open the "unpack" tab pre-filled with the given .rpz package.
        logger.info("Got package on the command-line: %s", args.package)
        record_usage(cmdline='package')
        window_args = dict(unpack=dict(package=args.package))
    elif len(args.unpacked) == 1:
        # Open the "run" tab on an already-unpacked directory.
        logger.info("Got unpacked directory on the command-line: %s",
                    args.unpacked)
        record_usage(cmdline='directory')
        window_args = dict(run=dict(unpacked_directory=args.unpacked[0]),
                           tab=1)
    elif args.unpacked:
        sys.stderr.write("You may only use --unpacked once\n")
        sys.exit(2)
    else:
        record_usage(cmdline='empty')
    window = ReprounzipUi(**window_args)
    app.set_first_window(window)
    window.setVisible(True)

    app.exec_()

    # Send the (opt-in) usage report before exiting.
    submit_usage_report()
    sys.exit(0)


if __name__ == '__main__':
    main()
| bsd-3-clause |
AnalogJ/lexicon | lexicon/providers/mythicbeasts.py | 1 | 10918 | """Module provider for Mythic Beasts"""
import binascii
import hashlib
import json
import logging

import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry  # type: ignore

from lexicon.exceptions import AuthenticationError
from lexicon.providers.base import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)

# Nameservers operated by Mythic Beasts; used by lexicon's provider discovery.
NAMESERVER_DOMAINS = ["mythic-beasts.com"]
def provider_parser(subparser):
    """Configure the command-line sub-parser for the Mythic Beasts provider.

    Authentication works either with API credentials (``--auth-username``
    plus ``--auth-password``) or with a pre-issued token (``--auth-token``),
    all generated through the Mythic Beasts API v2.
    """
    subparser.description = """
    There are two ways to provide an authentication granting access to the Mythic Beasts API
    1 - With your API credentials (user/password),
    with --auth-username and --auth-password flags.
    2 - With an API token, using --auth-token flags.
    These credentials and tokens must be generated using the Mythic Beasts API v2.
    """
    option_specs = (
        ("--auth-username", "specify API credentials username"),
        ("--auth-password", "specify API credentials password"),
        ("--auth-token", "specify API token for authentication"),
    )
    for flag, help_text in option_specs:
        subparser.add_argument(flag, help=help_text)
class Provider(BaseProvider):
"""Provider class for Mythic Beasts"""
def __init__(self, config):
super(Provider, self).__init__(config)
self.domain_id = None
self.api_endpoint = "https://api.mythic-beasts.com/dns/v2"
self.auth_token = None
def _authenticate(self):
# may need to get auth token
if self.auth_token is None and self._get_provider_option("auth_token") is None:
auth_request = requests.request(
"POST",
"https://auth.mythic-beasts.com/login",
data={"grant_type": "client_credentials"},
auth=(
self._get_provider_option("auth_username"),
self._get_provider_option("auth_password"),
),
)
auth_request.raise_for_status()
post_result = auth_request.json()
if not post_result["access_token"]:
raise AuthenticationError(
"Error, could not get access token "
f"for Mythic Beasts API for user: {self._get_provider_option('auth_username')}"
)
self.auth_token = post_result["access_token"]
elif self.auth_token is None:
self.auth_token = self._get_provider_option("auth_token")
payload = self._get("/zones")
if self.domain is None:
if not payload["zones"]:
raise AuthenticationError("No domain found")
if len(payload["zones"]) > 1:
raise AuthenticationError(
"Too many domains found. This should not happen"
)
else:
self.domain = payload["zones"][0]
else:
if not payload["zones"]:
raise AuthenticationError("No domain found")
if self.domain not in payload["zones"]:
raise AuthenticationError("Requested domain not found")
self.domain_id = self.domain
# Create record. If record already exists with the same content, do nothing'
def _create_record(self, rtype, name, content):
LOGGER.debug("type %s", rtype)
LOGGER.debug("name %s", name)
LOGGER.debug("content %s", content)
if rtype == "CNAME":
content = self._fqdn_name(content)
data = {
"records": [
{
"host": self._relative_name(name),
"type": rtype,
"data": content,
}
]
}
if self._get_lexicon_option("ttl"):
data["records"][0]["ttl"] = self._get_lexicon_option("ttl")
payload = {"success": True}
try:
payload = self._post(f"/zones/{self.domain}/records", data)
except requests.exceptions.HTTPError as err:
if (
err.response.status_code == 400
and err.response.json()["errors"][0][0:16] == "Duplicate record"
):
LOGGER.debug("create_record (ignored, duplicate)")
else:
raise
if rtype == "A" or rtype == "TXT":
# need to wait and poll here until verified that DNS change is live
try:
self._get(
f"/zones/{self.domain}/records/{self._relative_name(name)}/{rtype}?verify"
)
except requests.exceptions.HTTPError:
LOGGER.debug("Timed out trying to verify changes were live")
raise
if "message" in payload:
return payload["message"]
elif "success" in payload:
return payload["success"]
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def _list_records(self, rtype=None, name=None, content=None):
filter_obj = {}
if rtype:
filter_obj["type"] = rtype
if name:
filter_obj["host"] = self._relative_name(name)
if content:
filter_obj["data"] = content
records = []
payload = self._get(f"/zones/{self.domain}/records", filter_obj)
LOGGER.debug("payload: %s", payload)
for record in payload["records"]:
processed_record = {
"type": record["type"],
"name": self._full_name(record["host"]),
"ttl": record["ttl"],
"content": record["data"],
# no id is available, so we need to make our own
"id": _identifier(record),
}
if record["type"] == "MX" and record["mx_priority"]:
processed_record["options"] = {
"mx": {"priority": record["mx_priority"]}
}
records.append(processed_record)
LOGGER.debug("list_records: %s", records)
LOGGER.debug("Number of records retrieved: %d", len(records))
return records
# Create or update a record.
def _update_record(self, identifier, rtype=None, name=None, content=None):
if identifier is None:
records = self._list_records(rtype, self._full_name(name))
if len(records) == 1:
matching_record = records[0]
filter_obj = {}
filter_obj["type"] = matching_record["type"]
filter_obj["host"] = self._relative_name(matching_record["name"])
elif len(records) < 1:
raise Exception(
"No records found matching type and name - won't update"
)
else:
raise Exception(
"Multiple records found matching type and name - won't update"
)
else:
records = self._list_records()
for record in records:
if record["id"] == identifier:
matching_record = record
break
else:
raise Exception("Can't find record with that id!")
filter_obj = {}
filter_obj["type"] = matching_record["type"]
filter_obj["host"] = self._relative_name(matching_record["name"])
filter_obj["data"] = matching_record["content"]
data = {"records": [{}]}
if rtype:
data["type"] = rtype
if name:
data["host"] = self._relative_name(name)
if content:
data["records"][0]["data"] = content
if self._get_lexicon_option("ttl"):
data["records"][0]["ttl"] = self._get_lexicon_option("ttl")
LOGGER.debug(data)
payload = self._put(
f"/zones/{self.domain}/records/{self._relative_name(matching_record['name'])}/{matching_record['type']}",
data,
filter_obj,
)
LOGGER.debug("update_record: %s", payload["message"])
return payload["message"]
# Delete an existing record.
# If record does not exist, do nothing.
def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
    """Delete every record matching the given criteria.

    When ``identifier`` is supplied, only the record carrying that
    synthetic id is removed.  Always returns True, even if nothing
    matched (deleting a missing record is a no-op).
    """
    # Build the delete filter once from the caller-supplied criteria.
    filter_obj = {}
    for key, value in (("type", rtype), ("host", name), ("data", content)):
        if value:
            filter_obj[key] = self._relative_name(value) if key == "host" else value
    for candidate in self._list_records(rtype, name, content):
        LOGGER.debug("delete_records: %s", candidate)
        rec_host = self._relative_name(candidate["name"])
        rec_type = candidate["type"]
        # Skip non-matching records when a specific id was requested.
        if identifier is not None and identifier != candidate["id"]:
            continue
        self._delete(
            f"/zones/{self.domain}/records/{rec_host}/{rec_type}", filter_obj
        )
    LOGGER.debug("delete_record: %s", True)
    return True
# Helpers
def _request(self, action="GET", url="/", data=None, query_params=None):
    """Send an authenticated JSON request to the API and return the decoded body.

    Zone edits are not applied instantly: until a change is live the
    endpoint answers HTTP 409, so every verb is retried with backoff
    while 409s keep coming back (any other HTTP error, or a 2xx, ends
    the retry loop).
    """
    # Retry every verb on HTTP 409 until the zone change becomes visible.
    retry_policy = Retry(
        total=10,
        backoff_factor=0.5,
        status_forcelist=[409],
        allowed_methods=frozenset(["GET", "PUT", "POST", "DELETE", "PATCH"]),
    )
    http = requests.Session()
    http.mount("https://", HTTPAdapter(max_retries=retry_policy))
    response = http.request(
        action,
        self.api_endpoint + url,
        params={} if query_params is None else query_params,
        data=json.dumps({} if data is None else data),
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.auth_token}",
        },
    )
    # Surface HTTP errors to the caller instead of returning a bad payload.
    response.raise_for_status()
    return response.json()
# Return hash id for record
def _identifier(record):
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
digest.update(("name=" + record.get("name", "") + ",").encode("utf-8"))
digest.update(("content=" + record.get("content", "") + ",").encode("utf-8"))
return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]
| mit |
aestrivex/mne-python | examples/plot_compute_mne_inverse.py | 21 | 1885 | """
================================================
Compute MNE-dSPM inverse solution on evoked data
================================================
Compute dSPM inverse solution on MNE evoked dataset
and stores the solution in stc files for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator

print(__doc__)

# Paths into the MNE sample dataset (downloaded on first use).
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'

# Regularization parameter: lambda2 = 1 / SNR**2.
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)

# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_ori=None)

# Save result in stc files
stc.save('mne_%s_inverse' % method)

###############################################################################
# View activation time-series (every 100th source kept to keep the plot light)
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()

# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.show_view('lateral')

# use peak getter to move visualization to the time point of the peak
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
brain.set_data_time_index(time_idx)

# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
               scale_factor=0.6)
brain.save_image('dSPM_map.png')
| bsd-3-clause |
imjennyli/pursuedpybear | tests/test_engine.py | 1 | 1885 | import unittest
from unittest import mock
from ppb import GameEngine, BaseScene
# Readable aliases for the boolean "keep running" flag that
# BaseScene.change() returns as the first element of its result tuple.
CONTINUE = True
STOP = False
class TestEngine(unittest.TestCase):
    """Basic construction and start-up behaviour of GameEngine."""

    def test_initialize(self):
        # Construction is exercised implicitly by the other tests.
        pass

    def test_start(self):
        """start() must install an instance of the initial scene class."""
        scene_instance = mock.Mock(spec=BaseScene)
        scene_factory = mock.Mock(spec=BaseScene, return_value=scene_instance)

        engine = GameEngine(scene_factory)
        engine.start()

        self.assertIs(engine.current_scene, scene_instance)
class TestEngineSceneActivate(unittest.TestCase):
    """Behaviour of GameEngine.manage_scene for each (running, data) result."""

    def setUp(self):
        # A started engine whose current scene is a controllable mock.
        self.mock_scene = mock.Mock(spec=BaseScene)
        self.mock_scene_class = mock.Mock(return_value=self.mock_scene)
        self.engine = GameEngine(self.mock_scene_class)
        self.engine.start()

    def test_continue_running(self):
        """
        Test that a Scene.change that returns (True, {}) doesn't change
        state.  (CONTINUE is True; the old docstring said False.)
        """
        self.mock_scene.change = mock.Mock(return_value=(CONTINUE, {}))
        self.engine.manage_scene(*self.engine.current_scene.change())
        self.assertIs(self.engine.current_scene, self.mock_scene)

    def test_stop_scene_no_new_scene(self):
        """
        Test that a Scene.change that returns (False, {}) leaves the scene
        stack empty.  (STOP is False; the old docstring said True.)
        """
        self.mock_scene.change = mock.Mock(return_value=(STOP, {}))
        self.engine.manage_scene(*self.engine.current_scene.change())
        self.assertIsNone(self.engine.current_scene)

    def test_next_scene_none(self):
        """An explicit scene_class of None must keep the current scene."""
        self.mock_scene.change = mock.Mock(return_value=(CONTINUE,
                                                         {"scene_class": None}
                                                         )
                                           )
        self.engine.manage_scene(*self.engine.current_scene.change())
        self.assertIs(self.engine.current_scene, self.mock_scene)
| artistic-2.0 |
dya2/python-for-android | python-modules/twisted/twisted/python/otp.py | 60 | 25572 | # -*- test-case-name: twisted.python.test.test_otp -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A One-Time Password System based on RFC 2289
The class Authenticator contains the hashing-logic, and the parser for the
readable output. It also contains challenge which returns a string describing
the authentication scheme for a client.
OTP is a password container for an user on a server.
NOTE: Does not take care of transmitting the shared secret password.
At the end there's a dict called dict, which is a dictionary containing 2048
words for storing pronounceable 11-bit values. Taken from RFC 1760.
Uses the MD5- and SHA-algorithms for hashing
Todo: RFC2444, SASL (perhaps), parsing hex-responses
This module is deprecated. Consider using U{another Python OTP
library<http://labix.org/python-otp>} instead.
"""
import warnings
import string
import random
# Emit the deprecation warning at import time; stacklevel=2 points the
# warning at the importing module rather than at this file.
warnings.warn(
    "twisted.python.otp is deprecated since Twisted 8.3.",
    category=DeprecationWarning,
    stacklevel=2)
def stringToLong(s):
    """Convert a big-endian byte string (a raw digest) to an integer.

    Returns 0 for the empty string.  The original used the Python 2-only
    literal ``0L``; plain ``0`` behaves identically on Python 2 (ints are
    auto-promoted to long) and also works on Python 3.
    """
    result = 0
    for byte in s:
        result = (256 * result) + ord(byte)
    return result
def stringToDWords(s):
    """Convert a digest string to a list of big-endian 32-bit words.

    Words are produced from the *end* of the string backwards (the last
    four characters become element 0), which is the order the folding
    code in this module relies on.  Leading bytes that do not fill a
    whole word are ignored.

    Rewritten to run on both Python 2 and 3: ``xrange``, ``0L`` and
    truncating integer ``/`` were Python 2 only (``//`` truncates the
    same way on both).
    """
    result = []
    for _ in range(len(s) // 4):
        word = 0
        # Accumulate the last four characters as one big-endian word ...
        for byte in s[-4:]:
            word = (256 * word) + ord(byte)
        result.append(word)
        # ... then chop them off and repeat.
        s = s[:-4]
    return result
def longToString(l):
    """Convert a non-negative integer back to a big-endian byte string.

    Inverse of ``stringToLong`` for inputs without leading NUL bytes;
    returns the empty string for 0.  Uses floor division ``//`` instead
    of the Python 2-only ``l / 256L`` so it runs unchanged on Python 2
    and Python 3.
    """
    result = ""
    while l > 0:
        # Peel off the least significant byte and prepend it.
        result = chr(l % 256) + result
        l = l // 256
    return result
from twisted.python.hashlib import md5, sha1
# Map hash constructors to the names used in the "otp-<hash>" challenge string.
hashid = {md5: 'md5', sha1: 'sha1'}

# Default starting sequence counter for a new OTP chain, and the floor
# below which the counter is no longer decremented (see OTP.authenticate).
INITIALSEQUENCE = 1000
MINIMUMSEQUENCE = 50
class Unauthorized(Exception):
    """Raised when an action is not allowed or a user fails authentication.

    This covers both parity errors while parsing a six-word pass-phrase
    and failed challenge/response checks.
    """
class OTPAuthenticator:
    """
    A One Time Password System

    Based on RFC 2289, which is based on the S/KEY authentication scheme.
    It uses the MD5- and SHA-algorithms for hashing.

    The variable OTP is at all times a 64bit string.

    @ivar hash: An object which can be used to compute hashes. This is either
        L{md5} or L{sha1}.
    """
    def __init__(self, hash = md5):
        "Set the hash to either md5 or sha1"
        self.hash = hash

    def generateSeed(self):
        "Return a 10 char random seed, with 6 lowercase chars and 4 digits"
        # NOTE(review): randrange's upper bound is exclusive, so 'z' (122)
        # and '9' (57) can never actually appear in a generated seed.
        seed = ''
        for x in range(6):
            seed = seed + chr(random.randrange(97,122))
        for x in range(4):
            seed = seed + chr(random.randrange(48,57))
        return seed

    def foldDigest(self, otp):
        # Dispatch on digest width: md5 -> 128 bit, sha1 -> 160 bit.
        # (Falls off the end, returning None, for any other hash.)
        if self.hash == md5:
            return self.foldDigest128(otp)
        if self.hash == sha1:
            return self.foldDigest160(otp)

    def foldDigest128(self, otp128):
        "Fold a 128 bit digest to 64 bit"
        # stringToDWords yields the words from the end of the digest
        # backwards; XOR the pairs, then repack as 8 bytes.
        regs = stringToDWords(otp128)
        p0 = regs[0] ^ regs[2]
        p1 = regs[1] ^ regs[3]
        S = ''
        for a in xrange(4):
            S = chr(p0 & 0xFF) + S
            p0 = p0 >> 8
        for a in xrange(4):
            S = chr(p1 & 0xFF) + S
            p1 = p1 >> 8
        return S

    def foldDigest160(self, otp160):
        "Fold a 160 bit digest to 64 bit"
        regs = stringToDWords(otp160)
        p0 = regs[0] ^ regs[2]
        p1 = regs[1] ^ regs[3]
        # NOTE(review): the next line overwrites the first p0 assignment,
        # so regs[2] never contributes -- RFC 2289's SHA1 fold XORs three
        # words (regs[0] ^ regs[2] ^ regs[4]).  Left untouched to stay
        # compatible with pass-phrases generated by this code.
        p0 = regs[0] ^ regs[4]
        S = ''
        for a in xrange(4):
            S = chr(p0 & 0xFF) + S
            p0 = p0 >> 8
        for a in xrange(4):
            S = chr(p1 & 0xFF) + S
            p1 = p1 >> 8
        return S

    def hashUpdate(self, digest):
        "Run through the hash and fold to 64 bit"
        h = self.hash(digest)
        return self.foldDigest(h.digest())

    def generateOTP(self, seed, passwd, sequence):
        """Return a 64 bit OTP based on inputs
        Run through makeReadable to get a 6 word pass-phrase"""
        # Seeds are case-insensitive per RFC 2289.
        seed = string.lower(seed)
        otp = self.hashUpdate(seed + passwd)
        # Iterate the hash 'sequence' times to produce the n-th password.
        for a in xrange(sequence):
            otp = self.hashUpdate(otp)
        return otp

    def calculateParity(self, otp):
        "Calculate the parity from a 64bit OTP"
        # NOTE(review): '+' binds tighter than '&', so each step computes
        # (parity + otp) & 0x3 rather than parity + (otp & 0x3).  The
        # encoder (makeReadable) and decoder (parsePhrase) both use this
        # function, so they stay self-consistent; kept as-is.
        parity = 0
        for i in xrange(0, 64, 2):
            parity = parity + otp & 0x3
            otp = otp >> 2
        return parity

    def makeReadable(self, otp):
        "Returns a 6 word pass-phrase from a 64bit OTP"
        digest = stringToLong(otp)
        list = []
        parity = self.calculateParity(digest)
        # Five words of 11 bits each, then a sixth word carrying the last
        # 9 bits plus the 2-bit parity checksum (RFC 2289 encoding).
        for i in xrange(4,-1, -1):
            list.append(dict[(digest >> (i * 11 + 9)) & 0x7FF])
        list.append(dict[(digest << 2) & 0x7FC | (parity & 0x03)])
        return string.join(list)

    def challenge(self, seed, sequence):
        """Return a challenge in the format otp-<hash> <sequence> <seed>"""
        return "otp-%s %i %s" % (hashid[self.hash], sequence, seed)

    def parsePhrase(self, phrase):
        """Decode the phrase, and return a 64bit OTP

        I will raise Unauthorized if the parity is wrong
        TODO: Add support for hex (MUST) and the '2nd scheme'(SHOULD)"""
        words = string.split(phrase)
        # The word dictionary is upper case, so normalize the input.
        for i in xrange(len(words)):
            words[i] = string.upper(words[i])
        b = 0L
        # Reassemble the first five 11-bit groups ...
        for i in xrange(0,5):
            b = b | ((long(dict.index(words[i])) << ((4-i)*11L+9L)))
        # ... then the sixth word: 9 data bits plus the 2-bit parity.
        tmp = dict.index(words[5])
        b = b | (tmp & 0x7FC ) >> 2
        if (tmp & 3) <> self.calculateParity(b):
            raise Unauthorized("Parity error")
        digest = longToString(b)
        return digest
class OTP(OTPAuthenticator):
    """An automatic version of the OTP-Authenticator.

    Updates the sequence and keeps the last approved password on success.
    On the next authentication, the stored password is hashed and checked
    against the one given by the user. If they match, the sequence counter
    is decreased and the circle is closed.

    This object should be glued to each user.

    Note:
    It does NOT reset the sequence when the combinations left approach
    zero; this has to be done manually by instancing a new object.
    """
    # Class-level defaults; all three are overwritten per-instance in __init__.
    seed = None
    sequence = 0
    lastotp = None

    def __init__(self, passwd, sequence = INITIALSEQUENCE, hash=md5):
        """Initialize the OTP-Sequence, and discard the password"""
        OTPAuthenticator.__init__(self, hash)
        seed = self.generateSeed()
        # Generate the 'last' password: sequence+1, so that hashing the
        # user's response at 'sequence' once reproduces it.
        self.lastotp = OTPAuthenticator.generateOTP(self, seed, passwd, sequence+1)
        self.seed = seed
        self.sequence = sequence

    def challenge(self):
        """Return a challenge string"""
        result = OTPAuthenticator.challenge(self, self.seed, self.sequence)
        return result

    def authenticate(self, phrase):
        """Test the phrase against the last challenge issued"""
        try:
            digest = self.parsePhrase(phrase)
            hasheddigest = self.hashUpdate(digest)
            if (self.lastotp == hasheddigest):
                # Success: remember this response and step down the chain,
                # but never below MINIMUMSEQUENCE.
                self.lastotp = digest
                if self.sequence > MINIMUMSEQUENCE:
                    self.sequence = self.sequence - 1
                return "ok"
            else:
                raise Unauthorized("Failed")
        # Python 2 except syntax; re-wraps the exception with its message.
        except Unauthorized, msg:
            raise Unauthorized(msg)
#
# The 2048 word standard dictionary from RFC 1760
#
dict = ["A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY",
"AN", "ANA", "AND", "ANN", "ANT", "ANY", "APE", "APS",
"APT", "ARC", "ARE", "ARK", "ARM", "ART", "AS", "ASH",
"ASK", "AT", "ATE", "AUG", "AUK", "AVE", "AWE", "AWK",
"AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG",
"BEN", "BET", "BEY", "BIB", "BID", "BIG", "BIN", "BIT",
"BOB", "BOG", "BON", "BOO", "BOP", "BOW", "BOY", "BUB",
"BUD", "BUG", "BUM", "BUN", "BUS", "BUT", "BUY", "BY",
"BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT",
"COW", "COY", "CRY", "CUB", "CUE", "CUP", "CUR", "CUT",
"DAB", "DAD", "DAM", "DAN", "DAR", "DAY", "DEE", "DEL",
"DEN", "DES", "DEW", "DID", "DIE", "DIG", "DIN", "DIP",
"DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL",
"EGG", "EGO", "ELI", "ELK", "ELM", "ELY", "EM", "END",
"EST", "ETC", "EVA", "EVE", "EWE", "EYE", "FAD", "FAN",
"FAR", "FAT", "FAY", "FED", "FEE", "FEW", "FIB", "FIG",
"FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL",
"GAM", "GAP", "GAS", "GAY", "GEE", "GEL", "GEM", "GET",
"GIG", "GIL", "GIN", "GO", "GOT", "GUM", "GUN", "GUS",
"GUT", "GUY", "GYM", "GYP", "HA", "HAD", "HAL", "HAM",
"HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP",
"HIS", "HIT", "HO", "HOB", "HOC", "HOE", "HOG", "HOP",
"HOT", "HOW", "HUB", "HUE", "HUG", "HUH", "HUM", "HUT",
"I", "ICY", "IDA", "IF", "IKE", "ILL", "INK", "INN",
"IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW",
"JAY", "JET", "JIG", "JIM", "JO", "JOB", "JOE", "JOG",
"JOT", "JOY", "JUG", "JUT", "KAY", "KEG", "KEN", "KEY",
"KID", "KIM", "KIN", "KIT", "LA", "LAB", "LAC", "LAD",
"LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN",
"LIP", "LIT", "LO", "LOB", "LOG", "LOP", "LOS", "LOT",
"LOU", "LOW", "LOY", "LUG", "LYE", "MA", "MAC", "MAD",
"MAE", "MAN", "MAO", "MAP", "MAT", "MAW", "MAY", "ME",
"MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW",
"MUD", "MUG", "MUM", "MY", "NAB", "NAG", "NAN", "NAP",
"NAT", "NAY", "NE", "NED", "NEE", "NET", "NEW", "NIB",
"NIL", "NIP", "NIT", "NO", "NOB", "NOD", "NON", "NOR",
"NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT",
"OH", "OIL", "OK", "OLD", "ON", "ONE", "OR", "ORB",
"ORE", "ORR", "OS", "OTT", "OUR", "OUT", "OVA", "OW",
"OWE", "OWL", "OWN", "OX", "PA", "PAD", "PAL", "PAM",
"PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE",
"PIN", "PIT", "PLY", "PO", "POD", "POE", "POP", "POT",
"POW", "PRO", "PRY", "PUB", "PUG", "PUN", "PUP", "PUT",
"QUO", "RAG", "RAM", "RAN", "RAP", "RAT", "RAW", "RAY",
"REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW",
"ROY", "RUB", "RUE", "RUG", "RUM", "RUN", "RYE", "SAC",
"SAD", "SAG", "SAL", "SAM", "SAN", "SAP", "SAT", "SAW",
"SAY", "SEA", "SEC", "SEE", "SEN", "SET", "SEW", "SHE",
"SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY",
"SPA", "SPY", "SUB", "SUD", "SUE", "SUM", "SUN", "SUP",
"TAB", "TAD", "TAG", "TAN", "TAP", "TAR", "TEA", "TED",
"TEE", "TEN", "THE", "THY", "TIC", "TIE", "TIM", "TIN",
"TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO",
"UN", "UP", "US", "USE", "VAN", "VAT", "VET", "VIE",
"WAD", "WAG", "WAR", "WAS", "WAY", "WE", "WEB", "WED",
"WEE", "WET", "WHO", "WHY", "WIN", "WIT", "WOK", "WON",
"WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE",
"ABUT", "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM",
"ADDS", "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA",
"AIDE", "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA",
"ALIA", "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA",
"AMEN", "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY",
"ANEW", "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH",
"AREA", "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS",
"ATOM", "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON",
"AVOW", "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE",
"BAIL", "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL",
"BALM", "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE",
"BARK", "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE",
"BATH", "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR",
"BEAT", "BEAU", "BECK", "BEEF", "BEEN", "BEER", "BEET", "BELA",
"BELL", "BELT", "BEND", "BENT", "BERG", "BERN", "BERT", "BESS",
"BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE", "BIEN", "BILE",
"BILK", "BILL", "BIND", "BING", "BIRD", "BITE", "BITS", "BLAB",
"BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", "BLOW", "BLUE",
"BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", "BODE", "BODY",
"BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", "BOMB", "BONA",
"BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", "BOOM", "BOON",
"BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", "BOTH", "BOUT",
"BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", "BRAY", "BRED",
"BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", "BUFF", "BULB",
"BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", "BURL", "BURN",
"BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", "BUSY", "BYTE",
"CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", "CALL", "CALM",
"CAME", "CANE", "CANT", "CARD", "CARE", "CARL", "CARR", "CART",
"CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", "CELL", "CENT",
"CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", "CHEN", "CHEW",
"CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", "CHUM", "CITE",
"CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", "CLOD", "CLOG",
"CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA", "COCK", "COCO",
"CODA", "CODE", "CODY", "COED", "COIL", "COIN", "COKE", "COLA",
"COLD", "COLT", "COMA", "COMB", "COME", "COOK", "COOL", "COON",
"COOT", "CORD", "CORE", "CORK", "CORN", "COST", "COVE", "COWL",
"CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", "CROW", "CRUD",
"CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", "CURB", "CURD",
"CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", "DAME", "DANA",
"DANE", "DANG", "DANK", "DARE", "DARK", "DARN", "DART", "DASH",
"DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", "DEAD", "DEAF",
"DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", "DEEM", "DEER",
"DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", "DIAL", "DICE",
"DIED", "DIET", "DIME", "DINE", "DING", "DINT", "DIRE", "DIRT",
"DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", "DOLE", "DOLL",
"DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", "DOSE", "DOTE",
"DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", "DRAM", "DRAW",
"DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", "DUCT", "DUEL",
"DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", "DUSK", "DUST",
"DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", "EASY", "EBEN",
"ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", "EDNA", "EGAN",
"ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", "EMMA", "ENDS",
"ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", "FACE", "FACT",
"FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", "FAME", "FANG",
"FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", "FEED", "FEEL",
"FEET", "FELL", "FELT", "FEND", "FERN", "FEST", "FEUD", "FIEF",
"FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", "FINK", "FIRE",
"FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", "FLAG", "FLAK",
"FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", "FLIT", "FLOC",
"FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", "FOGY", "FOIL",
"FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", "FOOT", "FORD",
"FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", "FOUR", "FOWL",
"FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", "FROG", "FROM",
"FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", "FUSE", "FUSS",
"GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", "GALE", "GALL",
"GALT", "GAME", "GANG", "GARB", "GARY", "GASH", "GATE", "GAUL",
"GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", "GENT", "GERM",
"GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", "GINA", "GIRD",
"GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", "GLIB", "GLOB",
"GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", "GOAL", "GOAT",
"GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", "GOOD", "GOOF",
"GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", "GRAD", "GRAY",
"GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", "GRIT", "GROW",
"GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", "GUST", "GWEN",
"GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", "HALE", "HALF",
"HALL", "HALO", "HALT", "HAND", "HANG", "HANK", "HANS", "HARD",
"HARK", "HARM", "HART", "HASH", "HAST", "HATE", "HATH", "HAUL",
"HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", "HEAT", "HEBE",
"HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", "HELM", "HERB",
"HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", "HICK", "HIDE",
"HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", "HIRE", "HISS",
"HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", "HOLM", "HOLT",
"HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", "HOOT", "HORN",
"HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", "HOYT", "HUCK",
"HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", "HULL", "HUNK",
"HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", "HYMN", "IBIS",
"ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH", "INTO", "IONS",
"IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", "ITCH", "ITEM",
"IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", "JAVA", "JEAN",
"JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", "JILT", "JIVE",
"JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", "JOIN", "JOKE",
"JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", "JUJU", "JUKE",
"JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", "JUTE", "KAHN",
"KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", "KEEN", "KENO",
"KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", "KIND", "KING",
"KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", "KNIT", "KNOB",
"KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", "KURT", "KYLE",
"LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", "LAIR", "LAKE",
"LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", "LARK", "LASS",
"LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", "LAYS", "LEAD",
"LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", "LEFT", "LEND",
"LENS", "LENT", "LEON", "LESK", "LESS", "LEST", "LETS", "LIAR",
"LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", "LIFE", "LIFT",
"LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", "LIME", "LIND",
"LINE", "LINK", "LINT", "LION", "LISA", "LIST", "LIVE", "LOAD",
"LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", "LOIS", "LOLA",
"LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD", "LORE", "LOSE",
"LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", "LUCY", "LUGE",
"LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", "LURK", "LUSH",
"LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", "MADE", "MAGI",
"MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", "MALL", "MALT",
"MANA", "MANN", "MANY", "MARC", "MARE", "MARK", "MARS", "MART",
"MARY", "MASH", "MASK", "MASS", "MAST", "MATE", "MATH", "MAUL",
"MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", "MEET", "MELD",
"MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", "MESS", "MICE",
"MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", "MIMI", "MIND",
"MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", "MIST", "MITE",
"MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", "MOLE", "MOLL",
"MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", "MOOR", "MOOT",
"MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", "MOVE", "MUCH",
"MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", "MUSH", "MUST",
"MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", "NAIR", "NAME",
"NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", "NEAT", "NECK",
"NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", "NEST", "NEWS",
"NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", "NINE", "NOAH",
"NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", "NORM", "NOSE",
"NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", "OATH", "OBEY",
"OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", "OLAF", "OLDY",
"OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", "ONES", "ONLY",
"ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", "OTTO", "OUCH",
"OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", "OWNS", "QUAD",
"QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", "RAGE", "RAID",
"RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", "RASH", "RATE",
"RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", "RECK", "REED",
"REEF", "REEK", "REEL", "REID", "REIN", "RENA", "REND", "RENT",
"REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", "RILL", "RIME",
"RING", "RINK", "RISE", "RISK", "RITE", "ROAD", "ROAM", "ROAR",
"ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", "ROOD", "ROOF",
"ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", "ROSY", "ROTH",
"ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", "RUDE", "RUDY",
"RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", "RUSH", "RUSK",
"RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", "SAID", "SAIL",
"SALE", "SALK", "SALT", "SAME", "SAND", "SANE", "SANG", "SANK",
"SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", "SCAT", "SCOT",
"SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", "SEEM", "SEEN",
"SEES", "SELF", "SELL", "SEND", "SENT", "SETS", "SEWN", "SHAG",
"SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", "SHOD", "SHOE",
"SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", "SIFT", "SIGH",
"SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", "SING", "SINK",
"SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", "SKID", "SKIM",
"SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", "SLED", "SLEW",
"SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", "SLOW", "SLUG",
"SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", "SNOW", "SNUB",
"SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", "SOFT", "SOIL",
"SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", "SORT", "SOUL",
"SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", "STAY", "STEM",
"STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", "SUDS", "SUIT",
"SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", "SWAB", "SWAG",
"SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", "TACK", "TACT",
"TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", "TASK", "TATE",
"TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", "TEEN", "TEET",
"TELL", "TEND", "TENT", "TERM", "TERN", "TESS", "TEST", "THAN",
"THAT", "THEE", "THEM", "THEN", "THEY", "THIN", "THIS", "THUD",
"THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", "TILE", "TILL",
"TILT", "TIME", "TINA", "TINE", "TINT", "TINY", "TIRE", "TOAD",
"TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", "TONY", "TOOK",
"TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", "TOUT", "TOWN",
"TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", "TRIM", "TRIO",
"TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", "TUCK", "TUFT",
"TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", "TWIG", "TWIN",
"TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", "USES", "UTAH",
"VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", "VEAL", "VEDA",
"VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", "VETO", "VICE",
"VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", "WACK", "WADE",
"WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", "WALL", "WALT",
"WAND", "WANE", "WANG", "WANT", "WARD", "WARM", "WARN", "WART",
"WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", "WAYS", "WEAK",
"WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", "WELD", "WELL",
"WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", "WHAT", "WHEE",
"WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", "WILD", "WILL",
"WIND", "WINE", "WING", "WINK", "WINO", "WIRE", "WISE", "WISH",
"WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", "WORE", "WORK",
"WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", "YANG", "YANK",
"YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", "YELL", "YOGA",
"YOKE"]
| apache-2.0 |
mezz64/home-assistant | tests/components/tradfri/test_init.py | 7 | 4352 | """Tests for Tradfri setup."""
from homeassistant.components import tradfri
from homeassistant.helpers.device_registry import (
async_entries_for_config_entry,
async_get_registry as async_get_device_registry,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_config_yaml_host_not_imported(hass):
    """Test that we don't import a configured host."""
    # A config entry for this host already exists, so YAML setup must not
    # start a new import flow.
    entry = MockConfigEntry(domain="tradfri", data={"host": "mock-host"})
    entry.add_to_hass(hass)

    patch_load = patch(
        "homeassistant.components.tradfri.load_json", return_value={}
    )
    patch_flow = patch.object(hass.config_entries.flow, "async_init")
    with patch_load, patch_flow as flow_init:
        setup_ok = await async_setup_component(
            hass, "tradfri", {"tradfri": {"host": "mock-host"}}
        )
        assert setup_ok
        await hass.async_block_till_done()

    assert not flow_init.mock_calls
async def test_config_yaml_host_imported(hass):
    """Test that we import a configured host."""
    # No existing entry for the host: YAML setup should start an import flow.
    with patch("homeassistant.components.tradfri.load_json", return_value={}):
        assert await async_setup_component(
            hass, "tradfri", {"tradfri": {"host": "mock-host"}}
        )
        await hass.async_block_till_done()

    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    flow = flows[0]
    assert flow["handler"] == "tradfri"
    assert flow["context"] == {"source": "import"}
async def test_config_json_host_not_imported(hass):
    """Test that we don't import a configured host."""
    # The host in the legacy JSON credentials file already has a config
    # entry, so no import flow may be started for it.
    MockConfigEntry(domain="tradfri", data={"host": "mock-host"}).add_to_hass(hass)

    json_patch = patch(
        "homeassistant.components.tradfri.load_json",
        return_value={"mock-host": {"key": "some-info"}},
    )
    flow_patch = patch.object(hass.config_entries.flow, "async_init")
    with json_patch, flow_patch as flow_init:
        assert await async_setup_component(hass, "tradfri", {"tradfri": {}})
        await hass.async_block_till_done()

    assert not flow_init.mock_calls
async def test_config_json_host_imported(
    hass, mock_gateway_info, mock_entry_setup, gateway_id
):
    """Test that we import a configured host."""
    # Echo the requested connection parameters back as the gateway info so
    # the import flow can complete without a real gateway.
    mock_gateway_info.side_effect = lambda hass, host, identity, key: {
        "host": host,
        "identity": identity,
        "key": key,
        "gateway_id": gateway_id,
    }
    with patch(
        "homeassistant.components.tradfri.load_json",
        return_value={"mock-host": {"key": "some-info"}},
    ):
        assert await async_setup_component(hass, "tradfri", {"tradfri": {}})
        await hass.async_block_till_done()

    # The created entry is the second positional argument of the first call
    # to the patched entry-setup hook (presumably async_setup_entry --
    # confirm against the mock_entry_setup fixture).
    config_entry = mock_entry_setup.mock_calls[0][1][1]
    assert config_entry.domain == "tradfri"
    assert config_entry.source == "import"
    assert config_entry.title == "mock-host"
async def test_entry_setup_unload(hass, api_factory, gateway_id):
    """Test config entry setup and unload."""
    entry = MockConfigEntry(
        domain=tradfri.DOMAIN,
        data={
            tradfri.CONF_HOST: "mock-host",
            tradfri.CONF_IDENTITY: "mock-identity",
            tradfri.CONF_KEY: "mock-key",
            tradfri.CONF_IMPORT_GROUPS: True,
            tradfri.CONF_GATEWAY_ID: gateway_id,
        },
    )
    entry.add_to_hass(hass)

    with patch.object(
        hass.config_entries, "async_forward_entry_setup", return_value=True
    ) as setup:
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        # Setup must forward the entry once per supported platform.
        assert setup.call_count == len(tradfri.PLATFORMS)

    # Setup must also register the gateway itself as a device.
    dev_reg = await async_get_device_registry(hass)
    dev_entries = async_entries_for_config_entry(dev_reg, entry.entry_id)

    assert dev_entries
    dev_entry = dev_entries[0]
    assert dev_entry.identifiers == {
        (tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
    }
    assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
    assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
    assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL

    with patch.object(
        hass.config_entries, "async_forward_entry_unload", return_value=True
    ) as unload:
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
        # Unload mirrors setup: one unload per platform, and the API
        # connection is shut down exactly once.
        assert unload.call_count == len(tradfri.PLATFORMS)
        assert api_factory.shutdown.call_count == 1
| apache-2.0 |
decodio/pos_box | addons/hw_escpos_serial/controllers/main.py | 1 | 14551 | # -*- coding: utf-8 -*-
import commands
import logging
import simplejson
import os
import os.path
import io
import base64
import openerp
import time
import random
import math
import md5
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import pickle
import re
import subprocess
import traceback
from threading import Thread, Lock
from Queue import Queue, Empty
try:
import usb.core
except ImportError:
usb = None
try:
from .. import escpos
from ..escpos import printer
from ..escpos import supported_devices
except ImportError:
escpos = printer = None
from PIL import Image
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class EscposDriver(Thread):
    """Background worker thread driving an ESC/POS receipt printer.

    Tasks (receipts, cash drawer kicks, status slips) are pushed onto a
    queue by the HTTP layer and consumed here, so slow or absent hardware
    never blocks a web request. The printer (USB or serial) is re-probed
    for every task.

    Fixes vs. the previous revision:
    - ``supported_devices``: the pickle file handle was leaked (its
      ``close()`` sat *after* the ``return`` and was unreachable).
    - ``add_supported_device``: pickle file handles were not closed on
      exception; now uses ``with``.
    - ``connected_usb_devices``: closed ``self.device`` (a nonexistent
      attribute of the driver) instead of the serial port just probed,
      so the port was never released.
    """

    def __init__(self):
        Thread.__init__(self)
        self.queue = Queue()   # pending (timestamp, task, data) tuples
        self.lock = Lock()     # guards lazy thread startup
        self.status = {'status': 'connecting', 'messages': []}

    def supported_devices(self):
        """Return the list of supported printer descriptors.

        User-registered devices are persisted in 'escpos_devices.pickle'
        (relative to the server's cwd); falls back to the built-in list
        when the file is absent or unreadable.
        """
        if not os.path.isfile('escpos_devices.pickle'):
            return supported_devices.device_list
        try:
            # 'with' guarantees the handle is closed; the old code's
            # f.close() was placed after the return and never executed.
            with open('escpos_devices.pickle', 'r') as f:
                return pickle.load(f)
        except Exception as e:
            self.set_status('error', str(e))
            return supported_devices.device_list

    def add_supported_device(self, device_string):
        """Parse an lsusb-style line ('... ID vvvv:pppp Name') and persist
        it in the pickled list of supported devices."""
        r = re.compile('[0-9A-Fa-f]{4}:[0-9A-Fa-f]{4}')
        match = r.search(device_string)
        if match:
            match = match.group().split(':')
            vendor = int(match[0], 16)
            product = int(match[1], 16)
            # Everything after 'ID' is the human-readable device name.
            name = device_string.split('ID')
            if len(name) >= 2:
                name = name[1]
            else:
                name = name[0]
            _logger.info('ESC/POS: adding support for device: '+match[0]+':'+match[1]+' '+name)

            device_list = supported_devices.device_list[:]
            if os.path.isfile('escpos_devices.pickle'):
                try:
                    with open('escpos_devices.pickle', 'r') as f:
                        device_list = pickle.load(f)
                except Exception as e:
                    self.set_status('error', str(e))
            device_list.append({
                'vendor': vendor,
                'product': product,
                'name': name,
            })

            try:
                with open('escpos_devices.pickle', 'w+') as f:
                    f.seek(0)
                    pickle.dump(device_list, f)
            except Exception as e:
                self.set_status('error', str(e))

    def connected_usb_devices(self):
        """Return the supported devices that are currently reachable:
        USB matches first, then (optionally) a generic serial printer."""
        connected = []

        for device in self.supported_devices():
            if usb.core.find(idVendor=device['vendor'], idProduct=device['product']) is not None:
                connected.append(device)

        #GK+
        try:
            ser = escpos.printer.Serial()
            if ser.device is not None:
                connected.append({
                    'vendor': 'SERIAL',
                    'product': ser.devfile,
                    'name': 'Serial Generic Epson'})
            # Release the serial port opened by the probe above. The old
            # code called self.device.close(), but the driver has no
            # 'device' attribute, so the port stayed open (the error was
            # hidden by the bare except below).
            ser.device.close()
        except:
            # Serial probing legitimately fails when no port is present.
            pass
        #GK-

        return connected

    def lockedstart(self):
        """Start the worker thread exactly once (idempotent, lock-guarded)."""
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def get_escpos_printer(self):
        """Return an Escpos printer object for the first connected device,
        or None. The driver status is updated as a side effect."""
        try:
            printers = self.connected_usb_devices()
            if len(printers) > 0:
                self.set_status('connected','Connected to '+printers[0]['name'])
                #GK+
                if printers[0]['vendor'] == 'SERIAL':
                    return escpos.printer.Serial()
                #GK-
                return escpos.printer.Usb(printers[0]['vendor'], printers[0]['product'])
            else:
                self.set_status('disconnected','Printer Not Found')
                return None
        except Exception as e:
            self.set_status('error',str(e))
            return None

    def get_status(self):
        """Queue a status probe and return the (possibly stale) status dict."""
        self.push_task('status')
        return self.status

    def open_cashbox(self, printer):
        """Pulse both cash drawer pins (2 and 5) so either wiring works."""
        printer.cashdraw(2)
        printer.cashdraw(5)

    def set_status(self, status, message=None):
        """Record the driver state ('connecting'/'connected'/'disconnected'/
        'error') plus an optional message; consecutive duplicate messages
        are collapsed."""
        _logger.info(status+' : '+ (message or 'no message'))
        if status == self.status['status']:
            if message != None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('ESC/POS Error: '+message)
        elif status == 'disconnected' and message:
            _logger.warning('ESC/POS Device Disconnected: '+message)

    def run(self):
        """Worker loop: pop queued tasks and execute them on the printer.

        While no printer is available, non-status tasks are pushed back
        onto the queue and retried after a 5s pause. Stale tasks are
        dropped: receipts older than one hour, cashbox kicks older than
        12 seconds.
        """
        if not escpos:
            _logger.error('ESC/POS cannot initialize, please verify system dependencies.')
            return
        while True:
            try:
                timestamp, task, data = self.queue.get(True)

                printer = self.get_escpos_printer()

                if printer is None:
                    if task != 'status':
                        # Keep the task for when a printer shows up.
                        self.queue.put((timestamp, task, data))
                    time.sleep(5)
                    continue
                elif task == 'receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        self.print_receipt_body(printer, data)
                        printer.cut()
                elif task == 'xml_receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        printer.receipt(data)
                elif task == 'cashbox':
                    if timestamp >= time.time() - 12:
                        self.open_cashbox(printer)
                elif task == 'printstatus':
                    self.print_status(printer)
                elif task == 'status':
                    pass

            except Exception as e:
                self.set_status('error', str(e))
                errmsg = str(e) + '\n' + '-'*60+'\n' + traceback.format_exc() + '-'*60 + '\n'
                _logger.error(errmsg)

    def push_task(self, task, data=None):
        """Enqueue a task (starting the worker thread if needed)."""
        self.lockedstart()
        self.queue.put((time.time(), task, data))

    def print_status(self, eprint):
        """Print a self-test slip with the PosBox's IP addresses and URL."""
        localips = ['0.0.0.0','127.0.0.1','127.0.1.1']
        # Extract non-local IPv4 addresses from ifconfig's output.
        ips = [ c.split(':')[1].split(' ')[0] for c in commands.getoutput("/sbin/ifconfig").split('\n') if 'inet addr' in c ]
        ips = [ ip for ip in ips if ip not in localips ]
        eprint.text('\n\n')
        eprint.set(align='center',type='b',height=2,width=2)
        eprint.text('PosBox Status\n')
        eprint.text('\n')
        eprint.set(align='center')

        if len(ips) == 0:
            eprint.text('ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available')
        elif len(ips) == 1:
            eprint.text('IP Address:\n'+ips[0]+'\n')
        else:
            eprint.text('IP Addresses:\n')
            for ip in ips:
                eprint.text(ip+'\n')

        if len(ips) >= 1:
            eprint.text('\nHomepage:\nhttp://'+ips[0]+':8069\n')

        eprint.text('\n\n')
        eprint.cut()

    def print_receipt_body(self, eprint, receipt):
        """Render a JSON-described receipt (header, order lines, taxes,
        totals, payments, footer) on the printer. Does not cut the paper;
        the caller does that."""

        def check(string):
            # A field is printable if it is a non-empty, non-True string.
            return string != True and bool(string) and string.strip()

        def price(amount):
            return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)

        def money(amount):
            return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)

        def quantity(amount):
            # Print integral quantities without a decimal part.
            if math.floor(amount) != amount:
                return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
            else:
                return str(amount)

        def printline(left, right='', width=40, ratio=0.5, indent=0):
            # One receipt line: left-aligned text, right-aligned amount.
            lwidth = int(width * ratio)
            rwidth = width - lwidth
            lwidth = lwidth - indent

            left = left[:lwidth]
            if len(left) != lwidth:
                left = left + ' ' * (lwidth - len(left))

            right = right[-rwidth:]
            if len(right) != rwidth:
                right = ' ' * (rwidth - len(right)) + right

            return ' ' * indent + left + right + '\n'

        def print_taxes():
            taxes = receipt['tax_details']
            for tax in taxes:
                eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))

        # Receipt Header
        if receipt['company']['logo']:
            eprint.set(align='center')
            eprint.print_base64_image(receipt['company']['logo'])
            eprint.text('\n')
        else:
            eprint.set(align='center',type='b',height=2,width=2)
            eprint.text(receipt['company']['name'] + '\n')

        eprint.set(align='center',type='b')
        if check(receipt['company']['contact_address']):
            eprint.text(receipt['company']['contact_address'] + '\n')
        if check(receipt['company']['phone']):
            eprint.text('Tel:' + receipt['company']['phone'] + '\n')
        if check(receipt['company']['vat']):
            eprint.text('VAT:' + receipt['company']['vat'] + '\n')
        if check(receipt['company']['email']):
            eprint.text(receipt['company']['email'] + '\n')
        if check(receipt['company']['website']):
            eprint.text(receipt['company']['website'] + '\n')
        if check(receipt['header']):
            eprint.text(receipt['header']+'\n')
        if check(receipt['cashier']):
            eprint.text('-'*32+'\n')
            eprint.text('Served by '+receipt['cashier']+'\n')

        # Orderlines
        eprint.text('\n\n')
        eprint.set(align='center')
        for line in receipt['orderlines']:
            pricestr = price(line['price_display'])
            # Compact form for the common case; detailed form otherwise.
            if line['discount'] == 0 and line['unit_name'] == 'Unit(s)' and line['quantity'] == 1:
                eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
            else:
                eprint.text(printline(line['product_name'],ratio=0.6))
                if line['discount'] != 0:
                    eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
                if line['unit_name'] == 'Unit(s)':
                    eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
                else:
                    eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))

        # Subtotal if the taxes are not included
        taxincluded = True
        if money(receipt['subtotal']) != money(receipt['total_with_tax']):
            eprint.text(printline('','-------'))
            eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
            taxincluded = False

        # Total
        eprint.text(printline('','-------'))
        eprint.set(align='center',height=2)
        eprint.text(printline(_(' TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
        eprint.text('\n\n')

        # Paymentlines
        eprint.set(align='center')
        for line in receipt['paymentlines']:
            eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))

        eprint.text('\n')
        eprint.set(align='center',height=2)
        eprint.text(printline(_(' CHANGE'),money(receipt['change']),width=40, ratio=0.6))
        eprint.set(align='center')
        eprint.text('\n')

        # Extra Payment info
        if receipt['total_discount'] != 0:
            eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
        if taxincluded:
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))

        # Footer
        if check(receipt['footer']):
            eprint.text('\n'+receipt['footer']+'\n\n')
        eprint.text(receipt['name']+'\n')
        eprint.text(      str(receipt['date']['date']).zfill(2)
                    +'/'+ str(receipt['date']['month']+1).zfill(2)
                    +'/'+ str(receipt['date']['year']).zfill(4)
                    +' '+ str(receipt['date']['hour']).zfill(2)
                    +':'+ str(receipt['date']['minute']).zfill(2) )
# Module-level singleton: one ESC/POS driver thread shared by all HTTP
# requests; registered with the hw_proxy dispatcher under 'escpos'.
driver = EscposDriver()
# Queue a status slip at startup so a connected printer announces itself.
driver.push_task('printstatus')
hw_proxy.drivers['escpos'] = driver
class EscposProxy(hw_proxy.Proxy):
    """HTTP endpoints exposing the ESC/POS driver to the point of sale."""

    @http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
    def open_cashbox(self):
        """Queue a cash drawer kick on the connected printer."""
        _logger.info('ESC/POS: OPEN CASHBOX')
        driver.push_task('cashbox')

    @http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
    def print_receipt(self, receipt):
        """Queue a JSON-described receipt for printing."""
        _logger.info('ESC/POS: PRINT RECEIPT')
        driver.push_task('receipt', receipt)

    @http.route('/hw_proxy/print_xml_receipt', type='json', auth='none', cors='*')
    def print_xml_receipt(self, receipt):
        """Queue an XML-described receipt for printing."""
        _logger.info('ESC/POS: PRINT XML RECEIPT')
        driver.push_task('xml_receipt', receipt)

    @http.route('/hw_proxy/escpos/add_supported_device', type='http', auth='none', cors='*')
    def add_supported_device(self, device_string):
        """Register an extra printer (lsusb-style string) as supported."""
        _logger.info('ESC/POS: ADDED NEW DEVICE:' + device_string)
        driver.add_supported_device(device_string)
        return ("The device:\n%s\n has been added to the list of supported devices."
                "<br/><a href='/hw_proxy/status'>Ok</a>" % device_string)

    @http.route('/hw_proxy/escpos/reset_supported_devices', type='http', auth='none', cors='*')
    def reset_supported_devices(self):
        """Drop the user-added device list, reverting to factory defaults."""
        try:
            os.remove('escpos_devices.pickle')
        except Exception:
            pass
        return 'The list of supported devices has been reset to factory defaults.<br/><a href="/hw_proxy/status">Ok</a>'
| agpl-3.0 |
Flexget/Flexget | flexget/plugins/cli/inject.py | 3 | 3377 | import argparse
import cgi
import random
import string
import yaml
from flexget import options
from flexget.entry import Entry
from flexget.event import event
from flexget.terminal import console
from flexget.utils import requests
@event('manager.subcommand.inject')
def do_cli(manager, options):
    """Build a single entry from the CLI arguments and inject it into the
    scheduled task execution."""
    # A lone positional that looks like a URL was really the url, not a title.
    if not options.url and '://' in options.title:
        options.url, options.title = options.title, None

    if options.url and not options.title:
        # Derive a title from the response's Content-Disposition header.
        try:
            _, params = cgi.parse_header(
                requests.head(options.url).headers['Content-Disposition']
            )
            options.title = params['filename']
        except KeyError:
            console(
                'No title given, and couldn\'t get one from the URL\'s HTTP response. Aborting.'
            )
            return

    entry = Entry(title=options.title)
    if options.url:
        entry['url'] = options.url
    else:
        # No URL given: fabricate a unique local placeholder.
        random_tail = ''.join(random.sample(string.ascii_letters + string.digits, 30))
        entry['url'] = 'http://localhost/inject/%s' % random_tail
    if options.force:
        entry['immortal'] = True
    if options.accept:
        entry.accept(reason='accepted by CLI inject')
    for key, value in options.fields or ():
        entry[key] = value
    options.inject = [entry]
    manager.execute_command(options)
def key_equals_value(text):
    """argparse ``type=`` callable parsing a ``NAME=VALUE`` pair.

    Returns ``(name, parsed_value)`` where the value is run through
    ``yaml.safe_load`` so numbers, booleans and lists keep their natural
    types. Only the *first* ``=`` splits the pair, so values may contain
    ``=`` themselves (URLs, base64 strings, ...).

    :raises argparse.ArgumentTypeError: if *text* contains no ``=``.
    """
    if '=' not in text:
        raise argparse.ArgumentTypeError('must be in the form: <field name>=<value>')
    # split('=', 1): the old unbounded split('=') raised
    # "too many values to unpack" for any value containing '='.
    key, value = text.split('=', 1)
    return key, yaml.safe_load(value)
# Run after other plugins, so we can get all exec subcommand options
@event('options.register', priority=0)
def register_parser_arguments():
    """Register the 'inject' CLI command, inheriting every 'execute' option."""
    exec_parser = options.get_parser('execute')
    inject_parser = options.register_command(
        'inject',
        do_cli,
        add_help=False,
        parents=[exec_parser],
        help='inject an entry from command line into tasks',
        usage='%(prog)s title/url [url] [--accept] [--force] '
        '[--fields NAME=VALUE [NAME=VALUE...]] [<execute arguments>]',
        epilog=(
            'If only a URL and no title is given, Flexget will attempt to '
            'find a title in the URL\'s response headers.'
        ),
    )
    inject_group = inject_parser.add_argument_group('inject arguments')
    inject_group.add_argument(
        'title', metavar='title/url', help='title or url of the entry to inject'
    )
    inject_group.add_argument('url', nargs='?', help='url of the entry to inject')
    inject_group.add_argument(
        '--force', action='store_true', help='prevent any plugins from rejecting this entry'
    )
    inject_group.add_argument(
        '--accept', action='store_true', help='accept this entry immediately upon injection'
    )
    inject_group.add_argument('--fields', metavar='NAME=VALUE', nargs='+', type=key_equals_value)
    # Hack the title of the exec options a bit (would be 'optional arguments' otherwise)
    inject_parser._action_groups[1].title = 'execute arguments'
    # The exec arguments show first... unless we switch them
    inject_parser._action_groups.remove(inject_group)
    inject_parser._action_groups.insert(0, inject_group)
| mit |
petecummings/NewsBlur | utils/munin/newsblur_tasks_pipeline.py | 14 | 1474 | #!/usr/bin/env python
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
    """Munin plugin graphing NewsBlur's feed-fetch pipeline timings."""

    @property
    def graph_config(self):
        """Munin 'config' output: one data series per pipeline stage."""
        graph = {
            'graph_category' : 'NewsBlur',
            'graph_title' : 'NewsBlur Task Pipeline',
            'graph_vlabel' : 'Feed fetch pipeline times',
            'graph_args' : '-l 0',
            'feed_fetch.label': 'feed_fetch',
            'feed_process.label': 'feed_process',
            'page.label': 'page',
            'icon.label': 'icon',
            'total.label': 'total',
        }
        return graph

    def calculate_metrics(self):
        # The emitted values are exactly the aggregated Mongo stats.
        return self.stats

    @property
    def stats(self):
        """Average per-stage fetch timings over the last 5 minutes,
        aggregated from the Mongo analytics collection."""
        import datetime
        from django.conf import settings
        stats = settings.MONGOANALYTICSDB.nbanalytics.feed_fetches.aggregate([{
            "$match": {
                "date": {
                    "$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
                },
            },
        }, {
            "$group": {
                "_id": 1,
                "feed_fetch": {"$avg": "$feed_fetch"},
                "feed_process": {"$avg": "$feed_process"},
                "page": {"$avg": "$page"},
                "icon": {"$avg": "$icon"},
                "total": {"$avg": "$total"},
            },
        }])
        # NOTE(review): assumes aggregate() returns a {'result': [...]} dict
        # (pymongo 2.x command style); newer pymongo returns a cursor —
        # verify against the installed pymongo version.
        return stats['result'][0]
# Munin executes this file directly; run() prints either the graph config
# or the current values depending on the munin invocation.
if __name__ == '__main__':
    NBMuninGraph().run()
| mit |
takaaptech/sky_engine | third_party/cython/src/Cython/Compiler/Main.py | 90 | 26733 | #
# Cython Top Level
#
import os, sys, re, codecs
# Hard minimum interpreter version; bail out early with a clear message.
if sys.version_info[:2] < (2, 4):
    sys.stderr.write("Sorry, Cython requires Python 2.4 or later\n")
    sys.exit(1)
import Errors
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
import Version
from Scanning import PyrexScanner, FileSourceDescriptor
from Errors import PyrexError, CompileError, error, warning
from Symtab import ModuleScope
from Cython import Utils
import Options
# Matches a valid (possibly dotted) Python module name.
module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
# Module-level verbosity flag (0 = quiet).
verbose = 0
class CompilationData(object):
    """Placeholder bundle for the data passed from transform to transform
    during a single compilation pass (currently unused)."""
    # Bundles the information that is passed from transform to transform.
    # (For now, this is only)
    # While Context contains every pxd ever loaded, path information etc.,
    # this only contains the data related to a single compilation pass
    #
    # pyx ModuleNode Main code tree of this compilation.
    # pxds {string : ModuleNode} Trees for the pxds used in the pyx.
    # codewriter CCodeWriter Where to output final code.
    # options CompilationOptions
    # result CompilationResult
    pass
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
# associated and imported declaration files. It includes
# the root of the module import namespace and the list
# of directories to search for include files.
#
# modules {string : ModuleScope}
# include_directories [string]
# future_directives [object]
# language_level int currently 2 or 3 for Python 2/3
cython_scope = None
def __init__(self, include_directories, compiler_directives, cpp=False,
language_level=2, options=None, create_testscope=True):
# cython_scope is a hack, set to False by subclasses, in order to break
# an infinite loop.
# Better code organization would fix it.
import Builtin, CythonScope
self.modules = {"__builtin__" : Builtin.builtin_scope}
self.cython_scope = CythonScope.create_cython_scope(self)
self.modules["cython"] = self.cython_scope
self.include_directories = include_directories
self.future_directives = set()
self.compiler_directives = compiler_directives
self.cpp = cpp
self.options = options
self.pxds = {} # full name -> node tree
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
self.include_directories = include_directories + [standard_include_path]
self.set_language_level(language_level)
self.gdb_debug_outputwriter = None
def set_language_level(self, level):
self.language_level = level
if level >= 3:
from Future import print_function, unicode_literals, absolute_import
self.future_directives.update([print_function, unicode_literals, absolute_import])
self.modules['builtins'] = self.modules['__builtin__']
# pipeline creation functions can now be found in Pipeline.py
def process_pxd(self, source_desc, scope, module_name):
import Pipeline
if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
source = CompilationSource(source_desc, module_name, os.getcwd())
result_sink = create_default_resultobj(source, self.options)
pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
result = Pipeline.run_pipeline(pipeline, source)
else:
pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
result = Pipeline.run_pipeline(pipeline, source_desc)
return result
def nonfatal_error(self, exc):
return Errors.report_error(exc)
def find_module(self, module_name,
relative_to = None, pos = None, need_pxd = 1, check_module_name = True):
# Finds and returns the module scope corresponding to
# the given relative or absolute module name. If this
# is the first time the module has been requested, finds
# the corresponding .pxd file and process it.
# If relative_to is not None, it must be a module scope,
# and the module will first be searched for relative to
# that module, provided its name is not a dotted name.
debug_find_module = 0
if debug_find_module:
print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % (
module_name, relative_to, pos, need_pxd))
scope = None
pxd_pathname = None
if check_module_name and not module_name_pattern.match(module_name):
if pos is None:
pos = (module_name, 0, 0)
raise CompileError(pos,
"'%s' is not a valid module name" % module_name)
if "." not in module_name and relative_to:
if debug_find_module:
print("...trying relative import")
scope = relative_to.lookup_submodule(module_name)
if not scope:
qualified_name = relative_to.qualify_name(module_name)
pxd_pathname = self.find_pxd_file(qualified_name, pos)
if pxd_pathname:
scope = relative_to.find_submodule(module_name)
if not scope:
if debug_find_module:
print("...trying absolute import")
scope = self
for name in module_name.split("."):
scope = scope.find_submodule(name)
if debug_find_module:
print("...scope =", scope)
if not scope.pxd_file_loaded:
if debug_find_module:
print("...pxd not loaded")
scope.pxd_file_loaded = 1
if not pxd_pathname:
if debug_find_module:
print("...looking for pxd file")
pxd_pathname = self.find_pxd_file(module_name, pos)
if debug_find_module:
print("......found ", pxd_pathname)
if not pxd_pathname and need_pxd:
package_pathname = self.search_include_directories(module_name, ".py", pos)
if package_pathname and package_pathname.endswith('__init__.py'):
pass
else:
error(pos, "'%s.pxd' not found" % module_name)
if pxd_pathname:
try:
if debug_find_module:
print("Context.find_module: Parsing %s" % pxd_pathname)
rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
if not pxd_pathname.endswith(rel_path):
rel_path = pxd_pathname # safety measure to prevent printing incorrect paths
source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
err, result = self.process_pxd(source_desc, scope, module_name)
if err:
raise err
(pxd_codenodes, pxd_scope) = result
self.pxds[module_name] = (pxd_codenodes, pxd_scope)
except CompileError:
pass
return scope
def find_pxd_file(self, qualified_name, pos):
# Search include path for the .pxd file corresponding to the
# given fully-qualified module name.
# Will find either a dotted filename or a file in a
# package directory. If a source file position is given,
# the directory containing the source file is searched first
# for a dotted filename, and its containing package root
# directory is searched first for a non-dotted filename.
pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=True)
if pxd is None: # XXX Keep this until Includes/Deprecated is removed
if (qualified_name.startswith('python') or
qualified_name in ('stdlib', 'stdio', 'stl')):
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
self.include_directories.append(deprecated_include_path)
try:
pxd = self.search_include_directories(qualified_name, ".pxd", pos)
finally:
self.include_directories.pop()
if pxd:
name = qualified_name
if name.startswith('python'):
warning(pos, "'%s' is deprecated, use 'cpython'" % name, 1)
elif name in ('stdlib', 'stdio'):
warning(pos, "'%s' is deprecated, use 'libc.%s'" % (name, name), 1)
elif name in ('stl'):
warning(pos, "'%s' is deprecated, use 'libcpp.*.*'" % name, 1)
if pxd is None and Options.cimport_from_pyx:
return self.find_pyx_file(qualified_name, pos)
return pxd
def find_pyx_file(self, qualified_name, pos):
# Search include path for the .pyx file corresponding to the
# given fully-qualified module name, as for find_pxd_file().
return self.search_include_directories(qualified_name, ".pyx", pos)
def find_include_file(self, filename, pos):
# Search list of include directories for filename.
# Reports an error and returns None if not found.
path = self.search_include_directories(filename, "", pos,
include=True)
if not path:
error(pos, "'%s' not found" % filename)
return path
def search_include_directories(self, qualified_name, suffix, pos,
include=False, sys_path=False):
return Utils.search_include_directories(
tuple(self.include_directories), qualified_name, suffix, pos, include, sys_path)
def find_root_package_dir(self, file_path):
return Utils.find_root_package_dir(file_path)
def check_package_dir(self, dir, package_names):
return Utils.check_package_dir(dir, tuple(package_names))
def c_file_out_of_date(self, source_path):
c_path = Utils.replace_suffix(source_path, ".c")
if not os.path.exists(c_path):
return 1
c_time = Utils.modification_time(c_path)
if Utils.file_newer_than(source_path, c_time):
return 1
pos = [source_path]
pxd_path = Utils.replace_suffix(source_path, ".pxd")
if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
return 1
for kind, name in self.read_dependency_file(source_path):
if kind == "cimport":
dep_path = self.find_pxd_file(name, pos)
elif kind == "include":
dep_path = self.search_include_directories(name, pos)
else:
continue
if dep_path and Utils.file_newer_than(dep_path, c_time):
return 1
return 0
def find_cimported_module_names(self, source_path):
return [ name for kind, name in self.read_dependency_file(source_path)
if kind == "cimport" ]
def is_package_dir(self, dir_path):
return Utils.is_package_dir(dir_path)
def read_dependency_file(self, source_path):
dep_path = Utils.replace_suffix(source_path, ".dep")
if os.path.exists(dep_path):
f = open(dep_path, "rU")
chunks = [ line.strip().split(" ", 1)
for line in f.readlines()
if " " in line.strip() ]
f.close()
return chunks
else:
return ()
def lookup_submodule(self, name):
# Look up a top-level module. Returns None if not found.
return self.modules.get(name, None)
def find_submodule(self, name):
# Find a top-level module, creating a new one if needed.
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name,
parent_module = None, context = self)
self.modules[name] = scope
return scope
def parse(self, source_desc, scope, pxd, full_module_name):
if not isinstance(source_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
source_filename = source_desc.filename
scope.cpp = self.cpp
# Parse the given source file and return a parse tree.
num_errors = Errors.num_errors
try:
f = Utils.open_source_file(source_filename, "rU")
try:
import Parsing
s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
scope = scope, context = self)
tree = Parsing.p_module(s, pxd, full_module_name)
finally:
f.close()
except UnicodeDecodeError, e:
#import traceback
#traceback.print_exc()
line = 1
column = 0
msg = e.args[-1]
position = e.args[2]
encoding = e.args[0]
f = open(source_filename, "rb")
try:
byte_data = f.read()
finally:
f.close()
# FIXME: make this at least a little less inefficient
for idx, c in enumerate(byte_data):
if c in (ord('\n'), '\n'):
line += 1
column = 0
if idx == position:
break
column += 1
error((source_desc, line, column),
"Decoding error, missing or incorrect coding=<encoding-name> "
"at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
if Errors.num_errors > num_errors:
raise CompileError()
return tree
def extract_module_name(self, path, options):
# Find fully_qualified module name from the full pathname
# of a source file.
dir, filename = os.path.split(path)
module_name, _ = os.path.splitext(filename)
if "." in module_name:
return module_name
names = [module_name]
while self.is_package_dir(dir):
parent, package_name = os.path.split(dir)
if parent == dir:
break
names.append(package_name)
dir = parent
names.reverse()
return ".".join(names)
def setup_errors(self, options, result):
Errors.reset() # clear any remaining error state
if options.use_listing_file:
result.listing_file = Utils.replace_suffix(source, ".lis")
path = result.listing_file
else:
path = None
Errors.open_listing_file(path=path,
echo_to_stderr=options.errors_to_stderr)
def teardown_errors(self, err, options, result):
source_desc = result.compilation_source.source_desc
if not isinstance(source_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
Errors.close_listing_file()
result.num_errors = Errors.num_errors
if result.num_errors > 0:
err = True
if err and result.c_file:
try:
Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
except EnvironmentError:
pass
result.c_file = None
def create_default_resultobj(compilation_source, options):
    """Build a CompilationResult pre-filled with the source file name and
    the target C/C++ file name derived from the compilation options."""
    source_desc = compilation_source.source_desc
    result = CompilationResult()
    result.main_source_file = source_desc.filename
    result.compilation_source = compilation_source
    if options.output_file:
        # An explicit output file is resolved against the compilation cwd.
        result.c_file = os.path.join(compilation_source.cwd, options.output_file)
    else:
        if options.cplus:
            suffix = ".cpp"
        else:
            suffix = ".c"
        result.c_file = Utils.replace_suffix(source_desc.filename, suffix)
    return result
def run_pipeline(source, options, full_module_name=None, context=None):
    # Compile one source file through the complete Cython pipeline and
    # return its CompilationResult; a Context is created on demand when
    # the caller did not supply one.
    import Pipeline

    source_ext = os.path.splitext(source)[1]
    options.configure_language_defaults(source_ext[1:]) # py/pyx
    if context is None:
        context = options.create_context()

    # Set up source object
    cwd = os.getcwd()
    abs_path = os.path.abspath(source)
    full_module_name = full_module_name or context.extract_module_name(source, options)

    if options.relative_path_in_code_position_comments:
        rel_path = full_module_name.replace('.', os.sep) + source_ext
        if not abs_path.endswith(rel_path):
            rel_path = source # safety measure to prevent printing incorrect paths
    else:
        rel_path = abs_path
    source_desc = FileSourceDescriptor(abs_path, rel_path)
    source = CompilationSource(source_desc, full_module_name, cwd)

    # Set up result object
    result = create_default_resultobj(source, options)

    if options.annotate is None:
        # By default, decide based on whether an html file already exists.
        html_filename = os.path.splitext(result.c_file)[0] + ".html"
        if os.path.exists(html_filename):
            line = codecs.open(html_filename, "r", encoding="UTF-8").readline()
            if line.startswith(u'<!-- Generated by Cython'):
                options.annotate = True

    # Get pipeline
    if source_ext.lower() == '.py' or not source_ext:
        pipeline = Pipeline.create_py_pipeline(context, options, result)
    else:
        pipeline = Pipeline.create_pyx_pipeline(context, options, result)

    context.setup_errors(options, result)
    err, enddata = Pipeline.run_pipeline(pipeline, source)
    context.teardown_errors(err, options, result)
    return result
#------------------------------------------------------------------------
#
# Main Python entry points
#
#------------------------------------------------------------------------
class CompilationSource(object):
    """Everything needed to kick off one compilation pipeline run: the
    source descriptor, the fully qualified module name, and the working
    directory the compilation was started from."""

    def __init__(self, source_desc, full_module_name, cwd):
        self.cwd = cwd
        self.source_desc = source_desc
        self.full_module_name = full_module_name
class CompilationOptions(object):
    """Settings controlling one Cython compiler run.

    Recognised options (merged from *defaults* and keyword overrides):

    show_version             boolean   Display version number
    use_listing_file         boolean   Generate a .lis file
    errors_to_stderr         boolean   Echo errors to stderr when using .lis
    include_path             [string]  Directories to search for include files
    output_file              string    Name of generated .c file
    generate_pxi             boolean   Generate .pxi file for public declarations
    capi_reexport_cincludes  boolean   Add cincluded headers to any
                                       auto-generated header files.
    timestamps               boolean   Only compile changed source files.
    verbose                  boolean   Always print source names being compiled
    compiler_directives      dict      Overrides for pragma options (see Options.py)
    evaluate_tree_assertions boolean   Test support: evaluate parse tree assertions
    language_level           integer   The Python language level: 2 or 3
    cplus                    boolean   Compile as c++ code
    """

    def __init__(self, defaults=None, **kw):
        self.include_path = []
        if not defaults:
            defaults = default_options
        elif isinstance(defaults, CompilationOptions):
            defaults = defaults.__dict__
        merged = dict(defaults)
        merged.update(kw)

        # Take our own copy of the (mutable) directives dict.
        directives = dict(merged['compiler_directives'])
        merged['compiler_directives'] = directives
        # A directive-level language_level wins unless explicitly overridden.
        if 'language_level' in directives and 'language_level' not in kw:
            merged['language_level'] = int(directives['language_level'])

        if 'cache' in merged:
            cache = merged['cache']
            if cache is True:
                merged['cache'] = os.path.expanduser("~/.cycache")
            elif cache in (False, None):
                del merged['cache']

        self.__dict__.update(merged)

    def configure_language_defaults(self, source_extension):
        # Plain .py sources default to full Python binding semantics.
        if source_extension == 'py' and self.compiler_directives.get('binding') is None:
            self.compiler_directives['binding'] = True

    def create_context(self):
        return Context(self.include_path, self.compiler_directives,
                       self.cplus, self.language_level, options=self)
class CompilationResult(object):
    """Outputs of a single Cython compilation.

    c_file           string or None   The generated C source file
    h_file           string or None   The generated C header file
    i_file           string or None   The generated .pxi file
    api_file         string or None   The generated C API .h file
    listing_file     string or None   File of error messages
    object_file      string or None   Result of compiling the C file
    extension_file   string or None   Result of linking the object file
    num_errors       integer          Number of compilation errors
    compilation_source CompilationSource
    """

    def __init__(self):
        # No outputs have been produced yet.
        for attr in ('c_file', 'h_file', 'i_file', 'api_file',
                     'listing_file', 'object_file', 'extension_file',
                     'main_source_file'):
            setattr(self, attr, None)
class CompilationResultSet(dict):
    """
    Results from compiling multiple Pyrex source files.

    Maps each source file path to its CompilationResult.  The extra
    attribute ``num_errors`` accumulates the total number of compilation
    errors over every result added so far.
    """

    num_errors = 0

    def add(self, source, result):
        """Record *result* under *source* and fold in its error count."""
        self[source] = result
        self.num_errors = self.num_errors + result.num_errors
def compile_single(source, options, full_module_name = None):
    """
    compile_single(source, options, full_module_name)

    Compile the given Pyrex implementation file and return a CompilationResult.
    Always compiles a single file; does not perform timestamp checking or
    recursion.
    """
    # No explicit context is passed; run_pipeline sets one up for this file.
    return run_pipeline(source, options, full_module_name)
def compile_multiple(sources, options):
    """
    compile_multiple(sources, options)

    Compiles the given sequence of Pyrex implementation files and returns
    a CompilationResultSet.  Performs timestamp checking and/or recursion
    if these are specified in the options.
    """
    # run_pipeline creates the context
    # context = options.create_context()
    sources = [os.path.abspath(source) for source in sources]
    processed = set()
    results = CompilationResultSet()
    timestamps = options.timestamps
    verbose = options.verbose
    context = None
    for source in sources:
        if source not in processed:
            # Lazily (re)create the context: it is deliberately thrown away
            # after each compiled file -- see the note below.
            if context is None:
                context = options.create_context()
            # With timestamp checking enabled, skip files whose C output
            # is already up to date.
            if not timestamps or context.c_file_out_of_date(source):
                if verbose:
                    sys.stderr.write("Compiling %s\n" % source)
                result = run_pipeline(source, options, context=context)
                results.add(source, result)
                # Compiling multiple sources in one context doesn't quite
                # work properly yet.
                context = None
            processed.add(source)
    return results
def compile(source, options = None, full_module_name = None, **kwds):
    """
    compile(source [, options], [, <option> = <value>]...)

    Compile one or more Pyrex implementation files, with optional timestamp
    checking and recursing on dependencies.  The source argument may be a string
    or a sequence of strings.  If it is a string and no recursion or timestamp
    checking is requested, a CompilationResult is returned, otherwise a
    CompilationResultSet is returned.
    """
    options = CompilationOptions(defaults = options, **kwds)
    if isinstance(source, basestring) and not options.timestamps:
        return compile_single(source, options, full_module_name)
    else:
        return compile_multiple(source, options)
#------------------------------------------------------------------------
#
# Main command-line entry point
#
#------------------------------------------------------------------------
def setuptools_main():
    # Console-script entry point (setuptools): always parse the command line.
    return main(command_line = 1)
def main(command_line = 0):
    """Top-level entry point.

    With ``command_line`` true, sys.argv is parsed for compiler options and
    source files; otherwise every argument is treated as a source file and
    the default options are used.  Exits with status 1 on any failure.
    """
    args = sys.argv[1:]
    any_failures = 0
    if command_line:
        from CmdLine import parse_command_line
        options, sources = parse_command_line(args)
    else:
        options = CompilationOptions(default_options)
        sources = args
    if options.show_version:
        sys.stderr.write("Cython version %s\n" % Version.version)
    if options.working_path!="":
        os.chdir(options.working_path)
    try:
        result = compile(sources, options)
        if result.num_errors > 0:
            any_failures = 1
    # Python 2 exception syntax -- this module predates "except ... as e".
    except (EnvironmentError, PyrexError), e:
        sys.stderr.write(str(e) + '\n')
        any_failures = 1
    if any_failures:
        sys.exit(1)
#------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
#------------------------------------------------------------------------
# Default values for every CompilationOptions attribute; individual options
# are documented on the CompilationOptions class.
default_options = dict(
    show_version = 0,
    use_listing_file = 0,
    errors_to_stderr = 1,
    cplus = 0,
    output_file = None,
    annotate = None,
    generate_pxi = 0,
    capi_reexport_cincludes = 0,
    working_path = "",
    timestamps = None,
    verbose = 0,
    quiet = 0,
    compiler_directives = {},
    evaluate_tree_assertions = False,
    emit_linenums = False,
    relative_path_in_code_position_comments = True,
    c_line_in_traceback = True,
    language_level = 2,
    gdb_debug = False,
    compile_time_env = None,
    common_utility_include_dir = None,
)
| bsd-3-clause |
samuelgarcia/python-neo | neo/test/coretest/test_view.py | 5 | 2680 | """
Tests of the neo.core.view.ChannelView class and related functions
"""
import unittest
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal
from neo.core.analogsignal import AnalogSignal
from neo.core.irregularlysampledsignal import IrregularlySampledSignal
from neo.core.view import ChannelView
class TestView(unittest.TestCase):
    """Tests for ChannelView construction (integer and boolean indexing)
    and for materialising a view back into an AnalogSignal."""

    def setUp(self):
        # An 8-channel analog signal with named channels and an arbitrary
        # extra annotation; every test builds views over this signal.
        self.test_data = np.random.rand(100, 8) * pq.mV
        channel_names = np.array(["a", "b", "c", "d", "e", "f", "g", "h"])
        self.test_signal = AnalogSignal(self.test_data,
                                        sampling_period=0.1 * pq.ms,
                                        name="test signal",
                                        description="this is a test signal",
                                        array_annotations={"channel_names": channel_names},
                                        attUQoLtUaE=42)

    def test_create_integer_index(self):
        # A view over four channels keeps a reference to the original
        # signal, stores the index, and reports the reduced shape.
        view = ChannelView(self.test_signal, [1, 2, 5, 7],
                           name="view of test signal",
                           description="this is a view of a test signal",
                           array_annotations={"something": np.array(["A", "B", "C", "D"])},
                           sLaTfat="fish")
        assert view.obj is self.test_signal
        assert_array_equal(view.index, np.array([1, 2, 5, 7]))
        self.assertEqual(view.shape, (100, 4))
        self.assertEqual(view.name, "view of test signal")
        self.assertEqual(view.annotations["sLaTfat"], "fish")

    def test_create_boolean_index(self):
        # Boolean-mask indexing must be equivalent to integer indexing.
        view1 = ChannelView(self.test_signal, [1, 2, 5, 7])
        view2 = ChannelView(self.test_signal, np.array([0, 1, 1, 0, 0, 1, 0, 1], dtype=bool))
        assert_array_equal(view1.index, view2.index)
        self.assertEqual(view1.shape, view2.shape)

    def test_resolve(self):
        # resolve() should produce a real AnalogSignal containing only the
        # selected channels, with metadata and array annotations sliced to
        # match.
        view = ChannelView(self.test_signal, [1, 2, 5, 7],
                           name="view of test signal",
                           description="this is a view of a test signal",
                           array_annotations={"something": np.array(["A", "B", "C", "D"])},
                           sLaTfat="fish")
        signal2 = view.resolve()
        self.assertIsInstance(signal2, AnalogSignal)
        self.assertEqual(signal2.shape, (100, 4))
        for attr in ('name', 'description', 'sampling_period', 'units'):
            self.assertEqual(getattr(self.test_signal, attr), getattr(signal2, attr))
        assert_array_equal(signal2.array_annotations["channel_names"],
                           np.array(["b", "c", "f", "h"]))
        assert_array_equal(self.test_data[:, [1, 2, 5, 7]], signal2.magnitude)
| bsd-3-clause |
levilucio/SyVOLT | mbeddr2C_MM/transformation_from_mps/Hlayer0rule2.py | 1 | 3330 | from core.himesis import Himesis
import uuid
class Hlayer0rule2(Himesis):
    # NOTE(review): this class is machine-generated from a DSLTrans rule;
    # the node indices in the edge list below must stay in sync with the
    # order of the add_node() calls.
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule layer0rule2.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(Hlayer0rule2, self).__init__(name='Hlayer0rule2', num_nodes=0, edges=[])

        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """layer0rule2"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer0rule2')

        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["attr1"] = """layer0rule2"""

        # match class Operation(layer0rule2class0Operation) node
        self.add_node()
        self.vs[3]["mm__"] = """Operation"""
        # attr1 "+" presumably marks match cardinality -- TODO confirm
        # against the DSLTrans generator.
        self.vs[3]["attr1"] = """+"""
        # apply class CFunctionPointerStructMember(layer0rule2class2CFunctionPointerStructMember) node
        self.add_node()
        self.vs[4]["mm__"] = """CFunctionPointerStructMember"""
        self.vs[4]["attr1"] = """1"""
        # apply class FunctionRefType(layer0rule2class3FunctionRefType) node
        self.add_node()
        self.vs[5]["mm__"] = """FunctionRefType"""
        self.vs[5]["attr1"] = """1"""
        # apply class PointerType(layer0rule5class2PointerType) node
        self.add_node()
        self.vs[6]["mm__"] = """PointerType"""
        self.vs[6]["attr1"] = """1"""
        # apply class VoidType(layer0rule5class3VoidType) node
        self.add_node()
        self.vs[7]["mm__"] = """VoidType"""
        self.vs[7]["attr1"] = """1"""

        # apply association CFunctionPointerStructMember--type-->FunctionRefType node
        self.add_node()
        self.vs[8]["attr1"] = """type"""
        self.vs[8]["mm__"] = """directLink_T"""
        # apply association FunctionRefType--argTypes-->PointerType node
        self.add_node()
        self.vs[9]["attr1"] = """argTypes"""
        self.vs[9]["mm__"] = """directLink_T"""
        # apply association PointerType--baseType-->VoidType node
        self.add_node()
        self.vs[10]["attr1"] = """baseType"""
        self.vs[10]["mm__"] = """directLink_T"""

        # Add the edges
        self.add_edges([
            (0,3), # matchmodel -> match_class Operation(layer0rule2class0Operation)
            (1,4), # applymodel -> apply_classCFunctionPointerStructMember(layer0rule2class2CFunctionPointerStructMember)
            (1,5), # applymodel -> apply_classFunctionRefType(layer0rule2class3FunctionRefType)
            (1,6), # applymodel -> apply_classPointerType(layer0rule5class2PointerType)
            (1,7), # applymodel -> apply_classVoidType(layer0rule5class3VoidType)
            (4,8), # apply class CFunctionPointerStructMember(layer0rule2class2CFunctionPointerStructMember) -> association type
            (8,5), # associationtype -> apply_classFunctionRefType(layer0rule2class3FunctionRefType)
            (5,9), # apply class FunctionRefType(layer0rule2class3FunctionRefType) -> association argTypes
            (9,6), # associationargTypes -> apply_classPointerType(layer0rule5class2PointerType)
            (6,10), # apply class PointerType(layer0rule5class2PointerType) -> association baseType
            (10,7), # associationbaseType -> apply_classVoidType(layer0rule5class3VoidType)
            (0,2), # matchmodel -> pairedwith
            (2,1) # pairedwith -> applyModel
        ])

        # Equation: the generated struct member's name is copied from the
        # matched Operation's name.
        self["equations"] = [((4,'name'),(3,'name')),]
| mit |
astokes/SynVinQR | etude/form_elements.py | 1 | 1933 | # WEEK1
# to amass working examples of most of the form elements needed
# by the SynVin data input pages
import bottle
import pymongo
from bottle import request
print "binding master form to http:/"
@bottle.route('/')
def index():
    # Serve the master form page (rendered from the form_elements template).
    return bottle.template ('form_elements')
print "binding form to http://text/"
@bottle.post('/text')
def text_form():
    # Echo the submitted question/answer pair straight back to the client.
    Q = request.forms.question
    A = request.forms.answer
    return "<html><body>Text form submitted<br/>Q:%s<br/>A:%s</body></html>" % (Q, A)
print "binding to http://yourhost/password"
@bottle.post('/password')
def password_form():
    # NOTE(review): echoing a submitted password back to the page is only
    # acceptable because this is a teaching demo.
    P = request.forms.password
    return "<html><body>Trending upward in the Google Zeitgeist: %s</body></html>" % P
print "binding to http://yourhost/radiobutton"
@bottle.post('/radiobutton')
def radio_form():
    # contains logic to prove we remember cookie elsewhere
    G = request.forms.sex
    # Cookie lookup with the just-submitted value as the fallback default.
    S = request.get_cookie('purience', G)
    # Boolean indexing trick: False -> 'his', True -> 'her'.
    pronoun = ['his', 'her'][G=='female']
    return "<html><body>The human %s signals %s desires indirectly.</body></html>" % (
        S, pronoun)
# patterned after WEEK1 Bottle framework: using a cookie
# eliminates problem with user unable to refresh POST requests
# CONTROLLER side
print "binding to http://yourhost/radcook"
@bottle.post('/radcook')
def radcook_form():
    # Post/Redirect/Get: stash the value in a cookie, then redirect so a
    # browser refresh re-issues a GET instead of re-POSTing the form.
    P = request.forms.purience
    bottle.response.set_cookie('purience',P)
    bottle.redirect('/show_radcook')
#
# VIEW side
@bottle.route('/show_radcook')
def show_radcook():
    # Read back the cookie written by radcook_form and display it.
    P = request.get_cookie('purience')
    return "<html><body>You're a %s, and we'll not forget it!</body></html>" % P
print "binding to http://yourhost/checkbox"
@bottle.post('/checkbox')
def checkbox_form():
OS = request.forms.getall('phoneOS')
OS.insert (0, 'Your landline')
# deprecate everything, picking on the last item only
return "<html><body>%s is obsolete and outdated.</body></html>" % OS [len(OS)-1]
# Listen on all interfaces so the dev server is reachable from outside.
bottle.run(host='0.0.0.0', port=8800)
| bsd-2-clause |
burnpanck/traits | traits/util/tests/test_message_records.py | 1 | 2198 | #----------------------------------------------------------------------------
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#----------------------------------------------------------------------------
import unittest
from traits.util.event_tracer import (
SentinelRecord, ChangeMessageRecord, CallingMessageRecord,
ExitMessageRecord)
class TestMessageRecords(unittest.TestCase):
    """Tests for the event-tracer record types: each record renders itself
    via unicode() (Python 2) and rejects unknown constructor keywords."""

    def test_base_message_record(self):
        record = SentinelRecord()
        # Check unicode output
        self.assertEqual(unicode(record), u'\n')
        # Check initialization
        self.assertRaises(TypeError, SentinelRecord, sdd=0)

    def test_change_message_record(self):
        record = ChangeMessageRecord(
            time=1, indent=3, name='john', old=1, new=1,
            class_name='MyClass')
        # Check unicode output
        self.assertEqual(
            unicode(record),
            u"1 -----> 'john' changed from 1 to 1 in 'MyClass'\n")
        # Check initialization
        self.assertRaises(TypeError, ChangeMessageRecord, sdd=0)

    def test_exit_message_record(self):
        record = ExitMessageRecord(
            time=7, indent=5, handler='john', exception='sssss')
        # Check unicode output
        self.assertEqual(
            unicode(record), u"7 <--------- EXIT: 'john'sssss\n")
        # Check initialization
        self.assertRaises(TypeError, ExitMessageRecord, sdd=0)

    def test_calling_message_record(self):
        record = CallingMessageRecord(
            time=7, indent=5, handler='john', source='sssss')
        # Check unicode output
        self.assertEqual(
            unicode(record), u"7 CALLING: 'john' in sssss\n")
        # Check initialization
        self.assertRaises(TypeError, CallingMessageRecord, sdd=0)


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
frdb194/django | tests/string_lookup/tests.py | 290 | 2573 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Bar, Base, Child, Foo, Whiz
class StringLookupTests(TestCase):
    """Regression tests around string-based model references and string
    lookups (unicode, TextField and IP-address queries)."""

    def test_string_form_referencing(self):
        """
        Regression test for #1661 and #1662

        Check that string form referencing of
        models works, both as pre and post reference, on all RelatedField types.
        """
        f1 = Foo(name="Foo1")
        f1.save()
        f2 = Foo(name="Foo2")
        f2.save()

        w1 = Whiz(name="Whiz1")
        w1.save()

        b1 = Bar(name="Bar1", normal=f1, fwd=w1, back=f2)
        b1.save()

        self.assertEqual(b1.normal, f1)

        self.assertEqual(b1.fwd, w1)

        self.assertEqual(b1.back, f2)

        base1 = Base(name="Base1")
        base1.save()

        child1 = Child(name="Child1", parent=base1)
        child1.save()

        self.assertEqual(child1.parent, base1)

    def test_unicode_chars_in_queries(self):
        """
        Regression tests for #3937

        make sure we can use unicode characters in queries.
        If these tests fail on MySQL, it's a problem with the test setup.
        A properly configured UTF-8 database can handle this.
        """
        fx = Foo(name='Bjorn', friend='François')
        fx.save()
        self.assertEqual(Foo.objects.get(friend__contains='\xe7'), fx)

        # We can also do the above query using UTF-8 strings.
        self.assertEqual(Foo.objects.get(friend__contains=b'\xc3\xa7'), fx)

    def test_queries_on_textfields(self):
        """
        Regression tests for #5087

        make sure we can perform queries on TextFields.
        """
        a = Article(name='Test', text='The quick brown fox jumps over the lazy dog.')
        a.save()
        self.assertEqual(Article.objects.get(text__exact='The quick brown fox jumps over the lazy dog.'), a)

        self.assertEqual(Article.objects.get(text__contains='quick brown fox'), a)

    def test_ipaddress_on_postgresql(self):
        """
        Regression test for #708

        "like" queries on IP address fields require casting with HOST() (on PostgreSQL).
        """
        a = Article(name='IP test', text='The body', submitted_from='192.0.2.100')
        a.save()
        self.assertEqual(repr(Article.objects.filter(submitted_from__contains='192.0.2')),
                         repr([a]))
        # Test that the searches do not match the subnet mask (/32 in this case)
        self.assertEqual(Article.objects.filter(submitted_from__contains='32').count(), 0)
| bsd-3-clause |
google-code-export/pyglet | experimental/mt_media/mt_app_xlib.py | 28 | 3878 | #!/usr/bin/env python
'''
Hack synchronisation into pyglet event loop.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import select
import threading
import pyglet
class XlibEventDispatcher(object):
    """Minimal interface for objects the event loop can select() on.

    Concrete subclasses supply a waitable file descriptor via ``fileno``
    and process whatever is pending in ``dispatch_events``.
    """

    def fileno(self):
        """Return the file descriptor to wait on; subclasses must override."""
        raise NotImplementedError('abstract')

    def dispatch_events(self):
        """Handle pending events.  The base implementation is a no-op."""
from pyglet.window.xlib import xlib
class XlibDisplayDevice(pyglet.window.xlib.XlibDisplayDevice):
    """Display device whose dispatch_events pumps the raw Xlib queue and
    routes each event to the owning pyglet window."""

    def dispatch_events(self):
        e = xlib.XEvent()
        # Drain everything currently queued on this X connection.
        while xlib.XPending(self._display):
            xlib.XNextEvent(self._display, e)

            # Key events are filtered by the xlib window event
            # handler so they get a shot at the prefiltered event.
            if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
                if xlib.XFilterEvent(e, e.xany.window):
                    continue
            try:
                window = self._window_map[e.xany.window]
            except KeyError:
                # Event for a window we do not manage; drop it.
                continue

            window.dispatch_platform_event(e)
class XlibPlatform(pyglet.window.xlib.XlibPlatform):
    """Platform that hands out the patched XlibDisplayDevice, cached per
    display name."""

    def get_display(self, name):
        try:
            return self._displays[name]
        except KeyError:
            display = XlibDisplayDevice(name)
            self._displays[name] = display
            return display
# Monkey-patch pyglet so the rest of the library sees our patched platform.
platform = XlibPlatform()
pyglet.window.get_platform = lambda: platform

import os
class SynchronizedEventDispatcher(XlibEventDispatcher):
    """Thread-safe event funnel.

    Other threads queue events via ``post_event`` and wake the select()
    loop by writing one byte per event to a pipe; the main thread drains
    both the queue and the pipe in ``dispatch_events``.
    """

    def __init__(self):
        self._sync_file_read, self._sync_file_write = os.pipe()
        self._events = []
        self._lock = threading.Lock()

    def fileno(self):
        # The read end becomes readable whenever an event is pending.
        return self._sync_file_read

    def post_event(self, dispatcher, event, *args):
        """Queue an event from any thread and wake the select() loop."""
        self._lock.acquire()
        self._events.append((dispatcher, event, args))
        # One wake-up byte per queued event (bytes literal: identical on
        # Python 2, required on Python 3).
        os.write(self._sync_file_write, b'1')
        self._lock.release()

    def dispatch_events(self):
        """Dispatch all queued events on the calling (main) thread."""
        self._lock.acquire()
        for dispatcher, event, args in self._events:
            dispatcher.dispatch_event(event, *args)
        # Drain the wake-up bytes (one per dispatched event).  Without this
        # the pipe stays readable forever -- select() would spin -- and the
        # pipe buffer would eventually fill, blocking posting threads.
        if self._events:
            os.read(self._sync_file_read, len(self._events))
        self._events = []
        self._lock.release()
class MTXlibEventLoop(pyglet.app.xlib.XlibEventLoop):
    """Xlib event loop extended with a SynchronizedEventDispatcher, so
    other threads can post events that are handled on the main thread."""

    def __init__(self, *args, **kwargs):
        super(MTXlibEventLoop, self).__init__(*args, **kwargs)
        self._synchronized_event_dispatcher = SynchronizedEventDispatcher()

    def post_event(self, dispatcher, event, *args):
        # Thread-safe: may be called from any thread.
        self._synchronized_event_dispatcher.post_event(dispatcher, event, *args)

    def get_select_files(self):
        # Everything to select() on: each display connection plus the
        # cross-thread wake-up dispatcher.
        return list(pyglet.app.displays) + [self._synchronized_event_dispatcher]

    def run(self):
        self._setup()

        e = xlib.XEvent()
        t = 0
        sleep_time = 0.

        self.dispatch_event('on_enter')

        while not self.has_exit:
            # Check for already pending events
            for display in pyglet.app.displays:
                if xlib.XPending(display._display):
                    pending_dispatchers = (display,)
                    break
            else:
                # None found; select on all file descriptors or timeout
                iwtd = self.get_select_files()
                pending_dispatchers, _, _ = \
                    select.select(iwtd, (), (), sleep_time)

            # Dispatch platform events
            for dispatcher in pending_dispatchers:
                dispatcher.dispatch_events()

            # Dispatch resize events
            # XXX integrate into dispatchers?
            for window in pyglet.app.windows:
                if window._needs_resize:
                    window.switch_to()
                    window.dispatch_event('on_resize',
                                          window._width, window._height)
                    window.dispatch_event('on_expose')
                    window._needs_resize = False

            sleep_time = self.idle()

        self.dispatch_event('on_exit')
# Install the multithread-aware loop as pyglet's default event loop class.
pyglet.app.EventLoop = MTXlibEventLoop
| bsd-3-clause |
BRNmod/android_kernel_google_msm | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 records from stdin and print them as
# space-separated "index=value" pairs (index in hex) on one line.
i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Trailing partial record -- the input is corrupt.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
Edraak/edx-platform | lms/djangoapps/courseware/features/conditional.py | 31 | 4647 | # pylint: disable=missing-docstring
from lettuce import world, steps
from nose.tools import assert_in, assert_true
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import add_problem_to_course, answer_problem
@steps
class ConditionalSteps(object):
    # NOTE: the raw string at the top of each method below is NOT a normal
    # docstring -- lettuce's @steps decorator uses it as the step-matching
    # regular expression, so it must not be edited casually.

    COURSE_NUM = 'test_course'

    def setup_conditional(self, step, condition_type, condition, cond_value):
        r'that a course has a Conditional conditioned on (?P<condition_type>\w+) (?P<condition>\w+)=(?P<cond_value>\w+)$'

        i_am_registered_for_the_course(step, self.COURSE_NUM)

        # Build vertical -> wrapper scaffolding around the conditional.
        world.scenario_dict['VERTICAL'] = world.ItemFactory(
            parent_location=world.scenario_dict['SECTION'].location,
            category='vertical',
            display_name="Test Vertical",
        )

        world.scenario_dict['WRAPPER'] = world.ItemFactory(
            parent_location=world.scenario_dict['VERTICAL'].location,
            category='wrapper',
            display_name="Test Poll Wrapper"
        )

        # The source module whose state gates the conditional: either a
        # problem or a poll, per the step's condition_type capture.
        if condition_type == 'problem':
            world.scenario_dict['CONDITION_SOURCE'] = add_problem_to_course(self.COURSE_NUM, 'string')
        elif condition_type == 'poll':
            world.scenario_dict['CONDITION_SOURCE'] = world.ItemFactory(
                parent_location=world.scenario_dict['WRAPPER'].location,
                category='poll_question',
                display_name='Conditional Poll',
                data={
                    'question': 'Is this a good poll?',
                    'answers': [
                        {'id': 'yes', 'text': 'Yes, of course'},
                        {'id': 'no', 'text': 'Of course not!'}
                    ],
                }
            )
        else:
            raise Exception("Unknown condition type: {!r}".format(condition_type))

        metadata = {
            'xml_attributes': {
                condition: cond_value
            }
        }

        world.scenario_dict['CONDITIONAL'] = world.ItemFactory(
            parent_location=world.scenario_dict['WRAPPER'].location,
            category='conditional',
            display_name="Test Conditional",
            metadata=metadata,
            sources_list=[world.scenario_dict['CONDITION_SOURCE'].location],
        )

        # Hidden content revealed only once the condition is satisfied.
        world.ItemFactory(
            parent_location=world.scenario_dict['CONDITIONAL'].location,
            category='html',
            display_name='Conditional Contents',
            data='<html><div class="hidden-contents">Hidden Contents</p></html>'
        )

    def setup_problem_attempts(self, step, not_attempted=None):
        r'that the conditioned problem has (?P<not_attempted>not )?been attempted$'
        visit_scenario_item('CONDITION_SOURCE')

        # Only submit an answer when the step did not say "not attempted".
        if not_attempted is None:
            answer_problem(self.COURSE_NUM, 'string', True)
            world.css_click("button.check")

    def when_i_view_the_conditional(self, step):
        r'I view the conditional$'
        visit_scenario_item('CONDITIONAL')
        world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Conditional]").data("initialized")')

    def check_visibility(self, step, visible):
        r'the conditional contents are (?P<visible>\w+)$'
        world.wait_for_ajax_complete()
        assert_in(visible, ('visible', 'hidden'))
        if visible == 'visible':
            world.wait_for_visible('.hidden-contents')
            assert_true(world.css_visible('.hidden-contents'))
        else:
            assert_true(world.is_css_not_present('.hidden-contents'))
            assert_true(
                world.css_contains_text(
                    '.conditional-message',
                    'must be attempted before this will become visible.'
                )
            )

    def answer_poll(self, step, answer):
        r' I answer the conditioned poll "([^"]*)"$'
        visit_scenario_item('CONDITION_SOURCE')
        world.wait_for_js_variable_truthy('$(".xblock-student_view[data-type=Poll]").data("initialized")')
        world.wait_for_ajax_complete()

        # Map the answer id captured from the step to its display text,
        # then click the matching answer element.
        answer_text = [
            poll_answer['text']
            for poll_answer
            in world.scenario_dict['CONDITION_SOURCE'].answers
            if poll_answer['id'] == answer
        ][0]

        text_selector = '.poll_answer .text'

        poll_texts = world.retry_on_exception(
            lambda: [elem.text for elem in world.css_find(text_selector)]
        )

        for idx, poll_text in enumerate(poll_texts):
            if poll_text == answer_text:
                world.css_click(text_selector, index=idx)
                return


# Instantiate so the @steps decorator registers the step definitions.
ConditionalSteps()
| agpl-3.0 |
AndrewLvov/django-registration | registration/tests/models.py | 117 | 9770 | import datetime
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core import management
from django.test import TestCase
from django.utils.hashcompat import sha_constructor
from registration.models import RegistrationProfile
class RegistrationModelTests(TestCase):
    """
    Test the model and manager used in the default backend.
    """
    # Shared kwargs for creating the test user in each test.
    user_info = {'username': 'alice',
                 'password': 'swordfish',
                 'email': 'alice@example.com'}

    def setUp(self):
        # Pin the activation window to 7 days; restored in tearDown.
        self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
        settings.ACCOUNT_ACTIVATION_DAYS = 7

    def tearDown(self):
        settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation

    def test_profile_creation(self):
        """
        Creating a registration profile for a user populates the
        profile with the correct user and a SHA1 hash to use as
        activation key.
        """
        new_user = User.objects.create_user(**self.user_info)
        profile = RegistrationProfile.objects.create_profile(new_user)

        self.assertEqual(RegistrationProfile.objects.count(), 1)
        self.assertEqual(profile.user.id, new_user.id)
        self.failUnless(re.match('^[a-f0-9]{40}$', profile.activation_key))
        self.assertEqual(unicode(profile),
                         "Registration information for alice")

    def test_activation_email(self):
        """
        ``RegistrationProfile.send_activation_email`` sends an
        email.
        """
        new_user = User.objects.create_user(**self.user_info)
        profile = RegistrationProfile.objects.create_profile(new_user)
        profile.send_activation_email(Site.objects.get_current())
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, [self.user_info['email']])

    def test_user_creation(self):
        """
        Creating a new user populates the correct data, and sets the
        user's account inactive.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        self.assertEqual(new_user.username, 'alice')
        self.assertEqual(new_user.email, 'alice@example.com')
        self.failUnless(new_user.check_password('swordfish'))
        self.failIf(new_user.is_active)

    def test_user_creation_email(self):
        """
        By default, creating a new user sends an activation email.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        self.assertEqual(len(mail.outbox), 1)

    def test_user_creation_no_email(self):
        """
        Passing ``send_email=False`` when creating a new user will not
        send an activation email.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    send_email=False,
                                                                    **self.user_info)
        self.assertEqual(len(mail.outbox), 0)

    def test_unexpired_account(self):
        """
        ``RegistrationProfile.activation_key_expired()`` is ``False``
        within the activation window.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        profile = RegistrationProfile.objects.get(user=new_user)
        self.failIf(profile.activation_key_expired())

    def test_expired_account(self):
        """
        ``RegistrationProfile.activation_key_expired()`` is ``True``
        outside the activation window.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        # Back-date the join date past the window to force expiry.
        new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
        new_user.save()
        profile = RegistrationProfile.objects.get(user=new_user)
        self.failUnless(profile.activation_key_expired())

    def test_valid_activation(self):
        """
        Activating a user within the permitted window makes the
        account active, and resets the activation key.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        profile = RegistrationProfile.objects.get(user=new_user)
        activated = RegistrationProfile.objects.activate_user(profile.activation_key)

        self.failUnless(isinstance(activated, User))
        self.assertEqual(activated.id, new_user.id)
        self.failUnless(activated.is_active)

        profile = RegistrationProfile.objects.get(user=new_user)
        self.assertEqual(profile.activation_key, RegistrationProfile.ACTIVATED)

    def test_expired_activation(self):
        """
        Attempting to activate outside the permitted window does not
        activate the account.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
        new_user.save()

        profile = RegistrationProfile.objects.get(user=new_user)
        activated = RegistrationProfile.objects.activate_user(profile.activation_key)

        self.failIf(isinstance(activated, User))
        self.failIf(activated)

        new_user = User.objects.get(username='alice')
        self.failIf(new_user.is_active)

        profile = RegistrationProfile.objects.get(user=new_user)
        self.assertNotEqual(profile.activation_key, RegistrationProfile.ACTIVATED)

    def test_activation_invalid_key(self):
        """
        Attempting to activate with a key which is not a SHA1 hash
        fails.
        """
        self.failIf(RegistrationProfile.objects.activate_user('foo'))

    def test_activation_already_activated(self):
        """
        Attempting to re-activate an already-activated account fails.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        profile = RegistrationProfile.objects.get(user=new_user)
        RegistrationProfile.objects.activate_user(profile.activation_key)

        profile = RegistrationProfile.objects.get(user=new_user)
        self.failIf(RegistrationProfile.objects.activate_user(profile.activation_key))

    def test_activation_nonexistent_key(self):
        """
        Attempting to activate with a non-existent key (i.e., one not
        associated with any account) fails.
        """
        # Due to the way activation keys are constructed during
        # registration, this will never be a valid key.
        invalid_key = sha_constructor('foo').hexdigest()
        self.failIf(RegistrationProfile.objects.activate_user(invalid_key))

    def test_expired_user_deletion(self):
        """
        ``RegistrationProfile.objects.delete_expired_users()`` only
        deletes inactive users whose activation window has expired.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        expired_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                        username='bob',
                                                                        password='secret',
                                                                        email='bob@example.com')
        expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
        expired_user.save()

        RegistrationProfile.objects.delete_expired_users()
        self.assertEqual(RegistrationProfile.objects.count(), 1)
        self.assertRaises(User.DoesNotExist, User.objects.get, username='bob')

    def test_management_command(self):
        """
        The ``cleanupregistration`` management command properly
        deletes expired accounts.
        """
        new_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                    **self.user_info)
        expired_user = RegistrationProfile.objects.create_inactive_user(site=Site.objects.get_current(),
                                                                        username='bob',
                                                                        password='secret',
                                                                        email='bob@example.com')
        expired_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
        expired_user.save()

        management.call_command('cleanupregistration')
        self.assertEqual(RegistrationProfile.objects.count(), 1)
        self.assertRaises(User.DoesNotExist, User.objects.get, username='bob')
| bsd-3-clause |
caisq/tensorflow | tensorflow/python/estimator/export/export_output_test.py | 9 | 15271 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
class ExportOutputTest(test.TestCase):
  """Tests for the ExportOutput subclasses: constructor-time dtype
  validation, and conversion into standardized SignatureDef protos via
  as_signature_def()."""
  # --- constructor validation: wrong dtypes must raise ValueError ---
  def test_regress_value_must_be_float(self):
    value = array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
    with self.assertRaises(ValueError) as e:
      export_output_lib.RegressionOutput(value)
    self.assertEqual('Regression output value must be a float32 Tensor; got '
                     'Tensor("output-tensor-1:0", shape=(1,), dtype=string)',
                     str(e.exception))
  def test_classify_classes_must_be_strings(self):
    classes = array_ops.placeholder(dtypes.float32, 1, name="output-tensor-1")
    with self.assertRaises(ValueError) as e:
      export_output_lib.ClassificationOutput(classes=classes)
    self.assertEqual('Classification classes must be a string Tensor; got '
                     'Tensor("output-tensor-1:0", shape=(1,), dtype=float32)',
                     str(e.exception))
  def test_classify_scores_must_be_float(self):
    scores = array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
    with self.assertRaises(ValueError) as e:
      export_output_lib.ClassificationOutput(scores=scores)
    self.assertEqual('Classification scores must be a float32 Tensor; got '
                     'Tensor("output-tensor-1:0", shape=(1,), dtype=string)',
                     str(e.exception))
  def test_classify_requires_classes_or_scores(self):
    with self.assertRaises(ValueError) as e:
      export_output_lib.ClassificationOutput()
    self.assertEqual("At least one of scores and classes must be set.",
                     str(e.exception))
  # --- SignatureDef construction: each test builds the expected proto by
  # hand and compares it against as_signature_def()'s result ---
  def test_build_standardized_signature_def_regression(self):
    input_tensors = {
        "input-1":
            array_ops.placeholder(
                dtypes.string, 1, name="input-tensor-1")
    }
    value = array_ops.placeholder(dtypes.float32, 1, name="output-tensor-1")
    export_output = export_output_lib.RegressionOutput(value)
    actual_signature_def = export_output.as_signature_def(input_tensors)
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[
        signature_constants.REGRESS_INPUTS].CopyFrom(
            meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
                                      dtype=dtype_string,
                                      tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.REGRESS_OUTPUTS].CopyFrom(
            meta_graph_pb2.TensorInfo(name="output-tensor-1:0",
                                      dtype=dtype_float,
                                      tensor_shape=shape))
    expected_signature_def.method_name = signature_constants.REGRESS_METHOD_NAME
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classify_classes_only(self):
    """Tests classification with one output tensor."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(
                dtypes.string, 1, name="input-tensor-1")
    }
    classes = array_ops.placeholder(dtypes.string, 1, name="output-tensor-1")
    export_output = export_output_lib.ClassificationOutput(classes=classes)
    actual_signature_def = export_output.as_signature_def(input_tensors)
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[
        signature_constants.CLASSIFY_INPUTS].CopyFrom(
            meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
                                      dtype=dtype_string,
                                      tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
            meta_graph_pb2.TensorInfo(name="output-tensor-1:0",
                                      dtype=dtype_string,
                                      tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classify_both(self):
    """Tests multiple output tensors that include classes and scores."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(
                dtypes.string, 1, name="input-tensor-1")
    }
    classes = array_ops.placeholder(dtypes.string, 1,
                                    name="output-tensor-classes")
    scores = array_ops.placeholder(dtypes.float32, 1,
                                   name="output-tensor-scores")
    export_output = export_output_lib.ClassificationOutput(
        scores=scores, classes=classes)
    actual_signature_def = export_output.as_signature_def(input_tensors)
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[
        signature_constants.CLASSIFY_INPUTS].CopyFrom(
            meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
                                      dtype=dtype_string,
                                      tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_CLASSES].CopyFrom(
            meta_graph_pb2.TensorInfo(name="output-tensor-classes:0",
                                      dtype=dtype_string,
                                      tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
            meta_graph_pb2.TensorInfo(name="output-tensor-scores:0",
                                      dtype=dtype_float,
                                      tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  def test_build_standardized_signature_def_classify_scores_only(self):
    """Tests classification without classes tensor."""
    input_tensors = {
        "input-1":
            array_ops.placeholder(
                dtypes.string, 1, name="input-tensor-1")
    }
    scores = array_ops.placeholder(dtypes.float32, 1,
                                   name="output-tensor-scores")
    export_output = export_output_lib.ClassificationOutput(
        scores=scores)
    actual_signature_def = export_output.as_signature_def(input_tensors)
    expected_signature_def = meta_graph_pb2.SignatureDef()
    shape = tensor_shape_pb2.TensorShapeProto(
        dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=1)])
    dtype_float = types_pb2.DataType.Value("DT_FLOAT")
    dtype_string = types_pb2.DataType.Value("DT_STRING")
    expected_signature_def.inputs[
        signature_constants.CLASSIFY_INPUTS].CopyFrom(
            meta_graph_pb2.TensorInfo(name="input-tensor-1:0",
                                      dtype=dtype_string,
                                      tensor_shape=shape))
    expected_signature_def.outputs[
        signature_constants.CLASSIFY_OUTPUT_SCORES].CopyFrom(
            meta_graph_pb2.TensorInfo(name="output-tensor-scores:0",
                                      dtype=dtype_float,
                                      tensor_shape=shape))
    expected_signature_def.method_name = (
        signature_constants.CLASSIFY_METHOD_NAME)
    self.assertEqual(actual_signature_def, expected_signature_def)
  # --- PredictOutput key/value validation ---
  def test_predict_outputs_valid(self):
    """Tests that no errors are raised when provided outputs are valid."""
    outputs = {
        "output0": constant_op.constant([0]),
        u"output1": constant_op.constant(["foo"]),
    }
    export_output_lib.PredictOutput(outputs)
    # Single Tensor is OK too
    export_output_lib.PredictOutput(constant_op.constant([0]))
  def test_predict_outputs_invalid(self):
    with self.assertRaisesRegexp(
        ValueError,
        "Prediction output key must be a string"):
      export_output_lib.PredictOutput({1: constant_op.constant([0])})
    with self.assertRaisesRegexp(
        ValueError,
        "Prediction output value must be a Tensor"):
      export_output_lib.PredictOutput({
          "prediction1": sparse_tensor.SparseTensor(
              indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
      })
class MockSupervisedOutput(export_output_lib._SupervisedOutput):
  """So that we can test the abstract class methods directly."""
  def _get_signature_def_fn(self):
    # The signature-def hook is irrelevant for these tests; do nothing.
    pass
class SupervisedOutputTest(test.TestCase):
  """Tests for _SupervisedOutput (via MockSupervisedOutput) and its
  TrainOutput/EvalOutput subclasses: key prefixing of loss, prediction and
  metric outputs, input validation and SignatureDef generation."""
  def test_supervised_outputs_valid(self):
    """Tests that no errors are raised when provided outputs are valid."""
    loss = {"my_loss": constant_op.constant([0])}
    predictions = {u"output1": constant_op.constant(["foo"])}
    metrics = {"metrics": (constant_op.constant([0]),
                           constant_op.constant([10])),
               "metrics2": (constant_op.constant([0]),
                            constant_op.constant([10]))}
    outputter = MockSupervisedOutput(loss, predictions, metrics)
    # Keys get the "loss/", "predictions/", "metrics.../value|update_op"
    # prefixes applied by _SupervisedOutput.
    self.assertEqual(outputter.loss["loss/my_loss"], loss["my_loss"])
    self.assertEqual(
        outputter.predictions["predictions/output1"], predictions["output1"])
    self.assertEqual(outputter.metrics["metrics/value"], metrics["metrics"][0])
    self.assertEqual(
        outputter.metrics["metrics2/update_op"], metrics["metrics2"][1])
    # Single Tensor is OK too
    outputter = MockSupervisedOutput(
        loss["my_loss"], predictions["output1"], metrics["metrics"])
    self.assertEqual(outputter.loss, {"loss": loss["my_loss"]})
    self.assertEqual(
        outputter.predictions, {"predictions": predictions["output1"]})
    self.assertEqual(outputter.metrics["metrics/value"], metrics["metrics"][0])
  def test_supervised_outputs_none(self):
    # Only the loss is mandatory; predictions/metrics may be omitted.
    outputter = MockSupervisedOutput(
        constant_op.constant([0]), None, None)
    self.assertEqual(len(outputter.loss), 1)
    self.assertEqual(outputter.predictions, None)
    self.assertEqual(outputter.metrics, None)
  def test_supervised_outputs_invalid(self):
    # Non-Tensor values and non-string keys are rejected per output kind.
    with self.assertRaisesRegexp(ValueError, "predictions output value must"):
      MockSupervisedOutput(constant_op.constant([0]), [3], None)
    with self.assertRaisesRegexp(ValueError, "loss output value must"):
      MockSupervisedOutput("str", None, None)
    with self.assertRaisesRegexp(ValueError, "metrics output value must"):
      MockSupervisedOutput(None, None, (15.3, 4))
    with self.assertRaisesRegexp(ValueError, "loss output key must"):
      MockSupervisedOutput({25: "Tensor"}, None, None)
  def test_supervised_outputs_tuples(self):
    """Tests that no errors are raised when provided outputs are valid."""
    # Tuple keys are joined with "/" when building the output names.
    loss = {("my", "loss"): constant_op.constant([0])}
    predictions = {(u"output1", "2"): constant_op.constant(["foo"])}
    metrics = {("metrics", "twice"): (constant_op.constant([0]),
                                      constant_op.constant([10]))}
    outputter = MockSupervisedOutput(loss, predictions, metrics)
    self.assertEqual(set(outputter.loss.keys()), set(["loss/my/loss"]))
    self.assertEqual(set(outputter.predictions.keys()),
                     set(["predictions/output1/2"]))
    self.assertEqual(set(outputter.metrics.keys()),
                     set(["metrics/twice/value", "metrics/twice/update_op"]))
  def test_supervised_outputs_no_prepend(self):
    """Tests that no errors are raised when provided outputs are valid."""
    # Keys already matching the canonical names are not prefixed again.
    loss = {"loss": constant_op.constant([0])}
    predictions = {u"predictions": constant_op.constant(["foo"])}
    metrics = {u"metrics": (constant_op.constant([0]),
                            constant_op.constant([10]))}
    outputter = MockSupervisedOutput(loss, predictions, metrics)
    self.assertEqual(set(outputter.loss.keys()), set(["loss"]))
    self.assertEqual(set(outputter.predictions.keys()), set(["predictions"]))
    self.assertEqual(set(outputter.metrics.keys()),
                     set(["metrics/value", "metrics/update_op"]))
  def test_train_signature_def(self):
    loss = {"my_loss": constant_op.constant([0])}
    predictions = {u"output1": constant_op.constant(["foo"])}
    metrics = {"metrics": (constant_op.constant([0]),
                           constant_op.constant([10]))}
    outputter = export_output_lib.TrainOutput(loss, predictions, metrics)
    receiver = {u"features": constant_op.constant(100, shape=(100, 2)),
                "labels": constant_op.constant(100, shape=(100, 1))}
    sig_def = outputter.as_signature_def(receiver)
    self.assertTrue("loss/my_loss" in sig_def.outputs)
    self.assertTrue("metrics/value" in sig_def.outputs)
    self.assertTrue("predictions/output1" in sig_def.outputs)
    self.assertTrue("features" in sig_def.inputs)
  def test_eval_signature_def(self):
    # EvalOutput with metrics=None must not emit metric outputs.
    loss = {"my_loss": constant_op.constant([0])}
    predictions = {u"output1": constant_op.constant(["foo"])}
    outputter = export_output_lib.EvalOutput(loss, predictions, None)
    receiver = {u"features": constant_op.constant(100, shape=(100, 2)),
                "labels": constant_op.constant(100, shape=(100, 1))}
    sig_def = outputter.as_signature_def(receiver)
    self.assertTrue("loss/my_loss" in sig_def.outputs)
    self.assertFalse("metrics/value" in sig_def.outputs)
    self.assertTrue("predictions/output1" in sig_def.outputs)
    self.assertTrue("features" in sig_def.inputs)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
phimpme/generator | Phimpme/site-packages/nose/sphinx/pluginopts.py | 94 | 5638 | """
Adds a sphinx directive that can be used to automatically document a plugin.
this::
.. autoplugin :: nose.plugins.foo
:plugin: Pluggy
produces::
.. automodule :: nose.plugins.foo
Options
-------
.. cmdoption :: --foo=BAR, --fooble=BAR
Do the foo thing to the new thing.
Plugin
------
.. autoclass :: nose.plugins.foo.Pluggy
:members:
Source
------
.. include :: path/to/nose/plugins/foo.py
:literal:
"""
import os
try:
from docutils import nodes, utils
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
except ImportError:
pass # won't run anyway
from nose.util import resolve_name
from nose.plugins.base import Plugin
from nose.plugins.manager import BuiltinPluginManager
from nose.config import Config
from nose.core import TestProgram
from inspect import isclass
def autoplugin_directive(dirname, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    """Handle the ``autoplugin`` directive: build reST documenting a nose
    plugin module (module docstring, its command-line options, the plugin
    class, and an included copy of the source), then parse it into nodes.

    ``arguments[0]`` is the dotted module name; ``options['plugin']`` may
    name the plugin class explicitly, otherwise the first ``Plugin``
    subclass found in the module is used.
    """
    mod_name = arguments[0]
    mod = resolve_name(mod_name)
    plug_name = options.get('plugin', None)
    if plug_name:
        obj = getattr(mod, plug_name)
    else:
        # Fall back to the first Plugin subclass defined in the module.
        # NOTE(review): if no subclass is found, plug_name stays None and
        # the autoclass line below would render "None" — presumably every
        # documented module defines a plugin; confirm before relying on it.
        for entry in dir(mod):
            obj = getattr(mod, entry)
            if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin:
                plug_name = '%s.%s' % (mod_name, entry)
                break
    # mod docstring
    rst = ViewList()
    rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
    rst.append('', '<autodoc>')
    # options
    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')
    # Instantiate the plugin and let it register its options into a fake
    # parser (OptBucket) so each can be rendered as a cmdoption directive.
    plug = obj()
    opts = OptBucket()
    plug.options(opts, {})
    for opt in opts:
        rst.append(opt.options(), '<autodoc>')
        rst.append('   \n', '<autodoc>')
        rst.append('   ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')
    # plugin class
    rst.append('Plugin', '<autodoc>')
    rst.append('------', '<autodoc>')
    rst.append('', '<autodoc>')
    rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
    rst.append('   :members:\n', '<autodoc>')
    rst.append('   :show-inheritance:\n', '<autodoc>')
    rst.append('', '<autodoc>')
    # source
    rst.append('Source', '<autodoc>')
    rst.append('------', '<autodoc>')
    rst.append(
        '.. include :: %s\n' % utils.relative_path(
            state_machine.document['source'],
            os.path.abspath(mod.__file__.replace('.pyc', '.py'))),
        '<autodoc>')
    rst.append('   :literal:\n', '<autodoc>')
    rst.append('', '<autodoc>')
    node = nodes.section()
    node.document = state.document
    # Parse with fresh title-style state so the generated headings do not
    # interfere with the enclosing document's section hierarchy; restore
    # the saved state afterwards.
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level
    return node.children
def autohelp_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """produces rst from nose help

    Builds a throwaway nose Config whose parser is an OptBucket, renders
    the usage text plus every registered option as reST, and parses it
    into docutils nodes.
    """
    config = Config(parserClass=OptBucket,
                    plugins=BuiltinPluginManager())
    parser = config.getParser(TestProgram.usage())
    rst = ViewList()
    for line in parser.format_help().split('\n'):
        rst.append(line, '<autodoc>')
    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')
    for opt in parser:
        rst.append(opt.options(), '<autodoc>')
        rst.append('   \n', '<autodoc>')
        rst.append('   ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')
    node = nodes.section()
    node.document = state.document
    # Isolate heading levels from the surrounding document while parsing,
    # then restore the saved parser state (same trick as autoplugin).
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level
    return node.children
class OptBucket(object):
    """Minimal stand-in for an optparse parser: records added options so
    they can be iterated later, and renders help text from a usage doc."""
    def __init__(self, doc=None, prog='nosetests'):
        self.opts = []          # Opt instances, in registration order
        self.doc = doc          # usage template; may contain %prog
        self.prog = prog        # program name substituted into the doc
    def __iter__(self):
        """Yield the recorded Opt instances in order."""
        for recorded in self.opts:
            yield recorded
    def format_help(self):
        """Return the usage doc with %prog substituted and line-final ':'
        promoted to reST literal-block markers ('::')."""
        rendered = self.doc.replace('%prog', self.prog)
        return rendered.replace(':\n', '::\n')
    def add_option(self, *arg, **kw):
        """Record an option exactly as optparse's add_option would."""
        self.opts.append(Opt(*arg, **kw))
class Opt(object):
    """Lightweight record of a single optparse-style option.

    Captures the option strings plus the keyword attributes needed to
    render a ``.. cmdoption ::`` reST directive.
    """
    def __init__(self, *arg, **kw):
        self.opts = arg
        self.action = kw.pop('action', None)
        self.default = kw.pop('default', None)
        self.metavar = kw.pop('metavar', None)
        self.help = kw.pop('help', None)
    def options(self):
        """Render this option as a ``.. cmdoption ::`` directive line.

        Flag-style actions take no argument; every other action is shown
        with an ``=METAVAR`` suffix.
        """
        takes_value = self.action not in ('store_true', 'store_false')
        rendered = [
            opt + ('=%s' % self.meta(opt) if takes_value else '')
            for opt in self.opts
        ]
        return '.. cmdoption :: ' + ', '.join(rendered)
    def meta(self, optstring):
        # FIXME optparser default metavar?
        return self.metavar or 'DEFAULT'
def setup(app):
    """Register the ``autoplugin`` and ``autohelp`` directives with Sphinx.

    ``autoplugin`` takes one required argument (the plugin module's dotted
    name) and an optional ``:plugin:`` option naming the plugin class;
    ``autohelp`` takes no arguments or content.
    """
    app.add_directive('autoplugin',
                      autoplugin_directive, 1, (1, 0, 1),
                      plugin=directives.unchanged)
    app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
| gpl-3.0 |
frouty/odoo_oph | addons/mrp/product.py | 56 | 1606 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_product(osv.osv):
    """Extend ``product.product`` with a link to its bills of materials."""
    _inherit = "product.product"
    _columns = {
        # Top-level BoMs only: component lines have bom_id set and are
        # excluded by the domain.
        "bom_ids": fields.one2many('mrp.bom', 'product_id', 'Bill of Materials', domain=[('bom_id', '=', False)]),
    }

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a product without carrying over its bills of materials.

        :param default: optional field overrides for the duplicate.

        Bug fix: work on a copy of ``default`` — the previous
        implementation ``update()``d the caller's dictionary in place, so
        ``bom_ids: []`` leaked back into the caller's dict as a side
        effect.
        """
        default = dict(default or {}, bom_ids=[])
        return super(product_product, self).copy(cr, uid, id, default, context=context)

product_product()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tommytarts/QuantumKernelM8-Sense | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Translate the packed little-endian u32 configuration values on stdin into
# the space-separated "index=value" pairs expected by the sysfs adsl_config
# attribute (index rendered in hex, value in decimal).
index = 0
for chunk in iter(lambda: sys.stdin.read(4), ""):
    if len(chunk) != 4:
        # A trailing partial word means the input is truncated/corrupt.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(chunk)))
        sys.exit(1)
    if index:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(index, struct.unpack("<I", chunk)[0]))
    index += 1
sys.stdout.write("\n")
| gpl-2.0 |
berkmancenter/mediacloud | apps/common/src/python/mediawords/dbi/stories/ap.py | 1 | 10921 | """
Routines for determining whether a given story is syndicated from the Associated Press.
The algorithm used in this module was developed using a decision tree algorithm:
'ap_mentions_sentences',
'1' => '1', X
'0' => [ X
'associated_press_mentions',
'1' => [ X
'quoted_associated_press_first_quarter_mentions',
'1' => '1', X
'0' => [ X
'dup_sentences_32',
'1' => '1', X
'0' => [ X
'associated_press_near_title',
'1' => '1', X
'0' => [ X
'ap_news_mentions',
'1' => '1', X
'0' => [ X
'ap_mentions',
'1' => '1', X
'0' => '0' X
'2' => [ X
'associated_press_near_title', X
'1' => '1', X
'0' => [ X
'associated_press_tag_mentions', X
'1' => '0', X
'0' => '1' X
'0' => [
'dup_sentences_32',
'1' => [
'ap_mentions',
'1' => [
'ap_mentions_uppercase_location',
'1' => '1',
'0' => '0'
'0' => '0'
'0' => '0',
'2' => '1'
"""
import hashlib
import re
from typing import List, Pattern, Optional
from mediawords.db import DatabaseHandler
from mediawords.languages.factory import LanguageFactory
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
# All AP stories are expected to be written in English
__AP_LANGUAGE_CODE = 'en'
def get_ap_medium_name() -> str:
    """Return the canonical media-source name of the Associated Press feed."""
    medium_name = 'Associated Press - Full Feed'
    return medium_name
def _get_ap_media_id(db: DatabaseHandler) -> Optional[int]:
    """Look up the media_id of the Associated Press source, or None if that
    source does not exist in the database."""
    matches = db.query("""
        SELECT media_id
        FROM media
        WHERE name = %(medium_name)s
    """, {'medium_name': get_ap_medium_name()}).flat()

    # MC_REWRITE_TO_PYTHON: Perlism
    if matches is None:
        matches = []

    return matches[0] if matches else None
def _get_sentences_from_content(story_text: str) -> List[str]:
    """Split raw story text into a list of sentences, using the sentence
    splitter for English (all AP stories are assumed to be English)."""
    story_text = decode_object_from_bytes_if_needed(story_text)
    english = LanguageFactory.language_for_code(__AP_LANGUAGE_CODE)
    return english.split_text_to_sentences(text=story_text)
def _get_ap_dup_sentence_lengths(db: DatabaseHandler, story_text: str) -> List[int]:
    """Return the lengths of the sentences in ``story_text`` that also
    appear verbatim in the Associated Press media source (matched by MD5
    of the sentence text)."""
    story_text = decode_object_from_bytes_if_needed(story_text)

    ap_media_id = _get_ap_media_id(db=db)
    if ap_media_id is None:
        # No AP source configured -> nothing can be a duplicate.
        return []

    sentence_md5s = [
        hashlib.md5(sentence.encode('utf-8')).hexdigest()
        for sentence in _get_sentences_from_content(story_text=story_text)
    ]

    lengths = db.query("""
        SELECT length(sentence) AS len
        FROM story_sentences
        WHERE media_id = %(ap_media_id)s
          -- FIXME this probably never worked because the index is half_md5(), not md5()
          AND md5(sentence) = ANY(%(md5s)s)
    """, {
        'ap_media_id': ap_media_id,
        'md5s': sentence_md5s,
    }).flat()

    # MC_REWRITE_TO_PYTHON: Perlism
    if lengths is None:
        lengths = []

    return lengths
def _get_content_pattern_matches(story_text: str,
                                 pattern: Pattern[str],
                                 restrict_to_first: float = 0) -> int:
    """Count non-overlapping matches of ``pattern`` in ``story_text``.

    :param story_text: text to search.
    :param pattern: compiled regular expression.
    :param restrict_to_first: if non-zero, search only the first
        ``restrict_to_first`` fraction of the text (e.g. ``0.25`` scans the
        first quarter, matching the "first_quarter" feature of the decision
        tree documented at the top of this module). ``0`` (default) searches
        the whole text.
    :return: number of matches found.
    """
    story_text = decode_object_from_bytes_if_needed(story_text)
    if isinstance(restrict_to_first, bytes):
        restrict_to_first = decode_object_from_bytes_if_needed(restrict_to_first)

    # Bug fix: the previous ``bool(int(restrict_to_first))`` coercion threw
    # the fraction away -- any value < 1 collapsed to 0 (scan everything)
    # and any value >= 1 collapsed to 1 (also scan everything), so the
    # "first quarter" restriction silently never restricted anything.
    # Preserve the numeric value instead.
    restrict_to_first = float(restrict_to_first)

    if restrict_to_first:
        story_text = story_text[0:int(len(story_text) * restrict_to_first)]

    matches = re.findall(pattern=pattern, string=story_text)

    return len(matches)
def _get_all_string_match_positions(haystack: str, needle: str) -> List[int]:
    """Return the start offsets of every (regex) match of ``needle`` in
    ``haystack``, in order of appearance."""
    haystack = decode_object_from_bytes_if_needed(haystack)
    needle = decode_object_from_bytes_if_needed(needle)
    return [match.start() for match in re.finditer(pattern=needle, string=haystack)]
def _get_associated_press_near_title(story_title: str, story_text: str) -> bool:
    """Return True if the story title occurs within 256 characters of an
    'associated press' mention in the whitespace-collapsed story text."""
    story_title = decode_object_from_bytes_if_needed(story_title)
    story_text = decode_object_from_bytes_if_needed(story_text)

    story_title = story_title.lower()
    story_text = story_text.lower()

    # Collapse whitespace runs so offsets are stable across line wrapping.
    content = re.sub(pattern=r'\s+', repl=' ', string=story_text, flags=re.MULTILINE)

    title_positions = _get_all_string_match_positions(haystack=content, needle=story_title)
    ap_positions = _get_all_string_match_positions(haystack=content, needle='associated press')

    return any(
        abs(title_p - ap_p) < 256
        for title_p in title_positions
        for ap_p in ap_positions
    )
def _get_dup_sentences_32(db: DatabaseHandler, story_text: str) -> int:
    """Return a bucketed count of story sentences at least 32 characters
    long that duplicate a sentence in the Associated Press media source:
    0 = none, 1 = between one and ten, 2 = more than ten."""
    story_text = decode_object_from_bytes_if_needed(story_text)

    lengths = _get_ap_dup_sentence_lengths(db=db, story_text=story_text)
    num_sentences = sum(1 for length in lengths if length >= 32)

    if not num_sentences:
        return 0
    if num_sentences > 10:
        return 2
    return 1
def is_syndicated(db: DatabaseHandler,
                  story_text: str,
                  story_title: str = '',
                  story_language: str = '') -> bool:
    """Return True if the story is syndicated by the Associated Press, False otherwise.

    Uses the decision tree at the top of the module; the branch order below
    mirrors that tree exactly, so do not reorder the checks.

    :param db: database handle used to look up duplicate AP sentences.
    :param story_text: extracted story text (required; empty -> False).
    :param story_title: story title, used for the "AP near title" feature.
    :param story_language: ISO language code; empty means "assume English".
    """
    story_title = decode_object_from_bytes_if_needed(story_title)
    story_text = decode_object_from_bytes_if_needed(story_text)
    story_language = decode_object_from_bytes_if_needed(story_language)
    # If the language code is unset, we're assuming that the story is in English
    if not story_language:
        story_language = __AP_LANGUAGE_CODE
    if not story_text:
        log.warning("Story text is unset.")
        return False
    # AP syndicates English content only, so any other language is a "no".
    if story_language != __AP_LANGUAGE_CODE:
        log.debug("Story is not in English.")
        return False
    # Feature: "(AP)" dateline anywhere in the text.
    ap_mentions_sentences = _get_content_pattern_matches(
        story_text=story_text,
        pattern=re.compile(pattern=r'\(ap\)', flags=re.IGNORECASE),
    )
    if ap_mentions_sentences:
        log.debug('ap: ap_mentions_sentences')
        return True
    # Feature: literal "associated press" mention; splits the tree in two.
    associated_press_mentions = _get_content_pattern_matches(
        story_text=story_text,
        pattern=re.compile(pattern='associated press', flags=re.IGNORECASE),
    )
    if associated_press_mentions:
        # Feature: quoted/delimited "associated press" mention.
        quoted_associated_press_mentions = _get_content_pattern_matches(
            story_text=story_text,
            pattern=re.compile(pattern=r'["\'\|].{0,8}associated press.{0,8}["\'\|]', flags=re.IGNORECASE),
        )
        if quoted_associated_press_mentions:
            log.debug('ap: quoted_associated_press')
            return True
        dup_sentences_32 = _get_dup_sentences_32(db=db, story_text=story_text)
        if dup_sentences_32 == 1:
            log.debug('ap: assoc press -> dup_sentences_32')
            return True
        elif dup_sentences_32 == 0:
            # No duplicated sentences: fall back to proximity/brand features.
            associated_press_near_title = _get_associated_press_near_title(
                story_title=story_title,
                story_text=story_text,
            )
            if associated_press_near_title:
                log.debug('ap: assoc press -> near title')
                return True
            ap_news_mentions = _get_content_pattern_matches(
                story_text=story_text,
                pattern=re.compile('ap news', flags=re.IGNORECASE),
            )
            if ap_news_mentions:
                log.debug('ap: assoc press -> ap news')
                return True
            else:
                log.debug('ap: assoc press -> no ap news')
                return False
        else:  # dup_sentences_32 == 2
            associated_press_near_title = _get_associated_press_near_title(
                story_title=story_title,
                story_text=story_text,
            )
            if associated_press_near_title:
                log.debug('ap: assoc press near title')
                return True
            else:
                # "associated press" inside an HTML tag counts AGAINST
                # syndication per the trained tree (note the inverted result).
                associated_press_tag_mentions = _get_content_pattern_matches(
                    story_text=story_text,
                    pattern=re.compile(pattern='<[^<>]*associated press[^<>]*>', flags=re.IGNORECASE)
                )
                if associated_press_tag_mentions:
                    log.debug('ap: assoc press title -> tag')
                    return False
                else:
                    log.debug('ap: assoc press title -> no tag')
                    return True
    else:
        # No "associated press" mention: rely on duplicated-sentence counts.
        dup_sentences_32 = _get_dup_sentences_32(db=db, story_text=story_text)
        if dup_sentences_32 == 1:
            # Feature: uppercase dateline such as "WASHINGTON (AP)".
            ap_mentions_uppercase_location = _get_content_pattern_matches(
                story_text=story_text,
                pattern=re.compile(pattern=r'[A-Z]+\s*\(AP\)'),  # do not ignore case
            )
            if ap_mentions_uppercase_location:
                log.debug('ap: single dup sentence -> ap upper')
                return True
            else:
                log.debug('ap: single dup sentence -> no upper')
                return False
        elif dup_sentences_32 == 0:
            log.debug('ap: no features')
            return False
        else:
            log.debug('ap: dup sentences > 10')
            return True
| agpl-3.0 |
michaelBenin/sqlalchemy | lib/sqlalchemy/orm/session.py | 1 | 96305 | # orm/session.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import weakref
from .. import util, sql, engine, exc as sa_exc
from ..sql import util as sql_util, expression
from . import (
SessionExtension, attributes, exc, query,
loading, identity
)
from ..inspection import inspect
from .base import (
object_mapper, class_mapper,
_class_to_mapper, _state_mapper, object_state,
_none_set, state_str, instance_str
)
from .unitofwork import UOWTransaction
from . import state as statelib
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension', 'sessionmaker']
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
    """Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
    @classmethod
    def close_all(cls):
        """Close *all* sessions in memory."""
        # _sessions weakly references every live Session regardless of where
        # it was created, so this reaches all of them.
        for sess in _sessions.values():
            sess.close()
    @classmethod
    @util.dependencies("sqlalchemy.orm.util")
    def identity_key(cls, orm_util, orm_util_, *args, **kwargs):
        """Return an identity key.
        This is an alias of :func:`.util.identity_key`.
        """
        return orm_util.identity_key(*args, **kwargs)
    @classmethod
    def object_session(cls, instance):
        """Return the :class:`.Session` to which an object belongs.
        This is an alias of :func:`.object_session`.
        """
        return object_session(instance)
# Named symbols for the lifecycle states of a SessionTransaction
# (presumably consumed by the transaction state machine defined later in
# this module; the consuming code is outside this excerpt).
ACTIVE = util.symbol('ACTIVE')
PREPARED = util.symbol('PREPARED')
COMMITTED = util.symbol('COMMITTED')
DEACTIVE = util.symbol('DEACTIVE')
CLOSED = util.symbol('CLOSED')
class SessionTransaction(object):
    """A :class:`.Session`-level transaction.
    :class:`.SessionTransaction` is a mostly behind-the-scenes object
    not normally referenced directly by application code. It coordinates
    among multiple :class:`.Connection` objects, maintaining a database
    transaction for each one individually, committing or rolling them
    back all at once. It also provides optional two-phase commit behavior
    which can augment this coordination operation.
    The :attr:`.Session.transaction` attribute of :class:`.Session`
    refers to the current :class:`.SessionTransaction` object in use, if any.
    A :class:`.SessionTransaction` is associated with a :class:`.Session`
    in its default mode of ``autocommit=False`` immediately, associated
    with no database connections. As the :class:`.Session` is called upon
    to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection`
    objects, a corresponding :class:`.Connection` and associated
    :class:`.Transaction` is added to a collection within the
    :class:`.SessionTransaction` object, becoming one of the
    connection/transaction pairs maintained by the
    :class:`.SessionTransaction`.
    The lifespan of the :class:`.SessionTransaction` ends when the
    :meth:`.Session.commit`, :meth:`.Session.rollback` or
    :meth:`.Session.close` methods are called. At this point, the
    :class:`.SessionTransaction` removes its association with its parent
    :class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
    mode will create a new :class:`.SessionTransaction` to replace it
    immediately, whereas a :class:`.Session` that's in ``autocommit=True``
    mode will remain without a :class:`.SessionTransaction` until the
    :meth:`.Session.begin` method is called.
    Another detail of :class:`.SessionTransaction` behavior is that it is
    capable of "nesting". This means that the :meth:`.Session.begin` method
    can be called while an existing :class:`.SessionTransaction` is already
    present, producing a new :class:`.SessionTransaction` that temporarily
    replaces the parent :class:`.SessionTransaction`. When a
    :class:`.SessionTransaction` is produced as nested, it assigns itself to
    the :attr:`.Session.transaction` attribute. When it is ended via
    :meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
    parent :class:`.SessionTransaction` back onto the
    :attr:`.Session.transaction` attribute. The behavior is effectively a
    stack, where :attr:`.Session.transaction` refers to the current head of
    the stack.
    The purpose of this stack is to allow nesting of
    :meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
    with various flavors of :meth:`.Session.begin`. This nesting behavior
    applies to when :meth:`.Session.begin_nested` is used to emit a
    SAVEPOINT transaction, and is also used to produce a so-called
    "subtransaction" which allows a block of code to use a
    begin/rollback/commit sequence regardless of whether or not its enclosing
    code block has begun a transaction. The :meth:`.flush` method, whether
    called explicitly or via autoflush, is the primary consumer of the
    "subtransaction" feature, in that it wishes to guarantee that it works
    within in a transaction block regardless of whether or not the
    :class:`.Session` is in transactional mode when the method is called.
    See also:
    :meth:`.Session.rollback`
    :meth:`.Session.commit`
    :meth:`.Session.begin`
    :meth:`.Session.begin_nested`
    :attr:`.Session.is_active`
    :meth:`.SessionEvents.after_commit`
    :meth:`.SessionEvents.after_rollback`
    :meth:`.SessionEvents.after_soft_rollback`
    """
    # Exception instance captured from a failed flush (see rollback() with
    # _capture_exception=True); surfaced by _assert_active() in its error
    # message until the enclosing transaction is rolled back.
    _rollback_exception = None
    def __init__(self, session, parent=None, nested=False):
        # session - the owning Session
        # parent  - the enclosing SessionTransaction for subtransactions
        #           and SAVEPOINTs; None for the root transaction
        # nested  - True when this transaction represents a SAVEPOINT;
        #           requires an enclosing (parent) transaction
        self.session = session
        self._connections = {}
        self._parent = parent
        self.nested = nested
        self._state = ACTIVE
        if not parent and nested:
            raise sa_exc.InvalidRequestError(
                "Can't start a SAVEPOINT transaction when no existing "
                "transaction is in progress")
        if self.session._enable_transaction_accounting:
            # record object state so that rollback can restore it
            self._take_snapshot()
        if self.session.dispatch.after_transaction_create:
            self.session.dispatch.after_transaction_create(self.session, self)
    @property
    def is_active(self):
        # active only while still attached to a Session and in ACTIVE state
        return self.session is not None and self._state is ACTIVE
    def _assert_active(self, prepared_ok=False,
                        rollback_ok=False,
                        deactive_ok=False,
                        closed_msg="This transaction is closed"):
        # Guard helper: raise an informative error when the current state
        # disallows the caller's operation; the keyword flags widen the
        # set of states the caller considers acceptable.
        if self._state is COMMITTED:
            raise sa_exc.InvalidRequestError(
                "This session is in 'committed' state; no further "
                "SQL can be emitted within this transaction."
            )
        elif self._state is PREPARED:
            if not prepared_ok:
                raise sa_exc.InvalidRequestError(
                    "This session is in 'prepared' state; no further "
                    "SQL can be emitted within this transaction."
                )
        elif self._state is DEACTIVE:
            if not deactive_ok and not rollback_ok:
                if self._rollback_exception:
                    raise sa_exc.InvalidRequestError(
                        "This Session's transaction has been rolled back "
                        "due to a previous exception during flush."
                        " To begin a new transaction with this Session, "
                        "first issue Session.rollback()."
                        " Original exception was: %s"
                        % self._rollback_exception
                    )
                elif not deactive_ok:
                    raise sa_exc.InvalidRequestError(
                        "This Session's transaction has been rolled back "
                        "by a nested rollback() call. To begin a new "
                        "transaction, issue Session.rollback() first."
                    )
        elif self._state is CLOSED:
            raise sa_exc.ResourceClosedError(closed_msg)
    @property
    def _is_transaction_boundary(self):
        # SAVEPOINTs and root transactions own their own snapshot; plain
        # subtransactions share the parent's (see _take_snapshot)
        return self.nested or not self._parent
    def connection(self, bindkey, **kwargs):
        # Return a Connection for the bind identified by ``bindkey``,
        # participating in this transaction.
        self._assert_active()
        bind = self.session.get_bind(bindkey, **kwargs)
        return self._connection_for_bind(bind)
    def _begin(self, nested=False):
        # Produce a child transaction (subtransaction or SAVEPOINT)
        # stacked on top of this one.
        self._assert_active()
        return SessionTransaction(
            self.session, self, nested=nested)
    def _iterate_parents(self, upto=None):
        # Return the chain (self, parent, grandparent, ...) up to but not
        # including ``upto``; raises if ``upto`` is not an ancestor.
        if self._parent is upto:
            return (self,)
        else:
            if self._parent is None:
                raise sa_exc.InvalidRequestError(
                    "Transaction %s is not on the active transaction list" % (
                        upto))
            return (self,) + self._parent._iterate_parents(upto)
    def _take_snapshot(self):
        # Capture (or, for subtransactions, inherit) the collections of
        # new/deleted/dirty states and identity-key switches so that
        # _restore_snapshot() can undo changes on rollback.
        if not self._is_transaction_boundary:
            self._new = self._parent._new
            self._deleted = self._parent._deleted
            self._dirty = self._parent._dirty
            self._key_switches = self._parent._key_switches
            return
        if not self.session._flushing:
            # flush pending changes first so the snapshot reflects a
            # clean starting point
            self.session.flush()
        self._new = weakref.WeakKeyDictionary()
        self._deleted = weakref.WeakKeyDictionary()
        self._dirty = weakref.WeakKeyDictionary()
        self._key_switches = weakref.WeakKeyDictionary()
    def _restore_snapshot(self, dirty_only=False):
        # Invert changes recorded since _take_snapshot(): expunge news,
        # undo identity-key switches, re-add deleteds, then expire loaded
        # attribute state so it reloads from the database.
        assert self._is_transaction_boundary
        for s in set(self._new).union(self.session._new):
            self.session._expunge_state(s)
            if s.key:
                del s.key
        for s, (oldkey, newkey) in self._key_switches.items():
            self.session.identity_map.discard(s)
            s.key = oldkey
            self.session.identity_map.replace(s)
        for s in set(self._deleted).union(self.session._deleted):
            if s.deleted:
                #assert s in self._deleted
                del s.deleted
            self.session._update_impl(s, discard_existing=True)
        assert not self.session._deleted
        for s in self.session.identity_map.all_states():
            if not dirty_only or s.modified or s in self._dirty:
                s._expire(s.dict, self.session.identity_map._modified)
    def _remove_snapshot(self):
        # Called on commit: for a root transaction with expire_on_commit,
        # expire everything and detach deleted states; for a SAVEPOINT,
        # fold this snapshot into the parent's.
        assert self._is_transaction_boundary
        if not self.nested and self.session.expire_on_commit:
            for s in self.session.identity_map.all_states():
                s._expire(s.dict, self.session.identity_map._modified)
            for s in self._deleted:
                s.session_id = None
            self._deleted.clear()
        elif self.nested:
            self._parent._new.update(self._new)
            self._parent._deleted.update(self._deleted)
            self._parent._key_switches.update(self._key_switches)
    def _connection_for_bind(self, bind):
        # Return the Connection in use for ``bind``, beginning a
        # DBAPI-level transaction (plain, SAVEPOINT or two-phase) on
        # first use and caching the pair under both conn and conn.engine.
        self._assert_active()
        if bind in self._connections:
            return self._connections[bind][0]
        if self._parent:
            conn = self._parent._connection_for_bind(bind)
            if not self.nested:
                # plain subtransaction: share the parent's connection
                return conn
        else:
            if isinstance(bind, engine.Connection):
                conn = bind
                if conn.engine in self._connections:
                    raise sa_exc.InvalidRequestError(
                        "Session already has a Connection associated for the "
                        "given Connection's Engine")
            else:
                conn = bind.contextual_connect()
        if self.session.twophase and self._parent is None:
            transaction = conn.begin_twophase()
        elif self.nested:
            transaction = conn.begin_nested()
        else:
            transaction = conn.begin()
        # third tuple member records whether we opened the connection
        # ourselves and therefore must close() it on transaction close
        self._connections[conn] = self._connections[conn.engine] = \
            (conn, transaction, conn is not bind)
        self.session.dispatch.after_begin(self.session, self, conn)
        return conn
    def prepare(self):
        # Public two-phase prepare; valid only on the root transaction
        # of a twophase-enabled Session.
        if self._parent is not None or not self.session.twophase:
            raise sa_exc.InvalidRequestError(
                "'twophase' mode not enabled, or not root transaction; "
                "can't prepare.")
        self._prepare_impl()
    def _prepare_impl(self):
        # Flush remaining changes, close out child subtransactions, and
        # (for two-phase roots) issue PREPARE on each connection.
        self._assert_active()
        if self._parent is None or self.nested:
            self.session.dispatch.before_commit(self.session)
        stx = self.session.transaction
        if stx is not self:
            for subtransaction in stx._iterate_parents(upto=self):
                subtransaction.commit()
        if not self.session._flushing:
            # flush repeatedly since after_flush hooks may create new
            # objects; give up after 100 passes
            for _flush_guard in range(100):
                if self.session._is_clean():
                    break
                self.session.flush()
            else:
                raise exc.FlushError(
                    "Over 100 subsequent flushes have occurred within "
                    "session.commit() - is an after_flush() hook "
                    "creating new objects?")
        if self._parent is None and self.session.twophase:
            try:
                for t in set(self._connections.values()):
                    t[1].prepare()
            except:
                with util.safe_reraise():
                    self.rollback()
        self._state = PREPARED
    def commit(self):
        # Commit the DBAPI transactions (only for roots and SAVEPOINTs),
        # fire after_commit, discard the snapshot, and close.
        self._assert_active(prepared_ok=True)
        if self._state is not PREPARED:
            self._prepare_impl()
        if self._parent is None or self.nested:
            for t in set(self._connections.values()):
                t[1].commit()
            self._state = COMMITTED
            self.session.dispatch.after_commit(self.session)
            if self.session._enable_transaction_accounting:
                self._remove_snapshot()
        self.close()
        return self._parent
    def rollback(self, _capture_exception=False):
        # Roll back this transaction and any child subtransactions; the
        # nearest boundary (root or SAVEPOINT) emits the actual ROLLBACK,
        # intermediate subtransactions are merely deactivated.
        self._assert_active(prepared_ok=True, rollback_ok=True)
        stx = self.session.transaction
        if stx is not self:
            for subtransaction in stx._iterate_parents(upto=self):
                subtransaction.close()
        if self._state in (ACTIVE, PREPARED):
            for transaction in self._iterate_parents():
                if transaction._parent is None or transaction.nested:
                    transaction._rollback_impl()
                    transaction._state = DEACTIVE
                    break
                else:
                    transaction._state = DEACTIVE
        sess = self.session
        if self.session._enable_transaction_accounting and \
                not sess._is_clean():
            # if items were added, deleted, or mutated
            # here, we need to re-restore the snapshot
            util.warn(
                "Session's state has been changed on "
                "a non-active transaction - this state "
                "will be discarded.")
            self._restore_snapshot(dirty_only=self.nested)
        self.close()
        if self._parent and _capture_exception:
            # record the in-flight exception (e.g. a flush failure) so
            # _assert_active can report it until rollback() is issued
            self._parent._rollback_exception = sys.exc_info()[1]
        sess.dispatch.after_soft_rollback(sess, self)
        return self._parent
    def _rollback_impl(self):
        # Emit ROLLBACK on every DBAPI transaction, restore the object
        # snapshot, and fire after_rollback.
        for t in set(self._connections.values()):
            t[1].rollback()
        if self.session._enable_transaction_accounting:
            self._restore_snapshot(dirty_only=self.nested)
        self.session.dispatch.after_rollback(self.session)
    def close(self):
        # Detach from the Session; a root transaction also releases or
        # closes its connections and, under autocommit=False, begins a
        # replacement transaction on the Session.
        self.session.transaction = self._parent
        if self._parent is None:
            for connection, transaction, autoclose in \
                    set(self._connections.values()):
                if autoclose:
                    # we opened this connection ourselves
                    connection.close()
                else:
                    # caller-supplied Connection: end only our transaction
                    transaction.close()
        self._state = CLOSED
        if self.session.dispatch.after_transaction_end:
            self.session.dispatch.after_transaction_end(self.session, self)
        if self._parent is None:
            if not self.session.autocommit:
                self.session.begin()
        self.session = None
        self._connections = None
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Context-manager exit: commit on success (rolling back if the
        # commit itself raises), roll back when an exception occurred.
        self._assert_active(deactive_ok=True, prepared_ok=True)
        if self.session.transaction is None:
            return
        if type is None:
            try:
                self.commit()
            except:
                with util.safe_reraise():
                    self.rollback()
        else:
            self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
    def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
                 _enable_transaction_accounting=True,
                 autocommit=False, twophase=False,
                 weak_identity_map=True, binds=None, extension=None,
                 info=None,
                 query_cls=query.Query):
        """Construct a new Session.
        See also the :class:`.sessionmaker` function which is used to
        generate a :class:`.Session`-producing callable with a given
        set of arguments.
        :param autocommit:
          .. warning::
             The autocommit flag is **not for general use**, and if it is used,
             queries should only be invoked within the span of a
             :meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
             queries outside of a demarcated transaction is a legacy mode
             of usage, and can in some cases lead to concurrent connection
             checkouts.
          Defaults to ``False``. When ``True``, the
          :class:`.Session` does not keep a persistent transaction running, and
          will acquire connections from the engine on an as-needed basis,
          returning them immediately after their use. Flushes will begin and
          commit (or possibly rollback) their own transaction if no
          transaction is present. When using this mode, the
          :meth:`.Session.begin` method is used to explicitly start
          transactions.
          .. seealso::
            :ref:`session_autocommit`
        :param autoflush: When ``True``, all query operations will issue a
           :meth:`~.Session.flush` call to this ``Session`` before proceeding.
           This is a convenience feature so that :meth:`~.Session.flush` need
           not be called repeatedly in order for database queries to retrieve
           results. It's typical that ``autoflush`` is used in conjunction with
           ``autocommit=False``. In this scenario, explicit calls to
           :meth:`~.Session.flush` are rarely needed; you usually only need to
           call :meth:`~.Session.commit` (which flushes) to finalize changes.
        :param bind: An optional :class:`.Engine` or :class:`.Connection` to
           which this ``Session`` should be bound. When specified, all SQL
           operations performed by this session will execute via this
           connectable.
        :param binds: An optional dictionary which contains more granular
           "bind" information than the ``bind`` parameter provides. This
           dictionary can map individual :class:`.Table`
           instances as well as :class:`~.Mapper` instances to individual
           :class:`.Engine` or :class:`.Connection` objects. Operations which
           proceed relative to a particular :class:`.Mapper` will consult this
           dictionary for the direct :class:`.Mapper` instance as
           well as the mapper's ``mapped_table`` attribute in order to locate a
           connectable to use. The full resolution is described in the
           :meth:`.Session.get_bind`.
           Usage looks like::
            Session = sessionmaker(binds={
                SomeMappedClass: create_engine('postgresql://engine1'),
                somemapper: create_engine('postgresql://engine2'),
                some_table: create_engine('postgresql://engine3'),
                })
          Also see the :meth:`.Session.bind_mapper`
          and :meth:`.Session.bind_table` methods.
        :param \class_: Specify an alternate class other than
           ``sqlalchemy.orm.session.Session`` which should be used by the
           returned class. This is the only argument that is local to the
           :class:`.sessionmaker` function, and is not sent directly to the
           constructor for ``Session``.
        :param _enable_transaction_accounting:  Defaults to ``True``.  A
           legacy-only flag which when ``False`` disables *all* 0.5-style
           object accounting on transaction boundaries, including auto-expiry
           of instances on rollback and commit, maintenance of the "new" and
           "deleted" lists upon rollback, and autoflush of pending changes upon
           :meth:`~.Session.begin`, all of which are interdependent.
        :param expire_on_commit:  Defaults to ``True``. When ``True``, all
           instances will be fully expired after each :meth:`~.commit`,
           so that all attribute/object access subsequent to a completed
           transaction will load from the most recent database state.
        :param extension: An optional
           :class:`~.SessionExtension` instance, or a list
           of such instances, which will receive pre- and post- commit and
           flush events, as well as a post-rollback event. **Deprecated.**
           Please see :class:`.SessionEvents`.
        :param info: optional dictionary of arbitrary data to be associated
           with this :class:`.Session`.  Is available via the :attr:`.Session.info`
           attribute.  Note the dictionary is copied at construction time so
           that modifications to the per-:class:`.Session` dictionary will be local
           to that :class:`.Session`.
           .. versionadded:: 0.9.0
        :param query_cls:  Class which should be used to create new Query
           objects, as returned by the :meth:`~.Session.query` method. Defaults
           to :class:`.Query`.
        :param twophase:  When ``True``, all transactions will be started as
            a "two phase" transaction, i.e. using the "two phase" semantics
            of the database in use along with an XID.  During a
            :meth:`~.commit`, after :meth:`~.flush` has been issued for all
            attached databases, the :meth:`~.TwoPhaseTransaction.prepare` method
            on each database's :class:`.TwoPhaseTransaction` will be called.
            This allows each database to roll back the entire transaction,
            before each transaction is committed.
        :param weak_identity_map:  Defaults to ``True`` - when set to
           ``False``, objects placed in the :class:`.Session` will be
           strongly referenced until explicitly removed or the
           :class:`.Session` is closed.  **Deprecated** - this option
           is obsolete.
        """
        # select the identity-map implementation (weak refs by default)
        if weak_identity_map:
            self._identity_cls = identity.WeakInstanceDict
        else:
            util.warn_deprecated("weak_identity_map=False is deprecated. "
                                 "This feature is not needed.")
            self._identity_cls = identity.StrongInstanceDict
        self.identity_map = self._identity_cls()
        self._new = {}   # InstanceState->object, strong refs object
        self._deleted = {}  # same
        self.bind = bind
        self.__binds = {}  # Mapper/Table -> Engine/Connection overrides
        self._flushing = False
        self._warn_on_events = False
        self.transaction = None
        # unique key under which this Session registers in _sessions
        self.hash_key = _new_sessionid()
        self.autoflush = autoflush
        self.autocommit = autocommit
        self.expire_on_commit = expire_on_commit
        self._enable_transaction_accounting = _enable_transaction_accounting
        self.twophase = twophase
        self._query_cls = query_cls
        if info:
            # copy the caller's dict into the memoized 'info' property
            self.info.update(info)
        if extension:
            # legacy SessionExtension support: adapt to event listeners
            for ext in util.to_list(extension):
                SessionExtension._adapt_listener(self, ext)
        if binds is not None:
            # note: loop variable 'bind' shadows the 'bind' parameter,
            # which has already been assigned to self.bind above
            for mapperortable, bind in binds.items():
                insp = inspect(mapperortable)
                if insp.is_selectable:
                    self.bind_table(mapperortable, bind)
                elif insp.is_mapper:
                    self.bind_mapper(mapperortable, bind)
                else:
                    assert False
        if not self.autocommit:
            # default mode: a SessionTransaction is present at all times
            self.begin()
        # register in the module-level weak registry
        _sessions[self.hash_key] = self
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
    @util.memoized_property
    def info(self):
        """A user-modifiable dictionary.
        The initial value of this dictionary can be populated using the
        ``info`` argument to the :class:`.Session` constructor or
        :class:`.sessionmaker` constructor or factory methods.  The dictionary
        here is always local to this :class:`.Session` and can be modified
        independently of all other :class:`.Session` objects.
        .. versionadded:: 0.9.0
        """
        # memoized: created once per Session on first access, then reused
        # (Session.__init__ updates it with the 'info' argument)
        return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this :class:`.Session`.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this
:meth:`~.Session.begin` can create a subtransaction if a transaction
is already in progress. For documentation on subtransactions, please
see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on
SAVEPOINT transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
:meth:`.begin` is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`.Connection` returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`.Connection` is returned
using :meth:`.Engine.contextual_connect` on the underlying
:class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes usage of the :meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`.Engine.connect`, indicating
the :class:`.Connection` should be considered "single use",
automatically closing when the first result set is closed. This
flag only has an effect if this :class:`.Session` is configured with
``autocommit=True`` and does not already have a transaction
in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct, such
as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`~.expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`.Connection.execute`, whether this is passed as a single
dictionary, or a list of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`.Connection` which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`.Connection`, which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`.Table` objects passed to the method; see the documentation
for :meth:`.Session.get_bind` for a full description of this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`.ResultProxy` returned by the :meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`.Connection` available, a temporary :class:`.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`.ResultProxy` has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`.expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`.Connection.execute` - core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(clause)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state._detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
# TODO: need much more test coverage for bind_mapper() and similar !
# TODO: + crystallize + document resolution order
# vis. bind_mapper/bind_table
def bind_mapper(self, mapper, bind):
"""Bind operations for a mapper to a Connectable.
mapper
A mapper instance or mapped class
bind
Any Connectable: a :class:`.Engine` or :class:`.Connection`.
All subsequent operations involving this mapper will use the given
`bind`.
"""
if isinstance(mapper, type):
mapper = class_mapper(mapper)
self.__binds[mapper.base_mapper] = bind
for t in mapper._all_tables:
self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A :class:`.Table` instance
bind
Any Connectable: a :class:`.Engine` or :class:`.Connection`.
All subsequent operations involving this :class:`.Table` will use the
given `bind`.
"""
self.__binds[table] = bind
    def get_bind(self, mapper=None, clause=None):
        """Return a "bind" to which this :class:`.Session` is bound.

        The "bind" is usually an instance of :class:`.Engine`,
        except in the case where the :class:`.Session` has been
        explicitly bound directly to a :class:`.Connection`.

        For a multiply-bound or unbound :class:`.Session`, the
        ``mapper`` or ``clause`` arguments are used to determine the
        appropriate bind to return.

        Note that the "mapper" argument is usually present
        when :meth:`.Session.get_bind` is called via an ORM
        operation such as a :meth:`.Session.query`, each
        individual INSERT/UPDATE/DELETE operation within a
        :meth:`.Session.flush`, call, etc.

        The order of resolution is:

        1. if mapper given and session.binds is present,
           locate a bind based on mapper.
        2. if clause given and session.binds is present,
           locate a bind based on :class:`.Table` objects
           found in the given clause present in session.binds.
        3. if session.bind is present, return that.
        4. if clause given, attempt to return a bind
           linked to the :class:`.MetaData` ultimately
           associated with the clause.
        5. if mapper given, attempt to return a bind
           linked to the :class:`.MetaData` ultimately
           associated with the :class:`.Table` or other
           selectable to which the mapper is mapped.
        6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
           is raised.

        :param mapper:
          Optional :func:`.mapper` mapped class or instance of
          :class:`.Mapper`.   The bind can be derived from a
          :class:`.Mapper` first by consulting the "binds" map
          associated with this :class:`.Session`, and secondly by
          consulting the :class:`.MetaData` associated with the
          :class:`.Table` to which the :class:`.Mapper` is mapped
          for a bind.

        :param clause:
          A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
          :func:`~.sql.expression.text`, etc.).  If the ``mapper``
          argument is not present or could not produce a bind, the given
          expression construct will be searched for a bound element,
          typically a :class:`.Table` associated with bound
          :class:`.MetaData`.
        """
        # No context at all: either the Session-wide bind or an error.
        if mapper is clause is None:
            if self.bind:
                return self.bind
            else:
                raise sa_exc.UnboundExecutionError(
                    "This session is not bound to a single Engine or "
                    "Connection, and no context was provided to locate "
                    "a binding.")
        # Normalize a mapped class or Mapper into a Mapper (or None).
        c_mapper = mapper is not None and _class_to_mapper(mapper) or None
        # manually bound?  steps 1 and 2: consult the explicit binds map,
        # first by mapper/table, then by tables found in the clause.
        if self.__binds:
            if c_mapper:
                if c_mapper.base_mapper in self.__binds:
                    return self.__binds[c_mapper.base_mapper]
                elif c_mapper.mapped_table in self.__binds:
                    return self.__binds[c_mapper.mapped_table]
            if clause is not None:
                for t in sql_util.find_tables(clause, include_crud=True):
                    if t in self.__binds:
                        return self.__binds[t]
        # step 3: the Session-wide bind, if any.
        if self.bind:
            return self.bind
        # step 4: a bind carried by the clause (via its MetaData).
        if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
            return clause.bind
        # step 5: a bind carried by the mapper's mapped table.
        if c_mapper and c_mapper.mapped_table.bind:
            return c_mapper.mapped_table.bind
        # step 6: nothing found - describe what context we had and raise.
        context = []
        if mapper is not None:
            context.append('mapper %s' % c_mapper)
        if clause is not None:
            context.append('SQL expression')
        raise sa_exc.UnboundExecutionError(
            "Could not locate a bind configured on %s or this Session" % (
                ', '.join(context)))
def query(self, *entities, **kwargs):
"""Return a new :class:`.Query` object corresponding to this
:class:`.Session`."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
.. versionadded:: 0.7.6
"""
autoflush = self.autoflush
self.autoflush = False
yield self
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
try:
self.flush()
except sa_exc.StatementError as e:
# note we are reraising StatementError as opposed to
# raising FlushError with "chaining" to remain compatible
# with code that catches StatementError, IntegrityError,
# etc.
e.add_detail(
"raised as a result of Query-invoked autoflush; "
"consider using a session.no_autoflush block if this "
"flush is occurring prematurely")
util.raise_from_cause(e)
def refresh(self, instance, attribute_names=None, lockmode=None):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statement were emitted in the ongoing transaction, or if autocommit
mode is turned on.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
if loading.load_on_ident(
self.query(object_mapper(instance)),
state.key, refresh_state=state,
lockmode=lockmode,
only_load_props=attribute_names) is None:
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" %
instance_str(instance))
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attributes on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(state.manager.mapper.cascade_iterator(
'refresh-expire', state))
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach()
@util.deprecated("0.7", "The non-weak-referencing identity map "
"feature is no longer needed.")
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" %
state_str(state))
cascaded = list(state.manager.mapper.cascade_iterator(
'expunge', state))
self._expunge_state(state)
for o, m, st_, dct_ in cascaded:
self._expunge_state(st_)
def _expunge_state(self, state):
if state in self._new:
self._new.pop(state)
state._detach()
elif self.identity_map.contains_state(state):
self.identity_map.discard(state)
self._deleted.pop(state, None)
state._detach()
elif self.transaction:
self.transaction._deleted.pop(state, None)
def _register_newly_persistent(self, states):
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if _none_set.intersection(instance_key[1]) and \
not mapper.allow_partial_pks or \
_none_set.issuperset(instance_key[1]):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such aswithin a load() event."
% state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key, instance_key)
state.key = instance_key
self.identity_map.replace(state)
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states),
self.identity_map
)
self._register_altered(states)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
self.identity_map.discard(state)
self._deleted.pop(state, None)
state.deleted = True
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
'save-update',
state,
halt_on=self._contains_state):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
if state in self._deleted:
return
# ensure object is attached to allow the
# cascade operation to load deferred attributes
# and collections
self._attach(state, include_before=True)
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(state.manager.mapper.cascade_iterator(
'delete', state))
self._deleted[state] = state.obj()
self.identity_map.add(state)
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target instance.
The resulting target instance is then returned by the method; the
original source instance is left unmodified, and un-associated with the
:class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the method.
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive=_recursive)
finally:
self.autoflush = autoflush
def _merge(self, state, state_dict, load=True, _recursive=None):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False.")
key = mapper._identity_key_from_state(state)
if key in self.identity_map:
merged = self.identity_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False.")
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif not _none_set.intersection(key[1]) or \
(mapper.allow_partial_pks and
not _none_set.issuperset(key[1])):
merged = self.query(mapper.class_).get(key[1])
else:
merged = None
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
if existing_version is not attributes.PASSIVE_NO_RESULT and \
merged_version is not attributes.PASSIVE_NO_RESULT and \
existing_version != merged_version:
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
state_str(merged_state),
merged_version
))
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
for prop in mapper.iterate_properties:
prop.merge(self, state, state_dict,
merged_state, merged_dict,
load, _recursive)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session" %
state_str(state))
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - it can't be registered "
"as pending" % state_str(state))
self._before_attach(state)
if state not in self._new:
self._new[state] = state.obj()
state.insert_order = len(self._new)
self._attach(state)
def _update_impl(self, state, discard_existing=False):
if (self.identity_map.contains_state(state) and
state not in self._deleted):
return
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
if state.deleted:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. Use the make_transient() "
"function to send this object back to the transient state." %
state_str(state)
)
self._before_attach(state)
self._deleted.pop(state, None)
if discard_existing:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
self._attach(state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def _delete_impl(self, state):
if state in self._deleted:
return
if state.key is None:
return
self._attach(state, include_before=True)
self._deleted[state] = state.obj()
self.identity_map.add(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key values present on this
object - it follows that this functionality
generally only works for many-to-one-relationships.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method is
similar to the ``load_on_pending`` flag on :func:`.relationship`. Unlike
that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. versionadded:: 0.8
.. seealso::
``load_on_pending`` at :func:`.relationship` - this flag
allows per-relationship loading of many-to-ones on items that
are pending.
"""
state = attributes.instance_state(obj)
self._attach(state, include_before=True)
state._load_pending = True
def _before_attach(self, state):
if state.session_id != self.hash_key and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
def _attach(self, state, include_before=False):
if state.key and \
state.key in self.identity_map and \
not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError("Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (state_str(state), state.key))
if state.session_id and \
state.session_id is not self.hash_key and \
state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (state_str(state),
state.session_id, self.hash_key))
if state.session_id != self.hash_key:
if include_before and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = state.obj()
if self.dispatch.after_attach:
self.dispatch.after_attach(self, state.obj())
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(list(self._new.values()) + list(self.identity_map.values()))
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
operations int the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead."
% method)
def _is_clean(self):
return not self.identity_map.check_modified() and \
not self._deleted and \
not self._new
    def _flush(self, objects=None):
        """Perform the actual flush: build a unit-of-work transaction,
        register saves/updates/deletes, and execute it within a
        subtransaction, rolling back on any error.
        """
        dirty = self._dirty_states
        # nothing to do: clear modified tracking and bail out
        if not dirty and not self._deleted and not self._new:
            self.identity_map._modified.clear()
            return
        flush_context = UOWTransaction(self)
        if self.dispatch.before_flush:
            self.dispatch.before_flush(self, flush_context, objects)
            # re-establish "dirty states" in case the listeners
            # added new ones
            dirty = self._dirty_states
        deleted = set(self._deleted)
        new = set(self._new)
        dirty = set(dirty).difference(deleted)
        # create the set of all objects we want to operate upon
        if objects:
            # specific list passed in
            objset = set()
            for o in objects:
                try:
                    state = attributes.instance_state(o)
                except exc.NO_STATE:
                    raise exc.UnmappedInstanceError(o)
                objset.add(state)
        else:
            objset = None
        # store objects whose fate has been decided
        processed = set()
        # put all saves/updates into the flush context. detect top-level
        # orphans and throw them into deleted.
        if objset:
            proc = new.union(dirty).intersection(objset).difference(deleted)
        else:
            proc = new.union(dirty).difference(deleted)
        for state in proc:
            is_orphan = (
                _state_mapper(state)._is_orphan(state) and state.has_identity)
            flush_context.register_object(state, isdelete=is_orphan)
            processed.add(state)
        # put all remaining deletes into the flush context.
        if objset:
            proc = deleted.intersection(objset).difference(processed)
        else:
            proc = deleted.difference(processed)
        for state in proc:
            flush_context.register_object(state, isdelete=True)
        if not flush_context.has_work:
            return
        # run the unit of work inside a subtransaction so a failure
        # rolls back only the flush's own work
        flush_context.transaction = transaction = self.begin(
            subtransactions=True)
        try:
            # flag so session-mutating operations warn during execution
            self._warn_on_events = True
            try:
                flush_context.execute()
            finally:
                self._warn_on_events = False
            self.dispatch.after_flush(self, flush_context)
            flush_context.finalize_flush_changes()
            # attribute changes made by inner-flush event handlers on
            # previously clean instances won't be persisted; reset them
            # and warn.
            if not objects and self.identity_map._modified:
                len_ = len(self.identity_map._modified)
                statelib.InstanceState._commit_all_states(
                    [(state, state.dict) for state in
                     self.identity_map._modified],
                    instance_dict=self.identity_map)
                util.warn("Attribute history events accumulated on %d "
                          "previously clean instances "
                          "within inner-flush event handlers have been reset, "
                          "and will not result in database updates. "
                          "Consider using set_committed_value() within "
                          "inner-flush event handlers to avoid this warning."
                          % len_)
            # useful assertions:
            # if not objects:
            #     assert not self.identity_map._modified
            # else:
            #     assert self.identity_map._modified == \
            #         self.identity_map._modified.difference(objects)
            self.dispatch.after_flush_postexec(self, flush_context)
            transaction.commit()
        except:
            # re-raise the original exception after rolling back
            with util.safe_reraise():
                transaction.rollback(_capture_exception=True)
def is_modified(self, instance, include_collections=True,
passive=True):
"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
.. versionchanged:: 0.8
When using SQLAlchemy 0.7 and earlier, the ``passive``
flag should **always** be explicitly set to ``True``,
else SQL loads/autoflushes may proceed which can affect
the modified state itself:
``session.is_modified(someobject, passive=True)``\ .
In 0.8 and above, the behavior is corrected and
this flag is ignored.
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may report
``False`` when tested with this method. This is because
the object may have received change events via attribute
mutation, thus placing it in :attr:`.Session.dirty`,
but ultimately the state is the same as that loaded from
the database, resulting in no net change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive:
.. versionchanged:: 0.8
Ignored for backwards compatibility.
When using SQLAlchemy 0.7 and earlier, this flag should always
be set to ``True``.
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if \
(
not include_collections and
hasattr(attr.impl, 'get_collection')
) or not hasattr(attr.impl, 'get_history'):
continue
(added, unchanged, deleted) = \
attr.impl.get_history(state, dict_,
passive=attributes.NO_CHANGE)
if added or deleted:
return True
else:
return False
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and awaits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is replaced
with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
From all the above, it follows that the only purpose to this flag is
for application frameworks that wish to detect is a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
.. seealso::
:func:`.identity_key` - helper function to produce the keys used
in this dictionary.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[state.obj()
for state in self._dirty_states
if state not in self._deleted])
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
    """A configurable :class:`.Session` factory.

    Calling a :class:`.sessionmaker` instance produces a new
    :class:`.Session`, built from the configurational keyword arguments
    established here::

        # global scope
        Session = sessionmaker(autoflush=False)

        # later, in a local scope, create and use a session:
        sess = Session()

    Keyword arguments passed at call time override the configured
    defaults::

        Session = sessionmaker()

        # bind an individual session to a connection
        sess = Session(bind=connection)

    Additional defaults may be supplied after construction via
    :meth:`.configure`, typically used to associate one or more
    :class:`.Engine` objects with a factory before its first use::

        # application starts
        Session = sessionmaker()

        # ... later
        engine = create_engine('sqlite:///foo.db')
        Session.configure(bind=engine)

        sess = Session()

    .. seealso::

        :ref:`session_getting` - introductory text on creating
        sessions using :class:`.sessionmaker`.
    """

    def __init__(self, bind=None, class_=Session, autoflush=True,
                 autocommit=False,
                 expire_on_commit=True,
                 info=None, **kw):
        """Construct a new :class:`.sessionmaker`.

        All arguments here except for ``class_`` correspond to arguments
        accepted by :class:`.Session` directly; see the
        :meth:`.Session.__init__` docstring for details.

        :param bind: a :class:`.Engine` or other :class:`.Connectable`
         with which newly created :class:`.Session` objects will be
         associated.
        :param class_: class to use in order to create new
         :class:`.Session` objects.  Defaults to :class:`.Session`.
        :param autoflush: the autoflush setting to use with newly
         created :class:`.Session` objects.
        :param autocommit: the autocommit setting to use with newly
         created :class:`.Session` objects.
        :param expire_on_commit=True: the expire_on_commit setting to
         use with newly created :class:`.Session` objects.
        :param info: optional dictionary of information that will be
         available via :attr:`.Session.info`.  Note this dictionary is
         *updated*, not replaced, when the ``info`` parameter is also
         specified to a specific :class:`.Session` construction call.

         .. versionadded:: 0.9.0

        :param kw: all other keyword arguments are passed to the
         constructor of newly created :class:`.Session` objects.
        """
        kw.update(
            bind=bind,
            autoflush=autoflush,
            autocommit=autocommit,
            expire_on_commit=expire_on_commit,
        )
        if info is not None:
            kw['info'] = info
        self.kw = kw
        # Subclass the requested Session class so that event listeners
        # can be associated with this factory specifically.
        self.class_ = type(class_.__name__, (class_,), {})

    def __call__(self, **local_kw):
        """Produce a new :class:`.Session` object using the
        configuration established in this :class:`.sessionmaker`.

        In Python, the ``__call__`` method is invoked on an object when
        it is "called" in the same way as a function::

            Session = sessionmaker()
            session = Session()  # invokes sessionmaker.__call__()
        """
        for key, default in self.kw.items():
            if key == 'info' and 'info' in local_kw:
                # Merge rather than replace the configured info dict.
                merged = default.copy()
                merged.update(local_kw['info'])
                local_kw['info'] = merged
            else:
                local_kw.setdefault(key, default)
        return self.class_(**local_kw)

    def configure(self, **new_kw):
        """(Re)configure the arguments for this sessionmaker.

        e.g.::

            Session = sessionmaker()

            Session.configure(bind=create_engine('sqlite://'))
        """
        self.kw.update(new_kw)

    def __repr__(self):
        configured = ", ".join(
            "%s=%r" % (key, value) for key, value in self.kw.items()
        )
        return "%s(class_=%r,%s)" % (
            self.__class__.__name__,
            self.class_.__name__,
            configured,
        )
def make_transient(instance):
    """Make the given instance 'transient'.

    Removes the instance's association with any session along with its
    "identity key", so that it appears as though newly constructed while
    retaining its attribute values.  The "deleted" flag on the state is
    also reset if the object had been explicitly deleted by its session.
    Attributes which were "expired" or deferred at the instance level
    revert to undefined and will not trigger any loads.
    """
    state = attributes.instance_state(instance)
    session = _state_session(state)
    if session:
        session._expunge_state(state)
    # Drop expired-state markers and deferred loader callables.
    state.callables.clear()
    if state.key:
        del state.key
    if state.deleted:
        del state.deleted
def make_transient_to_detached(instance):
    """Make the given transient instance 'detached'.

    All attribute history on the instance is reset as though it were
    freshly loaded from a query; missing attributes are marked as
    expired.  The primary key attributes of the object, which are
    required, become the "key" of the instance.

    The object can then be added to a session, or merged possibly with
    the ``load=False`` flag, at which point it will look as if it were
    loaded that way, without emitting SQL.  This differs from a normal
    :meth:`.Session.merge` in that persistent state is manufactured
    without any SQL calls.

    .. versionadded:: 0.9.5

    .. seealso::

        :func:`.make_transient`
    """
    state = attributes.instance_state(instance)
    if state.session_id or state.key:
        raise sa_exc.InvalidRequestError(
            "Given object must be transient")
    state.key = state.mapper._identity_key_from_state(state)
    if state.deleted:
        del state.deleted
    # Commit all loaded values, then expire whatever was never loaded.
    state._commit_all(state.dict)
    state._expire_attributes(state.dict, state.unloaded)
def object_session(instance):
    """Return the ``Session`` to which *instance* belongs.

    Raises :class:`.UnmappedInstanceError` if the instance is not a
    mapped instance.
    """
    try:
        state = attributes.instance_state(instance)
        return _state_session(state)
    except exc.NO_STATE:
        raise exc.UnmappedInstanceError(instance)
_new_sessionid = util.counter()
| mit |
# ``flask.ext`` was a deprecated import shim that was removed in Flask 1.0;
# import the extension package directly instead.
from flask_script import Manager, prompt, prompt_bool, prompt_pass

from harpy_network import app, db
from harpy_network.models.users import User

# Command-line manager bound to the application instance; commands are
# registered below with @manager.command.
manager = Manager(app)
@manager.command
def createdb():
    """
    Creates the database tables.

    Registered as a CLI command via ``manager``; invoked as
    ``python manage.py createdb``.
    """
    db.create_all()
    print("Database created.")
@manager.command
def destroydb():
    """
    Destroys the database tables.

    Prompts for confirmation first, since dropping the tables is not
    reversible; nothing happens if the user declines.
    """
    if prompt_bool("Are you sure you wish to destroy your database tables? This is not reversible."):
        db.drop_all()
        print("Database destroyed.")
@manager.command
def create_user(email=None, password=None, admin=None):
    """
    Creates a new user.

    Any of ``email``, ``password`` or ``admin`` not supplied as a
    command-line option is collected interactively (the password via a
    no-echo prompt).
    """
    if email is None:
        email = prompt("email")
    if password is None:
        password = prompt_pass("password")
    if admin is None:
        admin = prompt_bool("Make this user an admin?")
    new_user = User(email, password, admin)
    db.session.add(new_user)
    db.session.commit()
    print("New User Created.")
if __name__ == "__main__":
    # Dispatch to whichever @manager.command function was requested.
    manager.run()
screwt/tablib | tablib/packages/yaml/constructor.py | 114 | 25356 |
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError']
from error import *
from nodes import *
import datetime
try:
set
except NameError:
from sets import Set as set
import binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
    """Raised when a YAML node cannot be converted into a Python object."""
    pass
class BaseConstructor(object):
    """Build native Python objects from a parsed YAML node graph.

    Tag handlers are looked up in two class-level registries:
    ``yaml_constructors`` for exact tag matches and
    ``yaml_multi_constructors`` for tag-prefix matches.  Subclasses
    extend these via :meth:`add_constructor` /
    :meth:`add_multi_constructor`.
    """

    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        # node -> finished object; lets anchors/aliases share one object.
        self.constructed_objects = {}
        # Nodes currently under construction, used to detect recursion
        # that cannot be resolved.
        self.recursive_objects = {}
        # Generator-based constructors whose second phase is pending.
        self.state_generators = []
        self.deep_construct = False

    def check_data(self):
        # If there are more documents available?
        return self.check_node()

    def get_data(self):
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        """Construct one document, drain any pending two-phase
        (generator-based) constructors, then reset per-document caches."""
        data = self.construct_object(node)
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct and cache the Python object for *node*.

        When *deep* is true, nested objects are built eagerly instead of
        being deferred through pending generators.
        """
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        constructor = None
        tag_suffix = None
        # Handler resolution: exact tag, then the first matching prefix
        # (registry iteration order), then the None fallbacks, then a
        # default based on the node kind.
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-phase construction: the first yield produces the
            # (possibly incomplete) object, the remainder of the
            # generator fills it in.  (Python 2: generators expose
            # a .next() method.)
            generator = data
            data = generator.next()
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        """Return the scalar node's value; raise for non-scalar nodes."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        """Construct a list from a sequence node."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        """Construct a dict from a mapping node; keys must be hashable."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            try:
                hash(key)
            except TypeError, exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unacceptable key (%s)" % exc, key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        """Construct a list of (key, value) pairs, preserving order and
        allowing duplicate keys."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    def add_constructor(cls, tag, constructor):
        """Register *constructor* for an exact *tag* on this class only
        (copy-on-write so subclass registries stay independent)."""
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor
    # Pre-decorator-syntax classmethod declaration (Python < 2.4 style).
    add_constructor = classmethod(add_constructor)

    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        """Register *multi_constructor* for all tags starting with
        *tag_prefix*; the handler receives the remaining tag suffix."""
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
    add_multi_constructor = classmethod(add_multi_constructor)
class SafeConstructor(BaseConstructor):
    """Constructor for the standard YAML tags only.

    Suitable for untrusted input: unlike :class:`Constructor`, it never
    resolves tags to arbitrary Python callables.
    """

    def construct_scalar(self, node):
        # A mapping containing the '=' (value) key yields that key's
        # value when a scalar is requested.
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == u'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return BaseConstructor.construct_scalar(self, node)

    def flatten_mapping(self, node):
        """Resolve '<<' merge keys in-place, prepending merged entries
        so that the mapping's own keys take precedence."""
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == u'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # Earlier mappings in the list take precedence, hence
                    # the reverse before extending.
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == u'tag:yaml.org,2002:value':
                key_node.tag = u'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        # Resolve merge keys before delegating to the base mapping logic.
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return BaseConstructor.construct_mapping(self, node, deep=deep)

    def construct_yaml_null(self, node):
        # Validate the node shape (construct_scalar raises on non-scalar)
        # but always produce None.
        self.construct_scalar(node)
        return None

    # Recognized YAML 1.1 boolean spellings (matched case-insensitively).
    bool_values = {
        u'yes': True,
        u'no': False,
        u'true': True,
        u'false': False,
        u'on': True,
        u'off': False,
    }

    def construct_yaml_bool(self, node):
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node):
        """Parse YAML 1.1 integers: binary (0b), hex (0x), octal
        (leading 0), sexagesimal (colon-separated base 60) and decimal,
        with an optional sign and '_' digit separators."""
        value = str(self.construct_scalar(node))
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal: accumulate from the least significant part.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)

    # Portable +inf/NaN values built without float('inf'), which is not
    # reliable on pre-2.6 Pythons.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        """Parse YAML 1.1 floats, including .inf/.nan and sexagesimal."""
        value = str(self.construct_scalar(node))
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)

    def construct_yaml_binary(self, node):
        """Decode a !!binary scalar from base64 into a byte string."""
        value = self.construct_scalar(node)
        try:
            return str(value).decode('base64')
        except (binascii.Error, UnicodeEncodeError), exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    # ISO 8601-style timestamp as specified by the YAML timestamp tag;
    # re.X makes the embedded whitespace insignificant.
    timestamp_regexp = re.compile(
            ur'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)

    def construct_yaml_timestamp(self, node):
        """Construct a datetime.date, or a naive datetime.datetime
        normalized to UTC when a timezone offset is present."""
        # NOTE(review): 'value' is unused; the regexp is applied to
        # node.value directly.
        value = self.construct_scalar(node)
        match = self.timestamp_regexp.match(node.value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        if values['fraction']:
            # Pad/truncate the fractional part to microseconds.
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        delta = None
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
        if delta:
            # Subtracting the offset normalizes the result to UTC.
            data -= delta
        return data

    def construct_yaml_omap(self, node):
        """Two-phase construction of !!omap as a list of (key, value)."""
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))

    def construct_yaml_pairs(self, node):
        """Two-phase construction of !!pairs (like omap, duplicates OK)."""
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node):
        """Two-phase construction of !!set from a mapping node's keys."""
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node):
        value = self.construct_scalar(node)
        try:
            # Prefer a plain byte string when the content is pure ASCII.
            return value.encode('ascii')
        except UnicodeEncodeError:
            return value

    def construct_yaml_seq(self, node):
        """Two-phase construction of !!seq as a list."""
        data = []
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        """Two-phase construction of !!map as a dict."""
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node, cls):
        """Two-phase construction of an arbitrary instance of *cls*,
        restoring state via __setstate__ when available."""
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node):
        """Fallback handler for tags with no registered constructor."""
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
                node.start_mark)
# Register handlers for the standard YAML 1.1 tags on SafeConstructor;
# the final None entry is the fallback for unknown tags.
SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:null',
        SafeConstructor.construct_yaml_null)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:bool',
        SafeConstructor.construct_yaml_bool)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:int',
        SafeConstructor.construct_yaml_int)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:float',
        SafeConstructor.construct_yaml_float)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:binary',
        SafeConstructor.construct_yaml_binary)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:timestamp',
        SafeConstructor.construct_yaml_timestamp)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:omap',
        SafeConstructor.construct_yaml_omap)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:pairs',
        SafeConstructor.construct_yaml_pairs)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:set',
        SafeConstructor.construct_yaml_set)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:str',
        SafeConstructor.construct_yaml_str)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:seq',
        SafeConstructor.construct_yaml_seq)

SafeConstructor.add_constructor(
        u'tag:yaml.org,2002:map',
        SafeConstructor.construct_yaml_map)

SafeConstructor.add_constructor(None,
        SafeConstructor.construct_undefined)
class Constructor(SafeConstructor):
def construct_python_str(self, node):
return self.construct_scalar(node).encode('utf-8')
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_long(self, node):
return long(self.construct_yaml_int(node))
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
try:
__import__(name)
except ImportError, exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
return sys.modules[name]
def find_python_name(self, name, mark):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if u'.' in name:
# Python 2.4 only
#module_name, object_name = name.rsplit('.', 1)
items = name.split('.')
object_name = items.pop()
module_name = '.'.join(items)
else:
module_name = '__builtin__'
object_name = name
try:
__import__(module_name)
except ImportError, exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r" % (object_name.encode('utf-8'),
module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value.encode('utf-8'),
node.start_mark)
return self.find_python_module(suffix, node.start_mark)
class classobj: pass
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if newobj and isinstance(cls, type(self.classobj)) \
and not args and not kwds:
instance = self.classobj()
instance.__class__ = cls
return instance
elif newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
setattr(object, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
# Register the !!python/* tag handlers: exact tags first, then the
# suffix-carrying multi-constructors for name/module/object tags.
Constructor.add_constructor(
    u'tag:yaml.org,2002:python/none',
    Constructor.construct_yaml_null)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/bool',
    Constructor.construct_yaml_bool)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/str',
    Constructor.construct_python_str)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    Constructor.construct_python_unicode)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/int',
    Constructor.construct_yaml_int)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/long',
    Constructor.construct_python_long)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/float',
    Constructor.construct_yaml_float)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/complex',
    Constructor.construct_python_complex)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/list',
    Constructor.construct_yaml_seq)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/tuple',
    Constructor.construct_python_tuple)

Constructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    Constructor.construct_yaml_map)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/name:',
    Constructor.construct_python_name)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/module:',
    Constructor.construct_python_module)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object:',
    Constructor.construct_python_object)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/apply:',
    Constructor.construct_python_object_apply)

Constructor.add_multi_constructor(
    u'tag:yaml.org,2002:python/object/new:',
    Constructor.construct_python_object_new)
| mit |
vijayendrabvs/hap | neutron/plugins/nuage/nuage_models.py | 8 | 2770 | # Copyright 2014 Alcatel-Lucent USA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ronak Shah, Nuage Networks, Alcatel-Lucent USA Inc.
from sqlalchemy import Boolean, Column, ForeignKey, String
from neutron.db import model_base
from neutron.db import models_v2
class NetPartition(model_base.BASEV2, models_v2.HasId):
    """Represents a Nuage net-partition known to the plugin."""
    __tablename__ = 'net_partitions'
    name = Column(String(64))
    # Nuage L3 / L2 domain template identifiers for this partition.
    l3dom_tmplt_id = Column(String(36))
    l2dom_tmplt_id = Column(String(36))
class NetPartitionRouter(model_base.BASEV2):
    """Associates a Neutron router with a net partition and the
    corresponding router object on the Nuage backend."""
    __tablename__ = "net_partition_router_mapping"
    # Composite primary key; rows are removed automatically when either
    # the partition or the router is deleted (ON DELETE CASCADE).
    net_partition_id = Column(String(36),
                              ForeignKey('net_partitions.id',
                                         ondelete="CASCADE"),
                              primary_key=True)
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    nuage_router_id = Column(String(36))
class RouterZone(model_base.BASEV2):
    """Mapping of a Neutron router to Nuage zone/user/group identifiers."""
    __tablename__ = "router_zone_mapping"
    router_id = Column(String(36),
                       ForeignKey('routers.id', ondelete="CASCADE"),
                       primary_key=True)
    nuage_zone_id = Column(String(36))   # Nuage zone backing this router
    nuage_user_id = Column(String(36))   # Nuage user id owning the zone
    nuage_group_id = Column(String(36))  # Nuage group id owning the zone
class SubnetL2Domain(model_base.BASEV2):
    """Mapping of a Neutron subnet to its Nuage L2 domain.

    Also records the net-partition the subnet was created in and the
    Nuage-side template/user/group identifiers.
    """
    __tablename__ = 'subnet_l2dom_mapping'
    subnet_id = Column(String(36),
                       ForeignKey('subnets.id', ondelete="CASCADE"),
                       primary_key=True)
    net_partition_id = Column(String(36),
                              ForeignKey('net_partitions.id',
                                         ondelete="CASCADE"))
    nuage_subnet_id = Column(String(36))       # Nuage subnet / l2domain id
    nuage_l2dom_tmplt_id = Column(String(36))  # Nuage L2 domain template id
    nuage_user_id = Column(String(36))
    nuage_group_id = Column(String(36))
class PortVPortMapping(model_base.BASEV2):
    """Mapping of a Neutron port to its Nuage vport/vif pair."""
    __tablename__ = 'port_mapping'
    port_id = Column(String(36),
                     ForeignKey('ports.id', ondelete="CASCADE"),
                     primary_key=True)
    nuage_vport_id = Column(String(36))  # Nuage vport backing the port
    nuage_vif_id = Column(String(36))    # Nuage vif attached to the vport
    static_ip = Column(Boolean())        # True if the port uses a fixed IP
| apache-2.0 |
Edraak/edx-platform | openedx/core/djangoapps/user_api/tests/test_helpers.py | 31 | 7500 | """
Tests for helper functions.
"""
import json
import mock
import ddt
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from nose.tools import raises
from ..helpers import (
intercept_errors, shim_student_view,
FormDescription, InvalidFieldError
)
class FakeInputException(Exception):
    """Stand-in exception raised inside the decorated function under test."""
class FakeOutputException(Exception):
    """Stand-in exception expected to surface from the decorator under test."""
@intercept_errors(FakeOutputException, ignore_errors=[ValueError])
def intercepted_function(raise_error=None):
    """Function used to test the intercept error decorator.

    Keyword Arguments:
        raise_error (Exception): If provided, raise this exception.
    """
    # Guard-clause form: succeed silently unless an error was requested.
    if raise_error is None:
        return
    raise raise_error
class InterceptErrorsTest(TestCase):
    """Tests for the decorator that intercepts errors."""

    @raises(FakeOutputException)
    def test_intercepts_errors(self):
        # FakeInputException is not in ignore_errors, so it should be
        # re-raised as FakeOutputException (asserted by @raises).
        intercepted_function(raise_error=FakeInputException)

    def test_ignores_no_error(self):
        # No exception raised: the decorator must be a transparent no-op.
        intercepted_function()

    @raises(ValueError)
    def test_ignores_expected_errors(self):
        # ValueError is listed in ignore_errors, so it propagates unchanged.
        intercepted_function(raise_error=ValueError)

    @mock.patch('openedx.core.djangoapps.user_api.helpers.LOGGER')
    def test_logs_errors(self, mock_logger):
        # The decorator formats the original call (args/kwargs) into the
        # message of the replacement exception; pin that exact text here.
        exception = 'openedx.core.djangoapps.user_api.tests.test_helpers.FakeInputException'
        expected_log_msg = (
            u"An unexpected error occurred when calling 'intercepted_function' with arguments '()' and "
            u"keyword arguments '{'raise_error': <class '" + exception + u"'>}': FakeInputException()"
        )

        # Verify that the raised exception has the error message
        try:
            intercepted_function(raise_error=FakeInputException)
        except FakeOutputException as ex:
            self.assertEqual(ex.message, expected_log_msg)

        # Verify that the error logger is called
        # This will include the stack trace for the original exception
        # because it's called with log level "ERROR"
        mock_logger.exception.assert_called_once_with(expected_log_msg)
class FormDescriptionTest(TestCase):
    """Tests of helper functions which generate form descriptions."""

    def test_to_json(self):
        desc = FormDescription("post", "/submit")
        desc.add_field(
            "name",
            label="label",
            field_type="text",
            default="default",
            placeholder="placeholder",
            instructions="instructions",
            required=True,
            restrictions={
                "min_length": 2,
                "max_length": 10
            },
            error_messages={
                "required": "You must provide a value!"
            }
        )

        # Serialization renames snake_case inputs to the camelCase keys
        # the client expects ("defaultValue", "errorMessages").
        self.assertEqual(desc.to_json(), json.dumps({
            "method": "post",
            "submit_url": "/submit",
            "fields": [
                {
                    "name": "name",
                    "label": "label",
                    "type": "text",
                    "defaultValue": "default",
                    "placeholder": "placeholder",
                    "instructions": "instructions",
                    "required": True,
                    "restrictions": {
                        "min_length": 2,
                        "max_length": 10,
                    },
                    "errorMessages": {
                        "required": "You must provide a value!"
                    }
                }
            ]
        }))

    def test_invalid_field_type(self):
        # Unknown field types must be rejected rather than serialized.
        desc = FormDescription("post", "/submit")
        with self.assertRaises(InvalidFieldError):
            desc.add_field("invalid", field_type="invalid")

    def test_missing_options(self):
        # "select" fields require an options list; omitting it is an error.
        desc = FormDescription("post", "/submit")
        with self.assertRaises(InvalidFieldError):
            desc.add_field("name", field_type="select")

    def test_invalid_restriction(self):
        # Restriction keys are a closed set; unknown keys are rejected.
        desc = FormDescription("post", "/submit")
        with self.assertRaises(InvalidFieldError):
            desc.add_field("name", field_type="text", restrictions={"invalid": 0})
@ddt.ddt
class StudentViewShimTest(TestCase):
    "Tests of the student view shim."

    def setUp(self):
        super(StudentViewShimTest, self).setUp()
        # Captured by the stub view so tests can inspect what the shim
        # actually forwarded to the wrapped view.
        self.captured_request = None

    def test_strip_enrollment_action(self):
        view = self._shimmed_view(HttpResponse())
        request = HttpRequest()
        request.POST["enrollment_action"] = "enroll"
        request.POST["course_id"] = "edx/101/demo"
        view(request)

        # Expect that the enrollment action and course ID
        # were stripped out before reaching the wrapped view.
        self.assertNotIn("enrollment_action", self.captured_request.POST)
        self.assertNotIn("course_id", self.captured_request.POST)

    def test_include_analytics_info(self):
        view = self._shimmed_view(HttpResponse())
        request = HttpRequest()
        request.POST["analytics"] = json.dumps({
            "enroll_course_id": "edX/DemoX/Fall"
        })
        view(request)

        # Expect that the analytics course ID was passed to the view
        self.assertEqual(self.captured_request.POST.get("course_id"), "edX/DemoX/Fall")

    def test_third_party_auth_login_failure(self):
        # A 403 from the wrapped view is tagged as a third-party-auth failure.
        view = self._shimmed_view(
            HttpResponse(status=403),
            check_logged_in=True
        )
        response = view(HttpRequest())
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, "third-party-auth")

    def test_non_json_response(self):
        # Non-JSON bodies pass through the shim untouched.
        view = self._shimmed_view(HttpResponse(content="Not a JSON dict"))
        response = view(HttpRequest())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "Not a JSON dict")

    @ddt.data("redirect", "redirect_url")
    def test_ignore_redirect_from_json(self, redirect_key):
        # Redirect hints inside a JSON success payload are dropped;
        # the shim returns an empty 200 instead.
        view = self._shimmed_view(
            HttpResponse(content=json.dumps({
                "success": True,
                redirect_key: "/redirect"
            }))
        )
        response = view(HttpRequest())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, "")

    def test_error_from_json(self):
        # A JSON payload with success=False becomes a 400 whose body is
        # the payload's "value".
        view = self._shimmed_view(
            HttpResponse(content=json.dumps({
                "success": False,
                "value": "Error!"
            }))
        )
        response = view(HttpRequest())
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, "Error!")

    def test_preserve_headers(self):
        # Custom headers set by the wrapped view survive the shim.
        view_response = HttpResponse()
        view_response["test-header"] = "test"
        view = self._shimmed_view(view_response)
        response = view(HttpRequest())
        self.assertEqual(response["test-header"], "test")

    def test_check_logged_in(self):
        # check_logged_in=True rejects an anonymous request with a 403.
        view = self._shimmed_view(HttpResponse(), check_logged_in=True)
        response = view(HttpRequest())
        self.assertEqual(response.status_code, 403)

    def _shimmed_view(self, response, check_logged_in=False):  # pylint: disable=missing-docstring
        # Wrap a stub view that records the request and returns the canned
        # response, so each test can inspect what the shim forwarded.
        def stub_view(request):  # pylint: disable=missing-docstring
            self.captured_request = request
            return response
        return shim_student_view(stub_view, check_logged_in=check_logged_in)
| agpl-3.0 |
Apoyhtari/Irc-Bot | beautifulsoup4-4.1.2/build/lib/bs4/element.py | 82 | 49756 | import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
# Encoding used by encode()/__repr__ when no explicit encoding is given.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# True when running under Python 3 (this module targets Python 2).
PY3K = (sys.version_info[0] > 2)
# Matches runs of whitespace; used when splitting CDATA list attributes.
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self):
return setattr(self, attr)
return alias
class NamespacedAttribute(unicode):
    """A namespaced attribute name such as "xml:lang".

    Behaves as the full "prefix:name" string, while also exposing the
    prefix, local name and (optional) namespace URI as attributes.
    NOTE: subclasses ``unicode`` -- this module targets Python 2.
    """

    def __new__(cls, prefix, name, namespace=None):
        if name is None:
            # No local name: the attribute string is just the prefix.
            obj = unicode.__new__(cls, prefix)
        else:
            obj = unicode.__new__(cls, prefix + ":" + name)
        obj.prefix = prefix
        obj.name = name
        obj.namespace = namespace
        return obj
# Marker base class: subclasses rewrite themselves when the document is
# re-encoded (see Tag's substitution machinery).
class AttributeValueWithCharsetSubstitution(unicode):
    """A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'charset' attribute.

    When Beautiful Soup parses the markup '<meta charset="utf8">', the
    value of the 'charset' attribute will be one of these objects.
    """

    def __new__(cls, original_value):
        obj = unicode.__new__(cls, original_value)
        # Keep the value as parsed so it can be inspected later.
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        # On re-encoding, the charset attribute is replaced wholesale by
        # the name of the target encoding.
        return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
    """A generic stand-in for the value of a meta tag's 'content' attribute.

    When Beautiful Soup parses the markup:
     <meta http-equiv="content-type" content="text/html; charset=utf8">

    The value of the 'content' attribute will be one of these objects.
    """

    # Captures the "charset=..." clause so encode() can rewrite it in place.
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def __new__(cls, original_value):
        match = cls.CHARSET_RE.search(original_value)
        if match is None:
            # No substitution necessary.  Deliberately return a plain
            # unicode string (not an instance of this class).
            return unicode.__new__(unicode, original_value)

        obj = unicode.__new__(cls, original_value)
        obj.original_value = original_value
        return obj

    def encode(self, encoding):
        def rewrite(match):
            # Keep everything up to "charset=", swap in the new encoding.
            return match.group(1) + encoding
        return self.CHARSET_RE.sub(rewrite, self.original_value)
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text).

    Elements form a doubly linked structure along two axes: document
    order (previous_element/next_element) and sibling order
    (previous_sibling/next_sibling), plus a parent pointer.
    """

    # There are five possible values for the "formatter" argument passed in
    # to methods like encode() and prettify():
    #
    # "html" - All Unicode characters with corresponding HTML entities
    #   are converted to those entities on output.
    # "minimal" - Bare ampersands and angle brackets are converted to
    #   XML entities: &amp; &lt; &gt;
    # None - The null formatter. Unicode characters are never
    #   converted to entities.  This is not recommended, but it's
    #   faster than "minimal".
    # A function - This function will be called on every string that
    #  needs to undergo entity substitution.
    FORMATTERS = {
        "html" : EntitySubstitution.substitute_html,
        "minimal" : EntitySubstitution.substitute_xml,
        None : None
        }

    @classmethod
    def format_string(self, s, formatter='minimal'):
        """Format the given string using the given formatter."""
        # NOTE: declared @classmethod, so ``self`` here is really the class.
        if not callable(formatter):
            formatter = self.FORMATTERS.get(
                formatter, EntitySubstitution.substitute_xml)
        if formatter is None:
            output = s
        else:
            output = formatter(s)
        return output

    def setup(self, parent=None, previous_element=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous_element = previous_element
        if previous_element is not None:
            # Back-link: we become the successor of the previous element.
            self.previous_element.next_element = self
        self.next_element = None
        self.previous_sibling = None
        self.next_sibling = None
        if self.parent is not None and self.parent.contents:
            # We are appended after the parent's current last child.
            self.previous_sibling = self.parent.contents[-1]
            self.previous_sibling.next_sibling = self

    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3

    def replace_with(self, replace_with):
        """Replace this element in the tree with ``replace_with``."""
        if replace_with is self:
            return
        if replace_with is self.parent:
            raise ValueError("Cannot replace a Tag with its parent.")
        old_parent = self.parent
        my_index = self.parent.index(self)
        self.extract()
        old_parent.insert(my_index, replace_with)
        return self
    replaceWith = replace_with  # BS3

    def unwrap(self):
        """Replace this element with its own children."""
        my_parent = self.parent
        my_index = self.parent.index(self)
        self.extract()
        # Insert children in reverse so they end up in original order.
        for child in reversed(self.contents[:]):
            my_parent.insert(my_index, child)
        return self
    replace_with_children = unwrap
    replaceWithChildren = unwrap  # BS3

    def wrap(self, wrap_inside):
        """Wrap this element inside the given tag and return the wrapper."""
        me = self.replace_with(wrap_inside)
        wrap_inside.append(me)
        return wrap_inside

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent is not None:
            del self.parent.contents[self.parent.index(self)]

        # Find the two elements that would be next to each other if
        # this element (and any children) hadn't been parsed. Connect
        # the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element

        if self.previous_element is not None:
            self.previous_element.next_element = next_element
        if next_element is not None:
            next_element.previous_element = self.previous_element
        self.previous_element = None
        last_child.next_element = None

        self.parent = None
        if self.previous_sibling is not None:
            self.previous_sibling.next_sibling = self.next_sibling
        if self.next_sibling is not None:
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self

    def _last_descendant(self):
        "Finds the last element beneath this object to be parsed."
        last_child = self
        while hasattr(last_child, 'contents') and last_child.contents:
            last_child = last_child.contents[-1]
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant

    def insert(self, position, new_child):
        """Insert ``new_child`` at ``position`` among this element's
        children, splicing all four link directions accordingly."""
        if new_child is self:
            raise ValueError("Cannot insert a tag into itself.")
        if (isinstance(new_child, basestring)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)

        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                current_index = self.index(new_child)
                if current_index < position:
                    # We're moving this element further down the list
                    # of this object's children. That means that when
                    # we extract this element, our target index will
                    # jump down one.
                    position -= 1
            new_child.extract()

        new_child.parent = self
        previous_child = None
        if position == 0:
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            # Document-order predecessor is the deepest last descendant
            # of the preceding sibling, not the sibling itself.
            new_child.previous_element = previous_child._last_descendant()
        if new_child.previous_element is not None:
            new_child.previous_element.next_element = new_child

        new_childs_last_element = new_child._last_descendant()

        if position >= len(self.contents):
            new_child.next_sibling = None

            # Walk up the ancestors to find what follows us in document
            # order (the next sibling of the nearest ancestor that has one).
            parent = self
            parents_next_sibling = None
            while parents_next_sibling is None and parent is not None:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if parents_next_sibling is not None:
                    # We found the element that comes next in the document.
                    break
            if parents_next_sibling is not None:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                # The last element of this tag is the last element in
                # the document.
                new_childs_last_element.next_element = None
        else:
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling is not None:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child
        if new_childs_last_element.next_element is not None:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def insert_before(self, predecessor):
        """Makes the given element the immediate predecessor of this one.

        The two elements will have the same parent, and the given element
        will be immediately before this one.
        """
        if self is predecessor:
            raise ValueError("Can't insert an element before itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'before' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(predecessor, PageElement):
            predecessor.extract()
        index = parent.index(self)
        parent.insert(index, predecessor)

    def insert_after(self, successor):
        """Makes the given element the immediate successor of this one.

        The two elements will have the same parent, and the given element
        will be immediately after this one.
        """
        if self is successor:
            raise ValueError("Can't insert an element after itself.")
        parent = self.parent
        if parent is None:
            raise ValueError(
                "Element has no parent, so 'after' has no meaning.")
        # Extract first so that the index won't be screwed up if they
        # are siblings.
        if isinstance(successor, PageElement):
            successor.extract()
        index = parent.index(self)
        parent.insert(index+1, successor)

    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next  # BS3

    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                      **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.next_elements,
                              **kwargs)
    findAllNext = find_all_next  # BS3

    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                              **kwargs)
    findNextSibling = find_next_sibling  # BS3

    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings  # BS3
    fetchNextSiblings = find_next_siblings  # BS2

    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous  # BS3

    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                          **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                              **kwargs)
    findAllPrevious = find_all_previous  # BS3
    fetchPrevious = find_all_previous  # BS2

    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                              **kwargs)
    findPreviousSibling = find_previous_sibling  # BS3

    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings  # BS3
    fetchPreviousSiblings = find_previous_siblings  # BS2

    def find_parent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1)
        if l:
            r = l[0]
        return r
    findParent = find_parent  # BS3

    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._find_all(name, attrs, None, limit, self.parents,
                              **kwargs)
    findParents = find_parents  # BS3
    fetchParents = find_parents  # BS2

    @property
    def next(self):
        # Convenience alias for the document-order successor.
        return self.next_element

    @property
    def previous(self):
        # Convenience alias for the document-order predecessor.
        return self.previous_element

    # These methods do the real heavy lifting.

    def _find_one(self, method, name, attrs, text, **kwargs):
        # Run ``method`` with limit=1 and return the single hit (or None).
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        elif text is None and not limit and not attrs and not kwargs:
            # Optimization to find all tags.
            if name is True or name is None:
                return [element for element in generator
                        if isinstance(element, Tag)]
            # Optimization to find all tags with a given name.
            elif isinstance(name, basestring):
                return [element for element in generator
                        if isinstance(element, Tag) and element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    # These generators can be used to navigate starting from both
    # NavigableStrings and Tags.
    @property
    def next_elements(self):
        i = self.next_element
        while i is not None:
            yield i
            i = i.next_element

    @property
    def next_siblings(self):
        i = self.next_sibling
        while i is not None:
            yield i
            i = i.next_sibling

    @property
    def previous_elements(self):
        i = self.previous_element
        while i is not None:
            yield i
            i = i.previous_element

    @property
    def previous_siblings(self):
        i = self.previous_sibling
        while i is not None:
            yield i
            i = i.previous_sibling

    @property
    def parents(self):
        i = self.parent
        while i is not None:
            yield i
            i = i.parent

    # Methods for supporting CSS selectors.

    tag_name_re = re.compile('^[a-z0-9]+$')

    # /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
    #   \---/  \---/\-------------/    \-------/
    #     |      |         |               |
    #     |      |         |           The value
    #     |      |    ~,|,^,$,* or =
    #     |   Attribute
    #    Tag
    attribselect_re = re.compile(
        r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
        r'=?"?(?P<value>[^\]"]*)"?\]$'
        )

    def _attr_value_as_string(self, value, default=None):
        """Force an attribute value into a string representation.

        A multi-valued attribute will be converted into a
        space-separated string.
        """
        value = self.get(value, default)
        if isinstance(value, list) or isinstance(value, tuple):
            value = " ".join(value)
        return value

    def _attribute_checker(self, operator, attribute, value=''):
        """Create a function that performs a CSS selector operation.

        Takes an operator, attribute and optional value. Returns a
        function that will return True for elements that match that
        combination.
        """
        if operator == '=':
            # string representation of `attribute` is equal to `value`
            return lambda el: el._attr_value_as_string(attribute) == value
        elif operator == '~':
            # space-separated list representation of `attribute`
            # contains `value`
            def _includes_value(element):
                attribute_value = element.get(attribute, [])
                if not isinstance(attribute_value, list):
                    attribute_value = attribute_value.split()
                return value in attribute_value
            return _includes_value
        elif operator == '^':
            # string representation of `attribute` starts with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').startswith(value)
        elif operator == '$':
            # string representation of `attribute` ends with `value`
            return lambda el: el._attr_value_as_string(
                attribute, '').endswith(value)
        elif operator == '*':
            # string representation of `attribute` contains `value`
            return lambda el: value in el._attr_value_as_string(attribute, '')
        elif operator == '|':
            # string representation of `attribute` is either exactly
            # `value` or starts with `value` and then a dash.
            def _is_or_starts_with_dash(element):
                attribute_value = element._attr_value_as_string(attribute, '')
                return (attribute_value == value or attribute_value.startswith(
                        value + '-'))
            return _is_or_starts_with_dash
        else:
            # No operator: match on mere presence of the attribute.
            return lambda el: el.has_attr(attribute)

    def select(self, selector):
        """Perform a CSS selection operation on the current element."""
        # Tokens are whitespace-separated; each pass narrows
        # current_context to the elements matching the token so far.
        tokens = selector.split()
        current_context = [self]
        for index, token in enumerate(tokens):
            if tokens[index - 1] == '>':
                # already found direct descendants in last step. skip this
                # step.
                continue
            m = self.attribselect_re.match(token)
            if m is not None:
                # Attribute selector
                tag, attribute, operator, value = m.groups()
                if not tag:
                    tag = True
                checker = self._attribute_checker(operator, attribute, value)
                found = []
                for context in current_context:
                    found.extend(
                        [el for el in context.find_all(tag) if checker(el)])
                current_context = found
                continue

            if '#' in token:
                # ID selector
                tag, id = token.split('#', 1)
                if tag == "":
                    tag = True
                # IDs are assumed unique: only the first context is searched.
                el = current_context[0].find(tag, {'id': id})
                if el is None:
                    return []  # No match
                current_context = [el]
                continue

            if '.' in token:
                # Class selector
                tag_name, klass = token.split('.', 1)
                if not tag_name:
                    tag_name = True
                classes = set(klass.split('.'))
                found = []
                def classes_match(tag):
                    if tag_name is not True and tag.name != tag_name:
                        return False
                    if not tag.has_attr('class'):
                        return False
                    return classes.issubset(tag['class'])
                for context in current_context:
                    found.extend(context.find_all(classes_match))
                current_context = found
                continue

            if token == '*':
                # Star selector
                found = []
                for context in current_context:
                    found.extend(context.findAll(True))
                current_context = found
                continue

            if token == '>':
                # Child selector: consume the *next* token, restricted to
                # direct children (recursive=False).
                tag = tokens[index + 1]
                if not tag:
                    tag = True

                found = []
                for context in current_context:
                    found.extend(context.find_all(tag, recursive=False))
                current_context = found
                continue

            # Here we should just have a regular tag
            if not self.tag_name_re.match(token):
                return []
            found = []
            for context in current_context:
                found.extend(context.findAll(token))
            current_context = found
        return current_context

    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        return self.next_elements

    def nextSiblingGenerator(self):
        return self.next_siblings

    def previousGenerator(self):
        return self.previous_elements

    def previousSiblingGenerator(self):
        return self.previous_siblings

    def parentGenerator(self):
        return self.parents
class NavigableString(unicode, PageElement):
    """A piece of text within a document: a string that also carries the
    PageElement navigation pointers."""

    # Subclasses override these to wrap output (e.g. CData, Comment).
    PREFIX = ''
    SUFFIX = ''

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct from the plain unicode value.
        return (unicode(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))

    def output_ready(self, formatter="minimal"):
        # Entity-substitute the text, then add any subclass wrapper.
        output = self.format_string(self, formatter)
        return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
    """A NavigableString not subject to the normal formatting rules.

    The string will be passed into the formatter (to trigger side effects),
    but the return value will be ignored.
    """

    def output_ready(self, formatter="minimal"):
        """CData strings are passed into the formatter.
        But the return value is ignored."""
        self.format_string(self, formatter)
        # Emit the raw string, wrapped in the subclass's markup.
        return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
    """A CDATA section, rendered as <![CDATA[...]]>."""
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
    """A processing instruction, rendered as <?...?>."""
    PREFIX = u'<?'
    SUFFIX = u'?>'
class Comment(PreformattedString):
    """An HTML/XML comment, rendered as <!--...-->."""
    PREFIX = u'<!--'
    SUFFIX = u'-->'
class Declaration(PreformattedString):
    """A declaration, rendered as <!...!>."""
    PREFIX = u'<!'
    SUFFIX = u'!>'
class Doctype(PreformattedString):
    """A document type declaration, rendered as <!DOCTYPE ...>."""

    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype from a root-element name and optional
        public/system identifiers.

        A public id implies the PUBLIC form (with the system id appended
        bare, if present); a system id alone uses the SYSTEM form.
        """
        value = name
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            # When both ids are present, the system id follows the
            # public id without the SYSTEM keyword.
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            value += ' SYSTEM "%s"' % system_id

        return Doctype(value)

    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False):
"""Yield all child strings, possibly stripping them."""
for descendant in self.descendants:
if not isinstance(descendant, NavigableString):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(strip)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
    # Hash the rendered markup, so trees that render identically
    # hash identically.
    return hash(str(self))

def __getitem__(self, key):
    """tag[key] returns the value of the 'key' attribute and raises
    KeyError when the attribute is missing."""
    return self.attrs[key]

def __iter__(self):
    """Iterating over a tag iterates over its direct children."""
    return iter(self.contents)

def __len__(self):
    """len(tag) is the number of direct children."""
    return len(self.contents)

def __contains__(self, x):
    # Membership tests look at direct children only, not descendants.
    return x in self.contents

def __nonzero__(self):
    """A tag is always truthy, even with no contents (Python 2 hook)."""
    return True

def __setitem__(self, key, value):
    """tag[key] = value sets the 'key' attribute on the tag."""
    self.attrs[key] = value

def __delitem__(self, key):
    """del tag[key] removes the 'key' attribute; missing keys are a no-op."""
    self.attrs.pop(key, None)

def __call__(self, *args, **kwargs):
    """Calling a tag is shorthand for its find_all() method, e.g.
    tag('a') returns every A tag found within this tag."""
    return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
    """Returns true iff this tag has the same name, the same attributes,
    and the same contents (recursively) as the given tag."""
    if self is other:
        return True
    # Compare the cheap structural properties first; short-circuits
    # protect against objects that are not tag-like at all.
    same_shape = (
        hasattr(other, 'name') and
        hasattr(other, 'attrs') and
        hasattr(other, 'contents') and
        self.name == other.name and
        self.attrs == other.attrs and
        len(self) == len(other))
    if not same_shape:
        return False
    # Recursively compare children pairwise (lengths already match).
    return all(
        mine == theirs
        for mine, theirs in zip(self.contents, other.contents))

def __ne__(self, other):
    """Returns true iff this tag is not identical to the other tag,
    as defined in __eq__."""
    return not self == other
# --- Rendering entry points ---
# On Python 2, str() must return encoded bytes and unicode() text; on
# Python 3 all three names are aliased to the text renderer below.
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
# Text rendering: no byte-encoding step.
return self.decode()
def __str__(self):
# Python 2 str(): an encoded bytestring.
return self.encode()
if PY3K:
# Python 3 has no bytes/text split for str(); reuse the text renderer.
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
"""Render this tag as a bytestring in the given encoding."""
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# Render each attribute as either a bare name (valueless attribute)
# or a key="quoted value" pair, in sorted key order.
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
# Multi-valued attributes (e.g. class) become space-joined text.
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
# Work out the open/close tag shapes, including any namespace prefix.
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
# indent_level is None means "render flat"; otherwise pretty-print,
# indenting children one level deeper than this tag.
pretty_print = (indent_level is not None)
if pretty_print:
space = (' ' * (indent_level - 1))
indent_contents = indent_level + 1
else:
space = ''
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
# Assemble open tag, contents, close tag (with optional newlines
# and indentation) into a list, then join once at the end.
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if pretty_print:
s.append(space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if pretty_print and closeTag and self.next_sibling:
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
    """Pretty-print this tag: returns Unicode when no encoding is given,
    otherwise a bytestring in that encoding."""
    if encoding is None:
        return self.decode(True, formatter=formatter)
    return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
pretty_print = (indent_level is not None)
s = []
# Walk direct children: strings are formatted in place, nested tags
# recurse into decode() at the same indent level.
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
# When pretty-printing, strings are stripped and re-indented.
if text and indent_level:
text = text.strip()
if text:
if pretty_print:
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print:
s.append("\n")
return ''.join(s)
def encode_contents(
    self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
    formatter="minimal"):
    """Renders the contents of this tag as a bytestring."""
    # Render to text first, then apply the byte encoding.
    rendered = self.decode_contents(indent_level, encoding, formatter)
    return rendered.encode(encoding)

# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                   prettyPrint=False, indentLevel=0):
    """BS3 spelling of encode_contents(); a False prettyPrint disables
    indentation entirely (indent level None)."""
    if not prettyPrint:
        indentLevel = None
    return self.encode_contents(
        indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
         **kwargs):
    """Return only the first child of this Tag matching the given
    criteria."""
    # Delegate to find_all() with limit=1 and unwrap the single result.
    results = self.find_all(name, attrs, recursive, text, 1, **kwargs)
    if results:
        return results[0]
    return None

findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
             limit=None, **kwargs):
    """Extracts a list of Tag objects that match the given
    criteria. You can specify the name of the Tag and any
    attributes you want the Tag to have.

    The value of a key-value pair in the 'attrs' map can be a
    string, a list of strings, a regular expression object, or a
    callable that takes a string and returns whether or not the
    string matches for some custom definition of 'matches'. The
    same is true of the tag name."""
    # A recursive search walks every descendant; a shallow search only
    # considers direct children.
    generator = self.descendants if recursive else self.children
    return self._find_all(name, attrs, text, limit, generator, **kwargs)

findAll = find_all       # BS3
findChildren = find_all  # BS2
#Generator methods
@property
def children(self):
    """An iterator over this tag's direct children."""
    # iter() makes it explicit that callers receive an iterator, not
    # the underlying list.  XXX This seems to be untested.
    return iter(self.contents)

@property
def descendants(self):
    """A generator over every element beneath this tag, in document order."""
    if not len(self.contents):
        return
    # Follow next_element links until just past our last descendant.
    stop_node = self._last_descendant().next_element
    node = self.contents[0]
    while node is not stop_node:
        yield node
        node = node.next_element

# Old names for backwards compatibility
def childGenerator(self):
    """BS3 spelling of the 'children' property."""
    return self.children

def recursiveChildGenerator(self):
    """BS3 spelling of the 'descendants' property."""
    return self.descendants
# This was kind of misleading because has_key() (attributes) was
# different from __in__ (contents). has_key() is gone in Python 3,
# anyway.
has_key = has_attr
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
    """Build a matcher from a tag name, attribute criteria, and/or text.

    Every criterion is normalized up front via _normalize_search_value().
    """
    self.name = self._normalize_search_value(name)
    if not isinstance(attrs, dict):
        # Treat a non-dict value for attrs as a search for the 'class'
        # attribute.
        kwargs['class'] = attrs
        attrs = None
    if 'class_' in kwargs:
        # Treat class_="foo" as a search for the 'class' attribute,
        # overriding any non-dict value for attrs.
        kwargs['class'] = kwargs.pop('class_')
    if kwargs:
        # Fold keyword criteria into attrs without mutating the
        # caller's dictionary.
        attrs = dict(attrs, **kwargs) if attrs else kwargs
    self.attrs = dict(
        (key, self._normalize_search_value(value))
        for key, value in attrs.items())
    self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
"""Coerce a search criterion into one of the canonical forms used by
_matches(): Unicode string, callable, regex, bool, None, or a list
of such values."""
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
    """Readable summary: the text criterion if any, else "name|attrs"."""
    if self.text:
        return self.text
    return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
"""Check a tag (or a name/attrs pair) against this strainer's
criteria; return the matched object or None."""
found = None
markup = None
# Callers may pass either a Tag object or a (name, attrs) pair.
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
# A callable name criterion receives the raw name/attrs pair instead
# of being string-matched -- but only when no Tag object was given.
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
# Every attribute criterion must match; the attr map is built
# lazily on the first criterion.
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
# The text criterion, when present, must match too.
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
"""Dispatch on the markup's type and run the appropriate match;
return the matched object or None."""
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
"""Core matcher: compare one value against one criterion (string,
regex, callable, bool, iterable, or None).  NOTE(review): 'result'
below is never used, and an unrecognized criterion type falls off
the end returning None (falsy) -- kept as-is."""
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
# Otherwise any single value matching the criterion suffices.
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        """:param source: the SoupStrainer that produced these results."""
        # Bug fix: the original called list.__init__([]), which
        # initialized a brand-new throwaway list instead of self.
        # Initialize this instance properly.
        super(ResultSet, self).__init__()
        self.source = source
| mit |
jbtule/keyczar | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/gnulink.py | 19 | 2200 | """SCons.Tool.gnulink
Tool-specific initialization for the gnu linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gnulink.py 4043 2009/02/23 09:06:45 scons"
import SCons.Util
import link
# Linker programs that indicate a GNU-capable toolchain.
linkers = ['g++', 'gcc']

def generate(env):
    """Add Builders and construction variables for gnulink to an Environment."""
    link.generate(env)
    # HP-UX needs explicit flags for gcc to emit PIC shared objects.
    if env['PLATFORM'] == 'hpux':
        env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared -fPIC')
    # __RPATH is set to $_RPATH in the platform specification if that
    # platform supports it.
    env.Append(LINKFLAGS=['$__RPATH'])
    env['RPATHPREFIX'] = '-Wl,-rpath='
    env['RPATHSUFFIX'] = ''
    env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'

def exists(env):
    """Return a truthy value when one of the GNU linker front-ends is found."""
    return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Zendesk/Tickets/ListAllTickets.py | 5 | 4558 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListAllTickets
# Retrieves a list of all existing tickets.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
# Auto-generated Temboo SDK binding: one Choreography subclass per Choreo,
# wired to its input/result/execution companion classes below.
class ListAllTickets(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListAllTickets Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListAllTickets, self).__init__(temboo_session, '/Library/Zendesk/Tickets/ListAllTickets')
def new_input_set(self):
# Factory for the typed input container used by execute().
return ListAllTicketsInputSet()
def _make_result_set(self, result, path):
return ListAllTicketsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListAllTicketsChoreographyExecution(session, exec_id, path)
# Auto-generated: each setter simply forwards to InputSet._set_input.
class ListAllTicketsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListAllTickets
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Email(self, value):
"""
Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
"""
super(ListAllTicketsInputSet, self)._set_input('Email', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number of the results to be returned. Used together with the PerPage parameter to paginate a large set of results.)
"""
super(ListAllTicketsInputSet, self)._set_input('Page', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
"""
super(ListAllTicketsInputSet, self)._set_input('Password', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of results to return per page. Maximum is 100 and default is 100.)
"""
super(ListAllTicketsInputSet, self)._set_input('PerPage', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
"""
super(ListAllTicketsInputSet, self)._set_input('Server', value)
class ListAllTicketsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListAllTickets Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
# NOTE(review): the parameter name 'str' shadows the builtin; kept as-is
# because this is generated code and renaming would change the keyword API.
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
"""
return self._output.get('Response', None)
def get_NextPage(self):
"""
Retrieve the value for the "NextPage" output from this Choreo execution. ((integer) The index for the next page of results.)
"""
return self._output.get('NextPage', None)
def get_PreviousPage(self):
"""
Retrieve the value for the "PreviousPage" output from this Choreo execution. ((integer) The index for the previous page of results.)
"""
return self._output.get('PreviousPage', None)
# Binds an in-flight Choreo execution to the ListAllTickets result type.
class ListAllTicketsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListAllTicketsResultSet(response, path)
| gpl-3.0 |
bjuvensjo/scripts | vang/bitbucket/tests/test_has_tag.py | 1 | 3124 | from unittest.mock import call, patch
from pytest import raises
from vang.bitbucket.has_tag import has_tag
from vang.bitbucket.has_tag import main
from vang.bitbucket.has_tag import parse_args
import pytest
@pytest.fixture
def tags_fixture():
return [
(('project_key', 'repo_slug1'), {
'id': 'refs/tags/t1',
'displayId': 't1',
'type': 'TAG',
'latestCommit': 'f89ed5',
'latestChangeset': 'f89ed5',
'hash': '430f79'
}),
(('project_key', 'repo_slug2'), {
'id': 'refs/tags/t1',
'displayId': 't1',
'type': 'TAG',
'latestCommit': 'f89ed5',
'latestChangeset': 'f89ed5',
'hash': '430f79'
}),
]
@pytest.mark.parametrize("tag, expected", [('t1', True), ('t2', False)])
@patch('vang.bitbucket.has_tag.get_tags', autospec=True)
def test_has_tag(mock_get_tags, tag, expected, tags_fixture):
mock_get_tags.return_value = tags_fixture
assert [
(['project_key', 'repo_slug1'], expected),
(['project_key', 'repo_slug2'], expected),
] == list(
has_tag(
[
['project_key', 'repo_slug1'],
['project_key', 'repo_slug2'],
],
tag,
))
@patch('vang.bitbucket.has_tag.print')
@patch('vang.bitbucket.has_tag.get_repo_specs', autospec=True)
@patch('vang.bitbucket.has_tag.get_tags', autospec=True)
def test_main(mock_get_tags, mock_get_repo_specs, mock_print, tags_fixture):
mock_get_tags.return_value = tags_fixture
mock_get_repo_specs.return_value = [
['project_key', 'repo_slug1'],
['project_key', 'repo_slug2'],
]
main('t1', repos=['project_key/repo_slug1', 'project_key/repo_slug2'])
assert [
call(None, [
'project_key/repo_slug1',
'project_key/repo_slug2',
], None)
] == mock_get_repo_specs.mock_calls
assert [
call('project_key/repo_slug1, t1: True'),
call('project_key/repo_slug2, t1: True')
] == mock_print.mock_calls
@pytest.mark.parametrize("args", [
'-d d -r r',
'-d d -p p',
'-r r -p p',
'1 2',
])
def test_parse_args_raises(args):
with raises(SystemExit):
parse_args(args.split(' ') if args else args)
@pytest.mark.parametrize("args, expected", [
['t', {
'tag': 't',
'dirs': ['.'],
'projects': None,
'repos': None,
}],
[
't -d d1 d2',
{
'tag': 't',
'dirs': ['d1', 'd2'],
'projects': None,
'repos': None
}
],
[
't -r p/r1 p/r2',
{
'tag': 't',
'dirs': ['.'],
'projects': None,
'repos': ['p/r1', 'p/r2']
}
],
[
't -p p1 p2',
{
'tag': 't',
'dirs': ['.'],
'projects': ['p1', 'p2'],
'repos': None
}
],
])
def test_parse_args_valid(args, expected):
assert expected == parse_args(args.split(' ') if args else '').__dict__
| apache-2.0 |
FabriceSalvaire/simavr | PySimAvr/Core/test-core-hdl-parser.py | 1 | 2080 | #! /usr/bin/env python
####################################################################################################
#
# PySimAvr - Python binding to simavr.
# Copyright (C) 2015 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
#
# Logging
#
import PySimAvr.Logging.Logging as Logging
logger = Logging.setup_logging('pysimavr')
####################################################################################################
from PySimAvr.Core.CoreHdlParser import Parser
####################################################################################################
# Driver script: parse each non-comment line of operations.txt with the
# core HDL parser and print the resulting program.
parser = Parser()
with open('operations.txt') as f:
source = f.read()
# parser.test_lexer(source)
for line_number, line in enumerate(source.split('\n')):
# Skip blank lines and '#' comment lines.
if line and not line.startswith('#'):
print()
print('='*80)
print(line_number, ':', line)
# print()
# parser.test_lexer(line)
program = parser.parse(line)
print()
print(program)
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 |
overtherain/scriptfile | software/googleAppEngine/lib/webob_0_9/webob/datastruct.py | 35 | 1682 | """
Contains some data structures.
"""
from webob.util.dictmixin import DictMixin
class EnvironHeaders(DictMixin):
    """An object that represents the headers as present in a
    WSGI environment.

    This object is a wrapper (with no internal state) for a WSGI
    request object, representing the CGI-style HTTP_* keys as a
    dictionary.  Because a CGI environment can only hold one value
    for each key, this dictionary is single-valued (unlike outgoing
    headers).
    """

    def __init__(self, environ):
        self.environ = environ

    def _trans_name(self, name):
        """Map a header name like 'Content-Type' to its CGI environ key."""
        key = 'HTTP_' + name.replace('-', '_').upper()
        # Content-Length and Content-Type are the two CGI variables
        # that are *not* prefixed with HTTP_.
        if key == 'HTTP_CONTENT_LENGTH':
            return 'CONTENT_LENGTH'
        if key == 'HTTP_CONTENT_TYPE':
            return 'CONTENT_TYPE'
        return key

    def _trans_key(self, key):
        """Map a CGI environ key back to a header name, or None when
        the key does not represent a request header."""
        if key == 'CONTENT_TYPE':
            return 'Content-Type'
        if key == 'CONTENT_LENGTH':
            return 'Content-Length'
        if key.startswith('HTTP_'):
            return key[5:].replace('_', '-').title()
        return None

    def __getitem__(self, item):
        return self.environ[self._trans_name(item)]

    def __setitem__(self, item, value):
        self.environ[self._trans_name(item)] = value

    def __delitem__(self, item):
        del self.environ[self._trans_name(item)]

    def __iter__(self):
        # Yield only keys that translate to header names.
        for key in self.environ:
            name = self._trans_key(key)
            if name is not None:
                yield name

    def keys(self):
        return list(iter(self))

    def __contains__(self, item):
        return self._trans_name(item) in self.environ
infinnovation/micropython | tests/basics/gen_yield_from_close.py | 27 | 2226 | def gen():
yield 1
yield 2
yield 3
yield 4
def gen2():
yield -1
print((yield from gen()))
yield 10
yield 11
g = gen2()
print(next(g))
print(next(g))
g.close()
try:
print(next(g))
except StopIteration:
print("StopIteration")
# Now variation of same test, but with leaf generator
# swallowing GeneratorExit exception - its upstream gen
# generator should still receive one.
def gen3():
yield 1
try:
yield 2
except GeneratorExit:
print("leaf caught GeneratorExit and swallowed it")
return
yield 3
yield 4
def gen4():
yield -1
try:
print((yield from gen3()))
except GeneratorExit:
print("delegating caught GeneratorExit")
raise
yield 10
yield 11
g = gen4()
print(next(g))
print(next(g))
print(next(g))
g.close()
try:
print(next(g))
except StopIteration:
print("StopIteration")
# Yet another variation - leaf generator gets GeneratorExit,
# but raises StopIteration instead. This still should close chain properly.
def gen5():
yield 1
try:
yield 2
except GeneratorExit:
print("leaf caught GeneratorExit and raised StopIteration instead")
raise StopIteration(123)
yield 3
yield 4
def gen6():
yield -1
try:
print((yield from gen5()))
except GeneratorExit:
print("delegating caught GeneratorExit")
raise
yield 10
yield 11
g = gen6()
print(next(g))
print(next(g))
print(next(g))
g.close()
try:
print(next(g))
except StopIteration:
print("StopIteration")
# case where generator ignores the close request and yields instead
def gen7():
    """Generator that (illegally) yields again when asked to close;
    closing it must raise RuntimeError."""
    try:
        yield 123
    except GeneratorExit:
        yield 456
g = gen7()
print(next(g))
try:
g.close()
except RuntimeError:
print('RuntimeError')
# case where close is propagated up to a built-in iterator
def gen8():
g = range(2)
yield from g
g = gen8()
print(next(g))
g.close()
# case with a user-defined close method
class Iter:
    """Infinite user-defined iterator with a custom close() hook."""
    def __iter__(self):
        return self
    def __next__(self):
        return 1
    def close(self):
        print('close')

def gen9():
    """Delegates to Iter(); closing the generator calls Iter.close()."""
    yield from Iter()
g = gen9()
print(next(g))
g.close()
| mit |
fepe55/twitter | tweetdelete/views.py | 1 | 3148 | # -*- encoding: utf-8 -*-
from django.shortcuts import render
import tweetpony
from tweetdelete.forms import AuthForm
from tweetdelete.models import Data
from django.core.context_processors import csrf
CONSUMER_KEY = 'qoHxj86rXhfEbDsT00DflA'
CONSUMER_SECRET = 'PxRKaEkSlI3Il5jX7uUgAuOPR7WtGnMa8uw5WeI'
def get_api(request):
    """Build an authenticated tweetpony API from stored credentials.

    Returns a dict with key 'api' on success, a dict with key 'errors'
    on a tweetpony API failure, or delegates to authenticate() when no
    credentials have been stored yet.
    """
    try:
        access_data = Data.objects.all()[0]
        api = tweetpony.API(CONSUMER_KEY, CONSUMER_SECRET,
                            access_data.access_token,
                            access_data.access_token_secret)
    except tweetpony.APIError as err:
        return {
            'errors': [{'code': err.code, 'description': err.description}],
        }
    except IndexError:
        # No stored credentials yet: start the OAuth flow.  The original
        # used a bare `except:` here, which silently routed *every*
        # unexpected error into re-authentication; narrowed to the
        # empty-queryset case (indexing an empty QuerySet raises
        # IndexError).
        return authenticate(request)
    return {'api': api}
def authenticate(request, api=None):
"""OAuth flow: show the auth-URL form on GET; on POST, build an API
object from the submitted token/verifier and persist the credentials."""
if request.POST:
form = AuthForm(request.POST)
if form.is_valid():
# Fails because it is ANOTHER api object by the time we get here
token = form.cleaned_data['token']
verifier = form.cleaned_data['verifier']
try:
# NOTE(review): token/verifier are passed where the access-token
# pair is expected, and the authenticate() call below is commented
# out -- the token exchange looks unfinished; confirm.
api = tweetpony.API(CONSUMER_KEY, CONSUMER_SECRET, token, verifier)
#api.authenticate(verifier)
except tweetpony.APIError as err:
data = {
'errors' : [{'code': err.code, 'description' : err.description},],
}
return data
#return render(request, 'auth.html',{ 'data' : data, })
else:
# try/except/else: only runs when the API object was created.
access_data = Data(
access_token = api.access_token,
access_token_secret = api.access_token_secret,
)
access_data.save()
data = {
'api' : api,
}
return data
# NOTE(review): an invalid form falls through and returns None -- confirm.
else:
# GET request: create an unauthenticated client and show the auth URL.
try:
api = tweetpony.API(CONSUMER_KEY, CONSUMER_SECRET)
except tweetpony.APIError as err:
data = {
'errors' : [{'code': err.code, 'description' : err.description},],
}
return data
#return render(request, 'auth.html',{ 'data' : data })
url = api.get_auth_url()
form = AuthForm()
data = {
'url' : url,
'form' : form,
}
return data
#return render(request, 'auth.html', {'data' : data })
def principal(request):
"""Main view: fetch and render the user's recent tweets, or fall back
to the auth page when no API is available."""
data = get_api(request)
if 'api' in data.keys():
api = data['api']
else:
return render(request, 'auth.html', {'data' : data })
# NOTE(review): 'user' is never used below; the access appears to exist
# only to force an authenticated API call -- confirm.
user = api.user
#api.update_status(status = 'Testing')
a = 1
tweets_per_page = 3
timeline = api.user_timeline(count=tweets_per_page)
# Page backwards through the timeline (max_id) until a short page is
# returned or 4 tweets have been printed (a starts at 1, stops at 5).
while True:
for tweet in timeline:
# Python 2 print statement; NOTE(review): 'id' leaks out of the
# loop and would be undefined if the first page were empty.
print tweet.text
id = tweet.id
a+=1
if len(timeline) != tweets_per_page or a==5:
break
timeline += api.user_timeline(count=tweets_per_page, max_id=id-1)
data = {
'timeline': timeline,
}
return render(request, 'inicio.html',{ 'data' : data })
| gpl-2.0 |
jeenalee/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_doctest.py | 169 | 22823 | # encoding: utf-8
import sys
import _pytest._code
from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
import pytest
class TestDoctests:
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
""")
for x in (testdir.tmpdir, checkfile):
#print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestTextfile)
items, reprec = testdir.inline_genitems(w)
assert len(items) == 1
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
    """A failing example in a test_*.txt file is reported as a failure."""
    p = testdir.maketxtfile(test_doc="""
        >>> x = 1
        >>> x == 1
        False
    """)
    reprec = testdir.inline_run(p, )
    reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
    """--doctest-glob can match files outside the default test_*.txt pattern."""
    p = testdir.maketxtfile(xdoc="""
        >>> x = 1
        >>> x == 1
        False
    """)
    reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
    reprec.assertoutcome(failed=1)
def test_multiple_patterns(self, testdir):
    """Test support for multiple --doctest-glob arguments (#1255).
    """
    testdir.maketxtfile(xdoc="""
        >>> 1
        1
    """)
    testdir.makefile('.foo', test="""
        >>> 1
        1
    """)
    testdir.maketxtfile(test_normal="""
        >>> 1
        1
    """)
    expected = set(['xdoc.txt', 'test.foo', 'test_normal.txt'])
    assert set(x.basename for x in testdir.tmpdir.listdir()) == expected
    # With two globs only the two explicitly-matched files run.
    args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
    result = testdir.runpytest(*args)
    result.stdout.fnmatch_lines([
        '*test.foo *',
        '*xdoc.txt *',
        '*2 passed*',
    ])
    # Without globs only the default test_*.txt pattern applies.
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*test_normal.txt *',
        '*1 passed*',
    ])
def test_doctest_unexpected_exception(self, testdir):
    """An example that raises shows the source and the UNEXPECTED exception."""
    testdir.maketxtfile("""
        >>> i = 0
        >>> 0 / i
        2
    """)
    result = testdir.runpytest("--doctest-modules")
    result.stdout.fnmatch_lines([
        "*unexpected_exception*",
        "*>>> i = 0*",
        "*>>> 0 / i*",
        "*UNEXPECTED*ZeroDivision*",
    ])
def test_docstring_context_around_error(self, testdir):
    """Test that we show some context before the actual line of a failing
    doctest.
    """
    testdir.makepyfile('''
        def foo():
            """
            text-line-1
            text-line-2
            text-line-3
            text-line-4
            text-line-5
            text-line-6
            text-line-7
            text-line-8
            text-line-9
            text-line-10
            text-line-11
            >>> 1 + 1
            3

            text-line-after
            """
    ''')
    result = testdir.runpytest('--doctest-modules')
    result.stdout.fnmatch_lines([
        '*docstring_context_around_error*',
        '005*text-line-3',
        '006*text-line-4',
        '013*text-line-11',
        '014*>>> 1 + 1',
        'Expected:',
        '    3',
        'Got:',
        '    2',
    ])
    # lines below should be trimmed out
    assert 'text-line-2' not in result.stdout.str()
    assert 'text-line-after' not in result.stdout.str()
def test_doctest_linedata_missing(self, testdir):
    """When an example's source line can't be found, failure is still shown."""
    # A doctest inside a @property body has no resolvable line data.
    testdir.tmpdir.join('hello.py').write(_pytest._code.Source("""
        class Fun(object):
            @property
            def test(self):
                '''
                >>> a = 1
                >>> 1/0
                '''
        """))
    result = testdir.runpytest("--doctest-modules")
    result.stdout.fnmatch_lines([
        "*hello*",
        "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
        "*1/0*",
        "*UNEXPECTED*ZeroDivision*",
        "*1 failed*",
    ])
def test_doctest_unex_importerror(self, testdir):
    """An ImportError raised by a doctest-imported module is reported."""
    testdir.tmpdir.join("hello.py").write(_pytest._code.Source("""
        import asdalsdkjaslkdjasd
    """))
    testdir.maketxtfile("""
        >>> import hello
        >>>
    """)
    result = testdir.runpytest("--doctest-modules")
    result.stdout.fnmatch_lines([
        "*>>> import hello",
        "*UNEXPECTED*ImportError*",
        "*import asdals*",
    ])
def test_doctestmodule(self, testdir):
    """A failing module-level doctest fails under --doctest-modules."""
    p = testdir.makepyfile("""
        '''
        >>> x = 1
        >>> x == 1
        False
        '''
    """)
    reprec = testdir.inline_run(p, "--doctest-modules")
    reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
    """Doctests inside a package __init__.py report source lines correctly."""
    p = testdir.mkpydir("hello")
    p.join("__init__.py").write(_pytest._code.Source("""
        def somefunc():
            '''
            >>> i = 0
            >>> i + 1
            2
            '''
    """))
    result = testdir.runpytest(p, "--doctest-modules")
    result.stdout.fnmatch_lines([
        '004 *>>> i = 0',
        '005 *>>> i + 1',
        '*Expected:',
        "*    2",
        "*Got:",
        "*    1",
        "*:5: DocTestFailure"
    ])
def test_txtfile_failing(self, testdir):
    """Failing text-file doctests show numbered source and expected/got."""
    p = testdir.maketxtfile("""
        >>> i = 0
        >>> i + 1
        2
    """)
    result = testdir.runpytest(p, "-s")
    result.stdout.fnmatch_lines([
        '001 >>> i = 0',
        '002 >>> i + 1',
        'Expected:',
        "    2",
        "Got:",
        "    1",
        "*test_txtfile_failing.txt:2: DocTestFailure"
    ])
def test_txtfile_with_fixtures(self, testdir):
    """getfixture() is available inside text-file doctests."""
    p = testdir.maketxtfile("""
        >>> dir = getfixture('tmpdir')
        >>> type(dir).__name__
        'LocalPath'
    """)
    reprec = testdir.inline_run(p, )
    reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
    """usefixtures declared in the ini file applies to text-file doctests."""
    testdir.makeini("""
        [pytest]
        usefixtures = myfixture
    """)
    testdir.makeconftest("""
        import pytest
        @pytest.fixture
        def myfixture(monkeypatch):
            monkeypatch.setenv("HELLO", "WORLD")
    """)
    p = testdir.maketxtfile("""
        >>> import os
        >>> os.environ["HELLO"]
        'WORLD'
    """)
    reprec = testdir.inline_run(p, )
    reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
    """getfixture() is available inside module docstring doctests too."""
    p = testdir.makepyfile("""
        '''
        >>> dir = getfixture('tmpdir')
        >>> type(dir).__name__
        'LocalPath'
        '''
    """)
    reprec = testdir.inline_run(p, "--doctest-modules")
    reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
    """Module, function and second-function doctests all collect and pass."""
    p = testdir.makepyfile("""
        '''
        >>> dir = getfixture('tmpdir')
        >>> type(dir).__name__
        'LocalPath'
        '''
        def my_func():
            '''
            >>> magic = 42
            >>> magic - 42
            0
            '''
        def unuseful():
            pass
        def another():
            '''
            >>> import os
            >>> os is os
            True
            '''
    """)
    reprec = testdir.inline_run(p, "--doctest-modules")
    reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
    """Method doctests are independent: one can fail while another passes."""
    p = testdir.makepyfile("""
        class MyClass:
            def bad_meth(self):
                '''
                >>> magic = 42
                >>> magic
                0
                '''
            def nice_meth(self):
                '''
                >>> magic = 42
                >>> magic - 42
                0
                '''
    """)
    reprec = testdir.inline_run(p, "--doctest-modules")
    reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
    """NORMALIZE_WHITESPACE from the ini file makes trailing spaces pass."""
    testdir.makeini("""
        [pytest]
        doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
    """)
    p = testdir.makepyfile("""
        class MyClass:
            '''
            >>> a = "foo    "
            >>> print(a)
            foo
            '''
            pass
    """)
    reprec = testdir.inline_run(p, "--doctest-modules")
    reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
    """Without NORMALIZE_WHITESPACE, trailing spaces cause a failure."""
    testdir.makeini("""
        [pytest]
        doctest_optionflags = ELLIPSIS
    """)
    p = testdir.makepyfile("""
        class MyClass:
            '''
            >>> a = "foo    "
            >>> print(a)
            foo
            '''
            pass
    """)
    reprec = testdir.inline_run(p, "--doctest-modules")
    reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
    """doctest_optionflags from the ini also applies to glob-matched files."""
    testdir.makeini("""
        [pytest]
        doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
    """)
    p = testdir.maketxtfile(xdoc="""
        >>> a = "foo    "
        >>> print(a)
        foo
    """)
    reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
    reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
    """Glob-matched files fail on whitespace without NORMALIZE_WHITESPACE."""
    testdir.makeini("""
        [pytest]
        doctest_optionflags = ELLIPSIS
    """)
    p = testdir.maketxtfile(xdoc="""
        >>> a = "foo    "
        >>> print(a)
        foo
    """)
    reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
    reprec.assertoutcome(failed=1, passed=0)
def test_contains_unicode(self, testdir):
    """Fix internal error with docstrings containing non-ascii characters.
    """
    testdir.makepyfile(u'''
        # encoding: utf-8
        def foo():
            """
            >>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
            'anything'
            """
    ''')
    result = testdir.runpytest('--doctest-modules')
    # The run must fail normally (no internal error / traceback crash).
    result.stdout.fnmatch_lines([
        'Got nothing',
        '* 1 failed in*',
    ])
def test_ignore_import_errors_on_doctest(self, testdir):
    """--doctest-ignore-import-errors skips the module but the txt doctest still fails."""
    p = testdir.makepyfile("""
        import asdf
        def add_one(x):
            '''
            >>> add_one(1)
            2
            '''
            return x + 1
    """)
    reprec = testdir.inline_run(p, "--doctest-modules",
                                "--doctest-ignore-import-errors")
    reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
    """
    #713: Fix --junit-xml option when used with --doctest-modules.
    """
    p = testdir.makepyfile("""
        def foo():
            '''
            >>> 1 + 1
            3
            '''
            pass
    """)
    # Combining junit-xml with doctest collection must not crash.
    reprec = testdir.inline_run(p, "--doctest-modules",
                                "--junit-xml=junit.xml")
    reprec.assertoutcome(failed=1)
class TestLiterals:
    """Tests for the ALLOW_UNICODE / ALLOW_BYTES doctest option flags."""

    @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
    def test_allow_unicode(self, testdir, config_mode):
        """Test that doctests which output unicode work in all python versions
        tested by pytest when the ALLOW_UNICODE option is used (either in
        the ini file or by an inline comment).
        """
        if config_mode == 'ini':
            testdir.makeini('''
                [pytest]
                doctest_optionflags = ALLOW_UNICODE
            ''')
            comment = ''
        else:
            comment = '#doctest: +ALLOW_UNICODE'
        testdir.maketxtfile(test_doc="""
            >>> b'12'.decode('ascii') {comment}
            '12'
        """.format(comment=comment))
        testdir.makepyfile(foo="""
            def foo():
                '''
                >>> b'12'.decode('ascii') {comment}
                '12'
                '''
        """.format(comment=comment))
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=2)

    @pytest.mark.parametrize('config_mode', ['ini', 'comment'])
    def test_allow_bytes(self, testdir, config_mode):
        """Test that doctests which output bytes work in all python versions
        tested by pytest when the ALLOW_BYTES option is used (either in
        the ini file or by an inline comment)(#1287).
        """
        if config_mode == 'ini':
            testdir.makeini('''
                [pytest]
                doctest_optionflags = ALLOW_BYTES
            ''')
            comment = ''
        else:
            comment = '#doctest: +ALLOW_BYTES'
        testdir.maketxtfile(test_doc="""
            >>> b'foo'  {comment}
            'foo'
        """.format(comment=comment))
        testdir.makepyfile(foo="""
            def foo():
                '''
                >>> b'foo'  {comment}
                'foo'
                '''
        """.format(comment=comment))
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=2)

    def test_unicode_string(self, testdir):
        """Test that doctests which output unicode fail in Python 2 when
        the ALLOW_UNICODE option is not used. The same test should pass
        in Python 3.
        """
        testdir.maketxtfile(test_doc="""
            >>> b'12'.decode('ascii')
            '12'
        """)
        reprec = testdir.inline_run()
        # Expected outcome depends on the interpreter's repr of str.
        passed = int(sys.version_info[0] >= 3)
        reprec.assertoutcome(passed=passed, failed=int(not passed))

    def test_bytes_literal(self, testdir):
        """Test that doctests which output bytes fail in Python 3 when
        the ALLOW_BYTES option is not used. The same test should pass
        in Python 2 (#1287).
        """
        testdir.maketxtfile(test_doc="""
            >>> b'foo'
            'foo'
        """)
        reprec = testdir.inline_run()
        passed = int(sys.version_info[0] == 2)
        reprec.assertoutcome(passed=passed, failed=int(not passed))
class TestDoctestSkips:
    """
    If all examples in a doctest are skipped due to the SKIP option, then
    the tests should be SKIPPED rather than PASSED. (#957)
    """

    @pytest.fixture(params=['text', 'module'])
    def makedoctest(self, testdir, request):
        # Factory fixture: writes the given doctest either as a .txt file
        # or as a module-level docstring, depending on the param.
        def makeit(doctest):
            mode = request.param
            if mode == 'text':
                testdir.maketxtfile(doctest)
            else:
                assert mode == 'module'
                testdir.makepyfile('"""\n%s"""' % doctest)
        return makeit

    def test_one_skipped(self, testdir, makedoctest):
        """A doctest with one skipped and one passing example passes."""
        makedoctest("""
            >>> 1 + 1 # doctest: +SKIP
            2
            >>> 2 + 2
            4
        """)
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=1)

    def test_one_skipped_failed(self, testdir, makedoctest):
        """A failing non-skipped example still fails the doctest."""
        makedoctest("""
            >>> 1 + 1 # doctest: +SKIP
            2
            >>> 2 + 2
            200
        """)
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(failed=1)

    def test_all_skipped(self, testdir, makedoctest):
        """When every example is skipped, the item reports SKIPPED."""
        makedoctest("""
            >>> 1 + 1 # doctest: +SKIP
            2
            >>> 2 + 2 # doctest: +SKIP
            200
        """)
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(skipped=1)
class TestDoctestAutoUseFixtures:
    """Interaction of autouse fixtures (all scopes) with doctest items."""

    # All fixture scopes exercised by the parametrized tests below.
    SCOPES = ['module', 'session', 'class', 'function']

    def test_doctest_module_session_fixture(self, testdir):
        """Test that session fixtures are initialized for doctest modules (#768)
        """
        # session fixture which changes some global data, which will
        # be accessed by doctests in a module
        testdir.makeconftest("""
            import pytest
            import sys
            @pytest.yield_fixture(autouse=True, scope='session')
            def myfixture():
                assert not hasattr(sys, 'pytest_session_data')
                sys.pytest_session_data = 1
                yield
                del sys.pytest_session_data
        """)
        testdir.makepyfile(foo="""
            import sys
            def foo():
                '''
                >>> assert sys.pytest_session_data == 1
                '''
            def bar():
                '''
                >>> assert sys.pytest_session_data == 1
                '''
        """)
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines('*2 passed*')

    @pytest.mark.parametrize('scope', SCOPES)
    @pytest.mark.parametrize('enable_doctest', [True, False])
    def test_fixture_scopes(self, testdir, scope, enable_doctest):
        """Test that auto-use fixtures work properly with doctest modules.
        See #1057 and #1100.
        """
        testdir.makeconftest('''
            import pytest
            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                return 99
        '''.format(scope=scope))
        testdir.makepyfile(test_1='''
            def test_foo():
                """
                >>> getfixture('auto') + 1
                100
                """
            def test_bar():
                assert 1
        ''')
        params = ('--doctest-modules',) if enable_doctest else ()
        # The doctest inside test_foo's docstring adds a third passing item.
        passes = 3 if enable_doctest else 2
        result = testdir.runpytest(*params)
        result.stdout.fnmatch_lines(['*=== %d passed in *' % passes])

    @pytest.mark.parametrize('scope', SCOPES)
    @pytest.mark.parametrize('autouse', [True, False])
    @pytest.mark.parametrize('use_fixture_in_doctest', [True, False])
    def test_fixture_module_doctest_scopes(self, testdir, scope, autouse,
                                           use_fixture_in_doctest):
        """Test that auto-use fixtures work properly with doctest files.
        See #1057 and #1100.
        """
        testdir.makeconftest('''
            import pytest
            @pytest.fixture(autouse={autouse}, scope="{scope}")
            def auto(request):
                return 99
        '''.format(scope=scope, autouse=autouse))
        if use_fixture_in_doctest:
            testdir.maketxtfile(test_doc="""
                >>> getfixture('auto')
                99
            """)
        else:
            testdir.maketxtfile(test_doc="""
                >>> 1 + 1
                2
            """)
        result = testdir.runpytest('--doctest-modules')
        assert 'FAILURES' not in str(result.stdout.str())
        result.stdout.fnmatch_lines(['*=== 1 passed in *'])

    @pytest.mark.parametrize('scope', SCOPES)
    def test_auto_use_request_attributes(self, testdir, scope):
        """Check that all attributes of a request in an autouse fixture
        behave as expected when requested for a doctest item.
        """
        # For doctest text files there is no module/class/function, so the
        # corresponding request attributes must be None.
        testdir.makeconftest('''
            import pytest
            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                if "{scope}" == 'module':
                    assert request.module is None
                if "{scope}" == 'class':
                    assert request.cls is None
                if "{scope}" == 'function':
                    assert request.function is None
                return 99
        '''.format(scope=scope))
        testdir.maketxtfile(test_doc="""
            >>> 1 + 1
            2
        """)
        result = testdir.runpytest('--doctest-modules')
        assert 'FAILURES' not in str(result.stdout.str())
        result.stdout.fnmatch_lines(['*=== 1 passed in *'])
| mpl-2.0 |
rplevka/selenium | py/selenium/webdriver/common/touch_actions.py | 71 | 5966 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Touch Actions implementation
"""
from selenium.webdriver.remote.command import Command
class TouchActions(object):
    """Builder for touchscreen gestures.

    Works like ActionChains: each call queues a deferred action on this
    object and returns ``self`` so calls can be chained; nothing is sent
    to the driver until :meth:`perform` is invoked.
    """

    def __init__(self, driver):
        """
        Creates a new TouchActions object.

        :Args:
         - driver: The WebDriver instance which performs user actions.
           It should be with touchscreen enabled.
        """
        self._driver = driver
        self._actions = []

    def _enqueue(self, action):
        # Internal helper: store a deferred action and return self so the
        # public gesture methods all support chaining.
        self._actions.append(action)
        return self

    def perform(self):
        """
        Performs all stored actions.
        """
        for action in self._actions:
            action()

    def tap(self, on_element):
        """
        Taps on a given element.

        :Args:
         - on_element: The element to tap.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.SINGLE_TAP, {'element': on_element.id}))

    def double_tap(self, on_element):
        """
        Double taps on a given element.

        :Args:
         - on_element: The element to tap.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.DOUBLE_TAP, {'element': on_element.id}))

    def tap_and_hold(self, xcoord, ycoord):
        """
        Touch down at given coordinates.

        :Args:
         - xcoord: X Coordinate to touch down.
         - ycoord: Y Coordinate to touch down.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.TOUCH_DOWN,
            {'x': int(xcoord), 'y': int(ycoord)}))

    def move(self, xcoord, ycoord):
        """
        Move held tap to specified location.

        :Args:
         - xcoord: X Coordinate to move.
         - ycoord: Y Coordinate to move.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.TOUCH_MOVE,
            {'x': int(xcoord), 'y': int(ycoord)}))

    def release(self, xcoord, ycoord):
        """
        Release previously issued tap 'and hold' command at specified location.

        :Args:
         - xcoord: X Coordinate to release.
         - ycoord: Y Coordinate to release.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.TOUCH_UP,
            {'x': int(xcoord), 'y': int(ycoord)}))

    def scroll(self, xoffset, yoffset):
        """
        Touch and scroll, moving by xoffset and yoffset.

        :Args:
         - xoffset: X offset to scroll to.
         - yoffset: Y offset to scroll to.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.TOUCH_SCROLL,
            {'xoffset': int(xoffset), 'yoffset': int(yoffset)}))

    def scroll_from_element(self, on_element, xoffset, yoffset):
        """
        Touch and scroll starting at on_element, moving by xoffset and yoffset.

        :Args:
         - on_element: The element where scroll starts.
         - xoffset: X offset to scroll to.
         - yoffset: Y offset to scroll to.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.TOUCH_SCROLL,
            {'element': on_element.id,
             'xoffset': int(xoffset),
             'yoffset': int(yoffset)}))

    def long_press(self, on_element):
        """
        Long press on an element.

        :Args:
         - on_element: The element to long press.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.LONG_PRESS, {'element': on_element.id}))

    def flick(self, xspeed, yspeed):
        """
        Flicks, starting anywhere on the screen.

        :Args:
         - xspeed: The X speed in pixels per second.
         - yspeed: The Y speed in pixels per second.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.FLICK,
            {'xspeed': int(xspeed), 'yspeed': int(yspeed)}))

    def flick_element(self, on_element, xoffset, yoffset, speed):
        """
        Flick starting at on_element, and moving by the xoffset and yoffset
        with specified speed.

        :Args:
         - on_element: Flick will start at center of element.
         - xoffset: X offset to flick to.
         - yoffset: Y offset to flick to.
         - speed: Pixels per second to flick.
        """
        return self._enqueue(lambda: self._driver.execute(
            Command.FLICK,
            {'element': on_element.id,
             'xoffset': int(xoffset),
             'yoffset': int(yoffset),
             'speed': int(speed)}))

    # Context manager so TouchActions can be used in a 'with .. as' statements.
    def __enter__(self):
        return self  # Return created instance of self.

    def __exit__(self, _type, _value, _traceback):
        pass  # Do nothing, does not require additional cleanup.
rsivapr/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Minimal reference implementation of the perceptron algorithm.

    Used as an oracle against which sklearn's Perceptron is compared.
    Labels are expected to be in {-1, +1}.
    """

    def __init__(self, n_iter=1):
        self.n_iter = n_iter

    def fit(self, X, y):
        """Run ``n_iter`` full passes of the perceptron update over (X, y)."""
        n_features = X.shape[1]
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for _ in range(self.n_iter):
            for sample, target in zip(X, y):
                # Update weights and bias only on misclassified samples.
                if self.predict(sample)[0] != target:
                    self.w += target * sample
                    self.b += target

    def project(self, X):
        """Signed decision value of X (distance from the hyperplane, unscaled)."""
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        """Return the sign (-1, 0 or +1) of the decision function for X."""
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    """sklearn's Perceptron reaches reasonable accuracy on shuffled iris,
    for both dense and sparse input."""
    for data in (X, X_csr):
        model = Perceptron(n_iter=30, shuffle=False)
        model.fit(data, y)
        assert_true(model.score(data, y) >= 0.7)
def test_perceptron_correctness():
    """Reference and sklearn perceptrons learn identical weights on iris
    reduced to a binary {-1, +1} problem."""
    y_bin = np.where(y == 1, 1, -1)

    reference = MyPerceptron(n_iter=2)
    reference.fit(X, y_bin)

    sk_clf = Perceptron(n_iter=2, shuffle=False)
    sk_clf.fit(X, y_bin)

    assert_array_almost_equal(reference.w, sk_clf.coef_.ravel())
def test_undefined_methods():
    """Probability methods are not exposed on Perceptron and must raise."""
    clf = Perceptron()
    for attr in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, getattr, clf, attr)
| bsd-3-clause |
mrshu/scikit-learn | sklearn/ensemble/forest.py | 1 | 49130 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe, Brian Holt
# License: BSD 3
import itertools
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..feature_selection.selector_mixin import SelectorMixin
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import array2d, check_random_state, check_arrays, safe_asarray
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from .base import BaseEnsemble
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(n_trees, forest, X, y,
                          sample_mask, X_argsorted, seed, verbose):
    """Private function used to build a batch of trees within a job."""
    # One RNG per job: jobs stay independent yet reproducible from `seed`.
    random_state = check_random_state(seed)
    trees = []

    for i in xrange(n_trees):
        if verbose > 1:
            print("building tree %d of %d" % (i + 1, n_trees))
        # Each tree is re-seeded so it gets its own random stream.
        seed = random_state.randint(MAX_INT)

        tree = forest._make_estimator(append=False)
        tree.set_params(compute_importances=forest.compute_importances)
        tree.set_params(random_state=check_random_state(seed))

        if forest.bootstrap:
            # Bootstrap: fit on a resample drawn with replacement and
            # remember the drawn indices for later OOB scoring.
            n_samples = X.shape[0]
            indices = random_state.randint(0, n_samples, n_samples)
            tree.fit(X[indices], y[indices],
                     sample_mask=sample_mask, X_argsorted=X_argsorted,
                     check_input=False)
            tree.indices_ = indices
        else:
            tree.fit(X, y,
                     sample_mask=sample_mask, X_argsorted=X_argsorted,
                     check_input=False)

        trees.append(tree)

    return trees
def _parallel_predict_proba(trees, X, n_classes, n_outputs):
    """Private function used to compute a batch of predictions within a job."""
    n_samples = X.shape[0]

    if n_outputs == 1:
        # Single output: accumulate probabilities in one (n_samples,
        # n_classes) array.
        proba = np.zeros((n_samples, n_classes))

        for tree in trees:
            proba_tree = tree.predict_proba(X)

            if n_classes == tree.n_classes_:
                proba += proba_tree
            else:
                # The tree saw only a subset of the classes: scatter its
                # columns into the positions of the full class set.
                for j, c in enumerate(tree.classes_):
                    proba[:, c] += proba_tree[:, j]
    else:
        # Multi-output: one probability matrix per output.
        proba = []

        for k in xrange(n_outputs):
            proba.append(np.zeros((n_samples, n_classes[k])))

        for tree in trees:
            proba_tree = tree.predict_proba(X)

            for k in xrange(n_outputs):
                if n_classes[k] == tree.n_classes_[k]:
                    proba[k] += proba_tree[k]
                else:
                    for j, c in enumerate(tree.classes_[k]):
                        proba[k][:, c] += proba_tree[k][:, j]

    return proba
def _parallel_predict_regression(trees, X):
"""Private function used to compute a batch of predictions within a job."""
return sum(tree.predict(X) for tree in trees)
def _partition_trees(forest):
"""Private function used to partition trees between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), forest.n_estimators)
else:
n_jobs = min(forest.n_jobs, forest.n_estimators)
# Partition trees between jobs
n_trees = [int(forest.n_estimators / n_jobs)] * n_jobs
for i in xrange(forest.n_estimators % n_jobs):
n_trees[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_trees[i - 1]
return n_jobs, n_trees, starts
def _parallel_X_argsort(X):
"""Private function used to sort the features of X."""
return np.asarray(np.argsort(X.T, axis=1).T, dtype=np.int32, order="F")
def _partition_features(forest, n_total_features):
"""Private function used to partition features between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), n_total_features)
else:
n_jobs = min(forest.n_jobs, n_total_features)
# Partition features between jobs
n_features = [n_total_features / n_jobs] * n_jobs
for i in xrange(n_total_features % n_jobs):
n_features[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_features[i - 1]
return n_jobs, n_features, starts
class BaseForest(BaseEnsemble, SelectorMixin):
    """Base class for forests of trees.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 compute_importances=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)

        self.bootstrap = bootstrap
        self.compute_importances = compute_importances
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state

        # Fitted attributes, populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.feature_importances_ = None

        self.verbose = verbose

    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data.

        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = array2d(X, dtype=np.float32, order='C')
        return np.array([est.tree_.apply(X) for est in self.estimators_]).T

    def fit(self, X, y):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        Returns
        -------
        self : object
            Returns self.
        """
        self.random_state = check_random_state(self.random_state)

        # Precompute some data
        X, y = check_arrays(X, y, sparse_format="dense")
        if (getattr(X, "dtype", None) != DTYPE or
                X.ndim != 2 or not X.flags.fortran):
            # The tree code requires Fortran-ordered float32 input.
            X = array2d(X, dtype=DTYPE, order="F")

        n_samples, self.n_features_ = X.shape

        if self.bootstrap:
            sample_mask = None
            X_argsorted = None
        else:
            if self.oob_score:
                raise ValueError("Out of bag estimation only available"
                                 " if bootstrap=True")
            # Without bootstrap, presort the feature columns once (in
            # parallel, feature-sliced) and share the result across trees.
            sample_mask = np.ones((n_samples,), dtype=np.bool)

            n_jobs, _, starts = _partition_features(self, self.n_features_)

            all_X_argsorted = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
                delayed(_parallel_X_argsort)(
                    X[:, starts[i]:starts[i + 1]])
                for i in xrange(n_jobs))

            X_argsorted = np.asfortranarray(np.hstack(all_X_argsorted))

        y = np.atleast_1d(y)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if isinstance(self.base_estimator, ClassifierMixin):
            # Classification: record the class sets and, for multi-output,
            # re-encode labels as indices into each output's class set.
            y = np.copy(y)

            if self.n_outputs_ == 1:
                self.classes_ = np.unique(y)
                self.n_classes_ = len(self.classes_)
            else:
                self.classes_ = []
                self.n_classes_ = []

                for k in xrange(self.n_outputs_):
                    unique = np.unique(y[:, k])
                    self.classes_.append(unique)
                    self.n_classes_.append(unique.shape[0])
                    y[:, k] = np.searchsorted(unique, y[:, k])
        else:
            # Regression: no class bookkeeping.
            if self.n_outputs_ == 1:
                self.classes_ = None
                self.n_classes_ = 1
            else:
                self.classes_ = [None] * self.n_outputs_
                self.n_classes_ = [1] * self.n_outputs_

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        # Assign chunk of trees to jobs
        n_jobs, n_trees, _ = _partition_trees(self)

        # Parallel loop
        all_trees = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_build_trees)(
                n_trees[i],
                self,
                X,
                y,
                sample_mask,
                X_argsorted,
                self.random_state.randint(MAX_INT),
                verbose=self.verbose)
            for i in xrange(n_jobs))

        # Reduce
        self.estimators_ = [tree for tree in itertools.chain(*all_trees)]

        # Calculate out of bag predictions and score
        if self.oob_score:
            if isinstance(self, ClassifierMixin):
                self.oob_decision_function_ = []
                self.oob_score_ = 0.0

                # Normalize single-output attributes to lists so the
                # per-output loop below handles both cases uniformly.
                n_classes_ = self.n_classes_
                classes_ = self.classes_

                if self.n_outputs_ == 1:
                    n_classes_ = [n_classes_]
                    classes_ = [classes_]

                predictions = []

                for k in xrange(self.n_outputs_):
                    predictions.append(np.zeros((n_samples,
                                                 n_classes_[k])))

                for estimator in self.estimators_:
                    # Only samples NOT drawn in this tree's bootstrap
                    # contribute to its out-of-bag prediction.
                    mask = np.ones(n_samples, dtype=np.bool)
                    mask[estimator.indices_] = False
                    p_estimator = estimator.predict_proba(X[mask, :])

                    if self.n_outputs_ == 1:
                        p_estimator = [p_estimator]

                    for k in xrange(self.n_outputs_):
                        predictions[k][mask, :] += p_estimator[k]

                for k in xrange(self.n_outputs_):
                    if (predictions[k].sum(axis=1) == 0).any():
                        warn("Some inputs do not have OOB scores. "
                             "This probably means too few trees were used "
                             "to compute any reliable oob estimates.")

                    decision = (predictions[k] /
                                predictions[k].sum(axis=1)[:, np.newaxis])
                    self.oob_decision_function_.append(decision)
                    self.oob_score_ += (np.mean(y[:, k] ==
                                        classes_[k].take(
                                            np.argmax(predictions[k], axis=1),
                                            axis=0)))

                if self.n_outputs_ == 1:
                    self.oob_decision_function_ = \
                        self.oob_decision_function_[0]

                self.oob_score_ /= self.n_outputs_
            else:
                # Regression:
                predictions = np.zeros((n_samples, self.n_outputs_))
                n_predictions = np.zeros((n_samples, self.n_outputs_))

                for estimator in self.estimators_:
                    mask = np.ones(n_samples, dtype=np.bool)
                    mask[estimator.indices_] = False
                    p_estimator = estimator.predict(X[mask, :])

                    if self.n_outputs_ == 1:
                        p_estimator = p_estimator[:, np.newaxis]

                    predictions[mask, :] += p_estimator
                    n_predictions[mask, :] += 1

                if (n_predictions == 0).any():
                    warn("Some inputs do not have OOB scores. "
                         "This probably means too few trees were used "
                         "to compute any reliable oob estimates.")
                    # Avoid dividing by zero for never-OOB samples.
                    n_predictions[n_predictions == 0] = 1

                predictions /= n_predictions
                self.oob_prediction_ = predictions

                if self.n_outputs_ == 1:
                    self.oob_prediction_ = \
                        self.oob_prediction_.reshape((n_samples, ))

                self.oob_score_ = 0.0

                for k in xrange(self.n_outputs_):
                    self.oob_score_ += r2_score(y[:, k], predictions[:, k])

                self.oob_score_ /= self.n_outputs_

        # Sum the importances
        if self.compute_importances:
            self.feature_importances_ = \
                sum(tree.feature_importances_ for tree in self.estimators_) \
                / self.n_estimators

        return self
class ForestClassifier(BaseForest, ClassifierMixin):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
             base_estimator,
             n_estimators=10,
             estimator_params=tuple(),
             bootstrap=False,
             compute_importances=False,
             oob_score=False,
             n_jobs=1,
             random_state=None,
             verbose=0):
    # Pure pass-through constructor: all configuration is handled by
    # BaseForest; subclasses supply the concrete base_estimator.
    super(ForestClassifier, self).__init__(
        base_estimator,
        n_estimators=n_estimators,
        estimator_params=estimator_params,
        bootstrap=bootstrap,
        compute_importances=compute_importances,
        oob_score=oob_score,
        n_jobs=n_jobs,
        random_state=random_state,
        verbose=verbose)
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the majority
prediction of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
n_samples = len(X)
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
X,
self.n_classes_,
self.n_outputs_)
for i in xrange(n_jobs))
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in xrange(1, len(all_proba)):
proba += all_proba[j]
proba /= self.n_estimators
else:
for j in xrange(1, len(all_proba)):
for k in xrange(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in xrange(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the mean predicted class log-probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(BaseForest, RegressorMixin):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i]:starts[i + 1]], X)
for i in xrange(n_jobs))
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of classifical
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features` on regression
problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array, shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifical
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature mportances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array, shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split.
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature mportances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array, shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
compute_importances : boolean, optional (default=True)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature mportances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array, shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as trees in the forest.
The dimensionality of the resulting representation is approximately
``n_estimators * 2 ** max_depth``.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
Maximum depth of each tree.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = 1
def fit(self, X, y=None):
"""Fit estimator.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
"""
self.fit_transform(X, y)
return self
def fit_transform(self, X, y=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = safe_asarray(X)
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y)
self.one_hot_encoder_ = OneHotEncoder()
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data to be transformed.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
srcLurker/home-assistant | homeassistant/components/shell_command.py | 28 | 2181 | """
Exposes regular shell commands as services.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/shell_command/
"""
import logging
import subprocess
import shlex
import voluptuous as vol
from homeassistant.helpers import template
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
DOMAIN = 'shell_command'
_LOGGER = logging.getLogger(__name__)
# Config format: a mapping of service-name slugs to shell command strings
# under the `shell_command:` domain key.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        cv.slug: cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Setup the shell_command component.

    Registers one service per configured command; each service runs its
    shell command, optionally rendering templates from the call data.
    """
    conf = config.get(DOMAIN, {})
    # Maps the raw configured command string to a (prog, args,
    # args_compiled) tuple so each template is compiled only once.
    cache = {}
    def service_handler(call):
        """Execute a shell command service."""
        cmd = conf[call.service]
        if cmd in cache:
            prog, args, args_compiled = cache[cmd]
        elif ' ' not in cmd:
            # Bare program name: nothing that could contain a template.
            prog = cmd
            args = None
            args_compiled = None
            cache[cmd] = prog, args, args_compiled
        else:
            # Split off the program; the remainder may contain templates
            # rendered against the service call data.
            prog, args = cmd.split(' ', 1)
            args_compiled = template.Template(args, hass)
            cache[cmd] = prog, args, args_compiled
        if args_compiled:
            try:
                rendered_args = args_compiled.render(call.data)
            except TemplateError as ex:
                _LOGGER.exception('Error rendering command template: %s', ex)
                return
        else:
            rendered_args = None
        # NOTE(review): if a template happens to render to exactly the raw
        # argument text, this branch treats it as "no template" and runs
        # through the shell.
        if rendered_args == args:
            # no template used. default behavior
            shell = True
        else:
            # template used. Break into list and use shell=False for security
            cmd = [prog] + shlex.split(rendered_args)
            shell = False
        try:
            subprocess.call(cmd, shell=shell,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL)
        except subprocess.SubprocessError:
            _LOGGER.exception('Error running command: %s', cmd)
    # One service per configured command name, all sharing the handler.
    for name in conf.keys():
        hass.services.register(DOMAIN, name, service_handler)
    return True
| mit |
TeamHG-Memex/hgprofiler | lib/model/group.py | 1 | 1326 | from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import relationship
from model import Base
# Association table backing the many-to-many Group <-> Site relationship.
group_join_site = Table(
    'group_join_site',
    Base.metadata,
    Column('group_id', Integer, ForeignKey('group.id'), primary_key=True),
    Column('site_id', Integer, ForeignKey('site.id'), primary_key=True),
)
class Group(Base):
    ''' Data model for a named group of sites. '''
    __tablename__ = 'group'
    __table_args__ = (
        UniqueConstraint('name', name='group_name'),
    )
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False)
    # A group owns zero or more sites via the association table.
    sites = relationship('Site', secondary=group_join_site)
    def __init__(self, name, sites):
        ''' Constructor. '''
        self.name = name
        self.sites = sites
    def as_dict(self):
        ''' Return dictionary representation of this group. '''
        # Serialize member sites, ordered alphabetically by site name.
        serialized_sites = sorted(
            (site.as_dict() for site in self.sites),
            key=lambda entry: entry['name']
        )
        return {
            'id': self.id,
            'name': self.name,
            'sites': serialized_sites,
        }
| apache-2.0 |
SaptakS/open-event-orga-server | app/api/helpers/utils.py | 7 | 7224 | import json
from hashlib import md5
from flask import request
from flask.ext.restplus import Resource as RestplusResource
from flask_restplus import Model, fields, reqparse
from app.helpers.data import update_version
from app.models.event import Event as EventModel
from app.api.helpers.error_docs import (
notfound_error_model,
notauthorized_error_model,
validation_error_model,
invalidservice_error_model,
)
from .helpers import get_object_list, get_object_or_404, get_object_in_event, \
create_model, validate_payload, delete_model, update_model, \
handle_extra_payload, get_paginated_list, fix_attribute_names
# Pagination defaults shared by all paginated endpoints.
DEFAULT_PAGE_START = 1
DEFAULT_PAGE_LIMIT = 20
# Swagger response docs: status code -> (description, error model).
POST_RESPONSES = {
    400: ('Validation error', validation_error_model),
    401: ('Authentication failure', notauthorized_error_model),
    404: ('Event does not exist', notfound_error_model),
    201: 'Resource created successfully'
}
PUT_RESPONSES = {
    400: ('Validation Error', validation_error_model),
    401: ('Authentication failure', notauthorized_error_model),
    404: ('Object/Event not found', notfound_error_model)
}
SERVICE_RESPONSES = {
    404: ('Service not found', notfound_error_model),
    400: ('Service does not belong to event', invalidservice_error_model),
}
# Parameters for a paginated response
PAGE_PARAMS = {
    'start': {
        'description': 'Serial number to start from',
        'type': int,
        'default': DEFAULT_PAGE_START
    },
    'limit': {
        'description': 'Limit on the number of results',
        'type': int,
        'default': DEFAULT_PAGE_LIMIT
    },
}
# ETag Header (required=False by default)
ETAG_HEADER_DEFN = [
    'If-None-Match', 'ETag saved by client for cached resource'
]
# Base Api Model for a paginated response
PAGINATED_MODEL = Model('PaginatedModel', {
    'start': fields.Integer,
    'limit': fields.Integer,
    'count': fields.Integer,
    'next': fields.String,
    'previous': fields.String
})
# Custom Resource Class
class Resource(RestplusResource):
    # Extends the flask-restplus Resource with HTTP ETag support:
    # GET responses answer If-None-Match with 304 when unchanged, and
    # POST responses carry an ETag header for the created resource.
    def dispatch_request(self, *args, **kwargs):
        resp = super(Resource, self).dispatch_request(*args, **kwargs)
        # ETag checking.
        if request.method == 'GET':
            old_etag = request.headers.get('If-None-Match', '')
            # Generate hash of the serialized response body.
            # NOTE(review): md5() of a str works on Python 2 only; on
            # Python 3 the JSON string would need to be encoded first.
            data = json.dumps(resp)
            new_etag = md5(data).hexdigest()
            if new_etag == old_etag:
                # Resource has not changed
                return '', 304
            else:
                # Resource has changed, send new ETag value
                return resp, 200, {'ETag': new_etag}
        elif request.method == 'POST':
            # Grab just the response data
            # Exclude status code and headers
            # (assumes POST handlers return a (data, status, headers) tuple)
            resp_data = resp[0]
            data = json.dumps(resp_data)
            etag = md5(data).hexdigest()
            # Add ETag to response headers
            resp[2].update({'ETag': etag})
        return resp
# Base class for Paginated Resource
class PaginatedResourceBase():
    """
    Paginated Resource Helper class
    This includes basic properties used in the class
    """
    # Class-level request parser providing the standard pagination query
    # string arguments (?start=&limit=) with their module-wide defaults.
    parser = reqparse.RequestParser()
    parser.add_argument('start', type=int, default=DEFAULT_PAGE_START)
    parser.add_argument('limit', type=int, default=DEFAULT_PAGE_LIMIT)
# DAO for Models
class BaseDAO:
    """
    DAO for a basic independent model.

    Wraps the generic CRUD helper functions around a single model class,
    optionally validating payloads against the restplus API models
    supplied at construction time.
    """
    # When a subclass sets this, every write bumps the corresponding
    # event version counter (see update_version()).
    version_key = None
    is_importing = False  # temp key to set to True when an import operation is underway

    def __init__(self, model, post_api_model=None, put_api_model=None):
        self.model = model
        self.post_api_model = post_api_model
        # Fall back to the POST model when no dedicated PUT model is given.
        self.put_api_model = put_api_model if put_api_model else post_api_model

    def get(self, id_):
        """Return the instance with primary key `id_`, or abort with 404."""
        return get_object_or_404(self.model, id_)

    def list(self, **kwargs):
        """Return all instances matching the given filter kwargs."""
        return get_object_list(self.model, **kwargs)

    def paginated_list(self, url=None, args=None, **kwargs):
        """Return a paginated listing.

        `args` defaults to an empty dict per call; the previous mutable
        default argument (`args={}`) was shared between calls.
        """
        return get_paginated_list(self.model, url=url,
                                  args={} if args is None else args,
                                  **kwargs)

    def create(self, data, validate=True):
        """Validate (optionally) and persist a new instance."""
        if validate:
            data = self.validate(data, self.post_api_model)
        item = create_model(self.model, data)
        self.update_version(item.id)
        return item

    def update(self, id_, data, validate=True):
        """Validate (optionally, without required-field checks) and update."""
        if validate:
            data = self.validate_put(data, self.put_api_model)
        item = update_model(self.model, id_, data)
        self.update_version(id_)
        return item

    def delete(self, id_):
        """Delete the instance with primary key `id_` and bump the version."""
        item = delete_model(self.model, id_)
        self.update_version(id_)
        return item

    def validate(self, data, model=None, check_required=True):
        """Validate `data` against `model` (defaults to the POST model)."""
        if not model:
            model = self.post_api_model
        if model:
            data = handle_extra_payload(data, model)
            validate_payload(data, model, check_required=check_required)
            data = fix_attribute_names(data, model)
        return data

    def validate_put(self, data, model=None):
        """
        Abstraction over validate with check_required set to False
        """
        return self.validate(data, model=model, check_required=False)

    def update_version(self, event_id):
        """
        Update version of the component of the event
        """
        if self.version_key:
            update_version(event_id, False, self.version_key)

    # Helper functions
    def _del(self, data, fields):
        """
        Safe delete fields from payload: returns a copy of `data` with
        every listed field removed; missing fields are ignored and the
        original dict is left untouched.
        """
        data_copy = data.copy()
        for field in fields:
            if field in data:
                del data_copy[field]
        return data_copy
# DAO for Service Models
class ServiceDAO(BaseDAO):
    """
    Data Access Object for service models like microlocations,
    speakers and so. Every operation is scoped to a parent event.
    """
    def get(self, event_id, sid):
        """Return service `sid` belonging to `event_id` (404 otherwise)."""
        return get_object_in_event(self.model, sid, event_id)

    def list(self, event_id, **kwargs):
        """List services of an event; 404s when the event itself is missing."""
        # Check if an event with `event_id` exists
        get_object_or_404(EventModel, event_id)
        return get_object_list(self.model, event_id=event_id, **kwargs)

    def paginated_list(self, url=None, args=None, **kwargs):
        # `args` defaults to a fresh dict per call; the previous mutable
        # default argument (`args={}`) was shared between calls.
        return get_paginated_list(self.model, url=url,
                                  args={} if args is None else args,
                                  **kwargs)

    def create(self, event_id, data, url, validate=True):
        """Create a service under `event_id`.

        Returns (item, 201, {'Location': ...}) so the API layer can emit
        the created resource with its Location header.
        """
        if validate:
            data = self.validate(data)
        item = create_model(self.model, data, event_id=event_id)
        self.update_version(event_id)
        # Return created resource with a 201 status code and its Location
        # (url) in the header.
        resource_location = url + '/' + str(item.id)
        return item, 201, {'Location': resource_location}

    def update(self, event_id, service_id, data, validate=True):
        """Update service `service_id` of `event_id` and bump the version."""
        if validate:
            data = self.validate_put(data)
        item = update_model(self.model, service_id, data, event_id)
        self.update_version(event_id)
        return item

    def delete(self, event_id, service_id):
        """Delete service `service_id` of `event_id` and bump the version."""
        item = delete_model(self.model, service_id, event_id=event_id)
        self.update_version(event_id)
        return item
# store task results in case of testing
# state and info
TASK_RESULTS = {}
| gpl-3.0 |
alqfahad/odoo | addons/account_sequence/__openerp__.py | 261 | 1904 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Entries Sequence Numbering',
'version': '1.1',
'category': 'Accounting & Finance',
'description': """
This module maintains internal sequence number for accounting entries.
======================================================================
Allows you to configure the accounting sequences to be maintained.
You can customize the following attributes of the sequence:
-----------------------------------------------------------
* Prefix
* Suffix
* Next Number
* Increment Number
* Number Padding
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['account'],
'data': [
'account_sequence_data.xml',
'account_sequence_installer_view.xml',
'account_sequence.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RydrDojo/Ridr_app | pylotVenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base prober for multi-byte encodings.

    Combines a coding state machine (byte-sequence legality) with a
    character distribution analyzer (frequency plausibility). Concrete
    subclasses are expected to install both in their __init__.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Both set by subclasses before feed() is called.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Last two bytes seen, carried across feed() calls so characters
        # split over chunk boundaries are still analyzed.
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset prober, state machine and distribution analyzer."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # Abstract: concrete probers return their encoding's name.
        pass

    def feed(self, aBuf):
        """Consume a chunk of bytes and return the updated detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence for this encoding: give up.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was recognised; feed it (with its
                # byte length) to the distribution analyzer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the previous chunk boundary.
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the final byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: enough data and high confidence => positive answer.
            if (self._mDistributionAnalyzer.got_enough_data() and
                (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| mit |
qiankunshe/sky_engine | sky/tools/download_sky_shell.py | 5 | 1263 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
def download(base_url, out_dir, name):
url = '%s/%s' % (base_url, name)
dst = os.path.join(out_dir, name)
print 'Downloading', url
subprocess.call([ 'curl', '-o', dst, url ])
def main():
    """Read a build revision id from a file and download the matching
    sky_shell binaries from Google Cloud Storage into out_dir."""
    parser = argparse.ArgumentParser(description='Downloads sky_shell from Google storage')
    parser.add_argument('revision_file')
    parser.add_argument('out_dir')
    args = parser.parse_args()
    out_dir = args.out_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    revision = None
    with open(args.revision_file, 'r') as f:
        revision = f.read()
    # NOTE(review): the revision file is used verbatim; a trailing newline
    # would corrupt the URL -- confirm the file contains none.
    base_url = 'https://storage.googleapis.com/mojo/sky/shell/linux-x64/%s' % revision
    download(base_url, out_dir, 'sky_shell')
    download(base_url, out_dir, 'icudtl.dat')
    download(base_url, out_dir, 'sky_snapshot')
    # curl does not preserve permissions: restore the exec bits.
    subprocess.call([ 'chmod', 'a+x', os.path.join(out_dir, 'sky_shell' )])
    subprocess.call([ 'chmod', 'a+x', os.path.join(out_dir, 'sky_snapshot' )])

if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
Kongsea/tensorflow | tensorflow/contrib/learn/python/learn/utils/gc.py | 45 | 6164 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# Create the directories.
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# Create a simple parser that pulls the export_version from the directory.
path_regex = "^" + re.escape(base_dir) + "/(\\d+)$"
def parser(path):
match = re.match(path_regex, path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print(every_fifth(path_list)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print(largest_three(all_paths)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print(both(all_paths)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# Delete everything not in 'both'.
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
# A single gc-able item: its filesystem path plus the export version the
# parser extracted from it (None when the parser could not).
Path = collections.namedtuple('Path', 'path export_version')
def largest_export_versions(n):
  """Creates a filter that keeps the largest n export versions.

  Args:
    n: number of versions to keep.

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    # Rank every path that actually has an export_version; ties on the
    # version are broken by the later list position, as before.
    candidates = [(path.export_version, idx)
                  for idx, path in enumerate(paths)
                  if path.export_version is not None]
    return sorted(paths[idx] for _, idx in heapq.nlargest(n, candidates))
  return keep
def one_of_every_n_export_versions(n):
  """Creates a filter that keeps one of every n export versions.

  Args:
    n: interval size.

  Returns:
    A filter function that keeps exactly one path from each interval
    [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
    interval the largest is kept.
  """
  def keep(paths):
    """Keep the largest-versioned path from each interval."""
    best_in_interval = {}
    for path in paths:
      version = path.export_version
      if version is None:
        # Skip missing export_versions.
        continue
      # Map version v to interval floor((v - 1) / n), with v == 0 pinned
      # to interval 0.
      interval = math.floor((version - 1) / n) if version else 0
      incumbent = best_in_interval.get(interval)
      if incumbent is None or incumbent.export_version < version:
        best_in_interval[interval] = path
    return sorted(best_in_interval.values())
  return keep
def mod_export_version(n):
  """Creates a filter that keeps every export that is a multiple of n.

  Args:
    n: step size.

  Returns:
    A filter function that keeps paths whose export_version % n == 0.
  """
  def keep(paths):
    return sorted(path for path in paths if path.export_version % n == 0)
  return keep
def union(lf, rf):
  """Creates a filter that keeps the union of two filters.

  Args:
    lf: first filter
    rf: second filter

  Returns:
    A filter function keeping every path kept by either input filter.
  """
  def keep(paths):
    return sorted(set(lf(paths)) | set(rf(paths)))
  return keep
def negation(f):
  """Negate a filter.

  Args:
    f: filter function to invert

  Returns:
    A filter function that keeps exactly the paths f rejects.
  """
  def keep(paths):
    return sorted(set(paths) - set(f(paths)))
  return keep
def get_paths(base_dir, parser):
  """Gets a list of Paths in a given directory.

  Args:
    base_dir: directory.
    parser: a function which gets the raw Path and can augment it with
      information such as the export_version, or ignore the path by
      returning None. An example parser may extract the export version
      from a path such as "/tmp/exports/100" and another may extract from
      a full file name such as "/tmp/checkpoint-99.out".

  Returns:
    A sorted list of Paths contained in the base directory with the
    parsing function applied.
    By default the following fields are populated,
      - Path.path
    The parsing function is responsible for populating,
      - Path.export_version
  """
  raw_paths = gfile.ListDirectory(base_dir)
  paths = []
  for r in raw_paths:
    # Entries the parser maps to None (or any falsy value) are dropped.
    p = parser(Path(os.path.join(compat.as_str_any(base_dir),
                                 compat.as_str_any(r)),
                    None))
    if p:
      paths.append(p)
  return sorted(paths)
| apache-2.0 |
dzan/xenOnArm | tools/python/logging/logging-0.4.9.2/test/log_test3.py | 42 | 3360 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests new fileConfig (not yet a complete test).
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.config
def doLog(logger):
    """Emit one test message at each of the five standard severity levels."""
    for emit, message in ((logger.debug, "Debug"),
                          (logger.info, "Info"),
                          (logger.warning, "Warning"),
                          (logger.error, "Error"),
                          (logger.critical, "Critical")):
        emit(message)
def main():
    """Exercise log_test3.ini: log at every level through a hierarchy of
    loggers and print where each batch of records should show up."""
    # Handlers/levels come entirely from the fileConfig ini.
    logging.config.fileConfig("log_test3.ini")
    logger = logging.getLogger(None)
    print "---------------------------------------------------"
    print "-- Logging to root; messages appear on console only"
    print "---------------------------------------------------"
    doLog(logger)
    print "----------------------------------------------------------------------"
    print "-- Logging to log02; messages appear on console and in file python.log"
    print "----------------------------------------------------------------------"
    logger = logging.getLogger("log02")
    doLog(logger)
    print "--------------------------------------------------------------------------"
    print "-- Logging to log02.log03; messages appear on console, in file python.log,"
    print "-- and at logrecv.py tcp (if running. <= DEBUG messages will not appear)."
    print "--------------------------------------------------------------------------"
    logger = logging.getLogger("log02.log03")
    doLog(logger)
    print "-----------------------------------------------------------------------"
    print "-- Logging to log02.log03.log04; messages appear only at logrecv.py udp"
    print "-- (if running. <= INFO messages will not appear)."
    print "-----------------------------------------------------------------------"
    logger = logging.getLogger("log02.log03.log04")
    doLog(logger)
    print "--------------------------------------------------------------------"
    print "-- Logging to log02.log03.log04.log05.log06; messages appear at"
    print "-- logrecv.py udp (if running. < CRITICAL messages will not appear)."
    print "--------------------------------------------------------------------"
    logger = logging.getLogger("log02.log03.log04.log05.log06")
    doLog(logger)
    print "-- All done."
    # Flush and close all handlers before the process exits.
    logging.shutdown()

if __name__ == "__main__":
    main()
QKaiser/pynessus | pynessus/models/vulnerability.py | 1 | 1817 | from nessusobject import NessusObject
class Vulnerability(NessusObject):
    """A Nessus vulnerability entry: one plugin hit with severity info.

    Thin property wrapper over the fields returned by the Nessus
    vulnerability listing API. (Fix: trailing extraction residue fused to
    the last statement has been removed; it made the module unparsable.)
    """

    def __init__(self, server):
        super(Vulnerability, self).__init__(server)
        self._id = None
        self._count = 0
        self._plugin_id = 0
        # NOTE(review): initialised to 0 although it later holds a name
        # string -- preserved as-is; confirm the intended default.
        self._plugin_name = 0
        self._plugin_family = None
        self._vuln_index = 0
        self._severity = 0
        self._severity_index = 0
        self._host = None

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        # Only this setter coerces its value; the others store as given.
        self._id = int(value)

    @property
    def count(self):
        return self._count

    @count.setter
    def count(self, value):
        self._count = value

    @property
    def plugin_id(self):
        return self._plugin_id

    @plugin_id.setter
    def plugin_id(self, value):
        self._plugin_id = value

    @property
    def plugin_name(self):
        return self._plugin_name

    @plugin_name.setter
    def plugin_name(self, value):
        self._plugin_name = value

    @property
    def plugin_family(self):
        return self._plugin_family

    @plugin_family.setter
    def plugin_family(self, value):
        self._plugin_family = value

    @property
    def vuln_index(self):
        return self._vuln_index

    @vuln_index.setter
    def vuln_index(self, value):
        self._vuln_index = value

    @property
    def severity(self):
        return self._severity

    @severity.setter
    def severity(self, value):
        self._severity = value

    @property
    def severity_index(self):
        return self._severity_index

    @severity_index.setter
    def severity_index(self, value):
        self._severity_index = value

    @property
    def host(self):
        return self._host

    @host.setter
    def host(self, value):
        self._host = value
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/python/monkey.py | 62 | 2227 | # -*- test-case-name: twisted.test.test_monkey -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import
class MonkeyPatcher(object):
    """
    Cover up attributes with new objects. Neat for monkey-patching things for
    unit-testing purposes.
    """

    def __init__(self, *patches):
        # Patches registered for application, as (obj, name, value) triples.
        self._patchesToApply = []
        # Original attribute values saved at patch time, same triple format.
        self._originals = []
        for patch in patches:
            self.addPatch(*patch)

    def addPatch(self, obj, name, value):
        """
        Register a patch: attribute C{name} on C{obj} will be set to
        C{value} when patch()/runWithPatches() runs; restore() undoes it.
        """
        self._patchesToApply.append((obj, name, value))

    def _alreadyPatched(self, obj, name):
        """
        Return True when (obj, name) already has a saved original value.
        """
        return any((target, attribute) == (obj, name)
                   for target, attribute, _ in self._originals)

    def patch(self):
        """
        Apply every registered patch, saving each attribute's original
        value the first time that attribute is touched.
        """
        for target, attribute, replacement in self._patchesToApply:
            if not self._alreadyPatched(target, attribute):
                original = getattr(target, attribute)
                self._originals.append((target, attribute, original))
            setattr(target, attribute, replacement)

    def restore(self):
        """
        Put every saved original value back, most recently saved first.
        """
        while self._originals:
            target, attribute, original = self._originals.pop()
            setattr(target, attribute, original)

    def runWithPatches(self, f, *args, **kw):
        """
        Run f(*args, **kw) with all patches applied, restoring the
        originals afterwards even when f raises.
        """
        self.patch()
        try:
            return f(*args, **kw)
        finally:
            self.restore()
| gpl-2.0 |
chouseknecht/ansible | lib/ansible/modules/network/iosxr/iosxr_command.py | 13 | 6835 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_command
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Run commands on remote devices running Cisco IOS XR
description:
- Sends arbitrary commands to an IOS XR node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(iosxr_config) to configure iosxr devices.
extends_documentation_fragment: iosxr
notes:
- This module works with C(network_cli). See L(the IOS-XR Platform Options,../network/user_guide/platform_iosxr.html).
- This module does not support C(netconf) connection.
- Tested against IOS XR 6.1.3
options:
commands:
description:
- List of commands to send to the remote iosxr device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
iosxr_command:
commands: show version
- name: run show version and check to see if output contains iosxr
iosxr_command:
commands: show version
wait_for: result[0] contains IOS-XR
- name: run multiple commands on remote nodes
iosxr_command:
commands:
- show version
- show interfaces
- { command: example command that prompts, prompt: expected prompt, answer: yes}
- name: run multiple commands and evaluate the output
iosxr_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains IOS-XR
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.network.common.utils import to_lines
from ansible.module_utils.network.iosxr.iosxr import run_commands, iosxr_argument_spec
from ansible.module_utils.network.iosxr.iosxr import command_spec
def parse_commands(module, warnings):
    """Extract the command list from the module parameters.

    In check mode, anything that is not a 'show' command is dropped from
    the list and a warning is recorded instead of executing it. Commands
    may be plain strings or dicts carrying a 'command' key.
    """
    commands = module.params['commands']
    for entry in list(commands):
        try:
            command_text = entry['command']
        except Exception:
            command_text = entry
        if not module.check_mode or command_text.startswith('show'):
            continue
        # Check mode + non-show command: warn and drop it.
        warnings.append(
            'Only show commands are supported when using check mode, not '
            'executing %s' % command_text
        )
        commands.remove(entry)
    return commands
def main():
    """Entry point: run the commands, poll the wait_for conditionals, and
    exit with stdout/stdout_lines or fail with the unmet conditions."""
    argument_spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )
    argument_spec.update(iosxr_argument_spec)
    argument_spec.update(command_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    result = {'changed': False, 'warnings': warnings}
    commands = parse_commands(module, warnings)
    wait_for = module.params['wait_for'] or list()

    try:
        conditionals = [Conditional(c) for c in wait_for]
    except AttributeError as exc:
        module.fail_json(msg=to_text(exc))

    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']

    # NOTE(review): with retries <= 0 the loop never runs and `responses`
    # below would be unbound -- confirm the spec enforces retries >= 1.
    while retries > 0:
        responses = run_commands(module, commands)

        # Drop every conditional satisfied by this round of output.
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    # Any single match is enough in 'any' mode.
                    conditionals = list()
                    break
                conditionals.remove(item)

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        # Retries exhausted with conditions still outstanding.
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'stdout': responses,
        'stdout_lines': list(to_lines(responses)),
    })

    module.exit_json(**result)

if __name__ == '__main__':
    main()
| gpl-3.0 |
nyu-dl/dl4mt-tutorial | session1/nmt.py | 4 | 40615 | '''
Build a simple neural machine translation model
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import ipdb
import numpy
import copy
import os
import warnings
import sys
import time
from collections import OrderedDict
from data_iterator import TextIterator
# When True, theano functions are compiled with profiling enabled.
profile = False

# push parameters to Theano shared variables
def zipp(params, tparams):
    """Copy numpy values from `params` into the shared variables `tparams`."""
    # Python 2 module: dict.iteritems() is used throughout.
    for kk, vv in params.iteritems():
        tparams[kk].set_value(vv)

# pull parameters from Theano shared variables
def unzip(zipped):
    """Return an OrderedDict of numpy values pulled out of shared variables."""
    new_params = OrderedDict()
    for kk, vv in zipped.iteritems():
        new_params[kk] = vv.get_value()
    return new_params

# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
    """Return the shared variables of `tparams` in insertion order."""
    return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
    """Symbolic dropout with p = 0.5.

    When `use_noise` is on (training), multiplies by a Bernoulli mask;
    otherwise scales by 0.5 so expected activations match at test time.
    """
    proj = tensor.switch(
        use_noise,
        state_before * trng.binomial(state_before.shape, p=0.5, n=1,
                                     dtype=state_before.dtype),
        state_before * 0.5)
    return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
    """Wrap each numpy parameter in a named Theano shared variable."""
    tparams = OrderedDict()
    for kk, pp in params.iteritems():
        tparams[kk] = theano.shared(params[kk], name=kk)
    return tparams

# load parameters
def load_params(path, params):
    """Overwrite entries of `params` with arrays from the .npz at `path`.

    Keys missing from the archive are skipped with a warning rather than
    raising, so partially matching checkpoints still load.
    """
    pp = numpy.load(path)
    for kk, vv in params.iteritems():
        if kk not in pp:
            warnings.warn('%s is not in the archive' % kk)
            continue
        params[kk] = pp[kk]
    return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
          'gru': ('param_init_gru', 'gru_layer'),
          'gru_cond_simple': ('param_init_gru_cond_simple',
                              'gru_cond_simple_layer'),
          }

def get_layer(name):
    """Return the (param-init, apply) function pair registered for `name`.

    NOTE(review): resolves function names with eval() in this module's
    namespace; safe only while `name` comes from internal model options.
    """
    fns = layers[name]
    return (eval(fns[0]), eval(fns[1]))
# some utilities
def ortho_weight(ndim):
    """Sample a random (ndim, ndim) orthogonal matrix as float32.

    The left singular vectors of a Gaussian matrix form an orthonormal
    basis -- the standard recurrent-weight initialisation.
    """
    gaussian = numpy.random.randn(ndim, ndim)
    u, _, _ = numpy.linalg.svd(gaussian)
    return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
    """Sample a float32 weight matrix of shape (nin, nout).

    Square matrices are initialised orthogonally when `ortho` is set;
    otherwise entries are Gaussian noise scaled by `scale`. `nout`
    defaults to `nin`.
    """
    if nout is None:
        nout = nin
    if ortho and nout == nin:
        return ortho_weight(nin)
    return (scale * numpy.random.randn(nin, nout)).astype('float32')
def tanh(x):
    """Elementwise tanh activation (symbolic)."""
    return tensor.tanh(x)

def linear(x):
    """Identity activation."""
    return x
def concatenate(tensor_list, axis=0):
    """
    Alternative implementation of `theano.tensor.concatenate`.
    This function does exactly the same thing, but contrary to Theano's own
    implementation, the gradient is implemented on the GPU.
    Backpropagating through `theano.tensor.concatenate` yields slowdowns
    because the inverse operation (splitting) needs to be done on the CPU.
    This implementation does not have that problem.

    :usage:
        >>> x, y = theano.tensor.matrices('x', 'y')
        >>> c = concatenate([x, y], axis=1)

    :parameters:
        - tensor_list : list
            list of Theano tensor expressions that should be concatenated.
        - axis : int
            the tensors will be joined along this axis.

    :returns:
        - out : tensor
            the concatenated tensor expression.
    """
    # Output shape: same as the inputs except along `axis`, where the
    # sizes are summed.
    concat_size = sum(tt.shape[axis] for tt in tensor_list)

    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range(axis + 1, tensor_list[0].ndim):
        output_shape += (tensor_list[0].shape[k],)

    # Copy each input into its slice of a zero tensor instead of using
    # theano's concatenate (keeps the gradient on the GPU, see docstring).
    out = tensor.zeros(output_shape)
    offset = 0
    for tt in tensor_list:
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, offset + tt.shape[axis]),)
        for k in range(axis + 1, tensor_list[0].ndim):
            indices += (slice(None),)

        out = tensor.set_subtensor(out[indices], tt)
        offset += tt.shape[axis]

    return out
# batch preparation, returns padded batches for both source and target
# sequences with their corresponding masks
def prepare_data(seqs_x, seqs_y, maxlen=None,
                 n_words_src=30000, n_words=30000):
    """Pad a minibatch of source/target token-id sequences into time-major
    int64 arrays plus float32 masks.

    Pairs where either side has length >= *maxlen* are dropped; when nothing
    survives (or the input is empty), ``(None, None, None, None)`` is
    returned.  Each mask covers one extra position past the sequence end
    (room for the implicit end-of-sentence token).
    """
    lengths_x = [len(seq) for seq in seqs_x]
    lengths_y = [len(seq) for seq in seqs_y]
    # optionally drop pairs that exceed the maximum length on either side
    if maxlen is not None:
        kept = [(lx, sx, ly, sy)
                for lx, sx, ly, sy in zip(lengths_x, seqs_x,
                                          lengths_y, seqs_y)
                if lx < maxlen and ly < maxlen]
        lengths_x = [item[0] for item in kept]
        seqs_x = [item[1] for item in kept]
        lengths_y = [item[2] for item in kept]
        seqs_y = [item[3] for item in kept]
    if len(lengths_x) < 1 or len(lengths_y) < 1:
        return None, None, None, None
    n_samples = len(seqs_x)
    # +1 leaves one all-zero row at the end (end-of-sentence slot)
    maxlen_x = numpy.max(lengths_x) + 1
    maxlen_y = numpy.max(lengths_y) + 1
    x = numpy.zeros((maxlen_x, n_samples), dtype='int64')
    y = numpy.zeros((maxlen_y, n_samples), dtype='int64')
    x_mask = numpy.zeros((maxlen_x, n_samples), dtype='float32')
    y_mask = numpy.zeros((maxlen_y, n_samples), dtype='float32')
    for col, (s_x, s_y) in enumerate(zip(seqs_x, seqs_y)):
        x[:len(s_x), col] = s_x
        x_mask[:len(s_x) + 1, col] = 1.
        y[:len(s_y), col] = s_y
        y_mask[:len(s_y) + 1, col] = 1.
    return x, x_mask, y, y_mask
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None,
                       ortho=True):
    """Allocate weight matrix and bias vector for a feedforward layer.

    Dimensions default to ``options['dim_proj']``; parameters are stored in
    *params* under ``prefix_W`` and ``prefix_b``.
    """
    nin = options['dim_proj'] if nin is None else nin
    nout = options['dim_proj'] if nout is None else nout
    params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
    params[_p(prefix, 'b')] = numpy.zeros((nout,), dtype='float32')
    return params
def fflayer(tparams, state_below, options, prefix='rconv',
            activ='lambda x: tensor.tanh(x)', **kwargs):
    """Apply an affine transform followed by the activation spelled in *activ*.

    *activ* is a Python expression (e.g. ``'tanh'`` or a lambda source string)
    that evaluates to a callable.
    """
    preact = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
        tparams[_p(prefix, 'b')]
    return eval(activ)(preact)
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
    """Allocate GRU parameters under *prefix*.

    ``W``/``b`` map the input to the reset and update gates (stacked side by
    side), ``U`` is the matching recurrent transform, and ``Wx``/``bx``/``Ux``
    feed the candidate hidden-state proposal.
    """
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    # input-to-gates transform: reset and update weights concatenated
    params[_p(prefix, 'W')] = numpy.concatenate(
        [norm_weight(nin, dim), norm_weight(nin, dim)], axis=1)
    params[_p(prefix, 'b')] = numpy.zeros((2 * dim,), dtype='float32')
    # recurrent transform for the two gates
    params[_p(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(dim), ortho_weight(dim)], axis=1)
    # input-to-proposal transform and bias
    params[_p(prefix, 'Wx')] = norm_weight(nin, dim)
    params[_p(prefix, 'bx')] = numpy.zeros((dim,), dtype='float32')
    # recurrent transform for the proposal
    params[_p(prefix, 'Ux')] = ortho_weight(dim)
    return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None,
              **kwargs):
    """Run a GRU over `state_below` (#steps x #samples x input-dim, or 2-d
    for a single sample) and return a one-element list holding the sequence
    of hidden states produced by `theano.scan`."""
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    # hidden size is inferred from the recurrent proposal weight Ux
    dim = tparams[_p(prefix, 'Ux')].shape[1]
    if mask is None:
        # no mask given: treat every timestep as valid
        mask = tensor.alloc(1., state_below.shape[0], 1)
    # utility function to slice a tensor
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # state_below is the input word embeddings
    # input to the gates, concatenated
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
        tparams[_p(prefix, 'b')]
    # input to compute the hidden state proposal
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
        tparams[_p(prefix, 'bx')]
    # step function to be used by scan
    # arguments    | sequences |outputs-info| non-seqs
    def _step_slice(m_, x_, xx_, h_, U, Ux):
        preact = tensor.dot(h_, U)
        preact += x_
        # reset and update gates
        r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        # compute the hidden state proposal
        preactx = tensor.dot(h_, Ux)
        preactx = preactx * r
        preactx = preactx + xx_
        # hidden state proposal
        h = tensor.tanh(preactx)
        # leaky integrate and obtain next hidden state
        h = u * h_ + (1. - u) * h
        # padded (masked-out) positions carry the previous state forward
        h = m_[:, None] * h + (1. - m_)[:, None] * h_
        return h
    # prepare scan arguments
    seqs = [mask, state_below_, state_belowx]
    init_states = [tensor.alloc(0., n_samples, dim)]
    _step = _step_slice
    shared_vars = [tparams[_p(prefix, 'U')],
                   tparams[_p(prefix, 'Ux')]]
    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info=init_states,
                                non_sequences=shared_vars,
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps,
                                profile=profile,
                                strict=True)
    rval = [rval]
    return rval
# Conditional GRU layer without Attention
def param_init_gru_cond_simple(options, params, prefix='gru_cond', nin=None,
                               dim=None, dimctx=None):
    """Allocate parameters for a conditional GRU without attention.

    Reuses the plain GRU parameters and adds ``Wc``/``Wcx`` that project a
    fixed context vector into the gates and the state proposal respectively.
    """
    nin = options['dim'] if nin is None else nin
    dim = options['dim'] if dim is None else dim
    dimctx = options['dim'] if dimctx is None else dimctx
    # base GRU parameters (W, b, U, Wx, bx, Ux)
    params = param_init_gru(options, params, prefix, nin=nin, dim=dim)
    # context projection into the reset/update gates
    params[_p(prefix, 'Wc')] = norm_weight(dimctx, dim * 2)
    # context projection into the hidden-state proposal
    params[_p(prefix, 'Wcx')] = norm_weight(dimctx, dim)
    return params
def gru_cond_simple_layer(tparams, state_below, options, prefix='gru',
                          mask=None, context=None, one_step=False,
                          init_state=None,
                          **kwargs):
    """Conditional GRU without attention: like `gru_layer`, but every step is
    additionally conditioned on a fixed 2-d `context` (#samples x dimctx).
    With `one_step=True` a single transition is computed from `init_state`
    (used at sampling time) instead of scanning over the whole sequence."""
    assert context, 'Context must be provided'
    if one_step:
        assert init_state, 'previous state must be provided'
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    # mask
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    dim = tparams[_p(prefix, 'Ux')].shape[1]
    # initial/previous state
    if init_state is None:
        init_state = tensor.alloc(0., n_samples, dim)
    assert context.ndim == 2, 'Context must be 2-d: #sample x dim'
    # projected context to GRU gates
    pctx_ = tensor.dot(context, tparams[_p(prefix, 'Wc')])
    # projected context to hidden state proposal
    pctxx_ = tensor.dot(context, tparams[_p(prefix, 'Wcx')])
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # projected x to the hidden state proposal (Wx/bx)
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
        tparams[_p(prefix, 'bx')]
    # projected x to the gates (W/b, reset and update stacked)
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
        tparams[_p(prefix, 'b')]
    # step function to be used by scan
    # arguments    | sequences |outputs-info| non-seqs
    def _step_slice(m_, x_, xx_, h_, pctx_, pctxx_, U, Ux):
        preact = tensor.dot(h_, U)
        preact += x_
        preact += pctx_
        preact = tensor.nnet.sigmoid(preact)
        # reset and update gates
        r = _slice(preact, 0, dim)
        u = _slice(preact, 1, dim)
        preactx = tensor.dot(h_, Ux)
        preactx *= r
        preactx += xx_
        preactx += pctxx_
        # hidden state proposal, then leaky integration and masking
        h = tensor.tanh(preactx)
        h = u * h_ + (1. - u) * h
        h = m_[:, None] * h + (1. - m_)[:, None] * h_
        return h
    seqs = [mask, state_below_, state_belowx]
    _step = _step_slice
    shared_vars = [tparams[_p(prefix, 'U')],
                   tparams[_p(prefix, 'Ux')]]
    if one_step:
        rval = _step(*(seqs+[init_state, pctx_, pctxx_]+shared_vars))
    else:
        rval, updates = theano.scan(_step,
                                    sequences=seqs,
                                    outputs_info=[init_state],
                                    non_sequences=[pctx_,
                                                   pctxx_]+shared_vars,
                                    name=_p(prefix, '_layers'),
                                    n_steps=nsteps,
                                    profile=profile,
                                    strict=True)
    return rval
# initialize all parameters
def init_params(options):
    """Build the full OrderedDict of numpy parameters for the model:
    source/target embeddings, encoder, decoder-state initializer, decoder,
    and the readout layers that produce the output logits."""
    params = OrderedDict()
    # embedding
    params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
    params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
    # encoder
    params = get_layer(options['encoder'])[0](options, params,
                                              prefix='encoder',
                                              nin=options['dim_word'],
                                              dim=options['dim'])
    # context is the last encoder hidden state, hence dims match
    ctxdim = options['dim']
    # init_state, init_cell
    params = get_layer('ff')[0](options, params, prefix='ff_state',
                                nin=ctxdim, nout=options['dim'])
    # decoder
    params = get_layer(options['decoder'])[0](options, params,
                                              prefix='decoder',
                                              nin=options['dim_word'],
                                              dim=options['dim'],
                                              dimctx=ctxdim)
    # readout: combines decoder state, previous word and context into logits
    params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
                                nin=options['dim'], nout=options['dim_word'],
                                ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
                                nin=options['dim_word'],
                                nout=options['dim_word'], ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
                                nin=ctxdim, nout=options['dim_word'],
                                ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit',
                                nin=options['dim_word'],
                                nout=options['n_words'])
    return params
# build a training model
def build_model(tparams, options):
    """Build the training computation graph.

    Returns (trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost) where
    `cost` is the per-sentence negative log-likelihood (summed over target
    positions, masked)."""
    opt_ret = dict()
    trng = RandomStreams(1234)
    use_noise = theano.shared(numpy.float32(0.))
    # description string: #words x #samples
    x = tensor.matrix('x', dtype='int64')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    y = tensor.matrix('y', dtype='int64')
    y_mask = tensor.matrix('y_mask', dtype='float32')
    n_timesteps = x.shape[0]
    n_timesteps_trg = y.shape[0]
    n_samples = x.shape[1]
    # word embedding (source)
    emb = tparams['Wemb'][x.flatten()]
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    # pass through encoder gru, recurrence here
    proj = get_layer(options['encoder'])[1](tparams, emb, options,
                                            prefix='encoder',
                                            mask=x_mask)
    # last hidden state of encoder rnn will be used to initialize decoder rnn
    ctx = proj[0][-1]
    ctx_mean = ctx
    # initial decoder state
    init_state = get_layer('ff')[1](tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')
    # word embedding (target), we will shift the target sequence one time step
    # to the right. This is done because of the bi-gram connections in the
    # readout and decoder rnn. The first target will be all zeros and we will
    # not condition on the last output.
    emb = tparams['Wemb_dec'][y.flatten()]
    emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
    emb_shifted = tensor.zeros_like(emb)
    emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
    emb = emb_shifted
    # decoder - pass through the decoder gru, recurrence here
    proj = get_layer(options['decoder'])[1](tparams, emb, options,
                                            prefix='decoder',
                                            mask=y_mask, context=ctx,
                                            one_step=False,
                                            init_state=init_state)
    # hidden states of the decoder gru
    proj_h = proj
    # we will condition on the last state of the encoder only
    ctxs = ctx[None, :, :]
    # compute word probabilities
    logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
    logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit',
                               activ='linear')
    logit_shp = logit.shape
    probs = tensor.nnet.softmax(
        logit.reshape([logit_shp[0]*logit_shp[1], logit_shp[2]]))
    # cost: pick out the probability of each gold target word
    y_flat = y.flatten()
    y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
    cost = -tensor.log(probs.flatten()[y_flat_idx])
    cost = cost.reshape([y.shape[0], y.shape[1]])
    # mask out padding, sum over time -> per-sentence NLL
    cost = (cost * y_mask).sum(0)
    return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng, use_noise):
    """Compile the two sampling functions (Python 2 code):

    - f_init(x) -> [init_state, ctx]: encode a source sentence.
    - f_next(y, ctx, init_state) -> [probs, sample, next_state]: one
      decoder step; y < 0 signals the start-of-sentence position."""
    x = tensor.matrix('x', dtype='int64')
    n_timesteps = x.shape[0]
    n_samples = x.shape[1]
    # word embedding (source)
    emb = tparams['Wemb'][x.flatten()]
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    # encoder
    proj = get_layer(options['encoder'])[1](tparams, emb, options,
                                            prefix='encoder')
    # context = last encoder hidden state
    ctx = proj[0][-1]
    ctx_mean = ctx
    init_state = get_layer('ff')[1](tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')
    print 'Building f_init...',
    outs = [init_state, ctx]
    f_init = theano.function([x], outs, name='f_init', profile=profile)
    print 'Done'
    # y: 1 x 1
    y = tensor.vector('y_sampler', dtype='int64')
    init_state = tensor.matrix('init_state', dtype='float32')
    # if it's the first word, emb should be all zero
    emb = tensor.switch(y[:, None] < 0,
                        tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
                        tparams['Wemb_dec'][y])
    # apply one step of gru layer
    proj = get_layer(options['decoder'])[1](tparams, emb, options,
                                            prefix='decoder',
                                            mask=None, context=ctx,
                                            one_step=True,
                                            init_state=init_state)
    next_state = proj
    ctxs = ctx
    # compute the output probability dist and sample
    logit_lstm = get_layer('ff')[1](tparams, next_state, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm+logit_prev+logit_ctx)
    logit = get_layer('ff')[1](tparams, logit, options,
                               prefix='ff_logit', activ='linear')
    next_probs = tensor.nnet.softmax(logit)
    next_sample = trng.multinomial(pvals=next_probs).argmax(1)
    # next word probability
    print 'Building f_next..',
    inps = [y, ctx, init_state]
    outs = [next_probs, next_sample, next_state]
    f_next = theano.function(inps, outs, name='f_next', profile=profile)
    print 'Done'
    return f_init, f_next
# generate sample, either with stochastic sampling or beam search
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
               stochastic=True, argmax=False):
    """Generate a translation for a single source sentence `x`.

    With `stochastic=True` (k must be 1) a single sample is drawn step by
    step (greedy when `argmax`); otherwise a beam search of width `k` is
    run.  Returns (sample, sample_score): token-id list(s) and negative
    log-probability score(s).  Word id 0 terminates a hypothesis."""
    # k is the beam size we have
    if k > 1:
        assert not stochastic, \
            'Beam search does not support stochastic sampling'
    sample = []
    sample_score = []
    if stochastic:
        sample_score = 0
    live_k = 1
    dead_k = 0
    hyp_samples = [[]] * live_k
    hyp_scores = numpy.zeros(live_k).astype('float32')
    hyp_states = []
    # get initial state of decoder rnn and encoder context
    ret = f_init(x)
    next_state, ctx0 = ret[0], ret[1]
    next_w = [-1]  # indicator for the first target word (bos target)
    for ii in xrange(maxlen):
        # replicate the (fixed) context for every live hypothesis
        ctx = numpy.tile(ctx0, [live_k, 1])
        inps = [next_w, ctx, next_state]
        ret = f_next(*inps)
        next_p, next_w, next_state = ret[0], ret[1], ret[2]
        if stochastic:
            if argmax:
                nw = next_p[0].argmax()
            else:
                nw = next_w[0]
            sample.append(nw)
            sample_score -= numpy.log(next_p[0, nw])
            if nw == 0:
                break
        else:
            # scores of every (hypothesis, next word) expansion
            cand_scores = hyp_scores[:, None] - numpy.log(next_p)
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k-dead_k)]
            voc_size = next_p.shape[1]
            # NOTE: integer division under Python 2 (this is Python 2 code)
            trans_indices = ranks_flat / voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]
            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
            new_hyp_states = []
            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti]+[wi])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append(copy.copy(next_state[ti]))
            # check the finished samples
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []
            for idx in xrange(len(new_hyp_samples)):
                if new_hyp_samples[idx][-1] == 0:
                    # hypothesis emitted eos: retire it from the beam
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k
            if new_live_k < 1:
                break
            if dead_k >= k:
                break
            next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = numpy.array(hyp_states)
    if not stochastic:
        # dump every remaining one
        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])
    return sample, sample_score
# calculate the log probablities on a given corpus using translation model
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=True):
    """Compute per-sentence negative log-probabilities over a whole corpus.

    Iterates over (source, target) minibatches, pads them with
    `prepare_data` and collects the outputs of the compiled `f_log_probs`.
    Drops into ipdb if a NaN appears.  Returns a 1-d numpy array."""
    probs = []
    n_done = 0
    for x, y in iterator:
        n_done += len(x)
        x, x_mask, y, y_mask = prepare_data(x, y,
                                            n_words_src=options['n_words_src'],
                                            n_words=options['n_words'])
        pprobs = f_log_probs(x, x_mask, y, y_mask)
        for pp in pprobs:
            probs.append(pp)
        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()
        if verbose:
            print >>sys.stderr, '%d samples computed' % (n_done)
    return numpy.array(probs)
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost, beta1=0.9, beta2=0.999, e=1e-8):
    """Adam optimizer (Kingma & Ba, 2014).

    Returns (f_grad_shared, f_update): the first computes the cost and
    stores gradients into shared buffers, the second applies the Adam
    update with learning rate `lr`."""
    # one shared gradient buffer per parameter
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
    updates = []
    # timestep counter used for bias correction
    t_prev = theano.shared(numpy.float32(0.))
    t = t_prev + 1.
    lr_t = lr * tensor.sqrt(1. - beta2**t) / (1. - beta1**t)
    for p, g in zip(tparams.values(), gshared):
        # first and second moment estimates
        m = theano.shared(p.get_value() * 0., p.name + '_mean')
        v = theano.shared(p.get_value() * 0., p.name + '_variance')
        m_t = beta1 * m + (1. - beta1) * g
        v_t = beta2 * v + (1. - beta2) * g**2
        step = lr_t * m_t / (tensor.sqrt(v_t) + e)
        p_t = p - step
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    updates.append((t_prev, t))
    f_update = theano.function([lr], [], updates=updates,
                               on_unused_input='ignore', profile=profile)
    return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
    """Adadelta optimizer (Zeiler, 2012).

    Returns (f_grad_shared, f_update); `lr` is accepted for interface
    compatibility but effectively unused (on_unused_input='ignore')."""
    # shared buffers: raw grads, running avg of squared updates/grads
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function(inp, cost, updates=zgup+rg2up,
                                    profile=profile)
    # update direction scaled by the RMS ratio of past updates to grads
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in
             zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
    f_update = theano.function([lr], [], updates=ru2up+param_up,
                               on_unused_input='ignore', profile=profile)
    return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
    """RMSProp with momentum (Graves, 2013 variant).

    Returns (f_grad_shared, f_update); the step size 1e-4 is hard-coded
    and `lr` is effectively unused (on_unused_input='ignore')."""
    # shared buffers: raw grads plus running first/second grad moments
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                   name='%s_rgrad' % k)
                     for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]
    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function(inp, cost, updates=zgup+rgup+rg2up,
                                    profile=profile)
    # momentum buffer; denominator is the grad variance estimate
    updir = [theano.shared(p.get_value() * numpy.float32(0.),
                           name='%s_updir' % k)
             for k, p in tparams.iteritems()]
    updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
                 for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
                                            running_grads2)]
    param_up = [(p, p + udn[1])
                for p, udn in zip(itemlist(tparams), updir_new)]
    f_update = theano.function([lr], [], updates=updir_new+param_up,
                               on_unused_input='ignore', profile=profile)
    return f_grad_shared, f_update
def sgd(lr, tparams, grads, inp, cost):
    """Plain stochastic gradient descent.

    Signature aligned with adam/adadelta/rmsprop — (lr, tparams, grads,
    inp, cost) — so it can be selected through the
    ``eval(optimizer)(lr, tparams, grads, inps, cost)`` dispatch in
    ``train``; the previous ``(lr, tparams, grads, x, mask, y, cost)``
    form raised a TypeError when chosen there.

    Returns (f_grad_shared, f_update): the first computes the cost and
    stores gradients into shared buffers, the second applies
    ``p <- p - lr * g``."""
    # one shared gradient buffer per parameter
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]
    f_grad_shared = theano.function(inp, cost, updates=gsup,
                                    profile=profile)
    pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
    f_update = theano.function([lr], [], updates=pup, profile=profile)
    return f_grad_shared, f_update
def train(dim_word=100,  # word vector dimensionality
          dim=1000,  # the number of GRU units
          encoder='gru',
          decoder='gru_cond_simple',
          patience=10,  # early stopping patience
          max_epochs=5000,
          finish_after=10000000,  # finish after this many updates
          dispFreq=100,
          decay_c=0.,  # L2 regularization penalty
          alpha_c=0.,  # not used
          lrate=0.01,  # learning rate
          n_words_src=100000,  # source vocabulary size
          n_words=100000,  # target vocabulary size
          maxlen=100,  # maximum length of the description
          optimizer='rmsprop',
          batch_size=16,
          valid_batch_size=16,
          saveto='model.npz',
          validFreq=1000,
          saveFreq=1000,  # save the parameters after every saveFreq updates
          sampleFreq=100,  # generate some samples after every sampleFreq
          datasets=[
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok',
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok'],
          valid_datasets=['../data/dev/newstest2011.en.tok',
                          '../data/dev/newstest2011.fr.tok'],
          dictionaries=[
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.en.tok.pkl',
              '/data/lisatmp3/chokyun/europarl/europarl-v7.fr-en.fr.tok.pkl'],
          use_dropout=False,
          reload_=False,
          overwrite=False):
    """Train the encoder-decoder translation model (GRU encoder plus
    conditional GRU decoder without attention) with early stopping on
    validation negative log-likelihood; periodically saves checkpoints
    and prints samples.  Returns the final validation error.
    Python 2 / Theano code."""
    # Model options: snapshot of all hyperparameters for saving/reloading
    model_options = locals().copy()
    # load dictionaries and invert them
    worddicts = [None] * len(dictionaries)
    worddicts_r = [None] * len(dictionaries)
    for ii, dd in enumerate(dictionaries):
        with open(dd, 'rb') as f:
            worddicts[ii] = pkl.load(f)
        worddicts_r[ii] = dict()
        for kk, vv in worddicts[ii].iteritems():
            worddicts_r[ii][vv] = kk
    # reload options
    if reload_ and os.path.exists(saveto):
        print 'Reloading model options'
        with open('%s.pkl' % saveto, 'rb') as f:
            model_options = pkl.load(f)
    print 'Loading data'
    train = TextIterator(datasets[0], datasets[1],
                         dictionaries[0], dictionaries[1],
                         n_words_source=n_words_src, n_words_target=n_words,
                         batch_size=batch_size,
                         maxlen=maxlen)
    valid = TextIterator(valid_datasets[0], valid_datasets[1],
                         dictionaries[0], dictionaries[1],
                         n_words_source=n_words_src, n_words_target=n_words,
                         batch_size=valid_batch_size,
                         maxlen=maxlen)
    print 'Building model'
    params = init_params(model_options)
    # reload parameters
    if reload_ and os.path.exists(saveto):
        print 'Reloading model parameters'
        params = load_params(saveto, params)
    # numpy parameters -> theano shared variables
    tparams = init_tparams(params)
    trng, use_noise, \
        x, x_mask, y, y_mask, \
        opt_ret, \
        cost = \
        build_model(tparams, model_options)
    inps = [x, x_mask, y, y_mask]
    print 'Building sampler'
    f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)
    # before any regularizer
    print 'Building f_log_probs...',
    f_log_probs = theano.function(inps, cost, profile=profile)
    print 'Done'
    cost = cost.mean()
    # apply L2 regularization on weights
    if decay_c > 0.:
        decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
        weight_decay = 0.
        for kk, vv in tparams.iteritems():
            weight_decay += (vv ** 2).sum()
        weight_decay *= decay_c
        cost += weight_decay
    # un used, attention weight regularization
    if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
        alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
        alpha_reg = alpha_c * (
            (tensor.cast(y_mask.sum(0)//x_mask.sum(0), 'float32')[:, None] -
             opt_ret['dec_alphas'].sum(0))**2).sum(1).mean()
        cost += alpha_reg
    # after all regularizers - compile the computational graph for cost
    print 'Building f_cost...',
    f_cost = theano.function(inps, cost, profile=profile)
    print 'Done'
    print 'Computing gradient...',
    grads = tensor.grad(cost, wrt=itemlist(tparams))
    print 'Done'
    # compile the optimizer, the actual computational graph is compiled here
    lr = tensor.scalar(name='lr')
    print 'Building optimizers...',
    # NOTE: dispatches by name; the optimizer must accept
    # (lr, tparams, grads, inps, cost)
    f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
    print 'Done'
    print 'Optimization'
    best_p = None
    bad_counter = 0
    uidx = 0
    estop = False
    history_errs = []
    # reload history
    if reload_ and os.path.exists(saveto):
        rmodel = numpy.load(saveto)
        history_errs = list(rmodel['history_errs'])
        if 'uidx' in rmodel:
            uidx = rmodel['uidx']
    if validFreq == -1:
        validFreq = len(train[0])/batch_size
    if saveFreq == -1:
        saveFreq = len(train[0])/batch_size
    if sampleFreq == -1:
        sampleFreq = len(train[0])/batch_size
    for eidx in xrange(max_epochs):
        n_samples = 0
        for x, y in train:
            n_samples += len(x)
            uidx += 1
            use_noise.set_value(1.)
            x, x_mask, y, y_mask = prepare_data(x, y, maxlen=maxlen,
                                                n_words_src=n_words_src,
                                                n_words=n_words)
            if x is None:
                print 'Minibatch with zero sample under length ', maxlen
                uidx -= 1
                continue
            ud_start = time.time()
            # compute cost, grads and copy grads to shared variables
            cost = f_grad_shared(x, x_mask, y, y_mask)
            # do the update on parameters
            f_update(lrate)
            ud = time.time() - ud_start
            # check for bad numbers, usually we remove non-finite elements
            # and continue training - but not done here
            if numpy.isnan(cost) or numpy.isinf(cost):
                print 'NaN detected'
                return 1., 1., 1.
            # verbose
            if numpy.mod(uidx, dispFreq) == 0:
                print 'Epoch ', eidx, 'Update ', uidx, 'Cost ', cost, 'UD ', ud
            # save the best model so far, in addition, save the latest model
            # into a separate file with the iteration number for external eval
            if numpy.mod(uidx, saveFreq) == 0:
                print 'Saving the best model...',
                if best_p is not None:
                    params = best_p
                else:
                    params = unzip(tparams)
                numpy.savez(saveto, history_errs=history_errs, uidx=uidx, **params)
                pkl.dump(model_options, open('%s.pkl' % saveto, 'wb'))
                print 'Done'
                # save with uidx
                if not overwrite:
                    print 'Saving the model at iteration {}...'.format(uidx),
                    saveto_uidx = '{}.iter{}.npz'.format(
                        os.path.splitext(saveto)[0], uidx)
                    numpy.savez(saveto_uidx, history_errs=history_errs,
                                uidx=uidx, **unzip(tparams))
                    print 'Done'
            # generate some samples with the model and display them
            if numpy.mod(uidx, sampleFreq) == 0:
                # FIXME: random selection?
                for jj in xrange(numpy.minimum(5, x.shape[1])):
                    stochastic = True
                    sample, score = gen_sample(tparams, f_init, f_next,
                                               x[:, jj][:, None],
                                               model_options, trng=trng, k=1,
                                               maxlen=30,
                                               stochastic=stochastic,
                                               argmax=False)
                    print 'Source ', jj, ': ',
                    for vv in x[:, jj]:
                        if vv == 0:
                            break
                        if vv in worddicts_r[0]:
                            print worddicts_r[0][vv],
                        else:
                            print 'UNK',
                    print
                    print 'Truth ', jj, ' : ',
                    for vv in y[:, jj]:
                        if vv == 0:
                            break
                        if vv in worddicts_r[1]:
                            print worddicts_r[1][vv],
                        else:
                            print 'UNK',
                    print
                    print 'Sample ', jj, ': ',
                    if stochastic:
                        ss = sample
                    else:
                        score = score / numpy.array([len(s) for s in sample])
                        ss = sample[score.argmin()]
                    for vv in ss:
                        if vv == 0:
                            break
                        if vv in worddicts_r[1]:
                            print worddicts_r[1][vv],
                        else:
                            print 'UNK',
                    print
            # validate model on validation set and early stop if necessary
            if numpy.mod(uidx, validFreq) == 0:
                use_noise.set_value(0.)
                valid_errs = pred_probs(f_log_probs, prepare_data,
                                        model_options, valid)
                valid_err = valid_errs.mean()
                history_errs.append(valid_err)
                if uidx == 0 or valid_err <= numpy.array(history_errs).min():
                    best_p = unzip(tparams)
                    bad_counter = 0
                if len(history_errs) > patience and valid_err >= \
                        numpy.array(history_errs)[:-patience].min():
                    bad_counter += 1
                    if bad_counter > patience:
                        print 'Early Stop!'
                        estop = True
                        break
                if numpy.isnan(valid_err):
                    ipdb.set_trace()
                print 'Valid ', valid_err
            # finish after this many updates
            if uidx >= finish_after:
                print 'Finishing after %d iterations!' % uidx
                estop = True
                break
        print 'Seen %d samples' % n_samples
        if estop:
            break
    if best_p is not None:
        zipp(best_p, tparams)
    use_noise.set_value(0.)
    valid_err = pred_probs(f_log_probs, prepare_data,
                           model_options, valid).mean()
    print 'Valid ', valid_err
    params = copy.copy(best_p)
    numpy.savez(saveto, zipped_params=best_p,
                history_errs=history_errs,
                uidx=uidx,
                **params)
    return valid_err
if __name__ == '__main__':
    # training is launched from a separate driver script; nothing to do here
    pass
| bsd-3-clause |
aferr/TemporalPartitioningMemCtl | src/arch/x86/isa/insts/simd64/integer/data_transfer/move.py | 65 | 2969 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop MOVD_MMX_R {
mov2fp mmx, regm, srcSize=dsz, destSize=8
};
def macroop MOVD_MMX_M {
ldfp mmx, seg, sib, disp, dataSize=8
};
def macroop MOVD_MMX_P {
rdip t7
ldfp mmx, seg, riprel, disp, dataSize=8
};
def macroop MOVD_R_MMX {
mov2int reg, mmxm, size=dsz
};
def macroop MOVD_M_MMX {
stfp mmx, seg, sib, disp, dataSize=8
};
def macroop MOVD_P_MMX {
rdip t7
stfp mmx, seg, riprel, disp, dataSize=8
};
def macroop MOVQ_MMX_MMX {
movfp mmx, mmxm
};
def macroop MOVQ_MMX_M {
ldfp mmx, seg, sib, disp, dataSize=8
};
def macroop MOVQ_MMX_P {
rdip t7
ldfp mmx, seg, riprel, disp, dataSize=8
};
def macroop MOVQ_M_MMX {
stfp mmx, seg, sib, disp, dataSize=8
};
def macroop MOVQ_P_MMX {
rdip t7
stfp mmx, seg, riprel, disp, dataSize=8
};
'''
# MOVDQ2Q
# MOVQ2DQ
| bsd-3-clause |
librasungirl/openthread | tools/harness-automation/cases/ed_6_1_2.py | 18 | 1869 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class ED_6_1_2(HarnessCase):
    """Thread certification case 6.1.2 with the DUT acting as an End Device."""
    # role of the device under test; two golden (reference) devices are needed
    role = HarnessCase.ROLE_ED
    case = '6 1 2'
    golden_devices_required = 2
    def on_dialog(self, dialog, title):
        """No harness dialogs need special handling for this case."""
        pass
if __name__ == '__main__':
    # allow running this single harness case directly
    unittest.main()
| bsd-3-clause |
anryko/ansible | lib/ansible/modules/network/fortios/fortios_endpoint_control_client.py | 7 | 11261 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_endpoint_control_client
short_description: Configure endpoint control client lists in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify endpoint_control feature and client category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
endpoint_control_client:
description:
- Configure endpoint control client lists.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
ad_groups:
description:
- Endpoint client AD logon groups.
type: str
ftcl_uid:
description:
- Endpoint FortiClient UID.
type: str
id:
description:
- Endpoint client ID.
required: true
type: int
info:
description:
- Endpoint client information.
type: str
src_ip:
description:
- Endpoint client IP address.
type: str
src_mac:
description:
- Endpoint client MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure endpoint control client lists.
fortios_endpoint_control_client:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
endpoint_control_client:
ad_groups: "<your_own_value>"
ftcl_uid: "<your_own_value>"
id: "5"
info: "<your_own_value>"
src_ip: "<your_own_value>"
src_mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate using the legacy fortiosapi client.

    Args:
        data (dict): module parameters; reads host, username, password,
            https and ssl_verify.
        fos: a fortiosapi.FortiOSAPI instance to log in with.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    # NOTE(review): debug output is switched on unconditionally here —
    # presumably intentional for this generated module; confirm upstream.
    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_endpoint_control_client_data(json):
    """Return a copy of *json* restricted to the keys the FOS
    endpoint-control client API understands; None values are dropped."""
    option_list = ['ad_groups', 'ftcl_uid', 'id',
                   'info', 'src_ip', 'src_mac']
    return {key: json[key]
            for key in option_list
            if key in json and json[key] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Lists are converted in place (same object returned); dicts are
    rebuilt; scalars pass through untouched.
    """
    if isinstance(data, list):
        # Keep the original list object, mirroring the in-place update of
        # the previous implementation.
        data[:] = [underscore_to_hyphen(item) for item in data]
    elif isinstance(data, dict):
        data = {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def endpoint_control_client(data, fos):
    """Create or delete an endpoint-control client entry on the device.

    The desired state comes from the top-level 'state' parameter when set,
    falling back to the legacy nested 'state' inside the
    endpoint_control_client dict.

    Args:
        data (dict): module parameters.
        fos: connected FortiOS handler with set()/delete() methods.

    Returns:
        The FOS API response for the set/delete call (None if the state is
        neither "present" nor "absent").
    """
    vdom = data['vdom']
    client_params = data['endpoint_control_client']
    if 'state' in data and data['state']:
        state = data['state']
    # Fix: check the dict is non-empty/non-None BEFORE the membership test;
    # the original order raised TypeError on a None dict.
    elif client_params and 'state' in client_params:
        state = client_params['state']
    else:
        state = True
    filtered_data = underscore_to_hyphen(filter_endpoint_control_client_data(client_params))
    if state == "present":
        return fos.set('endpoint-control',
                       'client',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('endpoint-control',
                          'client',
                          mkey=filtered_data['id'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True when the FOS response indicates success.

    Deleting an already-absent object (DELETE + HTTP 404) also counts as
    success, keeping the module idempotent.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_endpoint_control(data, fos):
    """Dispatch to the endpoint-control handler and normalize the result.

    Returns:
        tuple: (is_error, has_changed, result) as consumed by main().
    """
    if data['endpoint_control_client']:
        resp = endpoint_control_client(data, fos)
    # NOTE(review): if 'endpoint_control_client' is missing or empty, `resp`
    # is never assigned and the return below raises NameError — verify
    # whether AnsibleModule's argument validation makes that unreachable.
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point.

    Builds the argument spec, chooses the transport (HTTPAPI connection
    plugin vs. legacy fortiosapi login) and applies the requested
    endpoint-control client change.
    """
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "endpoint_control_client": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "ad_groups": {"required": False, "type": "str"},
                "ftcl_uid": {"required": False, "type": "str"},
                "id": {"required": True, "type": "int"},
                "info": {"required": False, "type": "str"},
                "src_ip": {"required": False, "type": "str"},
                "src_mac": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI transport: reuse the persistent connection plugin socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy transport: direct fortiosapi session. Import is deferred so
        # the module can still load (and fail cleanly) without the library.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
GaussDing/jieba | jieba/posseg/__init__.py | 1 | 7167 | from __future__ import absolute_import, unicode_literals
import re
import os
import jieba
import sys
import marshal
from functools import wraps
from .._compat import *
from .viterbi import viterbi
# Filenames of the marshalled HMM model tables shipped with the package
# (used on Jython, where the generated .py tables cannot be imported).
PROB_START_P = "prob_start.p"
PROB_TRANS_P = "prob_trans.p"
PROB_EMIT_P = "prob_emit.p"
CHAR_STATE_TAB_P = "char_state_tab.p"
# Runs of CJK characters: candidates for the HMM-based detailed cut.
re_han_detail = re.compile("([\u4E00-\u9FA5]+)")
# Numbers or ASCII alphanumeric runs inside non-CJK text.
re_skip_detail = re.compile("([\.0-9]+|[a-zA-Z0-9]+)")
# Tokens handled by the dictionary/DAG cut: CJK plus common identifier chars.
re_han_internal = re.compile("([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)")
# Whitespace/newline runs emitted verbatim with tag 'x'.
re_skip_internal = re.compile("(\r\n|\s)")
re_eng = re.compile("[a-zA-Z0-9]+")
re_num = re.compile("[\.0-9]+")
# A single ASCII alphanumeric character.
re_eng1 = re.compile('^[a-zA-Z0-9]$', re.U)
def load_model(f_name, isJython=True):
    """Load the word/POS-tag dictionary and, optionally, the HMM tables.

    Args:
        f_name: path of the dictionary file; each non-empty line is
            "word freq tag" (UTF-8, space separated).
        isJython: when True also load the marshalled HMM model tables that
            live next to this module and return them alongside the dict.

    Returns:
        dict mapping word -> tag when isJython is False, otherwise the
        tuple (char_state_tab, start_p, trans_p, emit_p, word_tag_tab).
    """
    _curpath = os.path.normpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    result = {}
    with open(f_name, "rb") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            line = line.decode("utf-8")
            word, _, tag = line.split(" ")
            result[word] = tag
    if not isJython:
        return result

    def _load_marshal(name):
        # Model tables are stored as marshal dumps next to this module.
        # (The redundant f.close() inside the old with-blocks is gone.)
        with open(os.path.join(_curpath, name), 'rb') as f:
            return marshal.load(f)

    start_p = _load_marshal(PROB_START_P)
    trans_p = _load_marshal(PROB_TRANS_P)
    emit_p = _load_marshal(PROB_EMIT_P)
    state = _load_marshal(CHAR_STATE_TAB_P)
    return state, start_p, trans_p, emit_p, result
# On Jython the marshalled model tables cannot be imported as generated .py
# modules, so everything (HMM tables + word/tag dict) is read from files;
# on CPython the tables are regular modules and only the dict needs parsing.
if sys.platform.startswith("java"):
    char_state_tab_P, start_P, trans_P, emit_P, word_tag_tab = load_model(
        jieba.get_abs_path_dict())
else:
    from .char_state_tab import P as char_state_tab_P
    from .prob_start import P as start_P
    from .prob_trans import P as trans_P
    from .prob_emit import P as emit_P
    word_tag_tab = load_model(jieba.get_abs_path_dict(), isJython=False)
def makesure_userdict_loaded(fn):
    """Decorator: merge pending user-defined word/tag pairs into
    word_tag_tab before running the wrapped POS-tagging entry point."""
    @wraps(fn)
    def wrapped(*args, **kwargs):
        if jieba.user_word_tag_tab:
            word_tag_tab.update(jieba.user_word_tag_tab)
            # Drain the pending table so the merge happens only once.
            jieba.user_word_tag_tab = {}
        return fn(*args, **kwargs)
    return wrapped
class pair(object):
    """A (word, POS flag) pair produced by the segmenter.

    Rendered as "word/flag" (e.g. "jieba/eng"), matching classic jieba
    output.
    """
    def __init__(self, word, flag):
        # word: the segmented token; flag: its POS tag ('n', 'v', 'eng', ...)
        self.word = word
        self.flag = flag
    def __unicode__(self):
        return '%s/%s' % (self.word, self.flag)
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        if PY2:
            # Python 2 str() must return bytes; use the module's default
            # encoding from jieba._compat.
            return self.__unicode__().encode(default_encoding)
        else:
            return self.__unicode__()
    def encode(self, arg):
        """Encode the "word/flag" text with the given codec."""
        return self.__unicode__().encode(arg)
def __cut(sentence):
    """Segment and tag *sentence* with the character-based HMM (Viterbi).

    Characters are assigned B/M/E/S position states; a B..E span becomes
    one word, an S a single-character word. Yields pair objects.
    """
    prob, pos_list = viterbi(
        sentence, char_state_tab_P, start_P, trans_P, emit_P)
    begin, nexti = 0, 0
    for i, char in enumerate(sentence):
        pos = pos_list[i][0]
        if pos == 'B':
            begin = i
        elif pos == 'E':
            yield pair(sentence[begin:i + 1], pos_list[i][1])
            nexti = i + 1
        elif pos == 'S':
            yield pair(char, pos_list[i][1])
            nexti = i + 1
    # Flush trailing characters that never reached an E/S state.
    if nexti < len(sentence):
        yield pair(sentence[nexti:], pos_list[nexti][1])
def __cut_detail(sentence):
    """HMM-based cut for mixed text.

    Han runs are segmented by __cut; the remainder is tagged as numbers
    ('m'), ASCII words ('eng') or other ('x') without further splitting.
    """
    blocks = re_han_detail.split(sentence)
    for blk in blocks:
        if re_han_detail.match(blk):
            for word in __cut(blk):
                yield word
        else:
            tmp = re_skip_detail.split(blk)
            for x in tmp:
                if x:
                    if re_num.match(x):
                        yield pair(x, 'm')
                    elif re_eng.match(x):
                        yield pair(x, 'eng')
                    else:
                        yield pair(x, 'x')
def __cut_DAG_NO_HMM(sentence):
    """Dictionary/DAG-based cut without the HMM fallback.

    Consecutive single ASCII alphanumeric characters are merged into one
    'eng' token; every other word gets its dictionary tag (default 'x').
    """
    DAG = jieba.get_sentence_dag(sentence)
    route = {}
    jieba.calc_path_backward(sentence, DAG, route)
    x = 0
    N = len(sentence)
    buf = ''
    while x < N:
        # route[x][1] is the end index of the best word starting at x.
        y = route[x][1] + 1
        l_word = sentence[x:y]
        if re_eng1.match(l_word):
            # Single ASCII char: accumulate into one English token.
            buf += l_word
            x = y
        else:
            if buf:
                yield pair(buf, 'eng')
                buf = ''
            yield pair(l_word, word_tag_tab.get(l_word, 'x'))
            x = y
    if buf:
        yield pair(buf, 'eng')
        buf = ''
def __cut_DAG(sentence):
    """Dictionary/DAG-based cut with HMM fallback.

    Known words come from the best path over the word DAG; runs of single
    characters are buffered and re-cut with the HMM (__cut_detail) so
    out-of-vocabulary words can still be discovered.
    """
    DAG = jieba.get_sentence_dag(sentence)
    route = {}
    jieba.calc_path_backward(sentence, DAG, route)
    x = 0
    buf = ''
    N = len(sentence)
    while x < N:
        # route[x][1] is the end index of the best word starting at x.
        y = route[x][1] + 1
        l_word = sentence[x:y]
        if y - x == 1:
            # Single char: buffer it; consecutive singles may form an
            # unseen word for the HMM to recover.
            buf += l_word
        else:
            if buf:
                if len(buf) == 1:
                    yield pair(buf, word_tag_tab.get(buf, 'x'))
                elif not jieba.FREQ.get(buf):
                    # Unknown multi-char run: let the HMM re-segment it.
                    recognized = __cut_detail(buf)
                    for t in recognized:
                        yield t
                else:
                    # Known word made of singles: emit char by char.
                    for elem in buf:
                        yield pair(elem, word_tag_tab.get(elem, 'x'))
                buf = ''
            yield pair(l_word, word_tag_tab.get(l_word, 'x'))
        x = y
    # Flush the trailing buffer with the same logic as above.
    if buf:
        if len(buf) == 1:
            yield pair(buf, word_tag_tab.get(buf, 'x'))
        elif not jieba.FREQ.get(buf):
            recognized = __cut_detail(buf)
            for t in recognized:
                yield t
        else:
            for elem in buf:
                yield pair(elem, word_tag_tab.get(elem, 'x'))
def __cut_internal(sentence, HMM=True):
    """Top-level segmentation: split *sentence* into han/other blocks and
    dispatch each to the DAG-based cutter (with or without HMM)."""
    sentence = strdecode(sentence)
    blocks = re_han_internal.split(sentence)
    if HMM:
        __cut_blk = __cut_DAG
    else:
        __cut_blk = __cut_DAG_NO_HMM
    for blk in blocks:
        if re_han_internal.match(blk):
            for word in __cut_blk(blk):
                yield word
        else:
            tmp = re_skip_internal.split(blk)
            for x in tmp:
                if re_skip_internal.match(x):
                    # Whitespace/newline run: pass through as 'x'.
                    yield pair(x, 'x')
                else:
                    for xx in x:
                        if re_num.match(xx):
                            yield pair(xx, 'm')
                        # NOTE(review): this tests the whole token `x`, not
                        # the single char `xx` — looks like a long-standing
                        # upstream quirk; confirm before "fixing".
                        elif re_eng.match(x):
                            yield pair(xx, 'eng')
                        else:
                            yield pair(xx, 'x')
def __lcut_internal(sentence):
    """List form of __cut_internal (HMM enabled); used by the process pool."""
    return list(__cut_internal(sentence))
def __lcut_internal_no_hmm(sentence):
    """List form of __cut_internal with HMM disabled; used by the pool."""
    return list(__cut_internal(sentence, False))
@makesure_userdict_loaded
def cut(sentence, HMM=True):
    """POS-tagging segmentation entry point; yields pair(word, flag).

    When a process pool is configured (jieba.pool), lines are tagged in
    parallel and the per-line results are flattened back in order.
    """
    if jieba.pool is None:
        for w in __cut_internal(sentence, HMM=HMM):
            yield w
    else:
        # keepends=True so newline characters survive the per-line cut.
        parts = strdecode(sentence).splitlines(True)
        if HMM:
            result = jieba.pool.map(__lcut_internal, parts)
        else:
            result = jieba.pool.map(__lcut_internal_no_hmm, parts)
        for r in result:
            for w in r:
                yield w
| mit |
shakamunyi/crux | crux/keystone.py | 2 | 6066 | import logging
import keystoneclient.v2_0.client as ksclient
from .exc import *
class Keystone (object):
    """Convenience wrapper around the Keystone v2.0 client providing
    find / create / find-or-create helpers for tenants, users and roles.

    Fix: the original class defined find_user twice; the second, identical
    definition silently shadowed the first and has been removed.
    """
    log = logging.getLogger(__name__)
    def __init__(self, username=None, password=None,
                 token=None, tenant_name=None,
                 tenant_id=None, auth_url=None,
                 endpoint=None):
        """Store the credentials and immediately build self.client."""
        self.username = username
        self.password = password
        self.token = token
        self.tenant_name = tenant_name
        self.tenant_id = tenant_id
        self.auth_url = auth_url
        self.endpoint = endpoint
        self.get_keystone_client()
    def get_keystone_client(self):
        """(Re)create the underlying keystoneclient from stored credentials."""
        self.client = ksclient.Client(username=self.username,
                                      password=self.password,
                                      token=self.token,
                                      tenant_name=self.tenant_name,
                                      tenant_id=self.tenant_id,
                                      auth_url=self.auth_url,
                                      endpoint=self.endpoint)
    def find_tenant(self, tenant_name):
        """Return the tenant named *tenant_name*.

        Raises:
            KeyError: no tenant with that name exists.
            ValueError: the name matches more than one tenant.
        """
        tenants = self.client.tenants.list()
        res = [x for x in tenants if x.name == tenant_name]
        if len(res) == 1:
            return res[0]
        elif len(res) > 1:
            raise ValueError('found multiple matches for tenant %s' %
                             tenant_name)
        else:
            raise KeyError(tenant_name)
    def create_tenant(self, tenant_name,
                      tenant_description=None):
        """Create and return a new tenant."""
        tenant = self.client.tenants.create(
            tenant_name,
            tenant_description)
        return tenant
    def find_or_create_tenant(self, tenant_name,
                              tenant_description=None):
        """Return the tenant named *tenant_name*, creating it if missing."""
        try:
            tenant = self.find_tenant(tenant_name)
            self.log.info('using existing tenant %s (%s)',
                          tenant.name,
                          tenant.id)
        except KeyError:
            tenant = self.create_tenant(tenant_name,
                                        tenant_description=tenant_description)
            self.log.info('created new tenant %s (%s)',
                          tenant.name,
                          tenant.id)
        return tenant
    def find_user(self, user_name):
        """Return the user named *user_name* (KeyError / ValueError as for
        find_tenant)."""
        users = self.client.users.list()
        res = [x for x in users if x.name == user_name]
        if len(res) == 1:
            return res[0]
        elif len(res) > 1:
            raise ValueError('found multiple matches for user %s' %
                             user_name)
        else:
            raise KeyError(user_name)
    def create_user(self, user_name, user_password, tenant,
                    user_email=None, user_enabled=True):
        """Create and return a new user belonging to *tenant*.

        Raises:
            CruxException: empty password.
        """
        if not user_password:
            raise CruxException('cannot create a user with an empty '
                                'password')
        user = self.client.users.create(
            user_name,
            user_password,
            user_email,
            tenant.id,
            user_enabled)
        return user
    def find_or_create_user(self, user_name,
                            user_password=None,
                            tenant=None,
                            user_email=None, user_enabled=True):
        """Return the user named *user_name*, creating it if missing.

        Creation requires a non-None *tenant*, otherwise ValueError.
        """
        try:
            user = self.find_user(user_name)
            self.log.info('using existing user %s (%s)',
                          user.name,
                          user.id)
        except KeyError:
            if tenant is None:
                raise ValueError('cannot create user with '
                                 'undefined tenant')
            user = self.create_user(user_name, user_password, tenant,
                                    user_email=user_email,
                                    user_enabled=user_enabled)
            self.log.info('created new user %s (%s)',
                          user.name,
                          user.id)
        return user
    def update_user(self, user,
                    user_email=None,
                    user_password=None,
                    user_enabled=None):
        """Update only the user attributes that were passed (non-None)."""
        if user_enabled is not None:
            self.log.info('updating enabled for user %s',
                          user.name)
            self.client.users.update(user, enabled=user_enabled)
        if user_email is not None:
            self.log.info('updating email for user %s',
                          user.name)
            self.client.users.update(user, email=user_email)
        if user_password is not None:
            self.log.info('updating password for user %s',
                          user.name)
            self.client.users.update_password(user, user_password)
    def find_role(self, role_name):
        """Return the role named *role_name* (KeyError / ValueError as for
        find_tenant)."""
        roles = self.client.roles.list()
        res = [x for x in roles if x.name == role_name]
        if len(res) == 1:
            return res[0]
        elif len(res) > 1:
            raise ValueError('found multiple matches for role %s' %
                             role_name)
        else:
            raise KeyError(role_name)
    def create_role(self, role_name):
        """Create and return a new role."""
        role = self.client.roles.create(role_name)
        return role
    def find_or_create_role(self, role_name):
        """Return the role named *role_name*, creating it if missing."""
        try:
            role = self.find_role(role_name)
            self.log.info('using existing role %s (%s)',
                          role.name,
                          role.id)
        except KeyError:
            role = self.create_role(role_name)
            self.log.info('created new role %s (%s)',
                          role.name,
                          role.id)
        return role
| apache-2.0 |
krageon/closure-linter | closure_linter/javascriptstatetracker.py | 107 | 5171 | #!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for JavaScript files."""
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
# Shorthand alias so the token-type comparisons below stay short.
Type = javascripttokens.JavaScriptTokenType
class JsDocFlag(statetracker.DocFlag):
  """Javascript doc flag object.
  Attribute:
    flag_type: param, return, define, type, etc.
    flag_token: The flag token.
    type_start_token: The first token specifying the flag JS type,
      including braces.
    type_end_token: The last token specifying the flag JS type,
      including braces.
    type: The JavaScript type spec.
    name_token: The token specifying the flag name.
    name: The flag name
    description_start_token: The first token in the description.
    description_end_token: The end token in the description.
    description: The description.
  """
  # Please keep these lists alphabetized.
  # Some projects use the following extensions to JsDoc.
  # TODO(robbyw): determine which of these, if any, should be illegal.
  EXTENDED_DOC = frozenset([
      'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
      'meaning', 'provideGoog', 'throws'])
  # Flags legal in JS files: the shared EcmaScript set plus the extensions.
  LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
  def __init__(self, flag_token):
    """Creates the JsDocFlag object and attaches it to the given start token.
    Args:
      flag_token: The starting token of the flag.
    """
    statetracker.DocFlag.__init__(self, flag_token)
class JavaScriptStateTracker(statetracker.StateTracker):
  """JavaScript state tracker.
  Inherits from the core EcmaScript StateTracker adding extra state tracking
  functionality needed for JavaScript.
  """
  def __init__(self):
    """Initializes a JavaScript token stream state tracker."""
    statetracker.StateTracker.__init__(self, JsDocFlag)
  def Reset(self):
    # _scope_depth counts how many goog.scope(function() {...}) wrappers we
    # are currently inside; _block_stack holds the START_BLOCK tokens of all
    # currently open blocks.
    self._scope_depth = 0
    self._block_stack = []
    super(JavaScriptStateTracker, self).Reset()
  def InTopLevel(self):
    """Compute whether we are at the top level in the class.
    This function call is language specific. In some languages like
    JavaScript, a function is top level if it is not inside any parenthesis.
    In languages such as ActionScript, a function is top level if it is directly
    within a class.
    Returns:
      Whether we are at the top level in the class.
    """
    # goog.scope wrappers are not counted as real nesting.
    return self._scope_depth == self.ParenthesesDepth()
  def InFunction(self):
    """Returns true if the current token is within a function.
    This js-specific override ignores goog.scope functions.
    Returns:
      True if the current token is within a function.
    """
    return self._scope_depth != self.FunctionDepth()
  def InNonScopeBlock(self):
    """Compute whether we are nested within a non-goog.scope block.
    Returns:
      True if the token is not enclosed in a block that does not originate from
      a goog.scope statement. False otherwise.
    """
    return self._scope_depth != self.BlockDepth()
  def GetBlockType(self, token):
    """Determine the block type given a START_BLOCK token.
    Code blocks come after parameters, keywords like else, and closing parens.
    Args:
      token: The current token. Can be assumed to be type START_BLOCK
    Returns:
      Code block type for current token.
    """
    last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
    if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
                          Type.KEYWORD) and not last_code.IsKeyword('return'):
      return self.CODE
    else:
      # e.g. `return {` or `x = {` opens an object literal, not a code block.
      return self.OBJECT_LITERAL
  def GetCurrentBlockStart(self):
    """Gets the start token of current block.
    Returns:
      Starting token of current block. None if not in block.
    """
    if self._block_stack:
      return self._block_stack[-1]
    else:
      return None
  def HandleToken(self, token, last_non_space_token):
    """Handles the given token and updates state.
    Args:
      token: The token to handle.
      last_non_space_token: The last non space token encountered
    """
    if token.type == Type.START_BLOCK:
      self._block_stack.append(token)
    if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
      self._scope_depth += 1
    if token.type == Type.END_BLOCK:
      start_token = self._block_stack.pop()
      if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
        # Closing the block that a goog.scope call opened.
        self._scope_depth -= 1
    super(JavaScriptStateTracker, self).HandleToken(token,
                                                    last_non_space_token)
| apache-2.0 |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/simplejson/__init__.py | 42 | 23137 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps(u'\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from simplejson.compat import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> obj = [1,2,3,{'4': 5, '6': 7}]
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from simplejson.compat import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 3 (char 2)
"""
from __future__ import absolute_import
__version__ = '3.6.5'
# Public API re-exported by `from simplejson import *`.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    'OrderedDict', 'simple_first',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from .scanner import JSONDecodeError
from .decoder import JSONDecoder
from .encoder import JSONEncoder, JSONEncoderForHTML
def _import_OrderedDict():
    """Return an OrderedDict class: the stdlib one when available,
    otherwise the bundled pure-Python fallback (Python < 2.7)."""
    import collections
    if hasattr(collections, 'OrderedDict'):
        return collections.OrderedDict
    from . import ordered_dict
    return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
    """Return the C-accelerated make_encoder, or None when the _speedups
    extension is not built/installed (callers fall back to pure Python)."""
    try:
        from ._speedups import make_encoder
    except ImportError:
        return None
    return make_encoder
# Shared encoder reused by dump()/dumps() whenever the caller passes only
# default arguments — the common fast path, avoiding a per-call JSONEncoder.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8',
    default=None,
    use_decimal=True,
    namedtuple_as_object=True,
    tuple_as_array=True,
    bigint_as_string=False,
    item_sort_key=None,
    for_json=False,
    ignore_nan=False,
    int_as_string_bitcount=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', default=None, use_decimal=True,
        namedtuple_as_object=True, tuple_as_array=True,
        bigint_as_string=False, sort_keys=False, item_sort_key=None,
        for_json=False, ignore_nan=False, int_as_string_bitcount=None, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).
    If *skipkeys* is true then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.
    If *ensure_ascii* is false, then some chunks written to ``fp``
    may be ``unicode`` instances, subject to normal Python ``str`` to
    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
    to cause an error.
    If *check_circular* is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).
    If *allow_nan* is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the original JSON specification, instead of using
    the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
    *ignore_nan* for ECMA-262 compliant behavior.
    If *indent* is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.
    If specified, *separators* should be an
    ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
    if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
    compact JSON representation, you should specify ``(',', ':')`` to eliminate
    whitespace.
    *encoding* is the character encoding for str instances, default is UTF-8.
    *default(obj)* is a function that should return a serializable version
    of obj or raise ``TypeError``. The default simply raises ``TypeError``.
    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.
    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.
    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.
    If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
    or lower than -2**53 will be encoded as strings. This is to avoid the
    rounding that happens in Javascript otherwise. Note that this is still a
    lossy operation that will not round-trip correctly and should be used
    sparingly.
    If *int_as_string_bitcount* is a positive number (n), then int of size
    greater than or equal to 2**n or lower than or equal to -2**n will be
    encoded as strings.
    If specified, *item_sort_key* is a callable used to sort the items in
    each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
    *sort_keys*.
    If *sort_keys* is true (default: ``False``), the output of dictionaries
    will be sorted by item.
    If *for_json* is true (default: ``False``), objects with a ``for_json()``
    method will use the return value of that method for encoding as JSON
    instead of the object.
    If *ignore_nan* is true (default: ``False``), then out of range
    :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
    ``null`` in compliance with the ECMA-262 specification. If true, this will
    override *allow_nan*.
    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
    of subclassing whenever possible.
    """
    # Fast path: every argument is at its default, so the shared
    # module-level _default_encoder can be reused instead of constructing
    # a fresh JSONEncoder for this call.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array
        and not bigint_as_string and not sort_keys
        and not item_sort_key and not for_json
        and not ignore_nan and int_as_string_bitcount is None
        and not kw
    ):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, encoding=encoding,
            default=default, use_decimal=use_decimal,
            namedtuple_as_object=namedtuple_as_object,
            tuple_as_array=tuple_as_array,
            bigint_as_string=bigint_as_string,
            sort_keys=sort_keys,
            item_sort_key=item_sort_key,
            for_json=for_json,
            ignore_nan=ignore_nan,
            int_as_string_bitcount=int_as_string_bitcount,
            **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
          allow_nan=True, cls=None, indent=None, separators=None,
          encoding='utf-8', default=None, use_decimal=True,
          namedtuple_as_object=True, tuple_as_array=True,
          bigint_as_string=False, sort_keys=False, item_sort_key=None,
          for_json=False, ignore_nan=False, int_as_string_bitcount=None, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is false then ``dict`` keys that are not basic types
    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
    will be skipped instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value will be a
    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
    coercion rules instead of being escaped to an ASCII ``str``.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a string, then JSON array elements and object members
    will be pretty-printed with a newline followed by that string repeated
    for each level of nesting. ``None`` (the default) selects the most compact
    representation without any newlines. For backwards compatibility with
    versions of simplejson earlier than 2.1.0, an integer is also accepted
    and is converted to a string with that many spaces.

    If specified, ``separators`` should be an
    ``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
    if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
    compact JSON representation, you should specify ``(',', ':')`` to eliminate
    whitespace.

    ``encoding`` is the character encoding for str instances, default is UTF-8.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *use_decimal* is true (default: ``True``) then decimal.Decimal
    will be natively serialized to JSON with full precision.

    If *namedtuple_as_object* is true (default: ``True``),
    :class:`tuple` subclasses with ``_asdict()`` methods will be encoded
    as JSON objects.

    If *tuple_as_array* is true (default: ``True``),
    :class:`tuple` (and subclasses) will be encoded as JSON arrays.

    If *bigint_as_string* is true (not the default), ints 2**53 and higher
    or lower than -2**53 will be encoded as strings. This is to avoid the
    rounding that happens in Javascript otherwise.

    If *int_as_string_bitcount* is a positive number (n), then int of size
    greater than or equal to 2**n or lower than or equal to -2**n will be
    encoded as strings.

    If specified, *item_sort_key* is a callable used to sort the items in
    each dictionary. This is useful if you want to sort items other than
    in alphabetical order by key. This option takes precedence over
    *sort_keys*.

    If *sort_keys* is true (default: ``False``), the output of dictionaries
    will be sorted by item.

    If *for_json* is true (default: ``False``), objects with a ``for_json()``
    method will use the return value of that method for encoding as JSON
    instead of the object.

    If *ignore_nan* is true (default: ``False``), then out of range
    :class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
    ``null`` in compliance with the ECMA-262 specification. If true, this will
    override *allow_nan*.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
    whenever possible.
    """
    # Fast path: when every option matches the module defaults, reuse the
    # shared cached encoder instead of constructing a new one per call.
    if (
        not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and namedtuple_as_object and tuple_as_array
        and not bigint_as_string and not sort_keys
        and not item_sort_key and not for_json
        and not ignore_nan and int_as_string_bitcount is None
        and not kw
    ):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    # Non-default options: build a one-off encoder configured accordingly.
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding, default=default,
        use_decimal=use_decimal,
        namedtuple_as_object=namedtuple_as_object,
        tuple_as_array=tuple_as_array,
        bigint_as_string=bigint_as_string,
        sort_keys=sort_keys,
        item_sort_key=item_sort_key,
        for_json=for_json,
        ignore_nan=ignore_nan,
        int_as_string_bitcount=int_as_string_bitcount,
        **kw).encode(obj)
# Shared module-level decoder reused by the load()/loads() fast path when no
# custom options are supplied; rebuilt by _toggle_speedups() when the
# accelerated implementations are switched on or off.
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
                               object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
         parse_int=None, parse_constant=None, object_pairs_hook=None,
         use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
         **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work;
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, is called with the result of every JSON
    object decoded, and its return value is used in place of the given
    :class:`dict`. This can be used to provide custom deserializations
    (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function called with the result of
    any object literal decode as an ordered list of pairs. Its return
    value is used instead of the :class:`dict`. This can be used to
    implement custom decoders that rely on the order in which key/value
    pairs are decoded (for example, :func:`collections.OrderedDict` will
    remember the order of insertion). If *object_hook* is also defined,
    *object_pairs_hook* takes priority.

    *parse_float*, if specified, is called with the string of every JSON
    float to be decoded. By default this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, is called with the string of every JSON int
    to be decoded. By default this is equivalent to ``int(num_str)``. This
    can be used to use another datatype or parser for JSON integers
    (e.g. :class:`float`).

    *parse_constant*, if specified, is called with one of the following
    strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used
    to raise an exception if invalid JSON numbers are encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg. NOTE: You should use *object_hook* or *object_pairs_hook*
    instead of subclassing whenever possible.
    """
    # Read the whole document, then delegate all option handling to loads().
    document = fp.read()
    return loads(
        document,
        encoding=encoding,
        cls=cls,
        object_hook=object_hook,
        parse_float=parse_float,
        parse_int=parse_int,
        parse_constant=parse_constant,
        object_pairs_hook=object_pairs_hook,
        use_decimal=use_decimal,
        **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
          parse_int=None, parse_constant=None, object_pairs_hook=None,
          use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* determines the encoding used to interpret any
    :class:`str` objects decoded by this instance (``'utf-8'`` by
    default). It has no effect when decoding :class:`unicode` objects.
    Note that currently only encodings that are a superset of ASCII work;
    strings of other encodings should be passed in as :class:`unicode`.

    *object_hook*, if specified, is called with the result of every JSON
    object decoded, and its return value is used in place of the given
    :class:`dict`. This can be used to provide custom deserializations
    (e.g. to support JSON-RPC class hinting).

    *object_pairs_hook* is an optional function called with the result of
    any object literal decode as an ordered list of pairs. Its return
    value is used instead of the :class:`dict`. This can be used to
    implement custom decoders that rely on the order in which key/value
    pairs are decoded (for example, :func:`collections.OrderedDict` will
    remember the order of insertion). If *object_hook* is also defined,
    *object_pairs_hook* takes priority.

    *parse_float*, if specified, is called with the string of every JSON
    float to be decoded. By default this is equivalent to
    ``float(num_str)``. This can be used to use another datatype or parser
    for JSON floats (e.g. :class:`decimal.Decimal`).

    *parse_int*, if specified, is called with the string of every JSON int
    to be decoded. By default this is equivalent to ``int(num_str)``. This
    can be used to use another datatype or parser for JSON integers
    (e.g. :class:`float`).

    *parse_constant*, if specified, is called with one of the following
    strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used
    to raise an exception if invalid JSON numbers are encountered.

    If *use_decimal* is true (default: ``False``) then it implies
    parse_float=decimal.Decimal for parity with ``dump``.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg. NOTE: You should use *object_hook* or *object_pairs_hook*
    instead of subclassing whenever possible.
    """
    # Fast path: with no custom options we can reuse the shared decoder.
    has_custom_options = (
        cls is not None or encoding is not None or object_hook is not None
        or parse_int is not None or parse_float is not None
        or parse_constant is not None or object_pairs_hook is not None
        or use_decimal or kw
    )
    if not has_custom_options:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Fold the explicitly supplied hooks into the keyword dict so that the
    # decoder class sees them only when they were actually provided.
    for name, value in (('object_hook', object_hook),
                        ('object_pairs_hook', object_pairs_hook),
                        ('parse_float', parse_float),
                        ('parse_int', parse_int),
                        ('parse_constant', parse_constant)):
        if value is not None:
            kw[name] = value
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
    """Switch the package between its C accelerators and the pure-Python
    implementations, then rebuild the cached default codecs so they pick
    up the change.
    """
    from . import decoder as dec
    from . import encoder as enc
    from . import scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer each C implementation, falling back to the Python version
        # when the extension module did not provide one.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (
            enc.c_encode_basestring_ascii or enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        # Force the pure-Python implementations everywhere.
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    # The module-level default codecs captured the old implementations at
    # construction time, so they must be recreated to reflect the toggle.
    global _default_decoder
    _default_decoder = JSONDecoder(encoding=None,
                                   object_hook=None,
                                   object_pairs_hook=None)
    global _default_encoder
    _default_encoder = JSONEncoder(skipkeys=False,
                                   ensure_ascii=True,
                                   check_circular=True,
                                   allow_nan=True,
                                   indent=None,
                                   separators=None,
                                   encoding='utf-8',
                                   default=None)
def simple_first(kv):
    """Helper function to pass to item_sort_key to sort simple
    elements to the top, then container elements.
    """
    key, value = kv
    is_container = isinstance(value, (list, dict, tuple))
    return (is_container, key)
| gpl-3.0 |
Lokke/eden | languages/ja.py | 6 | 353165 | # -*- coding: utf-8 -*-
{
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'この地域を地理的に指定するロケーション。これはロケーションの階層構造のうちの一つか、ロケーショングループの一つか、この地域の境界に面するロケーションです。',
"Acronym of the organization's name, eg. IFRC.": '団体の略称 (IFRCなど)',
"Authenticate system's Twitter account": '認証システムの Twitter アカウント',
"Can't import tweepy": 'tweepyをインポートできません',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "救援要請と寄付項目を関連付けるには、項目左の'寄付'ボタンを押してください。",
"Couldn't import tweepy library": 'tweepy libraryをインポートできません',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": 'サイトの所在地住所を詳細に記述します。情報伝達と物品搬送に使用します。このサイトに関する情報を、以下の「ロケーション」項目にGIS/地図データを挿入できることに注意してください。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'もしこの設定が地域メニューにある地域を指しているのであれば、メニューで使う名前を設定してください。個人用の地図設定の名前では、ユーザの名前で設定されます。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとしてアサインされるように指定することができます。ただし、ユーザーのドメインと団体のドメイン項目に差異がない場合のみ有効です。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'この項目の内容はユーザーの基本所在地となり、ユーザーが地図上に表示されるようになります。',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'この設定が有効の場合、削除されたレコードには削除済みフラグが付与されるだけで、実際のデータは消去されません。一般のユーザが閲覧することはできませんが、データベースを直接参照することでデータを確認できます。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '行方不明者の登録が存在しない場合、「人物情報を追加」ボタンを押して、新規登録を行ってください。',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'リストに病院が表示されない場合、「病院情報を追加」することで新規に登録が可能です。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": 'オフィスが一覧にない場合は、「オフィスを追加」をクリックすることで新規のオフィスを追加できます。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "もしあなたの団体の登録がない場合、'団体を追加'リンクをクリックすることで追加が可能です",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'データを同期する際には、ネットワークを経由してではなく、ファイルから行うことも可能です。ネットワークが存在しない場合に利用されます。ファイルからのデータインポート、およびファイルへのエクスポートはこのページから実行可能です。右部のリンクをクリックしてください。',
"Level is higher than parent's": '親情報よりも高いレベルです',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "注意: SMS は'アクション可能'のためリクエストがフィルターされます。一方、ツイートのリクエストはフィルターされません。よって、これは検索する手段となります",
"Need a 'url' argument!": "'url'引数が必要です。",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "オプション項目。ジオメトリカラムの名称です。PostGISでのデフォルト値は 'the_geom'となります。",
"Parent level should be higher than this record's level. Parent level is": '親レベルは、このレコードのレベルより上位でなければなりません。親レベルは',
"Password fields don't match": 'パスワードが一致しません。',
"Phone number to donate to this organization's relief efforts.": 'この団体の救援活動に対して寄付を行う際の連絡先となる電話番号を記載します。',
"Please come back after sometime if that doesn't help.": 'この方法で問題が解決しない場合は、しばらく時間を置いて再度アクセスしてください。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "'Delete Old'ボタンを押すことで、データを参照しているレコードは全て参照先を再指定され、古い方のレコードは削除されます。",
"Quantity in %s's Inventory": '%s 倉庫にある量',
"Search here for a person's record in order to:": '人物情報の検索を行い、以下の機能を実現します:',
"Select a person in charge for status 'assigned'": "状況が '割り当て済み' である担当者を選択します",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'もし全ての特定の場所が住所階層の最下層で親の場所を必要とするなら、これを選択して下さい。例えば、もし「地区」が階層の最小の地域なら、全ての特定の場所は親階層の地区を持っている必要が有るでしょう。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'もし全ての特定の場所が住所階層での親の場所を必要とするなら、これを選択して下さい。これは被災地の「地域」表示の設定に役立てられます。',
"Sorry, things didn't get done on time.": 'すいません、時間通りに行われていません。',
"Sorry, we couldn't find that page.": 'すいません、お探しのページは見つかりませんでした。',
"System's Twitter account updated": 'システムのTwitterアカウントを変更しました',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "この線、あるいは面の<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>具体的な説明</a>",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": 'このプロジェクトの資金提供組織を選択します。複数の項目を選択するには、Ctrlキーを押しながらクリックしてください。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": 'この団体の活動分野を選択します。複数の項目を選択するには、コントロールキーを押しながらクリックしてください。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '画像ファイルのURLです。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"The person's manager within this Office/Project.": 'このオフィス/プロジェクトのマネージャ。',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '遺体の検索を行うには、遺体のID番号を入力してください。検索時のワイルドカード文字として、%を使うことができます。入力せずに「検索」すると、全ての遺体が表示されます。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'ID情報を入力することで、遺体を検索します。ワイルドカードとして % が使用できます。何も指定せずに「検索」すると、全ての遺体が表示されます。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "病院を検索するには、名前、病院のID、団体名、省略名のいずれかをスペース(空白)で区切って入力してください。 % がワイルドカードとして使えます。全病院のリストを表示するにはなにも入力せずに '検索' ボタンを押してください。",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '探し出したい病院をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての病院を表示します。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '病院を検索するには、名称の一部かIDを入力してください。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」を押した場合、全ての病院を表示します。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "ロケーションを検索するには、名前を入力します。%をワイルドカード文字として使用することが出来ます。何も入力しないで '検索' をクリックするとすべてのロケーションが表示されます。",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '苗字、名前などを半角スペースで区切って入力し、人物検索して下さい。「%」を使うとファジー検索できます。何も入力せずに検索すれば、全ての情報を検索表示します。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '人を検索するためには、お名前(苗字、名前または両方)を入力してください。また姓名の間にはスペースをいれてください。ワイルドカードとして % が使えます。すべての人物情報をリストするには、検索ボタンをおしてください。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '探し出したい支援要請をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての支援要請を表示します。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'アセスメントを検索するには、アセスメントのチケット番号の一部を入力してください。ワイルドカードとして % が使えます。すべてのアセスメントをリストするには、なにも入力せず検索ボタンをおしてください。',
"Type the first few characters of one of the Person's names.": '検索したい人物の名前の先頭数文字を入力してください',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '画像ファイルのアップロードはここから行ってください。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"View and/or update details of the person's record": '人物情報を検索し、詳細の閲覧や更新を行ないます',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'データベースの直接閲覧/編集(注意:フレームワークの規則に反します)',
"What are the people's normal ways to obtain food in this area?": 'この地域で食料を調達するための手段を記載してください',
"What should be done to reduce women and children's vulnerability to violence?": '未成年や女性を暴力から守るために、どのような活動や設備が必要かを記載してください',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '他とデータを同期するとき、二つ(以上)の団体がそれぞれ更新した情報を同期するときにコンフリクトが発生することがあります。同期モジュールは、コンフリクトを自動解決しようと試みますが、解決できないことがあります。そのような場合、手作業でコンフリクトを解決するか、クリックして次のページに進んでください。',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'ユーザ固有の設定を行っている場合、ここで変更を行っても、目に見える変化がない場合があります。ユーザ固有の設定を行うには、以下をクリックしてください。 ',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": '変更が保存されていません。「キャンセル」をクリックした後、「保存」を押して保存してください。変更を破棄するには、OK をクリックしてください。',
"You haven't made any calculations": '計算が実行されていません',
"You haven't yet Verified your account - please check your email": '利用者登録はまだ有効ではありません。',
"couldn't be parsed so NetworkLinks not followed.": 'パースできなかったため、 NetworkLinksはフォローされません。',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'OpenLayersで未サポートの機能である GroundOverlayやScreenOverlayを含むため、不具合がある可能性があります。',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" は、"field1=\'newvalue\'" のようなオプションです。"JOIN の結果を更新または削除することはできません。',
'# Houses Damaged': '損傷した家屋の数',
'# Houses Flooded': '浸水した家屋数',
'# People Needing Food': '食料が必要な人の数',
'# People at Risk From Vector-Borne Diseases': '生物が媒介する疾病の危険性がある人の数',
'# People without Access to Safe Drinking-Water': '安全な飲料水が確保されていない人の数',
'# of Houses Damaged': '損壊した家屋数',
'# of Houses Destroyed': '全壊した家屋数',
'# of International Staff': '国外スタッフ人数',
'# of National Staff': '国内スタッフの人数',
'# of People Affected': '被災者数',
'# of People Deceased': '死亡者数',
'# of People Injured': '負傷者数',
'# of Vehicles': '車両数',
'%s Create a new site or ensure that you have permissions for an existing site.': '%s 新しいサイトを作成するか既存のサイトに対する権限を持っているかどうか確認して下さい',
'%s rows deleted': '%s 行を削除しました',
'%s rows updated': '%s 行を更新しました',
'& then click on the map below to adjust the Lat/Lon fields': 'そして下の地図をクリックして、緯度 / 経度フィールドを調節してください',
'* Required Fields': '* は必須項目です',
'0-15 minutes': '0-15 分間',
'1 Assessment': '1アセスメント',
'1 location, shorter time, can contain multiple Tasks': '1つの地域における短期間の活動を表し、1つの支援活動のなかで複数のタスクを実行します。',
'1-3 days': '1-3 日間',
'1. Fill the necessary fields in BLOCK letters.': '1. 太字の項目は必須項目です.',
'15-30 minutes': '15-30 分間',
'2 different options are provided here currently:': '現在は、2種類のオプションが提供されています。',
'2. Always use one box per letter and leave one box space to seperate words.': '2. 一マス一文字で、単語の間は一マス開けてください。',
'2x4 Car': '2x4 車両',
'30-60 minutes': '30-60 分間',
'4-7 days': '4-7 日間',
'4x4 Car': '四輪駆動車',
'8-14 days': '8-14 日間',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '機能クラスに設定したマーカーを上書きする必要があれば、個々のロケーションに設定したマーカーを設定します',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'このデータ内容を確認できるファイルやURL情報、連絡先担当者などのリファレンスデータを記載します。最初の何文字かを入力することで、既存の類似文書にリンクすることが可能です。',
'A Warehouse is a physical place which contains Relief Items available to be Distributed.': '倉庫とは、救援物資の配布を行うことができる物理的な地点を意味します。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫 / サイトとは、物資の保管場所のことであり、住所とGIS情報が付帯します。特定の建物や、市内の特定地域などがあげられます。',
'A brief description of the group (optional)': 'グループの詳細(オプション)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'GPSからダウンロードしたファイルには、その地点に関する様々な情報がXML形式で保存されています。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'GPSから取得したGPX形式のファイル。タイムスタンプは画像と関連づけられ、地図上に配置することができます。',
'A library of digital resources, such as photos, documents and reports': '写真や文書、レポートなど、電子化された資料',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'ロケーションを取りまとめた単位はロケーショングループと呼称されます(たいていは、一定範囲内の管理対象地域をさします)。このページから、ロケーションをグループに追加することができます。ロケーショングループ単位で地図上に表示させたり、検索結果として表示させることが可能となります。グループを使用することで、1つの管理地域に縛られない被災地域定義が可能となります。ロケーショングループは、地域メニューから定義できます。',
'A location group must have at least one member.': 'ロケーショングループには、メンバーが最低一人必要です。',
'A place within a Site like a Shelf, room, bin number etc.': 'Site内に存在する施設。例えば棚、部屋、Binの番号など',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'binのスナップショットや追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A survey series with id %s does not exist. Please go back and create one.': 'ID番号 %sに関するsurvey seriesは存在しません。「戻る」ボタンを押して、新規に作成してください。',
'ABOUT THIS MODULE': 'このモジュールについて',
'ABOUT': '概要',
'ACCESS DATA': 'アクセスデータ',
'ANY': '全て',
'API is documented here': 'APIに関する文書はこちら',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ニュージーランド向けに変更したATC-20(建物の簡易安全性評価プロセス)',
'ATC-20': 'ATC-20(建物の簡易安全性評価プロセス)',
'Abbreviation': '省略',
'Ability to Fill Out Surveys': '調査記入能力',
'Ability to customize the list of details tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズ可否',
'Ability to customize the list of human resource tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズの可否',
'Ability to customize the list of important facilities needed at a Shelter': '避難所で追跡する人的資源のリストのカスタマイズの可否',
'Ability to track partial fulfillment of the request': '支援要請の部分的な達成度の追跡可否',
'Ability to view Results of Completed and/or partially filled out Surveys': '完了または一部完了した聞き取り調査の結果をみる機能',
'About Sahana Eden': 'Sahana Edenについて',
'About Sahana': 'Sahanaについて',
'About this module': 'モジュールの詳細',
'About': '情報',
'Access denied': 'アクセスが拒否されました',
'Access to Shelter': '避難所へのアクセス',
'Access to education services': '学校へのアクセス',
'Accessibility of Affected Location': '被災地域へのアクセス方法',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '利用者登録の申請を受け付けました。所属団体またはサイト管理者による承認を待っています。',
'Acronym': '略称/イニシャル',
'Actionable by all targeted recipients': 'すべての対象受信者にとって実用的な',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '指定された参加者のみ実施可能です。<note>の中に行使するためのIDがあることが必要です。',
'Actionable': '対応可能',
'Actioned?': '実施済み?',
'Actions taken as a result of this request.': '要請に対して行われるアクション',
'Actions': 'アクション',
'Active Problems': '対処中の問題',
'Activities Map': '支援活動マップ',
'Activities are blue.': '支援活動(アクティビティ)は青色で表示されます。',
'Activities matching Assessments:': 'アセスメントに適合した支援活動',
'Activities of boys 13-17yrs before disaster': '災害発生前の13-17歳男子の活動状況',
'Activities of boys 13-17yrs now': '現在の13-17歳男子の活動状況',
'Activities of boys <12yrs before disaster': '災害発生前の12歳以下男子の活動状況',
'Activities of boys <12yrs now': '現在の12歳以下男子の活動状況',
'Activities of children': '子供たちの活動',
'Activities of girls 13-17yrs before disaster': '災害発生前の13-17歳女子の活動状況',
'Activities of girls 13-17yrs now': '現在の13-17歳女子の活動状況',
'Activities of girls <12yrs before disaster': '災害発生前の12歳以下女子の活動状況',
'Activities of girls <12yrs now': '現在の12歳以下女子の活動状況',
'Activities': '支援活動',
'Activity Added': '支援活動を追加しました',
'Activity Deleted': '支援活動を削除しました',
'Activity Details': '支援活動の詳細',
'Activity Report': '支援活動レポート',
'Activity Reports': '支援活動レポート',
'Activity Type': '支援活動タイプ',
'Activity Updated': '支援活動を更新しました',
'Activity': '支援活動',
'Add Address': 'アドレスを追加',
'Add Activity Type': '支援活動タイプを追加',
'Add Aid Request': '治療要請を追加',
'Add Alternative Item': '代わりの物資を追加',
'Add Assessment Summary': 'アセスメントの要約を追加',
'Add Assessment': 'アセスメントを追加',
'Add Baseline Type': '基準値タイプの追加',
'Add Baseline': '基準値の追加',
'Add Bin Type': 'Bin Typeを追加',
'Add Bins': 'Binを追加',
'Add Bundle': 'Bundleを追加',
'Add Catalog.': 'カタログを追加',
'Add Category': 'カテゴリを追加',
'Add Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係を追加',
'Add Config': '設定を追加',
'Add Contact': '連絡先を追加',
'Add Contact Information': '連絡先情報を追加',
'Add Credential': '証明書の追加',
'Add Credentials': '証明書の追加',
'Add Detailed Evaluation': '詳細な評価を追加',
'Add Disaster Victims': '被災者情報を追加',
'Add Distribution.': '配給所を追加',
'Add Donor': '資金提供組織を追加',
'Add Flood Report': '洪水レポートを追加',
'Add Group Member': 'グループメンバを追加',
'Add Identity': 'IDを追加',
'Add Image': '画像を追加',
'Add Impact Type': '災害影響のタイプを追加',
'Add Impact': '被災状況の追加',
'Add Inventory Item': '備蓄物資を追加します',
'Add Inventory Store': '物資集積地点を追加',
'Add Item (s)': '物資を追加',
'Add Item Catalog': '物資カタログを追加',
'Add Item Category': '救援物資カタログカテゴリを追加',
'Add Item Sub-Category': '救援物資サブカテゴリを追加',
'Add Item to Request': '要求する支援物資の登録',
'Add Item to Shipment': '輸送に物資を追加する',
'Add Item': '物資を追加',
'Add Key': 'Keyを追加',
'Add Kit': 'Kitを追加',
'Add Level 1 Assessment': 'レベル1アセスメントを追加',
'Add Level 2 Assessment': 'レベル2アセスメントを追加',
'Add Line': '行を追加',
'Add Location Group': 'ロケーショングループを追加',
'Add Locations': 'ロケーションを追加',
'Add Log Entry': 'ログエントリを追加',
'Add Member': 'メンバを追加',
'Add Membership': 'メンバシップを追加',
'Add Message': 'メッセージを追加',
'Add Need Type': '需要タイプを追加',
'Add Need': '要求を追加',
'Add New Aid Request': '援助要請を新規追加',
'Add New Assessment Summary': '新規アセスメントの要約を追加',
'Add New Baseline Type': '基準値タイプの新規追加',
'Add New Baseline': '新しい基準値を追加',
'Add New Bin Type': 'Bin Typeを新規追加',
'Add New Bin': 'Binを新規追加',
'Add New Budget': '予算を新規追加',
'Add New Bundle': 'Bundleを新規追加',
'Add New Cluster Subsector': 'クラスタのサブセクタを新規作成',
'Add New Cluster': 'クラスタを新規追加',
'Add New Commitment Item': '物資コミットメントを新規追加',
'Add New Config': '設定を新規追加',
'Add New Distribution Item': '配給物資を新規追加',
'Add New Distribution': '配給所を新規追加',
'Add New Document': '文書を新規追加',
'Add New Donor': '資金提供組織を新規追加',
'Add New Entry': 'エントリを新規追加',
'Add New Flood Report': '洪水情報を新規追加',
'Add New Image': '画像を新規追加',
'Add New Impact Type': '災害影響のタイプを新規追加',
'Add New Impact': '新規影響を追加',
'Add New Inventory Item': '備蓄物資を新規追加',
'Add New Inventory Store': '物資集積場所を新規追加',
'Add New Item Catalog Category': '物資カタログカテゴリを新規追加',
'Add New Item Catalog': '物資カタログを新規追加',
'Add New Item Sub-Category': '物資サブカテゴリを新規追加',
'Add New Item to Kit': 'キットに救援物資を新規追加',
'Add New Key': 'Keyを新規追加',
'Add New Level 1 Assessment': 'レベル1アセスメントを新規追加',
'Add New Level 2 Assessment': 'レベル2アセスメントを新規追加',
'Add New Member': 'メンバを新規追加',
'Add New Membership': 'メンバシップを新規追加',
'Add New Metadata': 'メタデータを新規追加',
'Add New Need Type': '需要タイプを新規追加',
'Add New Need': '新しい要求を登録する',
'Add New Note': '追加情報を新規追加',
'Add New Peer': 'データ同期先を新規追加',
'Add New Position': '場所を新規追加',
'Add New Problem': '問題を新規追加',
'Add New Rapid Assessment': '被災地の現況アセスメントを新規追加',
'Add New Received Item': '受領した物資を新規追加',
'Add New Record': 'レコードを新規追加',
'Add New Request Item': '特定物資の要請を新規追加',
'Add New Request': '支援要請を新規追加',
'Add New Response': '返答を新規追加',
'Add New River': '河川情報を新規追加',
'Add New Role to User': 'ユーザに役割を新規割り当て',
'Add New Sent Item': '送った物資の追加',
'Add New Setting': '設定を新規追加',
'Add New Shipment to Send': '発送する輸送物資を新規追加',
'Add New Site': 'Siteを新規追加',
'Add New Solution': '解決案を提示する',
'Add New Staff Type': 'スタッフタイプを新規追加',
'Add New Staff': 'スタッフを新規追加',
'Add New Storage Location': '備蓄場所を新規追加',
'Add New Survey Answer': '新しい調査の回答を追加しました',
'Add New Survey Question': '調査項目を新規追加',
'Add New Survey Section': '新しい調査セクションを追加',
'Add New Survey Series': '新しい一連の調査を追加します',
'Add New Survey Template': 'Survey Templateを新規追加',
'Add New Team': 'チームを新規追加',
'Add New Ticket': 'チケットを新規追加',
'Add New Track': '追跡情報を新規追加',
'Add New Unit': '単位を新規追加',
'Add New User to Role': '新規ユーザに役割を割り当て',
'Add New Warehouse Item': '倉庫物資を新規追加',
'Add New': '新規追加',
'Add Note': 'ノートを追加',
'Add Peer': 'データ同期先を追加',
'Add Performance Evaluation': 'パフォーマンス評価を追加',
'Add Person': '人物情報を追加',
'Add Photo': '写真を追加',
'Add Point': 'ポイントを追加',
'Add Polygon': 'Polygonを追加',
'Add Position': '場所を追加',
'Add Problem': '問題を追加',
'Add Projections': '地図投影法を追加',
'Add Question': '質問事項を追加',
'Add Rapid Assessment': '被災地の現況アセスメントを追加',
'Add Rapid Evaluation': '迅速評価を追加',
'Add Recipient Site': '受け取りSiteを追加',
'Add Recipient': '受け取り担当者を追加',
'Add Record': 'レコードを追加',
'Add Recovery Report': '遺体回収レポートを追加',
'Add Reference Document': 'リファレンス文書を追加',
'Add Report': 'レポートを追加',
'Add Request Detail': '支援要請の詳細を追加',
'Add Request Item': '物資の要請を追加します',
'Add Request': '支援要請を追加',
'Add Response': '返答を追加',
'Add Section': 'Sectionを追加',
'Add Sender Organization': '送付元団体を追加',
'Add Sender Site': '送付元Siteを追加',
'Add Setting': '設定を追加',
'Add Shipment Transit Log': '輸送履歴を追加',
'Add Shipment/Way Bills': '輸送費/渡航費を追加',
'Add Site': 'サイトを追加',
'Add Skill Types': 'スキルタイプを追加',
'Add Solution': '解決案を追加',
'Add Staff Type': 'スタッフタイプを追加',
'Add Staff': 'スタッフを追加',
'Add Storage Bin ': 'Storage Binを追加 ',
'Add Storage Bin Type': 'Storage Bin Typeを追加',
'Add Storage Location': '備蓄地点を追加',
'Add Sub-Category': 'サブカテゴリを追加',
'Add Subscription': '寄付金情報を追加',
'Add Survey Answer': '調査の回答を追加',
'Add Survey Question': '聞き取り調査項目を追加',
'Add Survey Section': '調査セクションの追加',
'Add Survey Series': '一連の調査を追加',
'Add Survey Template': '調査テンプレートを追加',
'Add Team Member': 'メンバを追加',
'Add Team': 'チームを追加',
'Add Ticket': 'チケットを追加',
'Add Unit': '単位を追加',
'Add Volunteer Registration': 'ボランティア登録を追加',
'Add Warehouse Item': '倉庫物資を追加',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'ファイル、URL、あるいは、このデータの確認を行なう連絡先のような参照文書を追加します。参照文書を入力しない場合、代わりにあなたのメールが表示されます。',
'Add a Volunteer': 'ボランティアの追加',
'Add a new Relief Item.': '救援物資を新規追加',
'Add a new Site from where the Item is being sent.': 'この救援物資の送付先を新規サイトとして追加',
'Add a new Site where the Item is being sent to.': 'この物資の送付先サイトを新規追加',
'Add an Photo.': '写真を追加.',
'Add location': 'ロケーションを追加',
'Add main Item Category.': '主要なアイテムカテゴリを追加',
'Add main Item Sub-Category.': '主要な救援物資サブカテゴリを追加',
'Add new Group': 'グループを新規追加',
'Add new Individual': '個人を新規追加',
'Add new position.': '新しいポジションを追加してください。',
'Add new project.': 'プロジェクトを新規追加',
'Add new staff role.': 'スタッフの権限を新規追加',
'Add or Update': '追加、あるいは更新',
'Add the Storage Bin Type.': 'Storage Binタイプを追加します。',
'Add the Storage Location where this bin is located.': 'binが保存されている貯蔵場所を追加します。',
'Add the Storage Location where this this Bin belongs to.': 'このBinがある備蓄地点を追加します。',
'Add the main Warehouse/Site information where this Bin belongs to.': 'その物資の備蓄スペースとなっている倉庫/サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Item is to be added.': 'この物資が追加されることになっている主要な倉庫 / サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Storage location is.': 'その物資の備蓄場所となっている倉庫/サイトの情報を追加してください。',
'Add the unit of measure if it doesnt exists already.': '距離単位が未登録の場合、単位を追加します。',
'Add to Bundle': 'Bundleへの登録',
'Add to Catalog': 'カタログへ登録',
'Add to budget': '予算項目へ登録',
'Add': '追加',
'Add/Edit/Remove Layers': 'レイヤを追加/編集/削除',
'Added to Group': 'メンバシップを追加しました',
'Added to Team': 'メンバシップを追加しました',
'Additional Beds / 24hrs': '追加ベッド予測数 / 24h',
'Additional Comments': '追加コメント',
'Additional quantity quantifier – i.e. “4x5”.': '数量を表す追記(例 「4x5」)',
'Address Details': '住所情報の詳細',
'Address Type': '住所情報タイプ',
'Address added': '住所情報を追加しました',
'Address deleted': '住所情報を削除しました',
'Address updated': '住所情報を更新しました',
'Address': '住所情報',
'Addresses': '住所',
'Adequate food and water available': '適切な量の食料と水が供給されている',
'Adequate': '適正',
'Adjust Item(s) Quantity': 'アイテム量の修正',
'Adjust Items due to Theft/Loss': 'アイテム量の修正(盗難/紛失のため)',
'Admin Email': '管理者の電子メール',
'Admin Name': '管理者名',
'Admin Tel': '管理者の電話番号',
'Admin': '管理者',
'Administration': '管理',
'Administrator': '管理者',
'Admissions/24hrs': '患者増加数/24h',
'Adolescent (12-20)': '青年(12-20)',
'Adolescent participating in coping activities': '未成年が災害対応に従事',
'Adult (21-50)': '成人(21-50)',
'Adult ICU': '成人 ICU',
'Adult Psychiatric': '精神病の成人',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': '刑務所で服役中の成人がいる',
'Advanced Bin Search': 'Binの詳細検索',
'Advanced Catalog Search': 'カタログの詳細検索',
'Advanced Category Search': '詳細カテゴリー検索',
'Advanced Item Search': '詳細な物資検索',
'Advanced Location Search': '詳細な位置検索',
'Advanced Site Search': 'Siteの詳細検索',
'Advanced Sub-Category Search': 'サブカテゴリの詳細検索',
'Advanced Unit Search': '高度な単位検索',
'Advanced': '詳細',
'Advanced:': 'もっと正確に:',
'Advisory': '注意喚起',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'このボタンをクリックすると、解決法のペアが順に表示されます。各ペアから、最も適する項目を1つずつ選択してください。',
'Age Group': '年齢グループ',
'Age group does not match actual age.': '年齢グループが実際の年齢と一致しません。',
'Age group': '年齢グループ',
'Aggravating factors': '悪化要因',
'Aggregate Items': 'アイテムの集約',
'Agriculture': '農業',
'Aid Request Details': '援助要請の詳細',
'Aid Request added': '援助要請を追加しました',
'Aid Request deleted': '援助要請を削除しました',
'Aid Request updated': '援助要請を更新しました',
'Aid Request': '援助要請',
'Aid Requests': '援助要請',
'Air Transport Service': '物資空輸サービス',
'Aircraft Crash': '飛行機事故',
'Aircraft Hijacking': '航空機ハイジャック',
'Airport Closure': '空港閉鎖',
'Airspace Closure': '空域閉鎖',
'Alcohol': 'アルコール',
'Alert': 'アラート',
'All Inbound & Outbound Messages are stored here': '送受信した全てのメッセージはここに格納されます。',
'All Locations': '全てのロケーション',
'All Records': 'すべてのレコード',
'All Requested Items': '物資要請一覧',
'All Resources': 'すべての資源',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'このサイトのSahana Software Foundationで提供されるデータのライセンスは、CCA (Creative Commons Attribution licence)となります。しかし、すべてのデータの発生源が、このサイトであるとは限りません。詳細は、各エントリの情報ソースの項目に記載されています。',
'All': '全て',
'Allowed to push': 'プッシュが許可済みである',
'Allows a Budget to be drawn up': '予算の策定を行ないます',
'Allows authorized users to control which layers are available to the situation map.': '認証済みユーザーが「状況地図のどのレイヤが利用できるか」を制御することを許可します。',
'Alternative Item Details': '代わりの品物についての詳細',
'Alternative Item added': '代わりの物資を追加しました',
'Alternative Item deleted': '代わりの品物が削除されました',
'Alternative Item updated': '代わりの物資を更新しました',
'Alternative Item': '代わりの物資',
'Alternative Items': '代わりとなる物資',
'Alternative infant nutrition in use': '利用中の乳児用代替食',
'Alternative places for studying available': '学校以外の場所を学習に利用可能である',
'Alternative places for studying': '授業開設に利用可能な施設',
'Ambulance Service': '救急サービス',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '物資集積場所とは、救援物資の配給能力をもつ、物理的な場所を指します。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、調達、その他様々な資産やリソースの管理といった機能。',
'An item which can be used in place of another item': '他の物資の代わりに使う物資',
'Analysis of Completed Surveys': '完了したフィードバックの分析',
'Animal Die Off': '動物の死',
'Animal Feed': '動物のエサ',
'Animals': '動物',
'Answer Choices (One Per Line)': '選択肢(一行に一つ)',
'Anthropology': '人類学',
'Antibiotics available': '抗生物質が利用可能',
'Antibiotics needed per 24h': '24時間ごとに必要な抗生物質',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'ファイル内の利用可能なすべてのメタデータ(タイムスタンプ、作成者、緯度経度等)を自動的に読み込みます。',
'Any comments about this sync partner.': 'データの同期先に関するコメント',
'Apparent Age': '年齢(外見)',
'Apparent Gender': '性別(外見)',
'Application Permissions': 'アプリケーションに対する権限',
'Application': '申請',
'Applications': 'アプリケーション',
'Appropriate clothing available': '適切な衣料が利用可能である',
'Appropriate cooking equipment/materials in HH': '世帯内にて適切な調理器具/食材が利用可能である',
'Approved': '承認されました',
'Approver': '承認者',
'Approx. number of cases/48h': '事象の発生概数/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48時間以内に発生した、5歳未満小児の下痢症状発生件数を記載してください。概数でかまいません',
'Archive not Delete': 'Archiveを削除しない',
'Arctic Outflow': '北極気団の南下',
'Are basic medical supplies available for health services since the disaster?': '災害発生後、基本的な医療行為を行えるよう、ヘルスサービスに対して供給があったかどうかを記載します',
'Are breast milk substitutes being used here since the disaster?': '災害発生後、母乳代替品が使われているかどうかを記載します',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '日中時間帯、この地域での生活や遊び、通行によって、未成年や高齢者、障碍者に肉体的な危害が及ぶ可能性があるかを記載します',
'Are the chronically ill receiving sufficient care and assistance?': '慢性病の罹患者に対して、十分なケアと介護が行われているかを記載します',
'Are there adults living in prisons in this area?': 'この地域で刑務所に収容されている成人がいるかどうかを記載してください',
'Are there alternative places for studying?': '学校以外に学習を行える場所があるかどうかを記載してください',
'Are there cases of diarrhea among children under the age of 5?': '5歳未満の幼児に下痢症状が発生しているかどうかを記載してください',
'Are there children living in adult prisons in this area?': 'この地域で、成人用刑務所に収容されている未成年がいるかどうかを記載してください',
'Are there children living in boarding schools in this area?': 'この地域で、寄宿舎に居住している未成年がいるかどうかを記載してください',
'Are there children living in homes for disabled children in this area?': 'この地域で、障がいのある子供の世話をするために家にいる未成年がいるかどうかを記載してください',
'Are there children living in juvenile detention in this area?': 'この地域で、少年院に収容されている未成年がいるかどうかを記載してください',
'Are there children living in orphanages in this area?': 'この地域で、孤児院に居住している未成年がいるかどうかを記載してください',
'Are there children with chronical illnesses in your community?': '慢性疾患をもった子どもが共同体の中にいるかどうかを記載してください',
'Are there health services functioning for the community since the disaster?': '災害発生後、共同体で医療サービスが機能しているかどうかを記載してください',
'Are there older people living in care homes in this area?': 'この地域で、介護施設に居住している高齢者がいるかどうかを記載してください',
'Are there older people with chronical illnesses in your community?': 'この共同体のなかで、慢性疾患を患っている高齢者がいるかどうかを記載してください',
'Are there people with chronical illnesses in your community?': 'この共同体の中で、慢性疾患を患っている人物がいるかどうかを記載してください',
'Are there separate latrines for women and men available?': 'トイレが男女別になっているかどうかを記載してください',
'Are there staff present and caring for the residents in these institutions?': 'これら施設の居住者に対して、ケアと介護を行えるスタッフが存在するかどうかを記載してください',
'Area': 'エリア',
'Areas inspected': '調査済み地域',
'Assessment Details': 'アセスメントの詳細',
'Assessment Reported': 'アセスメントを報告しました',
'Assessment Summaries': 'アセスメントの要約',
'Assessment Summary Details': 'アセスメント要約の詳細',
'Assessment Summary added': 'アセスメントの要約を追加しました',
'Assessment Summary deleted': 'アセスメントの要約を削除しました',
'Assessment Summary updated': 'アセスメントの要約を更新しました',
'Assessment Type': 'アセスメントタイプ',
'Assessment added': 'アセスメントを追加しました',
'Assessment admin level': 'アセスメントの管理レベル',
'Assessment and Activities Gap Analysis Map': 'アセスメントと活動のギャップについての解析マップ',
'Assessment and Activities Gap Analysis Report': 'アセスメントと支援活動のギャップ解析レポート',
'Assessment deleted': 'アセスメントを削除しました',
'Assessment timeline': 'アセスメントタイムライン',
'Assessment updated': 'アセスメントを更新しました',
'Assessment': 'アセスメント',
'Assessments Needs vs. Activities': '需要アセスメントと支援活動のギャップ',
'Assessments and Activities': 'アセスメントと支援活動',
'Assessments are shown as green, yellow, orange, red.': 'アセスメントは、緑・黄・オレンジ・赤のいずれかの色で表されます。',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。データには、WFP(国連世界食糧計画)アセスメントも含まれます',
'Assessments are structured reports done by Professional Organizations': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。',
'Assessments': 'アセスメント',
'Assessments:': 'アセスメント:',
'Assessor': '査定実施者',
'Asset Assigned': '資産割り当て',
'Asset Assignment Details': '資産割り当ての詳細',
'Asset Assignments deleted': '資産の割り当てを削除しました',
'Asset Assignments updated': '資産割り当てを更新しました',
'Asset Assignments': '資産割り当て',
'Asset Details': '資産の詳細',
'Asset Management': '資産管理',
'Asset Number': '資産番号',
'Asset added': '資産を追加しました',
'Asset deleted': '資産を削除しました',
'Asset updated': '資産を更新しました',
'Asset': '資産',
'Assets': '資産',
'Assign Asset': '資産割り当て',
'Assign Storage Location': '蓄積地点の割り当て',
'Assign to Org.': '組織に割り当て',
'Assigned To': '担当者',
'Assigned to': '担当者',
'Assigned': '割り当てられた',
'Assignments': '割り当て',
'Assistance for immediate repair/reconstruction of houses': '緊急の修理/家屋復旧の手伝い',
'Assistant': 'アシスタント',
'At/Visited Location (not virtual)': '実際に訪問した/訪問中のロケーション',
'Attend to information sources as described in <instruction>': '<instruction>に記載されている情報ソースへの参加',
'Attribution': '属性',
'Audit Read': '監査報告書の読み込み',
'Audit Write': '監査報告書の書き込み',
'Author': '作者',
'Automotive': '車両',
'Availability': 'ボランティア期間',
'Available Alternative Inventory Items': '利用可能な他の物資',
'Available Beds': '利用可能なベッド数',
'Available Inventory Items': '利用可能な倉庫内の物資',
'Available Messages': '利用可能なメッセージ',
'Available Records': '利用可能なレコード',
'Available databases and tables': '利用可能なデータベースおよびテーブル',
'Available for Location': '活動可能な地域',
'Available from': 'ボランティア開始日',
'Available in Viewer?': 'ビューワ内で利用可能?',
'Available until': 'ボランティア終了日',
'Availablity': '活動期間',
'Avalanche': '雪崩',
'Avoid the subject event as per the <instruction>': '<instruction>に従って対象の事象を避ける',
'Babies who are not being breastfed, what are they being fed on?': '乳児に対して母乳が与えられない場合、どうやって乳幼児の食事を確保しますか?',
'Baby And Child Care': '乳幼児へのケア',
'Background Color for Text blocks': 'テキストブロックの背景色',
'Background Color': '背景色',
'Bahai': 'バハイ',
'Baldness': '禿部',
'Balochi': 'バロチ語',
'Banana': 'バナナ',
'Bank/micro finance': '銀行/マイクロファイナンス',
'Barricades are needed': 'バリケードが必要',
'Base Layer?': '基本レイヤ?',
'Base Layers': '基本レイヤ',
'Base Location': '基本となるロケーション',
'Base Unit': '基本単位',
'Baseline Number of Beds': '平常時のベッド設置数',
'Baseline Type Details': '基準値タイプの詳細',
'Baseline Type added': '基準値タイプを追加しました',
'Baseline Type deleted': '基準値のタイプを削除しました',
'Baseline Type updated': '基準値タイプを更新しました',
'Baseline Type': '基準値タイプ',
'Baseline Types': '基準値の種類',
'Baseline added': '基準値を追加しました',
'Baseline deleted': '基準値を削除しました',
'Baseline number of beds of that type in this unit.': 'この施設における、通常状態のベッド収容数です。',
'Baseline updated': '基準値を更新しました',
'Baselines Details': '基準値の詳細',
'Baselines': '基準値',
'Basic Assess.': '基本アセスメント',
'Basic Assessment Reported': 'ベーシック・アセスメントを報告しました',
'Basic Assessment': '基本アセスメント',
'Basic Details': '基本情報',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '支援要請と寄付に関する基本情報です。カテゴリ、単位、連絡先詳細および状態等が記載されています。',
'Basic medical supplies available prior to disaster': '災害発生以前 基本的な医療行為の提供',
'Basic medical supplies available since disaster': '災害発生後 基本的な医療行為の提供',
'Basic reports on the Shelter and drill-down by region': '避難所の基本レポートと、地域による絞り込み',
'Basic': '基本',
'Baud rate to use for your modem - The default is safe for most cases': 'モデムを使用するためのボーレートです。大抵の場合はデフォルトが安全です。',
'Baud': 'ボー値',
'Beam': '梁',
'Bed Capacity per Unit': '施設ごとのベッド最大収容数',
'Bed Capacity': 'ベッド最大収容数',
'Bed Type': 'ベッド種別',
'Bed type already registered': 'ベッドのタイプは既に登録済みです。',
'Bedding materials available': '寝具が利用可能である',
'Below ground level': '地下',
'Beneficiary Type': '受益者タイプ',
'Biological Hazard': '生物災害',
'Biscuits': 'ビスケット',
'Blizzard': '吹雪',
'Blood Type (AB0)': '血液型 (AB0式)',
'Blowing Snow': '地吹雪',
'Boat': 'ボート',
'Bodies found': '未回収の遺体',
'Bodies recovered': '回収済みの遺体',
'Body Recovery Reports': '遺体回収レポート',
'Body Recovery Request': '遺体回収の要請',
'Body Recovery Requests': '遺体回収の要請',
'Body': '本文',
'Bomb Explosion': '爆発が発生',
'Bomb Threat': '爆発の危険性',
'Bomb': '爆発物',
'Border Color for Text blocks': 'テキストブロックの枠色',
'Bounding Box Insets': '領域を指定した枠組みへ差し込む',
'Bounding Box Size': '領域を指定した枠組みのサイズ',
'Boys 13-18 yrs in affected area': '影響地域内の13-18歳の男子数',
'Boys 13-18 yrs not attending school': '学校に来ていなかった13-18歳の男子数',
'Boys 6-12 yrs in affected area': '影響地域内の6-12歳の男子数',
'Boys 6-12 yrs not attending school': '学校に来ていなかった6-12歳の男子数',
'Brand Details': '銘柄の詳細',
'Brand added': '銘柄を追加しました',
'Brand deleted': '銘柄が削除されました',
'Brand updated': '銘柄が更新されました',
'Brand': '銘柄',
'Brands': '銘柄',
'Breast milk substitutes in use since disaster': '災害発生後から母乳代替品を使用している',
'Breast milk substitutes used prior to disaster': '災害前から母乳代替品を使用していた',
'Bricks': 'レンガ',
'Bridge Closed': '橋梁(通行止め)',
'Bucket': 'バケツ',
'Buddhist': '仏教徒',
'Budget Details': '予算の詳細',
'Budget Updated': '予算を更新しました',
'Budget added': '予算を追加しました',
'Budget deleted': '予算を削除しました',
'Budget updated': '予算を更新しました',
'Budget': '予算',
'Budgeting Module': '予算編成モジュール',
'Budgets': '予算編成',
'Buffer': 'バッファ',
'Bug': 'バグ',
'Building Aide': '建設援助',
'Building Assessment': '建物のアセスメント',
'Building Assessments': '建築物アセスメント',
'Building Collapsed': '崩壊した建物',
'Building Name': '建物名',
'Building Safety Assessments': '建物の安全アセスメント',
'Building Short Name/Business Name': '建物の名前 / 会社名',
'Building or storey leaning': '建物または階層が傾いている',
'Built using the Template agreed by a group of NGOs working together as the': '例えばECB等、多くのNGOによって利用されている形式を使っての記録が可能です。',
'Bulk Uploader': 'まとめてアップロード',
'Bundle Contents': '小包の内容',
'Bundle Details': 'Bundleの詳細',
'Bundle Updated': 'バンドルを更新しました',
'Bundle added': 'バンドルを追加しました',
'Bundle deleted': 'バンドルを削除しました',
'Bundle updated': 'バンドル・セットを更新しました',
'Bundle': 'バンドル',
'Bundles': 'バンドル',
'Burn ICU': '熱傷 ICU',
'Burn': '火傷(やけど)',
'Burned/charred': '火傷/炭化',
'Business damaged': 'ビジネスへの損害が発生している',
'By Inventory': '物資の送付元',
'By Person': '人物ごと',
'By Site': 'サイト別',
'By Warehouse': '送付元倉庫',
'CBA Women': 'CBA 女性',
'CN': '貨物運送状',
'CSS file %s not writable - unable to apply theme!': 'CSS ファイル %s が書き込み不可になっているため、テーマを適用することができません。',
'Calculate': '計算',
'Camp Coordination/Management': '仮泊施設間の調整 / 管理',
'Camp': '仮泊施設',
'Can only disable 1 record at a time!': '一度に1つしか無効にできません!',
'Can users register themselves for authenticated login access?': '新規ユーザが、他者の承認なしに自分を新規ユーザとして登録できるか?',
'Cancel Add': '追加を取り消す',
'Cancel Shipment': '輸送をキャンセルする',
'Cancel': 'キャンセル',
'Canceled': 'キャンセル',
'Candidate Matches for Body %s': 'Bodyに適合した候補者は %s',
'Canned Fish': '魚の缶詰',
'Cannot be empty': '必ず入力してください。',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'リンクされたレコードがあるので削除できません。このレコードよりも先に、リンク先のレコードを削除してください。',
'Cannot disable your own account!': '自分自身のアカウントを無効にする事はできません',
'Capacity (Max Persons)': '収容可能数 (最大人数)',
'Capacity (W x D X H)': '収容可能面積 (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '被災者の個々のグループについて、情報を取得する (ツアー旅行者、滞在者、家族、など)',
'Capture Information on each disaster victim': '被災者情報を個別に把握する',
'Capturing organizational information of a relief organization and all the projects they have in the region': '個々の支援団体と、地域内で実行中の全てのプロジェクトを取得します',
'Capturing the essential services each Volunteer is providing and where': '各ボランティアの居場所と、提供している主要なサービスを取得する',
'Capturing the projects each organization is providing and where': '各団体の所在地と、提供している主要なサービスを取得します',
'Cardiology': '心臓病学',
'Cash available to restart business': '事業再開に必要な資金調達が可能',
'Cassava': 'キャッサバ',
'Casual Labor': '一般労働',
'Casualties': '犠牲者',
'Catalog Item added': '救援物資カタログにアイテムを追加しました',
'Catalog Item deleted': 'カタログアイテムを削除しました',
'Catalog Item updated': '救援物資カタログを更新しました',
'Catalog Item': '救援物資カタログ',
'Catalog Items': '物資カタログ',
'Catalog Name': 'カタログ名',
'Catalog': 'カタログ',
'Category': 'カテゴリ',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog 間の関係を追加しました',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog 関係を削除しました',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog 間の関係を更新しました',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 間の関係',
'Ceilings, light fixtures': '天井、照明あり',
'Central point to record details on People': '被災者や支援者など、関係者情報の集積を行ないます',
'Certificate Status': '認証状態',
'Certification': '有資格者',
'Change Password': 'パスワードの変更',
'Check for errors in the URL, maybe the address was mistyped.': '入力したURLに間違いがないか確認してください。',
'Check if the URL is pointing to a directory instead of a webpage.': 'URLがウェブページではなくディレクトリを指定しているか、確認してください。',
'Check outbox for the message status': '送信箱を調べてメッセージステータスを確認する',
'Check to delete': '削除項目にチェック',
'Check to delete:': '削除項目にチェック:',
'Check': '確認',
'Check-In': 'チェックイン',
'Check-Out': 'チェックアウト',
'Check-in': 'チェックイン',
'Check-out': 'チェックアウト',
'Checklist created': 'チェックリストを作成しました',
'Checklist deleted': 'チェックリストを削除しました',
'Checklist of Operations': '作業項目チェックリスト',
'Checklist updated': 'チェックリストを更新しました',
'Checklist': 'チェックリスト',
'Chemical Hazard': '化学災害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '兵器による攻撃、脅威(化学兵器、生物兵器、放射能汚染、核兵器、高威力の爆発)',
'Chicken': 'ニワトリ',
'Child (2-11)': '子供 (2-11歳)',
'Child (< 18 yrs)': '子供 (18歳未満)',
'Child Abduction Emergency': '未成年誘拐警報',
'Child headed households (<18 yrs)': '代表者が未成年 (18歳以下)の世帯数',
'Child': '子供',
'Children (2-5 years)': '子供たち (2-5歳)',
'Children (5-15 years)': '子供たち(5-15歳)',
'Children (< 2 years)': '子供たち (2歳未満)',
'Children in adult prisons': '成人用刑務所に未成年がいる',
'Children in boarding schools': '寄宿制学校の児童がいる',
'Children in homes for disabled children': '障がい児施設にいる子ども',
'Children in juvenile detention': '少年院収容者がいる',
'Children in orphanages': '孤児院にいる子供がいる',
'Children living on their own (without adults)': '未成年のみで自活(成人無し)',
'Children not enrolled in new school': '新しい学校に入学していない子供',
'Children orphaned by the disaster': '被災のため孤児になった子供たち',
'Children separated from their parents/caregivers': '親(または親相当の後見人)とはぐれた子供の数',
'Children that have been sent to safe places': '安全な地域へ疎開済みの子供数',
'Children who have disappeared since the disaster': '災害発生後に行方不明の子供たち',
'Children with chronical illnesses': '慢性疾患をもつ子供がいる',
'Chinese (Taiwan)': '中国語 (台湾繁体字)',
'Cholera Treatment Capability': 'コレラ治療対応能力',
'Cholera Treatment Center': 'コレラ治療センター',
'Cholera Treatment': 'コレラの治療',
'Cholera-Treatment-Center': 'コレラ治療センター',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '新規の評価とチームの判定に基づいた新しいポスターを選択してください。建物全体が深刻な状態の場合「危険」を、一部は使える場合「制限あり」です。主要な出入口に「調査済み」プラカードを設置してください。全ての使用可能な出入口には他のプラカードを設置してください。',
'Choose': '選択',
'Choosing Skill and Resources of Volunteers': 'ボランティアのスキルとリソースを選択してください',
'Christian': 'キリスト教徒',
'Church': '教会',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '行方不明時の状況や、この人物の生存を最後に確認した人物についての情報を記載してください。',
'Civil Emergency': '市民緊急事態',
'Cladding, glazing': '被覆・外壁、ガラス板',
'Clear Selection': '選択をクリア',
'Click on the link %(url)s to reset your password': 'リンクをクリックしてください %(url)s パスワードのリセット',
'Click on the link %(url)s to verify your email': 'リンクをクリックしてください %(url)s 登録されたメールアドレスに間違いが無いことが確認されます',
'Client IP': 'クライアントIP',
'Clinical Laboratory': '臨床検査',
'Clinical Operations': '診療の人員数',
'Clinical Status': '診療状況',
'Close map': '地図を閉じる',
'Closed': '閉鎖中',
'Closure': '閉鎖・通行止め',
'Clothing': '衣服',
'Cluster Details': 'クラスタの詳細',
'Cluster Distance': 'クラスタ距離',
'Cluster Subsector Details': 'クラスタのサブクラスタの詳細',
'Cluster Subsector added': 'クラスタのサブセクタを追加しました',
'Cluster Subsector deleted': 'クラスタのサブセクタを削除しました',
'Cluster Subsector updated': 'クラスタのサブセクタを更新しました',
'Cluster Subsector': 'クラスタのサブクラスタ',
'Cluster Subsectors': 'クラスタのサブセクタ',
'Cluster Threshold': 'クラスタのしきい値',
'Cluster added': 'クラスタを追加しました',
'Cluster deleted': 'クラスタを削除しました',
'Cluster updated': 'クラスタを更新しました',
'Cluster': 'クラスタ',
'Cluster(s)': 'クラスタ',
'Clusters': 'クラスタ',
'Code': 'プロジェクトコード',
'Cold Wave': '寒波',
'Collapse, partial collapse, off foundation': '全壊、一部損壊、基礎からのずれ',
'Collective center': '収集センター',
'Color for Underline of Subheadings': 'サブヘッダのアンダーラインの色',
'Color of Buttons when hovering': 'ホバー時のボタンの色',
'Color of bottom of Buttons when not pressed': '押されなかった時のボタンの下部の色',
'Color of bottom of Buttons when pressed': 'ボタン押下時の下部の色',
'Color of dropdown menus': 'ドロップダウンメニューの色',
'Color of selected Input fields': '選択中の入力フィールドの色',
'Color of selected menu items': '選択中のメニューアイテムの色',
'Column Choices (One Per Line': 'カラム選択 (一行に一つ',
'Columns, pilasters, corbels': '円柱、付け柱、コーベル',
'Combined Method': '複数証跡の組み合わせ',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '復旧まで少々お待ちください。あなた以外の閲覧者にも、この表示がされています。',
'Come back later.': '復旧まで少々お待ちください',
'Comments': 'コメント',
'Commercial/Offices': '商業 / オフィス',
'Commit Date': '受け入れ日',
'Commit from %s': '%sからのコミット',
'Commit': 'コミット',
'Commit. Status': '物資到着の見込み',
'Commiting a changed spreadsheet to the database': '変更後のスプレッドシートをデータベースに反映します',
'Commitment Added': 'コミットメントを追加しました',
'Commitment Canceled': 'コミットをキャンセルしました',
'Commitment Details': 'コミットの詳細',
'Commitment Item Details': 'コミットされた救援物資の詳細',
'Commitment Item added': 'コミットの物資を追加しました',
'Commitment Item deleted': 'コミットされた救援物資を削除しました',
'Commitment Item updated': 'コミット物資を更新しました',
'Commitment Item': '物資のコミットメント',
'Commitment Items': 'コミットされた物資',
'Commitment Status': '支援の引き受け状況',
'Commitment Updated': 'コミットを更新しました',
'Commitment': 'コミットメント',
'Commitments': 'コミット',
'Committed By': '受け入れ団体/人',
'Committed': 'コミット済み',
'Committing Inventory': '引き受け中の倉庫',
'Communication problems': 'コミュニケーションの問題',
'Community Centre': 'コミュニティセンター',
'Community Health Center': '地域の医療センター',
'Community Member': 'コミュニティの構成員',
'Complete Unit Label for e.g. meter for m.': '単位を表すラベル。例えばメートルなら m など。',
'Complete': '完了',
'Completed': '完了',
'Complexion': '人種、肌色',
'Compose': 'メッセージ作成',
'Compromised': '易感染状態',
'Concrete frame': 'コンクリートのフレーム',
'Concrete shear wall': 'コンクリートせん断壁',
'Config added': '設定を追加しました',
'Config deleted': '設定を削除しました',
'Config updated': '設定を更新しました',
'Config': '設定',
'Configs': '設定',
'Configurations': '設定',
'Configure Run-time Settings': 'ランタイムの設定',
'Confirm Shipment Received': '配送物の受領を確認',
'Confirmed Incidents': '確認済みのインシデント',
'Confirmed': '確認済み',
'Conflict Details': 'コンフリクトの詳細',
'Conflict Resolution': 'データ競合の解決',
'Consignment Note': '出荷通知',
'Constraints Only': '制約のみ',
'Consumable': '消耗品',
'Contact Data': '連絡先データ',
'Contact Details': '連絡先の詳細',
'Contact Information Added': '連絡先情報を追加しました',
'Contact Information Deleted': '連絡先情報を削除しました',
'Contact Information Updated': '連絡先情報を更新しました',
'Contact Information': '連絡先情報',
'Contact Method': '問い合わせ方法',
'Contact Name': '連絡先名',
'Contact Person': '窓口担当者',
'Contact Phone': '連絡先電話番号',
'Contact details': '連絡先の詳細',
'Contact information added': '連絡先情報を追加しました',
'Contact information deleted': '連絡先情報を削除しました',
'Contact information updated': '連絡先情報を更新しました',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '詳細事項の質問や連絡を行なう際の連絡担当者を記載します(レポート報告者と異なる場合のみ)。電話番号、住所、電子メールなどを記載してください。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '情報伝達や追加質問を行う際の代表担当者(報告者と異なる場合のみ記載してください)。電話番号や住所、メールアドレスなどを指定できます。',
'Contact us': '問い合わせ',
'Contact': '連絡先',
'Contacts': '連絡先',
'Contents': '内容',
'Contradictory values!': '値が矛盾しています!',
'Contributor': '投稿者',
'Conversion Tool': '変換ツール',
'Cooking NFIs': '調理用器具',
'Cooking Oil': '調理油',
'Coordinate Conversion': '座標変換',
'Coping Activities': '一時対応活動',
'Copy any data from the one to be deleted into the one to keep': '削除する側の候補地から残す方の候補地へ、必要なデータを転載します。',
'Copy': 'コピー',
'Corn': 'とうもろこし',
'Cost Type': '料金種別',
'Cost per Megabyte': '1メガバイト毎に課金',
'Cost per Minute': '1分毎に課金',
'Country of Residence': '居住国',
'Country': '国',
'Create & manage Distribution groups to receive Alerts': 'アラートの送付先グループを作成・管理する',
'Create Activity Report': '支援活動レポートを追加',
'Create Activity Type': '支援活動タイプを追加',
'Create Activity': '支援活動を追加',
'Create Assessment': 'アセスメントを新規追加',
'Create Asset': '資産の追加',
'Create Bed Type': 'ベッドの種類を追加',
'Create Brand': '銘柄を追加',
'Create Budget': '予算を追加',
'Create Catalog Item': '物資カタログを追加',
'Create Catalog': 'カタログを追加',
'Create Checklist': 'チェックリストの作成',
'Create Cholera Treatment Capability Information': 'コレラ治療能力に関する情報の追加',
'Create Cluster Subsector': 'クラスタのサブセクタを追加',
'Create Cluster': 'クラスタを追加',
'Create Contact': '連絡先を追加',
'Create Dead Body Report': '遺体発見レポートを追加',
'Create Feature Layer': 'Feature Layerを追加',
'Create Group Entry': 'グループエントリの作成',
'Create Group': 'グループを追加',
'Create Hospital': '病院を新規追加',
'Create Identification Report': 'IDレポートを追加',
'Create Impact Assessment': '災害影響範囲アセスメントの作成',
'Create Import Job': 'Import Jobの作成',
'Create Incident Report': 'インシデントレポートを追加',
'Create Incident': 'インシデントを追加',
'Create Item Category': '物資カテゴリを追加',
'Create Item Pack': '救援物資パックの追加',
'Create Item': '救援物資を新規追加',
'Create Kit': 'キットを新規追加',
'Create Layer': 'レイヤを追加',
'Create Location': 'ロケーションを追加',
'Create Map Profile': '地図設定を追加',
'Create Marker': 'マーカーを追加',
'Create Member': 'メンバを追加',
'Create Mobile Impact Assessment': '災害影響範囲アセスメントをモバイル端末から作成',
'Create Office': 'オフィスを追加',
'Create Organization': '団体を追加',
'Create Personal Effects': 'Personal Effectsを追加',
'Create Project': 'プロジェクトを追加',
'Create Projection': '地図投影法を追加',
'Create Rapid Assessment': '被災地の現況アセスメントを作成',
'Create Report': 'レポートを新規追加',
'Create Request': '支援要請を作成',
'Create Resource': 'リソースを追加',
'Create River': '河川情報を追加',
'Create Role': '役割を追加',
'Create Sector': '活動分野を追加',
'Create Service Profile': 'サービスプロファイルを追加',
'Create Shelter Service': '避難所における提供サービスを追加',
'Create Shelter Type': '避難所タイプを追加',
'Create Shelter': '避難所を追加',
'Create Skill Type': 'スキルタイプを追加',
'Create Skill': 'スキルを追加',
'Create Status': '状況を追加',
'Create Task': 'タスクを追加',
'Create Theme': 'テーマを追加',
'Create User': 'ユーザを追加',
'Create Volunteer': 'ボランティアの追加',
'Create Warehouse': '倉庫を追加',
'Create a Person': '人物情報を追加',
'Create a group entry in the registry.': '登録にグループエントリを作成。',
'Create, enter, and manage surveys.': '調査の作成、入力、管理を実施',
'Creation of Surveys': '聞き取り調査の新規作成',
'Credential Details': '証明書の詳細',
'Credential added': '証明書を追加しました',
'Credential deleted': '証明書を削除しました',
'Credential updated': '証明書を更新しました',
'Credentials': '証明書',
'Crime': '犯罪',
'Criteria': '基準',
'Currency': '通貨',
'Current Group Members': '現在のグループメンバ',
'Current Identities': '現在のID',
'Current Location': '現在のロケーション',
'Current Log Entries': '現在のログエントリ',
'Current Memberships': '現在のメンバシップ',
'Current Notes': '現在選択中の追加情報',
'Current Registrations': '現在の登録',
'Current Status': '現在の状況',
'Current Team Members': '現在のチームメンバ',
'Current Twitter account': '現在のTwitterアカウント',
'Current community priorities': '現在のコミュニティの優先順位',
'Current general needs': '現在の需要',
'Current greatest needs of vulnerable groups': '現在、被災者が最も必要としている物資/サービス',
'Current health problems': '現在の健康問題',
'Current main income sources': '現在の主な収入源',
'Current major expenses': '現在の主な支出項目',
'Current number of patients': '現在の患者数',
'Current problems, categories': '現在の問題、カテゴリ',
'Current problems, details': '現在の問題の詳細',
'Current request': '現在の要求',
'Current response': '現在の対応状況',
'Current session': '現在のセッション',
'Current type of health problems, adults': '現在発生中の健康問題(成人)',
'Current type of health problems, children': '現在発生中の健康問題(小児)',
'Current type of source for drinking water': '現在の飲料水確保方法',
'Current type of source for sanitary water': '現在の生活用水確保方法',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'カストマイズされたデータベースのリソース (例:Sahana 内のリソースとして定義された物)',
'Customisable category of aid': 'カスタマイズ可能な支援カテゴリ',
'DC': '寄付の証明(Donation Certificate)',
'DECISION': '決定',
'DNA Profile': 'DNAプロファイル',
'DNA Profiling': 'DNAプロファイリング',
'DVI Navigator': '被災者の検索',
'Daily': '日次',
'Dam Overflow': 'ダム決壊',
'Damage': '損傷',
'Dangerous Person': '危険人物',
'Dashboard': 'ダッシュボード',
'Data import policy': 'データのインポートポリシー',
'Data uploaded': 'データがアップロードされました',
'Database': 'データベース',
'Date & Time': '日付と時刻',
'Date Avaialble': '可能な日付',
'Date Available': '可能な日付',
'Date Received': '物資受領日',
'Date Requested': '要請した日',
'Date Required': '物資が必要になる日',
'Date Sent': '送付日',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '物資を受領した日時を記録します。デフォルトでは現在の時間が入力されます。変更するには、ドロップダウンリストから選択してください。',
'Date and Time': '日付と時刻',
'Date and time this report relates to.': 'このレポートに関連する日付と時刻',
'Date of Birth': '生年月日',
'Date of Latest Information on Beneficiaries Reached': '恩恵を受ける人にたどり着いた最新の情報の日付',
'Date of Report': 'レポートの日付',
'Date': '日付',
'Date/Time of Find': '日付/発見日時',
'Date/Time of disappearance': '行方不明になった日付/時刻',
'Date/Time': '日付/時刻',
'De-duplicator': '重複解消機能',
'Dead Body Details': '遺体の詳細',
'Dead Body Reports': '遺体情報レポート',
'Dead Body': '遺体の管理',
'Dead body report added': '遺体発見レポートを追加しました',
'Dead body report deleted': '遺体報告を削除しました',
'Dead body report updated': '遺体レポートを更新しました',
'Deaths in the past 24h': '過去24時間の死者',
'Deaths/24hrs': '死亡者数/24h',
'Debug': 'デバッグ',
'Deceased': '死亡',
'Decimal Degrees': '十進角',
'Decomposed': '腐乱',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの縦高。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Height of the map window.': '地図ウィンドウの初期の高さ',
'Default Marker': 'デフォルトマーカー',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの幅。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Width of the map window.': '地図ウィンドウの幅の初期値',
'Default synchronization policy': 'データ同期ポリシーのデフォルト設定',
'Defaults updated': 'デフォルト値を更新しました',
'Defaults': 'デフォルト値',
'Defecation area for animals': '動物排便用の地域',
'Defines the icon used for display of features on handheld GPS.': 'ハンドヘルドGPSに表示するアイコンを決定します。',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '対話型地図および KML の出力上で Feature の表示に使用するアイコンを定義します。Feature Class に割り当てられたマーカーを上書きする必要がある場合、個々の場所に割り当てられたマーカーが設定されます。どちらも定義されていない場合は、デフォルトのマーカーが使用されます。',
'Defines the icon used for display of features on interactive map & KML exports.': 'インタラクティブマップとKMLエクスポートで建物などの表示に使われるアイコン定義',
'Defines the marker used for display & the attributes visible in the popup.': 'ポップアップ時と通常時に表示されるマーカーを指定してください。',
'Degrees must be a number between -180 and 180': '度数は -180 から 180 の間にしてください。',
'Dehydration': '脱水症状',
'Delete Aid Request': '援助要請を削除',
'Delete Alternative Item': '代わりの物資を削除する',
'Delete Assessment Summary': 'アセスメントの要約を削除',
'Delete Assessment': 'アセスメントを削除',
'Delete Asset Assignments': '資産割り当ての削除',
'Delete Asset': '資産の削除',
'Delete Baseline Type': '基準値タイプを削除',
'Delete Baseline': '基準値を削除',
'Delete Brand': 'ブランドを削除してください',
'Delete Budget': '予算を削除',
'Delete Bundle': 'Bundleを削除',
'Delete Catalog Item': '救援物資カタログを削除',
'Delete Cluster Subsector': 'クラスタのサブクラスタを削除',
'Delete Cluster': 'クラスタを削除',
'Delete Commitment Item': 'コミットした物資の削除',
'Delete Commitment': 'コミットメントの削除',
'Delete Config': '設定を削除',
'Delete Contact Information': '連絡先情報の削除',
'Delete Credential': '証明書の削除',
'Delete Distribution Item': '配給物資を削除',
'Delete Distribution': '配給所を削除',
'Delete Document': '文書を削除',
'Delete Donor': '資金提供組織を削除',
'Delete Entry': 'エントリを削除',
'Delete Feature Layer': '機能レイヤを削除',
'Delete Group': 'グループを削除',
'Delete Hospital': '病院を削除',
'Delete Image': '画像を削除',
'Delete Impact Type': '影響範囲のタイプを削除',
'Delete Impact': '影響範囲の削除',
'Delete Incident Report': 'インシデントレポートを削除',
'Delete Incident': 'インシデントを削除',
'Delete Inventory Item': '備蓄物資を削除',
'Delete Inventory Store': '物資集積地点を削除',
'Delete Item Category': 'アイテムカテゴリを削除',
'Delete Item Pack': '救援物資パックの削除',
'Delete Item': '救援物資を削除',
'Delete Key': 'Keyを削除',
'Delete Kit': 'Kitを削除',
'Delete Layer': 'レイヤーを削除',
'Delete Level 1 Assessment': 'レベル1アセスメントの削除',
'Delete Level 2 Assessment': 'レベル2アセスメントの削除',
'Delete Location': 'ロケーションを削除',
'Delete Map Profile': '地図設定を削除',
'Delete Marker': 'マーカーを削除',
'Delete Membership': 'メンバシップを削除',
'Delete Message': 'メッセージを削除',
'Delete Metadata': 'メタデータを削除',
'Delete Need Type': '需要タイプを削除',
'Delete Need': '要求を削除',
'Delete Office': 'オフィスを削除',
'Delete Old': '古いものを削除',
'Delete Organization': '団体情報を削除',
'Delete Peer': 'データ同期先の削除',
'Delete Person': '人物情報を削除',
'Delete Photo': '写真を削除',
'Delete Project': 'プロジェクトを削除',
'Delete Projection': '地図投影法を削除',
'Delete Rapid Assessment': '被災地の現況アセスメントを削除',
'Delete Received Item': '受け取った物資の削除',
'Delete Received Shipment': '受け取った輸送の削除',
'Delete Record': 'レコードを削除',
'Delete Recovery Report': '遺体回収レポートを削除',
'Delete Report': 'レポートを削除',
'Delete Request Item': '物資の要請を削除',
'Delete Request': '支援要請を削除',
'Delete Resource': 'リソースを削除',
'Delete Section': 'Sectionを削除',
'Delete Sector': '活動分野を削除',
'Delete Sent Item': '送付物資を削除',
'Delete Sent Shipment': '輸送物資を削除',
'Delete Service Profile': 'サービスプロファイルを削除',
'Delete Setting': '設定を削除',
'Delete Skill Type': 'スキルタイプを削除',
'Delete Skill': 'スキルを削除',
'Delete Staff Type': 'スタッフタイプを削除',
'Delete Status': '状況を削除しました',
'Delete Subscription': '寄付申し込みを削除',
'Delete Survey Answer': '調査回答削除',
'Delete Survey Question': 'Survey Questionを削除',
'Delete Survey Section': '調査項目を削除',
'Delete Survey Series': '一連の調査を削除',
'Delete Survey Template': '調査用テンプレートを削除',
'Delete Unit': '単位を削除',
'Delete User': 'ユーザを削除',
'Delete Volunteer': 'ボランティアを削除',
'Delete Warehouse Item': '倉庫物資の削除',
'Delete Warehouse': '倉庫を削除',
'Delete from Server?': 'サーバから削除しますか?',
'Delete': '削除',
'Delivered': '配信済み',
'Delphi Decision Maker': 'Delphi意思決定',
'Demographic': '人口情報',
'Demonstrations': 'デモ発生',
'Dental Examination': '歯科検査',
'Dental Profile': '歯の欠損/治療跡',
'Department/Unit Name': '所属部課名',
'Deployment': '展開',
'Describe the condition of the roads to your hospital.': '道路状況|病院までの道路状況を記載してください',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'このレコードに関連する手続きを説明してください。(例えば "検診" です。)',
'Description of Bin Type': 'Binタイプを記載してください',
'Description of Contacts': '連絡先の説明',
'Description of defecation area': '排泄用地についての補足説明',
'Description of drinking water source': '飲料水に関する補足説明',
'Description of sanitary water source': '生活用水に関する説明',
'Description of water source before the disaster': '災害発生前の水の確保方法について補足説明',
'Description': '説明',
'Descriptive Text (e.g., Prose, etc)': '説明文 (例: 文学、等)',
'Designated for': '指定済み',
'Desire to remain with family': '家族との残留を希望',
'Destination': '目的地',
'Detail': '詳細',
'Details': '詳細',
'Dialysis': '透析',
'Diaphragms, horizontal bracing': '仕切り板、水平部材',
'Diarrhea among children under 5': '5歳未満の幼児に下痢が蔓延している',
'Diarrhea': '下痢',
'Dignitary Visit': '要人の訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Direction': '方向',
'Disable': '無効',
'Disabled participating in coping activities': '障害者が災害対応に従事',
'Disabled': '無効',
'Disabled?': '無効になっているか?',
'Disaster Victim Identification': '被災者の同定',
'Disaster Victim Registry': '被災者登録',
'Disaster clean-up/repairs': '災害の清掃活動や修復',
'Discharge (cusecs)': '流水量 (cusecs)',
'Discharges/24hrs': '退院者数/24h',
'Discussion Forum on item': 'フォーラム(物資について)',
'Discussion Forum': 'フォーラム',
'Disease vectors': '病原媒介者',
'Dispatch Items': 'アイテムの発送',
'Dispatch': '発送',
'Dispensary': '診療所',
'Displaced Populations': '避難者数',
'Displaced': '避難中',
'Display Polygons?': '多角形を表示しますか?',
'Display Routes?': 'ルートを表示しますか?',
'Display Tracks?': 'Tracksを表示しますか?',
'Display Waypoints?': 'ウェイポイントを表示しますか?',
'Dispose Expired/Unusable Items': '期限切れ / 使用できない物資の処分',
'Dispose': '処分',
'Distance between defecation area and water source': '水資源採取場所と排泄場所の間の距離',
'Distance between latrines and temporary shelter in meters': 'トイレと避難所の距離(m)',
'Distance between shelter and latrines': '簡易避難所と排泄場所との間の距離(メートル)',
'Distance(Kms)': '距離(Kms)',
'Distribution Details': '配給所の詳細',
'Distribution Item Details': '配給物資の詳細',
'Distribution Item added': '配給物資を追加しました',
'Distribution Item deleted': '配給物資を削除しました',
'Distribution Item updated': '配給物資を更新しました',
'Distribution Item': '配給物資',
'Distribution Items': '配給物資',
'Distribution added': '配給所を追加しました',
'Distribution deleted': '配給所を削除しました',
'Distribution groups': '配信グループ',
'Distribution updated': '配給所を更新しました',
'Distribution': '配給所',
'Distributions': '配給所',
'District': '地区(行政地区)',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の青年は、災害に対応するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '1つの世帯ごとに、少なくとも2つ以上の水貯蔵容器(10-20リットル/容器)があるかどうかを記載してください',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '調理や食事に必要となる道具や器材(コンロ、ポット、皿やプレート、マグカップ、飲料容器など)が世帯に存在するかを記載します',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'ベッド、あるいはベッド用部材(例:タープ、プラスチックマット、毛布)が世帯に存在するかを記載します',
'Do households have household water storage containers?': '水貯蔵容器が世帯に存在するかを記載します',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '地域にいるマイノリティ(社会的少数者)の人が、自助的な災害対処につながる活動に参加しているか記載してください。(例 打ち合わせ、宗教活動、地域の清掃ボランティアなど)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '災害復旧活動に従事している高齢者が、共同体の中にいるかどうかを記載してください(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '個人に対して、少なくとも2セット以上の衣服(シャツ、ズボン/腰巻、下着など)があるかどうか記載してください',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '十分な量のサニタリ / 衛生用品が、安定して供給されているかどうかを記載します(石鹸、シャンプー、歯ブラシ、洗濯用洗剤など)',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の障害者は、災害に対処するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do women and girls have easy access to sanitary materials?': '女性用生理用品の入手が容易かどうかを記載してください',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の女性は、災害対応のための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do you have access to cash to restart your business?': 'ビジネス再開に必要な現金が入手可能かどうかを記載してください',
'Do you know of any incidents of violence?': '暴力事件が発生したかどうかを記載してください',
'Do you know of children living on their own (without adults)?': '成人がおらず、未成年のみで生活しているグループがあるかどうかを記載してください',
'Do you know of children separated from their parents or caregivers?': '親や養育者とはぐれた未成年がいるかどうかを記載してください',
'Do you know of children that have been orphaned by the disaster?': '災害によって孤児となった未成年がいるかどうかを記載してください',
'Do you know of children that have been sent to safe places?': '安全な場所に疎開した未成年がいるかどうかを記載してください',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '災害発生後、行き先の説明ないまま連絡が取れなくなった未成年がいるかどうかを記載してください',
'Do you know of older people who are primary caregivers of children?': '未成年に対する介護経験がある高齢者がいるかどうかを記載してください',
'Do you know of parents/caregivers missing children?': '子供と連絡が取れなくなった親や養育者がいるかどうかを記載してください',
'Do you really want to delete these records?': '本当にこれらのデータを削除しますか?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'この輸送の受領をキャンセルしますか?キャンセルするとこの物資は備蓄から削除されます。この操作は *取り消せません!*',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '出荷された物資をキャンセルしますか?この物資は、在庫に返されます。このアクションは、元に戻せません。',
'Do you want to over-write the file metadata with new default values?': 'ファイルのメタデータを、新しいデフォルト値で上書きしますか?',
'Do you want to receive this shipment?': 'この輸送物資を受け取られますか?',
'Do you want to send these Committed items?': 'これらコミットされた物資を送付してよいですか?',
'Do you want to send this shipment?': 'この発送情報を送信しますか?',
'Document Details': '文書の詳細',
'Document Scan': '文書のスキャン',
'Document added': '文書を追加しました',
'Document deleted': '文書を削除しました',
'Document updated': '文書を更新しました',
'Document': '文書',
'Documents and Photos': '文書と写真',
'Documents': '文書',
'Does this facility provide a cholera treatment center?': 'コレラ治療センターの機能を提供可能かどうか',
'Doing nothing (no structured activity)': '活動なし(組織立った行動なし)',
'Dollars': 'ドル',
'Domain': 'ドメイン',
'Domestic chores': '家事手伝い',
'Donation Certificate': '寄付証明書',
'Donation Phone #': '寄付受付電話番号',
'Donor Details': '資金提供組織の詳細',
'Donor added': '資金提供組織を追加しました',
'Donor deleted': '資金提供組織を削除しました',
'Donor updated': '資金提供組織を更新しました',
'Donor': '資金提供組織',
'Donors Report': '資金提供レポート',
'Donors': '資金提供組織',
'Door frame': 'ドア枠',
'Download PDF': 'PDFをダウンロード',
'Draft Features': '草案(ドラフト)',
'Draft': 'ドラフト',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'ロケーションに対する、スタッフと備品の予算を作成します。',
'Drill Down by Group': 'グループで絞り込み',
'Drill Down by Incident': 'インシデントで絞り込み',
'Drill Down by Shelter': '避難所で絞り込み',
'Driving License': '運転免許',
'Drought': '干ばつ',
'Drugs': '医薬品',
'Dug Well': '丸井戸',
'Duplicate?': '重複?',
'Duration': '活動実施期間',
'Dust Storm': '粉塵嵐',
'Dwelling': '居住施設',
'Dwellings': '住居数',
'EMS Reason': '緊急医療受け入れ状態',
'EMS Status Reason': '救急医療状況の理由',
'EMS Status': 'EMSステータス',
'EMS Traffic Status': '救急医療の混雑状況',
'ER Status Reason': 'ER医療状況の理由',
'ER Status': 'ER ステータス',
'Early Recovery': '早期復旧',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '女性用サニタリ用品の入手が容易である',
'Edit Activity': '支援活動を編集',
'Edit Address': '住所の編集',
'Edit Aid Request': '援助要請を編集',
'Edit Alternative Item': '代わりの物資を編集',
'Edit Application': 'アプリケーションの編集',
'Edit Assessment Summary': 'アセスメントの要約を編集',
'Edit Assessment': 'アセスメントを編集',
'Edit Asset Assignment': '資産割り当ての編集',
'Edit Asset': '資産を編集',
'Edit Baseline Type': '基準値のタイプを編集',
'Edit Baseline': 'Baselineの編集',
'Edit Brand': '銘柄の編集',
'Edit Budget': '予算の編集',
'Edit Bundle': 'Bundleの編集',
'Edit Catalog Item': '救援物資カタログの編集',
'Edit Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係の編集',
'Edit Cluster Subsector': 'クラスタのサブセクターの編集',
'Edit Cluster': 'クラスタを編集',
'Edit Commitment Item': 'コミットされた物資の検索',
'Edit Commitment': 'コミットを編集',
'Edit Config': '設定の編集',
'Edit Contact Information': '連絡先情報の編集',
'Edit Contact': '連絡先の編集',
'Edit Contents': '内容の編集',
'Edit Credential': '証明書の編集',
'Edit Dead Body Details': '遺体の詳細を編集',
'Edit Defaults': 'デフォルト値の編集',
'Edit Description': '説明の編集',
'Edit Details': '詳細の編集',
'Edit Disaster Victims': '被災者情報の編集',
'Edit Distribution Item': '配給物資の編集',
'Edit Distribution': '配給所の編集',
'Edit Document': '文書を編集',
'Edit Donor': '資金提供組織の編集',
'Edit Email Settings': '電子メール設定の編集',
'Edit Feature Layer': 'Feature Layerの編集',
'Edit Flood Report': '洪水レポートの編集',
'Edit Gateway Settings': 'ゲートウェイ設定の編集',
'Edit Group': 'グループの編集',
'Edit Hospital': '病院の編集',
'Edit Identification Report': 'IDレポートの編集',
'Edit Identity': 'IDの編集',
'Edit Image Details': '画像の詳細の編集',
'Edit Image': '画像の編集',
'Edit Impact Type': '災害影響のタイプを編集',
'Edit Impact': '被災影響の編集',
'Edit Incident Report': 'インシデントレポートの編集',
'Edit Incident': 'インシデントを編集',
'Edit Inventory Item': '備蓄物資の編集',
'Edit Inventory Store': '物資集積地点の編集',
'Edit Item Catalog Categories': '救援物資カタログのカテゴリを編集',
'Edit Item Catalog': '救援物資カタログの編集',
'Edit Item Category': '救援物資カテゴリの編集',
'Edit Item Pack': '物資パックを編集',
'Edit Item Sub-Categories': '救援物資サブカテゴリの編集',
'Edit Item': '物資の編集',
'Edit Key': 'Keyの編集',
'Edit Kit': 'Kitの編集',
'Edit Layer': 'レイヤの編集',
'Edit Level 1 Assessment': 'レベル1アセスメントを編集する',
'Edit Level 2 Assessment': 'レベル2アセスメントを編集',
'Edit Location': 'ロケーションの編集',
'Edit Log Entry': 'ログエントリの編集',
'Edit Map Profile': '地図設定を編集する',
'Edit Map Services': '地図サービスの編集',
'Edit Marker': 'マーカーの編集',
'Edit Membership': 'メンバシップの編集',
'Edit Message': 'メッセージの編集',
'Edit Messaging Settings': 'メッセージ設定の編集',
'Edit Metadata': 'メタデータの編集',
'Edit Modem Settings': 'モデム設定の編集',
'Edit Need Type': '需要タイプの編集',
'Edit Need': 'ニーズを編集',
'Edit Note': '追加情報を編集',
'Edit Office': 'オフィスの編集',
'Edit Options': 'オプション編集',
'Edit Organization': '団体の編集',
'Edit Parameters': 'パラメータの編集',
'Edit Peer Details': 'データ同期先の詳細を編集',
'Edit Peer': 'データ同期先の編集',
'Edit Person Details': '人物情報の詳細を編集',
'Edit Personal Effects Details': 'Personal Effectsの詳細の編集',
'Edit Photo': '写真の編集',
'Edit Pledge': '寄付の編集',
'Edit Position': '場所の編集',
'Edit Problem': '問題の編集',
'Edit Project': 'プロジェクトの編集',
'Edit Projection': '地図投影法の編集',
'Edit Rapid Assessment': '被災地の現況アセスメントの編集',
'Edit Received Item': '物資の受領を編集',
'Edit Received Shipment': '物資の輸送の受領報告を編集',
'Edit Record': 'レコードの編集',
'Edit Recovery Details': '遺体回収の詳細を編集',
'Edit Registration Details': '登録状況の詳細を編集',
'Edit Registration': '登録の編集',
'Edit Report': 'レポートの編集',
'Edit Request Item': '物資の要請を編集',
'Edit Request': '支援要請の編集',
'Edit Resource': 'リソースの編集',
'Edit Response': '返信を編集',
'Edit River': '河川の編集',
'Edit Role': '役割の編集',
'Edit Sector': '活動分野を編集',
'Edit Sent Item': '送付した物資の編集',
'Edit Setting': '設定の編集',
'Edit Settings': '設定の編集',
'Edit Shelter Service': '避難所提供サービスの編集',
'Edit Shelter Type': '避難所タイプの編集',
'Edit Shelter': '避難所の編集',
'Edit Shipment Transit Log': '輸送履歴の編集',
'Edit Shipment to Send': '送付する輸送を編集',
'Edit Shipment/Way Bills': '輸送費/移動費の編集',
'Edit Shipment<>Item Relation': '輸送<>物資の関係を編集',
'Edit Site': 'Siteを編集',
'Edit Skill Type': 'スキルタイプの編集',
'Edit Skill': 'スキルの編集',
'Edit Solution': '解決案の編集',
'Edit Staff Type': 'スタッフタイプの編集',
'Edit Staff': 'スタッフの編集',
'Edit Storage Bin Type(s)': 'Storage Binタイプを編集',
'Edit Storage Bins': 'Storage Binの編集',
'Edit Storage Location': '備蓄地点の編集',
'Edit Subscription': '寄付申し込みの編集',
'Edit Survey Answer': '調査回答の編集',
'Edit Survey Question': '調査の質問項目を編集',
'Edit Survey Section': 'フィードバック内容を編集します',
'Edit Survey Series': '一連の調査の編集',
'Edit Survey Template': '調査テンプレートを編集',
'Edit Task': 'タスクの編集',
'Edit Team': 'チームの編集',
'Edit Theme': 'テーマの編集',
'Edit Themes': 'テーマの編集',
'Edit Ticket': 'チケットの編集',
'Edit Track': '追跡情報の編集',
'Edit Tropo Settings': 'Tropo 設定の編集',
'Edit Unit': '単位の編集',
'Edit User': 'ユーザの編集',
'Edit Volunteer Details': 'ボランティアの詳細を編集する',
'Edit Volunteer Registration': 'ボランティア登録の編集',
'Edit Warehouse Item': '倉庫物資を編集',
'Edit Warehouse': '倉庫を編集',
'Edit current record': '現在のレコードの編集',
'Edit message': 'メッセージの編集',
'Edit the Application': 'アプリケーションの編集',
'Edit': '編集',
'Editable?': '編集可能?',
'Education materials received': '教育資材を受領した',
'Education materials, source': '教育資材の送付元',
'Education': '教育',
'Effects Inventory': '備蓄物資への影響',
'Eggs': '卵',
'Either a shelter or a location must be specified': '避難所かロケーションのどちらかを特定する必要があります',
'Either file upload or document URL required.': 'ファイルのアップロードと文書のURLの両方が必要です。',
'Either file upload or image URL required.': 'アップロードするファイルか、URLを指定してください。',
'Elderly person headed households (>60 yrs)': '代表者が60歳以上の世帯数',
'Electrical': '電動の',
'Electrical, gas, sewerage, water, hazmats': '電気、ガス、下水道、水、有害物',
'Elevated': '高まる',
'Elevators': 'エレベーター',
'Email Address': 'メールアドレス',
'Email Settings': '電子メール設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子メールの認証は完了しましたが、登録はまだ完了していません。確認が完了するまで少々お待ちください。',
'Email settings updated': '電子メールの設定を更新しました',
'Email verification': '利用者登録の確認',
'Email': '電子メール',
'Embalming': '遺体防腐処理',
'Embassy': '大使館',
'Emergency Capacity Building project': 'ECB (緊急時の被災者収容建築プロジェクト)',
'Emergency Department': '救急部門',
'Emergency Shelter': '緊急避難所',
'Emergency Support Facility': '緊急支援施設',
'Emergency Support Service': '緊急支援サービス',
'Emergency Telecommunications': '緊急時電話連絡先',
'Enable/Disable Layers': 'レイヤの有効化/無効化',
'Enabled': '有効',
'End date should be after start date': '終了日付は開始日付より後にしてください',
'End date': '終了日',
'End of Period': '終了期間',
'English': 'English 英語',
'Enter Coordinates': '緯度経度を入力',
'Enter Coordinates:': '座標入力:',
'Enter a GPS Coord': 'GPS Coordを入力',
'Enter a GPS Coordinate': 'GPS座標を入力してください',
'Enter a date before': '以前の日時を入力',
'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.': '最初の数文字を入力して既存の項目から選ぶか、あるいは新しいロケーション名を入力して、ロケーションを特定してください。',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'アップロードするスプレッドシートの名前を入力してください。(必須項目)',
'Enter a new support request.': '新規の支援要請を登録',
'Enter a summary of the request here.': '要求事項の概要を入力',
'Enter a unique label!': 'そのラベル名は使われています。一意のラベル名を入力してください。',
'Enter a valid date before': 'より前の正しい日付を入力してください',
'Enter a valid email': '正しいメールアドレスを入力してください',
'Enter a valid future date': '正しい未来の日付を入力してください',
'Enter some characters to bring up a list of possible matches': '文字を入力することで、候補の一覧が表示されます',
'Enter some characters to bring up a list of possible matches.': '検索文字列を入力してください',
'Enter tags separated by commas.': 'タグはカンマで区切って入力してください。',
'Enter the same password as above': '確認のため、パスワードを再入力',
'Enter your firstname': 'あなたの名前を入力',
'Entered': '入力された',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '電話番号の入力は任意です。入力すると、SMS メッセージの受け取り登録ができます。',
'Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions.': '選択リストに含まれる団体のメンバーであれば、所属する団体を選択してください。(団体の選択は必須ではありません)',
'Entry deleted': 'エントリを削除しました',
'Environment': '環境',
'Equipment': '備品',
'Error encountered while applying the theme.': 'テーマ適用時にエラーが発生しました。',
'Error in message': 'エラーメッセージ',
"Error logs for '%(app)s'": '"%(app)s" に関するエラーログ',
'Errors': 'エラー',
'Estimated # of households who are affected by the emergency': '非常事態の影響を受けた世帯の推定数',
'Estimated # of people who are affected by the emergency': '非常事態の影響を受けた住民の推定数',
'Estimated Overall Building Damage': '建物全体の被害見積り',
'Estimated total number of people in institutions': 'なんらかの施設に収容されている住民の推定数',
'Euros': 'ユーロ',
'Evacuating': '退避中',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'このメッセージの情報を評価します。(この値は、公開される警告アプリケーションで使用してはなりません)',
'Event Time': 'イベント発生時刻',
'Event Type': 'イベントタイプ',
'Event type': 'イベントタイプ',
'Example': '例',
'Exceeded': '超過',
'Exclude contents': 'コンテンツを除く',
'Excreta disposal': 'し尿処理',
'Execute a pre-planned activity identified in <instruction>': '事前に準備していた計画 <instruction>を実行する',
'Existing Placard Type': '設置されたポスターのタイプ',
'Existing food stocks': '食糧備蓄あり',
'Existing food stocks, main dishes': '備蓄中の食料(主皿)',
'Existing food stocks, side dishes': '備蓄中の食料(副皿)',
'Exits': '出口',
'Expected In': '予定期間',
'Expected Out': '予期される出力',
'Experience': '熟練者',
'Expiry Date': '有効期限',
'Expiry Time': '有効期限',
'Expiry_Date': '有効期限',
'Explosive Hazard': '爆発災害',
'Export Data': 'データのエクスポート',
'Export Database as CSV': 'データベースをCSV形式でエクスポート',
'Export in GPX format': 'GPXフォーマットでエクスポート',
'Export in KML format': 'KMLフォーマットでエクスポート',
'Export in OSM format': 'OSMフォーマットでエクスポート',
'Export in PDF format': 'PDFフォーマットでエクスポート',
'Export in RSS format': 'RSSフォーマットでエクスポート',
'Export in XLS format': 'XLSフォーマットでエクスポート',
'Export': 'エクスポート',
'Exterior Only': '外装のみ',
'Exterior and Interior': '外装と内装',
'External Features': '外部機能',
'Eye Color': '目の色',
'Facial hair, color': 'ヒゲ, 色',
'Facial hair, type': 'ヒゲ, 形状',
'Facial hear, length': 'ヒゲ, 長さ',
'Facility Operations': '施設の運用',
'Facility Status': '施設の状態',
'Facility Type': '施設タイプ',
'Factors affecting school attendance': '生徒の就学に影響する要因',
'Failed to send mail to Approver - see if you can notify them manually!': '承認依頼メールを送信できませんでした。利用者登録は完了していません。サイト管理者へ連絡してください。',
'Failed!': '失敗しました!',
'Falling Object Hazard': '落下/墜落による災害',
'Families/HH': '家族/世帯',
'Family tarpaulins received': 'タープ(家族用簡易テント)を受領した',
'Family tarpaulins, source': 'タープ(家族用簡易テント)の送付元',
'Family': '家族',
'Family/friends': '家族/友人',
'Farmland/fishing material assistance, Rank': '農業 / 漁業用物資の補助、ランク',
'Fatalities': '死亡者',
'Fax': 'ファックス',
'Feature Layer Details': '機能レイヤの詳細',
'Feature Layer added': '機能レイヤを追加しました',
'Feature Layer deleted': '機能レイヤを削除しました',
'Feature Layer updated': '機能レイヤを更新しました',
'Feature Layers': '機能レイヤ',
'Feature Namespace': 'Feature 名前空間',
'Feature Request': '機能の要求',
'Feature Type': 'Feature タイプ',
'Feature': '機能',
'Features Include': '含まれる機能',
'Female headed households': '代表者が女性の世帯数',
'Female': '女性',
'Few': '少数',
'Field Hospital': '野外病院',
'File': 'ファイル',
'Fill in Latitude': '緯度を記入',
'Fill in Longitude': '経度を記入',
'Fill out Rapid Evaluation Forms': '迅速評価フォームに記入します',
'Fill out detailed Evaluation Forms': '詳細な評価フォームに入力する',
'Filter Field': 'フィールドをフィルタする',
'Filter Value': '値をフィルタ',
'Filter': 'フィルタ',
'Filtered search of aid pledges and requests': '援助申出と要請の検索されたもの',
'Find All Matches': '完全一致',
'Find Dead Body Report': '遺体レポートの発見',
'Find Hospital': '病院を探す',
'Find Person Record': '人物情報を検索',
'Find Recovery Report': '遺体発見レポート',
'Find Volunteers': 'ボランティアを探す',
'Find a Person Record': '人物情報を検索する',
'Find by Name': '名前で検索',
'Find': '検索',
'Finder': '発見者',
'Fingerprint': '指紋',
'Fingerprinting': '指紋',
'Fingerprints': '指紋',
'Finish': '完了',
'Finished Jobs': '完了したジョブ',
'Fire suppression and rescue': '消火・救出活動',
'Fire': '火災',
'First Name': '苗字',
'First name': '苗字',
'Fishing': '漁業',
'Flash Flood': '鉄砲水',
'Flash Freeze': '瞬間凍結',
'Fleet Management': '船舶の管理',
'Flexible Impact Assessments': '災害影響範囲アセスメント',
'Flood Alerts show water levels in various parts of the country': '洪水警報では、国内各所の水位情報を確認することができます。',
'Flood Alerts': '洪水警報',
'Flood Report Details': '洪水レポートの詳細',
'Flood Report added': '洪水レポートを追加しました',
'Flood Report deleted': '洪水レポートを削除しました',
'Flood Report updated': '洪水レポートを更新しました',
'Flood Report': '洪水レポート',
'Flood Reports': '洪水レポート',
'Flood': '洪水',
'Flow Status': '流れの状況',
'Focal Point': '代表者',
'Fog': '濃霧',
'Food Supply': '食料の供給',
'Food assistance available/expected': '食糧援助が利用可能 / 期待できる',
'Food assistance': '食糧援助',
'Food': '食料',
'Footer file %s missing!': 'フッターファイル%sが見つかりません。',
'Footer': 'フッタ',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Eden の場合はベースURL(例えば http://sync.sahanfoundation.org/eden)、他のシステムの場合は同期インターフェースのURL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'POP-3では通常110 (SSLでは995)で、IMAPでは通常143 (IMAPSでは993)。',
'For Warehouse': '倉庫向け',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '国の場合は ISO2 コード、町の場合は 空港コード(Airport Locode)',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'それぞれの同期パートナーについて、指定した間隔で実行する同期ジョブがデフォルトで存在します。必要に応じて、さらなる同期ジョブを設定し、カスタマイズすることができます。開始するには、リンクをクリックしてください。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'セキュリティ向上のため、ユーザー名とパスワードを入力し、団体の他端末の管理者にユーザー名とパスワードを通知して「データ同期」 -> 「データ同期パートナー」であなたのUUIDに追加してもらうことを推奨します。',
'For live help from the Sahana community on using this application, go to': 'Sahanaの使い方について Sahanaコミュニティからライブヘルプを希望する際は、以下に進んでください。',
'For messages that support alert network internal functions': '警戒(alert)ネットワークの内部機能をサポートするメッセージの場合',
'For more details on the Sahana Eden system, see the': 'Sahana Edenに関する詳細は、以下をごらんください。',
'For more information, see ': '詳細は、以下を参照してください。',
'For other types, the next screen will allow you to enter the relevant details...': 'その他の種類については、次の画面で関連する詳細情報を入力できます…',
'For': ' ',
'For:': '対象:',
'Forest Fire': '森林火災',
'Formal camp': '指定避難所',
'Format': 'フォーマット',
'Forms': 'フォーム',
'Found': '発見された',
'Foundations': '構造基礎',
'Freezing Drizzle': '凍結霧雨',
'Freezing Rain': 'みぞれ',
'Freezing Spray': '冷却スプレー',
'French': 'フランス語',
'Friday': '金曜日',
'From Inventory': '送付元',
'From Location': '送付元ロケーション',
'From Organization': '送付元団体',
'From Person': '送付元の担当者',
'From Warehouse': '倉庫から',
'From': '輸送元',
'Frost': '凍結',
'Fulfil. Status': '確保量は十分か',
'Fulfillment Status': '充足状況',
'Full beard': 'もみあげまでのアゴヒゲ、口髭あり',
'Full': '満員',
'Fullscreen Map': 'フルスクリーン表示',
'Function Permissions': '機能に対する権限',
'Function': '機能',
'Functional Tests': '機能テスト',
'Functions available': '利用可能な機能',
'Funding Organization': '資金提供団体',
'Funeral': '葬儀',
'Further Action Recommended': '更なる対応が推奨されている',
'GIS Reports of Shelter': '避難所のGISレポート',
'GIS integration to view location details of the Shelter': '避難所のロケーション詳細を閲覧するGISインテグレーション',
'GPS Marker': 'GPSマーカー',
'GPS Track File': 'GPS Track ファイル',
'GPS Track': 'GPS トラック',
'GPX Layers': 'GPX レイヤ',
'GPX Track': 'GPX形式の追跡情報',
'GRN Status': 'GRNステータス',
'Gale Wind': '強風',
'Gantt Chart': 'ガントチャート',
'Gap Analysis Map': 'ギャップ解析マップ',
'Gap Analysis Report': 'ギャップ解析報告',
'Gap Analysis': 'ギャップ解析',
'Gap Map': '需給ギャップマップ',
'Gap Report': '需給ギャップの報告',
'Gateway Settings': 'ゲートウェイ設定',
'Gateway settings updated': 'ゲートウェイ設定を更新しました',
'Gender': '性別',
'General Comment': '包括コメント',
'General Medical/Surgical': '一般医学/外科',
'General emergency and public safety': '一般的緊急事態と公共の安全',
'General information on demographics': '人口統計の情報',
'Generator': '発電機',
'Geocoder Selection': 'Geocoder 選択',
'Geometry Name': 'Geometry名',
'Geonames.org search requires Internet connectivity!': 'Geonames.org の検索を行うには、インターネットに接続している必要があります。',
'Geophysical (inc. landslide)': '地球物理 (地滑りを含む)',
'Geotechnical Hazards': '地盤災害',
'Geotechnical': '地質工学',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '実行中のPythonでGeraldoモジュールが利用できません。PDF出力に必要です。',
'Geraldo not installed': 'Geraldoがインストールされていません',
'Get incoming recovery requests as RSS feed': '遺体回収要請をRSSフィードとして取得する',
'Girls 13-18 yrs in affected area': '影響地域内の13-18歳の女子数',
'Girls 13-18 yrs not attending school': '学校に来ていなかった13-18歳の女子数',
'Girls 6-12 yrs in affected area': '影響地域内の6-12歳の女子数',
'Girls 6-12 yrs not attending school': '学校に来ていなかった6-12歳の女子数',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '画像に関する説明。特に、写真のどの箇所に何が確認できるかを記載します (オプション)',
'Give information about where and when you have seen the person': '人物を見かけた場所や時間の情報を提供してください',
'Give information about where and when you have seen them': 'どこで、いつ、彼らを見かけたのか、情報をください',
'Global Messaging Settings': 'メッセージの全般設定',
'Glossary': '用語集',
'Go to Request': '支援要請に行く',
'Goatee': 'やぎヒゲ',
'Goods Received Note': '受諾した物資の注釈',
'Government UID': '政府UID',
'Government building': '政府所管の建物',
'Government': '政府・行政機関',
'Grade': '学年',
'Greek': 'ギリシャ語',
'Green': '緑',
'Ground movement, fissures': '地盤移動、亀裂',
'Ground movement, settlement, slips': '地盤移動、沈下、がけ崩れ',
'Group %(group_id)s created': 'グループ %(group_id)s を作成しました',
'Group Description': 'グループの説明',
'Group Details': 'グループの詳細',
'Group ID': 'グループID',
'Group Member added': 'グループメンバを追加しました',
'Group Members': 'グループメンバ',
'Group Memberships': 'グループメンバシップ',
'Group Name': 'グループ名',
'Group Title': 'グループのタイトル',
'Group Type': 'グループのタイプ',
'Group added': 'グループを追加しました',
'Group deleted': 'グループを削除しました',
'Group description': 'グループの説明',
'Group name': 'グループ名',
'Group type': 'グループタイプ',
'Group updated': 'グループを更新しました',
'Group': 'グループ',
'Groups removed': 'グループを削除しました',
'Groups': 'グループ',
'Guest': 'ゲスト',
'HR Data': '人的資源の情報',
'HR Manager': '人的資源マネージャー',
'Hail': 'あられ',
'Hair Color': '頭髪の色',
'Hair Length': '頭髪の長さ',
'Hair Style': 'ヘアスタイル',
'Has additional rights to modify records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを変更するための権限を追加します',
'Has data from this Reference Document been entered into Sahana?': 'リファレンス文書の内容が Sahanaに登録してあるかどうかを記載してください。',
'Has only read-only access to records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを閲覧のみに制限します',
'Has the safety and security of women and children in your community changed since the emergency?': '緊急事態以来、女性や未成年の生活の危険度が変化したかどうかを記載してください',
'Has your business been damaged in the course of the disaster?': '災害の過程で、ビジネス上の損害を受けているかどうかを記載してください',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '世帯に対して避難所用品や生活必需品が配布されている、あるいは数日以内に配布を実施できるかを記載してください',
'Have normal food sources been disrupted?': '平常時の食料調達源が利用不可能になったかどうかを記載してください',
'Have schools received or are expecting to receive any assistance?': '学校に対してなんらかの支援が行われた、あるいは行われる予定であるかどうかを記載してください',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '医療品や食糧支援を、被災者、あるいはあなたが受領したかどうか、あるいは数日以内に受領できそうかどうかを記載してください。',
'Hazard Pay': '災害補償金',
'Hazardous Material': '危険物',
'Hazardous Road Conditions': '災害発生後の道路状況',
'Header Background': 'ヘッダー背景',
'Header background file %s missing!': 'ヘッダー背景ファイル%sが存在しません。',
'Headquarters': '本部・本社',
'Health care assistance, Rank': '医療 / 介護支援、ランク',
'Health center with beds': '保健所(ベッドあり)',
'Health center without beds': '保健所(ベッドなし)',
'Health center': '保健所',
'Health services functioning prior to disaster': '災害発生以前 ヘルスサービスの提供',
'Health services functioning since disaster': '災害発生後 ヘルスサービスの提供',
'Health services status': '医療サービス状況',
'Health': '保険・介護',
'Healthcare Worker': 'ヘルスケア要員',
'Heat Wave': '熱波',
'Heat and Humidity': '熱と湿度',
'Height (cm)': '身長 (cm)',
'Height': '身長',
'Help': ' ヘルプ ',
'Helps to monitor status of hospitals': '病院の現状把握に役立つ情報を管理します',
'Helps to report and search for Missing Persons': '行方不明者の報告と検索を支援します。',
'Here are the solution items related to the problem.': '問題に関連する解決案です。',
'Heritage Listed': '遺産登録',
'Hide Details': '詳細を隠す',
'Hierarchy Level 0 Name (e.g. Country)': '階層レベル0の名前(例: 国)',
'Hierarchy Level 1 Name (e.g. Province)': '階層レベル1の名前 (例: 都道府県)',
'Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Hierarchy Level 3 Name': '階層レベル3の名前',
'Hierarchy Level 4 Name': '階層レベル4の名前',
'High Water': '最高水位',
'High': '高',
'Hindu': 'ヒンズー教徒',
'History': '履歴',
'Hit the back button on your browser to try again.': 'ブラウザの「戻る」ボタンを押して、やり直してください。',
'Holiday Address': '休日の住所',
'Home Address': '自宅住所',
'Home Country': '所属国',
'Home Crime': '住居犯罪',
'Home': 'ホーム',
'Hospital Details': '病院の詳細',
'Hospital Status Report': '病院ステータスレポート',
'Hospital information added': '病院情報を追加しました',
'Hospital information deleted': '病院情報を削除しました',
'Hospital information updated': '病院情報を更新しました',
'Hospital status assessment.': '病院ステータスアセスメント',
'Hospital': '病院',
'Hospitals': '病院情報',
'Hot Spot': 'ホットスポット',
'Hour': '時間',
'Hourly': '1時間毎',
'Household kits received': '家事用品を受領しました',
'Household kits, source': '家事用品の送付元',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の男子がよく集まっていた場所と活動は?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の男子がよく集まっていた場所と活動は?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の女子がよく集まっていた場所と活動は?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の女子がよく集まっていた場所と活動は?',
'How do boys 13-17yrs spend most of their time now?': '現在、13-17歳の男子は普段何をして過ごしていますか?',
'How do boys <12yrs spend most of their time now?': '現在、12歳以下の男子は普段何をして過ごしていますか?',
'How do girls 13-17yrs spend most of their time now?': '現在、13-17歳の女子は普段何をして過ごしていますか?',
'How do girls <12yrs spend most of their time now?': '現在、12歳以下の女子は普段何をして過ごしていますか?',
'How does it work?': 'どのように動きますか?',
'How is this person affected by the disaster? (Select all that apply)': 'この人物の被災状況を記載してください(該当する項目を全て選択)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '水資源を確保できる地点までの距離を記載します。徒歩で往復し、待ち時間も含めた時間を記載してください。',
'How long does it take you to walk to the health service?': '医療サービスが提供されている場所まで、徒歩で必要な時間を記載します。',
'How long will the food last?': '洪水の残存予測期間',
'How long will this water resource last?': '水の供給が枯渇する時期',
'How many Boys (0-17 yrs) are Dead due to the crisis': '災害で死亡した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Injured due to the crisis': '災害で負傷した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Missing due to the crisis': '災害で行方不明となった少年の数(0-17歳)',
'How many Girls (0-17 yrs) are Dead due to the crisis': '災害で死亡した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Injured due to the crisis': '災害で負傷した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Missing due to the crisis': '災害で行方不明になった少女の数(0-17歳)',
'How many Men (18 yrs+) are Dead due to the crisis': '災害で死亡した男性の数(18歳以上)',
'How many Men (18 yrs+) are Injured due to the crisis': '災害で負傷した男性の数(18歳以上)',
'How many Men (18 yrs+) are Missing due to the crisis': '災害で行方不明となった男性の数(18歳以上)',
'How many Women (18 yrs+) are Dead due to the crisis': '災害で死亡した女性の数(18歳以上)',
'How many Women (18 yrs+) are Injured due to the crisis': '災害で負傷した女性の数(18歳以上)',
'How many Women (18 yrs+) are Missing due to the crisis': '災害で行方不明となった女性の数(18歳以上)',
'How many days will the supplies last?': '支援物資がなくなるまでの日数',
'How many doctors in the health centers are still actively working?': 'ヘルスセンター内の医師の人数を記載してください',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '居住不可になった家屋数を記載してください(居住不可 = 基礎構造や土台部分の破壊など)',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '災害によって破損したが、まだ利用が可能である住居の数を記載してください(利用可能 = 窓の破壊、壁のヒビ、屋根の軽微な破損など)',
'How many latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレの数を記載してください',
'How many midwives in the health centers are still actively working?': '医療センター内の助産師の人数を記載してください',
'How many new cases have been admitted to this facility in the past 24h?': '過去24時間でこの施設で受け入れたケースの数は?',
'How many nurses in the health centers are still actively working?': '保健所で活動可能な看護師は何人居ますか?',
'How many of the patients with the disease died in the past 24h at this facility?': 'この施設で過去24時間で何人の患者がこの病気で亡くなりましたか?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'この地域の、登校していない学童期男児(6-12歳)の数を記載してください。',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'この地域の、登校していない学童期女児(6-12歳)の数を記載してください。',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '平常通りの授業を実施できている小学校・中学校・高校の数を記入してください',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'この地域の、登校していない中高校生年齢男子(13-18歳)の数を記載してください。',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'この地域の、登校していない女子中高生(13-18歳)の数を記載してください。',
'How many patients with the disease are currently hospitalized at this facility?': 'この病気のためにこの施設に入院している患者は現在何人ですか?',
'How many primary school age boys (6-12) are in the affected area?': '被災地域内の学童期男児(6-12歳)の数を記載してください',
'How many primary school age girls (6-12) are in the affected area?': '被災地域内の学童期女児(6-12歳)の数を記載してください。',
'How many primary/secondary schools were opening prior to the disaster?': '災害発生前に授業が行われていた小学校・中学校・高校の数を記載してください',
'How many secondary school age boys (13-18) are in the affected area?': '被災地域内の男子中学生・男子高校生(13-18歳)の数を記載してください',
'How many secondary school age girls (13-18) are in the affected area?': '被災地域内の中高生年齢女子(13-18歳)の数を記載してください。',
'How many teachers have been affected by the disaster (affected = unable to work)?': '被災し、授業ができない状態の教師の人数を記載してください',
'How many teachers worked in the schools prior to the disaster?': '災害発生前の教師の人数を記載してください',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'どの程度詳細な情報が表示されるかを定義します。ズームすることで詳細が表示されるようになりますが、そのかわり、広域を見渡すことができなくなります。逆に、ズームしないことで広域を表示できますが、詳細情報の確認は行えなくなります。',
'Human Resource Management': '人的資源マネージメント',
'Human Resource': '人的資源',
'Human Resources Management': '人的資源管理',
'Human Resources': '人的資源',
'Humanitarian NGO': '人道支援NGO',
'Hurricane Force Wind': 'ハリケーンの風力',
'Hurricane': 'ハリケーン',
'Hygiene NFIs': '衛生用品',
'Hygiene kits received': '衛生用品を受領した',
'Hygiene kits, source': '衛生用品の送付元',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生上の問題',
'Hygiene': '衛生',
'I am available in the following area(s)': '以下の地域を担当できます',
'ID Label': 'IDラベル',
'ID Label: ': 'IDラベル: ',
'ID Tag Number': 'IDタグ番号',
'ID Tag': 'ID タグ',
'ID type': 'IDタイプ',
'Ice Pressure': '氷結圧力',
'Iceberg': 'アイスバーグ',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'できればソースファイルの完全なURLを記載します。難しい場合はデータ入手元のメモでも構いません。',
'Identification Report': 'IDレポート',
'Identification Reports': 'IDレポート',
'Identification Status': 'IDステータス',
'Identification label of the Storage bin.': '備蓄コンテナの区別用ラベル番号。',
'Identification': 'ID',
'Identified as': '判明した身元',
'Identified by': 'によって識別された',
'Identity Details': '身元確認の詳細',
'Identity added': '身元情報を追加しました',
'Identity deleted': '身元確認を削除しました',
'Identity updated': '身元確認を更新しました',
'Identity': '身元確認',
'If Staff have login accounts then they are given access to edit the details of the': 'スタッフがログイン用アカウントを有している場合、以下項目の詳細を編集することができます:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '「Unit = m, Base Unit = Km」の場合、「1m = 0.001 km」なので乗数は0.0001 です。',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'このドメインの電子メールアドレスを所有するユーザーを認証する場合は、承認がさらに必要かどうか、必要なら誰が承認するか、を決めるのに承認者フィールドを使用します。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーがアクセスしたときに、全てのレコードがログに保存されます。無効にすると、モジュール毎に有効にすることができます。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーが編集したすべてのレコードを記録します。無効にすると、モジュール毎に有効にできます。',
'If neither are defined, then the Default Marker is used.': 'もし両方共定義されていない場合、デフォルトマーカーが使われます。',
'If no marker defined then the system default marker is used': 'マーカーが定義されていない場合は、システムのデフォルトマーカーを使用します。',
'If no, specify why': 'いいえ、の場合はその理由を記載してください',
'If none are selected, then all are searched.': 'もしなにも選択しなければ、全てを検索します',
'If the location is a geographic area, then state at what level here.': '場所が地理的に確定できる場所ならば、その場所のレベルを記載してくだい。',
'If the request is for type "Other", you should enter a summary of the request here.': '支援要請が"その他"の場合、概要をここに入力する必要があります',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとして登録されるように指定することができます',
'If this is set to True then mails will be deleted from the server after downloading.': 'Trueに設定されている場合は、メールはダウンロード後にサーバーから削除されます。',
'If this record should be restricted then select which role is required to access the record here.': 'このレコードへのアクセスを制限する際には、アクセスに必要となる権限を選択してください',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'このレコードを制限したい場合、アクセスを許可する権限を指定してください。',
'If yes, specify what and by whom': '「はい」の場合、供給される食料と供給元',
'If yes, which and how': '「はい」の場合、混乱している場所や原因を記載',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '参照文書を入力しない場合は、データ検証のために入力者の電子メールが表示されます。',
'If you know what the Geonames ID of this location is then you can enter it here.': 'このロケーションの Geonames ID がある場合、ここに入力してください。',
'If you know what the OSM ID of this location is then you can enter it here.': 'このロケーションの OSM ID がある場合、ここに入力してください。',
'If you need to add a new document then you can click here to attach one.': '文書の添付はこのページから可能です。',
'If you want several values, then separate with': '複数の値を入力したい場合、この文字で分割してください : ',
'If you would like to help, then please': 'ご協力いただける方は登録をお願いします',
'Illegal Immigrant': '不法移民',
'Image Details': '画像の詳細',
'Image Tags': '画像のタグ',
'Image Type': '画像のタイプ',
'Image Upload': '画像のアップロード',
'Image added': '画像を追加しました',
'Image deleted': '画像を削除しました',
'Image updated': '画像を更新しました',
'Image': '画像',
'Image/Attachment': '画像/添付資料',
'Image/Other Attachment': '画像/その他の添付ファイル',
'Imagery': '画像',
'Images': '画像',
'Immediate reconstruction assistance, Rank': '建築物の緊急修理 / 再建築支援、ランク',
'Impact Assessment Summaries': '災害影響範囲アセスメントの概要',
'Impact Assessments': '災害影響範囲アセスメント',
'Impact Baselines': '影響範囲の基準値',
'Impact Details': '被害の詳細',
'Impact Type Details': '災害影響のタイプ詳細',
'Impact Type added': '災害の影響タイプを追加しました',
'Impact Type deleted': '影響範囲タイプを削除しました',
'Impact Type updated': '災害影響のタイプを更新しました',
'Impact Type': '災害影響タイプ',
'Impact Types': '災害影響のタイプ',
'Impact added': '被災影響を追加しました',
'Impact deleted': '影響範囲を削除しました',
'Impact updated': '被災状況を更新しました',
'Impacts': '影響',
'Import & Export Data': 'データのインポートとエクスポート',
'Import Data': 'データのインポート',
'Import Job': 'Jobのインポート',
'Import Jobs': 'Jobsのインポート',
'Import and Export': 'インポートとエクスポート',
'Import from Ushahidi Instance': 'Ushahidi インスタンスから設定をインポート',
'Import if Master': 'マスターなら取り込む',
'Import job created': 'Import jobを作成しました',
'Import multiple tables as CSV': '複数のテーブルをCSVとしてインポート',
'Import': 'インポート',
'Import/Export': 'インポート/エクスポート',
'Important': '重要',
'Importantly where there are no aid services being provided': '救護サービスが提供されていない地域において重要となります',
'Imported': 'インポートしました',
'Importing data from spreadsheets': 'スプレッドシートからデータをインポートしています',
'Improper decontamination': '不適切な汚染の除去',
'Improper handling of dead bodies': '誤った扱いをされている遺体',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'In GeoServerでは、これはレイヤ名です。WFS getCapabilitiesでは、これはコロン(:)後のFeatureType名の部分です。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'GeoServer では、これはワークスペース名です。WFS getCapabilities では、これはコロン「:」の前の FeatureType の部分となります。',
'In Inventories': 'この物資の在処',
'In Process': '実行中',
'In Progress': '実行中',
'In Transit': '輸送中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'この地図のウィンドウレイアウトは、全体を覆い隠します。従って、ここで大きな値を入力する必要はありません',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般的に、コミュニティ内の高齢者、障がい者、子供、青年、女性たちが最も必要としている物資やサービスがなんであるかを記載してください',
'Inbound Mail Settings': '着信メール設定',
'Inbox': '受信箱',
'Incident Categories': 'インシデントカテゴリ',
'Incident Details': 'インシデントの詳細',
'Incident Report Details': 'インシデントレポートの詳細',
'Incident Report added': '災害影響範囲レポートを追加しました',
'Incident Report deleted': 'インシデントレポートを削除しました',
'Incident Report updated': 'インシデントレポートを更新しました',
'Incident Report': 'インシデントレポート',
'Incident Reporting System': 'インシデントの報告を行ないます',
'Incident Reporting': 'インシデントレポート',
'Incident Reports': 'インシデントレポート',
'Incident added': 'インシデントを追加しました',
'Incident deleted': 'インシデントを削除しました',
'Incident updated': 'インシデントを更新しました',
'Incident': 'インシデント',
'Incidents': 'インシデント',
'Incoming Shipment canceled': '到着する配送が取消しされました',
'Incoming Shipment updated': '入荷した物資が更新されました',
'Incoming': '入荷',
'Incomplete': '未完了',
'Individuals': '個人',
'Industrial Crime': '産業犯罪',
'Industrial': '産業',
'Industry Fire': '工場から出火',
'Industry close to village/camp': '村落/仮泊施設の周辺に工場が存在',
'Infant (0-1)': '乳児(0-1歳)',
'Infectious Disease': '感染症',
'Infectious Diseases': '感染症',
'Infestation': '感染',
'Informal Leader': '非公式なリーダー',
'Informal camp': '非指定避難所',
'Information gaps': '情報のギャップ',
'Infusion catheters available': '注入カテーテルが利用可能',
'Infusion catheters need per 24h': '24時間毎に必要な注入カテーテル数',
'Infusion catheters needed per 24h': '24時間ごとに、注入カテーテルが必要',
'Infusions available': '点滴が利用可能',
'Infusions needed per 24h': '24時間毎に必要な点滴の数',
'Input Job': 'Jobのインポート',
'Inspected': '調査済み',
'Inspection Date': '調査した日付',
'Inspection date and time': '調査日時',
'Inspection time': '調査した時刻',
'Inspector ID': '調査者ID',
'Instance Type': 'インスタンスタイプ',
'Instant Porridge': 'インスタント粥',
'Institution': 'その他の組織',
'Insufficient Privileges': '権限が足りません',
'Insufficient vars: Need module, resource, jresource, instance': '不十分な変数: module, resource, jresource, instance が必要です',
'Insufficient': '不足',
'Intake Items': 'アイテムの受け入れ',
'Intergovernmental Organization': '国際政府間組織',
'Interior walls, partitions': '室内の壁、仕切り',
'Internal Features': '内部機能',
'Internal State': '内部状態',
'International NGO': '国際NGO',
'International Organization': '国際機関',
'International Staff': '国外からのスタッフ',
'Intervention': '介入',
'Interview taking place at': 'インタビュー実施場所',
'Invalid Query': '無効なクエリ',
'Invalid email': '無効な電子メール',
'Invalid login': '無効なログイン',
'Invalid request!': 'リクエストは無効です。',
'Invalid ticket': '無効なチケット',
'Invalid': '無効な',
'Inventories with Item': '在庫アイテム',
'Inventories': '在庫管理',
'Inventory Item Details': '救援物資の在庫詳細',
'Inventory Item added': '救援物資の在庫を追加しました',
'Inventory Item deleted': '備蓄物資を削除しました',
'Inventory Item updated': '備蓄物資を更新しました',
'Inventory Item': '備蓄物資',
'Inventory Items Available for Request Item': '要求された物資に適合する、倉庫内の物資',
'Inventory Items': '備蓄物資',
'Inventory Management': '物資の管理',
'Inventory Store Details': '物資集積地点の詳細',
'Inventory Store added': '物資集積地点を追加しました',
'Inventory Store deleted': '物資集積地点を削除しました',
'Inventory Store updated': '物資集積地点を更新しました',
'Inventory Store': '物資集積地点',
'Inventory Stores': '物資集積地点',
'Inventory functionality is available for:': '備蓄機能を利用可能:',
'Inventory of Effects': '救援物資の影響',
'Inventory': '在庫',
'Inventory/Ledger': '在庫 / 元帳',
'Is adequate food and water available for these institutions?': '関係者に対して十分な水と食料が供給されていますか?',
'Is it safe to collect water?': '水の確保は安全に行えるか?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '村落/集落の近くに、工場あるいは農業化学プラントなどが存在しますか?',
'Is this a strict hierarchy?': 'これは厳密な階層構造ですか?',
'Issuing Authority': '発行機関',
'It is built using the Template agreed by a group of NGOs working together as the': '聞き取り項目のテンプレートは、以下リンクのNGO組織と協同で作成されています。',
'Item Added to Shipment': '輸送情報に物資を追加する',
'Item Catalog Categories': '物資カタログカテゴリ',
'Item Catalog Category Details': '救援物資カタログのカテゴリ詳細',
'Item Catalog Category added': '救援物資カタログのカテゴリを追加しました',
'Item Catalog Category deleted': '救援物資カタログのカテゴリを削除しました',
'Item Catalog Category updated': '物資カタログカテゴリを更新しました',
'Item Catalog Category': '救援物資カタログのカテゴリ',
'Item Catalog Details': '物資カタログの詳細',
'Item Catalog added': '救援物資カタログを追加しました',
'Item Catalog deleted': '物資カタログを削除しました',
'Item Catalog updated': '物資カタログを更新しました',
'Item Catalogs': '救援物資カタログ',
'Item Categories': '物資カテゴリ',
'Item Category Details': '物資カテゴリの詳細',
'Item Category added': '救援物資カテゴリを追加しました',
'Item Category deleted': '救援物資カテゴリを削除しました',
'Item Category updated': '物資カテゴリを更新しました',
'Item Category': '物資カテゴリ',
'Item Details': '救援物資の詳細',
'Item Pack Details': '救援物資パックの詳細',
'Item Pack added': '物資パックを追加しました',
'Item Pack deleted': '救援物資のパックを削除しました',
'Item Pack updated': '救援物資パックを更新しました',
'Item Packs': '物資パック',
'Item Sub-Categories': '救援物資のサブカテゴリ',
'Item Sub-Category Details': '物資サブカテゴリの詳細',
'Item Sub-Category added': '救援物資のサブカテゴリを追加しました',
'Item Sub-Category deleted': '物資サブカテゴリを削除しました',
'Item Sub-Category updated': '救援物資サブカテゴリを更新しました',
'Item Sub-Category': '物資サブカテゴリ',
'Item added to shipment': '物資が輸送に回りました',
'Item added': '救援物資を追加しました',
'Item already in Bundle!': '物資がすでにバンドルに存在しています。',
'Item already in Kit!': '救援物資は既にキットに存在しています',
'Item already in budget!': '物資は既に予算に登録されています',
'Item deleted': '物資を削除しました',
'Item updated': '救援物資を更新しました',
'Item': '物資',
'Items': '救援物資',
'Japan': '日本',
'Japanese': '日本語',
'Jerry can': 'ジェリ缶',
'Jew': 'ユダヤ教徒',
'Job Market': '求人',
'Job Title': '肩書き',
'Jobs': '職業',
'Just Once': '一度だけ',
'KPIs': 'KPI',
'Key Details': 'Keyの詳細',
'Key added': 'キーを追加しました',
'Key deleted': 'キーを削除しました',
'Key updated': 'キーを更新しました',
'Key': 'キー',
'Keys': 'キー',
'Kit Contents': 'Kitの内容',
'Kit Details': 'Kitの詳細',
'Kit Updated': 'キットを更新しました',
'Kit added': 'キットを追加しました',
'Kit deleted': 'キットを削除しました',
'Kit updated': 'キットを更新しました',
'Kit': 'キット',
'Kits': 'キット',
'Known Identities': '既知のID',
'Known incidents of violence against women/girls': '女性に対する暴力行為が発生した',
'Known incidents of violence since disaster': '災害発生後に暴力行為が発生した',
'LICENSE': 'ライセンス',
'LMS Administration': 'LMSの管理',
'Label': 'ラベル',
'Lack of material': '資材不足',
'Lack of school uniform': '学校制服が不足',
'Lack of supplies at school': '学校用物資の不足',
'Lack of transport to school': '学校への輸送手段の不足',
'Lactating women': '授乳中の女性の数',
'Lahar': 'ラハール',
'Landslide': '地すべり',
'Language': 'Language 言語',
'Last Name': '名前',
'Last known location': '最後に目撃された場所',
'Last name': '名前',
'Last synchronization time': 'データ同期の最終実施時刻',
'Last updated': '最終更新日',
'Last updated by': '最終更新者',
'Last updated on': '直近のアップデート実施時刻',
'Latitude & Longitude': '緯度&経度',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は南北方向(上下)を定義します。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。',
'Latitude is North-South (Up-Down).': '緯度は南北(上下)です',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は赤道では0、北半球ではプラス、南半球ではマイナスになります',
'Latitude should be between': '緯度の値として有効な値は',
'Latitude': '緯度',
'Latrines': 'トイレ',
'Law enforcement, military, homeland and local/private security': '法執行機関、自衛隊、警察および警備会社',
'Layer Details': 'レイヤの詳細',
'Layer added': 'レイヤを追加しました',
'Layer deleted': 'レイヤを削除しました',
'Layer updated': 'レイヤを更新しました',
'Layer': 'レイヤ',
'Layers updated': 'レイヤを更新しました',
'Layers': 'レイヤ',
'Layout': 'レイアウト',
'Legend Format': '凡例形式',
'Length': '長さ',
'Level 1 Assessment Details': 'レベル1アセスメントの詳細',
'Level 1 Assessment added': 'レベル1アセスメントを追加しました',
'Level 1 Assessment deleted': 'レベル1のアセスメントを削除しました',
'Level 1 Assessment updated': 'レベル1アセスメントを更新しました',
'Level 1 Assessments': 'レベル1 アセスメント',
'Level 1': 'レベル1',
'Level 2 Assessment Details': 'レベル2アセスメントの詳細',
'Level 2 Assessment added': 'レベル2アセスメントを追加しました',
'Level 2 Assessment deleted': 'レベル2アセスメントを削除しました',
'Level 2 Assessment updated': 'レベル2アセスメントを更新しました',
'Level 2 Assessments': 'レベル2アセスメント',
'Level 2 or detailed engineering evaluation recommended': 'レベル2あるいは詳細な技術的評価を行うことを推奨します',
'Level 2': 'レベル2',
'Level': 'レベル',
'Library support not available for OpenID': 'OpenIDのライブラリサポートが利用できません',
'License Plate': '個人認証カード',
'Line': '行',
'LineString': '折れ線',
'Link Item & Shipment': 'アイテムと輸送を紐付ける',
'Link an Item & Shipment': 'アイテムと出荷を結び付ける',
'Linked Records': '参照しているレコード',
'Linked records': '関連しているレコード',
'List / Add Baseline Types': '基準値タイプの一覧 / 追加',
'List / Add Impact Types': '災害影響のタイプを表示 / 追加',
'List / Add Services': 'サービスの一覧表示 / 追加',
'List / Add Types': 'タイプの一覧表示 / 追加',
'List Activities': '支援活動一覧',
'List Aid Requests': '援助要請の一覧',
'List All Entries': '全てのエントリ一覧',
'List All Memberships': '全てのメンバシップ一覧',
'List All Reports': '報告すべての一覧',
'List All': '全項目一覧',
'List Alternative Items': '代わりの物資一覧',
'List Assessment Summaries': 'アセスメント要約の一覧',
'List Assessments': 'アセスメント一覧',
'List Asset Assignments': '資産割り当ての一覧',
'List Assets': '資産一覧',
'List Baseline Types': '基準値タイプ一覧',
'List Baselines': '基準値一覧',
'List Brands': '銘柄の一覧',
'List Budgets': '予算の一覧',
'List Bundles': 'Bundleの一覧',
'List Catalog Items': '物資カタログの一覧',
'List Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係一覧',
'List Checklists': 'チェックリスト一覧',
'List Cluster Subsectors': 'クラスタのサブセクタ一覧',
'List Cluster': 'クラスタ一覧',
'List Clusters': 'クラスタ一覧',
'List Commitment Items': 'コミットされた救援物資の一覧',
'List Commitments': 'コミットメントの一覧',
'List Configs': '設定一覧',
'List Conflicts': 'データ競合一覧',
'List Contact Information': '連絡先情報の一覧',
'List Contacts': '連絡先一覧',
'List Credentials': '証明書一覧',
'List Current': '現在の一覧',
'List Distribution Items': '配給物資リスト',
'List Distributions': '配給所リスト',
'List Documents': '文書の一覧',
'List Donors': '資金提供組織一覧',
'List Feature Layers': 'Featureレイヤリスト',
'List Flood Reports': '洪水レポート一覧',
'List GPX Layers': 'GPXレイヤ一覧',
'List Groups': 'グループ一覧',
'List Groups/View Members': 'グループを一覧/メンバーを表示',
'List Hospitals': '病院の一覧',
'List Identities': 'ID一覧',
'List Images': '画像の一覧',
'List Impact Assessments': '災害影響範囲アセスメント一覧',
'List Impact Types': '災害影響のタイプ一覧',
'List Impacts': '被害一覧',
'List Incident Reports': 'インシデントレポート一覧',
'List Incidents': 'インシデント一覧',
'List Inventory Items': '備蓄物資リスト',
'List Inventory Stores': '物資集積地点リスト',
'List Item Catalog Categories': '救援物資カタログのカテゴリ一覧',
'List Item Catalogs': '救援物資カタログ一覧',
'List Item Categories': '物資カテゴリ一覧',
'List Item Packs': '物資パックの一覧',
'List Item Sub-Categories': '物資サブカテゴリ一覧',
'List Items': '救援物資一覧',
'List Keys': 'Keyの一覧',
'List Kits': 'Kit一覧',
'List Layers': 'レイヤ一覧',
'List Level 1 Assessments': 'レベル1アセスメントの一覧',
'List Level 1 assessments': 'レベル1アセスメント一覧',
'List Level 2 Assessments': 'レベル2のアセスメント一覧',
'List Level 2 assessments': 'レベル2アセスメント一覧',
'List Locations': 'ロケーション一覧',
'List Log Entries': 'ログエントリ一覧',
'List Map Profiles': '地図設定の一覧',
'List Markers': 'マーカー一覧',
'List Members': 'メンバ一覧',
'List Memberships': 'メンバシップ一覧',
'List Messages': 'メッセージ一覧',
'List Metadata': 'メタデータ一覧',
'List Missing Persons': '行方不明者リストを表示',
'List Need Types': '需要タイプ一覧',
'List Needs': 'ニーズ一覧',
'List Notes': '追加情報一覧',
'List Offices': 'オフィス一覧',
'List Organizations': '団体一覧',
'List Peers': 'データ同期先一覧',
'List Personal Effects': '携帯品のリスト',
'List Persons': '人物情報一覧',
'List Photos': '写真リスト',
'List Positions': '場所一覧',
'List Problems': '問題一覧',
'List Projections': '地図投影法リスト',
'List Projects': 'プロジェクト一覧',
'List Rapid Assessments': '被災地の現況アセスメント一覧',
'List Received Items': '受領された物資の一覧',
'List Received Shipments': '受領された輸送一覧',
'List Records': 'レコード一覧',
'List Registrations': '登録証明書の一覧',
'List Reports': 'レポート一覧',
'List Request Items': '物資要請リスト',
'List Requests': '支援要請の一覧',
'List Resources': 'リソース一覧',
'List Responses': '回答の一覧',
'List Rivers': '河川リスト',
'List Roles': '役割一覧',
'List Sections': 'Section一覧',
'List Sectors': '活動分野の一覧',
'List Sent Items': '送付した物資一覧',
'List Sent Shipments': '送付済み物資一覧',
'List Service Profiles': 'サービスプロファイル一覧',
'List Settings': '設定一覧',
'List Shelter Services': '避難所での提供サービス一覧',
'List Shelter Types': '避難所タイプ一覧',
'List Shelters': '避難所の一覧',
'List Shipment Transit Logs': '物資輸送履歴の一覧',
'List Shipment/Way Bills': '輸送費/渡航費の一覧',
'List Shipment<>Item Relation': '輸送と物資の関連性一覧',
'List Shipments': '配送の一覧',
'List Sites': 'Site一覧',
'List Skill Types': 'スキルタイプを一覧表示',
'List Skills': 'スキルを一覧表示',
'List Solutions': '解決案一覧',
'List Staff Types': 'スタッフタイプ一覧',
'List Staff': 'スタッフ一覧',
'List Status': '状況一覧',
'List Storage Bin Type(s)': 'Storage Binタイプ一覧',
'List Storage Bins': 'Storage Bin一覧',
'List Storage Location': '備蓄地点の一覧',
'List Subscriptions': '寄付申し込み一覧',
'List Support Requests': '支援要求のリスト',
'List Survey Answers': '調査の回答の一覧',
'List Survey Questions': 'Survey Question一覧',
'List Survey Sections': 'Survey Sectionsの一覧',
'List Survey Series': '一連の調査リスト',
'List Survey Templates': '調査テンプレートの一覧',
'List TMS Layers': 'TMS レイヤの一覧',
'List Tasks': 'タスク一覧',
'List Teams': 'チーム一覧',
'List Themes': 'テーマ一覧',
'List Tickets': 'チケット一覧',
'List Tracks': '追跡情報の一覧',
'List Units': '単位一覧',
'List Users': 'ユーザ一覧',
'List Volunteers': 'ボランティアの表示',
'List WMS Layers': 'WMSレイヤ一覧',
'List Warehouse Items': '倉庫に備蓄中の物資一覧',
'List Warehouses': '倉庫の一覧',
'List all': '全項目を表示',
'List of Items': '物資一覧',
'List of Missing Persons': '行方不明者リスト',
'List of Peers': 'データ同期先一覧',
'List of Reports': 'レポート一覧',
'List of Requests': '支援要請の一覧',
'List of Roles': '権限リスト',
'List of Spreadsheets uploaded': 'アップロード済スプレッドシート一覧',
'List of Spreadsheets': 'スプレッドシート一覧',
'List of Volunteers for this skill set': 'このスキルを所持するボランティアの一覧',
'List of addresses': '住所一覧',
'List unidentified': '身元不明者の一覧',
'List': '一覧',
'List/Add': '一覧/追加',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '救援団体は自身の支援活動の内容と場所を登録し、公開することで、他の組織との活動を調整することが可能となります。',
'Live Help': 'ライブヘルプ',
'Livelihood': '生計',
'Load Cleaned Data into Database': '整形したデータをデータベースへロード',
'Load Details': '詳細情報の読み込み',
'Load Raw File into Grid': 'Rawファイルをグリッドにロードしてください',
'Load the details to help decide which is the best one to keep out of the 2.': '2つのうちどちらを残すほうがよいか判断するため、詳細情報を確認します。',
'Loading Locations': 'ロケーションデータロード中',
'Loading Locations...': '位置を読込みしています ...',
'Loading': '読み込み中',
'Local Name': 'ローカル名',
'Local Names': 'ローカル名',
'Location 1': 'ロケーション 1',
'Location 2': 'ロケーション 2',
'Location De-duplicated': 'ロケーションの重複解消',
'Location Details': 'ロケーションの詳細',
'Location Hierarchy Level 0 Name': 'ロケーション階層レベル0の名前',
'Location Hierarchy Level 1 Name': 'ロケーション階層レベル1の名前',
'Location Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Location Hierarchy Level 3 Name': 'ロケーション階層レベル3の名前',
'Location Hierarchy Level 4 Name': 'ロケーション階層レベル4の名前',
'Location Hierarchy Level 5 Name': 'ロケーション階層レベル5の名前',
'Location added': 'ロケーションを追加しました',
'Location cannot be converted into a group.': 'ロケーションはグループに変換できません',
'Location deleted': 'ロケーションを削除しました',
'Location details': 'ロケーションの詳細',
'Location group cannot be a parent.': 'ロケーショングループは親にできません',
'Location group cannot have a parent.': 'ロケーショングループに親情報がありません。',
'Location updated': 'ロケーションを更新しました',
'Location': 'ロケーション',
'Location: ': 'ロケーション: ',
'Locations De-duplicator': 'ロケーションの重複解消',
'Locations of this level need to have a parent of level': 'このレベルのロケーションには、親属性となるレベルが必要です',
'Locations should be different!': '異なる位置を設定してください!',
'Locations': 'ロケーション',
'Lockdown': '厳重監禁',
'Log Entry Details': 'ログエントリの詳細',
'Log entry added': 'ログエントリを追加しました',
'Log entry deleted': 'ログエントリを削除しました',
'Log entry updated': 'ログエントリを更新しました',
'Log': 'ログ',
'Logged in': 'ログインしました',
'Logged out': 'ログアウトしました',
'Login': 'ログイン',
'Logistics Management System': '物流管理システム',
'Logistics Management': '物流管理',
'Logistics': '物流',
'Logo file %s missing!': 'ロゴファイル%sが見つかりません。',
'Logo': 'ロゴ',
'Logout': 'ログアウト',
'Long Text': '詳細テキスト',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度は東西(横)です。経度は子午線(グリニッジ標準時)でゼロ、東(ヨーロッパ、アジア)でプラスです。西(大西洋、アメリカ)でマイナスです。',
'Longitude is West - East (sideways).': '緯度は東西です(横方向)',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度はグリニッジ子午線(グリニッジ標準時)上が0度です。東側に向かってヨーロッパやアジアの各地で正の値となります。西に向かって大西洋やアメリカの各地で負の値となります。',
'Longitude should be between': '経度の値の有効な範囲は',
'Longitude': '経度',
'Looking up Parents': '親を検索',
'Looting': '略奪',
'Lost Password': 'パスワードの紛失',
'Lost': '行方不明',
'Low': '低',
'Magnetic Storm': '磁気嵐',
'Main cash source': '主な現金収入源',
'Main income sources before disaster': '災害発生前の主な収入源',
'Major expenses': '主な費用',
'Major outward damage': '大きな損傷あり',
'Make Commitment': 'コミットの作成',
'Make Pledge': '寄付の作成',
'Make Request': '支援を要請する',
'Make a Request for Aid': '援助要請を登録',
'Make a Request': '支援要請を登録',
'Make preparations per the <instruction>': '<instruction>毎に準備作業を行う',
'Male': '男性',
'Malnutrition present prior to disaster': '災害前から栄養が失調発生していた',
'Manage Category': 'カテゴリ管理',
'Manage Item catalog': '物資カタログの管理',
'Manage Kits': 'Kitsの管理',
'Manage Relief Item Catalogue': '救援アイテムカタログの管理',
'Manage Sub-Category': 'サブカテゴリの管理',
'Manage Users & Roles': 'ユーザと役割の管理',
'Manage Warehouses/Sites': '倉庫/Sitesの管理',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '支援物資、資産、人員、その他のリソースに対する要求を管理します。支援物資が要求された時に在庫と照合します。',
'Manage requests of hospitals for assistance.': '病院からの支援要請の管理',
'Manage volunteers by capturing their skills, availability and allocation': 'ボランティアのスキル、稼働状況、割り当て状況を管理します',
'Manage': '管理',
'Manager': 'マネージャ',
'Managing Office': 'オフィスの管理',
'Managing, Storing and Distributing Relief Items': '救援物資の保管、流通、配布状況を管理します',
'Managing, Storing and Distributing Relief Items.': '救援物資の管理、保存、配布状況を管理します。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必須項目。GeoServerでのこの項目はレイヤー名となります。WFSの get Capabilitiesでは、コロン( : )の後に付与される FeatureTypeとして表示されます。',
'Mandatory. The URL to access the service.': '省略できません。サービスにアクセスするためのURLです。',
'Manual Synchronization': 'データ手動同期',
'Manual': 'マニュアル',
'Many': '多数',
'Map Profile added': '地図の設定を追加しました',
'Map Profile deleted': '地図設定を削除しました',
'Map Profile updated': '地図設定を更新しました',
'Map Profile': '地図の設定',
'Map Profiles': '地図の設定',
'Map Height': '地図の縦高',
'Map Service Catalog': '地図サービスカタログ',
'Map Settings': '地図の設定',
'Map Viewing Client': '地図閲覧クライアント',
'Map Width': '地図の横幅',
'Map of Hospitals': '病院の地図',
'Map': '地図',
'Mapping': 'マッピング',
'Marine Security': '海上保安',
'Marital Status': '婚姻状況',
'Marker Details': 'マーカーの詳細',
'Marker added': 'マーカーを追加しました',
'Marker deleted': 'マーカーを削除しました',
'Marker updated': 'マーカーを更新しました',
'Marker': 'マーカー',
'Markers': 'マーカー',
'Master Message Log to process incoming reports & requests': '受け取ったレポートと要求を処理するマスターメッセージログ',
'Master Message Log': 'マスターメッセージログ',
'Match Percentage': '一致率',
'Match Requests': '支援要請マッチ',
'Match percentage indicates the % match between these two records': 'マッチの割合は、2つのレコードの間のマッチ状況をあわらします',
'Matching Catalog Items': '適合する救援物資カタログ',
'Matching Records': '一致するレコード',
'Matrix of Choices (Multiple Answers)': '選択肢 (複数可)',
'Matrix of Choices (Only one answer)': '選択肢 (複数選択不可)',
'Matrix of Text Fields': 'テキストフィールドのマトリックス',
'Max Persons per Dwelling': '住居ごとの最大収容人数',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': '最大重量| ドロップダウンリストで単位を選択してから、備蓄地点の最大重量を指定します。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'storage binに収容することができるアイテムの最大重量を指定します。ドロップダウンリストから、単位を選択してください。',
'Measure Area: Click the points around the polygon & end with a double-click': '観測領域: 多角形の角をクリックし、ダブルクリックで終了',
'Measure Length: Click the points along the path & end with a double-click': '距離を計測: 経路上の中継点をクリックして、終点でダブルクリックしてください',
'Medical and public health': '医療、公衆衛生',
'Medicine': '薬品',
'Medium': '中',
'Megabytes per Month': '1月毎のメガバイト数',
'Member removed from Group': 'メンバシップを削除しました',
'Members': 'メンバ',
'Membership Details': 'メンバシップの詳細',
'Membership updated': 'メンバシップを更新しました',
'Membership': 'メンバシップ',
'Memberships': 'メンバシップ',
'Message Details': 'メッセージの詳細',
'Message Sent': 'メッセージが送信されました',
'Message Variable': 'メッセージ変数',
'Message added': 'メッセージを追加しました',
'Message deleted': 'メッセージを削除しました',
'Message field is required!': 'メッセージは必須です',
'Message sent to outbox': 'メッセージを送信箱に送りました',
'Message updated': 'メッセージを更新しました',
'Message variable': 'メッセージ変数',
'Message': 'メッセージ',
'Messages': 'メッセージ',
'Messaging settings updated': 'メッセージング設定を更新しました',
'Messaging': 'メッセージング',
'Metadata Details': 'メタデータの詳細',
'Metadata added': 'メタデータを追加しました',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': '必要に応じて、アップロードした全ての画像に適用されるメタデータをここで入力できます。',
'Metadata deleted': 'メタデータを削除しました',
'Metadata updated': 'メタデータを更新しました',
'Metadata': 'メタデータ',
'Meteorite': '隕石落下',
'Meteorological (inc. flood)': '気象 (洪水を含む)',
'Method used': '使用されるメソッド',
'Micronutrient malnutrition prior to disaster': '災害前から栄養失調傾向あり',
'Middle Name': 'ミドルネーム',
'Migrants or ethnic minorities': '移民、あるいは少数民族の数',
'Military': '軍隊',
'Minimum Bounding Box': '最小:領域を指定した枠組み',
'Minimum shift time is 6 hours': '最小シフト時間は6時間です。',
'Minor/None': '少数 / なし',
'Minorities participating in coping activities': '少数民族が災害対応に従事',
'Minute': '分',
'Minutes must be a number between 0 and 60': '分には0-60の間の数字を記入してください',
'Minutes must be a number greater than 0 and less than 60': '分数は0から60の間で入力してください',
'Minutes per Month': '一ヶ月に数分間',
'Minutes should be a number greater than 0 and less than 60': '分は0から60の間で入力してください',
'Miscellaneous': 'その他',
'Missing Person Details': '行方不明者の詳細',
'Missing Person Reports': '行方不明者レポート',
'Missing Person': '行方不明者',
'Missing Persons Registry': '行方不明者の登録',
'Missing Persons Report': '行方不明者のレポート',
'Missing Persons': '行方不明者',
'Missing Report': '行方不明レポート',
'Missing Senior Citizen': '高齢者の行方不明',
'Missing Vulnerable Person': '被介護者の行方不明',
'Missing': '行方不明',
'Mobile Assess.': '移動端末アクセス',
'Mobile Basic Assessment': 'モバイルの基本アセスメント',
'Mobile Basic': 'モバイルの基礎',
'Mobile Phone': '携帯番号',
'Mobile': 'モバイル',
'Mode': 'モード',
'Modem Settings': 'モバイル機器の設定',
'Modem settings updated': 'モバイル機器の設定を更新しました',
'Moderate': 'モデレート',
'Moderator': 'モデレータ',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '地物の変更: 変形する地物を選択し、点の一つをドラッグすることで地物の形を修正可能です。',
'Modify Information on groups and individuals': 'グループと個人の情報更新',
'Modifying data in spreadsheet before importing it to the database': 'データベース登録前に、スプレッドシート内のデータ項目を修正',
'Module Administration': 'モジュール管理',
'Module disabled!': 'モジュールが無効です',
'Module provides access to information on current Flood Levels.': 'このモジュールにより、洪水の現在の水位情報にアクセス可能です',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'モジュールでは、専門団体によって作成された調査文書を管理します。データには、WFP(国連世界食糧計画)アセスメントも含まれます。',
'Monday': '月曜日',
'Monthly Cost': '月額費用',
'Monthly Salary': '給与(月額)',
'Months': '月',
'Morgue Status': '死体安置所のステータス',
'Morgue Units Available': '死体公示所の収容可能数',
'Mosque': 'モスク',
'Motorcycle': 'オートバイ',
'Moustache': '口ひげ',
'Move Feature: Drag feature to desired location': 'Featureの移動: Feature を希望するロケーションにドラッグしてください',
'Movements (Filter In/Out/Lost)': '活動 (フィルター イン/アウト/ロスト)',
'MultiPolygon': 'マルチポリゴン',
'Multiple Choice (Multiple Answers)': '複数選択(複数回答)',
'Multiple Choice (Only One Answer)': '複数選択(1つだけ回答)',
'Multiple Matches': '複数の結果が適合しました',
'Multiple Text Fields': '複数の入力項目',
'Multiple': '複数',
'Multiplicator': '乗数',
'Muslim': 'イスラム教徒',
'Must a location have a parent location?': 'ある場所にはその親の場所が無ければならないですか?',
'My Current function': '現在登録している機能',
'My Tasks': '自分のタスク',
'N/A': '該当なし',
'NZSEE Level 1': 'NZSEE レベル1',
'NZSEE Level 2': 'NZSEE レベル 2',
'Name and/or ID Label': '名前および/またはIDラベル',
'Name and/or ID': '名前および/またはID',
'Name of Storage Bin Type.': '物資保管タイプの名前です。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'ヘッダーの背景に使用される、static にあるファイルの名前 (オプションでサブパス)。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '左上の画像で静的位置を表すファイル名(サブパス名はオプション)',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'フッターに使われるビューにあるファイル名 (オプションとしてサブパス)。',
'Name of the person in local language and script (optional).': '現地言語での名前と表記(オプション)',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'このレポートに関連する組織や部署の名前。部署をもたない病院の場合は空欄にしてください。',
'Name or Job Title': '名前あるいは役職名',
'Name': '名前',
'Name, Org and/or ID': '名前、組織、IDなど',
'Name/Model/Type': '名前/ モデル/タイプ',
'Name: ': '名前: ',
'Names can be added in multiple languages': '名前は、複数の言語で記述することができます。',
'National ID Card': 'ナショナルIDカード',
'National NGO': '国内NPO',
'National Staff': '現地スタッフ',
'Nationality of the person.': 'この人物の国籍です。',
'Nationality': '国籍',
'Nautical Accident': '船舶事故',
'Nautical Hijacking': '船舶ハイジャック',
'Need Type Details': '需要タイプの詳細',
'Need Type added': '需要タイプを追加しました',
'Need Type deleted': '需要タイプを削除しました',
'Need Type updated': '需要タイプを更新しました',
'Need Type': '需要タイプ',
'Need Types': '需要タイプ',
'Need added': 'ニーズを追加しました',
'Need deleted': 'ニーズを削除しました',
'Need to be logged-in to be able to submit assessments': '評価を確定させるには、ログインが必要です',
'Need to configure Twitter Authentication': 'Twitterの認証を設定する必要があります',
'Need to select 2 Locations': 'ロケーションを2つ指定してください',
'Need to specify a Budget!': '予算を指定する必要があります。',
'Need to specify a Kit!': 'Kitを指定する必要があります。',
'Need to specify a Resource!': 'リソースを指定する必要があります。',
'Need to specify a bundle!': 'bundleを指定する必要があります。',
'Need to specify a group!': 'グループを指定する必要があります。',
'Need to specify a location to search for.': '検索対象となるロケーションを指定する必要があります。',
'Need to specify a role!': '役割を指定する必要があります。',
'Need to specify a service!': 'サービスを指定してください!',
'Need to specify a table!': 'テーブルを指定する必要があります。',
'Need to specify a user!': 'ユーザを指定する必要があります。',
'Need updated': 'ニーズを更新しました',
'Needs Details': '需要の詳細',
'Needs to reduce vulnerability to violence': '暴力行為の対策として必要な物資 / サービス',
'Needs': '要求',
'Negative Flow Isolation': '逆流の分離',
'Neighbourhood': '近隣',
'Neighbouring building hazard': '隣接ビルが危険な状態',
'Neonatal ICU': '新生児ICU',
'Neonatology': '新生児科',
'Network': 'ネットワーク',
'Neurology': '神経科',
'New Assessment reported from': '新規アセスメントの報告元',
'New Checklist': '新規チェックリスト',
'New Peer': '新しいデータ同期先',
'New Record': '新規レコード',
'New Report': '新規レポート',
'New Request': '新規の支援要請',
'New Solution Choice': '新しい解決案を選択',
'New Support Request': '新しい支援要請',
'New Synchronization Peer': '新しい同期先',
'New cases in the past 24h': '過去24時間の新規ケース数',
'New': '新規',
'News': 'ニュース',
'Next View': '次を表示',
'Next': '次へ',
'No Activities Found': '支援活動が見つかりませんでした',
'No Addresses currently registered': '住所は、まだ登録がありません。',
'No Aid Requests have been made yet': '援助要請がまだ作成されていません',
'No Alternative Items currently registered': '代替物資は現在登録されていません',
'No Assessment Summaries currently registered': 'アセスメントの要約が登録されていません',
'No Assessments currently registered': '登録済みのアセスメントがありません',
'No Asset Assignments currently registered': '現在のところ資産割り当ては登録されていません',
'No Assets currently registered': '登録されている資産は現在ありません。',
'No Baseline Types currently registered': '登録済みのBaseline Typesはありません',
'No Baselines currently registered': '登録されている基準値はありません',
'No Brands currently registered': '登録されている銘柄がありません',
'No Budgets currently registered': '予算は、まだ登録がありません。',
'No Bundles currently registered': 'Bundleは、まだ登録がありません。',
'No Catalog Items currently registered': '登録済みのカタログアイテムがありません',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-Category<>Catalog間の関係は、まだ登録がありません。',
'No Checklist available': '利用可能なチェックリストがありません',
'No Cluster Subsectors currently registered': 'クラスタのサブセクタはまだ登録がありません',
'No Clusters currently registered': '登録済みのクラスタはありません',
'No Commitment Items currently registered': '現在のところコミット済み物資は登録されていません',
'No Commitments': 'コミットメントがありません',
'No Configs currently defined': '設定は、まだ定義されていません',
'No Credentials currently set': '現在のところ証明書が設定されていません',
'No Details currently registered': '詳細は、まだ登録されていません',
'No Distribution Items currently registered': '配給物資の登録がありません',
'No Distributions currently registered': '配給所の登録がありません',
'No Documents found': '文書が見つかりませんでした。',
'No Donors currently registered': '資金提供組織はまだ登録されていません',
'No Feature Layers currently defined': 'Feature Layersはまだ定義されていません',
'No Flood Reports currently registered': '登録済みの洪水情報はありません',
'No GPX Layers currently defined': 'GPXレイヤはまだ定義されていません',
'No Groups currently defined': 'グループはまだ定義されていません',
'No Groups currently registered': 'グループはまだ登録されていません',
'No Hospitals currently registered': '病院はまだ登録されていません',
'No Identification Report Available': '利用可能なIDレポートはありません',
'No Identities currently registered': '登録されているIDはありません',
'No Image': '画像なし',
'No Images currently registered': '画像の登録はありません',
'No Impact Types currently registered': '被害の種類は未登録です',
'No Impacts currently registered': 'これまでに登録されたImpactはありません',
'No Incident Reports currently registered': '登録されているインシデントレポートはありません',
'No Incidents currently registered': '登録済みのインシデントはありません。',
'No Incoming Shipments': '到着予定の輸送物資',
'No Inventory Items currently registered': '備蓄物資の登録がありません',
'No Inventory Stores currently registered': '現在登録されている物資集積地点はありません',
'No Item Catalog Category currently registered': '救援物資カタログのカテゴリはまだ登録がありません',
'No Item Catalog currently registered': 'アイテムカタログはまだ登録されていません',
'No Item Categories currently registered': '救援物資カテゴリの登録がありません',
'No Item Packs currently registered': '救援物資のパックは、まだ登録がありません',
'No Item Sub-Category currently registered': '救援物資のサブカテゴリはまだ登録されていません',
'No Item currently registered': 'アイテムはまだ登録されていません',
'No Items currently registered': '物資はまだ登録されていません',
'No Items currently requested': '要求されている物資はありません',
'No Keys currently defined': 'Keyはまだ定義されていません',
'No Kits currently registered': 'Kitはまだ登録されていません',
'No Level 1 Assessments currently registered': '現在のところ、レベル1アセスメントは登録されていません',
'No Level 2 Assessments currently registered': '現在のところ、レベル2アセスメントは登録されていません',
'No Locations currently available': '現在利用可能なロケーションはありません',
'No Locations currently registered': 'ロケーションはまだ登録されていません',
'No Map Profiles currently defined': '地図の設定が定義されていません',
'No Markers currently available': '現在利用可能なマーカーはありません',
'No Match': '合致する結果がありません',
'No Matching Catalog Items': '適合する救援物資はありませんでした',
'No Matching Records': '適合する検索結果がありませんでした',
'No Members currently registered': 'メンバはまだ登録されていません',
'No Memberships currently defined': 'メンバシップはまだ登録されていません',
'No Messages currently in Outbox': '送信箱にメッセージがありません',
'No Metadata currently defined': 'メタデータはまだ定義されていません',
'No Need Types currently registered': '現在登録されている需要タイプはありません',
'No Needs currently registered': '現在要求は登録されていません',
'No Offices currently registered': 'オフィスはまだ登録されていません',
'No Offices found!': 'オフィスが見つかりませんでした',
'No Organizations currently registered': '団体はまだ登録されていません',
'No Packs for Item': 'この物資に対する救援物資パックはありません',
'No Peers currently registered': '登録済みのデータ同期先はありません',
'No People currently registered in this shelter': 'この避難所に登録されている人物情報はありません',
'No Persons currently registered': '人物情報はまだ登録されていません',
'No Persons currently reported missing': '現在、行方不明者の登録はありません',
'No Persons found': '該当する人物はいませんでした',
'No Photos found': '写真の登録がありません',
'No Picture': '写真がありません',
'No Presence Log Entries currently registered': '所在地履歴の登録がありません',
'No Problems currently defined': '定義済みの問題がありません',
'No Projections currently defined': '地図投影法は、まだ定義されていません。',
'No Projects currently registered': '定義済みのプロジェクトはありません',
'No Rapid Assessments currently registered': '被災地の現況アセスメントはまだ登録されていません',
'No Received Items currently registered': '受領された救援物資の登録はありません',
'No Received Shipments': '受け取った輸送はありません',
'No Records currently available': '利用可能なレコードはありません',
'No Records matching the query': '条件に当てはまるレコードが存在しません',
'No Request Items currently registered': '物資要請の登録がありません',
'No Requests have been made yet': '支援要請は、まだ行われていません',
'No Requests match this criteria': 'この条件に一致する支援要請はありません',
'No Requests': '支援要請がありません',
'No Responses currently registered': '現在登録されていて返答が無いもの',
'No Rivers currently registered': '河川情報の登録がありません',
'No Roles currently defined': '役割はまだ定義されていません',
'No Sections currently registered': 'このセクションの登録情報がありません',
'No Sectors currently registered': '登録済みの活動分野がありません',
'No Sent Items currently registered': '送付した物資の登録がありません',
'No Sent Shipments': '送付が行われた輸送がありません',
'No Settings currently defined': '設定は、まだ定義されていません',
'No Shelter Services currently registered': '登録されている避難所サービスがありません',
'No Shelter Types currently registered': '登録済みの避難所タイプがありません',
'No Shelters currently registered': '避難所はまだ登録されていません',
'No Shipment Transit Logs currently registered': '物資輸送履歴の登録がありません',
'No Shipment/Way Bills currently registered': '輸送費/Way Billsはまだ登録されていません',
'No Shipment<>Item Relation currently registered': '輸送とアイテムの関連付けはまだ登録されていません',
'No Sites currently registered': '登録されているサイトはありません',
'No Skill Types currently set': '設定済みのスキルタイプはありません',
'No Solutions currently defined': '解決案はまだ定義されていません',
'No Staff Types currently registered': 'スタッフタイプはまだ登録されていません',
'No Staff currently registered': 'スタッフはまだ登録されていません',
'No Storage Bin Type currently registered': '登録済みのStorage Binタイプがありません',
'No Storage Bins currently registered': 'Storage Binはまだ登録されていません',
'No Storage Locations currently registered': '登録されている備蓄地点がありません',
'No Subscription available': '寄付の申し込みがありません',
'No Support Requests currently registered': '現在のところ、支援要請は登録されていません',
'No Survey Answers currently registered': 'これまでに登録されたフィードバックの回答はありません',
'No Survey Questions currently registered': '登録済みのSurvey Questionsはありません',
'No Survey Sections currently registered': '登録済みのSurvey Sectionはありません',
'No Survey Series currently registered': '現在、調査報告は登録されていません',
'No Survey Template currently registered': '登録されている調査テンプレートがありません',
'No TMS Layers currently defined': 'TMS レイヤーがまだ定義されていません',
'No Tasks with Location Data': 'ロケーション情報を持っているタスクがありません',
'No Themes currently defined': 'テーマはまだ定義されていません',
'No Tickets currently registered': 'チケットはまだ定義されていません',
'No Tracks currently available': '利用可能な追跡情報はありません',
'No Units currently registered': '単位はまだ登録されていません',
'No Users currently registered': '登録済みのユーザがありません',
'No Volunteers currently registered': 'ボランティアの登録がありません',
'No Warehouse Items currently registered': '現在登録済みの倉庫物資はありません',
'No Warehouses currently registered': '倉庫が登録されていません',
'No Warehouses match this criteria': '条件に合致する倉庫がありません',
'No access at all': '完全に孤立中',
'No access to this record!': 'このレコードにはアクセスできません',
'No action recommended': 'アクション無しを推奨',
'No calculations made': '見積が作成されていません',
'No conflicts logged': 'コンフリクトのログはありません。',
'No contact information available': '利用可能な連絡先情報はありません',
'No contacts currently registered': '連絡先が登録されていません',
'No data in this table - cannot create PDF!': 'テーブルにデータがありません。PDF を作成できません。',
'No databases in this application': 'このアプリケーションにデータベースはありません',
'No dead body reports available': '遺体情報のレポートはありません',
'No entries found': 'エントリが見つかりません',
'No entries matching the query': 'クエリに一致するエントリはありませんでした。',
'No import jobs': 'インポートされたJobがありません',
'No linked records': 'リンクされているレコードはありません',
'No location known for this person': 'この人物の消息が不明です',
'No locations found for members of this team': 'このチームのメンバーの場所が見つかりませんでした',
'No locations registered at this level': 'この階層に登録されているロケーションはありません',
'No log entries matching the query': '検索に合致するログエントリがありません',
'No matching items for this request': 'この支援要請に適合する物資はありません',
'No matching records found.': '一致するレコードがありませんでした。',
'No messages in the system': 'システム上にメッセージが存在しません',
'No notes available': '追加情報はありません',
'No peers currently registered': '現在登録されているデータ同期先はありません',
'No pending registrations found': '処理保留中の登録申請はありません',
'No pending registrations matching the query': '検索に合致する処理保留登録申請がありません。',
'No person record found for current user.': '現在のユーザの人物情報レコードが見つかりませんでした。',
'No positions currently registered': '登録されているpositionがありません',
'No problem group defined yet': '定義済みの問題グループがありません。',
'No records matching the query': '条件に当てはまるレコードが存在しません',
'No records to delete': '削除するレコードがありません',
'No recovery reports available': '利用可能な遺体回収レポートはありません',
'No report available.': '利用可能なレポートはありません。',
'No reports available.': '利用可能なレポートがありません。',
'No reports currently available': '利用可能なレポートはありません',
'No requests found': '支援要請は見つかりませんでした',
'No resources currently registered': 'リソースはまだ登録されていません',
'No resources currently reported': 'レポート済みのリソースはありません',
'No service profile available': '利用可能なサービスプロファイルはありません',
'No skills currently set': 'スキルが登録されていません',
'No status information available': '状況に関する情報はありません',
'No synchronization': '同期なし',
'No tasks currently registered': 'タスクはまだ登録されていません',
'No template found!': 'テンプレートが見つかりません。',
'No units currently registered': '単位はまだ登録されていません',
'No volunteer information registered': 'ボランティア情報はまだ登録されていません',
'No': 'いいえ',
'Non-structural Hazards': 'その他の災害',
'None (no such record)': 'なし(記録がありません)',
'None': 'なし',
'Noodles': '麺',
'Normal food sources disrupted': '普段の食料供給源が混乱している',
'Normal': '通常どおり',
'Not Applicable': '該当なし',
'Not Authorised!': '認証されていません',
'Not Possible': '対応不可',
'Not Set': '設定されていません',
'Not Authorized': '認証されていません',
'Not installed or incorrectly configured.': 'インストールされていないか、適切な設定がされていません',
'Not yet a Member of any Group': 'メンバシップはまだ登録されていません',
'Note Details': '追加情報の詳細',
'Note Status': '状態を記録',
'Note Type': '追加情報の種類',
'Note added': '追加情報を追加しました',
'Note deleted': '追加情報を削除しました',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '注意:このリストは、活動中のボランティアのみ表示しています。システムに登録しているすべての人をみるには、ホーム・スクリーンから検索してください。',
'Note updated': '追加情報を更新しました',
'Note': '追加情報',
'Notes': '追加情報',
'Notice to Airmen': 'NOTAM (航空従事者用)',
'Number of Columns': '列数',
'Number of Patients': '患者数',
'Number of Rows': '行数',
'Number of Vehicles': '車両数',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'この施設において、今後24時間以内に利用可能になると予測されている、このタイプの追加ベッド数。',
'Number of alternative places for studying': '授業用に確保できる場所の数',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'このタイプの利用可能/空きベッド数(報告時点)',
'Number of deaths during the past 24 hours.': '過去24時間以内の死亡者数',
'Number of discharged patients during the past 24 hours.': '退院患者数(過去24時間以内)',
'Number of doctors actively working': '現在活動中の医師の数',
'Number of doctors': '医者の人数',
'Number of houses damaged, but usable': '破損しているが利用可能な家屋の数',
'Number of houses destroyed/uninhabitable': '全壊/居住不可になった家屋数',
'Number of in-patients at the time of reporting.': 'レポート時の患者数です。',
'Number of latrines': 'トイレ総数',
'Number of midwives actively working': '現在活動中の助産師の数',
'Number of newly admitted patients during the past 24 hours.': '入院患者数(過去24時間以内)',
'Number of non-medical staff': '医療従事以外のスタッフ数',
'Number of nurses actively working': '現在活動中の看護師の数',
'Number of nurses': '看護師の人数',
'Number of private schools': '私立学校の数',
'Number of public schools': '公立学校の数',
'Number of religious schools': '宗教学校の数',
'Number of residential units not habitable': '住めなくなった住居の数',
'Number of residential units': '居住施設の数',
'Number of schools damaged but usable': '破損しているが利用可能な校舎の数',
'Number of schools destroyed/uninhabitable': '全壊 / 利用不可能な校舎の数',
'Number of schools open before disaster': '災害前に開校していた学校数',
'Number of schools open now': '現在開校している学校の数',
'Number of teachers affected by disaster': '被災した教師の数',
'Number of teachers before disaster': '災害発生前の教師の数',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '病院に設置されている、現在利用可能なベッドの数。日時レポートにより、自動的に更新されます。',
'Number of vacant/available units to which victims can be transported immediately.': '現在利用可能なユニット数。犠牲者を即座に安置できる数。',
'Number or Label on the identification tag this person is wearing (if any).': 'この人物の衣服につけられているタグの番号、あるいはラベル名(ある場合のみ).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'この場所をあとで検索するための番号かコード 例: フラグ番号、グリッドの位置、サイトの参照番号など',
'Number': '番号',
'Number/Percentage of affected population that is Female & Aged 0-5': '女性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 13-17': '女性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 18-25': '女性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 26-60': '女性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 6-12': '女性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 61+': '女性(61歳以上)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 0-5': '男性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 13-17': '男性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 18-25': '男性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 26-60': '男性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 6-12': '男性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 61+': '男性(61歳以上)の被災者数 / 割合',
'Numbers Only': '数値のみ',
'Nursery Beds': '看護ベッド',
'Nutrition problems': '栄養問題',
'Nutrition': '食料・栄養',
'OR Reason': '手術室の詳細',
'OR Status Reason': '手術室の状態理由',
'OR Status': '手術室の状態',
'Observer': 'オブザーバ',
'Obsolete': '廃止済み',
'Obstetrics/Gynecology': '産婦人科',
'Office Address': 'オフィスの住所',
'Office Details': 'オフィスの詳細',
'Office added': 'オフィスを追加しました',
'Office deleted': 'オフィスを削除しました',
'Office updated': 'オフィスを更新しました',
'Office': 'オフィス',
'Offices': 'オフィス',
'Offline Sync (from USB/File Backup)': 'データのオフライン同期(USB/バックアップファイル利用)',
'Offline Sync': 'データのオフライン同期',
'Old': '古い',
'Older people as primary caregivers of children': '子供の介護を、高齢者が担当',
'Older people in care homes': '介護施設で生活する高齢者がいる',
'Older people participating in coping activities': '高齢者が災害対応に従事',
'Older people with chronical illnesses': '慢性疾患をもつ高齢者がいる',
'Older person (>60 yrs)': '高齢者(60歳以上)',
'On by default? (only applicable to Overlays)': 'デフォルトでオン(オーバーレイにのみ有効)',
'On by default?': 'デフォルトでON?',
'One Time Cost': '1回毎の費用',
'One time cost': '一回毎の費用',
'One-time costs': '一回毎の費用',
'One-time': '1回毎',
'Oops! Something went wrong...': '申し訳ありません、何か問題が発生しています。',
'Oops! something went wrong on our side.': '申し訳ありません、システム側に問題が発生しています。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1は不透明、0は完全に透明)',
'Open Assessment': '未解決のアセスメント',
'Open Map': '地図を開く',
'Open area': '空き地',
'Open recent': '最近使用したものを開く',
'Open': '開く',
'OpenStreetMap Editor': 'OpenStreetMap エディタ',
'Operating Rooms': '手術室',
'Optional link to an Incident which this Assessment was triggered by.': 'このアセスメントの端緒となった事故へのオプション・リンク',
'Optional': '任意',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'オプション。GeoServerでは、ワークスペース名前空間のURIです。WFS getCapabilitiesでは、FeatureType名のコロンの前の部分です。',
'Options': 'オプション',
'Organization Details': '団体の詳細',
'Organization Registry': '団体情報の登録',
'Organization added': '団体を追加しました',
'Organization deleted': '団体を削除しました',
'Organization updated': '団体を更新しました',
'Organization': '団体',
'Organizations': '団体',
'Origin of the separated children': '離別した子供たちの出身地',
'Origin': '出身地',
'Other (describe)': 'その他 (要記述)',
'Other (specify)': 'その他(具体的に)',
'Other Evidence': 'その他の証跡',
'Other Faucet/Piped Water': 'その他 蛇口/パイプによる水源',
'Other Isolation': 'その他の孤立',
'Other Name': 'その他の名前',
'Other activities of boys 13-17yrs before disaster': 'その他、災害発生前の13-17歳男子の活動状況',
'Other activities of boys 13-17yrs': 'その他、13-17歳男子の活動状況',
'Other activities of boys <12yrs before disaster': 'その他、災害発生前の12歳以下男子の活動状況',
'Other activities of boys <12yrs': 'その他、12歳以下男子の活動状況',
'Other activities of girls 13-17yrs before disaster': 'その他、災害発生前の13-17歳女子の活動状況',
'Other activities of girls 13-17yrs': 'その他、13-17歳女子の活動状況',
'Other activities of girls<12yrs before disaster': 'その他、災害発生前の12歳以下女子の活動状況',
'Other activities of girls<12yrs': 'その他、12歳以下女子の活動状況',
'Other alternative infant nutrition in use': 'その他、使用されている乳児用代替食',
'Other alternative places for study': 'その他、授業開設に利用可能な施設',
'Other assistance needed': 'その他に必要な援助活動',
'Other assistance, Rank': 'その他の援助、ランク',
'Other current health problems, adults': 'その他の健康問題(成人)',
'Other current health problems, children': 'その他の健康問題(小児)',
'Other events': '他のイベント',
'Other factors affecting school attendance': 'その他、生徒の就学に影響する要因',
'Other major expenses': 'その他の主な支出',
'Other non-food items': '食料以外の救援物資',
'Other recommendations': '他の推薦',
'Other residential': '住宅その他',
'Other school assistance received': 'その他の学校用品を受領した',
'Other school assistance, details': '受領した学校用品の内訳',
'Other school assistance, source': 'その他の学校用品の送付元',
'Other side dishes in stock': '在庫のあるその他食材',
'Other types of water storage containers': 'それ以外の水貯蔵容器タイプ',
'Other ways to obtain food': 'それ以外の食料調達方法',
'Other': 'その他',
'Outbound Mail settings are configured in models/000_config.py.': '送信メール設定は、models/000_config.py で定義されています。',
'Outbox': '送信箱',
'Outgoing SMS Handler': 'SMS 送信ハンドラ',
'Outgoing SMS handler': 'SMS送信ハンドラ',
'Overall Hazards': 'すべての危険',
'Overhead falling hazard': '頭上落下物の危険',
'Overland Flow Flood': '陸上の洪水流量',
'Overlays': 'オーバーレイ',
'Owned Records': '自身のレコード',
'Owned Resources': '保持しているリソース',
'PDAM': '水道会社(PDAM)',
'PIN number ': 'PIN 番号',
'PIN': '暗証番号',
'PL Women': 'PL 女性',
'Pack': 'パック',
'Packs': 'パック',
'Pan Map: keep the left mouse button pressed and drag the map': 'マップをパン: マウスの左ボタンを押したまま、地図をドラッグしてください',
'Parameters': 'パラメータ',
'Parapets, ornamentation': '欄干、オーナメント',
'Parent Office': '親組織のオフィス',
'Parent needs to be of the correct level': '適切なレベルの親属性を指定してください',
'Parent needs to be set for locations of level': 'ロケーションのレベルには親属性が必要です',
'Parent needs to be set': '親情報が設定される必要があります',
'Parent': '親',
'Parents/Caregivers missing children': '親/介護者とはぐれた子供たち',
'Partial': '一部 / 不足',
'Participant': '参加者',
'Pashto': 'パシュトー語',
'Passport': 'パスポート',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Password for authentication at the peer. HTTPベーシック認証のみサポートしています。',
'Password': 'パスワード',
'Path': 'パス',
'Pathology': '病理学',
'Patients': '患者数',
'Pediatric ICU': '小児ICU',
'Pediatric Psychiatric': '小児精神科',
'Pediatrics': '小児科医',
'Peer Details': 'データ同期先の詳細',
'Peer Registration Details': 'データ同期先登録の詳細',
'Peer Registration Request': 'データ同期先の登録要求',
'Peer Registration': 'データ同期先登録',
'Peer Type': '同期先タイプ',
'Peer UID': '同期先UID',
'Peer added': 'データ同期先を追加しました',
'Peer deleted': 'データ同期先を削除しました',
'Peer not allowed to push': '同期先がデータのプッシュを許可していません',
'Peer registration request added': 'データ同期先の登録要求を追加しました',
'Peer registration request deleted': 'データ同期先の登録要求を削除しました',
'Peer registration request updated': 'データ同期先の登録要求を更新しました',
'Peer updated': '同期先を更新しました',
'Peer': 'データ同期先',
'Peers': '同期先',
'Pending Requests': '保留中の支援要請',
'Pending': '中断',
'People Needing Food': '食料不足',
'People Needing Shelter': '避難所が必要',
'People Needing Water': '水が必要',
'People Trapped': '救難者',
'People with chronical illnesses': '慢性疾患をもつ成人がいる',
'People': '人物情報',
'Person 1': '人物 1',
'Person 1, Person 2 are the potentially duplicate records': '人物情報1と人物情報2は重複したレコードの可能性があります。',
'Person 2': '人物 2',
'Person Data': '人物データ',
'Person De-duplicator': '人物情報の重複削除',
'Person Details': '人物情報の詳細',
'Person Finder': '消息情報',
'Person Registry': '人物情報の登録',
'Person added to Group': 'グループメンバを追加しました',
'Person added to Team': 'グループメンバを追加しました',
'Person added': '人物情報を追加しました',
'Person deleted': '人物情報を削除しました',
'Person details updated': '人物情報を更新しました',
'Person interviewed': 'インタビュー担当者',
'Person missing': '行方不明中',
'Person must be specified!': '登録がありません',
'Person reporting': 'レポート報告者',
'Person who has actually seen the person/group.': '人物/グループで実際に目撃された人物情報',
'Person who is reporting about the presence.': 'この所在報告を行った人物です。',
'Person who observed the presence (if different from reporter).': '人物の所在を確認したひとの情報(報告者と異なる場合のみ記入)。',
'Person': '人物情報',
'Person/Group': '人物/グループ',
'Personal Data': '個人情報',
'Personal Effects Details': '個人の影響の詳細',
'Personal Effects': '所持品',
'Personal impact of disaster': 'この人物の被災状況',
'Personal': '個人',
'Persons in institutions': '施設居住中の住人',
'Persons with disability (mental)': '障がい者数(精神的障がい者を含む)',
'Persons with disability (physical)': '肉体的な障がい者の数',
'Persons': '人物情報',
'Phone 1': '電話番号',
'Phone 2': '電話番号(予備)',
'Phone': '電話番号',
'Phone/Business': '電話番号/仕事',
'Phone/Emergency': '電話番号/緊急連絡先',
'Phone/Exchange': '電話/とりつぎ',
'Photo Details': '写真の詳細',
'Photo Taken?': '写真撮影済み?',
'Photo added': '写真を追加しました',
'Photo deleted': '写真を削除しました',
'Photo updated': '写真を更新しました',
'Photo': '写真',
'Photograph': '写真',
'Photos': '写真',
'Physical Description': '身体外見の説明',
'Physical Safety': '身体的安全',
'Picture upload and finger print upload facility': '指紋や写真のアップロード機能',
'Picture': '写真',
'Place for solid waste disposal': '廃棄物の処理を行う場所を記載してください',
'Place of Recovery': '遺体回収場所',
'Place on Map': '地図上の場所',
'Places for defecation': 'トイレ',
'Places the children have been sent to': '子供たちの避難先',
'Planner': '立案者',
'Playing': '家庭内/外で遊ぶ',
'Please correct all errors.': 'すべてのエラーを修正してください。',
'Please enter a First Name': '苗字を入力してください',
'Please enter a valid email address': '有効な電子メールアドレスを入力してください。',
'Please enter the first few letters of the Person/Group for the autocomplete.': '自動入力するには人物あるいはグループの最初の数文字を入力してください',
'Please enter the recipient': '受取担当者を入力してください',
'Please fill this!': 'ここに入力してください',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '言及先のURLを明示し、期待する結果と実際に発生した結果を記述してください。不具合チケットが発行された場合は、そのチケットIDも記載してください。',
'Please report here where you are:': 'いまあなたが居る場所を入力してください。',
'Please select another level': '別のレベルを選択してください',
'Please select': '選んでください',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '携帯電話番号でサインアップし、Sahanaからのテキストメッセージを受け取れるようにします。国際電話コードまで含めた形式で入力してください',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '病気の治療に当たって問題となる事象の詳細を記載します。状況を改善するための提案も、もしあれば記載してください。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '追加情報はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Please use this field to record any additional information, including any Special Needs.': '特別な要求など、どんな追加情報でも構いませんので、この部分に記録してください',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'UshahidiのインスタンスIDなど、追加情報がある場合はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Pledge Aid to match these Requests': 'これらの要求に一致する支援に寄付する',
'Pledge Aid': '寄付する',
'Pledge Status': '寄付のステータス',
'Pledge Support': '寄付サポート',
'Pledge': '寄付',
'Pledged': '寄付済み',
'Pledges': '寄付',
'Point': 'ポイント',
'Poisoning': '中毒',
'Poisonous Gas': '有毒ガス',
'Police': '警察',
'Pollution and other environmental': '汚染、あるいはその他の環境要因',
'Polygon reference of the rating unit': 'その評価単位への参照ポリゴン',
'Polygon': 'ポリゴン',
'Population and number of households': '人口と世帯数',
'Population': '利用者数',
'Porridge': 'おかゆ',
'Port Closure': '港湾閉鎖',
'Port': 'ポート',
'Position Details': 'ポジションの詳細',
'Position added': 'Position を追加しました',
'Position deleted': 'ポジションを削除しました',
'Position type': '場所のタイプ',
'Position updated': 'ポジションを更新しました',
'Positions': 'ポジション',
'Postcode': '郵便番号',
'Poultry restocking, Rank': '家禽の補充、ランク',
'Poultry': '家禽(ニワトリ)',
'Pounds': 'ポンド',
'Power Failure': '停電',
'Pre-cast connections': 'プレキャスト連結',
'Preferred Name': '呼び名',
'Pregnant women': '妊婦の数',
'Preliminary': '予備',
'Presence Condition': '所在情報',
'Presence Log': '所在履歴',
'Presence': '所在',
'Previous View': '前を表示',
'Previous': '前へ',
'Primary Name': '基本名',
'Primary Occupancy': '主要な従事者',
'Priority Level': '優先度レベル',
'Priority': '優先度',
'Private': '企業',
'Problem Administration': '問題管理',
'Problem Details': '問題の詳細',
'Problem Group': '問題グループ',
'Problem Title': '問題の名称',
'Problem added': '問題を追加しました',
'Problem connecting to twitter.com - please refresh': 'twitter.comに接続できません。更新ボタンを押してください',
'Problem deleted': '問題を削除しました',
'Problem updated': '問題を更新しました',
'Problem': '問題',
'Problems': '問題',
'Procedure': '手続き',
'Procurements': '物資の調達',
'Product Description': '製品の説明',
'Product Name': '製品名',
'Profile': 'プロファイル',
'Project Activities': 'プロジェクト活動状況',
'Project Details': 'プロジェクトの詳細',
'Project Management': 'プロジェクト管理',
'Project Status': 'プロジェクトのステータス',
'Project Tracking': 'プロジェクト追跡',
'Project added': 'プロジェクトを追加しました',
'Project deleted': 'プロジェクトを削除しました',
'Project has no Lat/Lon': 'プロジェクトの緯度/経度情報はありません',
'Project updated': 'プロジェクトを更新しました',
'Project': 'プロジェクト',
'Projection Details': '地図投影法の詳細',
'Projection added': '地図投影法を追加しました',
'Projection deleted': '地図投影法を削除しました',
'Projection updated': '地図投影法を更新しました',
'Projection': '地図投影法',
'Projections': '地図投影法',
'Projects': 'プロジェクト',
'Property reference in the council system': '評議システムで使用されるプロパティリファレンス',
'Protected resource': '保護されたリソース',
'Protection': '被災者保護',
'Provide Metadata for your media files': 'メディアファイルにメタデータを提供',
'Provide a password': 'パスワードを入力',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '建物全体か損傷箇所のスケッチを提供し、損傷箇所を明示してください。',
'Province': '都道府県',
'Proxy-server': 'プロキシサーバ',
'Psychiatrics/Adult': '精神病/成人',
'Psychiatrics/Pediatric': '精神病/小児',
'Public Event': '公開イベント',
'Public and private transportation': '公共および民営の交通機関',
'Public assembly': '公会堂',
'Public': '公開',
'Pull tickets from external feed': '外部フィードからのticketの取得',
'Punjabi': 'パンジャブ',
'Push tickets to external system': '外部システムにチケットの発信',
'Put a choice in the box': '箱の中から選んで取る',
'Pyroclastic Flow': '火砕流',
'Pyroclastic Surge': '火砕サージ',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'PythonでPython Serial moduleが利用できません。モデムの有効化に必要です。',
'Python needs the ReportLab module installed for PDF export': '実行中のPythonでReportLabモジュールが利用できません。PDF出力に必要です。',
'Quantity Committed': '引き受けた量',
'Quantity Fulfilled': '十分な量がある',
'Quantity in Transit': '運送中の数量',
'Quantity': '数量',
'Quarantine': '隔離施設',
'Queries': 'クエリ',
'Query Feature': '問合せ機能',
'Query': 'クエリ',
'Queryable?': '検索可能?',
'RC frame with masonry infill': '鉄骨入りコンクリートブロック',
'RECORD A': 'レコード A',
'RECORD B': 'レコード B',
'RESPONSE': '対応',
'Race': '人種',
'Radiological Hazard': '放射能災害',
'Radiology': '放射線科',
'Railway Accident': '鉄道事故',
'Railway Hijacking': '鉄道ハイジャック',
'Rain Fall': '降雨',
'Rapid Assessment Details': '被災地の現況アセスメントの詳細',
'Rapid Assessment added': '被災地の現況アセスメントを追加しました',
'Rapid Assessment deleted': '被災地の現況アセスメントを削除しました',
'Rapid Assessment updated': '被災地の現況アセスメントを更新しました',
'Rapid Assessment': '被災地の現況アセスメント',
'Rapid Assessments & Flexible Impact Assessments': '被災地の現況アセスメントと、災害影響範囲アセスメント',
'Rapid Assessments': '被災地の現況アセスメント',
'Rapid Close Lead': '急いで閉め、先導してください。',
'Rapid Data Entry': 'データ入力簡易版',
'Rating Scale': '評価尺度',
'Raw Database access': 'データベースへの直接アクセス',
'Read-Only': '読み込み専用',
'Read-only': '登録内容の編集を禁止',
'Real World Arbitrary Units': '実在の任意単位',
'Receive Items': '物資を受領',
'Receive Shipment': '輸送を受け取る',
'Receive this shipment?': 'この物資送付を受領しますか?',
'Receive': '物資受領',
'Received By': '物資受領責任者',
'Received Item Details': '配送済み物資の詳細',
'Received Item deleted': '受領した物資を削除しました',
'Received Item updated': '受領された物資を更新しました',
'Received Shipment Details': '受け取った輸送の詳細',
'Received Shipment canceled and items removed from Inventory': '受領した輸送をキャンセルしました。物資は備蓄から削除されます',
'Received Shipment canceled': '受け取った輸送をキャンセルしました',
'Received Shipment updated': '受領済みの配送物の情報が更新されました',
'Received Shipments': '受諾した輸送物資',
'Received': '受領済み',
'Receiving and Sending Items': '送付 / 受領した救援物資',
'Recipient': '受け取り担当者',
'Recipients': '受信者',
'Recommendations for Repair and Reconstruction or Demolition': '再築や取り壊し、修繕を推奨',
'Record %(id)s created': 'レコード %(id)s が作成されました',
'Record Created': '作成されたレコード',
'Record Details': 'レコードの詳細',
'Record ID': 'レコードID',
'Record Saved': 'レコードが保存されました',
'Record added': 'レコードを追加しました',
'Record any restriction on use or entry': '利用や入力に当たっての制限事項を記載',
'Record deleted': 'レコードを削除しました',
'Record last updated': '最近更新されたレコード',
'Record not found!': 'レコードが見つかりませんでした',
'Record updated': 'レコードを更新しました',
'Record': 'レコード',
'Recording and Assigning Assets': '物資の割り当てと記録',
'Records': 'レコード',
'Recovery Request added': '遺体の回収要請を追加しました',
'Recovery Request deleted': '遺体回収要請を削除しました',
'Recovery Request updated': '遺体回収要請を更新しました',
'Recovery Request': '遺体回収の要請',
'Recovery Requests': '遺体回収要請',
'Recovery report added': '遺体回収レポートを追加しました',
'Recovery report deleted': '遺体回収レポートを削除しました',
'Recovery report updated': '遺体回収レポートを更新しました',
'Recovery': '遺体回収',
'Recruitment': '人材募集',
'Recurring Cost': '経常費用',
'Recurring cost': '経常費用',
'Recurring costs': '経常費用',
'Recurring': '採用活動',
'Red': '赤',
'Reference Document': '関連文書',
'Region Location': '地域のロケーション',
'Regional': '国際支部',
'Register Person into this Shelter': 'この避難所に人物情報を登録',
'Register Person': '人物情報を登録',
'Register them as a volunteer': 'ボランティアとして登録',
'Register': '登録',
'Registered People': '登録した人物情報',
'Registered users can': '登録済みのユーザは',
'Registering ad-hoc volunteers willing to contribute': '貢献を希望する臨時ボランティアを登録',
'Registration Details': '登録情報詳細',
'Registration Disabled!': '現在アカウント登録は受け付けていません。',
'Registration added': '登録を追加しました',
'Registration entry deleted': '登録を削除しました',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登録はまだ承認されていません (承認者:(%s)) -- 確認メールが届くまでもうしばらくお待ちください。',
'Registration key': '登録key',
'Registration successful': '登録に成功しました',
'Registration updated': '登録を更新しました',
'Registration': '登録',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '地域内で活動する全ての支援団体を追跡し、情報を保持します。これにより、各団体が活動している地域の情報だけでなく、それぞれの地域でどのような活動が行われているかも掌握することができます。',
'Rehabilitation/Long Term Care': 'リハビリ/長期介護',
'Reinforced masonry': 'コンクリートブロック壁',
'Rejected': '拒否されました',
'Reliable access to sanitation/hygiene items': 'サニタリ / 衛生用品の安定供給がある',
'Relief Item Catalog': '救援物資カタログ',
'Relief Item': '救援物資',
'Relief Items': '救援物資',
'Relief Team': '救援チーム',
'Relief': '救援',
'Religion': '宗教',
'Religious Leader': '宗教指導者',
'Religious': '宗教',
'Relocate as instructed in the <instruction>': '<instruction>の内容に従って再配置',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Featureの削除: 削除したいfeatureを選択し、削除キーを押下してください',
'Remove Person from Group': 'メンバシップを削除',
'Remove Person from Team': 'メンバシップを削除',
'Remove': '削除',
'Removed from Group': 'メンバシップを削除しました',
'Removed from Team': 'メンバシップを削除しました',
'Repeat your password': 'パスワードをもう一度入力してください',
'Replace if Master': 'マスターなら置換',
'Replace if Newer': '新しいものがあれば置き換える',
'Replace': '置換',
'Report Another Assessment...': '別のアセスメントをレポートする',
'Report Details': 'レポートの詳細',
'Report Resource': 'レポートリソース',
'Report Type': 'レポートタイプ',
'Report Types Include': 'レポートタイプを含む',
'Report a Problem with the Software': 'ソフトウェアの不具合を報告',
'Report added': 'レポートを追加しました',
'Report deleted': 'レポートを削除しました',
'Report my location': '自分の現在地を報告',
'Report that person missing': '行方不明者の情報を報告',
'Report the contributing factors for the current EMS status.': '現在の緊急受け入れ状態に影響している事由を記載',
'Report the contributing factors for the current OR status.': '現在の手術室の状況報告',
'Report the person as found': '人物の所在情報を報告',
'Report them as found': '発見として報告',
'Report them missing': '行方不明として報告',
'Report updated': 'レポートを更新しました',
'Report': 'レポート',
'Reporter Name': 'レポーターの氏名',
'Reporter': 'レポーター',
'Reporting on the projects in the region': 'この地域で展開しているプロジェクトのレポート',
'Reports': 'レポート',
'Request Added': '支援要請を追加しました',
'Request Canceled': '支援要請をキャンセルしました',
'Request Details': '支援要請の詳細',
'Request Item Details': '救援物資要請の詳細',
'Request Item added': '救援物資の要請を追加しました',
'Request Item deleted': '救援物資の要請を削除しました',
'Request Item updated': '救援物資の要請を更新しました',
'Request Item': '物資を要請',
'Request Items': '物資の要請',
'Request Status': '支援要請の状況',
'Request Type': '支援要請のタイプ',
'Request Updated': '支援要請を更新しました',
'Request added': '支援要請を追加しました',
'Request deleted': '支援要請を削除しました',
'Request for Role Upgrade': '上位権限の取得要求',
'Request updated': '支援要請を更新しました',
'Request': '支援要請',
'Request, Response & Session': '要求、応答、およびセッション',
'Requested By Site': '支援要請を行ったサイト',
'Requested By Warehouse': '倉庫からの要請',
'Requested By': '支援要求元',
'Requested Items': '支援要請が行われた物資',
'Requested by': '要求元',
'Requested on': 'に関する要請',
'Requested': '要求済み',
'Requester': '要請の実施者',
'Requestor': '要請者',
'Requests From': '支援要請フォーム',
'Requests for Item': '物資に関する要請',
'Requests': '支援要請',
'Requires Login!': 'ログインしてください。',
'Requires login': 'ログインが必要です',
'Rescue and recovery': '救出、あるいは遺体回収作業',
'Reset Password': 'パスワードのリセット',
'Reset form': 'フォームをクリア',
'Reset': 'リセット',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Featureのリサイズ: リサイズしたいfeatureを選択し、適切なサイズになるようドラッグしてください',
'Resolve Conflict': '競合の解決',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '"解決"リンクでは、新しい画面を開き、重複している情報を解決してデータベースを更新します',
'Resolve': '解決済みか',
'Resource Details': 'リソースの詳細',
'Resource added': 'リソースを追加しました',
'Resource deleted': 'リソースを削除しました',
'Resource updated': 'リソースを更新しました',
'Resource': 'リソース',
'Resources': 'リソース',
'Respiratory Infections': '呼吸器感染症',
'Response Details': '応答の詳細',
'Response added': '返答を追加しました',
'Response deleted': 'Responseを削除しました',
'Response updated': '返答を更新しました',
'Response': '対応',
'Responses': '対応',
'Restricted Access': 'アクセス制限中',
'Restricted Use': '制限された目的での使用',
'Restrictions': '制限',
'Results': '結果',
'Retail Crime': '小売犯罪',
'Retrieve Password': 'パスワードの取得',
'Rice': '米穀',
'Riot': '暴動',
'River Details': '河川の詳細',
'River added': '河川を追加しました',
'River deleted': '河川を削除しました',
'River updated': '河川を更新しました',
'River': '河川',
'Rivers': '河川',
'Road Accident': '道路障害',
'Road Closed': '道路(通行止め)',
'Road Conditions': '路面の状況',
'Road Delay': '道路遅延',
'Road Hijacking': '道路ハイジャック',
'Road Usage Condition': '道路の路面状況',
'Role Details': '権限の詳細',
'Role Name': '権限の名称',
'Role Required': '権限が必要',
'Role Updated': '権限を更新しました',
'Role added': '権限を追加しました',
'Role deleted': '権限を削除しました',
'Role updated': '権限を更新しました',
'Role': '権限',
'Role-based': '権限に基づいた',
'Roles Permitted': '許可された権限',
'Roles': '権限',
'Roof tile': '屋根瓦',
'Roofs, floors (vertical load)': '屋根、床板 (vertical load)',
'Roster': '名簿',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '地物の回転: 回転させたい地物を選択し、目的の位置に回転させるために関連付けられた点をドラッグします。',
'Row Choices (One Per Line)': '行の選択 (One Per Line)',
'Rows in table': 'テーブルの行',
'Rows selected': '行が選択されました',
'Run Functional Tests': '動作テストの実行',
'Run Interval': '実行間隔',
'Running Cost': 'ランニングコスト',
'SITUATION': '状況',
'Safe environment for vulnerable groups': '被災者にとって安全な環境である',
'Safety Assessment Form': '安全性アセスメントフォーム',
'Safety of children and women affected by disaster': '被災した女性と未成年が保護されている',
'Sahana Administrator': 'Sahana管理者',
'Sahana Blue': 'Sahana ブルー',
'Sahana Community Chat': 'Sahanaコミュニティチャット',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> その他 (Sahana Agasti, Ushahidi 等.)',
'Sahana Eden <=> Other': 'Sahana Eden <=> 他のシステム',
'Sahana Eden Disaster Management Platform': 'Sahana Eden 被災地支援情報共有プラットフォーム',
'Sahana Eden Website': 'Sahana Eden公式ページ',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organizations working in disaster management.': 'Sahana Edenは、災害復旧に関わる様々な支援団体が、お互いに協力しあうために存在します。',
'Sahana FOSS Disaster Management System': 'Sahana オープンソース 被災地情報共有システム',
'Sahana Green': 'Sahana グリーン',
'Sahana Login Approval Pending': 'Sahana ログインは承認待ちです',
'Sahana access granted': 'Sahanaへのアクセス権を付与',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: 新しい支援要請が行われました。ログインして、支援要請を実現できるか確認してください。',
'Salted Fish': '塩漬けの魚',
'Salvage material usable from destroyed houses': '全壊した家屋から回収した物品(使用可能)',
'Salvage material usable from destroyed schools': '全壊した校舎から回収した物品(使用可能)',
'Sanitation problems': '衛生設備に問題',
'Satellite Office': '現地活動拠点',
'Satellite': '衛星',
'Saturday': '土曜日',
'Save any Changes in the one you wish to keep': '残す方の候補地へ行った変更を保存します。',
'Save': '保存',
'Save: Default Lat, Lon & Zoom for the Viewport': 'デフォルト表示範囲の緯度,経度,ズームレベルを保存',
'Saved.': '保存しました',
'Saving...': '保存しています...',
'Scale of Results': '結果の規模',
'Schedule': 'スケジュール',
'School Closure': '学校閉鎖',
'School Lockdown': '学校の厳重封鎖',
'School Reports': '学校のレポート',
'School Teacher': '学校教師',
'School activities': '学校の活動',
'School assistance received/expected': '学校用支援品を受領済み/受領予定',
'School assistance': '学校の援助',
'School attendance': '学校へ出席者',
'School destroyed': '校舎全壊',
'School heavily damaged': '校舎の深刻な損壊',
'School tents received': '仮校舎用テントを受領',
'School tents, source': '仮校舎用テント、送付元',
'School used for other purpose': '校舎を他目的で利用中',
'School': '学校',
'School/studying': '学校/勉強',
'Schools': '学校',
'Search & List Bin Types': 'Bin Typeを検索して一覧表示',
'Search & List Bins': 'Binsを検索して一覧表示',
'Search & List Catalog': 'カタログを検索して一覧表示',
'Search & List Category': 'カテゴリを検索して一覧表示',
'Search & List Items': '救援物資を検索して一覧表示',
'Search & List Locations': 'ロケーションを検索して一覧表示',
'Search & List Site': 'Siteを検索して一覧表示',
'Search & List Sub-Category': 'サブカテゴリを検索して一覧表示',
'Search & List Unit': '単位を検索して一覧表示',
'Search Activities': '支援活動の検索',
'Search Activity Report': '支援活動レポートの検索',
'Search Addresses': '住所を検索',
'Search Aid Requests': '援助要請を検索',
'Search Alternative Items': 'その他のアイテムを検索',
'Search Assessment Summaries': 'アセスメントの要約を検索',
'Search Assessments': 'アセスメントを検索',
'Search Asset Assignments': '資産割り当ての検索',
'Search Assets': '資産の検索',
'Search Baseline Type': 'Baseline Typeを検索',
'Search Baselines': '基準値の検索',
'Search Brands': '銘柄を検索',
'Search Budgets': '予算を検索',
'Search Bundles': 'Bundleを検索',
'Search Catalog Items': '救援物資カタログを検索',
'Search Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog関係の検索',
'Search Checklists': 'チェックリストを検索',
'Search Cluster Subsectors': 'クラスタのサブセクタを検索',
'Search Clusters': 'クラスタを検索',
'Search Commitment Items': 'コミットされた救援物資の検索',
'Search Commitments': 'コミットの検索',
'Search Configs': '設定を検索',
'Search Contact Information': '連絡先情報を検索',
'Search Contacts': '連絡先を検索',
'Search Credentials': '証明書の検索',
'Search Distribution Items': '配給物資を検索',
'Search Distributions': '配給所を検索',
'Search Documents': 'ドキュメントを検索',
'Search Donors': '資金提供組織の検索',
'Search Existing Locations': '既存のロケーションを検索する',
'Search Feature Layers': 'Feature Layersの検索',
'Search Flood Reports': '洪水レポートの検索',
'Search Geonames': 'Geonamesの検索',
'Search Groups': 'グループの検索',
'Search Hospitals': '病院情報の検索',
'Search Identity': 'ID情報の検索',
'Search Images': '画像の検索',
'Search Impact Type': '被害の種類を検索',
'Search Impacts': '影響の検索',
'Search Incident Reports': 'インシデントレポートを検索',
'Search Incidents': 'インシデントの検索',
'Search Inventory Items': '備蓄物資を検索',
'Search Inventory Stores': '物資集積地点の検索',
'Search Item Catalog Category(s)': 'アイテムカタログカテゴリの検索',
'Search Item Catalog(s)': '救援物資カタログの検索',
'Search Item Categories': '救援物資カテゴリを検索',
'Search Item Packs': '物資のパックを検索',
'Search Item Sub-Category(s)': 'アイテムサブカテゴリの検索',
'Search Items': 'アイテムの検索',
'Search Keys': 'Keyの検索',
'Search Kits': 'Kitsの検索',
'Search Layers': 'レイヤの検索',
'Search Level 1 Assessments': 'レベル1アセスメントの検索',
'Search Level 2 Assessments': 'レベル2のアセスメントを検索',
'Search Locations': 'ロケーションの検索',
'Search Log Entry': 'ログエントリの検索',
'Search Map Profiles': '地図設定の検索',
'Search Markers': 'マーカーの検索',
'Search Members': 'メンバーの検索',
'Search Membership': 'メンバシップの検索',
'Search Memberships': 'メンバシップの検索',
'Search Metadata': 'メタデータの検索',
'Search Need Type': '需要タイプの検索',
'Search Needs': '必要な物資を検索',
'Search Notes': '追加情報を検索',
'Search Offices': 'オフィスの検索',
'Search Organizations': '団体の検索',
'Search Peer': '同期先を検索',
'Search Peers': 'データ同期先を検索',
'Search Personal Effects': 'Personal Effectsの検索',
'Search Persons': '人物情報の検索',
'Search Photos': '写真の検索',
'Search Positions': 'Positionsの検索',
'Search Problems': '問題の検索',
'Search Projections': '地図投影法の検索',
'Search Projects': 'プロジェクトの検索',
'Search Rapid Assessments': '被災地の現況アセスメントを検索',
'Search Received Items': '受領済み救援物資の検索',
'Search Received Shipments': '受信済みの出荷の検索',
'Search Records': 'レコードの検索',
'Search Recovery Reports': '遺体回収レポートを検索',
'Search Registations': '登録情報の検索',
'Search Registration Request': '登録要請を検索',
'Search Report': 'レポートの検索',
'Search Reports': 'レポートの検索',
'Search Request Items': '物資の要請を検索',
'Search Request': '支援要請の検索',
'Search Requested Items': '支援要請されている物資を検索',
'Search Requests': '支援要請の検索',
'Search Resources': 'リソースの検索',
'Search Responses': '検索の応答',
'Search Rivers': '河川を検索',
'Search Roles': '役割の検索',
'Search Sections': 'セクションの検索',
'Search Sectors': '活動分野を検索',
'Search Sent Items': '送付した物資を検索',
'Search Sent Shipments': '送信した出荷の検索',
'Search Service Profiles': 'サービスプロファイルの検索',
'Search Settings': '設定の検索',
'Search Shelter Services': '避難所での提供サービスを検索',
'Search Shelter Types': '避難所タイプの検索',
'Search Shelters': '避難所の検索',
'Search Shipment Transit Logs': '輸送履歴の検索',
'Search Shipment/Way Bills': '輸送費/渡航費の検索',
'Search Shipment<>Item Relation': '輸送と救援物資の関係性の検索',
'Search Site(s)': 'Siteの検索',
'Search Skill Types': 'スキルタイプの検索',
'Search Skills': 'スキルを検索',
'Search Solutions': '解決案の検索',
'Search Staff Types': 'スタッフタイプの検索',
'Search Staff': 'スタッフの検索',
'Search Status': '状態の検索',
'Search Storage Bin Type(s)': 'Storage Bin Typeの検索',
'Search Storage Bin(s)': 'Storage Bin(s)の検索',
'Search Storage Location(s)': '備蓄地点の検索',
'Search Subscriptions': '寄付申し込みを検索',
'Search Support Requests': '支援要求の検索',
'Search Tasks': 'タスクの検索',
'Search Teams': 'チームの検索',
'Search Themes': 'テーマの検索',
'Search Tickets': 'チケットの検索',
'Search Tracks': '追跡情報の検索',
'Search Twitter Tags': 'Twitterのタグを検索',
'Search Units': '単位の検索',
'Search Users': 'ユーザの検索',
'Search Volunteer Registrations': 'ボランティア登録の検索',
'Search Volunteers': 'ボランティアの検索',
'Search Warehouse Items': '倉庫の物資を検索',
'Search Warehouses': 'Warehousesの検索',
'Search and Edit Group': 'グループを検索して編集',
'Search and Edit Individual': '人物情報を検索して個別に編集',
'Search by ID Tag': 'IDタグで検索',
'Search for Items': '物資の検索',
'Search for a Hospital': '病院を探す',
'Search for a Location': '検索地域を指定します',
'Search for a Person': '人物を探す',
'Search for a Project': 'プロジェクトを探す',
'Search for a Request': '支援要請の検索',
'Search for a shipment received between these dates': 'ある期間内に受け取られた輸送を検索する',
'Search for an item by category.': 'カテゴリで物資を検索',
'Search for an item by text.': 'テキストで項目を検索',
'Search here for a person record in order to:': '人物情報を検索することで、以下の事柄を行うことができます。',
'Search messages': 'メッセージの検索',
'Search': '検索',
'Searching for different groups and individuals': '他のグループと個人を探す',
'Secondary Server (Optional)': 'セカンダリサーバ(オプション)',
'Seconds must be a number between 0 and 60': '秒には0-60の間の数字を記入してください',
'Seconds must be a number greater than 0 and less than 60': '秒は0から60の間で入力してください',
'Section Details': 'Sectionの詳細',
'Section deleted': 'Sectionを削除しました',
'Section updated': 'セクションを更新しました',
'Sections': 'セクション',
'Sector Details': '活動分野の詳細',
'Sector added': '活動分野を追加しました',
'Sector deleted': '活動分野を削除しました',
'Sector updated': '活動分野を更新しました',
'Sector': '活動分野',
'Sectors': '活動分野',
'Security Policy': 'セキュリティポリシー',
'Security Status': 'セキュリティステータス',
'Security problems': 'セキュリティーの問題',
'See unassigned recovery requests': 'まだ割り当てられていない遺体回収要請を見る',
'Seen': '発見情報あり',
'Select 2 potential locations from the dropdowns.': '候補地を2つ、ドロップダウンから選択します。',
'Select Items from the Request': '支援要請を基にアイテムを選択する',
'Select Items from this Inventory': '備蓄中の物資から選択',
'Select Language': '言語選択',
'Select Organization': '団体の選択',
'Select Photos': '写真の選択',
'Select a location': 'ロケーションを選択',
'Select a question from the list': 'リストから質問を選択してください',
'Select a range for the number of total beds': 'ベッド総数の範囲を選択',
'Select all that apply': '該当する項目を全て選択',
'Select an Organization to see a list of offices': '団体を選択すると、所属するオフィスが表示されます',
'Select an existing Location': '既に登録してあるロケーションを選択してください',
'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:': 'アセスメントと支援活動のギャップを解析するクラスタの層を選択:',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'オーバーレイを指定し、適切なアセスメントと支援活動を表示させてニーズを明確にします。',
'Select the person assigned to this role for this project.': 'この人物に、プロジェクト内の権限を担当させます。',
'Select the person associated with this scenario.': 'このタスクに関連する人物を選択してください。',
'Select to see a list of subdivisions.': '項目を選択すると、より細かい分類を選択できます。',
'Select to show this configuration in the Regions menu.': '範囲メニューで表示する構成を選択して下さい',
'Select': '選択',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'SMS送信時に、モデム、Tropoまたはゲートウェイのどちらを使用するかを選択',
'Selects whether to use the gateway or the Modem for sending out SMS': 'SMS送信時、モデムとゲートウェイのどちらを使用するか選択',
'Self Registration': '本人による登録',
'Self-registration': '本人による登録',
'Send Alerts using Email &/or SMS': '電子メールまたはSMSを使用してアラートを送信',
'Send Items': '物資を送付',
'Send Mail': 'メール送信',
'Send Message': 'メッセージを送る',
'Send Notification': '通知を送信',
'Send Shipment': '輸送を開始する',
'Send from %s': '依頼主( %s )',
'Send message': 'メッセージ送信',
'Send new message': '新規メッセージ送信',
'Send': '物資送付',
'Sends & Receives Alerts via Email & SMS': '電子メール/SMS 経由でアラート送信/受信',
'Senior (50+)': '高齢者 (50+)',
'Sensitivity': '感度',
'Sent Item Details': '送付した物資の詳細',
'Sent Item deleted': '輸送済み物資を削除しました',
'Sent Item updated': '送付した救援物資を更新しました',
'Sent Shipment Details': '送付物資の詳細',
'Sent Shipment canceled and items returned to Inventory': '送付処理した輸送がキャンセルされ、物資は倉庫に戻りました',
'Sent Shipment canceled': '輸送開始をキャンセルしました',
'Sent Shipment updated': '送信した物資が更新されました',
'Sent Shipments': '物資を送付しました',
'Sent': '送信',
'Separate latrines for women and men': 'トイレは男女別である',
'Separated children, caregiving arrangements': '親と離れた子供だちのための保育手配',
'Seraiki': 'セライキ',
'Serial Number': 'シリアルナンバー',
'Series': 'シリーズ',
'Server': 'サーバ',
'Service Catalog': 'サービスカタログ',
'Service or Facility': 'サービス、または施設',
'Service profile added': 'サービスプロファイルを追加しました',
'Service profile deleted': 'サービスプロファイルを削除しました',
'Service profile updated': 'サービスプロファイルを更新しました',
'Service': 'サービス',
'Services Available': '利用可能なサービス',
'Services': 'サービス',
'Setting Details': '設定の詳細',
'Setting added': '設定を追加しました',
'Setting deleted': '設定を削除しました',
'Setting updated': '設定を更新しました',
'Settings updated': '設定を更新しました',
'Settings were reset because authenticating with Twitter failed': 'Twitterの認証に失敗したため、設定をクリアします',
'Settings': '設定',
'Severe': '深刻',
'Severity': '深刻度',
'Severity:': '深刻度:',
'Share a common Marker (unless over-ridden at the Feature level)': 'マーカーの共有 (機能レイヤで上書きされない限り)',
'Shelter & Essential NFIs': '避難所/生活用品',
'Shelter Details': '避難所の詳細',
'Shelter Name': '避難所名称',
'Shelter Registry': '避難所登録',
'Shelter Service Details': '避難所サービスの詳細',
'Shelter Service added': '避難所サービスを追加しました',
'Shelter Service deleted': '避難所サービスを削除しました',
'Shelter Service updated': '避難所サービスを更新しました',
'Shelter Service': '避難所サービス',
'Shelter Services': '避難所サービス',
'Shelter Type Details': '避難所タイプの詳細',
'Shelter Type added': '避難所タイプを追加しました',
'Shelter Type deleted': '避難所タイプを削除しました',
'Shelter Type updated': '避難所サービスを更新しました',
'Shelter Type': '避難所タイプ',
'Shelter Types and Services': '避難所のタイプとサービス',
'Shelter Types': '避難所タイプ',
'Shelter added': '避難所を追加しました',
'Shelter deleted': '避難所を削除しました',
'Shelter updated': '避難所を更新しました',
'Shelter': '避難所',
'Shelter/NFI Assistance': '避難所 / 生活用品支援',
'Shelter/NFI assistance received/expected': '避難所 / 生活必需品の支援を受領済み、あるいは受領予定',
'Shelters': '避難所',
'Shipment Created': '輸送が作成されました',
'Shipment Details': '輸送の詳細',
'Shipment Items received by Inventory': '物資備蓄地点から送付された救援物資',
'Shipment Items sent from Inventory': '備蓄物資から輸送を行いました',
'Shipment Items': '救援物資の輸送',
'Shipment Transit Log Details': '輸送履歴の詳細',
'Shipment Transit Log added': '輸送履歴を追加しました',
'Shipment Transit Log deleted': '輸送履歴を削除しました',
'Shipment Transit Log updated': '輸送履歴を更新しました',
'Shipment Transit Logs': '輸送履歴',
'Shipment/Way Bill added': '輸送/移動費を追加しました',
'Shipment/Way Bills Details': '輸送/移動費の詳細',
'Shipment/Way Bills deleted': '輸送/移動費を削除しました',
'Shipment/Way Bills updated': '輸送/移動費を更新しました',
'Shipment/Way Bills': '輸送/移動費',
'Shipment<>Item Relation added': '輸送<>物資間の関係を追加しました',
'Shipment<>Item Relation deleted': '輸送<>アイテム間の関係を削除しました',
'Shipment<>Item Relation updated': '輸送<>物資間の関係を更新しました',
'Shipment<>Item Relations Details': '輸送<>物資間の関係詳細',
'Shipment<>Item Relations': '輸送<>物資間の関係',
'Shipments To': '輸送先',
'Shipments': '輸送',
'Shooting': '銃撃',
'Short Assessment': '簡易評価',
'Short Description': '概要',
'Show Checklist': 'チェックリストを表示',
'Show Details': '詳細を表示',
'Show Map': '地図の表示',
'Show Region in Menu?': '地域をメニューで表示しますか?',
'Show on map': '地図上に表示',
'Sign-up as a volunteer': 'ボランティアとして登録する',
'Sign-up for Account': 'アカウント登録',
'Sign-up succesful - you should hear from us soon!': '登録できました。すぐに連絡が送られます。',
'Sindhi': 'シンド語',
'Site Address': 'サイトの住所',
'Site Administration': 'このサイト自体の管理',
'Site Description': 'サイトの説明',
'Site Details': 'Siteの詳細',
'Site ID': 'サイトID',
'Site Location Description': 'サイト ロケーションの説明',
'Site Location Name': 'サイトロケーション名',
'Site Manager': 'Site 管理者',
'Site Name': 'Site の名前',
'Site added': 'サイトを追加しました',
'Site deleted': 'サイトを削除しました',
'Site updated': 'サイトを更新しました',
'Site': 'サイト',
'Site/Warehouse': 'サイト/倉庫',
'Sites': 'サイト',
'Situation Awareness & Geospatial Analysis': '広域情報の取得や、地理情報の分析を行ないます',
'Sketch': 'スケッチ',
'Skill Details': 'スキルの詳細',
'Skill Status': 'スキル状況',
'Skill Type Details': 'スキルタイプの詳細',
'Skill Type added': 'スキルタイプを追加しました',
'Skill Type deleted': 'スキルタイプを削除しました',
'Skill Type updated': 'スキルタイプを更新しました',
'Skill Types': 'スキルタイプ',
'Skill added': 'スキルを追加しました',
'Skill deleted': 'スキルを削除しました',
'Skill updated': 'スキルを更新しました',
'Skill': 'スキル',
'Skills': 'スキル',
'Slope failure, debris': '斜面崩壊・崩壊堆積物',
'Small Trade': '小規模取引',
'Smoke': '煙',
'Snapshot Report': 'スナップショットレポート',
'Snapshot': 'スナップショット',
'Snow Fall': '降雪',
'Snow Squall': '豪雪',
'Soil bulging, liquefaction': '土壌隆起・液状化',
'Solid waste': '固形廃棄物',
'Solution Details': '解決案の詳細',
'Solution Item': '解決案項目',
'Solution added': '解決案を追加しました',
'Solution deleted': '解決案を削除しました',
'Solution updated': '解決案を更新しました',
'Solution': '解決案',
'Solutions': '解決案',
'Some': '散見',
'Sorry - the server has a problem, please try again later.': 'すみません、サーバーに問題が発生しています。時間を置いてやり直してください。',
'Sorry that location appears to be outside the area of the Parent.': 'このロケーションは親属性のエリアの外に表示されます。',
'Sorry that location appears to be outside the area supported by this deployment.': 'すいません、この位置は、このデプロイメントでサポートされている領域の外です。',
'Sorry, I could not understand your request': '残念ながら、リクエストが理解できませんでした。',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '申し訳ありませんが、 MapAdmin 権限を持つユーザだけがロケーションのグループを作れます',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '申し訳ありませんが、ロケーションの編集を行うにはMapAdmin権限を持ったユーザである必要があります。',
'Sorry, something went wrong.': 'すいません、何か問題が発生しています。',
'Sorry, that page is forbidden for some reason.': 'すいません、都合により、このページは閲覧禁止です。',
'Sorry, that service is temporary unavailable.': 'すいません、このサービスは一時的に利用不可となっています。',
'Sorry, there are no addresses to display': 'すいません、表示する住所がありません',
'Source ID': '情報元ID',
'Source Time': '情報ソース入手時刻',
'Source Type': '情報ソース種別',
'Source': '情報元',
'Sources of income': '収入源',
'Space Debris': '宇宙廃棄物',
'Spanish': 'スペイン語',
'Special Ice': '特別な氷',
'Special Marine': '特別海上',
'Special needs': '特別な要求',
'Specialized Hospital': '専門病院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'ある人々やグループが見られるロケーションの中の特別な場所 (建物、部屋等)',
'Specific Location': '特定のロケーション',
'Specific locations need to have a parent of level': 'ロケーションを指定するには、そのロケーションの親属性指定が必要です',
'Specify a descriptive title for the image.': '画像の説明として一言タイトルをつけてください。',
'Specify the bed type of this unit.': 'この施設にある寝具の種別を指定してください',
'Specify the minimum sustainability in weeks or days.': '最短で何週間、あるいは何日以内に枯渇の可能性があるかを記載してください',
'Specify the number of available sets': '利用可能なセットの個数を入力してください',
'Specify the number of available units (adult doses)': '(成人が使用するとして)使用可能な個数を入力してください',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '使用可能な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Specify the number of sets needed per 24h': '24時間ごとに必要なセットの数を指定する',
'Specify the number of units (adult doses) needed per 24h': '(成人が使用するとして)24時間ごとに必要な個数を入力してください',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '24時間ごとに必要な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Spherical Mercator?': '球面メルカトル?',
'Spreadsheet Importer': 'スプレッドシートの取り込み',
'Spreadsheet uploaded': 'スプレッドシートがアップロードされました',
'Spring': '湧き水',
'Squall': 'スコール',
'Staff 2': 'スタッフ 2',
'Staff Details': 'スタッフの詳細',
'Staff Type Details': 'スタッフタイプの詳細',
'Staff Type added': 'スタッフタイプを追加しました',
'Staff Type deleted': 'スタッフタイプを削除しました',
'Staff Type updated': 'スタッフタイプを更新しました',
'Staff Types': 'スタッフ分類',
'Staff added': 'スタッフを追加しました',
'Staff deleted': 'スタッフを削除しました',
'Staff present and caring for residents': '上記施設にスタッフが配置され、ケアを行っている',
'Staff updated': 'スタッフを更新しました',
'Staff': 'スタッフ',
'Staffing': 'スタッフ配備',
'Stairs': '階段',
'Start date and end date should have valid date values': '開始日と終了日は正しい値である必要があります',
'Start date': '開始日',
'Start of Period': '開始期間',
'Stationery': '文房具',
'Status Report': 'ステータスレポート',
'Status added': '状況が追加されました',
'Status deleted': 'ステータスを削除しました',
'Status of clinical operation of the facility.': '施設で行われている診療の状況を記載してください。',
'Status of general operation of the facility.': '施設の運用状況情報を記載してください。',
'Status of morgue capacity.': '死体安置所の収容状況です。',
'Status of operations of the emergency department of this hospital.': 'この病院の緊急手術室の状態です。',
'Status of security procedures/access restrictions in the hospital.': '病院のアクセス制限/セキュリティ手順の状態。',
'Status of the operating rooms of this hospital.': 'この病院の手術室の状態。',
'Status updated': '状況を更新しました',
'Status': 'ステータス',
'Steel frame': '鉄骨',
'Storage Bin Details': '物資保管場所の詳細',
'Storage Bin Number': 'Storage Bin番号',
'Storage Bin Type Details': '物資保管タイプの詳細',
'Storage Bin Type added': '物資保管タイプを追加しました',
'Storage Bin Type deleted': 'Storage Binタイプを削除しました',
'Storage Bin Type updated': 'Storage Binタイプを更新しました',
'Storage Bin Type': 'Storage Binタイプ',
'Storage Bin Types': '収納箱のタイプ',
'Storage Bin added': 'Storage Binを追加しました',
'Storage Bin deleted': 'Storage Bin を削除しました',
'Storage Bin updated': 'Storage Bin を更新しました',
'Storage Bin': '物資貯蔵容器',
'Storage Bins': '物資保管場所',
'Storage Location Details': '備蓄地点の詳細',
'Storage Location ID': '備蓄地点ID',
'Storage Location Name': '備蓄地点名称',
'Storage Location added': '備蓄地点を追加しました',
'Storage Location deleted': '備蓄地点を削除しました',
'Storage Location updated': '備蓄地点を更新しました',
'Storage Location': '備蓄地点',
'Storage Locations': '備蓄地点',
'Store spreadsheets in the Eden database': 'Edenのデータベースにスプレッドシートを格納',
'Storeys at and above ground level': '階層、あるいは地面より上部',
'Storm Force Wind': '嵐の風の強さ',
'Storm Surge': '高潮',
'Stowaway': '密航者',
'Street (continued)': '住所 (続き)',
'Street Address': '住所',
'Street': 'ストリート',
'Strong Wind': '強風',
'Structural Hazards': '構造破壊',
'Structural': '構造的な',
'Sub Category': 'サブカテゴリ',
'Sub-type': 'サブタイプ',
'Subject': '件名',
'Submission successful - please wait': '送信に成功しました。しばらくお待ちください',
'Submission successful - please wait...': '送信に成功しました。しばらくお待ちください',
'Submit New (full form)': '(完全なフォームで)新しく投稿する',
'Submit New (triage)': '新しい (トリアージ) を追加',
'Submit New': '新規登録',
'Submit a request for recovery': '遺体回収要請を作成する',
'Submit new Level 1 assessment (full form)': 'レベル1のアセスメントを投稿する(完全なフォーム)',
'Submit new Level 1 assessment (triage)': '新しいレベル1アセスメント(トリアージ)を追加',
'Submit new Level 2 assessment': '新しいレベル2アセスメントの登録',
'Submit': '送信',
'Subscription Details': '寄付申し込みの詳細',
'Subscription added': '寄付申し込みを追加しました',
'Subscription deleted': '寄付申し込みを削除しました',
'Subscription updated': '寄付申し込みを更新しました',
'Subscriptions': '寄付申し込み',
'Subsistence Cost': '生存コスト',
'Suburb': '郊外',
'Sufficient care/assistance for chronically ill': '慢性疾患罹患者への十分なケア / 介護がある',
'Suggest not changing this field unless you know what you are doing.': 'よくわからない場合は、この項目を変更しないでください。',
'Summary by Administration Level': '管理レベルの概要',
'Summary': '要約',
'Sunday': '日曜',
'Supervisor': '管理権限を追加',
'Supplies': '支給品',
'Support Request': '支援要請',
'Support Requests': '支援の要請',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '危機管理の専門グループの助言を取り入れることで、救援活動の優先順位を作成しやすくします。',
'Sure you want to delete this object?': 'このオブジェクトを削除してもよろしいですか?',
'Surgery': '外科',
'Survey Answer Details': '調査回答詳細',
'Survey Answer added': '調査の回答を追加しました',
'Survey Answer deleted': '調査の回答を削除しました',
'Survey Answer updated': '調査回答を更新しました',
'Survey Answer': '調査回答',
'Survey Module': '調査モジュール',
'Survey Name': 'Survey 名',
'Survey Question Details': '調査項目の詳細',
'Survey Question Display Name': 'フィードバックの質問の表示名',
'Survey Question added': '調査の質問を追加しました',
'Survey Question deleted': '調査の質問を削除しました',
'Survey Question updated': 'Survey Questionを更新しました',
'Survey Question': '調査の質問',
'Survey Section Details': 'フィードバック項目の詳細',
'Survey Section Display Name': '調査項目の表示名',
'Survey Section added': '調査項目を追加しました',
'Survey Section deleted': 'フィードバック項目を削除しました',
'Survey Section updated': 'サーベイセクションを更新しました',
'Survey Section': '調査項目',
'Survey Series Details': 'Survey Seriesの詳細',
'Survey Series Name': 'フィードバックシリーズ名',
'Survey Series added': '一連の調査を追加しました',
'Survey Series deleted': '一連の調査を削除しました',
'Survey Series updated': '連続調査を更新しました',
'Survey Series': '一連の調査',
'Survey Template Details': '調査テンプレートの詳細',
'Survey Template added': 'Surveyテンプレートを追加しました',
'Survey Template deleted': '調査テンプレートを削除しました',
'Survey Template updated': '調査のテンプレートを更新しました',
'Survey Template': '調査テンプレート',
'Survey Templates': '調査のテンプレート',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開発時にこのスイッチをONにすることで、CSS/Javascriptファイルの診断を行なえます。',
'Symbology': 'コード',
'Sync Conflicts': 'データ同期中に競合が発生しました',
'Sync History': 'データ同期履歴',
'Sync Now': 'データ同期中',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'データ同期先とは、情報の同期を行うインスタンスやピアのことを指します。(Sahana EdenやSahanaAgasti、Ushahidiなどと同期可能です) 同期先の登録や検索、登録情報の変更を行う際は、リンクをクリックしてページを表示してください。',
'Sync Partners': 'データ同期パートナー',
'Sync Pools': 'プールの同期',
'Sync Schedule': 'データ同期スケジュール',
'Sync Settings': 'データ同期設定',
'Sync process already started on ': 'データ同期プロセスは既に開始しています',
'Synchronisation History': 'データ同期履歴',
'Synchronisation': '同期',
'Synchronization Conflicts': '同期のコンフリクト',
'Synchronization Details': 'データ同期の詳細',
'Synchronization History': 'データ同期履歴',
'Synchronization Peers': 'データ同期先',
'Synchronization Settings': 'データ同期設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'データ同期を使用すると、他の端末とデータを共有し、自身のデータを最新の状態に更新することができます。このページには、SahanaEdenにおいてデータ同期を行う方法が記載されています。',
'Synchronization not configured.': 'データ同期が設定されていません',
'Synchronization settings updated': 'データ同期設定を更新しました',
'Synchronization': 'データ同期',
'Syncronisation History': 'データ同期履歴',
'Syncronisation Schedules': 'データ同期スケジュール',
'System allows the General Public to Report Incidents & have these Tracked.': 'システムを使うことで、一般市民によるインシデントの報告、および報告されたインシデントの追跡を行うことができます。',
'System allows the tracking & discovery of Items stored in Locations.': 'システムにより、物資がどこで保持されているかを追跡、明確化することができます。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'このシステムは、支援団体、個々の支援者、政府職員、そして避難所に移動した人々の間で、援助の需要と供給の調整を図るための、オンラインの中央データベースです。このシステムを用いて、利用可能な資源を、需要を満たすように、有効かつ効率的に割り当てることができます。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'この仕組みでは、災害地域の全てのボランティア情報を提供します。ボランティアの活動場所に加え、そこで提供する支援内容も提供します。',
'TMS Layers': 'TMSレイヤ',
'Table name': 'テーブル名',
'Tags': 'タグ',
'Take shelter in place or per <instruction>': '場所や<instruction>ごとに避難してください',
'Task Details': 'タスクの詳細',
'Task List': 'タスク一覧',
'Task Status': 'タスクの状況',
'Task added': 'タスクを追加しました',
'Task deleted': 'タスクを削除しました',
'Task status': 'タスク状況',
'Task updated': 'タスクを更新しました',
'Tasks': 'タスク',
'Team Description': 'チーム概要',
'Team Details': 'チームの詳細',
'Team Head': 'チーム代表者',
'Team Id': 'チームID',
'Team Leader': 'チームリーダー',
'Team Member added': 'チームメンバーを追加しました',
'Team Members': 'チームメンバー',
'Team Name': 'チーム名',
'Team Type': 'チームタイプ',
'Team added': 'チームを追加しました',
'Team deleted': 'チームを削除しました',
'Team updated': 'チームを更新しました',
'Team': 'チーム',
'Teams': 'チーム',
'Technical testing only, all recipients disregard': '技術検証のみで、すべての受取人は無視されます',
'Telecommunications': '通信・情報',
'Telephone': '電話',
'Telephony': '電話',
'Temp folder %s not writable - unable to apply theme!': '一時フォルダ%sが書き込み不可になっています。テーマを適用できません。',
'Template file %s not readable - unable to apply theme!': 'テンプレートファイル %s が読み込み不可になっています。テーマを適用できません。',
'Templates': 'テンプレート',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '国内における第五段階管理部門を示す用語(例: 郵便番号の下位部分)。このレベルは通常使われません。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '国内で第4の行政区域を示す用語 (例えば村、地区)',
'Term for the primary within-country administrative division (e.g. State or Province).': '国内で最大の行政区域を示す用語 (例えば州や都道府県)',
'Term for the secondary within-country administrative division (e.g. District).': '国内で二番目の管理部門の用語 (例: 区)',
'Term for the third-level within-country administrative division (e.g. City or Town).': '国内で三番目の管理部門を示す用語 (例: 市や町)',
'Term for the top-level administrative division (typically Country).': '最上位の統制区域を示す用語 (一般的には国)',
'Territorial Authority': '地方機関',
'Terrorism': 'テロリズム',
'Tertiary Server (Optional)': '三番目のサーバ(オプション)',
'Test Results': 'テスト結果',
'Text Color for Text blocks': 'テキストブロックのテキスト色',
'Text before each Text Field (One per line)': 'テキストフィールドの前のテキスト (一行に一つ)',
'Text in Message': 'メッセージのテキスト',
'Text in Message: ': 'メッセージのテキスト: ',
'Text': 'テキスト',
'Thanks for your assistance': 'ご協力ありがとうございます',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"query"は"db.table1.field1==\'value\'"のような条件です。SQL JOINの"db.table1.field1 == db.table2.field2"結果のようなものです。',
'The Area which this Site is located within.': 'このサイトが含まれる地域',
'The Assessments module allows field workers to send in assessments. 2 different options are provided here currently:': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。現在は、2種類のオプションが提供されています。',
'The Assessments module allows field workers to send in assessments.': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。',
'The Author of this Document (optional)': 'この文書の作成者氏名(オプション)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'ビルアセスメントモジュールではビルの安全性評価を行います (例:地震の後など)',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物/グループの現在地は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The District for this Report.': 'このレポートが関連する地区。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '承認依頼が送信されるメールアドレス(通常は個人のメールアドレスではなく、グループのメールアドレス)。この欄が空白の場合、ドメインが一致すれば依頼は自動的に承認されます',
'The Group whose members can edit data in this record.': 'このグループのメンバーは、レコード上のデータを修正することができます。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '一般ユーザは、インシデント・レポートシステムからインシデントを報告し、その結果を表示させることができます。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Siteのロケーション、(レポート用で)おおまかな場合と、(地図表示用で)正確な場合とがあります。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物がやって来たロケーションで、報告のためのだいたいの場所、あるいは地図で表示するための正確な緯度経度です。使用可能なロケーションを検索するには最初の数文字を入力してください',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物が向かう場所は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The Media Library provides a catalog of digital media.': 'メディア・ライブラリーは、デジタル・メディアの一覧を提供します。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'メッセージング・モジュールは、SAHANAシステムのコミュニケーション中心となります。災害の前、災害中または災害の後に様々なグループや個人にSMSとeメールで警報やメッセージを送ります。',
'The Office this record is associated with.': 'このレコードに関連するオフィス',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '団体情報を登録することで、被災地域で活動するすべての団体の活動を追跡します。また、それぞれの地域において、彼らがどこで活動しているかという情報だけでなく、彼らが各地で提供しているプロジェクトの範囲についての情報も提供します。',
'The Organization this record is associated with.': 'このレコードに関連する団体',
'The Organization which is funding this Activity.': 'この支援活動に資金を提供する団体',
'The Person currently filling this Role.': '現在この役割に属している人物',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'プロジェクト追跡モジュールでは、支援活動(アクティビティ)を作成し、必要な物資 / サービスのギャップを満たすことを目的とします。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '被災地の現況アセスメントには、専門団体によって行われたレポートの結果が格納されます。',
'The Request this record is associated with.': 'このレコードに関連する支援要請',
'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '支援要請管理システムは、全ての支援団体、救援者、政府職員、および避難所に暮らす避難者たち自身が、要求に応じて援助の供給を調整できる中央のオンラインデータベースです。支援要請管理システムは効果的かつ効率的に要求を満たすことができる利用可能な資源の割り当てを可能にします。',
'The Role this person plays within this Office/Project.': 'オフィス/プロジェクトにおける役割',
'The Role this person plays within this hospital.': '病院内における役割',
'The Role to which this Role reports.': 'この権限の報告先となる権限',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所登録は、避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'The Shelter this Request is from (optional).': '要請を行った避難所(オプション)',
'The Shelter this person is checking into.': 'この人物がチェックインした避難所',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '地図を用いてレイヤを利用できる WMS サービスの GetCapabilities の URL。',
'The URL of your web gateway without the post parameters': 'ポストパラメータを指定しないWebゲートウェイのURL',
'The URL to access the service.': 'サービスにアクセスするためのURL',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府UUID|政府がこの施設に割り当てている汎用一意識別子(UUID)。',
'The area is ': 'この地域は',
'The attribute within the KML which is used for the title of popups.': 'このKML属性はポップアップのタイトルに使われます。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KMLで定義されている属性はポップアップの本文に使用されます。(各属性ごとに半角スペースで分割して記載してください)',
'The body height (crown to heel) in cm.': '頭頂からかかとまでの身長(単位はcm)',
'The category of the Item.': 'この救援物資のカテゴリです',
'The contact person for this organization.': '団体の代表窓口',
'The country the person usually lives in.': 'この人物が普段の生活を営む国',
'The default policy for data import from this peer.': 'このデータ同期先からデータをインポートする際のデフォルト設定。',
'The descriptive name of the peer.': 'データ同期先のわかりやすい名称',
'The duplicate record will be deleted': '重複したレコードは削除されます',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '入力した単位をこのユニットにリンクします。例えば、mをメートルとする場合、(存在するなら) kilometer を選択して、乗数に値 0.001 を入力します。',
'The first or only name of the person (mandatory).': '人物の苗字(必須)。 外国籍の方等については避難所等での管理上の主たる表記/順に従ってください。',
'The following modules are available': '利用可能なモジュールは以下のとおりです。',
'The hospital this record is associated with.': 'このレコードに関連のある病院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'ある特定のプロジェクトや、人々、市町村への物資または、交付コード等のついた特定区域への寄付等のは物資は、送付されることになっています。',
'The language to use for notifications.': '通知に使用する言語',
'The language you wish the site to be displayed in.': 'このサイトを表示するための言語',
'The last known location of the missing person before disappearance.': '行方不明者が最後に目撃された場所',
'The length is ': '長さは',
'The list of Brands are maintained by the Administrators.': '銘柄一覧の整備は、管理者によって可能です',
'The list of Item categories are maintained by the Administrators.': '供給物資カテゴリの一覧は、管理者によってメンテナンスされています。',
'The name to be used when calling for or directly addressing the person (optional).': '電話をかける際など、直接連絡をとりたい場合に使われる名前(オプション)',
'The next screen will allow you to detail the number of people here & their needs.': '次の画面では、人数および必要な物資/サービスの詳細を確認できます。',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '次のスクリーンで、項目の詳細なリストと量を入力できる場合があります。',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '元の物資一つと同じだけの、代替品の測定単位での数量',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '表示している地図の周辺タイルをダウンロードする数。0は最初のページの読み込みがより早い事を意味し、数字を大きくすると視点をパンした際に表示がより早くなります。',
'The person at the location who is reporting this incident (optional)': '現地からこのインシデントを報告した人物(オプション)',
'The person reporting about the missing person.': '行方不明者情報の提供者。',
'The person reporting the missing person.': '行方不明者を報告した人',
'The post variable containing the phone number': '電話番号を含む post 変数',
'The post variable on the URL used for sending messages': 'メッセージ送信に使用するURLのPOST変数',
'The post variables other than the ones containing the message and the phone number': 'メッセージや電話番号以外を含むpost変数',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'モデムが接続されているシリアルポート - Linuxでは /dev/ttyUSB0 等、Windowsでは com1, com2 等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーからの応答がありませんでした。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーから不正な応答が返ってきました。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'シンプルポリシーでは、匿名ユーザーによるデータの閲覧、および、登録ユーザーによる編集が許可されます。完全版ポリシーでは、個々のテーブルやレコードに対して管理権限を設定することができます。詳細はmodels/zzz.pyを参照してください。',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '件名のイベントはこれ以上の脅威や懸案事項を引き起こすことはありません。よって、<instruction>には、今後実施すべきアクションが記述されていません。',
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'あなたのタイムゾーンとUTCとの差を、東では+HHMMで、西では-HHMMで指定してください',
'The title of the WMS Browser panel in the Tools panel.': '[ツール]パネルのWMS Browserパネルのタイトル',
'The token associated with this application on': 'このアプリケーションが関連づけられているトークン',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '一意のデータ同期先識別子です。データ同期先がSahana Edenシステムではない場合は、空白にしておくことで自動的に割り当てが行われます。',
'The unique identifier which identifies this instance to other instances.': 'このインスタンスを他のインスタンスと区別するための固有識別子',
'The way in which an item is normally distributed': '物資が配給される際の通常経路',
'The weight in kg.': '重量(単位:kg)',
'The': ' ',
'Theme Details': 'テーマの詳細',
'Theme added': 'テーマを追加しました',
'Theme deleted': 'テーマを削除しました',
'Theme updated': 'テーマを更新しました',
'Theme': 'テーマ',
'Themes': 'テーマ',
'There are errors': 'エラーが発生しました',
'There are multiple records at this location': 'このロケーションに複数のレコードが存在します',
'There are not sufficient items in the Inventory to send this shipment': 'この輸送を開始するために十分な量の物資が備蓄されていません',
'There is no address for this person yet. Add new address.': 'この人物の住所がまだありません。新しい住所を入力してください',
'There was a problem, sorry, please try again later.': '問題が発生しています。すみませんが、時間を置いてからやり直してください。',
'These are settings for Inbound Mail.': '電子メール受信箱の設定です',
'These are the Incident Categories visible to normal End-Users': '普通のユーザーが見ることができるインシデント一覧です',
'These are the default settings for all users. To change settings just for you, click ': 'これらは、全てのユーザーのデフォルト設定です。個人用の設定を変更するには、以下をクリックしてください。',
'These need to be added in Decimal Degrees.': 'これらは、十進角で追加する必要があります。',
'They': 'それら',
'This Group has no Members yet': 'メンバはまだ登録されていません',
'This Team has no Members yet': 'メンバはまだ登録されていません',
'This appears to be a duplicate of ': 'これは、以下のものと重複しているようです。',
'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).': '住所か、あるいは簡単な記述(ガソリンスタンドの隣、など)を記載しています。',
'This email address is already in use': 'このメールアドレスは使用されています',
'This file already exists on the server as': 'このファイルは別の名前でサーバに既に存在しています : ',
'This form allows the administrator to remove a duplicate location.': '管理者はこのフォームを使うことで、重複したロケーションデータを削除できます。',
'This is the way to transfer data between machines as it maintains referential integrity.': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。...重複したデータは最初に手動で削除する必要があります。',
'This might be due to a temporary overloading or maintenance of the server.': 'サーバーが一時的に過負荷状態になっているか、あるいはメンテナンスを行っています。',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '過去に行ったデータ同期履歴を表示します。以下のリンクをクリックしてください。',
'This screen allows you to upload a collection of photos to the server.': 'この画面では、複数の画像をサーバーにアップロードすることができます。',
'This shipment has already been received.': '輸送が開始され、物資が受領されました',
'This shipment has already been sent.': '輸送が開始され、送付されました',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'この輸送は受領されていません。 - まだ編集可能であり、キャンセルされてはいません',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': '輸送はまだ開始されていませんが、キャンセルされてはいません。編集可能です。',
'This shipment will be confirmed as received.': 'この輸送された物資は、受信済み扱いになります',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'この値はその地点の外側までの距離の小さなマウントを追加します。この値が無い場合は、一番外側の地点が境界ボックスになり、表示されない可能性があります。',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'この値はこの地域を表示する時に使う最小の幅と高さを示します。この値がない場合、ある単一の地点を表示するときにその周辺の範囲は表示されません。地図が表示された後では、好きな大きさに拡大・縮小できます。',
'Thunderstorm': '雷雨',
'Thursday': '木曜日',
'Ticket Details': 'チケットの詳細',
'Ticket ID': 'チケットID',
'Ticket added': 'チケットを追加しました',
'Ticket deleted': 'チケットを削除しました',
'Ticket updated': 'チケットを更新しました',
'Ticket': 'チケット',
'Ticketing Module': 'チケット発行モジュール',
'Tickets': 'チケット',
'Tilt-up concrete': 'ティルトアップ式コンクリート',
'Timber frame': '木造',
'Time needed to collect water': '水の確保に必要な時間',
'Time of Request': '要求発生時刻',
'Timeline Report': 'タイムラインレポート',
'Timeline': 'タイムライン',
'Timestamp': 'タイムスタンプ',
'Title': 'タイトル',
'To Location': '送付先ロケーション',
'To Organization': '送付先団体',
'To Person': '送付先人物情報',
'To Site': '送付先サイト',
'To begin the sync process, click the button on the right => ': '右のボタンを押すと、データ同期が開始されます。',
'To begin the sync process, click this button => ': 'このボタンを押すと、データ同期が開始されます。=>',
'To create a personal map configuration, click ': '個人用の地図設定を作成するにはクリックしてください',
'To delete': '削除する側',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'OpenStreetMapを編集する際は、models/000_config.pyで定義されている設定を編集してください',
'To submit a new job, use the': 'jobを新規送信するには、以下を使用してください。',
'To variable': '変数に',
'To': ' ',
'Tools': 'ツール',
'Tornado': '竜巻',
'Total # of Beneficiaries Reached ': '支援が到達した受益者の合計数 ',
'Total # of Target Beneficiaries': '受益対象者の合計人数',
'Total # of households of site visited': '訪問した世帯数',
'Total Beds': '合計ベッド数',
'Total Beneficiaries': '受益者の総数',
'Total Cost per Megabyte': 'メガバイト毎の合計費用',
'Total Cost per Minute': '一分毎の合計費用',
'Total Households': '総世帯数',
'Total Monthly Cost': '月額総計',
'Total Monthly Cost: ': '月毎の費用の合計: ',
'Total Monthly': '月ごとの合計',
'Total One-time Costs': '1回毎の費用総計',
'Total Persons': '合計者数',
'Total Recurring Costs': '経常費用総計',
'Total Unit Cost': '単価合計',
'Total Unit Cost: ': '単価合計: ',
'Total Units': '総数',
'Total gross floor area (square meters)': '延面積(平方メートル)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'この病院のベッド数総計。日時レポートにより、自動的に更新されます。',
'Total number of houses in the area': 'この地域の家屋総数',
'Total number of schools in affected area': '被災地内の学校総数',
'Total population of site visited': '訪問地域の総人口数',
'Total': '合計数',
'Totals for Budget:': '予算の合計:',
'Totals for Bundle:': 'Bundleの合計:',
'Totals for Kit:': 'Kitの合計:',
'Tourist Group': '旅行者グループ',
'Town': '町',
'Traces internally displaced people (IDPs) and their needs': '国内の避難している人(IDP)と彼らの必要としている物資/サービスの追跡',
'Tracing': '履歴の追跡',
'Track Details': '追跡情報の詳細',
'Track deleted': '追跡情報を削除しました',
'Track updated': '追跡情報を更新しました',
'Track uploaded': '追跡情報をアップデートしました',
'Track': '追跡情報',
'Tracking of Projects, Activities and Tasks': 'プロジェクトや支援活動、タスクの追跡',
'Tracking of basic information on the location, facilities and size of the Shelters': '避難所の基本情報(場所、施設、規模等)を追跡',
'Tracks requests for aid and matches them against donors who have pledged aid': '支援要請を管理し、救援物資の提供者とマッチングします。',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '避難所のロケーション、配置、収容能力と被災者の状態を追跡します。',
'Tracks': 'トラック',
'Traffic Report': 'トラフィックレポート',
'Transfer': '輸送',
'Transit Status': '輸送状態',
'Transit': '移動中の立ち寄り',
'Transit. Status': '輸送状態',
'Transition Effect': '推移への影響',
'Transparent?': '透明ですか?',
'Transportation assistance, Rank': '移動 / 輸送支援、ランク',
'Trauma Center': '心的外傷センター',
'Travel Cost': '移動費',
'Tree': '樹木',
'Tropical Storm': '熱帯低気圧',
'Tropo Messaging Token': 'Tropo メッセージのトークン',
'Tropo Settings': 'Tropo 設定',
'Tropo Voice Token': 'Tropo 音声トークン',
'Tropo settings updated': 'Tropo 設定を更新しました',
'Truck': 'トラック',
'Try checking the URL for errors, maybe it was mistyped.': '入力したURLに間違いがないか確認してください。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'ページの再読み込みを行うか、あるいはアドレスバーに直接URLを入力してみてください。',
'Try refreshing the page or hitting the back button on your browser.': 'ページを再読込するか、ブラウザの[戻る]ボタンを押してください。',
'Tsunami': '津波',
'Tuesday': '火曜日',
'Twitter ID or #hashtag': 'Twitter ID あるいは #ハッシュタグ',
'Twitter Settings': 'Twitter設定',
'Type of Construction': '建物の種類',
'Type of cause': '原因のタイプ',
'Type of latrines': 'トイレの種類',
'Type of place for defecation': '排泄用地の種類',
'Type of water source before the disaster': '災害発生前の水の確保方法',
'Type': 'タイプ',
'Types of health services available': '利用可能な健康サービスの種別',
'Types of water storage containers available': '利用可能な水貯蔵容器の種別',
'UID': 'ユニークID',
'UN': '国連',
'UTC Offset': 'UTC(世界標準時刻)との差',
'Unable to parse CSV file!': 'CSVファイルをパースできません。',
'Understaffed': '人員不足',
'Unidentified': '詳細不明',
'Unit Bed Capacity': 'ベッド収容数',
'Unit Cost': '単価',
'Unit Details': '単位の詳細',
'Unit Name': '単位名',
'Unit Set': '単位の設定',
'Unit Short Code for e.g. m for meter.': '単位の略称、例えばメートルはmと表記。',
'Unit added': '単位を追加しました',
'Unit deleted': '単位を削除しました',
'Unit of Measure': '1個口の内訳',
'Unit updated': '単位を更新しました',
'Unit': '単位',
'Units of Measure': '測定単位',
'Units': '単位',
'Unknown Peer': '登録に無いデータ同期先',
'Unknown type of facility': '施設規模不明',
'Unknown': '不明',
'Unreinforced masonry': '補強されていない石造建築物',
'Unresolved Conflicts': '未解決のデータ競合',
'Unsafe': '危険な',
'Unselect to disable the modem': 'モデムを無効化するにはチェックを外す',
'Unsent': '未送信',
'Unsupported data format!': 'サポートされていないデータフォーマットです。',
'Unsupported method!': 'サポートされていないメソッドです。',
'Unsupported method': 'サポートされていないメソッドです',
'Update Activity Report': '支援活動レポートの更新',
'Update Cholera Treatment Capability Information': 'コレラ対策能力情報を更新',
'Update Import Job': 'Import Jobの更新',
'Update Request': '支援要請を更新',
'Update Service Profile': 'サービスプロファイルの更新',
'Update Task Status': 'タスク状況の更新',
'Update Unit': '単位の更新',
'Update if Master': 'マスターサイトなら更新する',
'Update if Newer': '新しいものがあれば更新する',
'Update your current ordered list': '現在の順序付きリストの更新',
'Update': '更新',
'Upload Photos': '写真のアップロード',
'Upload Spreadsheet': 'スプレッドシートのアップロード',
'Upload Track': '追跡情報のアップロード',
'Upload a Spreadsheet': 'スプレッドシートをアップロード',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '画像ファイルをアップロード(bmp,gif,jpeg,png) 最大300x300ピクセル',
'Upload an image file here.': '画像ファイルをここにアップロードしてください',
'Upload an image, such as a photo': '写真などのイメージをアップロードしてください',
'Upload': 'アップロード',
'Urban Fire': '都市火災',
'Urban area': '市街地',
'Urdu': 'ウルドゥー語',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '複雑なクエリを構築するには、ANDは (...)&(...) を、ORは (...)|(...) を、NOTは ~(...) を使用してください。',
'Use default': 'デフォルト値を使用',
'Use these links to download data that is currently in the database.': 'これらのリンクを使用して、現在データベースにあるデータをダウンロードします。',
'Use this space to add a description about the Bin Type.': 'Bin Typeに関する説明は、このスペースに記載してください。',
'Use this space to add a description about the site location.': 'このスペースを使って、サイトの位置の説明を追加してください。',
'Use this space to add a description about the warehouse/site.': '倉庫/Siteに関する説明は、このスペースに記載してください。',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Site/倉庫に関する追加情報を記載するには、このスペースを使用してください。',
'Used to import data from spreadsheets into the database': 'スプレッドシートからデータベースにデータをインポートするために使われます',
'User %(first_name)s %(last_name)s Approved': '%(first_name)s %(last_name)s のユーザー登録が承認されました',
'User %(id)s Logged-in': 'ユーザー %(id)s がログインしています',
'User %(id)s Logged-out': 'ユーザー %(id)s がログアウトしました',
'User %(id)s Profile updated': 'ユーザ %(id)s のプロファイルを更新しました',
'User %(id)s Registered': 'ユーザー%(id)sを登録しました',
'User Account has been Disabled': 'ユーザアカウントが無効になっています',
'User Details': 'ユーザーの詳細',
'User ID': 'ユーザーID',
'User Management': 'ユーザー管理',
'User Profile': 'ユーザープロファイル',
'User Requests': 'ユーザー要求',
'User Updated': 'ユーザーを更新しました',
'User added': 'ユーザーを追加しました',
'User already has this role': 'この権限のあるユーザー',
'User deleted': 'ユーザーを削除しました',
'User updated': 'ユーザーを更新しました',
'User': 'ユーザー',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'データ同期先との認証に使うユーザ名。HTTPベーシック認証のみサポートしています。',
'Username': 'ユーザー名',
'Users removed': 'ユーザーを削除しました',
'Users': 'ユーザー',
'Usual food sources in the area': 'この地域の普段の食料調達方法',
'Utilities': 'ユーティリティ',
'Utility, telecommunication, other non-transport infrastructure': 'ユーティリティ、通信、その他のインフラ設備(交通以外)',
'Vacancies': '欠員',
'Value': '値',
'Various Reporting functionalities': '多種多様な報告を行う機能',
'Vehicle Crime': '車両犯罪',
'Vehicle Types': '車両の種別',
'Vehicle': '車両',
'Vendor': 'ベンダー',
'Verification Email sent - please check your email to validate. If you do not receive this email please check you junk email or spam filters': 'メールアドレス確認用のメールを送信しました。メールに記載された確認用URLにアクセスしてください。もしメールが届かない場合迷惑メールフォルダに入ってしまっている可能性がありますのでご確認ください。',
'Verification Status': '認証ステータス',
'Verified': '認証済み',
'Verified?': '認証(ログイン)できません.メールアドレス・パスワードを確認してください.',
'Verify Password': 'パスワード再確認',
'Verify password': 'パスワードの確認',
'Version': 'バージョン',
'Very High': '非常に高い',
'View Alerts received using either Email or SMS': '電子メールまたはSMSで受信したアラートの閲覧',
'View Fullscreen Map': '地図をフルスクリーン表示',
'View Image': '画像の閲覧',
'View On Map': '地図上で閲覧',
'View Outbox': '送信箱の表示',
'View Picture': '写真の表示',
'View Requests for Aid': '援助要請を閲覧',
'View Settings': '設定の確認',
'View Tickets': 'チケットの閲覧',
'View and/or update their details': '詳細の閲覧および更新',
'View or update the status of a hospital.': '病院のステータスの閲覧と更新',
'View pending requests and pledge support.': '処理中の要求と寄付サポートの閲覧',
'View the hospitals on a map.': '病院の場所を地図上で表示します。',
'Village Leader': '村長',
'Village': '村落',
'Visible?': '表示しますか?',
'Visual Recognition': '画像認識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山活動',
'Volume - Fluids': '流量 - 液状物',
'Volume - Solids': '流量 - 固形物',
'Volume Capacity': '容量',
'Volume/Dimensions': '容量/外形寸法',
'Volunteer Data': 'ボランティアデータ',
'Volunteer Details': 'ボランティアの詳細',
'Volunteer Management': 'ボランティアの管理',
'Volunteer Project': 'ボランティアプロジェクト',
'Volunteer Registration': 'ボランティア登録',
'Volunteer Registrations': 'ボランティア登録',
'Volunteer Request': 'ボランティア要請',
'Volunteer added': 'ボランティアを追加しました',
'Volunteer deleted': 'ボランティアを削除しました',
'Volunteer details updated': 'ボランティアの詳細を更新しました',
'Volunteer registration added': 'ボランティア登録を追加しました',
'Volunteer registration deleted': 'ボランティア登録を削除しました',
'Volunteer registration updated': 'ボランティア登録を更新しました',
'Volunteers were notified!': 'ボランティアに通知されました',
'Volunteers': 'ボランティア',
'Vote': '投票',
'Votes': '投票',
'WASH': '除染',
'WMS Browser Name': 'WMSブラウザ名',
'WMS Browser URL': 'WMSブラウザのURL',
'Walking Only': '徒歩のみ',
'Walking time to the health service': '医療サービス提供所までの徒歩時間',
'Wall or other structural damage': '壁やその他の構造の損傷',
'Warehouse Details': '倉庫の詳細',
'Warehouse Item Details': '倉庫物資の詳細',
'Warehouse Item added': '倉庫物資を追加しました',
'Warehouse Item deleted': '倉庫内物資を削除しました',
'Warehouse Item updated': '倉庫物資を更新しました',
'Warehouse Items': '倉庫に備蓄中の物資',
'Warehouse Management': '倉庫管理',
'Warehouse added': '倉庫を追加しました',
'Warehouse deleted': '倉庫を削除しました',
'Warehouse updated': '倉庫を更新しました',
'Warehouse': '倉庫',
'Warehouse/Sites Registry': '倉庫/Siteの登録',
'Warehouses': '倉庫',
'WatSan': '給水と衛生',
'Water Level still high?': '水位はまだ高いままですか?',
'Water Sanitation Hygiene': '水質衛生',
'Water collection': '給水',
'Water gallon': 'ガロン容器',
'Water storage containers available for HH': '世帯用の水貯蔵容器が利用可能である',
'Water storage containers in households': '世帯の水貯蔵容器',
'Water storage containers sufficient per HH': '世帯毎に1つ以上の水貯蔵容器が利用可能である',
'Water supply': '水の供給',
'Water': '水',
'Waterspout': '水上竜巻',
'Way Bill(s)': '移動費',
'We have tried': '私達は試行しました',
'Website': 'ウェブサイト',
'Wednesday': '水曜日',
'Weekly': '週次',
'Weight (kg)': '体重 (kg)',
'Weight': '体重',
'Welcome to the Sahana Eden Disaster Management Platform': 'Sahana Eden -災害情報管理プラットフォームへようこそ',
'Welcome to the Sahana Eden Disaster Management System': 'Sahana Eden -災害情報管理システムへようこそ',
'Welcome to the Sahana Portal at ': 'Sahana ポータルへようこそ: ',
'Welcome to the Sahana Portal at': 'Sahanaポータルにようこそ',
'Well-Known Text': '既知の文章',
'Were basic medical supplies available for health services prior to the disaster?': '災害前に、基本的な医療サービスが機能していたかどうかを記載してください',
'Were breast milk substitutes used prior to the disaster?': '災害前に利用していた母乳代用品の入手源を記載してください',
'Were there cases of malnutrition in this area prior to the disaster?': 'この地域で、災害前に栄養失調が発生していたかどうかを記載してください',
'Were there health services functioning for the community prior to the disaster?': '災害前、共同体でヘルスサービスが機能していたかどうかを記載してください',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '災害発生前から栄養失調の報告があった、あるいはその証跡があったかどうかを記載します',
'What are the factors affecting school attendance?': '生徒の就学状況に影響する要因を記載してください',
'What are your main sources of cash to restart your business?': 'ビジネス再開に必要な現金の、主な調達源を記載してください',
'What are your main sources of income now?': '現在の主な収入源を記載してください',
'What do you spend most of your income on now?': '現在の主な支出要因を記載してください',
'What food stocks exist? (main dishes)': '備蓄食料の種類(主皿)',
'What food stocks exist? (side dishes)': '備蓄食料の種類(副皿)',
'What is the estimated total number of people in all of these institutions?': '上記施設内の居住者を総計すると、おおよそどの程度になるかを記載してください',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '洗濯、料理、入浴など、日常生活で必要となる清潔な水の、主な入手源を記載してください',
'What is your major source of drinking water?': '飲料水の主な入手源を記載してください',
'What type of latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレのタイプは?',
'What type of salvage material can be used from destroyed houses?': '全壊した家屋から回収した部材が流用可能な用途を記載します',
'What type of salvage material can be used from destroyed schools?': '倒壊した校舎において、再利用できる部材は何ですか?',
'What types of health problems do children currently have?': '小児が現在抱えている健康問題のタイプを記載してください',
'What types of health problems do people currently have?': '住人たちが現在抱えている健康問題のタイプを記載してください',
'What types of health services are still functioning in the affected area?': '現在、被災地で機能しているヘルスサービスの種類を選択してください',
'What types of household water storage containers are available?': '世帯で使っている水貯蔵容器のタイプを選択してください',
'What were your main sources of income before the disaster?': '災害発生前の主な収入源を選択してください',
'Wheat': '小麦',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、それらポイントの外に余白を付与します。指定しない場合、表示領域とポイントが重なり、表示範囲から外れてしまう可能性があります。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、地域を表示する際の横幅と縦高の最小値となります。指定しない場合、対象の一点のみ表示され、その周辺は表示されません。一度表示された後であれば、縮尺の変更が可能です。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': '地点の集合にフォーカスを合わせた地図を表示すると、この地図はそれら地点の集合を表示できる範囲に拡大・縮小します',
'When reports were entered': 'いつ報告が入力されたか',
'Where are the alternative places for studying?': '学校以外で、学習が可能な施設の種類を選択してください',
'Where are the separated children originally from?': '保護者が居ない児童の住居地はどこですか?',
'Where do the majority of people defecate?': 'トイレはどこで済ませますか?',
'Where have the children been sent?': '疎開先の情報がある場合は記載してください',
'Where is solid waste disposed in the village/camp?': '村落/仮泊施設内での、固形廃棄物処理場所を記載してください',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Sahana Eden, Sahana Agasti, Ushahidi あるいは他のシステムの場合も',
'Whiskers': 'ほおひげ',
'Who is doing what and where': '誰がどこで何をしているか',
'Who usually collects water for the family?': '日頃、家族のために水を採取しているのは誰か?',
'Width': '横幅',
'Wild Fire': '野火',
'Wind Chill': '風速冷却',
'Window frame': 'ウィンドウ枠',
'Winter Storm': '吹雪',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '災害発生後、女性や少女に対する暴力事件が発生したかどうかを記載してください。具体的な人名や場所を記載する必要はありません',
'Women of Child Bearing Age': '出産年齢の女性',
'Women participating in coping activities': '女性が災害対応に従事',
'Women who are Pregnant or in Labour': '妊娠中、あるいは労働中の女性',
'Womens Focus Groups': '女性のフォーカスグループ(Womens Focus Groups)',
'Wooden plank': '木製板',
'Wooden poles': '木製の柱',
'Working hours end': '作業終了時刻',
'Working hours start': '作業開始時刻',
'Working or other to provide money/food': '金銭/食料調達のため就労、あるいは活動を実施',
'Would you like to display the photos on the map?': '地図上に写真を表示しますか?',
'X-Ray': 'X線',
'Year built': '建築年',
'Year of Manufacture': '製造年',
'Yellow': '黄色',
'Yes': 'はい',
'You are a recovery team?': 'あなたが遺体回収チームの場合',
'You are attempting to delete your own account - are you sure you want to proceed?': '自分のアカウントを削除しようとしています。本当に削除しますか?',
'You are currently reported missing!': 'あなたが行方不明者として登録されています!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '同期に関する設定は、「設定」セクションで行うことができます。設定には、UUID(unique identification number)、同期スケジュール、ビーコンサービス等が含まれます。同期設定は以下のリンクから変更可能です。',
'You can click on the map below to select the Lat/Lon fields': '下の地図をクリックすることで、緯度経度情報を入力できます',
'You can click on the map below to select the Lat/Lon fields:': '緯度と経度の設定は、以下の地図をクリックすることでも可能です:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '経度/緯度の項目は、地図を選択することでも登録可能です。経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'You can select the Draw tool (': '選択可能な描画ツール (',
'You can select the Draw tool': 'ドローツールを選択できます',
'You can set the modem settings for SMS here.': 'SMS用モデムの設定をすることができます。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '変換ツールを使うことで、GPS、あるいはDegrees/Minutes/Seconds形式からデータを変換できます。',
'You do no have permission to cancel this received shipment.': '輸送の受け取りをキャンセルする権限がありません',
'You do no have permission to cancel this sent shipment.': '輸送の送付をキャンセルする権限がありません',
'You do no have permission to make this commitment.': 'このコミットを作成する権限がありません',
'You do no have permission to receive this shipment.': 'この輸送を受け取る権限がありません',
'You do no have permission to send this shipment.': 'この輸送を開始する権限がありません',
'You do not have permission for any site to add an inventory item.': 'あなたには他の場所から在庫アイテムを追加する権限はありません',
'You do not have permission for any site to make a commitment.': 'どの場所にも受け入れを示す権限が有りません。',
'You do not have permission for any site to make a request.': '支援要請を作成する権限がありません',
'You do not have permission for any site to perform this action.': 'この操作をするための権限がありません',
'You do not have permission for any site to receive a shipment.': '物資の輸送を受け取る権限がありません',
'You do not have permission for any site to send a shipment.': '物資の輸送をする権限がありません',
'You do not have permission to send a shipment from this site.': 'あなたはこのサイトから物資を送る権限はありません',
'You have a personal map configuration. To change your personal configuration, click ': '個人用地図設定があります。あなたの個人用地図設定を編集するにはクリックしてください',
'You have found a dead body?': '遺体を発見しましたか?',
'You must be logged in to register volunteers.': 'ボランティアとして登録するには、ログインする必要があります',
'You must be logged in to report persons missing or found.': '行方不明者の発見状況を登録するには、ログインする必要があります。',
'You must provide a series id to proceed.': '処理を行うにはシリーズIDを指定する必要があります。',
'You should edit OpenStreetMap settings in models/000_config.py': 'OpenStreetMapの設定を変更するには、models/000_config.pyを編集してください',
'You should edit Twitter settings in models/000_config.py': 'Twitter設定を変更するには、models/000_config.pyを編集してください。',
'Your Account is Approved - you can now login\n %s%s/': '利用者登録が完了しました。リンク先のログインページで あなたが登録したユーザー名とパスワードを入力してログインしてください。\n %s%s/',
'Your Account is Approved': '利用者登録が完了しました',
'Your action is required. Please approve user %s asap: ': 'あなたの行動が要求されています。ただちにユーザー %s を承認してください。',
'Your action is required. Please approve user': 'ユーザーから承認の依頼が届いています。承諾お願いします',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '解決項目の順番付きリストは以下です。再度投票することによって変更可能です。',
'Your post was added successfully.': '投稿が成功しました',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'あなたがお使いのシステムには、ユニークID (UUID) が割り当てられており、このIDを用いて他のコンピュータがあなたのシステムを同定します。あなたの UUID を閲覧するには、同期 -> 同期設定と進んでください。そのページでは、他の設定を閲覧することもできます。',
'ZIP/Postcode': '郵便番号',
'Zinc roof': 'トタン屋根',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'ズームイン: マップをクリックするか、拡大したい場所をドラッグで選択してください',
'Zoom Levels': 'ズームレベル',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'ズームアウト: マップをクリックするか、拡大したい地点をマウスの左ボタンでドラッグしてください',
'Zoom to Current Location': '現在の場所を拡大',
'Zoom to maximum map extent': 'マップの最大範囲までズーム',
'Zoom': 'ズーム',
'act': '活動',
'active': 'アクティブ',
'added': '追加しました',
'all records': '全てのレコード',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'では、スタッフや設備、それらの管理コストまで含めた予算編成を行ないます。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '自然災害による被災影響調査の作成、および管理を許可する',
'an individual/team to do in 1-2 days': '個人やチーム単位で、1-2日中に実施するべき事柄をさします。',
'approved': '承認された',
'assigned': '担当者・部門が確定',
'average': '平均的',
'black': '黒',
'blond': 'ブロンド',
'blue': '青',
'brown': '茶色',
'business_damaged': 'ビジネスへの損害',
'by': ' ',
'c/o Name': 'c/o 名前',
'can be used to extract data from spreadsheets and put them into database tables.': 'スプレッドシートからデータを抽出して、データベーステーブルに挿入できます。',
'can use this to identify the Location': 'ここからロケーションの特定が可能です',
'caucasoid': '白人',
'check all': '全てチェック',
'click for more details': '詳細はクリック',
'collateral event': '付帯イベント',
'completed': '完了',
'confirmed': '確認済',
'consider': '考慮',
'criminal intent': '犯罪目的',
'crud': '性病',
'curly': '縮れ毛',
'currently registered': '登録済み',
'daily': '日次',
'dark': '濃い',
'data uploaded': 'データがアップロードされました',
'database %s select': 'データベース%sの選択',
'database': 'データベース',
'db': 'データベース',
'delete all checked': 'チェックされた項目を全て削除',
'deleted': '削除されました',
'denied': '拒否されました',
'description': '説明',
'design': 'デザイン',
'diseased': '罹患中',
'displaced': '避難中',
'divorced': '離別',
'done!': '完了!',
'duplicate': '重複',
'edit': '編集',
'editor': '編集者',
'eg. gas, electricity, water': 'ガス、電気、水道など',
'embedded': '埋め込まれた',
'enclosed area': '専用地',
'export as csv file': 'csvファイルとしてエクスポート',
'fat': '肥満',
'feedback': '現地からの要望',
'female': '女性',
'final report': '最終報告書',
'flush latrine with septic tank': '浄化槽つき水洗トイレ',
'follow-up assessment': 'アセスメントのフォローアップ',
'food_sources': '食糧供給源',
'forehead': 'ひたい',
'form data': 'フォームデータ',
'from Twitter': 'Twitter経由',
'full': '完全',
'getting': '取得中',
'green': '緑',
'grey': '灰色',
'here': 'ここ',
'high': '高い',
'hourly': '1時間毎',
'households': '世帯情報',
'human error': 'ヒューマンエラー',
'identified': '身元確認済み',
'ignore': '無視する',
'immediately': '即応',
'in Deg Min Sec format': 'Deg Min Sec フォーマットで',
'in GPS format': 'GPS フォーマットで',
'in Inv.': '個',
'inactive': '休止中',
'initial assessment': '初期アセスメント',
'injured': '負傷中',
'insert new %s': '%sの新規挿入',
'insert new': '新規挿入',
'invalid request': '無効な要求',
'invalid': '無効',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'は、災害犠牲者とその家族、特に身元の判明した遺体、避難者、難民など、全ての情報を集約可能な中央オンラインレポジトリです。名前、年齢、連絡先番号、IDカード番号、避難した場所、その他の詳細が記録されます。人物の写真や指紋をアップロードすることができます。効率性と利便性のため、人物をグループ分けすることができます。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'は、支援団体による救援活動や復興プロジェクトの作業を管理するために、複数のサブモジュールを組み合わせて高度な機能を実現しようと考えており、物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、輸送管理、調達、財務記録、その他様々な資産やリソースの管理といった機能を備えています',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '全ての入荷伝票を追跡することで、カテゴリー分けや適切な実行場所への配分を行う',
'kilogram': 'キログラム',
'kit': 'キット',
'latrines': 'トイレ',
'leave empty to detach account': 'アカウントを取り外すには空欄のままにしてください',
'legend URL': '凡例の URL',
'light': '淡い',
'liter': 'リットル',
'locations': 'ロケーション',
'login': 'ログイン',
'long': '長い',
'long>12cm': '12cm以上',
'low': '低い',
'male': '男性',
'manual': 'マニュアル',
'married': '既婚',
'max': '最大',
'maxExtent': '最大範囲',
'maxResolution': '最高分解能',
'medium': '中',
'medium<12cm': '12cm未満',
'menu item': 'メニューアイテム',
'message_id': 'メッセージID',
'meter cubed': '立方メートル',
'meter': 'メートル',
'meters': 'メートル',
'min': '最小',
'module allows the an inspector to fill information for buildings.': 'モジュールでは、建築物の調査情報を記録できます。',
'module allows the site administrator to configure various options.': 'モジュールを使うことで、サイト管理者が様々な項目を設定する際の手間を省くことができます。',
'module helps monitoring the status of hospitals.': 'モジュールでは、病院の状態をモニタできます。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'モジュールでは、オンラインマッピング(GIS)を使用して、現在の災害地域の状態を俯瞰することができます。',
'mongoloid': '黄色人種',
'more': 'その他の項目 ',
'n/a': 'データなし',
'natural hazard': '自然災害',
'negroid': '黒人',
'never': 'まだ',
'new ACL': '新規ACL',
'new record inserted': '新規レコードを挿入しました',
'new': '新規登録',
'next 100 rows': '次の100行',
'no': ' ',
'none': 'なし',
'normal': '通常',
'not accessible - no cached version available!': 'アクセスできません - キャッシュされたバージョンがありません!',
'not accessible - using cached version from': 'アクセス不可 - キャッシュ版を使用しています',
'not specified': '未指定',
'num Zoom Levels': 'ズーム倍率',
'obsolete': '孤立中',
'on': ' ',
'once': '一度',
'open defecation': '野外',
'operational intent': '運用目的',
'or import from csv file': 'またはcsvファイルからインポート',
'other': 'その他',
'over one hour': '1時間以上',
'pack of 10': '10のパック',
'people': '居住者情報',
'piece': 'ピース(単位)',
'pit latrine': '穴掘りトイレ',
'pit': '堀穴',
'postponed': '実施を延期',
'preliminary template or draft, not actionable in its current form': '現行フォーム内で実用的でない予備テンプレートまたはドラフト',
'previous 100 rows': '前の100行',
'primary incident': '優先すべきインシデント',
'problem connecting to twitter.com - please refresh': 'twitter.comへの接続に問題が発生しました。再読込を行ってください',
'provides a catalogue of digital media.': 'デジタルメディアのカタログを提供します',
'record does not exist': 'レコードが存在しません',
'record id': 'レコードID',
'records deleted': 'レコードを削除しました',
'red': '赤い',
'reported': '報告済み',
'reports successfully imported.': 'レポートは正しくインポートできました',
'representation of the Polygon/Line.': 'Polygon/Lineの表現',
'retired': '終了',
'retry': '再試行',
'river': '河川',
'sack 20kg': '袋 20kg',
'sack 50kg': '袋 50kg',
'secondary effect': '副次効果',
'see comment': 'コメント参照',
'selected': '選択された',
'separated from family': '家族とはぐれた',
'separated': '別居',
'shaved': '坊主',
'shift_start': 'シフト開始',
'short': '小柄',
'short<6cm': '6cm未満',
'sides': '側面',
'sign-up now': '今すぐ登録',
'simple': '単純な',
'single': '独身',
'slim': 'やせ型',
'specify': '明記してください',
'staff': 'スタッフ',
'state location': 'ステートロケーション',
'state': '状態',
'straight': '直毛',
'suffered financial losses': '経済的損失',
'table': 'テーブル',
'table_name': 'テーブル名',
'tall': '大柄',
'technical failure': '技術的な原因',
'this': 'この',
'times and it is still not working. We give in. Sorry.': '回繰り返しましたが、処理を完了できません。ご迷惑をおかけしますが、処理を中止します。',
'to access the system': 'してシステムにアクセスしてください',
'ton': 'トン',
'tonsure': '剃髪',
'total': '合計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': '実行中のPythonで tweepyモジュールが利用できません。Tropo以外でのTwitter機能利用で必要です',
'unable to parse csv file': 'csvファイルをパースできません。',
'unapproved': '承認されていない',
'uncheck all': 'チェックをすべて外す',
'unidentified': '詳細不明',
'uninhabitable = foundation and structure destroyed': '利用不可能 = 基礎構造や土台部分の破壊など',
'unknown': '不明',
'unspecified': 'その他',
'unverified': '未検証',
'updated': '更新しました',
'updates only': '更新のみ',
'urgent': '緊急',
'using default': '標準値を使用',
'verified': '確認済み',
'volunteer': 'ボランティア',
'wavy': '波状',
'weekly': '週次',
'white': '白',
'wider area, longer term, usually contain multiple Activities': '活動範囲が広く、長期的目標を有しており、複数の支援活動を包括します。',
'widowed': '死別',
'window': '窓',
'windows broken, cracks in walls, roof slightly damaged': '窓破損、壁にひび割れ、屋根の一部損傷',
'within human habitat': '人間の居住地域内',
'xlwt module not available within the running Python - this needs installing for XLS output!': '実行中のPythonでxlwtモジュールが利用できません。XLS出力に必要です。',
'yes': 'はい',
}
| mit |
ravindrasingh22/ansible | lib/ansible/plugins/lookup/cartesian.py | 59 | 1732 | # (c) 2013, Bradley Young <young.bradley@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from itertools import product
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
    """
    Create the cartesian product of lists
    [1, 2, 3], [a, b] -> [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]
    """

    def __lookup_variabless(self, terms, variables):
        """Resolve each term (which may be templated) into a concrete list.

        :param terms: list of (possibly templated) lists
        :param variables: available variables used for templating
        :returns: list of fully-resolved lists
        """
        results = []
        for x in terms:
            intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
            results.append(intermediate)
        return results

    def run(self, terms, variables=None, **kwargs):
        """Return the flattened cartesian product of the given lists.

        :param terms: a list of lists to combine
        :param variables: available variables, used to resolve the terms
        :raises AnsibleError: if no lists were supplied
        :returns: list of flattened product tuples
        """
        terms = self.__lookup_variabless(terms, variables)

        my_list = terms[:]
        if not my_list:
            raise AnsibleError("with_cartesian requires at least one element in each list")

        # BUG FIX: itertools.product() only accepts iterables and the
        # 'repeat' keyword; 'fillvalue' belongs to itertools.zip_longest
        # and passing it here raised TypeError at runtime.
        return [self._flatten(x) for x in product(*my_list)]
| gpl-3.0 |
noooway/exj | exercises_and_metrics_types/MetricSingleInputWidget.py | 1 | 2141 | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from MetricSingle import *
class MetricSingleInputWidget( GridLayout ):
    """One-row Kivy widget for editing a single-valued metric.

    Shows the metric's name, a text input for its value, a delete button
    and a free-form comment field.  Every user edit is propagated to
    ``current_training_screen`` via its
    ``update_training_from_user_input`` callback.
    """

    def __init__( self, metric_single,
                  current_training_screen, **kwargs ):
        # metric_single: object whose ``description`` dict provides
        #     'name', 'value' and optionally 'comment'.
        # current_training_screen: must provide the callbacks
        #     ``update_training_from_user_input`` and ``remove_exercise``
        #     (both are used below).
        super( MetricSingleInputWidget, self ).__init__( **kwargs )
        # Single-column grid with fixed-height rows; the widget's total
        # height tracks its content (needed e.g. inside a scroll view).
        self.cols = 1
        self.spacing = 1
        self.row_default_height = 40
        self.row_force_default = True
        self.size_hint_y = None
        self.bind( minimum_height = self.setter('height') )
        self.metric_name = metric_single.description['name']
        # First row: metric label, value input and a delete button.
        input_layout = BoxLayout( orientation = 'horizontal',
                                  spacing = 30 )
        metric_label = Label( text = self.metric_name )
        input_layout.add_widget( metric_label )
        # The previous value is shown only as a hint, not as editable text.
        self.value_input = TextInput(
            hint_text = str( metric_single.description['value'] ) )
        self.value_input.bind(
            text = current_training_screen.update_training_from_user_input)
        input_layout.add_widget( self.value_input )
        del_metric_btn = Button( text = "Del Metr", size_hint_x = 0.3 )
        # NOTE(review): removal is delegated to remove_exercise() even
        # though this widget represents a metric -- presumably the screen
        # handles both uniformly; confirm.
        del_metric_btn.on_press = \
            lambda: current_training_screen.remove_exercise( self )
        input_layout.add_widget( del_metric_btn )
        self.add_widget( input_layout )
        # Second row: free-form comment, pre-filled hint from the metric.
        comment_text = metric_single.description.get(
            'comment', 'Comment Metric')
        self.comment = TextInput( hint_text = comment_text )
        self.comment.bind(
            text = current_training_screen.update_training_from_user_input)
        self.add_widget( self.comment )

    def exercise_from_user_input( self ):
        """Build and return a MetricSingle from the current user input.

        The value is taken verbatim from the text input; the comment is
        stored under 'comment' in the description dict.
        """
        # todo: add input check
        value_input = self.value_input.text
        comment = self.comment.text
        metric = MetricSingle( name = self.metric_name,
                               value = value_input,
                               description_dict = { 'comment': comment } )
        return( metric )
| mit |
alsrgv/tensorflow | tensorflow/python/keras/utils/vis_utils_test.py | 9 | 2589 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Layer utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import keras
from tensorflow.python.keras.utils import vis_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
class ModelToDotFormatTest(test.TestCase):
  """Tests that Keras models can be rendered to dot/PNG via vis_utils."""

  def test_plot_model_cnn(self):
    # A small conv net exercises the basic one-node-per-layer rendering.
    model = keras.Sequential([
        keras.layers.Conv2D(
            filters=2, kernel_size=(2, 3), input_shape=(3, 5, 5), name='conv'),
        keras.layers.Flatten(name='flat'),
        keras.layers.Dense(5, name='dense'),
    ])
    dot_img_file = 'model_1.png'
    try:
      vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
      self.assertTrue(file_io.file_exists(dot_img_file))
      file_io.delete_file(dot_img_file)
    except ImportError:
      # pydot/graphviz may be unavailable; plotting then cannot run.
      pass

  def test_plot_model_with_wrapped_layers_and_models(self):
    inputs = keras.Input(shape=(None, 3))
    # A layer inside a Wrapper (Bidirectional), a model inside a Wrapper
    # (TimeDistributed) and a shared submodel must all be plottable.
    net = keras.layers.LSTM(6, return_sequences=True, name='lstm')(inputs)
    net = keras.layers.Bidirectional(
        keras.layers.LSTM(16, return_sequences=True, name='bilstm'))(net)
    submodel = keras.Sequential(
        [keras.layers.Dense(32, name='dense', input_shape=(None, 32))])
    net = keras.layers.TimeDistributed(submodel)(net)
    # Re-using the submodel covers the shared-model case.
    outputs = submodel(net)
    model = keras.Model(inputs, outputs)
    dot_img_file = 'model_2.png'
    try:
      vis_utils.plot_model(
          model, to_file=dot_img_file, show_shapes=True, expand_nested=True)
      self.assertTrue(file_io.file_exists(dot_img_file))
      file_io.delete_file(dot_img_file)
    except ImportError:
      pass
if __name__ == '__main__':
test.main()
| apache-2.0 |
rhinstaller/libblockdev | tests/part_test.py | 1 | 73499 | import unittest
import os
from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, TestTags, tag_test
import overrides_hack
from gi.repository import BlockDev, GLib
class PartTestCase(unittest.TestCase):
    """Common fixture for the partitioning tests.

    Creates two sparse backing files exposed as LIO (iSCSI loopback)
    block devices, available to tests as ``self.loop_dev`` and
    ``self.loop_dev2``; both are torn down again after each test.
    """

    # only the 'part' plugin is needed for these tests
    requested_plugins = BlockDev.plugin_specs_from_names(("part",))

    @classmethod
    def setUpClass(cls):
        # initialize libblockdev once, or re-init with just our plugins
        if not BlockDev.is_initialized():
            BlockDev.init(cls.requested_plugins, None)
        else:
            BlockDev.reinit(cls.requested_plugins, True, None)

    def setUp(self):
        """Create two 100 MiB sparse files and back LIO devices with them."""
        # registered first so cleanup runs even if device setup fails mid-way
        self.addCleanup(self._clean_up)
        self.dev_file = create_sparse_tempfile("part_test", 100 * 1024**2)
        self.dev_file2 = create_sparse_tempfile("part_test", 100 * 1024**2)
        try:
            self.loop_dev = create_lio_device(self.dev_file)
        except RuntimeError as e:
            raise RuntimeError("Failed to setup loop device for testing: %s" % e)
        try:
            self.loop_dev2 = create_lio_device(self.dev_file2)
        except RuntimeError as e:
            raise RuntimeError("Failed to setup loop device for testing: %s" % e)

    def _clean_up(self):
        """Best-effort teardown of the LIO devices and backing files."""
        try:
            delete_lio_device(self.loop_dev)
        except RuntimeError:
            # just move on, we can do no better here
            pass
        os.unlink(self.dev_file)

        try:
            delete_lio_device(self.loop_dev2)
        except RuntimeError:
            # just move on, we can do no better here
            pass
        os.unlink(self.dev_file2)
class PartCreateTableCase(PartTestCase):
    """Tests for creating partition tables."""

    @tag_test(TestTags.CORE)
    def test_create_table(self):
        """Verify that it is possible to create a new partition table"""

        # A nonexistent device must be rejected whether or not preexisting
        # partition tables are to be ignored.
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_table ("/non/existing", BlockDev.PartTableType.MSDOS, False)

        with self.assertRaises(GLib.GError):
            BlockDev.part_create_table ("/non/existing", BlockDev.PartTableType.MSDOS, True)

        # On a clean device both variants succeed.
        ret = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, False)
        self.assertTrue(ret)

        ret = BlockDev.part_create_table (self.loop_dev2, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(ret)

        # With ignore=False an existing partition table makes the call fail...
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, False)

        # ...while ignore=True overwrites it, even with a different table type.
        ret = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(ret)

        ret = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(ret)
class PartGetDiskSpecCase(PartTestCase):
    """Tests for querying whole-disk information."""

    @tag_test(TestTags.CORE)
    def test_get_disk_spec(self):
        """Verify that it is possible to get information about disk"""

        # a nonexistent device must be rejected
        with self.assertRaises(GLib.GError):
            BlockDev.part_get_disk_spec ("/non/existing/device")

        # no partition table yet -> table type should be reported as UNDEF
        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev)
        self.assertEqual(ps.sector_size, 512)
        # the device is backed by a 100 MiB sparse file (see PartTestCase)
        self.assertGreaterEqual(ps.size, 100 * 1024**2 - 512)
        self.assertEqual(ps.table_type, BlockDev.PartTableType.UNDEF)
        self.assertEqual(ps.flags, 0)

        # after creating an MSDOS table, the spec should reflect it
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)

        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev)
        self.assertEqual(ps.sector_size, 512)
        self.assertGreaterEqual(ps.size, 100 * 1024**2 - 512)
        self.assertEqual(ps.table_type, BlockDev.PartTableType.MSDOS)
        self.assertEqual(ps.flags, 0)

        # overwriting with a GPT table should change the reported type too
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)

        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev)
        self.assertEqual(ps.sector_size, 512)
        self.assertGreaterEqual(ps.size, 100 * 1024**2 - 512)
        self.assertEqual(ps.table_type, BlockDev.PartTableType.GPT)
        self.assertEqual(ps.flags, 0)
class PartCreatePartCase(PartTestCase):
@tag_test(TestTags.CORE)
def test_create_part_simple(self):
"""Verify that it is possible to create a parition"""
# we first need a partition table
succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
self.assertTrue(succ)
# for now, let's just create a typical primary partition starting at the
# sector 2048, 10 MiB big with optimal alignment
ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
# we should get proper data back
self.assertTrue(ps)
self.assertEqual(ps.path, self.loop_dev + "1")
self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
self.assertEqual(ps.start, 2048 * 512)
self.assertEqual(ps.size, 10 * 1024**2)
self.assertEqual(ps.flags, 0) # no flags (combination of bit flags)
ps2 = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
self.assertEqual(ps.path, ps2.path)
self.assertEqual(ps.type, ps2.type);
self.assertEqual(ps.start, ps2.start)
self.assertEqual(ps.size, ps2.size)
self.assertEqual(ps.flags, ps2.flags)
pss = BlockDev.part_get_disk_parts (self.loop_dev)
self.assertEqual(len(pss), 1)
ps3 = pss[0]
self.assertEqual(ps.path, ps3.path)
self.assertEqual(ps.type, ps3.type)
self.assertEqual(ps.start, ps3.start)
self.assertEqual(ps.size, ps3.size)
self.assertEqual(ps.flags, ps3.flags)
def test_create_part_minimal_start_optimal(self):
"""Verify that it is possible to create a parition with minimal start and optimal alignment"""
# we first need a partition table
succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
self.assertTrue(succ)
# for now, let's just create a typical primary partition starting at the
# sector 2048, 10 MiB big with optimal alignment
ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 1, 2 * 1024**2, BlockDev.PartAlign.OPTIMAL)
# we should get proper data back
self.assertTrue(ps)
self.assertEqual(ps.path, self.loop_dev + "1")
self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
self.assertLessEqual(ps.start, 2048 * 512)
self.assertEqual(ps.size, 2 * 1024**2)
self.assertEqual(ps.flags, 0) # no flags (combination of bit flags)
ps2 = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
self.assertEqual(ps.path, ps2.path)
self.assertEqual(ps.type, ps2.type);
self.assertEqual(ps.start, ps2.start)
self.assertEqual(ps.size, ps2.size)
self.assertEqual(ps.flags, ps2.flags)
pss = BlockDev.part_get_disk_parts (self.loop_dev)
self.assertEqual(len(pss), 1)
ps3 = pss[0]
self.assertEqual(ps.path, ps3.path)
self.assertEqual(ps.type, ps3.type)
self.assertEqual(ps.start, ps3.start)
self.assertEqual(ps.size, ps3.size)
self.assertEqual(ps.flags, ps3.flags)
def test_create_part_minimal_start(self):
    """Verify that it is possible to create a partition with minimal start"""
    # we first need a partition table
    succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
    self.assertTrue(succ)
    # request a primary partition starting at byte 1, 2 MiB big, with no
    # alignment at all; the start should only be rounded up to the first
    # whole sector (512 bytes)
    ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 1, 2 * 1024**2, BlockDev.PartAlign.NONE)
    # we should get proper data back
    self.assertTrue(ps)
    self.assertEqual(ps.path, self.loop_dev + "1")
    self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
    self.assertEqual(ps.start, 512)
    self.assertEqual(ps.size, 2 * 1024**2)
    self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)
    # querying the partition by path must return the very same spec
    ps2 = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
    self.assertEqual(ps.path, ps2.path)
    self.assertEqual(ps.type, ps2.type);
    self.assertEqual(ps.start, ps2.start)
    self.assertEqual(ps.size, ps2.size)
    self.assertEqual(ps.flags, ps2.flags)
    # ...and so must listing all partitions on the disk
    pss = BlockDev.part_get_disk_parts (self.loop_dev)
    self.assertEqual(len(pss), 1)
    ps3 = pss[0]
    self.assertEqual(ps.path, ps3.path)
    self.assertEqual(ps.type, ps3.type)
    self.assertEqual(ps.start, ps3.start)
    self.assertEqual(ps.size, ps3.size)
    self.assertEqual(ps.flags, ps3.flags)
class PartCreatePartFullCase(PartTestCase):
    """Exercise partition creation up to the limits of the disk and the table.

    Covers: whole-device partitions, the MSDOS limit of four primary
    partitions, extended/logical partition creation and numbering, the
    automatic NEXT partition-type selection, and the corresponding (simpler)
    behaviour on GPT.
    """

    @tag_test(TestTags.CORE)
    def test_full_device_partition(self):
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)

        # create partition spanning whole device even disregarding the partition table (loop_dev size)
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, 0, 100 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        succ = BlockDev.part_delete_part (self.loop_dev, ps.path)
        self.assertTrue(succ)

        # same, but create a maximal partition automatically (size == 0 means "as big as possible")
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, 0, 0, BlockDev.PartAlign.OPTIMAL)
        succ = BlockDev.part_delete_part (self.loop_dev, ps.path)
        self.assertTrue(succ)

        # start at byte 1 and create partition spanning whole device explicitly
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, 1, 100 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        succ = BlockDev.part_delete_part (self.loop_dev, ps.path)
        self.assertTrue(succ)

        # start at byte 1 and create a maximal partition automatically
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, 1, 0, BlockDev.PartAlign.OPTIMAL)
        succ = BlockDev.part_delete_part (self.loop_dev, ps.path)
        self.assertTrue(succ)

    def test_create_part_all_primary(self):
        """Verify that partition creation works as expected with all primary parts"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)

        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps2.start + ps2.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 10 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        ps4 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps3.start + ps3.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps4)
        self.assertEqual(ps4.path, self.loop_dev + "4")
        self.assertEqual(ps4.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps4.start - (ps3.start + ps3.size + 1)) < ps.start)
        self.assertEqual(ps4.size, 10 * 1024**2)
        self.assertEqual(ps4.flags, 0)  # no flags (combination of bit flags)

        # no more primary partitions allowed in the MSDOS table
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps4.start + ps4.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps4.start + ps4.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)

    def test_create_part_with_extended(self):
        """Verify that partition creation works as expected with primary and extended parts"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)

        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps2.start + ps2.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 10 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        # the fourth partition can be extended instead of primary
        ps4 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps3.start + ps3.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps4)
        self.assertEqual(ps4.path, self.loop_dev + "4")
        self.assertEqual(ps4.type, BlockDev.PartType.EXTENDED)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps4.start - (ps3.start + ps3.size + 1)) < ps.start)
        self.assertEqual(ps4.size, 10 * 1024**2)
        self.assertEqual(ps4.flags, 0)  # no flags (combination of bit flags)

        # no more primary (nor a second extended) partitions allowed in the MSDOS table
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps4.start + ps4.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps4.start + ps4.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)

    @tag_test(TestTags.CORE)
    def test_create_part_with_extended_logical(self):
        """Verify that partition creation works as expected with primary, extended and logical parts"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)

        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        # a big extended partition that will hold the logical ones below
        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps2.start + ps2.size + 1,
                                         30 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.EXTENDED)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 30 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        # the logical partition has number 5 even though the extended partition
        # has number 3
        ps5 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps3.start + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps5)
        self.assertEqual(ps5.path, self.loop_dev + "5")
        self.assertEqual(ps5.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p3 which
        # should need at most 2 MiB extra space
        self.assertTrue(ps3.start < ps5.start < ps3.start + ps3.size)
        self.assertTrue(abs(ps5.size - 10 * 1024**2) < 2 * 1024**2)
        self.assertEqual(ps5.flags, 0)  # no flags (combination of bit flags)

        ps6 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps5.start + ps5.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps6)
        self.assertEqual(ps6.path, self.loop_dev + "6")
        self.assertEqual(ps6.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p3 which
        # should need at most 2 MiB extra space
        self.assertTrue(ps3.start < ps6.start < ps3.start + ps3.size)
        self.assertEqual(ps6.size, 10 * 1024**2)
        self.assertEqual(ps6.flags, 0)  # no flags (combination of bit flags)

        # a logical partition with a deliberate gap after the previous one
        ps7 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps6.start + ps6.size + 2 * 1024**2,
                                         5 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps7)
        self.assertEqual(ps7.path, self.loop_dev + "7")
        self.assertEqual(ps7.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p3 and close
        # to the requested position
        self.assertTrue(ps3.start < ps7.start < ps3.start + ps3.size)
        self.assertLess(abs(ps7.start - (ps6.start + ps6.size + 2 * 1024**2)), 512)
        self.assertEqual(ps7.size, 5 * 1024**2)
        self.assertEqual(ps7.flags, 0)  # no flags (combination of bit flags)

        # here we go with the partition number 4
        ps4 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps3.start + ps3.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps4)
        self.assertEqual(ps4.path, self.loop_dev + "4")
        self.assertEqual(ps4.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps4.start - (ps3.start + ps3.size + 1)) < ps.start)
        self.assertEqual(ps4.size, 10 * 1024**2)
        self.assertEqual(ps4.flags, 0)  # no flags (combination of bit flags)

        # no more extended partitions allowed in the MSDOS table
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps3.start + ps3.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)

    def test_create_part_with_extended_logical_gpt(self):
        """Verify that partition creation works as expected with primary, extended and logical parts on GPT"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)

        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps2.start + ps2.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 10 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        # no extended partitions allowed in the GPT table
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps3.start + ps3.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)

        # no logical partitions allowed in the GPT table
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps3.start + ps3.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)

    @tag_test(TestTags.CORE)
    def test_create_part_next(self):
        """Verify that partition creation works as expected with the NEXT (auto) type"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)

        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps2.start + ps2.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 10 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        # NEXT should now auto-create an extended partition (number 4) and put
        # a logical partition (number 5) inside it
        ps5 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps3.start + ps3.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        ps4 = BlockDev.part_get_part_spec (self.loop_dev, self.loop_dev + "4")
        self.assertTrue(ps4)
        self.assertEqual(ps4.path, self.loop_dev + "4")
        self.assertEqual(ps4.type, BlockDev.PartType.EXTENDED)
        self.assertTrue(abs(ps4.start - (ps3.start + ps3.size + 1)) < ps.start)
        # the auto-created extended partition should span (most of) the rest of the disk
        self.assertGreater(ps4.size, 65 * 1024**2)
        self.assertTrue(ps5)
        self.assertEqual(ps5.path, self.loop_dev + "5")
        self.assertEqual(ps5.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p4 no more
        # than 2 MiB after its start
        self.assertLessEqual(ps5.start, ps4.start + 2*1024**2)
        self.assertEqual(ps5.size, 10 * 1024**2)
        self.assertEqual(ps5.flags, 0)  # no flags (combination of bit flags)

        ps6 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps5.start + ps5.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps6)
        self.assertEqual(ps6.path, self.loop_dev + "6")
        self.assertEqual(ps6.type, BlockDev.PartType.LOGICAL)
        # logical partitions start 1 MiB after each other (no idea why)
        self.assertLessEqual(abs(ps6.start - (ps5.start + ps5.size + 1)), 1024**2 + 512)
        self.assertEqual(ps6.size, 10 * 1024**2)
        self.assertEqual(ps6.flags, 0)  # no flags (combination of bit flags)

        ps7 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps6.start + ps6.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps7)
        self.assertEqual(ps7.path, self.loop_dev + "7")
        self.assertEqual(ps7.type, BlockDev.PartType.LOGICAL)
        # logical partitions start 1 MiB after each other (no idea why)
        self.assertLessEqual(abs(ps7.start - (ps6.start + ps6.size + 1)), 1024**2 + 512)
        self.assertEqual(ps7.size, 10 * 1024**2)
        self.assertEqual(ps7.flags, 0)  # no flags (combination of bit flags)

        # no more primary nor extended partitions allowed in the MSDOS table and
        # there should be no space
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps4.start + ps4.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        with self.assertRaises(GLib.GError):
            BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps4.start + ps4.size + 1,
                                       10 * 1024**2, BlockDev.PartAlign.OPTIMAL)

    @tag_test(TestTags.CORE)
    def test_create_part_next_gpt(self):
        """Verify that partition creation works as expected with the NEXT (auto) type on GPT"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)

        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps2.start + ps2.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 10 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        # we should get just next primary partition (GPT)
        ps4 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps3.start + ps3.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps4)
        self.assertEqual(ps4.path, self.loop_dev + "4")
        self.assertEqual(ps4.type, BlockDev.PartType.NORMAL)
        self.assertTrue(abs(ps4.start - (ps3.start + ps3.size + 1)) < ps.start)
        self.assertEqual(ps4.size, 10 * 1024**2)

        # we should get just next primary partition (GPT)
        ps5 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps4.start + ps4.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps5)
        self.assertEqual(ps5.path, self.loop_dev + "5")
        self.assertEqual(ps5.type, BlockDev.PartType.NORMAL)
        self.assertTrue(abs(ps5.start - (ps4.start + ps4.size + 1)) < ps.start)
        self.assertEqual(ps5.size, 10 * 1024**2)

        # we should get just next primary partition (GPT)
        # (fixed: the requested start previously used ps4.size by mistake;
        # same value as ps5.size today, but the wrong variable)
        ps6 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NEXT, ps5.start + ps5.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps6)
        self.assertEqual(ps6.path, self.loop_dev + "6")
        self.assertEqual(ps6.type, BlockDev.PartType.NORMAL)
        self.assertTrue(abs(ps6.start - (ps5.start + ps5.size + 1)) < ps.start)
        self.assertEqual(ps6.size, 10 * 1024**2)
class PartGetDiskPartsCase(PartTestCase):
    def test_get_disk_parts_empty(self):
        """Verify that getting info about partitions with no label works"""
        # without a partition table the query must fail with a GLib error
        self.assertRaises(GLib.GError, BlockDev.part_get_disk_parts, self.loop_dev)
class PartGetDiskFreeRegions(PartTestCase):
    @tag_test(TestTags.CORE)
    def test_get_disk_free_regions(self):
        """Verify that it is possible to get info about free regions on a disk"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # unaligned partition right after the MBR (first whole sector)
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 1, 10 * 1024**2, BlockDev.PartAlign.NONE)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        fis = BlockDev.part_get_disk_free_regions (self.loop_dev)
        self.assertEqual(len(fis), 2) # 0-512, (512+10MiB)-EOD
        fi = fis[0]
        self.assertEqual(fi.start, 0)
        self.assertEqual(fi.size, 512)
        fi = fis[1]
        self.assertEqual(fi.start, ps.start + ps.size)
        self.assertGreater(fi.size, 89 * 1024**2)
        # second partition with a 10 MiB gap after the first one
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 10 * 1024**2,
                                        10 * 1024**2, BlockDev.PartAlign.NONE)
        fis = BlockDev.part_get_disk_free_regions (self.loop_dev)
        self.assertEqual(len(fis), 3) # 0-512, first part, gap, second part, free
        fi = fis[0]
        self.assertEqual(fi.start, 0)
        self.assertEqual(fi.size, 512)
        fi = fis[1]
        self.assertEqual(fi.start, 512 + 10 * 1024**2)
        self.assertGreater(fi.size, 9 * 1024**2)
        fi = fis[2]
        self.assertEqual(fi.start, 512 + 30 * 1024**2)
        self.assertGreater(fi.size, 69 * 1024**2)
        # extended partition right after the second one, with a logical
        # partition 1 MiB into it
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps.start + ps.size + 1,
                                        50 * 1024**2, BlockDev.PartAlign.NONE)
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps.start + 1024**2,
                                        10 * 1024**2, BlockDev.PartAlign.NONE)
        fis = BlockDev.part_get_disk_free_regions (self.loop_dev)
        self.assertEqual(len(fis), 6) # 0-512[0], first part, gap[1], second part, gap[2], extended, gap[3], logical, free extended[4], free[5]
        fi = fis[0]
        self.assertEqual(fi.start, 0)
        self.assertEqual(fi.size, 512)
        fi = fis[1]
        self.assertEqual(fi.start, 512 + 10 * 1024**2)
        self.assertGreater(fi.size, 9 * 1024**2)
        fi = fis[2]
        self.assertGreater(fi.start, 30 * 1024**2)
        self.assertLessEqual(fi.size, 512)
        fi = fis[3]
        self.assertGreater(fi.start, 30 * 1024**2)
        self.assertLessEqual(fi.size, 1024**2)
        fi = fis[4]
        self.assertGreaterEqual(fi.start, ps.start + ps.size)
        self.assertGreaterEqual(fi.size, 38 * 1024**2)
        fi = fis[5]
        self.assertGreaterEqual(fi.start, 80 * 1024**2)
        self.assertGreaterEqual(fi.size, 19 * 1024**2)
        # recreate a fresh table and re-check the simple single-partition layout
        # NOTE(review): this comment used to say "now something simple with GPT",
        # but the code creates an MSDOS table and the assertions below
        # (ps.start == 512, a 512-byte leading free region) match MSDOS, not
        # GPT -- confirm whether a GPT variant was intended here
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 1, 10 * 1024**2, BlockDev.PartAlign.NONE)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        fis = BlockDev.part_get_disk_free_regions (self.loop_dev)
        self.assertEqual(len(fis), 2) # 0-512, (512+10MiB)-EOD
        fi = fis[0]
        self.assertEqual(fi.start, 0)
        self.assertEqual(fi.size, 512)
        fi = fis[1]
        self.assertEqual(fi.start, ps.start + ps.size)
        self.assertGreater(fi.size, 89 * 1024**2)
class PartGetBestFreeRegion(PartTestCase):
    def test_get_best_free_region(self):
        """Verify that it is possible to get info about the best free region on a disk"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # first partition at the very beginning of the disk
        ps1 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 1, 10 * 1024**2, BlockDev.PartAlign.NONE)
        self.assertTrue(ps1)
        self.assertEqual(ps1.path, self.loop_dev + "1")
        self.assertEqual(ps1.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps1.start, 512)
        self.assertEqual(ps1.size, 10 * 1024**2)
        # create a 20MiB gap between the partitions
        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps1.start + ps1.size + 20 * 1024**2,
                                         10 * 1024**2, BlockDev.PartAlign.NONE)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps2.start, ps1.start + ps1.size + 20 * 1024**2)
        self.assertEqual(ps2.size, 10 * 1024**2)
        # normal partition should go in between the partitions because there's enough space for it
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.NORMAL, 10 * 1024**2)
        self.assertLess(ps.start, ps2.start)
        # extended partition should be as big as possible so it shouldn't go in between the partitions
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.EXTENDED, 10 * 1024**2)
        self.assertGreaterEqual(ps.start, ps2.start + ps2.size)
        # create a 10MiB gap between the partitions
        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps2.start + ps2.size + 10 * 1024**2,
                                         45 * 1024**2, BlockDev.PartAlign.NONE)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.EXTENDED)
        self.assertEqual(ps3.start, ps2.start + ps2.size + 10 * 1024**2)
        self.assertEqual(ps3.size, 45 * 1024**2)
        # there should now be 5 MiB left after the third partition which is enough for a 3MiB partition
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.NORMAL, 3 * 1024**2)
        self.assertGreaterEqual(ps.start, ps3.start + ps3.size)
        # 7MiB partition should go in between the second and third partitions because there's enough space
        # for it there
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.NORMAL, 7 * 1024**2)
        self.assertGreaterEqual(ps.start, ps2.start + ps2.size)
        self.assertLess(ps.start, ps3.start)
        # 15MiB partition should go in between the first and second partitions because that's the only
        # space big enough for it
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.NORMAL, 15 * 1024**2)
        self.assertGreaterEqual(ps.start, ps1.start + ps1.size)
        self.assertLess(ps.start, ps2.start)
        # a logical partition 20 MiB into the extended one; logical partitions
        # are numbered from 5, hence the jump from "3" to "5"
        ps5 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps3.start + 20 * 1024**2,
                                         15 * 1024**2, BlockDev.PartAlign.NONE)
        self.assertEqual(ps5.path, self.loop_dev + "5")
        self.assertEqual(ps5.type, BlockDev.PartType.LOGICAL)
        self.assertEqual(ps5.start, ps3.start + 20 * 1024**2)
        self.assertEqual(ps5.size, 15 * 1024**2)
        # 5MiB logical partition should go after the fifth partition because there's enough space for it
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.LOGICAL, 5 * 1024**2)
        self.assertGreaterEqual(ps.start, ps5.start + ps5.size)
        self.assertLess(ps.start, ps3.start + ps3.size)
        # 15MiB logical partition should go before the fifth partition because there's enough space for it
        ps = BlockDev.part_get_best_free_region (self.loop_dev, BlockDev.PartType.LOGICAL, 15 * 1024**2)
        self.assertGreaterEqual(ps.start, ps3.start)
        self.assertLess(ps.start, ps5.start)
class PartGetPartByPos(PartTestCase):
    """Lookup of partitions and free-space regions by byte position on an
    MSDOS-labelled loop device populated with primary, extended and logical
    partitions."""

    def test_get_part_by_pos(self):
        """Verify that getting partition by position works as expected"""
        ## prepare the disk with non-trivial setup first
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps)
        self.assertEqual(ps.path, self.loop_dev + "1")
        self.assertEqual(ps.type, BlockDev.PartType.NORMAL)
        self.assertEqual(ps.start, 2048 * 512)
        self.assertEqual(ps.size, 10 * 1024**2)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps2)
        self.assertEqual(ps2.path, self.loop_dev + "2")
        self.assertEqual(ps2.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps2.start - (ps.start + ps.size + 1)) < ps.start)
        self.assertEqual(ps2.size, 10 * 1024**2)
        self.assertEqual(ps2.flags, 0)  # no flags (combination of bit flags)

        ps3 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.EXTENDED, ps2.start + ps2.size + 1,
                                         35 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps3)
        self.assertEqual(ps3.path, self.loop_dev + "3")
        self.assertEqual(ps3.type, BlockDev.PartType.EXTENDED)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps3.start - (ps2.start + ps2.size + 1)) < ps.start)
        self.assertEqual(ps3.size, 35 * 1024**2)
        self.assertEqual(ps3.flags, 0)  # no flags (combination of bit flags)

        # the logical partition has number 5 even though the extended partition
        # has number 3
        ps5 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps3.start + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps5)
        self.assertEqual(ps5.path, self.loop_dev + "5")
        self.assertEqual(ps5.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p3 which
        # should need at most 2 MiB extra space
        self.assertTrue(ps3.start < ps5.start < ps3.start + ps3.size)
        self.assertTrue(abs(ps5.size - 10 * 1024**2) < 2 * 1024**2)
        self.assertEqual(ps5.flags, 0)  # no flags (combination of bit flags)

        ps6 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps5.start + ps5.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps6)
        self.assertEqual(ps6.path, self.loop_dev + "6")
        self.assertEqual(ps6.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p3 which
        # should need at most 2 MiB extra space
        self.assertTrue(ps3.start < ps6.start < ps3.start + ps3.size)
        self.assertEqual(ps6.size, 10 * 1024**2)
        self.assertEqual(ps6.flags, 0)  # no flags (combination of bit flags)

        ps7 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.LOGICAL, ps6.start + ps6.size + 2 * 1024**2,
                                         5 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps7)
        self.assertEqual(ps7.path, self.loop_dev + "7")
        self.assertEqual(ps7.type, BlockDev.PartType.LOGICAL)
        # the start has to be somewhere in the extended partition p3 which
        # should need at most 2 MiB extra space
        self.assertTrue(ps3.start < ps7.start < ps3.start + ps3.size)
        self.assertLess(abs(ps7.start - (ps6.start + ps6.size + 2 * 1024**2)), 512)
        self.assertEqual(ps7.size, 5 * 1024**2)
        self.assertEqual(ps7.flags, 0)  # no flags (combination of bit flags)

        # here we go with the partition number 4
        ps4 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps3.start + ps3.size + 1,
                                         10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(ps4)
        self.assertEqual(ps4.path, self.loop_dev + "4")
        self.assertEqual(ps4.type, BlockDev.PartType.NORMAL)
        # the start has to be at most as far from the end of the previous part
        # as is the start of the first part from the start of the disk
        self.assertTrue(abs(ps4.start - (ps3.start + ps3.size + 1)) < ps.start)
        self.assertEqual(ps4.size, 10 * 1024**2)
        self.assertEqual(ps4.flags, 0)  # no flags (combination of bit flags)

        ## now try to get the partitions
        # XXX: Any way to get the extended partition (ps3)? Let's just skip it now.
        # a position 1 MiB into each partition must resolve back to that partition
        for part in (ps, ps2, ps5, ps6, ps7, ps4):
            ret = BlockDev.part_get_part_by_pos(self.loop_dev, part.start + 1 * 1024**2)
            self.assertIsNotNone(ret)
            self.assertEqual(ret.path, part.path)
            self.assertEqual(ret.start, part.start)
            self.assertEqual(ret.size, part.size)
            self.assertEqual(ret.type, part.type)
            self.assertEqual(ret.flags, part.flags)

        # free space in the extended partition
        ret = BlockDev.part_get_part_by_pos(self.loop_dev, ps3.start + 33 * 1024**2)
        self.assertIsNotNone(ret)
        self.assertIsNone(ret.path)
        self.assertTrue(ret.type & BlockDev.PartType.FREESPACE)
        self.assertTrue(ret.type & BlockDev.PartType.LOGICAL)
        # there are two 10MiB and one 5MiB logical partitions
        self.assertGreater(ret.start, ps3.start + 25 * 1024**2)
        # the size of the extended partition is 35 MiB
        self.assertLess(ret.size, 10 * 1024**2)

        # free space at the end of the disk
        ret = BlockDev.part_get_part_by_pos(self.loop_dev, 90 * 1024**2)
        self.assertIsNotNone(ret)
        self.assertIsNone(ret.path)
        self.assertTrue(ret.type & BlockDev.PartType.FREESPACE)
        self.assertEqual(ret.start, ps4.start + ps4.size)
        self.assertLessEqual(ret.size, (100 * 1024**2) - (ps4.start + ps4.size))
class PartCreateResizePartCase(PartTestCase):
    """Creating partitions with size 0 (i.e. "maximal") and resizing them,
    checking that the start offset never moves."""

    def test_create_resize_part_two(self):
        """Verify that it is possible to create and resize two paritions"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        ps1_half = 20 * 1024**2
        ps1_start = 2 * 1024**2
        # create a maximal second partition
        ps2 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2* ps1_half, 0, BlockDev.PartAlign.NONE)
        self.assertGreaterEqual(ps2.start, 2* ps1_half)
        # create one maximal partition in the beginning
        ps1 = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps1_start, 0, BlockDev.PartAlign.NONE)
        self.assertGreaterEqual(ps1.size, ps1_half)
        self.assertGreaterEqual(ps1.start, ps1_start)
        self.assertLess(ps1.size, ps1_half * 2)  # can't have full size from beginning to ps2 because of start offset
        # resizing should give the same result
        ps1_size = ps1.size
        succ = BlockDev.part_resize_part (self.loop_dev, ps1.path, 0, BlockDev.PartAlign.NONE)
        self.assertTrue(succ)
        ps1 = BlockDev.part_get_part_spec(self.loop_dev, ps1.path)
        self.assertEqual(ps1.start, ps1_start)  # offset must not be moved
        self.assertEqual(ps1.size, ps1_size)
        succ = BlockDev.part_resize_part (self.loop_dev, ps1.path, ps1_half, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(succ)
        ps1 = BlockDev.part_get_part_spec(self.loop_dev, ps1.path)
        self.assertEqual(ps1.start, ps1_start)  # offset must not be moved
        self.assertGreaterEqual(ps1.size, ps1_half)  # at least requested size
        self.assertLess(ps1.size, ps1_half + 2 * 1024**2)  # and only slightly bigger
        ps2_size = ps2.size
        ps2_start = ps2.start
        succ = BlockDev.part_resize_part (self.loop_dev, ps2.path, 0, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(succ)
        ps2 = BlockDev.part_get_part_spec(self.loop_dev, ps2.path)
        self.assertEqual(ps2.start, ps2_start)  # offset must not be moved
        self.assertGreaterEqual(ps2.size, ps2_size - 2 * 1024**2)  # almost as big as before

    def test_create_resize_part_single(self):
        """Verify that it is possible to create and resize a parition"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        # create a maximal partition
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2 * 1024**2, 0, BlockDev.PartAlign.OPTIMAL)
        initial_start = ps.start
        initial_size = ps.size
        new_size = 20 * 1000**2  # resize to MB (not MiB) for a non-multiple of the blocksize
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, new_size, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)  # offset must not be moved
        self.assertGreaterEqual(ps.size, new_size)  # at least the requested size
        self.assertLess(ps.size, new_size + 1 * 1024**2)  # but also not too big (assuming 1 MiB alignment)
        # resize to maximum
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, 0, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)
        self.assertEqual(initial_size, ps.size)  # should grow to the same size again
        # resize to maximum explicitly
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, initial_size, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)
        self.assertGreaterEqual(ps.size, initial_size)  # at least the requested size
        # resize back to 20 MB (not MiB) with no alignment
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, new_size, BlockDev.PartAlign.NONE)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)  # offset must not be moved
        self.assertGreaterEqual(ps.size, new_size)  # at least the requested size
        self.assertLess(ps.size, new_size + 4 * 1024)  # but also not too big (assuming max. 4 KiB blocks)
        # resize to maximum with no alignment
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, 0, BlockDev.PartAlign.NONE)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)
        self.assertGreaterEqual(ps.size, initial_size - 1 * 1024**2)  # libparted sometimes creates smaller partitions for no alignment
        new_size = ps.size
        # resize to maximum with no alignment explicitly
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, new_size, BlockDev.PartAlign.NONE)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)
        self.assertGreaterEqual(ps.size, new_size)  # at least the requested size
        # resize to previous maximum with no alignment explicitly
        succ = BlockDev.part_resize_part (self.loop_dev, ps.path, initial_size, BlockDev.PartAlign.NONE)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec(self.loop_dev, ps.path)
        self.assertEqual(initial_start, ps.start)
        self.assertGreaterEqual(ps.size, initial_size)  # at least the requested size
class PartCreateDeletePartCase(PartTestCase):
    """Round-trip test: a partition can be created and then removed again."""

    @tag_test(TestTags.CORE)
    def test_create_delete_part_simple(self):
        """Verify that it is possible to create and delete a parition"""
        # an MSDOS label is needed before any partition can exist
        ok = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(ok)

        # one primary partition at sector 2048, 10 MiB big, optimally aligned
        part = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048 * 512,
                                          10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # exactly one partition must now be reported for the disk
        self.assertEqual(len(BlockDev.part_get_disk_parts (self.loop_dev)), 1)

        # deleting it must succeed and leave the disk with no partitions
        self.assertTrue(BlockDev.part_delete_part (self.loop_dev, part.path))
        self.assertEqual(len(BlockDev.part_get_disk_parts (self.loop_dev)), 0)
class PartSetFlagCase(PartTestCase):
    """Setting and clearing single partition flags on MSDOS and GPT tables,
    including flags that must be rejected for the table type."""

    def test_set_part_flag(self):
        """Verify that it is possible to set a partition flag"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        # set BOOT, check it round-trips through part_get_part_spec, then clear it
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.BOOT, True)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.BOOT)
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.BOOT, False)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertFalse(ps.flags & BlockDev.PartFlag.BOOT)

        # add another partition and do some more tests on that one
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.BOOT, True)
        self.assertTrue(succ)
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.LVM, True)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.BOOT)
        self.assertTrue(ps.flags & BlockDev.PartFlag.LVM)

        # SWAP label not supported on the MSDOS table
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.SWAP, True)
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.SWAP, False)
        # so isn't GPT_HIDDEN
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.GPT_HIDDEN, True)
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.GPT_HIDDEN, False)

        # also try some GPT-only flags
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.GPT_READ_ONLY, True)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.GPT_READ_ONLY)
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.GPT_HIDDEN, True)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.GPT_HIDDEN)
        # clearing GPT_READ_ONLY must not touch the independent GPT_HIDDEN flag
        succ = BlockDev.part_set_part_flag (self.loop_dev, ps.path, BlockDev.PartFlag.GPT_READ_ONLY, False)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertFalse(ps.flags & BlockDev.PartFlag.GPT_READ_ONLY)
        self.assertTrue(ps.flags & BlockDev.PartFlag.GPT_HIDDEN)
class PartSetDiskFlagCase(PartTestCase):
    """Setting and clearing disk-level (partition-table) flags."""

    def test_set_disk_flag(self):
        """Verify that it is possible to set disk flag(s)"""
        # a non-existing device must be rejected
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_disk_flag ("/non/existing/device", BlockDev.PartDiskFlag.PART_DISK_FLAG_GPT_PMBR_BOOT, True)

        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.flags, 0)
        self.assertEqual(ps.table_type, BlockDev.PartTableType.UNDEF)

        # no label/table
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_disk_flag (self.loop_dev, BlockDev.PartDiskFlag.PART_DISK_FLAG_GPT_PMBR_BOOT, True)

        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.table_type, BlockDev.PartTableType.MSDOS)
        self.assertEqual(ps.flags, 0)

        # not supported on the MSDOS table
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_disk_flag (self.loop_dev, BlockDev.PartDiskFlag.PART_DISK_FLAG_GPT_PMBR_BOOT, True)

        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.table_type, BlockDev.PartTableType.GPT)
        self.assertEqual(ps.flags, 0)

        succ = BlockDev.part_set_disk_flag (self.loop_dev, BlockDev.PartDiskFlag.PART_DISK_FLAG_GPT_PMBR_BOOT, True)
        # BUGFIX: the return value used to be captured but never checked,
        # unlike every other part_set_* call in this file
        self.assertTrue(succ)
        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.flags, BlockDev.PartDiskFlag.PART_DISK_FLAG_GPT_PMBR_BOOT)

        succ = BlockDev.part_set_disk_flag (self.loop_dev, BlockDev.PartDiskFlag.PART_DISK_FLAG_GPT_PMBR_BOOT, False)
        # BUGFIX: same here -- assert the reported success of the clear operation
        self.assertTrue(succ)
        ps = BlockDev.part_get_disk_spec (self.loop_dev)
        self.assertTrue(ps)
        self.assertEqual(ps.flags, 0)
class PartSetFlagsCase(PartTestCase):
    """Setting several partition flags at once (part_set_part_flags takes a
    bitmask; 0 clears all flags)."""

    def test_set_part_flags(self):
        """Verify that it is possible to set multiple partition flags at once"""
        # we first need a partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)

        succ = BlockDev.part_set_part_flags (self.loop_dev, ps.path, BlockDev.PartFlag.BOOT)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.BOOT)

        # 0 -> unset all
        succ = BlockDev.part_set_part_flags (self.loop_dev, ps.path, 0)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertFalse(ps.flags & BlockDev.PartFlag.BOOT)

        # add another partition and do some more tests on that one
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, ps.start + ps.size + 1, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        succ = BlockDev.part_set_part_flags (self.loop_dev, ps.path, BlockDev.PartFlag.BOOT | BlockDev.PartFlag.LVM)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.BOOT)
        self.assertTrue(ps.flags & BlockDev.PartFlag.LVM)

        # SWAP label not supported on the MSDOS table
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_flags (self.loop_dev, ps.path, BlockDev.PartFlag.SWAP)

        # also try some GPT-only flags
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertEqual(ps.flags, 0)  # no flags (combination of bit flags)
        succ = BlockDev.part_set_part_flags (self.loop_dev, ps.path, BlockDev.PartFlag.GPT_READ_ONLY | BlockDev.PartFlag.GPT_HIDDEN)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.GPT_READ_ONLY)
        self.assertTrue(ps.flags & BlockDev.PartFlag.GPT_HIDDEN)
        succ = BlockDev.part_set_part_flags (self.loop_dev, ps.path, 0)  # no flags
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertFalse(ps.flags & BlockDev.PartFlag.GPT_READ_ONLY)
        self.assertFalse(ps.flags & BlockDev.PartFlag.GPT_HIDDEN)
class PartSetNameCase(PartTestCase):
    """Setting a partition name (supported on GPT, rejected on MSDOS)."""

    def test_set_part_name(self):
        """Verify that it is possible to set partition name"""
        # we first need a GPT partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertIn(ps.name, ("", None))  # no name

        # set a name and verify it round-trips through part_get_part_spec
        succ = BlockDev.part_set_part_name (self.loop_dev, ps.path, "TEST")
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertEqual(ps.name, "TEST")

        # an empty string clears the name again
        succ = BlockDev.part_set_part_name (self.loop_dev, ps.path, "")
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertEqual(ps.name, "")

        # let's now test an MSDOS partition table (doesn't support names)
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertIn(ps.name, ("", None))  # no name
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_name (self.loop_dev, ps.path, "")
        # BUGFIX: re-fetch the spec before re-checking -- the original code
        # re-asserted on the stale, cached 'ps' object, which verified nothing
        # about the on-disk state after the failed call
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        # we should still get proper data back though
        self.assertTrue(ps)
        self.assertIn(ps.name, ("", None))  # no name
class PartSetTypeCase(PartTestCase):
    """Setting a partition type GUID (supported on GPT, rejected on MSDOS)."""

    def test_set_part_type(self):
        """Verify that it is possible to set and get partition type"""
        # we first need a GPT partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertTrue(ps.type_guid)  # should have some type

        # both GUID changes must round-trip through part_get_part_spec
        succ = BlockDev.part_set_part_type (self.loop_dev, ps.path, "E6D6D379-F507-44C2-A23C-238F2A3DF928")
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertEqual(ps.type_guid, "E6D6D379-F507-44C2-A23C-238F2A3DF928")
        succ = BlockDev.part_set_part_type (self.loop_dev, ps.path, "0FC63DAF-8483-4772-8E79-3D69D8477DE4")
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertEqual(ps.type_guid, "0FC63DAF-8483-4772-8E79-3D69D8477DE4")

        # let's now test an MSDOS partition table (doesn't support type GUIDs)
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # we should get proper data back
        self.assertTrue(ps)
        self.assertIn(ps.type_guid, ("", None))  # no type GUID
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_type (self.loop_dev, ps.path, "0FC63DAF-8483-4772-8E79-3D69D8477DE4")
        # BUGFIX: re-fetch the spec before re-checking -- the original code
        # re-asserted on the stale, cached 'ps' object, which verified nothing
        # about the on-disk state after the failed call
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        # we should still get proper data back though
        self.assertTrue(ps)
        self.assertIn(ps.type_guid, ("", None))  # no type GUID
class PartSetIdCase(PartTestCase):
    """Setting the MSDOS partition id (the partition type byte)."""

    def test_set_part_id(self):
        """Verify that it is possible to set partition id (msdos partition type)"""
        # ids are an MSDOS-table concept, so create an MBR label first
        self.assertTrue(BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.MSDOS, True))

        # a single primary partition at sector 2048, 10 MiB big, optimally aligned
        part = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048 * 512,
                                          10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        self.assertTrue(part)

        # setting the id must succeed and read back unchanged
        self.assertTrue(BlockDev.part_set_part_id (self.loop_dev, part.path, "0x8e"))
        self.assertEqual(BlockDev.part_get_part_id (self.loop_dev, part.path), "0x8e")

        # we can't change part id to extended partition id
        with self.assertRaises(GLib.GError):
            BlockDev.part_set_part_id (self.loop_dev, part.path, "0x85")
class PartSetGptFlagsCase(PartTestCase):
    """Setting flags on GPT must not clobber the partition type GUID."""

    def test_set_part_type(self):
        """Verify that it is possible to set and get partition flags on GPT"""
        esp_guid = "C12A7328-F81F-11D2-BA4B-00A0C93EC93B"
        # we first need a GPT partition table
        succ = BlockDev.part_create_table (self.loop_dev, BlockDev.PartTableType.GPT, True)
        self.assertTrue(succ)
        # for now, let's just create a typical primary partition starting at the
        # sector 2048, 10 MiB big with optimal alignment
        ps = BlockDev.part_create_part (self.loop_dev, BlockDev.PartTypeReq.NORMAL, 2048*512, 10 * 1024**2, BlockDev.PartAlign.OPTIMAL)
        # set GUID (part type) to check that changing flags doesn't change it
        succ = BlockDev.part_set_part_type (self.loop_dev, ps.path, esp_guid)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertEqual(ps.type_guid, esp_guid)
        # set LEGACY_BOOT flag and test it
        succ = BlockDev.part_set_part_flags (self.loop_dev, ps.path, BlockDev.PartFlag.LEGACY_BOOT)
        self.assertTrue(succ)
        ps = BlockDev.part_get_part_spec (self.loop_dev, ps.path)
        self.assertTrue(ps.flags & BlockDev.PartFlag.LEGACY_BOOT)
        # the type GUID must have survived the flag change
        self.assertEqual(ps.type_guid, esp_guid)
| lgpl-2.1 |
DonaldTrumpHasTinyHands/tiny_hands_pac | products/migrations/0001_initial.py | 4 | 4963 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
import modelcluster.contrib.taggit
class Migration(migrations.Migration):
    # Auto-generated initial migration for the 'products' app: creates the
    # ProductIndexPage/ProductPage Wagtail page models, their related-link
    # child models, the taggit through-model, and then wires up the remaining
    # cross-references via AddField.  Do not hand-edit the field definitions.

    dependencies = [
        ('taggit', '0001_initial'),
        ('wagtailimages', '0006_add_verbose_names'),
        ('wagtaildocs', '0003_add_verbose_names'),
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('subtitle', models.CharField(max_length=255, blank=True)),
                ('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='ProductIndexPageRelatedLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_external', models.URLField(verbose_name=b'External link', blank=True)),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
                ('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ProductPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('price', models.CharField(max_length=255, blank=True)),
                ('description', wagtail.wagtailcore.fields.RichTextField(blank=True)),
                ('feed_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
                ('image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='ProductPageRelatedLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_external', models.URLField(verbose_name=b'External link', blank=True)),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
                ('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
                ('link_page', models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True)),
                ('page', modelcluster.fields.ParentalKey(related_name='related_links', to='products.ProductPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # taggit through-model connecting ProductPage to Tag
        migrations.CreateModel(
            name='ProductPageTag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content_object', modelcluster.fields.ParentalKey(related_name='tagged_items', to='products.ProductPage')),
                ('tag', models.ForeignKey(related_name='products_productpagetag_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        # fields added after model creation because they reference models
        # defined above in this same migration
        migrations.AddField(
            model_name='productpage',
            name='tags',
            field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag', through='products.ProductPageTag', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
        ),
        migrations.AddField(
            model_name='productindexpagerelatedlink',
            name='link_page',
            field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True),
        ),
        migrations.AddField(
            model_name='productindexpagerelatedlink',
            name='page',
            field=modelcluster.fields.ParentalKey(related_name='related_links', to='products.ProductIndexPage'),
        ),
    ]
| mit |
helpmoeny/pythoncode | Python_projects/project03/proj03(RAM).py | 2 | 1049 | N_str = input("Input a large whole number: ")
# Re-prompt until N_str holds a positive whole number.
while not N_str.isdigit() or int(N_str) < 1:
    print("Input must be a whole number. Try again")
    N_str = input("Input a large whole number: ")

# Re-prompt for the group width until it is a positive integer that evenly
# divides the number of digits.
while True:
    split_text = input("Split: ")
    if not split_text.isdigit():
        print("Input must be a whole number. Try again")
    elif int(split_text) <= 0:
        print("Input must be a whole number. Try again")
    elif len(N_str) % int(split_text) != 0:
        print(N_str, " Must be evenly divisible by ", split_text)
        print("Try again")
    else:
        break

# Chop the digit string into equal-width groups and show them comma-separated.
width = int(split_text)
groups = [N_str[pos:pos + width] for pos in range(0, len(N_str), width)]
print(", ".join(groups))

# The sequence counts as increasing when every group is strictly greater than
# the one before it; the chain starts at 0 (the original accumulator's initial
# value), so a leading all-zero group also disqualifies the sequence.
values = [int(group) for group in groups]
if all(prev < cur for prev, cur in zip([0] + values, values)):
    print("Sequence is increasing")
else:
    print("Sequence is NOT Increasing")
shurihell/testasia | common/lib/xmodule/xmodule/contentstore/content.py | 80 | 11993 | import re
import uuid
XASSET_LOCATION_TAG = 'c4x'
XASSET_SRCREF_PREFIX = 'xasset:'
XASSET_THUMBNAIL_TAIL_NAME = '.jpg'
STREAM_DATA_CHUNK_SIZE = 1024
import os
import logging
import StringIO
from urlparse import urlparse, urlunparse, parse_qsl
from urllib import urlencode
from opaque_keys.edx.locator import AssetLocator
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys import InvalidKeyError
from PIL import Image
class StaticContent(object):
    def __init__(self, loc, name, content_type, data, last_modified_at=None, thumbnail_location=None, import_path=None,
                 length=None, locked=False):
        """
        Hold one static asset's payload and metadata.

        loc: the asset's location key; name: editable display string;
        content_type: MIME type string; data: the payload as supplied by the
        caller (exposed unchanged via the ``data`` property).
        """
        self.location = loc
        self.name = name  # a display string which can be edited, and thus not part of the location which needs to be fixed
        self.content_type = content_type
        self._data = data
        self.length = length
        self.last_modified_at = last_modified_at
        self.thumbnail_location = thumbnail_location
        # optional information about where this file was imported from. This is needed to support import/export
        # cycles
        self.import_path = import_path
        self.locked = locked
    @property
    def is_thumbnail(self):
        # True when the underlying location's category is 'thumbnail' rather
        # than a regular asset category.
        return self.location.category == 'thumbnail'
@staticmethod
def generate_thumbnail_name(original_name):
name_root, ext = os.path.splitext(original_name)
if not ext == XASSET_THUMBNAIL_TAIL_NAME:
name_root = name_root + ext.replace(u'.', u'-')
return u"{name_root}{extension}".format(
name_root=name_root,
extension=XASSET_THUMBNAIL_TAIL_NAME,)
    @staticmethod
    def compute_location(course_key, path, revision=None, is_thumbnail=False):
        """
        Constructs a location object for static content.
        - course_key: the course that this asset belongs to
        - path: is the name of the static asset
        - revision: is the object's revision information
        - is_thumbnail: is whether or not we want the thumbnail version of this
        asset
        """
        # NOTE(review): 'revision' is accepted but not used anywhere in this
        # method -- confirm whether callers rely on it being ignored.
        # flatten any path into a single name component; slashes would
        # otherwise end up in the asset key's name
        path = path.replace('/', '_')
        return course_key.make_asset_key(
            'asset' if not is_thumbnail else 'thumbnail',
            AssetLocator.clean_keeping_underscores(path)
        ).for_branch(None)
def get_id(self):
return self.location
@property
def data(self):
return self._data
ASSET_URL_RE = re.compile(r"""
/?c4x/
(?P<org>[^/]+)/
(?P<course>[^/]+)/
(?P<category>[^/]+)/
(?P<name>[^/]+)
""", re.VERBOSE | re.IGNORECASE)
@staticmethod
def is_c4x_path(path_string):
"""
Returns a boolean if a path is believed to be a c4x link based on the leading element
"""
return StaticContent.ASSET_URL_RE.match(path_string) is not None
@staticmethod
def get_static_path_from_location(location):
"""
This utility static method will take a location identifier and create a 'durable' /static/.. URL representation of it.
This link is 'durable' as it can maintain integrity across cloning of courseware across course-ids, e.g. reruns of
courses.
In the LMS/CMS, we have runtime link-rewriting, so at render time, this /static/... format will get translated into
the actual /c4x/... path which the client needs to reference static content
"""
if location is not None:
return u"/static/{name}".format(name=location.name)
else:
return None
@staticmethod
def get_base_url_path_for_course_assets(course_key):
if course_key is None:
return None
assert isinstance(course_key, CourseKey)
placeholder_id = uuid.uuid4().hex
# create a dummy asset location with a fake but unique name. strip off the name, and return it
url_path = StaticContent.serialize_asset_key_with_slash(
course_key.make_asset_key('asset', placeholder_id).for_branch(None)
)
return url_path.replace(placeholder_id, '')
@staticmethod
def get_location_from_path(path):
"""
Generate an AssetKey for the given path (old c4x/org/course/asset/name syntax)
"""
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# TODO - re-address this once LMS-11198 is tackled.
if path.startswith('/'):
# try stripping off the leading slash and try again
return AssetKey.from_string(path[1:])
@staticmethod
def convert_legacy_static_url_with_course_id(path, course_id):
"""
Returns a path to a piece of static content when we are provided with a filepath and
a course_id
"""
# Generate url of urlparse.path component
scheme, netloc, orig_path, params, query, fragment = urlparse(path)
loc = StaticContent.compute_location(course_id, orig_path)
loc_url = StaticContent.serialize_asset_key_with_slash(loc)
# parse the query params for "^/static/" and replace with the location url
orig_query = parse_qsl(query)
new_query_list = []
for query_name, query_value in orig_query:
if query_value.startswith("/static/"):
new_query = StaticContent.compute_location(
course_id,
query_value[len('/static/'):],
)
new_query_url = StaticContent.serialize_asset_key_with_slash(new_query)
new_query_list.append((query_name, new_query_url))
else:
new_query_list.append((query_name, query_value))
# Reconstruct with new path
return urlunparse((scheme, netloc, loc_url, params, urlencode(new_query_list), fragment))
def stream_data(self):
yield self._data
@staticmethod
def serialize_asset_key_with_slash(asset_key):
"""
Legacy code expects the serialized asset key to start w/ a slash; so, do that in one place
:param asset_key:
"""
url = unicode(asset_key)
if not url.startswith('/'):
url = '/' + url # TODO - re-address this once LMS-11198 is tackled.
return url
class StaticContentStream(StaticContent):
    """
    A StaticContent whose data lives in a file-like stream instead of being
    held in memory; data is produced lazily in fixed-size chunks.
    """
    def __init__(self, loc, name, content_type, stream, last_modified_at=None, thumbnail_location=None, import_path=None,
                 length=None, locked=False):
        super(StaticContentStream, self).__init__(loc, name, content_type, None, last_modified_at=last_modified_at,
                                                  thumbnail_location=thumbnail_location, import_path=import_path,
                                                  length=length, locked=locked)
        self._stream = stream

    def stream_data(self):
        """Yield the stream's contents in STREAM_DATA_CHUNK_SIZE chunks until EOF."""
        while True:
            chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
            if len(chunk) == 0:
                break
            yield chunk

    def stream_data_in_range(self, first_byte, last_byte):
        """
        Stream the data between first_byte and last_byte (included)
        """
        self._stream.seek(first_byte)
        position = first_byte
        while True:
            if last_byte < position + STREAM_DATA_CHUNK_SIZE - 1:
                # Final (partial) chunk: read exactly up to last_byte.
                chunk = self._stream.read(last_byte - position + 1)
                yield chunk
                break
            chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
            # BUGFIX: if the stream is exhausted before last_byte is reached
            # (e.g. a range request past EOF), read() returns an empty chunk
            # forever; the original loop never terminated. Stop at EOF.
            if len(chunk) == 0:
                break
            position += STREAM_DATA_CHUNK_SIZE
            yield chunk

    def close(self):
        """Close the underlying stream."""
        self._stream.close()

    def copy_to_in_mem(self):
        """
        Read the whole stream and return an equivalent in-memory StaticContent
        carrying the same metadata.
        """
        self._stream.seek(0)
        content = StaticContent(self.location, self.name, self.content_type, self._stream.read(),
                                last_modified_at=self.last_modified_at, thumbnail_location=self.thumbnail_location,
                                import_path=self.import_path, length=self.length, locked=self.locked)
        return content
class ContentStore(object):
    '''
    Abstraction for all ContentStore providers (e.g. MongoDB)
    '''
    def save(self, content):
        """Persist a StaticContent instance. Must be implemented by subclasses."""
        raise NotImplementedError

    def find(self, filename):
        """Look up and return a piece of content. Must be implemented by subclasses."""
        raise NotImplementedError

    def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
        '''
        Returns a list of static assets for a course, followed by the total number of assets.
        By default all assets are returned, but start and maxresults can be provided to limit the query.
        The return format is a list of asset data dictionaries.
        The asset data dictionaries have the following keys:
            asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
            displayname: The human-readable name of the asset
            uploadDate (datetime.datetime): The date and time that the file was uploadDate
            contentType: The mimetype string of the asset
            md5: An md5 hash of the asset content
        '''
        raise NotImplementedError

    def delete_all_course_assets(self, course_key):
        """
        Delete all of the assets which use this course_key as an identifier
        :param course_key:
        """
        raise NotImplementedError

    def copy_all_course_assets(self, source_course_key, dest_course_key):
        """
        Copy all the course assets from source_course_key to dest_course_key
        """
        raise NotImplementedError

    def generate_thumbnail(self, content, tempfile_path=None):
        """
        Create, save and return a 128x128 JPEG thumbnail for ``content`` when
        it is an image; returns a (thumbnail_content, thumbnail_file_location)
        tuple, where thumbnail_content is None for non-images or on failure.
        Failures are logged and swallowed because thumbnails are optional.
        """
        thumbnail_content = None
        # use a naming convention to associate originals with the thumbnail
        thumbnail_name = StaticContent.generate_thumbnail_name(content.location.name)
        thumbnail_file_location = StaticContent.compute_location(
            content.location.course_key, thumbnail_name, is_thumbnail=True
        )
        # if we're uploading an image, then let's generate a thumbnail so that we can
        # serve it up when needed without having to rescale on the fly
        if content.content_type is not None and content.content_type.split('/')[0] == 'image':
            try:
                # use PIL to do the thumbnail generation (http://www.pythonware.com/products/pil/)
                # My understanding is that PIL will maintain aspect ratios while restricting
                # the max-height/width to be whatever you pass in as 'size'
                # @todo: move the thumbnail size to a configuration setting?!?
                if tempfile_path is None:
                    im = Image.open(StringIO.StringIO(content.data))
                else:
                    im = Image.open(tempfile_path)
                # I've seen some exceptions from the PIL library when trying to save palletted
                # PNG files to JPEG. Per the google-universe, they suggest converting to RGB first.
                im = im.convert('RGB')
                size = 128, 128
                im.thumbnail(size, Image.ANTIALIAS)
                thumbnail_file = StringIO.StringIO()
                im.save(thumbnail_file, 'JPEG')
                thumbnail_file.seek(0)
                # store this thumbnail as any other piece of content
                thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
                                                  'image/jpeg', thumbnail_file)
                self.save(thumbnail_content)
            except Exception, e:
                # log and continue as thumbnails are generally considered as optional
                logging.exception(u"Failed to generate thumbnail for {0}. Exception: {1}".format(content.location, str(e)))
        return thumbnail_content, thumbnail_file_location

    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.
        """
        pass
| agpl-3.0 |
scenarios/tensorflow | tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py | 11 | 12347 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Dirichlet Multinomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
_dirichlet_multinomial_prob_note = """
For each batch of counts `[n_1,...,n_k]`, `P[counts]` is the probability
that after sampling `n` draws from this Dirichlet Multinomial
distribution, the number of draws falling in class `j` is `n_j`. Note that
different sequences of draws can result in the same counts, thus the
probability includes a combinatorial coefficient.
Note that input, "counts", must be a non-negative tensor with dtype `dtype`
and whose shape can be broadcast with `self.alpha`. For fixed leading
dimensions, the last dimension represents counts for the corresponding
Dirichlet Multinomial distribution in `self.alpha`. `counts` is only legal if
it sums up to `n` and its components are equal to integer values.
"""
class DirichletMultinomial(distribution.Distribution):
  """DirichletMultinomial mixture distribution.

  This distribution is parameterized by a vector `alpha` of concentration
  parameters for `k` classes and `n`, the counts per each class..

  #### Mathematical details

  The Dirichlet Multinomial is a distribution over k-class count data, meaning
  for each k-tuple of non-negative integer `counts = [c_1,...,c_k]`, we have a
  probability of these draws being made from the distribution. The distribution
  has hyperparameters `alpha = (alpha_1,...,alpha_k)`, and probability mass
  function (pmf):

  ```pmf(counts) = N! / (n_1!...n_k!) * Beta(alpha + c) / Beta(alpha)```

  where above `N = sum_j n_j`, `N!` is `N` factorial, and
  `Beta(x) = prod_j Gamma(x_j) / Gamma(sum_j x_j)` is the multivariate beta
  function.

  This is a mixture distribution in that `M` samples can be produced by:
    1. Choose class probabilities `p = (p_1,...,p_k) ~ Dir(alpha)`
    2. Draw integers `m = (n_1,...,n_k) ~ Multinomial(N, p)`

  This class provides methods to create indexed batches of Dirichlet
  Multinomial distributions. If the provided `alpha` is rank 2 or higher, for
  every fixed set of leading dimensions, the last dimension represents one
  single Dirichlet Multinomial distribution. When calling distribution
  functions (e.g. `dist.pmf(counts)`), `alpha` and `counts` are broadcast to the
  same shape (if possible). In all cases, the last dimension of alpha/counts
  represents single Dirichlet Multinomial distributions.

  #### Examples

  ```python
  alpha = [1, 2, 3]
  n = 2
  dist = DirichletMultinomial(n, alpha)
  ```

  Creates a 3-class distribution, with the 3rd class is most likely to be drawn.
  The distribution functions can be evaluated on counts.

  ```python
  # counts same shape as alpha.
  counts = [0, 0, 2]
  dist.pmf(counts)  # Shape []
  # alpha will be broadcast to [[1, 2, 3], [1, 2, 3]] to match counts.
  counts = [[1, 1, 0], [1, 0, 1]]
  dist.pmf(counts)  # Shape [2]
  # alpha will be broadcast to shape [5, 7, 3] to match counts.
  counts = [[...]]  # Shape [5, 7, 3]
  dist.pmf(counts)  # Shape [5, 7]
  ```

  Creates a 2-batch of 3-class distributions.

  ```python
  alpha = [[1, 2, 3], [4, 5, 6]]  # Shape [2, 3]
  n = [3, 3]
  dist = DirichletMultinomial(n, alpha)
  # counts will be broadcast to [[2, 1, 0], [2, 1, 0]] to match alpha.
  counts = [2, 1, 0]
  dist.pmf(counts)  # Shape [2]
  ```
  """

  # TODO(b/27419586) Change docstring for dtype of alpha once int allowed.
  def __init__(self,
               n,
               alpha,
               validate_args=False,
               allow_nan_stats=True,
               name="DirichletMultinomial"):
    """Initialize a batch of DirichletMultinomial distributions.

    Args:
      n: Non-negative floating point tensor, whose dtype is the same as
        `alpha`. The shape is broadcastable to `[N1,..., Nm]` with `m >= 0`.
        Defines this as a batch of `N1 x ... x Nm` different Dirichlet
        multinomial distributions. Its components should be equal to integer
        values.
      alpha: Positive floating point tensor, whose dtype is the same as
        `n` with shape broadcastable to `[N1,..., Nm, k]` `m >= 0`. Defines
        this as a batch of `N1 x ... x Nm` different `k` class Dirichlet
        multinomial distributions.
      validate_args: `Boolean`, default `False`. Whether to assert valid
        values for parameters `alpha` and `n`, and `x` in `prob` and
        `log_prob`. If `False`, correct behavior is not guaranteed.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to prefix Ops created by this distribution class.

    Examples:

    ```python
    # Define 1-batch of 2-class Dirichlet multinomial distribution,
    # also known as a beta-binomial.
    dist = DirichletMultinomial(2.0, [1.1, 2.0])
    # Define a 2-batch of 3-class distributions.
    dist = DirichletMultinomial([3., 4], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    ```
    """
    # Capture constructor args (minus self) for the base class's `parameters`.
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[n, alpha]) as ns:
      # Broadcasting works because:
      # * The broadcasting convention is to prepend dimensions of size [1], and
      #   we use the last dimension for the distribution, wherease
      #   the batch dimensions are the leading dimensions, which forces the
      #   distribution dimension to be defined explicitly (i.e. it cannot be
      #   created automatically by prepending). This forces enough
      #   explicitivity.
      # * All calls involving `counts` eventually require a broadcast between
      #   `counts` and alpha.
      self._alpha = self._assert_valid_alpha(alpha, validate_args)
      self._n = self._assert_valid_n(n, validate_args)
      self._alpha_sum = math_ops.reduce_sum(
          self._alpha, reduction_indices=[-1], keep_dims=False)
      super(DirichletMultinomial, self).__init__(
          dtype=self._alpha.dtype,
          is_continuous=False,
          is_reparameterized=False,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._alpha, self._n, self._alpha_sum],
          name=ns)

  @property
  def n(self):
    """Parameter defining this distribution."""
    return self._n

  @property
  def alpha(self):
    """Parameter defining this distribution."""
    return self._alpha

  @property
  def alpha_sum(self):
    """Summation of alpha parameter."""
    return self._alpha_sum

  def _batch_shape(self):
    # Dynamic batch shape: alpha_sum has alpha's shape minus the event dim.
    return array_ops.shape(self.alpha_sum)

  def _get_batch_shape(self):
    # Static counterpart of _batch_shape.
    return self.alpha_sum.get_shape()

  def _event_shape(self):
    # Dynamic event shape: the size of alpha's last dimension.
    return array_ops.reverse_v2(array_ops.shape(self.alpha), [0])[0]

  def _get_event_shape(self):
    # Event shape depends only on alpha, not "n".
    return self.alpha.get_shape().with_rank_at_least(1)[-1:]

  def _sample_n(self, n, seed=None):
    """Draw `n` samples; only a scalar number of multinomial draws is supported."""
    n_draws = math_ops.cast(self.n, dtype=dtypes.int32)
    if self.n.get_shape().ndims is not None:
      if self.n.get_shape().ndims != 0:
        raise NotImplementedError(
            "Sample only supported for scalar number of draws.")
    elif self.validate_args:
      is_scalar = check_ops.assert_rank(
          n_draws, 0,
          message="Sample only supported for scalar number of draws.")
      n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
    k = self.event_shape()[0]
    # log(Gamma(alpha)) draws serve as unnormalized Dirichlet logits; the
    # multinomial sampler normalizes them internally.
    unnormalized_logits = array_ops.reshape(
        math_ops.log(random_ops.random_gamma(
            shape=[n],
            alpha=self.alpha,
            dtype=self.dtype,
            seed=seed)),
        shape=[-1, k])
    draws = random_ops.multinomial(
        logits=unnormalized_logits,
        num_samples=n_draws,
        seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
    # Convert per-draw class indices into per-class counts via one-hot + sum.
    x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                            reduction_indices=-2)
    final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
    return array_ops.reshape(x, final_shape)

  @distribution_util.AppendDocstring(_dirichlet_multinomial_prob_note)
  def _log_prob(self, counts):
    counts = self._assert_valid_counts(counts)
    # log Beta(alpha + counts) - log Beta(alpha), plus the log multinomial
    # coefficient for the combinatorics of ordering the draws.
    ordered_prob = (special_math_ops.lbeta(self.alpha + counts) -
                    special_math_ops.lbeta(self.alpha))
    log_prob = ordered_prob + distribution_util.log_combinations(
        self.n, counts)
    return log_prob

  @distribution_util.AppendDocstring(_dirichlet_multinomial_prob_note)
  def _prob(self, counts):
    return math_ops.exp(self._log_prob(counts))

  def _mean(self):
    # E[X_j] = n * alpha_j / alpha_0
    normalized_alpha = self.alpha / array_ops.expand_dims(self.alpha_sum, -1)
    return self.n[..., None] * normalized_alpha

  @distribution_util.AppendDocstring(
      """The variance for each batch member is defined as the following:

      ```
      Var(X_j) = n * alpha_j / alpha_0 * (1 - alpha_j / alpha_0) *
      (n + alpha_0) / (1 + alpha_0)
      ```

      where `alpha_0 = sum_j alpha_j`.

      The covariance between elements in a batch is defined as:

      ```
      Cov(X_i, X_j) = -n * alpha_i * alpha_j / alpha_0 ** 2 *
      (n + alpha_0) / (1 + alpha_0)
      ```
      """)
  def _variance(self):
    alpha_sum = array_ops.expand_dims(self.alpha_sum, -1)
    normalized_alpha = self.alpha / alpha_sum
    # Off-diagonal covariance terms: -p_i * p_j (scaled below).
    variance = -math_ops.matmul(
        array_ops.expand_dims(normalized_alpha, -1),
        array_ops.expand_dims(normalized_alpha, -2))
    # Diagonal terms: p_j * (1 - p_j) (scaled below).
    variance = array_ops.matrix_set_diag(variance, normalized_alpha *
                                         (1. - normalized_alpha))
    shared_factor = (self.n * (alpha_sum + self.n) /
                     (alpha_sum + 1) * array_ops.ones_like(self.alpha))
    variance *= array_ops.expand_dims(shared_factor, -1)
    return variance

  def _assert_valid_counts(self, counts):
    """Check counts for proper shape, values, then return tensor version."""
    counts = ops.convert_to_tensor(counts, name="counts")
    if not self.validate_args:
      return counts
    candidate_n = math_ops.reduce_sum(counts, reduction_indices=[-1])
    return control_flow_ops.with_dependencies([
        check_ops.assert_non_negative(counts),
        check_ops.assert_equal(
            self._n, candidate_n,
            message="counts do not sum to n"),
        distribution_util.assert_integer_form(counts)], counts)

  def _assert_valid_alpha(self, alpha, validate_args):
    # alpha must be at least rank 1 (last dim is the event dim) and positive.
    alpha = ops.convert_to_tensor(alpha, name="alpha")
    if not validate_args:
      return alpha
    return control_flow_ops.with_dependencies(
        [check_ops.assert_rank_at_least(alpha, 1),
         check_ops.assert_positive(alpha)], alpha)

  def _assert_valid_n(self, n, validate_args):
    # n must be a non-negative integer-valued (but float-typed) tensor.
    n = ops.convert_to_tensor(n, name="n")
    if not validate_args:
      return n
    return control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(n),
         distribution_util.assert_integer_form(n)], n)
| apache-2.0 |
lgp171188/fjord | vendor/packages/polib-1.0.7/tests/tests.py | 4 | 22686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import subprocess
import sys
import tempfile
import unittest
sys.path.insert(1, os.path.abspath('.'))
import polib
from polib import u
class TestFunctions(unittest.TestCase):
    """
    Tests for polib's module-level helpers: pofile()/mofile() loading,
    encoding detection, and the escape()/unescape() functions.
    """
    def test_pofile_and_mofile1(self):
        """
        Test bad usage of pofile/mofile.
        """
        data = u('''# test for pofile/mofile with string buffer
msgid ""
msgstr ""
"Project-Id-Version: django\n"
msgid "foo"
msgstr "bar"
''')
        po = polib.pofile(data)
        self.assertTrue(isinstance(po, polib.POFile))
        self.assertEqual(po.encoding, 'utf-8')
        self.assertEqual(po[0].msgstr, u("bar"))

    def test_indented_pofile(self):
        """
        Test that an indented pofile returns a POFile instance.
        """
        po = polib.pofile('tests/test_indented.po')
        self.assertTrue(isinstance(po, polib.POFile))

    def test_pofile_and_mofile2(self):
        """
        Test that the pofile function returns a POFile instance.
        """
        po = polib.pofile('tests/test_utf8.po')
        self.assertTrue(isinstance(po, polib.POFile))

    def test_pofile_and_mofile3(self):
        """
        Test that the mofile function returns a MOFile instance.
        """
        mo = polib.mofile('tests/test_utf8.mo')
        self.assertTrue(isinstance(mo, polib.MOFile))

    def test_pofile_and_mofile4(self):
        """
        Test that check_for_duplicates is passed to the instance.
        """
        po = polib.pofile('tests/test_iso-8859-15.po', check_for_duplicates=True,
                          autodetect_encoding=False, encoding='iso-8859-15')
        self.assertTrue(po.check_for_duplicates == True)

    def test_pofile_and_mofile5(self):
        """
        Test that detect_encoding works as expected.
        """
        po = polib.pofile('tests/test_iso-8859-15.po')
        self.assertTrue(po.encoding == 'ISO_8859-15')

    def test_pofile_and_mofile6(self):
        """
        Test that encoding is default_encoding when detect_encoding is False.
        """
        po = polib.pofile('tests/test_noencoding.po')
        self.assertTrue(po.encoding == 'utf-8')

    def test_pofile_and_mofile7(self):
        """
        Test that encoding is ok when encoding is explicitly given.
        """
        po = polib.pofile('tests/test_iso-8859-15.po', encoding='iso-8859-15')
        self.assertTrue(po.encoding == 'iso-8859-15')

    def test_pofile_and_mofile8(self):
        """
        Test that weird occurrences are correctly parsed.
        """
        po = polib.pofile('tests/test_weird_occurrences.po')
        self.assertEqual(len(po), 46)

    def test_pofile_and_mofile9(self):
        """
        Test that obsolete previous msgid are ignored
        """
        po = polib.pofile('tests/test_obsolete_previousmsgid.po')
        self.assertTrue(isinstance(po, polib.POFile))

    def test_previous_msgid_1(self):
        """
        Test previous msgid multiline.
        """
        po = polib.pofile('tests/test_previous_msgid.po')
        expected = "\nPartition table entries are not in disk order\n"
        self.assertEqual(
            po[0].previous_msgid,
            expected
        )

    def test_previous_msgid_2(self):
        """
        Test previous msgid single line.
        """
        po = polib.pofile('tests/test_previous_msgid.po')
        expected = "Partition table entries are not in disk order2\n"
        self.assertEqual(
            po[1].previous_msgid,
            expected
        )

    def test_previous_msgctxt_1(self):
        """
        Test previous msgctxt multiline.
        """
        po = polib.pofile('tests/test_previous_msgid.po')
        expected = "\nSome message context"
        self.assertEqual(
            po[0].previous_msgctxt,
            expected
        )

    def test_previous_msgctxt_2(self):
        """
        Test previous msgctxt single line.
        """
        po = polib.pofile('tests/test_previous_msgid.po')
        expected = "Some message context"
        self.assertEqual(
            po[1].previous_msgctxt,
            expected
        )

    def test_unescaped_double_quote1(self):
        """
        Test that polib reports an error when unescaped double quote is found.
        """
        data = r'''
msgid "Some msgid with \"double\" quotes"
msgid "Some msgstr with "double\" quotes"
'''
        try:
            po = polib.pofile(data)
            self.fail("Unescaped quote not detected")
        except IOError:
            exc = sys.exc_info()[1]
            msg = 'Syntax error in po file None (line 3): unescaped double quote found'
            self.assertEqual(str(exc), msg)

    def test_unescaped_double_quote2(self):
        """
        Test that polib reports an error when unescaped double quote is found.
        """
        data = r'''
msgid "Some msgid with \"double\" quotes"
msgstr ""
"Some msgstr with "double\" quotes"
'''
        try:
            po = polib.pofile(data)
            self.fail("Unescaped quote not detected")
        except IOError:
            exc = sys.exc_info()[1]
            msg = 'Syntax error in po file None (line 4): unescaped double quote found'
            self.assertEqual(str(exc), msg)

    def test_unescaped_double_quote3(self):
        """
        Test that polib reports an error when unescaped double quote is found at the beginning of the string.
        """
        data = r'''
msgid "Some msgid with \"double\" quotes"
msgid ""Some msgstr with double\" quotes"
'''
        try:
            po = polib.pofile(data)
            self.fail("Unescaped quote not detected")
        except IOError:
            exc = sys.exc_info()[1]
            msg = 'Syntax error in po file None (line 3): unescaped double quote found'
            self.assertEqual(str(exc), msg)

    def test_unescaped_double_quote4(self):
        """
        Test that polib reports an error when unescaped double quote is found at the beginning of the string.
        """
        data = r'''
msgid "Some msgid with \"double\" quotes"
msgstr ""
""Some msgstr with double\" quotes"
'''
        try:
            po = polib.pofile(data)
            self.fail("Unescaped quote not detected")
        except IOError:
            exc = sys.exc_info()[1]
            msg = 'Syntax error in po file None (line 4): unescaped double quote found'
            self.assertEqual(str(exc), msg)

    def test_detect_encoding1(self):
        """
        Test that given enconding is returned when file has no encoding defined.
        """
        self.assertEqual(polib.detect_encoding('tests/test_noencoding.po'), 'utf-8')

    def test_detect_encoding2(self):
        """
        Test with a .pot file.
        """
        self.assertEqual(polib.detect_encoding('tests/test_merge.pot'), 'utf-8')

    def test_detect_encoding3(self):
        """
        Test with an utf8 .po file.
        """
        self.assertEqual(polib.detect_encoding('tests/test_utf8.po'), 'UTF-8')

    def test_detect_encoding4(self):
        """
        Test with utf8 data (no file).
        """
        # Python 3 requires explicit decoding of the bytes read from disk;
        # Python 2 reads the data as (byte) str directly.
        if polib.PY3:
            f = open('tests/test_utf8.po', 'rb')
            data = str(f.read(), 'utf-8')
        else:
            f = open('tests/test_utf8.po', 'r')
            data = f.read()
        try:
            self.assertEqual(polib.detect_encoding(data), 'UTF-8')
        finally:
            f.close()

    def test_detect_encoding5(self):
        """
        Test with utf8 .mo file.
        """
        self.assertEqual(polib.detect_encoding('tests/test_utf8.mo', True), 'UTF-8')

    def test_detect_encoding6(self):
        """
        Test with iso-8859-15 .po file.
        """
        self.assertEqual(polib.detect_encoding('tests/test_iso-8859-15.po'), 'ISO_8859-15')

    def test_detect_encoding7(self):
        """
        Test with iso-8859-15 .mo file.
        """
        self.assertEqual(polib.detect_encoding('tests/test_iso-8859-15.mo', True), 'ISO_8859-15')

    def test_escape(self):
        """
        Tests the escape function.
        """
        self.assertEqual(
            polib.escape('\\t and \\n and \\r and " and \\ and \\\\'),
            '\\\\t and \\\\n and \\\\r and \\" and \\\\ and \\\\\\\\'
        )

    def test_unescape(self):
        """
        Tests the unescape function.
        """
        self.assertEqual(
            polib.unescape('\\\\t and \\\\n and \\\\r and \\\\" and \\\\\\\\'),
            '\\t and \\n and \\r and \\" and \\\\'
        )

    def test_pofile_with_subclass(self):
        """
        Test that the pofile function correctly returns an instance of the
        passed in class
        """
        class CustomPOFile(polib.POFile):
            pass
        pofile = polib.pofile('tests/test_indented.po', klass=CustomPOFile)
        self.assertEqual(pofile.__class__, CustomPOFile)

    def test_mofile_with_subclass(self):
        """
        Test that the mofile function correctly returns an instance of the
        passed in class
        """
        class CustomMOFile(polib.MOFile):
            pass
        mofile = polib.mofile('tests/test_utf8.mo', klass=CustomMOFile)
        self.assertEqual(mofile.__class__, CustomMOFile)

    def test_empty(self):
        # An empty input still yields a minimal valid PO header.
        po = polib.pofile('')
        self.assertEqual(po.__unicode__(), '#\nmsgid ""\nmsgstr ""\n')

    def test_linenum_1(self):
        po = polib.pofile('tests/test_utf8.po')
        self.assertEqual(po[0].linenum, 18)

    def test_linenum_2(self):
        po = polib.pofile('tests/test_utf8.po')
        self.assertEqual(po.find('XML text').linenum, 1799)

    def test_linenum_3(self):
        po = polib.pofile('tests/test_utf8.po')
        self.assertEqual(po[-1].linenum, 3478)
class TestBaseFile(unittest.TestCase):
"""
Tests for the _BaseFile class.
"""
def test_append1(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = polib.POEntry(msgid="Foo", msgstr="Bar", msgctxt="Some context")
pofile.append(entry)
self.assertTrue(entry in pofile)
def test_append2(self):
def add_duplicate():
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
pofile.append(polib.POEntry(msgid="and"))
self.assertRaises(ValueError, add_duplicate)
def test_append3(self):
def add_duplicate():
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
pofile.append(polib.POEntry(msgid="and", msgctxt="some context"))
self.assertRaises(ValueError, add_duplicate)
def test_append4(self):
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
entry = polib.POEntry(msgid="and", msgctxt="some different context")
pofile.append(entry)
self.assertTrue(entry in pofile)
def test_insert1(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = polib.POEntry(msgid="Foo", msgstr="Bar", msgctxt="Some context")
pofile.insert(0, entry)
self.assertEqual(pofile[0], entry)
def test_insert2(self):
def add_duplicate():
pofile = polib.pofile('tests/test_pofile_helpers.po', check_for_duplicates=True)
pofile.insert(0, polib.POEntry(msgid="and", msgstr="y"))
self.assertRaises(ValueError, add_duplicate)
def test_metadata_as_entry(self):
pofile = polib.pofile('tests/test_fuzzy_header.po')
f = open('tests/test_fuzzy_header.po')
lines = f.readlines()[2:]
f.close()
self.assertEqual(pofile.metadata_as_entry().__unicode__(), "".join(lines))
def test_find1(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = pofile.find('and')
self.assertEqual(entry.msgstr, u('y'))
def test_find2(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = pofile.find('pacote', by="msgstr")
self.assertEqual(entry, None)
def test_find3(self):
pofile = polib.pofile('tests/test_pofile_helpers.po')
entry = pofile.find('package', include_obsolete_entries=True)
self.assertEqual(entry.msgstr, u('pacote'))
def test_find4(self):
pofile = polib.pofile('tests/test_utf8.po')
entry1 = pofile.find('test context', msgctxt='@context1')
entry2 = pofile.find('test context', msgctxt='@context2')
self.assertEqual(entry1.msgstr, u('test context 1'))
self.assertEqual(entry2.msgstr, u('test context 2'))
def test_save1(self):
pofile = polib.POFile()
self.assertRaises(IOError, pofile.save)
def test_save2(self):
fd, tmpfile = tempfile.mkstemp()
os.close(fd)
try:
pofile = polib.POFile()
pofile.save(tmpfile)
pofile.save()
self.assertTrue(os.path.isfile(tmpfile))
finally:
os.remove(tmpfile)
def test_ordered_metadata(self):
pofile = polib.pofile('tests/test_fuzzy_header.po')
f = open('tests/test_fuzzy_header.po')
lines = f.readlines()[2:]
f.close()
mdata = [
('Project-Id-Version', u('PACKAGE VERSION')),
('Report-Msgid-Bugs-To', u('')),
('POT-Creation-Date', u('2010-02-08 16:57+0100')),
('PO-Revision-Date', u('YEAR-MO-DA HO:MI+ZONE')),
('Last-Translator', u('FULL NAME <EMAIL@ADDRESS>')),
('Language-Team', u('LANGUAGE <LL@li.org>')),
('MIME-Version', u('1.0')),
('Content-Type', u('text/plain; charset=UTF-8')),
('Content-Transfer-Encoding', u('8bit'))
]
self.assertEqual(pofile.ordered_metadata(), mdata)
def test_unicode1(self):
pofile = polib.pofile('tests/test_merge_after.po')
f = codecs.open('tests/test_merge_after.po', encoding='utf8')
expected = f.read()
f.close()
self.assertEqual(pofile.__unicode__(), expected)
def test_unicode2(self):
pofile = polib.pofile('tests/test_iso-8859-15.po')
f = codecs.open('tests/test_iso-8859-15.po', encoding='iso-8859-15')
expected = f.read()
f.close()
self.assertEqual(pofile.__unicode__(), expected)
def test_str(self):
pofile = polib.pofile('tests/test_iso-8859-15.po')
if polib.PY3:
f = codecs.open('tests/test_iso-8859-15.po', encoding='iso-8859-15')
else:
f = open('tests/test_iso-8859-15.po')
expected = f.read()
f.close()
self.assertEqual(str(pofile), expected)
def test_wrapping(self):
pofile = polib.pofile('tests/test_wrap.po', wrapwidth=50)
expected = r'''# test wrapping
msgid ""
msgstr ""
msgid "This line will not be wrapped"
msgstr ""
msgid ""
"Some line that contain special characters \" and"
" that \t is very, very, very long...: %s \n"
msgstr ""
msgid ""
"Some line that contain special characters "
"\"foobar\" and that contains whitespace at the "
"end "
msgstr ""
'''
self.assertEqual(str(pofile), expected)
    def test_sort(self):
        # sort() ordering as shown by the expected output below: entries
        # without occurrences first, then by occurrences (file name, then
        # line number), with obsolete entries (#~) last.
        a1 = polib.POEntry(msgid='a1', occurrences=[('b.py', 1), ('b.py', 3)])
        a2 = polib.POEntry(msgid='a2')
        a3 = polib.POEntry(msgid='a1', occurrences=[('b.py', 1), ('b.py', 3)], obsolete=True)
        b1 = polib.POEntry(msgid='b1', occurrences=[('b.py', 1), ('b.py', 3)])
        b2 = polib.POEntry(msgid='b2', occurrences=[('d.py', 3), ('b.py', 1)])
        c1 = polib.POEntry(msgid='c1', occurrences=[('a.py', 1), ('b.py', 1)])
        c2 = polib.POEntry(msgid='c2', occurrences=[('a.py', 1), ('a.py', 3)])
        pofile = polib.POFile()
        # Deliberately appended out of order so sort() has work to do.
        pofile.append(b1)
        pofile.append(a3)
        pofile.append(a2)
        pofile.append(a1)
        pofile.append(b2)
        pofile.append(c1)
        pofile.append(c2)
        pofile.sort()
        expected = u('''#
msgid ""
msgstr ""
msgid "a2"
msgstr ""
#: a.py:1 a.py:3
msgid "c2"
msgstr ""
#: a.py:1 b.py:1
msgid "c1"
msgstr ""
#: b.py:1 b.py:3
msgid "a1"
msgstr ""
#: b.py:1 b.py:3
msgid "b1"
msgstr ""
#: d.py:3 b.py:1
msgid "b2"
msgstr ""
#~ msgid "a1"
#~ msgstr ""
''')
        self.assertEqual(pofile.__unicode__(), expected)
    def test_trailing_comment(self):
        # A file ending with a comment must still render to exactly the
        # entries below (the trailing comment must not create an entry).
        pofile = polib.pofile('tests/test_trailing_comment.po')
        expected = r'''#
msgid ""
msgstr "Content-Type: text/plain; charset=UTF-8\n"
msgid "foo"
msgstr "oof"
'''
        self.assertEqual(str(pofile), expected)
class TestPoFile(unittest.TestCase):
    """
    Tests for PoFile class.
    """

    def test_save_as_mofile(self):
        """
        Test for the POFile.save_as_mofile() method.

        Compares polib's MO output byte-for-byte with GNU msgfmt's output
        (hashing disabled so both tools produce the same layout).
        """
        import distutils.spawn
        msgfmt = distutils.spawn.find_executable('msgfmt')
        if msgfmt is None:
            try:
                # Report the test as skipped (unittest >= 2.7). The previous
                # code did `return unittest.skip(...)`, which only builds a
                # decorator and made the test *pass* silently instead of
                # being skipped.
                self.skipTest('msgfmt is not installed')
            except AttributeError:
                # Old unittest without skip support: just bail out.
                return
        reffiles = ['tests/test_utf8.po', 'tests/test_iso-8859-15.po']
        encodings = ['utf-8', 'iso-8859-15']
        for reffile, encoding in zip(reffiles, encodings):
            fd, tmpfile1 = tempfile.mkstemp()
            os.close(fd)
            fd, tmpfile2 = tempfile.mkstemp()
            os.close(fd)
            po = polib.pofile(reffile, autodetect_encoding=False, encoding=encoding)
            po.save_as_mofile(tmpfile1)
            subprocess.call([msgfmt, '--no-hash', '-o', tmpfile2, reffile])
            try:
                f = open(tmpfile1, 'rb')
                s1 = f.read()
                f.close()
                f = open(tmpfile2, 'rb')
                s2 = f.read()
                f.close()
                self.assertEqual(s1, s2)
            finally:
                os.remove(tmpfile1)
                os.remove(tmpfile2)

    def test_merge(self):
        # Merging a reference POT into a PO must yield the reference result.
        refpot = polib.pofile('tests/test_merge.pot')
        po = polib.pofile('tests/test_merge_before.po')
        po.merge(refpot)
        expected_po = polib.pofile('tests/test_merge_after.po')
        self.assertEqual(po, expected_po)

    def test_percent_translated(self):
        po = polib.pofile('tests/test_pofile_helpers.po')
        self.assertEqual(po.percent_translated(), 53)
        # An empty catalog counts as fully translated.
        po = polib.POFile()
        self.assertEqual(po.percent_translated(), 100)

    def test_translated_entries(self):
        po = polib.pofile('tests/test_pofile_helpers.po')
        self.assertEqual(len(po.translated_entries()), 7)

    def test_untranslated_entries(self):
        po = polib.pofile('tests/test_pofile_helpers.po')
        self.assertEqual(len(po.untranslated_entries()), 4)

    def test_fuzzy_entries(self):
        po = polib.pofile('tests/test_pofile_helpers.po')
        self.assertEqual(len(po.fuzzy_entries()), 2)

    def test_obsolete_entries(self):
        po = polib.pofile('tests/test_pofile_helpers.po')
        self.assertEqual(len(po.obsolete_entries()), 4)

    def test_unusual_metadata_location(self):
        # Metadata must be found even when not at the very top of the file.
        po = polib.pofile('tests/test_unusual_metadata_location.po')
        self.assertNotEqual(po.metadata, {})
        self.assertEqual(po.metadata['Content-Type'], 'text/plain; charset=UTF-8')

    def test_comment_starting_with_two_hashes(self):
        po = polib.pofile('tests/test_utf8.po')
        e = po.find("Some comment starting with two '#'", by='tcomment')
        self.assertTrue(isinstance(e, polib.POEntry))

    def test_word_garbage(self):
        po = polib.pofile('tests/test_word_garbage.po')
        e = po.find("Whatever", by='msgid')
        self.assertTrue(isinstance(e, polib.POEntry))
class TestMoFile(unittest.TestCase):
    """
    Tests for MoFile class.
    """

    def test_dummy_methods(self):
        """
        This is stupid and just here for code coverage.
        """
        # An MO file has no untranslated/fuzzy/obsolete entries by
        # construction, so these helpers return trivial values.
        mo = polib.MOFile()
        self.assertEqual(mo.percent_translated(), 100)
        self.assertEqual(mo.translated_entries(), mo)
        self.assertEqual(mo.untranslated_entries(), [])
        self.assertEqual(mo.fuzzy_entries(), [])
        self.assertEqual(mo.obsolete_entries(), [])

    def test_save_as_pofile(self):
        """
        Test for the MOFile.save_as_pofile() method.
        """
        fd, tmpfile = tempfile.mkstemp()
        os.close(fd)
        mo = polib.mofile('tests/test_utf8.mo', wrapwidth=78)
        mo.save_as_pofile(tmpfile)
        try:
            # Read back what was written and compare it against the
            # reference PO file, decoding explicitly on Python 3.
            if polib.PY3:
                f = open(tmpfile, encoding='utf-8')
            else:
                f = open(tmpfile)
            s1 = f.read()
            f.close()
            if polib.PY3:
                f = open('tests/test_save_as_pofile.po', encoding='utf-8')
            else:
                f = open('tests/test_save_as_pofile.po')
            s2 = f.read()
            f.close()
            self.assertEqual(s1, s2)
        finally:
            os.remove(tmpfile)

    def test_msgctxt(self):
        # msgctxt entries (both singular and plural forms) must survive
        # MO parsing and render back correctly.
        #import pdb; pdb.set_trace()
        mo = polib.mofile('tests/test_msgctxt.mo')
        expected = u('''msgid ""
msgstr "Content-Type: text/plain; charset=UTF-8\u005cn"
msgctxt "Some message context"
msgid "some string"
msgstr "une cha\u00eene"
msgctxt "Some other message context"
msgid "singular"
msgid_plural "plural"
msgstr[0] "singulier"
msgstr[1] "pluriel"
''')
        self.assertEqual(mo.__unicode__(), expected)

    def test_invalid_version(self):
        # MO files with an unsupported revision number must be rejected.
        self.assertRaises(IOError, polib.mofile, 'tests/test_invalid_version.mo')

    def test_no_header(self):
        # A header-less MO file still parses; an empty header is synthesized.
        mo = polib.mofile('tests/test_no_header.mo')
        expected = u('''msgid ""
msgstr ""
msgid "bar"
msgstr "rab"
msgid "foo"
msgstr "oof"
''')
        self.assertEqual(mo.__unicode__(), expected)
class TestTextWrap(unittest.TestCase):
    """Tests for polib's custom TextWrapper."""

    def test_wrap1(self):
        # Default behaviour: surrounding whitespace is dropped while wrapping.
        text = ' Some line that is longer than fifteen characters (whitespace will not be preserved) '
        wrapped = polib.TextWrapper(width=15).wrap(text)
        self.assertEqual(wrapped, [
            ' Some line', 'that is longer', 'than fifteen', 'characters',
            '(whitespace', 'will not be', 'preserved)'
        ])

    def test_wrap2(self):
        # With drop_whitespace=False, trailing spaces stay on each chunk.
        text = ' Some line that is longer than fifteen characters (whitespace will be preserved) '
        wrapped = polib.TextWrapper(width=15, drop_whitespace=False).wrap(text)
        self.assertEqual(wrapped, [
            ' Some line ', 'that is longer ', 'than fifteen ', 'characters ',
            '(whitespace ', 'will be ', 'preserved) '
        ])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
dhuppenkothen/stingray | stingray/tests/test_powerspectrum.py | 1 | 14726 |
import numpy as np
from nose.tools import raises
from stingray import Lightcurve
from stingray import Powerspectrum, AveragedPowerspectrum
from stingray.powerspectrum import classical_pvalue
# Fixed seed so the Poisson light curves generated below are reproducible.
np.random.seed(20150907)
class TestPowerspectrum(object):
    """Tests for the single-segment Powerspectrum class."""

    @classmethod
    def setup_class(cls):
        # One second of Poisson counts sampled at dt = 1e-4 s, ~100 cts/s.
        tstart = 0.0
        tend = 1.0
        dt = 0.0001
        time = np.linspace(tstart, tend, int((tend - tstart) / dt))
        mean_count_rate = 100.0
        mean_counts = mean_count_rate * dt
        poisson_counts = np.random.poisson(mean_counts,
                                           size=time.shape[0])
        cls.lc = Lightcurve(time, counts=poisson_counts)

    def test_make_empty_periodogram(self):
        # A Powerspectrum built without data carries only defaults.
        ps = Powerspectrum()
        assert ps.norm == "rms"
        assert ps.freq is None
        assert ps.ps is None
        assert ps.df is None
        assert ps.m == 1
        assert ps.n is None

    def test_make_periodogram_from_lightcurve(self):
        ps = Powerspectrum(lc=self.lc)
        assert ps.freq is not None
        assert ps.ps is not None
        assert ps.df == 1.0 / self.lc.tseg
        assert ps.norm == "rms"
        assert ps.m == 1
        assert ps.n == self.lc.time.shape[0]
        assert ps.nphots == np.sum(self.lc.counts)

    def test_periodogram_types(self):
        ps = Powerspectrum(lc=self.lc)
        assert isinstance(ps.freq, np.ndarray)
        assert isinstance(ps.ps, np.ndarray)

    def test_init_with_lightcurve(self):
        assert Powerspectrum(self.lc)

    @raises(AssertionError)
    def test_init_without_lightcurve(self):
        # A bare counts array is not a Lightcurve and must be rejected.
        assert Powerspectrum(self.lc.counts)

    @raises(AssertionError)
    def test_init_with_nonsense_data(self):
        nonsense_data = [None for i in range(100)]
        assert Powerspectrum(nonsense_data)

    @raises(AssertionError)
    def test_init_with_nonsense_norm(self):
        nonsense_norm = "bla"
        assert Powerspectrum(self.lc, norm=nonsense_norm)

    @raises(AssertionError)
    def test_init_with_wrong_norm_type(self):
        nonsense_norm = 1.0
        assert Powerspectrum(self.lc, norm=nonsense_norm)

    def test_total_variance(self):
        """
        the integral of powers (or Riemann sum) should be close
        to the variance divided by twice the length of the light curve.

        Note: make sure the factors of ncounts match!
        Also, make sure to *exclude* the zeroth power!
        """
        ps = Powerspectrum(lc=self.lc)
        nn = ps.n
        # Use the builtin float: np.float was only a deprecated alias for
        # it and is removed in NumPy >= 1.20.
        pp = ps.unnorm_powers / float(nn) ** 2
        p_int = np.sum(pp[:-1] * ps.df) + (pp[-1] * ps.df) / 2
        var_lc = np.var(self.lc.counts) / (2. * self.lc.tseg)
        assert np.isclose(p_int, var_lc, atol=0.01, rtol=0.01)

    def test_rms_normalization_is_standard(self):
        """
        Make sure the standard normalization of a periodogram is
        rms and it stays that way!
        """
        ps = Powerspectrum(lc=self.lc)
        assert ps.norm == "rms"

    def test_rms_normalization_correct(self):
        """
        In rms normalization, the integral of the powers should be
        equal to the variance of the light curve divided by the mean
        of the light curve squared.
        """
        ps = Powerspectrum(lc=self.lc, norm="rms")
        ps_int = np.sum(ps.ps[:-1] * ps.df) + ps.ps[-1] * ps.df / 2
        std_lc = np.var(self.lc.counts) / np.mean(self.lc.counts) ** 2
        assert np.isclose(ps_int, std_lc, atol=0.01, rtol=0.01)

    def test_fractional_rms_in_rms_norm(self):
        ps = Powerspectrum(lc=self.lc, norm="rms")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[1],
                                         max_freq=ps.freq[-1])
        rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
        assert np.isclose(rms_ps, rms_lc, atol=0.01)

    def test_leahy_norm_correct(self):
        # For pure Poisson noise the Leahy-normalized powers average to 2.
        time = np.arange(0, 10.0, 10 / 1e6)
        counts = np.random.poisson(1000, size=time.shape[0])
        lc = Lightcurve(time, counts)
        ps = Powerspectrum(lc, norm="leahy")
        assert np.isclose(np.mean(ps.ps), 2.0, atol=0.01, rtol=0.01)

    def test_leahy_norm_total_variance(self):
        """
        In Leahy normalization, the total variance should be the sum of
        powers multiplied by the number of counts and divided by the
        square of the number of data points in the light curve
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        ps_var = (np.sum(self.lc.counts) / ps.n ** 2.) * \
                 (np.sum(ps.ps[:-1]) + ps.ps[-1] / 2.)
        assert np.isclose(ps_var, np.var(self.lc.counts), atol=0.01)

    def test_fractional_rms_in_leahy_norm(self):
        """
        fractional rms should only be *approximately* equal the standard
        deviation divided by the mean of the light curve. Therefore, we allow
        for a larger tolerance in np.isclose()
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        rms_ps, rms_err = ps.compute_rms(min_freq=ps.freq[0],
                                         max_freq=ps.freq[-1])
        rms_lc = np.std(self.lc.counts) / np.mean(self.lc.counts)
        assert np.isclose(rms_ps, rms_lc, atol=0.01)

    def test_fractional_rms_error(self):
        """
        TODO: Need to write a test for the fractional rms error.
        But I don't know how!
        """
        pass

    def test_rebin_makes_right_attributes(self):
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        # replace powers
        ps.ps = np.ones_like(ps.ps) * 2.0
        rebin_factor = 2.0
        bin_ps = ps.rebin(rebin_factor * ps.df)
        assert bin_ps.freq is not None
        assert bin_ps.ps is not None
        assert bin_ps.df == rebin_factor * 1.0 / self.lc.tseg
        assert bin_ps.norm.lower() == "leahy"
        assert bin_ps.m == 2
        assert bin_ps.n == self.lc.time.shape[0]
        assert bin_ps.nphots == np.sum(self.lc.counts)

    def test_rebin_uses_mean(self):
        """
        Make sure the rebin-method uses "mean" to average instead of summing
        powers by default, and that this is not changed in the future!
        Note: function defaults come as a tuple, so the first keyword argument
        had better be 'method'
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        assert ps.rebin.__defaults__[0] == "mean"

    def rebin_several(self, df):
        """
        TODO: Not sure how to write tests for the rebin method!
        """
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        bin_ps = ps.rebin(df)
        assert np.isclose(bin_ps.freq[0], bin_ps.df, atol=1e-4, rtol=1e-4)

    def test_rebin(self):
        # nose-style generator test: yields one check per bin width.
        df_all = [2, 3, 5, 1.5, 1, 85]
        for df in df_all:
            yield self.rebin_several, df

    def test_classical_significances_runs(self):
        ps = Powerspectrum(lc=self.lc, norm="Leahy")
        ps.classical_significances()

    @raises(AssertionError)
    def test_classical_significances_fails_in_rms(self):
        # Classical p-values are only defined for Leahy normalization.
        ps = Powerspectrum(lc=self.lc, norm="rms")
        ps.classical_significances()

    def test_classical_significances_threshold(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        # change the powers so that just one exceeds the threshold
        ps.ps = np.zeros(ps.ps.shape[0]) + 2.0
        index = 1
        ps.ps[index] = 10.0
        threshold = 0.01
        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=False)
        assert pval[0, 0] < threshold
        assert pval[1, 0] == index

    def test_classical_significances_trial_correction(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        # change the powers so that just one exceeds the threshold
        ps.ps = np.zeros(ps.ps.shape[0]) + 2.0
        index = 1
        ps.ps[index] = 10.0
        threshold = 0.01
        # With trial correction the single outlier no longer passes.
        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=True)
        assert np.size(pval) == 0

    def test_pvals_is_numpy_array(self):
        ps = Powerspectrum(lc=self.lc, norm="leahy")
        # change the powers so that just one exceeds the threshold
        ps.ps = np.zeros(ps.ps.shape[0]) + 2.0
        index = 1
        ps.ps[index] = 10.0
        threshold = 1.0
        pval = ps.classical_significances(threshold=threshold,
                                          trial_correction=True)
        assert isinstance(pval, np.ndarray)
        assert pval.shape[0] == 2
class TestAveragedPowerspectrum(object):
    """Tests for AveragedPowerspectrum (segment-averaged periodograms)."""

    @classmethod
    def setup_class(cls):
        # Ten seconds of Poisson counts at dt = 1e-4 s, ~1000 cts/s.
        tstart = 0.0
        tend = 10.0
        dt = 0.0001
        time = np.linspace(tstart, tend, int((tend-tstart)/dt))
        mean_count_rate = 1000.0
        mean_counts = mean_count_rate*dt
        poisson_counts = np.random.poisson(mean_counts,
                                           size=time.shape[0])
        cls.lc = Lightcurve(time, counts=poisson_counts)

    def test_one_segment(self):
        # Segment size equal to the full light curve gives a single segment.
        segment_size = self.lc.tseg
        ps = AveragedPowerspectrum(self.lc, segment_size)
        assert np.isclose(ps.segment_size, segment_size)

    def test_n_segments(self):
        # nose-style generator test over several segment counts.
        nseg_all = [1, 2, 3, 5, 10, 20, 100]
        for nseg in nseg_all:
            yield self.check_segment_size, nseg

    def check_segment_size(self, nseg):
        segment_size = self.lc.tseg/nseg
        ps = AveragedPowerspectrum(self.lc, segment_size)
        assert ps.m == nseg

    def test_segments_with_leftover(self):
        # A trailing piece shorter than segment_size is dropped: still m == 2.
        segment_size = self.lc.tseg/2. - 1.
        ps = AveragedPowerspectrum(self.lc, segment_size)
        assert np.isclose(ps.segment_size, segment_size)
        assert ps.m == 2

    @raises(TypeError)
    def test_init_without_segment(self):
        assert AveragedPowerspectrum(self.lc)

    @raises(TypeError)
    def test_init_with_nonsense_segment(self):
        segment_size = "foo"
        assert AveragedPowerspectrum(self.lc, segment_size)

    @raises(TypeError)
    def test_init_with_none_segment(self):
        segment_size = None
        assert AveragedPowerspectrum(self.lc, segment_size)

    @raises(AssertionError)
    def test_init_with_inf_segment(self):
        segment_size = np.inf
        assert AveragedPowerspectrum(self.lc, segment_size)

    @raises(AssertionError)
    def test_init_with_nan_segment(self):
        segment_size = np.nan
        assert AveragedPowerspectrum(self.lc, segment_size)

    def test_list_of_light_curves(self):
        # A list of Lightcurve objects is a valid input as well.
        n_lcs = 10
        tstart = 0.0
        tend = 1.0
        dt = 0.0001
        time = np.linspace(tstart, tend, int((tend-tstart)/dt))
        mean_count_rate = 1000.0
        mean_counts = mean_count_rate*dt
        lc_all = []
        for n in range(n_lcs):
            poisson_counts = np.random.poisson(mean_counts,
                                               size=len(time))
            lc = Lightcurve(time, counts=poisson_counts)
            lc_all.append(lc)
        segment_size = 0.5
        assert AveragedPowerspectrum(lc_all, segment_size)

    @raises(AssertionError)
    def test_list_with_nonsense_component(self):
        # A non-Lightcurve element in the list must be rejected.
        n_lcs = 10
        tstart = 0.0
        tend = 1.0
        dt = 0.0001
        time = np.linspace(tstart, tend, int((tend-tstart)/dt))
        mean_count_rate = 1000.0
        mean_counts = mean_count_rate*dt
        lc_all = []
        for n in range(n_lcs):
            poisson_counts = np.random.poisson(mean_counts,
                                               size=len(time))
            lc = Lightcurve(time, counts=poisson_counts)
            lc_all.append(lc)
        lc_all.append(1.0)
        segment_size = 0.5
        assert AveragedPowerspectrum(lc_all, segment_size)

    def test_leahy_correct_for_multiple(self):
        # Averaging n Leahy periodograms of pure noise: mean stays 2,
        # scatter shrinks as 2/sqrt(n).
        n = 100
        lc_all = []
        for i in range(n):
            time = np.arange(0.0, 10.0, 10./100000)
            counts = np.random.poisson(1000, size=time.shape[0])
            lc = Lightcurve(time, counts)
            lc_all.append(lc)
        ps = AveragedPowerspectrum(lc_all, 10.0, norm="leahy")
        assert np.isclose(np.mean(ps.ps), 2.0, atol=1e-3, rtol=1e-3)
        assert np.isclose(np.std(ps.ps), 2.0/np.sqrt(n), atol=0.1, rtol=0.1)
class TestClassicalSignificances(object):
    """Input validation and monotonicity tests for classical_pvalue()."""

    def test_function_runs(self):
        power = 2.0
        nspec = 1.0
        classical_pvalue(power, nspec)

    # --- power argument validation -------------------------------------

    @raises(AssertionError)
    def test_power_is_not_infinite(self):
        power = np.inf
        nspec = 1
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_power_is_not_infinite2(self):
        power = -np.inf
        nspec = 1
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_power_is_non_nan(self):
        power = np.nan
        nspec = 1
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_power_is_positive(self):
        power = -2.0
        nspec = 1.0
        classical_pvalue(power, nspec)

    # --- nspec argument validation -------------------------------------

    @raises(AssertionError)
    def test_nspec_is_not_infinite(self):
        power = 2.0
        nspec = np.inf
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_nspec_is_not_infinite2(self):
        power = 2.0
        nspec = -np.inf
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_nspec_is_not_nan(self):
        power = 2.0
        nspec = np.nan
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_nspec_is_positive(self):
        power = 2.0
        nspec = -1.0
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_nspec_is_nonzero(self):
        power = 2.0
        nspec = 0.0
        classical_pvalue(power, nspec)

    @raises(AssertionError)
    def test_nspec_is_an_integer_number(self):
        power = 2.0
        nspec = 2.5
        classical_pvalue(power, nspec)

    def test_nspec_float_type_okay(self):
        # A float carrying an integral value (2.0) must be accepted.
        power = 2.0
        nspec = 2.0
        classical_pvalue(power, nspec)

    # --- behaviour -----------------------------------------------------

    def test_pvalue_decreases_with_increasing_power(self):
        power1 = 2.0
        power2 = 20.0
        nspec = 1.0
        pval1 = classical_pvalue(power1, nspec)
        pval2 = classical_pvalue(power2, nspec)
        assert pval1-pval2 > 0.0

    def test_pvalue_must_decrease_with_increasing_nspec(self):
        power = 3.0
        nspec1 = 1.0
        nspec2 = 10.0
        pval1 = classical_pvalue(power, nspec1)
        pval2 = classical_pvalue(power, nspec2)
        assert pval1-pval2 > 0.0

    def test_very_large_powers_produce_zero_prob(self):
        # Guards against numerical underflow blowing up for huge powers.
        power = 31000.0
        nspec = 1
        pval = classical_pvalue(power, nspec)
        assert np.isclose(pval, 0.0)
| mit |
laslabs/odoo | addons/sale/tests/test_sale_order.py | 40 | 7670 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.exceptions import UserError, AccessError
from test_sale_common import TestSale
class TestSaleOrder(TestSale):
    # NOTE(review): uses dict.iteritems() and the `openerp` namespace, so this
    # module targets Python 2 / an older Odoo release — confirm before porting.

    def test_sale_order(self):
        """ Test the sale order flow (invoicing and quantity updates)
        - Invoice repeatedly while varrying delivered quantities and check that invoice are always what we expect
        """
        # DBO TODO: validate invoice and register payments
        inv_obj = self.env['account.invoice']
        # One order line per demo product, 2 units each at list price.
        so = self.env['sale.order'].create({
            'partner_id': self.partner.id,
            'partner_invoice_id': self.partner.id,
            'partner_shipping_id': self.partner.id,
            'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
            'pricelist_id': self.env.ref('product.list0').id,
        })
        self.assertEqual(so.amount_total, sum([2 * p.list_price for (k, p) in self.products.iteritems()]), 'Sale: total amount is wrong')
        # send quotation
        so.force_quotation_send()
        self.assertTrue(so.state == 'sent', 'Sale: state after sending is wrong')
        # confirm quotation
        so.action_confirm()
        self.assertTrue(so.state == 'sale')
        self.assertTrue(so.invoice_status == 'to invoice')
        # create invoice: only 'invoice on order' products are invoiced
        inv_id = so.action_invoice_create()
        inv = inv_obj.browse(inv_id)
        self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: invoice is missing lines')
        self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'order' else 0 for (k, p) in self.products.iteritems()]), 'Sale: invoice total amount is wrong')
        self.assertTrue(so.invoice_status == 'no', 'Sale: SO status after invoicing should be "nothing to invoice"')
        self.assertTrue(len(so.invoice_ids) == 1, 'Sale: invoice is missing')
        # deliver lines except 'time and material' then invoice again
        for line in so.order_line:
            line.qty_delivered = 2 if line.product_id.invoice_policy in ['order', 'delivery'] else 0
        self.assertTrue(so.invoice_status == 'to invoice', 'Sale: SO status after delivery should be "to invoice"')
        inv_id = so.action_invoice_create()
        inv = inv_obj.browse(inv_id)
        self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: second invoice is missing lines')
        self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'delivery' else 0 for (k, p) in self.products.iteritems()]), 'Sale: second invoice total amount is wrong')
        self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything should be "invoiced"')
        self.assertTrue(len(so.invoice_ids) == 2, 'Sale: invoice is missing')
        # go over the sold quantity
        for line in so.order_line:
            if line.product_id == self.products['serv_order']:
                line.qty_delivered = 10
        self.assertTrue(so.invoice_status == 'upselling', 'Sale: SO status after increasing delivered qty higher than ordered qty should be "upselling"')
        # upsell and invoice
        for line in so.order_line:
            if line.product_id == self.products['serv_order']:
                line.product_uom_qty = 10
        inv_id = so.action_invoice_create()
        inv = inv_obj.browse(inv_id)
        self.assertEqual(len(inv.invoice_line_ids), 1, 'Sale: third invoice is missing lines')
        self.assertEqual(inv.amount_total, 8 * self.products['serv_order'].list_price, 'Sale: second invoice total amount is wrong')
        self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything (including the upsel) should be "invoiced"')

    def test_unlink_cancel(self):
        """ Test deleting and cancelling sale orders depending on their state and on the user's rights """
        so = self.env['sale.order'].create({
            'partner_id': self.partner.id,
            'partner_invoice_id': self.partner.id,
            'partner_shipping_id': self.partner.id,
            'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
            'pricelist_id': self.env.ref('product.list0').id,
        })
        # only quotations are deletable
        with self.assertRaises(UserError):
            so.action_confirm()
            so.unlink()
        so_copy = so.copy()
        # A plain user may not delete; a sales manager may delete a quotation.
        with self.assertRaises(AccessError):
            so_copy.sudo(self.user).unlink()
        self.assertTrue(so_copy.sudo(self.manager).unlink(), 'Sale: deleting a quotation should be possible')
        # cancelling and setting to done, you should not be able to delete any SO ever
        so.action_cancel()
        self.assertTrue(so.state == 'cancel', 'Sale: cancelling SO should always be possible')
        with self.assertRaises(UserError):
            so.sudo(self.manager).unlink()
        so.action_done()
        self.assertTrue(so.state == 'done', 'Sale: SO not done')

    def test_cost_invoicing(self):
        """ Test confirming a vendor invoice to reinvoice cost on the so """
        serv_cost = self.env.ref('product.product_product_1b')
        prod_gap = self.env.ref('product.product_product_1')
        so = self.env['sale.order'].create({
            'partner_id': self.partner.id,
            'partner_invoice_id': self.partner.id,
            'partner_shipping_id': self.partner.id,
            'order_line': [(0, 0, {'name': prod_gap.name, 'product_id': prod_gap.id, 'product_uom_qty': 2, 'product_uom': prod_gap.uom_id.id, 'price_unit': prod_gap.list_price})],
            'pricelist_id': self.env.ref('product.list0').id,
        })
        so.action_confirm()
        so._create_analytic_account()
        # Build a vendor bill charged to the SO's analytic account.
        inv_partner = self.env.ref('base.res_partner_2')
        company = self.env.ref('base.main_company')
        journal = self.env['account.journal'].create({'name': 'Purchase Journal - Test', 'code': 'STPJ', 'type': 'purchase', 'company_id': company.id})
        account_payable = self.env['account.account'].create({'code': 'X1111', 'name': 'Sale - Test Payable Account', 'user_type_id': self.env.ref('account.data_account_type_payable').id, 'reconcile': True})
        account_income = self.env['account.account'].create({'code': 'X1112', 'name': 'Sale - Test Account', 'user_type_id': self.env.ref('account.data_account_type_direct_costs').id})
        invoice_vals = {
            'name': '',
            'type': 'in_invoice',
            'partner_id': inv_partner.id,
            'invoice_line_ids': [(0, 0, {'name': serv_cost.name, 'product_id': serv_cost.id, 'quantity': 2, 'uom_id': serv_cost.uom_id.id, 'price_unit': serv_cost.standard_price, 'account_analytic_id': so.project_id.id, 'account_id': account_income.id})],
            'account_id': account_payable.id,
            'journal_id': journal.id,
            'currency_id': company.currency_id.id,
        }
        inv = self.env['account.invoice'].create(invoice_vals)
        inv.signal_workflow('invoice_open')
        # Validating the bill must add a cost line on the sale order.
        sol = so.order_line.filtered(lambda l: l.product_id == serv_cost)
        self.assertTrue(sol, 'Sale: cost invoicing does not add lines when confirming vendor invoice')
        self.assertTrue(sol.price_unit == 160 and sol.qty_delivered == 2 and sol.product_uom_qty == sol.qty_invoiced == 0, 'Sale: line is wrong after confirming vendor invoice')
1a1a11a/mimircache | PyMimircache/__init__.py | 1 | 1336 | # coding=utf-8
""" PyMimircache a cache trace analysis platform.
.. moduleauthor:: Juncheng Yang <peter.waynechina@gmail.com>, Ymir Vigfusson
"""
import os
import sys

# Force a non-interactive backend before anything else imports pyplot;
# fall back gracefully when matplotlib is missing or misconfigured.
try:
    import matplotlib
    matplotlib.use('Agg')
except Exception as e:
    print("WARNING: {}, fail to import matplotlib, "
          "plotting function may be limited".format(e), file=sys.stderr)

# Make the working directory and its parent importable so the package can
# be used without being installed.
cwd = os.getcwd()
sys.path.extend([cwd, os.path.join(cwd, "..")])

from PyMimircache.const import *

# INSTALL_PHASE comes from PyMimircache.const; during installation the
# reader/profiler submodules are skipped (presumably because compiled
# extensions are not built yet — TODO confirm).
if not INSTALL_PHASE:
    from PyMimircache.cacheReader.binaryReader import BinaryReader
    from PyMimircache.cacheReader.vscsiReader import VscsiReader
    from PyMimircache.cacheReader.csvReader import CsvReader
    from PyMimircache.cacheReader.plainReader import PlainReader
    from PyMimircache.profiler.cLRUProfiler import CLRUProfiler as CLRUProfiler
    from PyMimircache.profiler.cGeneralProfiler import CGeneralProfiler
    from PyMimircache.profiler.pyGeneralProfiler import PyGeneralProfiler
    from PyMimircache.profiler.cHeatmap import CHeatmap
    from PyMimircache.profiler.pyHeatmap import PyHeatmap
    from PyMimircache.top.cachecow import Cachecow

from PyMimircache.version import __version__
# logging.basicConfig(filename="log", filemode='w', format='%(levelname)s:%(asctime)s:%(message)s', level=logging.DEBUG)
| gpl-3.0 |
davidyezsetz/kuma | vendor/packages/sqlalchemy/test/orm/inheritance/test_polymorph.py | 7 | 13368 | """tests basic polymorphic mapper loading/saving, minimal relationships"""
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.test import Column, testing
from sqlalchemy.util import function_named
from test.orm import _fixtures, _base
class Person(_fixtures.Base):
    # Base entity mapped to the ``people`` table in the tests below.
    pass
class Engineer(Person):
    # Subclass mapped to ``engineers`` (joined-table inheritance).
    pass
class Manager(Person):
    # Subclass mapped to ``managers`` (joined-table inheritance).
    pass
class Boss(Manager):
    # Second-level subclass mapped to ``boss``.
    pass
class Company(_fixtures.Base):
    # Owner side of the Company -> employees relationship.
    pass
class PolymorphTest(_base.MappedTest):
    """Shared fixture: companies/people plus engineer/manager/boss subtables."""

    @classmethod
    def define_tables(cls, metadata):
        # Tables are published as module globals so the mapper-configuration
        # code in the subclasses below can reference them directly.
        global companies, people, engineers, managers, boss

        companies = Table('companies', metadata,
            Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('name', String(50)))

        # ``people.type`` is the polymorphic discriminator column.
        people = Table('people', metadata,
            Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('company_id', Integer, ForeignKey('companies.company_id')),
            Column('name', String(50)),
            Column('type', String(30)))

        engineers = Table('engineers', metadata,
            Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True),
            Column('status', String(30)),
            Column('engineer_name', String(50)),
            Column('primary_language', String(50)),
           )

        managers = Table('managers', metadata,
            Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True),
            Column('status', String(30)),
            Column('manager_name', String(50))
           )

        boss = Table('boss', metadata,
            Column('boss_id', Integer, ForeignKey('managers.person_id'), primary_key=True),
            Column('golf_swing', String(30)),
            )

        metadata.create_all()
class InsertOrderTest(PolymorphTest):
    def test_insert_order(self):
        """test that classes of multiple types mix up mapper inserts
        so that insert order of individual tables is maintained"""
        person_join = polymorphic_union(
            {
                'engineer':people.join(engineers),
                'manager':people.join(managers),
                'person':people.select(people.c.type=='person'),
            }, None, 'pjoin')

        person_mapper = mapper(Person, people, with_polymorphic=('*', person_join), polymorphic_on=person_join.c.type, polymorphic_identity='person')
        mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer')
        mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager')
        mapper(Company, companies, properties={
            'employees': relationship(Person,
                                  backref='company',
                                  order_by=person_join.c.person_id)
        })

        session = create_session()
        # Interleave Manager/Engineer/Person instances on purpose.
        c = Company(name='company1')
        c.employees.append(Manager(status='AAB', manager_name='manager1', name='pointy haired boss'))
        c.employees.append(Engineer(status='BBA', engineer_name='engineer1', primary_language='java', name='dilbert'))
        c.employees.append(Person(status='HHH', name='joesmith'))
        c.employees.append(Engineer(status='CGG', engineer_name='engineer2', primary_language='python', name='wally'))
        c.employees.append(Manager(status='ABA', manager_name='manager2', name='jsmith'))
        session.add(c)
        session.flush()
        session.expunge_all()
        # Reload from the database and verify the collection round-trips.
        eq_(session.query(Company).get(c.company_id), c)
class RelationshipToSubclassTest(PolymorphTest):
    def test_basic(self):
        """test a relationship to an inheriting mapper where the relationship is to a subclass
        but the join condition is expressed by the parent table.

        also test that backrefs work in this case.

        this test touches upon a lot of the join/foreign key determination code in properties.py
        and creates the need for properties.py to search for conditions individually within
        the mapper's local table as well as the mapper's 'mapped' table, so that relationships
        requiring lots of specificity (like self-referential joins) as well as relationships requiring
        more generalization (like the example here) both come up with proper results."""

        mapper(Person, people)
        mapper(Engineer, engineers, inherits=Person)
        mapper(Manager, managers, inherits=Person)
        # Join condition lives on ``people`` even though the target is Manager.
        mapper(Company, companies, properties={
            'managers': relationship(Manager, backref="company")
        })

        sess = create_session()

        c = Company(name='company1')
        c.managers.append(Manager(status='AAB', manager_name='manager1', name='pointy haired boss'))
        sess.add(c)
        sess.flush()
        sess.expunge_all()

        eq_(sess.query(Company).filter_by(company_id=c.company_id).one(), c)
        # The backref must resolve to the identical Company instance.
        assert c.managers[0].company is c
class RoundTripTest(PolymorphTest):
    # NOTE(review): presumably populated dynamically by
    # _generate_round_trip_test() below — confirm against the full module.
    pass
def _generate_round_trip_test(include_base, lazy_relationship, redefine_colprop, with_polymorphic):
    """generates a round trip test and attaches it to RoundTripTest.
    include_base - whether or not to include the base 'person' type in the union.
    lazy_relationship - whether or not the Company relationship to People is lazy or eager.
    redefine_colprop - if we redefine the 'name' column to be 'people_name' on the base Person class
    with_polymorphic - polymorphic loading strategy to map with: one of
    'unions', 'joins', 'auto' or 'none'.
    """
    def test_roundtrip(self):
        # Build the polymorphic selectable(s) for the strategy under test.
        if with_polymorphic == 'unions':
            if include_base:
                person_join = polymorphic_union(
                    {
                        'engineer':people.join(engineers),
                        'manager':people.join(managers),
                        'person':people.select(people.c.type=='person'),
                    }, None, 'pjoin')
            else:
                person_join = polymorphic_union(
                    {
                        'engineer':people.join(engineers),
                        'manager':people.join(managers),
                    }, None, 'pjoin')
            manager_join = people.join(managers).outerjoin(boss)
            person_with_polymorphic = ['*', person_join]
            manager_with_polymorphic = ['*', manager_join]
        elif with_polymorphic == 'joins':
            person_join = people.outerjoin(engineers).outerjoin(managers).outerjoin(boss)
            manager_join = people.join(managers).outerjoin(boss)
            person_with_polymorphic = ['*', person_join]
            manager_with_polymorphic = ['*', manager_join]
        elif with_polymorphic == 'auto':
            person_with_polymorphic = '*'
            manager_with_polymorphic = '*'
        else:
            person_with_polymorphic = None
            manager_with_polymorphic = None
        # Optionally remap the 'name' column under a different attribute name
        # to exercise column-property redefinition on the base mapper.
        if redefine_colprop:
            person_mapper = mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person', properties= {'person_name':people.c.name})
        else:
            person_mapper = mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person')
        mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer')
        mapper(Manager, managers, inherits=person_mapper, with_polymorphic=manager_with_polymorphic, polymorphic_identity='manager')
        mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss')
        mapper(Company, companies, properties={
            'employees': relationship(Person, lazy=lazy_relationship,
                cascade="all, delete-orphan",
                backref="company", order_by=people.c.person_id
                )
        })
        if redefine_colprop:
            person_attribute_name = 'person_name'
        else:
            person_attribute_name = 'name'
        employees = [
            Manager(status='AAB', manager_name='manager1', **{person_attribute_name:'pointy haired boss'}),
            Engineer(status='BBA', engineer_name='engineer1', primary_language='java', **{person_attribute_name:'dilbert'}),
        ]
        if include_base:
            employees.append(Person(**{person_attribute_name:'joesmith'}))
        employees += [
            Engineer(status='CGG', engineer_name='engineer2', primary_language='python', **{person_attribute_name:'wally'}),
            Manager(status='ABA', manager_name='manager2', **{person_attribute_name:'jsmith'})
        ]
        pointy = employees[0]
        jsmith = employees[-1]
        dilbert = employees[1]
        session = create_session()
        c = Company(name='company1')
        c.employees = employees
        session.add(c)
        session.flush()
        session.expunge_all()
        eq_(session.query(Person).get(dilbert.person_id), dilbert)
        session.expunge_all()
        eq_(session.query(Person).filter(Person.person_id==dilbert.person_id).one(), dilbert)
        session.expunge_all()
        def go():
            cc = session.query(Company).get(c.company_id)
            eq_(cc.employees, employees)
        # Expected statement counts depend on both the loading strategy and
        # whether subclass tables are pulled in polymorphically.
        if not lazy_relationship:
            if with_polymorphic != 'none':
                self.assert_sql_count(testing.db, go, 1)
            else:
                self.assert_sql_count(testing.db, go, 5)
        else:
            if with_polymorphic != 'none':
                self.assert_sql_count(testing.db, go, 2)
            else:
                self.assert_sql_count(testing.db, go, 6)
        # test selecting from the query, using the base mapped table (people) as the selection criterion.
        # in the case of the polymorphic Person query, the "people" selectable should be adapted to be "person_join"
        eq_(
            session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first(),
            dilbert
        )
        assert session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first().person_id
        eq_(
            session.query(Engineer).filter(getattr(Person, person_attribute_name)=='dilbert').first(),
            dilbert
        )
        # test selecting from the query, joining against an alias of the base "people" table. test that
        # the "palias" alias does *not* get sucked up into the "person_join" conversion.
        palias = people.alias("palias")
        dilbert = session.query(Person).get(dilbert.person_id)
        assert dilbert is session.query(Person).filter((palias.c.name=='dilbert') & (palias.c.person_id==Person.person_id)).first()
        assert dilbert is session.query(Engineer).filter((palias.c.name=='dilbert') & (palias.c.person_id==Person.person_id)).first()
        assert dilbert is session.query(Person).filter((Engineer.engineer_name=="engineer1") & (engineers.c.person_id==people.c.person_id)).first()
        assert dilbert is session.query(Engineer).filter(Engineer.engineer_name=="engineer1")[0]
        dilbert.engineer_name = 'hes dibert!'
        session.flush()
        session.expunge_all()
        def go():
            session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first()
        self.assert_sql_count(testing.db, go, 1)
        session.expunge_all()
        dilbert = session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first()
        def go():
            # assert that only primary table is queried for already-present-in-session
            d = session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first()
        self.assert_sql_count(testing.db, go, 1)
        # test standalone orphans
        daboss = Boss(status='BBB', manager_name='boss', golf_swing='fore', **{person_attribute_name:'daboss'})
        session.add(daboss)
        assert_raises(orm_exc.FlushError, session.flush)
        c = session.query(Company).first()
        daboss.company = c
        manager_list = [e for e in c.employees if isinstance(e, Manager)]
        session.flush()
        session.expunge_all()
        eq_(session.query(Manager).order_by(Manager.person_id).all(), manager_list)
        c = session.query(Company).first()
        session.delete(c)
        session.flush()
        # delete-orphan cascade removed every Person row with the Company.
        eq_(people.count().scalar(), 0)
    # Give the generated test a descriptive name encoding its parameters,
    # then attach it to the RoundTripTest container class.
    test_roundtrip = function_named(
        test_roundtrip, "test_%s%s%s_%s" % (
        (lazy_relationship and "lazy" or "eager"),
        (include_base and "_inclbase" or ""),
        (redefine_colprop and "_redefcol" or ""),
        with_polymorphic))
    setattr(RoundTripTest, test_roundtrip.__name__, test_roundtrip)
# Attach one generated round-trip test to RoundTripTest for every combination
# of loading strategy, column redefinition and with_polymorphic mode; only
# the 'unions' mode additionally varies whether the base Person type is part
# of the polymorphic union.
for lazy_relationship in (True, False):
    for redefine_colprop in (True, False):
        for with_polymorphic in ('unions', 'joins', 'auto', 'none'):
            base_choices = (True, False) if with_polymorphic == 'unions' else (False,)
            for include_base in base_choices:
                _generate_round_trip_test(include_base, lazy_relationship, redefine_colprop, with_polymorphic)
| mpl-2.0 |
crazy-canux/django | tests/delete/tests.py | 222 | 18346 | from __future__ import unicode_literals
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils.six.moves import range
from .models import (
A, M, MR, R, S, T, Avatar, Base, Child, HiddenUser, HiddenUserProfile,
M2MFrom, M2MTo, MRNull, Parent, RChild, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
    """Exercise each ``on_delete`` behavior declared on the ``A`` model's
    foreign keys (CASCADE, SET_NULL, SET_DEFAULT, SET(...), PROTECT,
    DO_NOTHING), including the one-to-one and inheritance variants."""
    def setUp(self):
        # Pk of the shared default R row used by the SET_DEFAULT/SET fields.
        self.DEFAULT = get_default_r()
    def test_auto(self):
        # Default on_delete is CASCADE: deleting the target removes ``a``.
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())
    def test_auto_nullable(self):
        # CASCADE applies even when the FK column is nullable.
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())
    def test_setvalue(self):
        # SET(<value>): FK is repointed at the default row, not deleted.
        a = create_a('setvalue')
        a.setvalue.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue.pk)
    def test_setnull(self):
        # SET_NULL: FK becomes NULL, the referring row survives.
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setnull)
    def test_setdefault(self):
        # SET_DEFAULT with a concrete default: FK reset to that default.
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault.pk)
    def test_setdefault_none(self):
        # SET_DEFAULT where the field default is None behaves like SET_NULL.
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setdefault_none)
    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())
    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
    def test_protect(self):
        # PROTECT: deleting a referenced row raises instead of cascading.
        a = create_a('protect')
        self.assertRaises(IntegrityError, a.protect.delete)
    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()
        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)
    def test_do_nothing_qscount(self):
        """
        Test that a models.DO_NOTHING relation doesn't trigger a query.
        """
        b = Base.objects.create()
        with self.assertNumQueries(1):
            # RelToBase should not be queried.
            b.delete()
        self.assertEqual(Base.objects.count(), 0)
    def test_inheritance_cascade_up(self):
        # Deleting a child MTI row also deletes its parent-table row.
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())
    def test_inheritance_cascade_down(self):
        # Deleting the parent row cascades down to the child MTI row.
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())
    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)
    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)
    def test_o2o_setnull(self):
        # SET_NULL on a OneToOneField behaves like the FK case.
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
    """Tests of the deletion collector itself: m2m cleanup, batching,
    signal ordering, constraint-check deferral and return values of
    ``Model.delete()`` / ``QuerySet.delete()``."""
    def test_m2m(self):
        # Deleting either end of an m2m (explicit through, auto-created,
        # or nullable through) must clean up the intermediary rows.
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())
        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())
        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())
        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # The nullable-through row survives (its FK to R is set to NULL
        # rather than cascaded).  Was the double negative
        # ``assertFalse(not ...)``; this is the equivalent positive form.
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())
    def test_bulk(self):
        # Related objects are fetched and deleted in GET_ITERATOR_CHUNK_SIZE
        # batches rather than one query per row.
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())
    def test_instance_update(self):
        # After deletion, in-memory instances get pk=None, and SET_NULL
        # related objects have their FK nulled in the database.
        deleted = []
        related_setnull_sets = []
        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                # Snapshot the pks of the SET_NULL dependents before they
                # are updated by the collector.
                related_setnull_sets.append([a.pk for a in obj.setnull_set.all()])
        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()
        a = create_a('update_cascade')
        a.cascade.delete()
        for obj in deleted:
            self.assertIsNone(obj.pk)
        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)
        models.signals.pre_delete.disconnect(pre_delete)
    def test_deletion_order(self):
        # Children are deleted before parents; pre_delete for a batch fires
        # in insertion order while post_delete fires in reverse.
        # (The original version had the handler names crossed: the
        # post_delete handler appended to pre_delete_order and vice versa.
        # Handlers and expected lists are now wired consistently; the
        # underlying assertions are unchanged.)
        pre_delete_order = []
        post_delete_order = []
        def log_pre_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))
        def log_post_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))
        models.signals.pre_delete.connect(log_pre_delete)
        models.signals.post_delete.connect(log_post_delete)
        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
        )
        models.signals.pre_delete.disconnect(log_pre_delete)
        models.signals.post_delete.disconnect(log_post_delete)
    def test_relational_post_delete_signals_happen_before_parent_object(self):
        # post_delete for a child must fire while its parent row still
        # exists in the database.
        deletions = []
        def log_post_delete(instance, **kwargs):
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)
        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)
        models.signals.post_delete.connect(log_post_delete, sender=S)
        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)
        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)
    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)
    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)
    def test_hidden_related(self):
        # Relations with related_name='+' (hidden) are still collected.
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)
        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)
    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = int(ceil(float(len(objs)) / batch_size))
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())
    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)
        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
        # TEST_SIZE // batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        # NOTE(review): ceil() over a floor division (//) is a no-op, so this
        # only equals the true batch count when batch_size divides TEST_SIZE
        # evenly — presumably true for the backends this runs on; confirm
        # before "fixing" to a true ceiling.
        expected_num_queries = (ceil(TEST_SIZE // batch_size) +
            ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())
    def test_delete_with_keeping_parents(self):
        # keep_parents=True removes only the child MTI row.
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())
    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)
        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)
    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        h2 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        HiddenUserProfile.objects.create(user=h2)
        m1 = M.objects.create()
        m2 = M.objects.create()
        MR.objects.create(r=r, m=m1)
        r.m_set.add(m1)
        r.m_set.add(m2)
        r.save()
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
            M.m2m.through._meta.label: M.m2m.through.objects.count(),
        }
        deleted, deleted_objs = r.delete()
        self.assertEqual(deleted, sum(existed_objs.values()))
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)
class FastDeleteTests(TestCase):
    """Verify the 'fast delete' optimization: objects with no signal
    receivers and no further cascades are deleted with a single DELETE
    instead of being fetched into memory first."""
    def test_fast_delete_fk(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to fast-delete the user
        # 1 query to delete the avatar
        self.assertNumQueries(2, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
    def test_fast_delete_m2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete m2m for f
        self.assertNumQueries(2, f.delete)
    def test_fast_delete_revm2m(self):
        # Same as above but exercising the reverse side of the m2m.
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete t, 1 to fast-delete t's m_set
        self.assertNumQueries(2, f.delete)
    def test_fast_delete_qs(self):
        # A filtered queryset with no cascades deletes in one query and
        # leaves unrelated rows untouched.
        u1 = User.objects.create()
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())
    def test_fast_delete_joined_qs(self):
        # A queryset that requires a join needs an extra query on backends
        # where DELETE cannot self-select (update_can_self_select=False).
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        expected_queries = 1 if connection.features.update_can_self_select else 2
        self.assertNumQueries(expected_queries,
                              User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())
    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        # However, this doesn't work as child.parent access creates a query,
        # and this means we will be generating extra queries (a lot for large
        # querysets). This is not a fast-delete problem.
        # self.assertNumQueries(2, c.delete)
        c.delete()
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())
    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)
| bsd-3-clause |
bitex-coin/backend | mailer/mandrill.py | 12 | 203026 | import requests, os.path, logging, sys, time
try:
import ujson as json
except ImportError:
try:
import simplejson as json
except ImportError:
import json
# Exception hierarchy for the Mandrill API client.  ``Error`` is the root;
# each subclass corresponds to one error 'name' returned by the Mandrill
# API and is selected via the ERROR_MAP lookup in Mandrill.cast_error().
class Error(Exception):
    pass
class ValidationError(Error):
    pass
class InvalidKeyError(Error):
    pass
class PaymentRequiredError(Error):
    pass
class UnknownSubaccountError(Error):
    pass
class UnknownTemplateError(Error):
    pass
class ServiceUnavailableError(Error):
    pass
class UnknownMessageError(Error):
    pass
class InvalidTagNameError(Error):
    pass
class InvalidRejectError(Error):
    pass
class UnknownSenderError(Error):
    pass
class UnknownUrlError(Error):
    pass
class UnknownTrackingDomainError(Error):
    pass
class InvalidTemplateError(Error):
    pass
class UnknownWebhookError(Error):
    pass
class UnknownInboundDomainError(Error):
    pass
class UnknownInboundRouteError(Error):
    pass
class UnknownExportError(Error):
    pass
class IPProvisionLimitError(Error):
    pass
class UnknownPoolError(Error):
    pass
class NoSendingHistoryError(Error):
    pass
class PoorReputationError(Error):
    pass
class UnknownIPError(Error):
    pass
class InvalidEmptyDefaultPoolError(Error):
    pass
class InvalidDeleteDefaultPoolError(Error):
    pass
class InvalidDeleteNonEmptyPoolError(Error):
    pass
class InvalidCustomDNSError(Error):
    pass
class InvalidCustomDNSPendingError(Error):
    pass
class MetadataFieldLimitError(Error):
    pass
class UnknownMetadataFieldError(Error):
    pass
# Base URL of the Mandrill REST API; every endpoint is POSTed to
# ROOT + <path> + '.json' (see Mandrill.call()).
ROOT = 'https://mandrillapp.com/api/1.0/'
# Maps the 'name' field of an error payload returned by the API to the
# specific exception class raised by Mandrill.cast_error(); names not
# listed here fall back to the generic Error.
ERROR_MAP = {
    'ValidationError': ValidationError,
    'Invalid_Key': InvalidKeyError,
    'PaymentRequired': PaymentRequiredError,
    'Unknown_Subaccount': UnknownSubaccountError,
    'Unknown_Template': UnknownTemplateError,
    'ServiceUnavailable': ServiceUnavailableError,
    'Unknown_Message': UnknownMessageError,
    'Invalid_Tag_Name': InvalidTagNameError,
    'Invalid_Reject': InvalidRejectError,
    'Unknown_Sender': UnknownSenderError,
    'Unknown_Url': UnknownUrlError,
    'Unknown_TrackingDomain': UnknownTrackingDomainError,
    'Invalid_Template': InvalidTemplateError,
    'Unknown_Webhook': UnknownWebhookError,
    'Unknown_InboundDomain': UnknownInboundDomainError,
    'Unknown_InboundRoute': UnknownInboundRouteError,
    'Unknown_Export': UnknownExportError,
    'IP_ProvisionLimit': IPProvisionLimitError,
    'Unknown_Pool': UnknownPoolError,
    'NoSendingHistory': NoSendingHistoryError,
    'PoorReputation': PoorReputationError,
    'Unknown_IP': UnknownIPError,
    'Invalid_EmptyDefaultPool': InvalidEmptyDefaultPoolError,
    'Invalid_DeleteDefaultPool': InvalidDeleteDefaultPoolError,
    'Invalid_DeleteNonEmptyPool': InvalidDeleteNonEmptyPoolError,
    'Invalid_CustomDNS': InvalidCustomDNSError,
    'Invalid_CustomDNSPending': InvalidCustomDNSPendingError,
    'Metadata_FieldLimit': MetadataFieldLimitError,
    'Unknown_MetadataField': UnknownMetadataFieldError
}
# Module-level logger writing to stderr; records are emitted at whatever
# level Mandrill.log() chooses per client (INFO when debug, else DEBUG).
logger = logging.getLogger('mandrill')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
class Mandrill(object):
    '''Synchronous client for the Mandrill REST API (v1.0).

    An instance holds the API key, a pooled ``requests`` session, and one
    attribute per API namespace (``templates``, ``messages``, ...), each of
    which issues requests through :meth:`call`.
    '''
    def __init__(self, apikey=None, debug=False):
        '''Initialize the API client
        Args:
            apikey (str|None): provide your Mandrill API key. If this is left as None, we will attempt to get the API key from the following locations::
                - MANDRILL_APIKEY in the environment vars
                - ~/.mandrill.key for the user executing the script
                - /etc/mandrill.key
            debug (bool): set to True to log all the request and response information to the "mandrill" logger at the INFO level. When set to false, it will log at the DEBUG level. By default it will write log entries to STDERR
        Raises:
            Error: if no API key is provided and none can be found
        '''
        self.session = requests.session()
        # The module logger emits at INFO, so DEBUG-level clients are quiet
        # unless the logger is reconfigured by the application.
        self.level = logging.INFO if debug else logging.DEBUG
        self.last_request = None  # details of the most recent API call
        if apikey is None:
            # An env var that is set (even to '') wins over the key files,
            # preserving the original lookup order.
            apikey = os.environ.get('MANDRILL_APIKEY')
            if apikey is None:
                apikey = self.read_configs()
        if apikey is None:
            raise Error('You must provide a Mandrill API key')
        self.apikey = apikey
        # One helper object per API namespace.
        self.templates = Templates(self)
        self.exports = Exports(self)
        self.users = Users(self)
        self.rejects = Rejects(self)
        self.inbound = Inbound(self)
        self.tags = Tags(self)
        self.messages = Messages(self)
        self.whitelists = Whitelists(self)
        self.ips = Ips(self)
        self.internal = Internal(self)
        self.subaccounts = Subaccounts(self)
        self.urls = Urls(self)
        self.webhooks = Webhooks(self)
        self.senders = Senders(self)
        self.metadata = Metadata(self)
    def call(self, url, params=None):
        '''Actually make the API call with the given params - this should only be called by the namespace methods - use the helpers in regular usage like m.tags.list()

        Returns the decoded JSON result; raises a mapped exception (via
        :meth:`cast_error`) when the API responds with a non-200 status.'''
        if params is None: params = {}
        params['key'] = self.apikey
        params = json.dumps(params)
        self.log('POST to %s%s.json: %s' % (ROOT, url, params))
        start = time.time()
        r = self.session.post('%s%s.json' % (ROOT, url), data=params, headers={'content-type': 'application/json', 'user-agent': 'Mandrill-Python/1.0.55'})
        try:
            # grab the remote_addr before grabbing the text since the socket will go away
            remote_addr = r.raw._original_response.fp._sock.getpeername()
        except Exception:
            # we use two private fields when getting the remote_addr, so be a
            # little robust against errors; was a bare ``except:`` which would
            # also have swallowed KeyboardInterrupt/SystemExit
            remote_addr = (None, None)
        response_body = r.text
        complete_time = time.time() - start
        self.log('Received %s in %.2fms: %s' % (r.status_code, complete_time * 1000, r.text))
        self.last_request = {'url': url, 'request_body': params, 'response_body': r.text, 'remote_addr': remote_addr, 'response': r, 'time': complete_time}
        result = json.loads(response_body)
        if r.status_code != requests.codes.ok:
            raise self.cast_error(result)
        return result
    def cast_error(self, result):
        '''Take a result representing an error and cast it to a specific exception if possible (use a generic mandrill.Error exception for unknown cases)'''
        # A malformed error payload is raised here directly instead of being
        # returned to the caller (which would re-raise it anyway).
        if 'status' not in result or result['status'] != 'error' or 'name' not in result:
            raise Error('We received an unexpected error: %r' % result)
        if result['name'] in ERROR_MAP:
            return ERROR_MAP[result['name']](result['message'])
        return Error(result['message'])
    def read_configs(self):
        '''Try to read the API key from a series of files if it's not provided in code.

        Returns the first non-empty key found, or None.'''
        paths = [os.path.expanduser('~/.mandrill.key'), '/etc/mandrill.key']
        for path in paths:
            try:
                # ``with`` closes the handle even on read errors (the original
                # leaked it and swallowed every exception with a bare except).
                with open(path, 'r') as f:
                    apikey = f.read().strip()
            except (IOError, OSError):
                continue
            if apikey != '':
                return apikey
        return None
    def log(self, *args, **kwargs):
        '''Proxy access to the mandrill logger, changing the level based on the debug setting'''
        logger.log(self.level, *args, **kwargs)
    def __repr__(self):
        return '<Mandrill %s>' % self.apikey
class Templates(object):
def __init__(self, master):
self.master = master
def add(self, name, from_email=None, from_name=None, subject=None, code=None, text=None, publish=True, labels=[]):
"""Add a new template
Args:
name (string): the name for the new template - must be unique
from_email (string): a default sending address for emails sent using this template
from_name (string): a default from name to be used
subject (string): a default subject line to be used
code (string): the HTML code for the template with mc:edit attributes for the editable elements
text (string): a default text part to be used when sending with this template
publish (boolean): set to false to add a draft template without publishing
labels (array): an optional array of up to 10 labels to use for filtering templates::
labels[] (string): a single label
Returns:
struct. the information saved about the new template::
slug (string): the immutable unique code name of the template
name (string): the name of the template
labels (array): the list of labels applied to the template::
labels[] (string): a single label
code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version
subject (string): the subject line of the template, if provided - draft version
from_email (string): the default sender address for the template, if provided - draft version
from_name (string): the default sender from name for the template, if provided - draft version
text (string): the default text part of messages sent with the template, if provided - draft version
publish_name (string): the same as the template name - kept as a separate field for backwards compatibility
publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published
publish_subject (string): the subject line of the template, if provided
publish_from_email (string): the default sender address for the template, if provided
publish_from_name (string): the default sender from name for the template, if provided
publish_text (string): the default text part of messages sent with the template, if provided
published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published
created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format
updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
InvalidTemplateError: The given template name already exists or contains invalid characters
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'name': name, 'from_email': from_email, 'from_name': from_name, 'subject': subject, 'code': code, 'text': text, 'publish': publish, 'labels': labels}
return self.master.call('templates/add', _params)
def info(self, name):
"""Get the information for an existing template
Args:
name (string): the immutable name of an existing template
Returns:
struct. the requested template information::
slug (string): the immutable unique code name of the template
name (string): the name of the template
labels (array): the list of labels applied to the template::
labels[] (string): a single label
code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version
subject (string): the subject line of the template, if provided - draft version
from_email (string): the default sender address for the template, if provided - draft version
from_name (string): the default sender from name for the template, if provided - draft version
text (string): the default text part of messages sent with the template, if provided - draft version
publish_name (string): the same as the template name - kept as a separate field for backwards compatibility
publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published
publish_subject (string): the subject line of the template, if provided
publish_from_email (string): the default sender address for the template, if provided
publish_from_name (string): the default sender from name for the template, if provided
publish_text (string): the default text part of messages sent with the template, if provided
published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published
created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format
updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
UnknownTemplateError: The requested template does not exist
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'name': name}
return self.master.call('templates/info', _params)
def update(self, name, from_email=None, from_name=None, subject=None, code=None, text=None, publish=True, labels=None):
"""Update the code for an existing template. If null is provided for any fields, the values will remain unchanged.
Args:
name (string): the immutable name of an existing template
from_email (string): the new default sending address
from_name (string): the new default from name
subject (string): the new default subject line
code (string): the new code for the template
text (string): the new default text part to be used
publish (boolean): set to false to update the draft version of the template without publishing
labels (array): an optional array of up to 10 labels to use for filtering templates::
labels[] (string): a single label
Returns:
struct. the template that was updated::
slug (string): the immutable unique code name of the template
name (string): the name of the template
labels (array): the list of labels applied to the template::
labels[] (string): a single label
code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version
subject (string): the subject line of the template, if provided - draft version
from_email (string): the default sender address for the template, if provided - draft version
from_name (string): the default sender from name for the template, if provided - draft version
text (string): the default text part of messages sent with the template, if provided - draft version
publish_name (string): the same as the template name - kept as a separate field for backwards compatibility
publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published
publish_subject (string): the subject line of the template, if provided
publish_from_email (string): the default sender address for the template, if provided
publish_from_name (string): the default sender from name for the template, if provided
publish_text (string): the default text part of messages sent with the template, if provided
published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published
created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format
updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
UnknownTemplateError: The requested template does not exist
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'name': name, 'from_email': from_email, 'from_name': from_name, 'subject': subject, 'code': code, 'text': text, 'publish': publish, 'labels': labels}
return self.master.call('templates/update', _params)
def publish(self, name):
"""Publish the content for the template. Any new messages sent using this template will start using the content that was previously in draft.
Args:
name (string): the immutable name of an existing template
Returns:
struct. the template that was published::
slug (string): the immutable unique code name of the template
name (string): the name of the template
labels (array): the list of labels applied to the template::
labels[] (string): a single label
code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version
subject (string): the subject line of the template, if provided - draft version
from_email (string): the default sender address for the template, if provided - draft version
from_name (string): the default sender from name for the template, if provided - draft version
text (string): the default text part of messages sent with the template, if provided - draft version
publish_name (string): the same as the template name - kept as a separate field for backwards compatibility
publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published
publish_subject (string): the subject line of the template, if provided
publish_from_email (string): the default sender address for the template, if provided
publish_from_name (string): the default sender from name for the template, if provided
publish_text (string): the default text part of messages sent with the template, if provided
published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published
created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format
updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
UnknownTemplateError: The requested template does not exist
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'name': name}
return self.master.call('templates/publish', _params)
def delete(self, name):
"""Delete a template
Args:
name (string): the immutable name of an existing template
Returns:
struct. the template that was deleted::
slug (string): the immutable unique code name of the template
name (string): the name of the template
labels (array): the list of labels applied to the template::
labels[] (string): a single label
code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version
subject (string): the subject line of the template, if provided - draft version
from_email (string): the default sender address for the template, if provided - draft version
from_name (string): the default sender from name for the template, if provided - draft version
text (string): the default text part of messages sent with the template, if provided - draft version
publish_name (string): the same as the template name - kept as a separate field for backwards compatibility
publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published
publish_subject (string): the subject line of the template, if provided
publish_from_email (string): the default sender address for the template, if provided
publish_from_name (string): the default sender from name for the template, if provided
publish_text (string): the default text part of messages sent with the template, if provided
published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published
created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format
updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
UnknownTemplateError: The requested template does not exist
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'name': name}
return self.master.call('templates/delete', _params)
def list(self, label=None):
"""Return a list of all the templates available to this user
Args:
label (string): an optional label to filter the templates
Returns:
array. an array of structs with information about each template::
[] (struct): the information on each template in the account::
[].slug (string): the immutable unique code name of the template
[].name (string): the name of the template
[].labels (array): the list of labels applied to the template::
[].labels[] (string): a single label
[].code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements - draft version
[].subject (string): the subject line of the template, if provided - draft version
[].from_email (string): the default sender address for the template, if provided - draft version
[].from_name (string): the default sender from name for the template, if provided - draft version
[].text (string): the default text part of messages sent with the template, if provided - draft version
[].publish_name (string): the same as the template name - kept as a separate field for backwards compatibility
[].publish_code (string): the full HTML code of the template, with mc:edit attributes marking the editable elements that are available as published, if it has been published
[].publish_subject (string): the subject line of the template, if provided
[].publish_from_email (string): the default sender address for the template, if provided
[].publish_from_name (string): the default sender from name for the template, if provided
[].publish_text (string): the default text part of messages sent with the template, if provided
[].published_at (string): the date and time the template was last published as a UTC string in YYYY-MM-DD HH:MM:SS format, or null if it has not been published
[].created_at (string): the date and time the template was first created as a UTC string in YYYY-MM-DD HH:MM:SS format
[].updated_at (string): the date and time the template was last modified as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'label': label}
return self.master.call('templates/list', _params)
def time_series(self, name):
"""Return the recent history (hourly stats for the last 30 days) for a template
Args:
name (string): the name of an existing template
Returns:
array. the array of history information::
[] (struct): the stats for a single hour::
[].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format
[].sent (integer): the number of emails that were sent during the hour
[].hard_bounces (integer): the number of emails that hard bounced during the hour
[].soft_bounces (integer): the number of emails that soft bounced during the hour
[].rejects (integer): the number of emails that were rejected during the hour
[].complaints (integer): the number of spam complaints received during the hour
[].opens (integer): the number of emails opened during the hour
[].unique_opens (integer): the number of unique opens generated by messages sent during the hour
[].clicks (integer): the number of tracked URLs clicked during the hour
[].unique_clicks (integer): the number of unique clicks generated by messages sent during the hour
Raises:
UnknownTemplateError: The requested template does not exist
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'name': name}
return self.master.call('templates/time-series', _params)
def render(self, template_name, template_content, merge_vars=None):
"""Inject content and optionally merge fields into a template, returning the HTML that results
Args:
template_name (string): the immutable name of a template that exists in the user's account
template_content (array): an array of template content to render. Each item in the array should be a struct with two keys - name: the name of the content block to set the content for, and content: the actual content to put into the block::
template_content[] (struct): the injection of a single piece of content into a single editable region::
template_content[].name (string): the name of the mc:edit editable region to inject into
template_content[].content (string): the content to inject
merge_vars (array): optional merge variables to use for injecting merge field content. If this is not provided, no merge fields will be replaced.::
merge_vars[] (struct): a single merge variable::
merge_vars[].name (string): the merge variable's name. Merge variable names are case-insensitive and may not start with _
merge_vars[].content (string): the merge variable's content
Returns:
struct. the result of rendering the given template with the content and merge field values injected::
html (string): the rendered HTML as a string
Raises:
UnknownTemplateError: The requested template does not exist
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'template_name': template_name, 'template_content': template_content, 'merge_vars': merge_vars}
return self.master.call('templates/render', _params)
class Exports(object):
    """Calls for starting and monitoring bulk data export jobs.

    Every export job produces a zip archive; the format of the archive is
    distinct for each job type (see the individual calls for details).
    """

    def __init__(self, master):
        # master is the Mandrill client instance that performs the HTTP calls
        self.master = master

    def info(self, id):
        """Return information about an export job.

        If the export job's state is 'complete', the returned data will
        include a URL you can use to fetch the results.

        Args:
            id (string): an export job identifier

        Returns:
            struct. the information about the export::
                id (string): the unique identifier for this Export. Use this identifier when checking the export job's status
                created_at (string): the date and time that the export job was created as a UTC string in YYYY-MM-DD HH:MM:SS format
                type (string): the type of the export job - activity, reject, or whitelist
                finished_at (string): the date and time that the export job was finished as a UTC string in YYYY-MM-DD HH:MM:SS format
                state (string): the export job's state - waiting, working, complete, error, or expired.
                result_url (string): the url for the export job's results, if the job is completed.

        Raises:
            UnknownExportError: The requested export job does not exist
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        _params = {'id': id}
        return self.master.call('exports/info', _params)

    def list(self):
        """Return a list of your exports.

        Returns:
            array. the account's exports; one struct per export with the
            same fields documented for info() (id, created_at, type,
            finished_at, state, result_url)

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        _params = {}
        return self.master.call('exports/list', _params)

    def rejects(self, notify_email=None):
        """Begin an export of your rejection blacklist.

        The blacklist will be exported to a zip archive containing a single
        file named rejects.csv that includes the following fields: email,
        reason, detail, created_at, expires_at, last_event_at.

        Args:
            notify_email (string): an optional email address to notify when the export job has finished.

        Returns:
            struct. information about the rejects export job that was
            started (id, created_at, type, finished_at, state, result_url);
            finished_at and result_url are null until the job has run

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        _params = {'notify_email': notify_email}
        return self.master.call('exports/rejects', _params)

    def whitelist(self, notify_email=None):
        """Begin an export of your rejection whitelist.

        The whitelist will be exported to a zip archive containing a single
        file named whitelist.csv that includes the following fields:
        email, detail, created_at.

        Args:
            notify_email (string): an optional email address to notify when the export job has finished.

        Returns:
            struct. information about the whitelist export job that was
            started (id, created_at, type, finished_at, state, result_url);
            finished_at and result_url are null until the job has run

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        _params = {'notify_email': notify_email}
        return self.master.call('exports/whitelist', _params)

    def activity(self, notify_email=None, date_from=None, date_to=None, tags=None, senders=None, states=None, api_keys=None):
        """Begin an export of your activity history.

        The activity will be exported to a zip archive containing a single
        file named activity.csv in the same format as you would be able to
        export from your account's activity view. It includes the following
        fields: Date, Email Address, Sender, Subject, Status, Tags, Opens,
        Clicks, Bounce Detail. If you have configured any custom metadata
        fields, they will be included in the exported data.

        Args:
            notify_email (string): an optional email address to notify when the export job has finished
            date_from (string): start date as a UTC string in YYYY-MM-DD HH:MM:SS format
            date_to (string): end date as a UTC string in YYYY-MM-DD HH:MM:SS format
            tags (array): tag names to narrow the export to; will match messages that contain ANY of the tags
            senders (array): sender addresses to narrow the export to
            states (array): message states to narrow the export to; messages with ANY of the states will be included
            api_keys (array): api keys to narrow the export to; messages sent with ANY of the keys will be included

        Returns:
            struct. information about the activity export job that was
            started (id, created_at, type, finished_at, state, result_url);
            finished_at and result_url are null until the job has run

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        _params = {'notify_email': notify_email, 'date_from': date_from, 'date_to': date_to, 'tags': tags, 'senders': senders, 'states': states, 'api_keys': api_keys}
        return self.master.call('exports/activity', _params)
class Users(object):
    """Calls that report on the API-connected user's own account."""

    def __init__(self, master):
        # master is the Mandrill client instance that performs the HTTP calls
        self.master = master

    def info(self):
        """Return the information about the API-connected user.

        Returns:
            struct. the user information::
                username (string): the username of the user (used for SMTP authentication)
                created_at (string): when the user's Mandrill account was created, as a UTC string in YYYY-MM-DD HH:MM:SS format
                public_id (string): a unique, permanent identifier for this user
                reputation (integer): reputation on a scale from 0 to 100, with 75 generally being a "good" reputation
                hourly_quota (integer): the maximum number of emails Mandrill will deliver for this user each hour; emails beyond that are accepted and queued for later delivery
                backlog (integer): the number of emails queued for delivery due to exceeding the monthly or hourly quotas
                stats (struct): aggregate sending stats, with sub-structs
                    today, last_7_days, last_30_days, last_60_days,
                    last_90_days, and all_time; each sub-struct contains
                    sent, hard_bounces, soft_bounces, rejects, complaints,
                    unsubs, opens, unique_opens, clicks, and unique_clicks
                    counters for its period

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('users/info', {})

    def ping(self):
        """Validate an API key and respond to a ping.

        Returns:
            string. the string "PONG!"

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('users/ping', {})

    def ping2(self):
        """Validate an API key and respond to a ping (anal JSON parser version).

        Returns:
            struct. a struct with one key "PING" with a static value "PONG!"

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('users/ping2', {})

    def senders(self):
        """Return the senders that have tried to use this account, both
        verified and unverified.

        Returns:
            array. one struct per sending address used by the account::
                address (string): the sender's email address
                created_at (string): when the sender was first seen by Mandrill, as a UTC date string in YYYY-MM-DD HH:MM:SS format
                sent (integer): total messages sent by this sender
                hard_bounces (integer): total hard bounces for messages by this sender
                soft_bounces (integer): total soft bounces for messages by this sender
                rejects (integer): total rejected messages by this sender
                complaints (integer): total spam complaints received for messages by this sender
                unsubs (integer): total unsubscribe requests received for messages by this sender
                opens (integer): total times messages by this sender have been opened
                clicks (integer): total times tracked URLs in messages by this sender have been clicked
                unique_opens (integer): unique opens for emails sent for this sender
                unique_clicks (integer): unique clicks for emails sent for this sender

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('users/senders', {})
class Rejects(object):
    def __init__(self, master):
        """Bind this call group to the parent Mandrill client that performs the HTTP calls."""
        self.master = master
def add(self, email, comment=None, subaccount=None):
"""Adds an email to your email rejection blacklist. Addresses that you
add manually will never expire and there is no reputation penalty
for removing them from your blacklist. Attempting to blacklist an
address that has been whitelisted will have no effect.
Args:
email (string): an email address to block
comment (string): an optional comment describing the rejection
subaccount (string): an optional unique identifier for the subaccount to limit the blacklist entry
Returns:
struct. a status object containing the address and the result of the operation::
email (string): the email address you provided
added (boolean): whether the operation succeeded
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownSubaccountError: The provided subaccount id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'email': email, 'comment': comment, 'subaccount': subaccount}
return self.master.call('rejects/add', _params)
def list(self, email=None, include_expired=False, subaccount=None):
"""Retrieves your email rejection blacklist. You can provide an email
address to limit the results. Returns up to 1000 results. By default,
entries that have expired are excluded from the results; set
include_expired to true to include them.
Args:
email (string): an optional email address to search by
include_expired (boolean): whether to include rejections that have already expired.
subaccount (string): an optional unique identifier for the subaccount to limit the blacklist
Returns:
array. Up to 1000 rejection entries::
[] (struct): the information for each rejection blacklist entry::
[].email (string): the email that is blocked
[].reason (string): the type of event (hard-bounce, soft-bounce, spam, unsub) that caused this rejection
[].detail (string): extended details about the event, such as the SMTP diagnostic for bounces or the comment for manually-created rejections
[].created_at (string): when the email was added to the blacklist
[].last_event_at (string): the timestamp of the most recent event that either created or renewed this rejection
[].expires_at (string): when the blacklist entry will expire (this may be in the past)
[].expired (boolean): whether the blacklist entry has expired
[].sender (struct): the sender that this blacklist entry applies to, or null if none.::
[].sender.address (string): the sender's email address
[].sender.created_at (string): the date and time that the sender was first seen by Mandrill as a UTC date string in YYYY-MM-DD HH:MM:SS format
[].sender.sent (integer): the total number of messages sent by this sender
[].sender.hard_bounces (integer): the total number of hard bounces by messages by this sender
[].sender.soft_bounces (integer): the total number of soft bounces by messages by this sender
[].sender.rejects (integer): the total number of rejected messages by this sender
[].sender.complaints (integer): the total number of spam complaints received for messages by this sender
[].sender.unsubs (integer): the total number of unsubscribe requests received for messages by this sender
[].sender.opens (integer): the total number of times messages by this sender have been opened
[].sender.clicks (integer): the total number of times tracked URLs in messages by this sender have been clicked
[].sender.unique_opens (integer): the number of unique opens for emails sent for this sender
[].sender.unique_clicks (integer): the number of unique clicks for emails sent for this sender
[].subaccount (string): the subaccount that this blacklist entry applies to, or null if none.
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownSubaccountError: The provided subaccount id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'email': email, 'include_expired': include_expired, 'subaccount': subaccount}
return self.master.call('rejects/list', _params)
def delete(self, email, subaccount=None):
"""Deletes an email rejection. There is no limit to how many rejections
you can remove from your blacklist, but keep in mind that each deletion
has an affect on your reputation.
Args:
email (string): an email address
subaccount (string): an optional unique identifier for the subaccount to limit the blacklist deletion
Returns:
struct. a status object containing the address and whether the deletion succeeded.::
email (string): the email address that was removed from the blacklist
deleted (boolean): whether the address was deleted successfully.
subaccount (string): the subaccount blacklist that the address was removed from, if any
Raises:
InvalidRejectError: The requested email is not in the rejection list
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownSubaccountError: The provided subaccount id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'email': email, 'subaccount': subaccount}
return self.master.call('rejects/delete', _params)
class Inbound(object):
    """Calls for the Mandrill inbound mail API: manage inbound domains and
    the mailbox routes that deliver matching messages to webhooks."""
    def __init__(self, master):
        # Every method delegates the HTTP round trip to the root client's call().
        self.master = master

    def domains(self, ):
        """List the domains that have been configured for inbound delivery.

        Returns:
            array. one struct per inbound domain::
                [].domain (string): the domain name that is accepting mail
                [].created_at (string): when the domain was added, as a UTC
                    string in YYYY-MM-DD HH:MM:SS format
                [].valid_mx (boolean): true if the domain has successfully
                    set up an MX record pointing at the Mandrill servers
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('inbound/domains', dict())

    def add_domain(self, domain):
        """Add an inbound domain to your account.

        Args:
            domain (string): a domain name
        Returns:
            struct. the domain info (domain, created_at, valid_mx)
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('inbound/add-domain', dict(domain=domain))

    def check_domain(self, domain):
        """Check the MX settings for an inbound domain previously
        registered via add_domain.

        Args:
            domain (string): an existing inbound domain
        Returns:
            struct. the domain info (domain, created_at, valid_mx)
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownInboundDomainError: The requested inbound domain does not exist
            Error: A general Mandrill error has occurred
        """
        return self.master.call('inbound/check-domain', dict(domain=domain))

    def delete_domain(self, domain):
        """Delete an inbound domain from the account.  All mail stops
        routing for this domain immediately.

        Args:
            domain (string): an existing inbound domain
        Returns:
            struct. the deleted domain info (domain, created_at, valid_mx)
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownInboundDomainError: The requested inbound domain does not exist
            Error: A general Mandrill error has occurred
        """
        return self.master.call('inbound/delete-domain', dict(domain=domain))

    def routes(self, domain):
        """List the mailbox routes defined for an inbound domain.

        Args:
            domain (string): the domain to check
        Returns:
            array. one struct per route::
                [].id (string): the unique identifier of the route
                [].pattern (string): the search pattern that the mailbox
                    name should match
                [].url (string): the webhook URL where inbound messages
                    will be published
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownInboundDomainError: The requested inbound domain does not exist
            Error: A general Mandrill error has occurred
        """
        return self.master.call('inbound/routes', dict(domain=domain))

    def add_route(self, domain, pattern, url):
        """Add a new mailbox route to an inbound domain.

        Args:
            domain (string): an existing inbound domain
            pattern (string): the search pattern that the mailbox name should match
            url (string): the webhook URL where the inbound messages will be published
        Returns:
            struct. the added route's info (id, pattern, url)
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownInboundDomainError: The requested inbound domain does not exist
            Error: A general Mandrill error has occurred
        """
        body = dict(domain=domain, pattern=pattern, url=url)
        return self.master.call('inbound/add-route', body)

    def update_route(self, id, pattern=None, url=None):
        """Update the pattern or webhook of an existing inbound mailbox
        route.  Fields left as None remain unchanged.

        Args:
            id (string): the unique identifier of an existing mailbox route
            pattern (string): the search pattern that the mailbox name should match
            url (string): the webhook URL where the inbound messages will be published
        Returns:
            struct. the updated route's info (id, pattern, url)
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownInboundRouteError: The provided inbound route does not exist.
            Error: A general Mandrill error has occurred
        """
        body = dict(id=id, pattern=pattern, url=url)
        return self.master.call('inbound/update-route', body)

    def delete_route(self, id):
        """Delete an existing inbound mailbox route.

        Args:
            id (string): the unique identifier of an existing route
        Returns:
            struct. the deleted route's info (id, pattern, url)
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownInboundRouteError: The provided inbound route does not exist.
            Error: A general Mandrill error has occurred
        """
        return self.master.call('inbound/delete-route', dict(id=id))

    def send_raw(self, raw_message, to=None, mail_from=None, helo=None, client_address=None):
        """Take a raw MIME document destined for a domain with inbound
        domains set up, and send it to the inbound hook exactly as if it
        had arrived over SMTP.

        Args:
            raw_message (string): the full MIME document of an email message
            to (array|null): optional list of recipient email addresses;
                otherwise the To, Cc, and Bcc headers in the document are used
            mail_from (string): the MAIL FROM address from the SMTP
                conversation; required for the SPF check
            helo (string): the client MTA's HELO identification; required
                for the SPF check
            client_address (string): the remote MTA's ip address; optional,
                required for the SPF check
        Returns:
            array. one struct per recipient that matched an inbound route,
            with keys email, pattern, and url
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        body = dict(raw_message=raw_message, to=to, mail_from=mail_from,
                    helo=helo, client_address=client_address)
        return self.master.call('inbound/send-raw', body)
class Tags(object):
    """Calls for the Mandrill tags API: statistics for user-defined tags."""
    def __init__(self, master):
        # Every method delegates the HTTP round trip to the root client's call().
        self.master = master

    def list(self, ):
        """Return all of the user-defined tag information.

        Returns:
            array. one struct per tag, with the tag string, its current
            reputation (0-100), and lifetime counters: sent, hard_bounces,
            soft_bounces, rejects, complaints, unsubs, opens, clicks,
            unique_opens, unique_clicks
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('tags/list', dict())

    def delete(self, tag):
        """Permanently delete a tag, removing it from any messages that
        have been sent and dropping the tag's stats.  There is no way to
        undo this operation, so use it carefully.

        Args:
            tag (string): a tag name
        Returns:
            struct. the deleted tag's final stats (same fields as list())
        Raises:
            InvalidTagNameError: The requested tag does not exist or contains invalid characters
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('tags/delete', dict(tag=tag))

    def info(self, tag):
        """Return detailed information about a single tag, including
        aggregates of recent stats.

        Args:
            tag (string): an existing tag name
        Returns:
            struct. the tag string plus lifetime counters (sent,
            hard_bounces, soft_bounces, rejects, complaints, unsubs, opens,
            clicks) and a ``stats`` struct keyed by window (today,
            last_7_days, last_30_days, last_60_days, last_90_days), each
            window holding the same counters plus unique_opens and
            unique_clicks
        Raises:
            InvalidTagNameError: The requested tag does not exist or contains invalid characters
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('tags/info', dict(tag=tag))

    def time_series(self, tag):
        """Return the recent history (hourly stats for the last 30 days)
        for a tag.

        Args:
            tag (string): an existing tag name
        Returns:
            array. one struct per hour, with the hour as a UTC
            YYYY-MM-DD HH:MM:SS string plus counters: sent, hard_bounces,
            soft_bounces, rejects, complaints, unsubs, opens, unique_opens,
            clicks, unique_clicks
        Raises:
            InvalidTagNameError: The requested tag does not exist or contains invalid characters
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('tags/time-series', dict(tag=tag))

    def all_time_series(self, ):
        """Return the recent history (hourly stats for the last 30 days)
        for all tags combined.

        Returns:
            array. one struct per hour, with the same fields as
            time_series()
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('tags/all-time-series', dict())
class Messages(object):
    def __init__(self, master):
        # master: the top-level Mandrill client; every method in this class
        # delegates the HTTP round trip to master.call(endpoint, params).
        self.master = master
    def send(self, message, async=False, ip_pool=None, send_at=None):
        """Send a new transactional message through Mandrill
        Args:
            message (struct): the information on the message to send::
                message.html (string): the full HTML content to be sent
                message.text (string): optional full text content to be sent
                message.subject (string): the message subject
                message.from_email (string): the sender email address.
                message.from_name (string): optional from name to be used
                message.to (array): an array of recipient information.::
                    message.to[] (struct): a single recipient's information.::
                        message.to[].email (string): the email address of the recipient
                        message.to[].name (string): the optional display name to use for the recipient
                        message.to[].type (string): the header type to use for the recipient, defaults to "to" if not provided
                message.headers (struct): optional extra headers to add to the message (most headers are allowed)
                message.important (boolean): whether or not this message is important, and should be delivered ahead of non-important messages
                message.track_opens (boolean): whether or not to turn on open tracking for the message
                message.track_clicks (boolean): whether or not to turn on click tracking for the message
                message.auto_text (boolean): whether or not to automatically generate a text part for messages that are not given text
                message.auto_html (boolean): whether or not to automatically generate an HTML part for messages that are not given HTML
                message.inline_css (boolean): whether or not to automatically inline all CSS styles provided in the message HTML - only for HTML documents less than 256KB in size
                message.url_strip_qs (boolean): whether or not to strip the query string from URLs when aggregating tracked URL data
                message.preserve_recipients (boolean): whether or not to expose all recipients in to "To" header for each email
                message.view_content_link (boolean): set to false to remove content logging for sensitive emails
                message.bcc_address (string): an optional address to receive an exact copy of each recipient's email
                message.tracking_domain (string): a custom domain to use for tracking opens and clicks instead of mandrillapp.com
                message.signing_domain (string): a custom domain to use for SPF/DKIM signing instead of mandrill (for "via" or "on behalf of" in email clients)
                message.return_path_domain (string): a custom domain to use for the messages's return-path
                message.merge (boolean): whether to evaluate merge tags in the message. Will automatically be set to true if either merge_vars or global_merge_vars are provided.
                message.global_merge_vars (array): global merge variables to use for all recipients. You can override these per recipient.::
                    message.global_merge_vars[] (struct): a single global merge variable::
                        message.global_merge_vars[].name (string): the global merge variable's name. Merge variable names are case-insensitive and may not start with _
                        message.global_merge_vars[].content (string): the global merge variable's content
                message.merge_vars (array): per-recipient merge variables, which override global merge variables with the same name.::
                    message.merge_vars[] (struct): per-recipient merge variables::
                        message.merge_vars[].rcpt (string): the email address of the recipient that the merge variables should apply to
                        message.merge_vars[].vars (array): the recipient's merge variables::
                            message.merge_vars[].vars[] (struct): a single merge variable::
                                message.merge_vars[].vars[].name (string): the merge variable's name. Merge variable names are case-insensitive and may not start with _
                                message.merge_vars[].vars[].content (string): the merge variable's content
                message.tags (array): an array of string to tag the message with.  Stats are accumulated using tags, though we only store the first 100 we see, so this should not be unique or change frequently.  Tags should be 50 characters or less.  Any tags starting with an underscore are reserved for internal use and will cause errors.::
                    message.tags[] (string): a single tag - must not start with an underscore
                message.subaccount (string): the unique id of a subaccount for this message - must already exist or will fail with an error
                message.google_analytics_domains (array): an array of strings indicating for which any matching URLs will automatically have Google Analytics parameters appended to their query string automatically.
                message.google_analytics_campaign (array|string): optional string indicating the value to set for the utm_campaign tracking parameter. If this isn't provided the email's from address will be used instead.
                message.metadata (array): metadata an associative array of user metadata. Mandrill will store this metadata and make it available for retrieval. In addition, you can select up to 10 metadata fields to index and make searchable using the Mandrill search api.
                message.recipient_metadata (array): Per-recipient metadata that will override the global values specified in the metadata parameter.::
                    message.recipient_metadata[] (struct): metadata for a single recipient::
                        message.recipient_metadata[].rcpt (string): the email address of the recipient that the metadata is associated with
                        message.recipient_metadata[].values (array): an associated array containing the recipient's unique metadata. If a key exists in both the per-recipient metadata and the global metadata, the per-recipient metadata will be used.
                message.attachments (array): an array of supported attachments to add to the message::
                    message.attachments[] (struct): a single supported attachment::
                        message.attachments[].type (string): the MIME type of the attachment
                        message.attachments[].name (string): the file name of the attachment
                        message.attachments[].content (string): the content of the attachment as a base64-encoded string
                message.images (array): an array of embedded images to add to the message::
                    message.images[] (struct): a single embedded image::
                        message.images[].type (string): the MIME type of the image - must start with "image/"
                        message.images[].name (string): the Content ID of the image - use <img src="cid:THIS_VALUE"> to reference the image in your HTML content
                        message.images[].content (string): the content of the image as a base64-encoded string
            async (boolean): enable a background sending mode that is optimized for bulk sending. In async mode, messages/send will immediately return a status of "queued" for every recipient. To handle rejections when sending in async mode, set up a webhook for the 'reject' event. Defaults to false for messages with no more than 10 recipients; messages with more than 10 recipients are always sent asynchronously, regardless of the value of async.
            ip_pool (string): the name of the dedicated ip pool that should be used to send the message. If you do not have any dedicated IPs, this parameter has no effect. If you specify a pool that does not exist, your default pool will be used instead.
            send_at (string): when this message should be sent as a UTC timestamp in YYYY-MM-DD HH:MM:SS format. If you specify a time in the past, the message will be sent immediately. An additional fee applies for scheduled email, and this feature is only available to accounts with a positive balance.
        Returns:
            array. of structs for each recipient containing the key "email" with the email address and "status" as either "sent", "queued", or "rejected"::
                [] (struct): the sending results for a single recipient::
                    [].email (string): the email address of the recipient
                    [].status (string): the sending status of the recipient - either "sent", "queued", "scheduled", "rejected", or "invalid"
                    [].reject_reason (string): the reason for the rejection if the recipient status is "rejected"
                    []._id (string): the message's unique id
        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            PaymentRequiredError: The requested feature requires payment.
            UnknownSubaccountError: The provided subaccount id does not exist.
            Error: A general Mandrill error has occurred
        """
        # NOTE(review): 'async' became a reserved keyword in Python 3.7, so
        # this signature only parses under Python 2.  A Python 3 port must
        # rename the parameter (e.g. 'async_'), which is a breaking change
        # for callers passing it by keyword -- confirm before migrating.
        # Pack every argument, including explicit None defaults, into the
        # request body; the API treats null fields as "use the default".
        _params = {'message': message, 'async': async, 'ip_pool': ip_pool, 'send_at': send_at}
        return self.master.call('messages/send', _params)
def send_template(self, template_name, template_content, message, async=False, ip_pool=None, send_at=None):
"""Send a new transactional message through Mandrill using a template
Args:
template_name (string): the immutable name or slug of a template that exists in the user's account. For backwards-compatibility, the template name may also be used but the immutable slug is preferred.
template_content (array): an array of template content to send. Each item in the array should be a struct with two keys - name: the name of the content block to set the content for, and content: the actual content to put into the block::
template_content[] (struct): the injection of a single piece of content into a single editable region::
template_content[].name (string): the name of the mc:edit editable region to inject into
template_content[].content (string): the content to inject
message (struct): the other information on the message to send - same as /messages/send, but without the html content::
message.html (string): optional full HTML content to be sent if not in template
message.text (string): optional full text content to be sent
message.subject (string): the message subject
message.from_email (string): the sender email address.
message.from_name (string): optional from name to be used
message.to (array): an array of recipient information.::
message.to[] (struct): a single recipient's information.::
message.to[].email (string): the email address of the recipient
message.to[].name (string): the optional display name to use for the recipient
message.to[].type (string): the header type to use for the recipient, defaults to "to" if not provided
message.headers (struct): optional extra headers to add to the message (most headers are allowed)
message.important (boolean): whether or not this message is important, and should be delivered ahead of non-important messages
message.track_opens (boolean): whether or not to turn on open tracking for the message
message.track_clicks (boolean): whether or not to turn on click tracking for the message
message.auto_text (boolean): whether or not to automatically generate a text part for messages that are not given text
message.auto_html (boolean): whether or not to automatically generate an HTML part for messages that are not given HTML
message.inline_css (boolean): whether or not to automatically inline all CSS styles provided in the message HTML - only for HTML documents less than 256KB in size
message.url_strip_qs (boolean): whether or not to strip the query string from URLs when aggregating tracked URL data
message.preserve_recipients (boolean): whether or not to expose all recipients in to "To" header for each email
message.view_content_link (boolean): set to false to remove content logging for sensitive emails
message.bcc_address (string): an optional address to receive an exact copy of each recipient's email
message.tracking_domain (string): a custom domain to use for tracking opens and clicks instead of mandrillapp.com
message.signing_domain (string): a custom domain to use for SPF/DKIM signing instead of mandrill (for "via" or "on behalf of" in email clients)
message.return_path_domain (string): a custom domain to use for the messages's return-path
message.merge (boolean): whether to evaluate merge tags in the message. Will automatically be set to true if either merge_vars or global_merge_vars are provided.
message.global_merge_vars (array): global merge variables to use for all recipients. You can override these per recipient.::
message.global_merge_vars[] (struct): a single global merge variable::
message.global_merge_vars[].name (string): the global merge variable's name. Merge variable names are case-insensitive and may not start with _
message.global_merge_vars[].content (string): the global merge variable's content
message.merge_vars (array): per-recipient merge variables, which override global merge variables with the same name.::
message.merge_vars[] (struct): per-recipient merge variables::
message.merge_vars[].rcpt (string): the email address of the recipient that the merge variables should apply to
message.merge_vars[].vars (array): the recipient's merge variables::
message.merge_vars[].vars[] (struct): a single merge variable::
message.merge_vars[].vars[].name (string): the merge variable's name. Merge variable names are case-insensitive and may not start with _
message.merge_vars[].vars[].content (string): the merge variable's content
message.tags (array): an array of string to tag the message with. Stats are accumulated using tags, though we only store the first 100 we see, so this should not be unique or change frequently. Tags should be 50 characters or less. Any tags starting with an underscore are reserved for internal use and will cause errors.::
message.tags[] (string): a single tag - must not start with an underscore
message.subaccount (string): the unique id of a subaccount for this message - must already exist or will fail with an error
message.google_analytics_domains (array): an array of strings indicating for which any matching URLs will automatically have Google Analytics parameters appended to their query string automatically.
message.google_analytics_campaign (array|string): optional string indicating the value to set for the utm_campaign tracking parameter. If this isn't provided the email's from address will be used instead.
message.metadata (array): metadata an associative array of user metadata. Mandrill will store this metadata and make it available for retrieval. In addition, you can select up to 10 metadata fields to index and make searchable using the Mandrill search api.
message.recipient_metadata (array): Per-recipient metadata that will override the global values specified in the metadata parameter.::
message.recipient_metadata[] (struct): metadata for a single recipient::
message.recipient_metadata[].rcpt (string): the email address of the recipient that the metadata is associated with
message.recipient_metadata[].values (array): an associated array containing the recipient's unique metadata. If a key exists in both the per-recipient metadata and the global metadata, the per-recipient metadata will be used.
message.attachments (array): an array of supported attachments to add to the message::
message.attachments[] (struct): a single supported attachment::
message.attachments[].type (string): the MIME type of the attachment
message.attachments[].name (string): the file name of the attachment
message.attachments[].content (string): the content of the attachment as a base64-encoded string
message.images (array): an array of embedded images to add to the message::
message.images[] (struct): a single embedded image::
message.images[].type (string): the MIME type of the image - must start with "image/"
message.images[].name (string): the Content ID of the image - use <img src="cid:THIS_VALUE"> to reference the image in your HTML content
message.images[].content (string): the content of the image as a base64-encoded string
async (boolean): enable a background sending mode that is optimized for bulk sending. In async mode, messages/send will immediately return a status of "queued" for every recipient. To handle rejections when sending in async mode, set up a webhook for the 'reject' event. Defaults to false for messages with no more than 10 recipients; messages with more than 10 recipients are always sent asynchronously, regardless of the value of async.
ip_pool (string): the name of the dedicated ip pool that should be used to send the message. If you do not have any dedicated IPs, this parameter has no effect. If you specify a pool that does not exist, your default pool will be used instead.
send_at (string): when this message should be sent as a UTC timestamp in YYYY-MM-DD HH:MM:SS format. If you specify a time in the past, the message will be sent immediately. An additional fee applies for scheduled email, and this feature is only available to accounts with a positive balance.
Returns:
array. of structs for each recipient containing the key "email" with the email address and "status" as either "sent", "queued", "scheduled", or "rejected"::
[] (struct): the sending results for a single recipient::
[].email (string): the email address of the recipient
[].status (string): the sending status of the recipient - either "sent", "queued", "rejected", or "invalid"
[].reject_reason (string): the reason for the rejection if the recipient status is "rejected"
[]._id (string): the message's unique id
Raises:
UnknownTemplateError: The requested template does not exist
PaymentRequiredError: The requested feature requires payment.
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownSubaccountError: The provided subaccount id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'template_name': template_name, 'template_content': template_content, 'message': message, 'async': async, 'ip_pool': ip_pool, 'send_at': send_at}
return self.master.call('messages/send-template', _params)
def search(self, query='*', date_from=None, date_to=None, tags=None, senders=None, api_keys=None, limit=100):
"""Search the content of recently sent messages and optionally narrow by date range, tags and senders
Args:
query (string): the search terms to find matching messages for
date_from (string): start date
date_to (string): end date
tags (array): an array of tag names to narrow the search to, will return messages that contain ANY of the tags
senders (array): an array of sender addresses to narrow the search to, will return messages sent by ANY of the senders
api_keys (array): an array of API keys to narrow the search to, will return messages sent by ANY of the keys
limit (integer): the maximum number of results to return, defaults to 100, 1000 is the maximum
Returns:
array. of structs for each matching message::
[] (struct): the information for a single matching message::
[].ts (integer): the Unix timestamp from when this message was sent
[]._id (string): the message's unique id
[].sender (string): the email address of the sender
[].template (string): the unique name of the template used, if any
[].subject (string): the message's subject line
[].email (string): the recipient email address
[].tags (array): list of tags on this message::
[].tags[] (string): individual tag on this message
[].opens (integer): how many times has this message been opened
[].opens_detail (array): list of individual opens for the message::
[].opens_detail[] (struct): information on an individual open::
[].opens_detail[].ts (integer): the unix timestamp from when the message was opened
[].opens_detail[].ip (string): the IP address that generated the open
[].opens_detail[].location (string): the approximate region and country that the opening IP is located
[].opens_detail[].ua (string): the email client or browser data of the open
[].clicks (integer): how many times has a link been clicked in this message
[].clicks_detail (array): list of individual clicks for the message::
[].clicks_detail[] (struct): information on an individual click::
[].clicks_detail[].ts (integer): the unix timestamp from when the message was clicked
[].clicks_detail[].url (string): the URL that was clicked on
[].clicks_detail[].ip (string): the IP address that generated the click
[].clicks_detail[].location (string): the approximate region and country that the clicking IP is located
[].clicks_detail[].ua (string): the email client or browser data of the click
[].state (string): sending status of this message: sent, bounced, rejected
[].metadata (struct): any custom metadata provided when the message was sent
smtp_events (array): a log of up to 3 smtp events for the message::
smtp_events[] (struct): information about a specific smtp event::
smtp_events[].ts (integer): the Unix timestamp when the event occured
smtp_events[].type (string): the message's state as a result of this event
smtp_events[].diag (string): the SMTP response from the recipient's server
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
ServiceUnavailableError: The subsystem providing this API call is down for maintenance
Error: A general Mandrill error has occurred
"""
_params = {'query': query, 'date_from': date_from, 'date_to': date_to, 'tags': tags, 'senders': senders, 'api_keys': api_keys, 'limit': limit}
return self.master.call('messages/search', _params)
def search_time_series(self, query='*', date_from=None, date_to=None, tags=None, senders=None):
"""Search the content of recently sent messages and return the aggregated hourly stats for matching messages
Args:
query (string): the search terms to find matching messages for
date_from (string): start date
date_to (string): end date
tags (array): an array of tag names to narrow the search to, will return messages that contain ANY of the tags
senders (array): an array of sender addresses to narrow the search to, will return messages sent by ANY of the senders
Returns:
array. the array of history information::
[] (struct): the stats for a single hour::
[].time (string): the hour as a UTC date string in YYYY-MM-DD HH:MM:SS format
[].sent (integer): the number of emails that were sent during the hour
[].hard_bounces (integer): the number of emails that hard bounced during the hour
[].soft_bounces (integer): the number of emails that soft bounced during the hour
[].rejects (integer): the number of emails that were rejected during the hour
[].complaints (integer): the number of spam complaints received during the hour
[].unsubs (integer): the number of unsubscribes received during the hour
[].opens (integer): the number of emails opened during the hour
[].unique_opens (integer): the number of unique opens generated by messages sent during the hour
[].clicks (integer): the number of tracked URLs clicked during the hour
[].unique_clicks (integer): the number of unique clicks generated by messages sent during the hour
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
ServiceUnavailableError: The subsystem providing this API call is down for maintenance
Error: A general Mandrill error has occurred
"""
_params = {'query': query, 'date_from': date_from, 'date_to': date_to, 'tags': tags, 'senders': senders}
return self.master.call('messages/search-time-series', _params)
def info(self, id):
"""Get the information for a single recently sent message
Args:
id (string): the unique id of the message to get - passed as the "_id" field in webhooks, send calls, or search calls
Returns:
struct. the information for the message::
ts (integer): the Unix timestamp from when this message was sent
_id (string): the message's unique id
sender (string): the email address of the sender
template (string): the unique name of the template used, if any
subject (string): the message's subject line
email (string): the recipient email address
tags (array): list of tags on this message::
tags[] (string): individual tag on this message
opens (integer): how many times has this message been opened
opens_detail (array): list of individual opens for the message::
opens_detail[] (struct): information on an individual open::
opens_detail[].ts (integer): the unix timestamp from when the message was opened
opens_detail[].ip (string): the IP address that generated the open
opens_detail[].location (string): the approximate region and country that the opening IP is located
opens_detail[].ua (string): the email client or browser data of the open
clicks (integer): how many times has a link been clicked in this message
clicks_detail (array): list of individual clicks for the message::
clicks_detail[] (struct): information on an individual click::
clicks_detail[].ts (integer): the unix timestamp from when the message was clicked
clicks_detail[].url (string): the URL that was clicked on
clicks_detail[].ip (string): the IP address that generated the click
clicks_detail[].location (string): the approximate region and country that the clicking IP is located
clicks_detail[].ua (string): the email client or browser data of the click
state (string): sending status of this message: sent, bounced, rejected
metadata (struct): any custom metadata provided when the message was sent
smtp_events (array): a log of up to 3 smtp events for the message::
smtp_events[] (struct): information about a specific smtp event::
smtp_events[].ts (integer): the Unix timestamp when the event occured
smtp_events[].type (string): the message's state as a result of this event
smtp_events[].diag (string): the SMTP response from the recipient's server
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownMessageError: The provided message id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'id': id}
return self.master.call('messages/info', _params)
def content(self, id):
"""Get the full content of a recently sent message
Args:
id (string): the unique id of the message to get - passed as the "_id" field in webhooks, send calls, or search calls
Returns:
struct. the content of the message::
ts (integer): the Unix timestamp from when this message was sent
_id (string): the message's unique id
from_email (string): the email address of the sender
from_name (string): the alias of the sender (if any)
subject (string): the message's subject line
to (struct): the message recipient's information::
to.email (string): the email address of the recipient
to.name (string): the alias of the recipient (if any)
tags (array): list of tags on this message::
tags[] (string): individual tag on this message
headers (struct): the key-value pairs of the custom MIME headers for the message's main document
text (string): the text part of the message, if any
html (string): the HTML part of the message, if any
attachments (array): an array of any attachments that can be found in the message::
attachments[] (struct): information about an individual attachment::
attachments[].name (string): the file name of the attachment
attachments[].type (string): the MIME type of the attachment
attachments[].content (string): the content of the attachment as a base64 encoded string
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownMessageError: The provided message id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'id': id}
return self.master.call('messages/content', _params)
def parse(self, raw_message):
"""Parse the full MIME document for an email message, returning the content of the message broken into its constituent pieces
Args:
raw_message (string): the full MIME document of an email message
Returns:
struct. the parsed message::
subject (string): the subject of the message
from_email (string): the email address of the sender
from_name (string): the alias of the sender (if any)
to (array): an array of any recipients in the message::
to[] (struct): the information on a single recipient::
to[].email (string): the email address of the recipient
to[].name (string): the alias of the recipient (if any)
headers (struct): the key-value pairs of the MIME headers for the message's main document
text (string): the text part of the message, if any
html (string): the HTML part of the message, if any
attachments (array): an array of any attachments that can be found in the message::
attachments[] (struct): information about an individual attachment::
attachments[].name (string): the file name of the attachment
attachments[].type (string): the MIME type of the attachment
attachments[].binary (boolean): if this is set to true, the attachment is not pure-text, and the content will be base64 encoded
attachments[].content (string): the content of the attachment as a text string or a base64 encoded string based on the attachment type
images (array): an array of any embedded images that can be found in the message::
images[] (struct): information about an individual image::
images[].name (string): the Content-ID of the embedded image
images[].type (string): the MIME type of the image
images[].content (string): the content of the image as a base64 encoded string
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'raw_message': raw_message}
return self.master.call('messages/parse', _params)
def send_raw(self, raw_message, from_email=None, from_name=None, to=None, async=False, ip_pool=None, send_at=None, return_path_domain=None):
"""Take a raw MIME document for a message, and send it exactly as if it were sent through Mandrill's SMTP servers
Args:
raw_message (string): the full MIME document of an email message
from_email (string|null): optionally define the sender address - otherwise we'll use the address found in the provided headers
from_name (string|null): optionally define the sender alias
to (array|null): optionally define the recipients to receive the message - otherwise we'll use the To, Cc, and Bcc headers provided in the document::
to[] (string): the email address of the recipient
async (boolean): enable a background sending mode that is optimized for bulk sending. In async mode, messages/sendRaw will immediately return a status of "queued" for every recipient. To handle rejections when sending in async mode, set up a webhook for the 'reject' event. Defaults to false for messages with no more than 10 recipients; messages with more than 10 recipients are always sent asynchronously, regardless of the value of async.
ip_pool (string): the name of the dedicated ip pool that should be used to send the message. If you do not have any dedicated IPs, this parameter has no effect. If you specify a pool that does not exist, your default pool will be used instead.
send_at (string): when this message should be sent as a UTC timestamp in YYYY-MM-DD HH:MM:SS format. If you specify a time in the past, the message will be sent immediately.
return_path_domain (string): a custom domain to use for the messages's return-path
Returns:
array. of structs for each recipient containing the key "email" with the email address and "status" as either "sent", "queued", or "rejected"::
[] (struct): the sending results for a single recipient::
[].email (string): the email address of the recipient
[].status (string): the sending status of the recipient - either "sent", "queued", "scheduled", "rejected", or "invalid"
[].reject_reason (string): the reason for the rejection if the recipient status is "rejected"
[]._id (string): the message's unique id
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
PaymentRequiredError: The requested feature requires payment.
UnknownTemplateError: The requested template does not exist
UnknownSubaccountError: The provided subaccount id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'raw_message': raw_message, 'from_email': from_email, 'from_name': from_name, 'to': to, 'async': async, 'ip_pool': ip_pool, 'send_at': send_at, 'return_path_domain': return_path_domain}
return self.master.call('messages/send-raw', _params)
def list_scheduled(self, to=None):
"""Queries your scheduled emails by sender or recipient, or both.
Args:
to (string): an optional recipient address to restrict results to
Returns:
array. a list of up to 1000 scheduled emails::
[] (struct): a scheduled email::
[]._id (string): the scheduled message id
[].created_at (string): the UTC timestamp when the message was created, in YYYY-MM-DD HH:MM:SS format
[].send_at (string): the UTC timestamp when the message will be sent, in YYYY-MM-DD HH:MM:SS format
[].from_email (string): the email's sender address
[].to (string): the email's recipient
[].subject (string): the email's subject
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'to': to}
return self.master.call('messages/list-scheduled', _params)
def cancel_scheduled(self, id):
"""Cancels a scheduled email.
Args:
id (string): a scheduled email id, as returned by any of the messages/send calls or messages/list-scheduled
Returns:
struct. information about the scheduled email that was cancelled.::
_id (string): the scheduled message id
created_at (string): the UTC timestamp when the message was created, in YYYY-MM-DD HH:MM:SS format
send_at (string): the UTC timestamp when the message will be sent, in YYYY-MM-DD HH:MM:SS format
from_email (string): the email's sender address
to (string): the email's recipient
subject (string): the email's subject
Raises:
UnknownMessageError: The provided message id does not exist.
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'id': id}
return self.master.call('messages/cancel-scheduled', _params)
def reschedule(self, id, send_at):
"""Reschedules a scheduled email.
Args:
id (string): a scheduled email id, as returned by any of the messages/send calls or messages/list-scheduled
send_at (string): the new UTC timestamp when the message should sent. Mandrill can't time travel, so if you specify a time in past the message will be sent immediately
Returns:
struct. information about the scheduled email that was rescheduled.::
_id (string): the scheduled message id
created_at (string): the UTC timestamp when the message was created, in YYYY-MM-DD HH:MM:SS format
send_at (string): the UTC timestamp when the message will be sent, in YYYY-MM-DD HH:MM:SS format
from_email (string): the email's sender address
to (string): the email's recipient
subject (string): the email's subject
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
UnknownMessageError: The provided message id does not exist.
Error: A general Mandrill error has occurred
"""
_params = {'id': id, 'send_at': send_at}
return self.master.call('messages/reschedule', _params)
class Whitelists(object):
    """Calls for the Mandrill email rejection whitelist."""

    def __init__(self, master):
        # All requests are routed through the parent client's call helper.
        self.master = master

    def add(self, email):
        """Add an email address to the rejection whitelist. If the address
        is currently on the blacklist, that blacklist entry is removed
        automatically.

        Args:
            email (string): an email address to add to the whitelist

        Returns:
            struct. a status object containing the address ("email") and a
            boolean flag for whether the operation succeeded

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('whitelists/add', {'email': email})

    def list(self, email=None):
        """Retrieve the email rejection whitelist, optionally filtered by an
        email address or search prefix. Returns up to 1000 results.

        Args:
            email (string): an optional email address or prefix to search by

        Returns:
            array. up to 1000 whitelist entries, each with "email",
            "detail" (why the address was whitelisted) and "created_at"

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('whitelists/list', {'email': email})

    def delete(self, email):
        """Remove an email address from the whitelist.

        Args:
            email (string): the email address to remove from the whitelist

        Returns:
            struct. a status object with "email" (the address removed from
            the whitelist) and "deleted" (whether the deletion succeeded)

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('whitelists/delete', {'email': email})
class Ips(object):
    def __init__(self, master):
        # Keep a reference to the parent Mandrill client so every ips/* call
        # can be routed through its authenticated `call` helper.
        self.master = master
def list(self, ):
"""Lists your dedicated IPs.
Returns:
array. an array of structs for each dedicated IP::
[] (struct): information about a single dedicated IP::
[].ip (string): the ip address
[].created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format
[].pool (string): the name of the pool that this dedicated IP belongs to
[].domain (string): the domain name (reverse dns) of this dedicated IP
[].custom_dns (struct): information about the ip's custom dns, if it has been configured::
[].custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip
[].custom_dns.valid (boolean): whether the ip's custom dns is currently valid
[].custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error
[].warmup (struct): information about the ip's warmup status::
[].warmup.warming_up (boolean): whether the ip is currently in warmup mode
[].warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format
[].warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {}
return self.master.call('ips/list', _params)
def info(self, ip):
"""Retrieves information about a single dedicated ip.
Args:
ip (string): a dedicated IP address
Returns:
struct. Information about the dedicated ip::
ip (string): the ip address
created_at (string): the date and time that the dedicated IP was created as a UTC string in YYYY-MM-DD HH:MM:SS format
pool (string): the name of the pool that this dedicated IP belongs to
domain (string): the domain name (reverse dns) of this dedicated IP
custom_dns (struct): information about the ip's custom dns, if it has been configured::
custom_dns.enabled (boolean): a boolean indicating whether custom dns has been configured for this ip
custom_dns.valid (boolean): whether the ip's custom dns is currently valid
custom_dns.error (string): if the ip's custom dns is invalid, this will include details about the error
warmup (struct): information about the ip's warmup status::
warmup.warming_up (boolean): whether the ip is currently in warmup mode
warmup.start_at (string): the start time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format
warmup.end_at (string): the end date and time for the warmup process as a UTC string in YYYY-MM-DD HH:MM:SS format
Raises:
InvalidKeyError: The provided API key is not a valid Mandrill API key
Error: A general Mandrill error has occurred
"""
_params = {'ip': ip}
return self.master.call('ips/info', _params)
def provision(self, warmup=False, pool=None):
    """Request an additional dedicated IP for the account.

    Accounts may have only one outstanding request at a time; requests
    are processed within 24 hours.

    Args:
        warmup (boolean): whether to enable warmup mode for the ip
        pool (string): the id of the pool to add the dedicated ip to, or
            None to use the account's default pool

    Returns:
        struct. Description of the provisioning request, including
        requested_at (UTC timestamp, YYYY-MM-DD HH:MM:SS).

    Raises:
        IPProvisionLimitError: A dedicated IP cannot be provisioned while another request is pending.
        UnknownPoolError: The provided dedicated IP pool does not exist.
        PaymentRequiredError: The requested feature requires payment.
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        NoSendingHistoryError: The user hasn't started sending yet.
        PoorReputationError: The user's reputation is too low to continue.
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/provision', {'warmup': warmup, 'pool': pool})
def start_warmup(self, ip):
    """Begin the warmup process for a dedicated IP.

    During warmup, Mandrill gradually increases the share of mail sent
    over the warming-up IP over roughly 30 days; the remainder goes over
    shared IPs or other dedicated IPs in the same pool.

    Args:
        ip (string): a dedicated ip address

    Returns:
        struct. Information about the dedicated IP: address, creation
        time, pool, reverse-DNS domain, custom DNS status, and warmup
        status (warming_up, start_at, end_at).

    Raises:
        UnknownIPError: The provided dedicated IP does not exist.
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/start-warmup', {'ip': ip})
def cancel_warmup(self, ip):
    """Cancel the warmup process for a dedicated IP.

    Args:
        ip (string): a dedicated ip address

    Returns:
        struct. Information about the dedicated IP: address, creation
        time, pool, reverse-DNS domain, custom DNS status, and warmup
        status (warming_up, start_at, end_at).

    Raises:
        UnknownIPError: The provided dedicated IP does not exist.
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/cancel-warmup', {'ip': ip})
def set_pool(self, ip, pool, create_pool=False):
    """Move a dedicated IP to a different pool.

    Args:
        ip (string): a dedicated ip address
        pool (string): the name of the new pool to add the dedicated ip to
        create_pool (boolean): whether to create the pool if it does not
            exist; if False and the pool is missing, an Unknown_Pool error
            is raised by the API

    Returns:
        struct. The updated state of the dedicated IP: address, creation
        time, pool, reverse-DNS domain, custom DNS status, and warmup
        status.

    Raises:
        UnknownIPError: The provided dedicated IP does not exist.
        UnknownPoolError: The provided dedicated IP pool does not exist.
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        InvalidEmptyDefaultPoolError: You cannot remove the last IP from your default IP pool.
        Error: A general Mandrill error has occurred
    """
    payload = {'ip': ip, 'pool': pool, 'create_pool': create_pool}
    return self.master.call('ips/set-pool', payload)
def delete(self, ip):
    """Delete a dedicated IP. This is permanent and cannot be undone.

    Args:
        ip (string): the dedicated ip to remove from your account

    Returns:
        struct. Description of the removed ip: its address and a
        `deleted` flag indicating whether removal succeeded.

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/delete', {'ip': ip})
def list_pools(self):
    """List the account's dedicated IP pools.

    Returns:
        array. Up to 1,000 pool structs, each with the pool's name,
        creation time (UTC, YYYY-MM-DD HH:MM:SS), and the dedicated IPs
        it contains (address, creation time, pool, reverse-DNS domain,
        custom DNS status, and warmup status per IP).

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/list-pools', {})
def pool_info(self, pool):
    """Describe a single dedicated IP pool.

    Args:
        pool (string): a pool name

    Returns:
        struct. Information about the pool: its name, creation time
        (UTC, YYYY-MM-DD HH:MM:SS), and its dedicated IPs (address,
        creation time, pool, reverse-DNS domain, custom DNS status, and
        warmup status per IP).

    Raises:
        UnknownPoolError: The provided dedicated IP pool does not exist.
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/pool-info', {'pool': pool})
def create_pool(self, pool):
    """Create a pool and return it.

    If a pool with this name already exists, no action is performed and
    the existing pool is returned.

    Args:
        pool (string): the name of a pool to create

    Returns:
        struct. Information about the pool: its name, creation time
        (UTC, YYYY-MM-DD HH:MM:SS), and its dedicated IPs (address,
        creation time, pool, reverse-DNS domain, custom DNS status, and
        warmup status per IP).

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/create-pool', {'pool': pool})
def delete_pool(self, pool):
    """Delete a pool.

    A pool must be empty before it can be deleted, and the default pool
    cannot be deleted.

    Args:
        pool (string): the name of the pool to delete

    Returns:
        struct. Status of the deleted pool: its name and a `deleted`
        boolean.

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        UnknownPoolError: The provided dedicated IP pool does not exist.
        InvalidDeleteDefaultPoolError: The default pool cannot be deleted.
        InvalidDeleteNonEmptyPoolError: Non-empty pools cannot be deleted.
        Error: A general Mandrill error has occurred
    """
    return self.master.call('ips/delete-pool', {'pool': pool})
def check_custom_dns(self, ip, domain):
    """Test whether a domain name is valid as the custom reverse DNS for a dedicated IP.

    Args:
        ip (string): a dedicated ip address
        domain (string): the domain name to test

    Returns:
        struct. Validation results: `valid` (whether the domain has a
        correctly-configured A record pointing at the ip) and `error`
        (details when valid is false).

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        UnknownIPError: The provided dedicated IP does not exist.
        Error: A general Mandrill error has occurred
    """
    payload = {'ip': ip, 'domain': domain}
    return self.master.call('ips/check-custom-dns', payload)
def set_custom_dns(self, ip, domain):
    """Configure the custom DNS name for a dedicated IP.

    Args:
        ip (string): a dedicated ip address
        domain (string): a domain name to set as the dedicated IP's
            custom dns name

    Returns:
        struct. The dedicated IP's new configuration: address, creation
        time, pool, reverse-DNS domain, custom DNS status
        (enabled/valid/error), and warmup status.

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        UnknownIPError: The provided dedicated IP does not exist.
        InvalidCustomDNSError: The domain name is not configured for use as the dedicated IP's custom reverse DNS.
        InvalidCustomDNSPendingError: A custom DNS change for this dedicated IP is currently pending.
        Error: A general Mandrill error has occurred
    """
    payload = {'ip': ip, 'domain': domain}
    return self.master.call('ips/set-custom-dns', payload)
class Internal(object):
    """Calls for Mandrill's internal API group; holds a reference to the API master."""
    def __init__(self, master):
        # master: the Mandrill client instance whose `call` method performs requests
        self.master = master
class Subaccounts(object):
    """API calls for managing subaccounts on a Mandrill account."""

    def __init__(self, master):
        # master: the Mandrill client instance whose `call` method performs requests
        self.master = master

    def list(self, q=None):
        """List the subaccounts defined for the account, optionally filtered by a prefix.

        Args:
            q (string): an optional prefix to filter the subaccounts' ids and names

        Returns:
            array. Up to 1,000 subaccount structs, each with id, name,
            custom_quota, status ("active" or "paused"), reputation
            (0-100), created_at / first_sent_at (UTC, YYYY-MM-DD
            HH:MM:SS), and sent_weekly / sent_monthly / sent_total
            counters.

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        return self.master.call('subaccounts/list', {'q': q})

    def add(self, id, name=None, notes=None, custom_quota=None):
        """Add a new subaccount.

        Args:
            id (string): a unique identifier for the subaccount to be used in sending calls
            name (string): an optional display name to further identify the subaccount
            notes (string): optional extra text to associate with the subaccount
            custom_quota (integer): an optional manual hourly quota; if
                not specified, Mandrill manages it based on reputation

        Returns:
            struct. The saved subaccount: id, name, custom_quota, status,
            reputation, created_at, first_sent_at, and sent counters.

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            Error: A general Mandrill error has occurred
        """
        payload = {'id': id, 'name': name, 'notes': notes, 'custom_quota': custom_quota}
        return self.master.call('subaccounts/add', payload)

    def info(self, id):
        """Return the data about an existing subaccount given its ID.

        Args:
            id (string): the unique identifier of the subaccount to query

        Returns:
            struct. Subaccount details: id, name, notes, custom_quota,
            status, reputation, created_at, first_sent_at, sending
            counters (weekly/monthly/total/hourly), the effective
            hourly_quota, and a last_30_days stats struct (sent, hard and
            soft bounces, rejects, complaints, unsubscribes, opens,
            unique opens, clicks, unique clicks).

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownSubaccountError: The provided subaccount id does not exist.
            Error: A general Mandrill error has occurred
        """
        return self.master.call('subaccounts/info', {'id': id})

    def update(self, id, name=None, notes=None, custom_quota=None):
        """Update an existing subaccount.

        Args:
            id (string): the unique identifier of the subaccount to update
            name (string): an optional display name to further identify the subaccount
            notes (string): optional extra text to associate with the subaccount
            custom_quota (integer): an optional manual hourly quota; if
                not specified, Mandrill manages it based on reputation

        Returns:
            struct. The updated subaccount: id, name, custom_quota,
            status, reputation, created_at, first_sent_at, and sent
            counters.

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownSubaccountError: The provided subaccount id does not exist.
            Error: A general Mandrill error has occurred
        """
        payload = {'id': id, 'name': name, 'notes': notes, 'custom_quota': custom_quota}
        return self.master.call('subaccounts/update', payload)

    def delete(self, id):
        """Delete an existing subaccount.

        Any email related to the subaccount is saved, but stats are
        removed and future sending calls for this subaccount will fail.

        Args:
            id (string): the unique identifier of the subaccount to delete

        Returns:
            struct. The deleted subaccount: id, name, custom_quota,
            status, reputation, created_at, first_sent_at, and sent
            counters.

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownSubaccountError: The provided subaccount id does not exist.
            Error: A general Mandrill error has occurred
        """
        return self.master.call('subaccounts/delete', {'id': id})

    def pause(self, id):
        """Pause a subaccount's sending.

        Emails delivered to a paused subaccount are queued for up to 3
        days until the subaccount is resumed.

        Args:
            id (string): the unique identifier of the subaccount to pause

        Returns:
            struct. The paused subaccount: id, name, custom_quota,
            status, reputation, created_at, first_sent_at, and sent
            counters.

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownSubaccountError: The provided subaccount id does not exist.
            Error: A general Mandrill error has occurred
        """
        return self.master.call('subaccounts/pause', {'id': id})

    def resume(self, id):
        """Resume a paused subaccount's sending.

        Args:
            id (string): the unique identifier of the subaccount to resume

        Returns:
            struct. The resumed subaccount: id, name, custom_quota,
            status, reputation, created_at, first_sent_at, and sent
            counters.

        Raises:
            InvalidKeyError: The provided API key is not a valid Mandrill API key
            UnknownSubaccountError: The provided subaccount id does not exist.
            Error: A general Mandrill error has occurred
        """
        return self.master.call('subaccounts/resume', {'id': id})
class Urls(object):
def __init__(self, master):
    """Store a reference to the API master used to perform calls."""
    # master: the Mandrill client instance whose `call` method performs requests
    self.master = master
def list(self):
    """Get the 100 most clicked URLs.

    Returns:
        array. The 100 most clicked URLs and their stats; each struct
        has the url, the number of emails that contained it (`sent`),
        total `clicks`, and `unique_clicks`.

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('urls/list', {})
def search(self, q):
    """Return the 100 most clicked URLs matching a search query.

    Args:
        q (string): a search query

    Returns:
        array. The 100 most clicked matching URLs; each struct has the
        url, the number of emails that contained it (`sent`), total
        `clicks`, and `unique_clicks`.

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('urls/search', {'q': q})
def time_series(self, url):
    """Return hourly stats for the last 30 days for a url.

    Args:
        url (string): an existing URL

    Returns:
        array. One struct per hour: the hour (`time`, UTC YYYY-MM-DD
        HH:MM:SS), emails sent containing the URL, clicks, and unique
        clicks during that hour.

    Raises:
        UnknownUrlError: The requested URL has not been seen in a tracked link
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('urls/time-series', {'url': url})
def tracking_domains(self):
    """Get the list of tracking domains set up for this account.

    Returns:
        array. One struct per tracking domain: the domain name,
        created_at and last_tested_at timestamps (UTC, YYYY-MM-DD
        HH:MM:SS), CNAME record details (valid, valid_after, error), and
        whether the domain is usable for tracking (`valid_tracking`).

    Raises:
        InvalidKeyError: The provided API key is not a valid Mandrill API key
        Error: A general Mandrill error has occurred
    """
    return self.master.call('urls/tracking-domains', {})
def add_tracking_domain(self, domain):
    """Add a tracking domain to your account.

    Args:
        domain (string): a domain name
    Returns:
        struct. information about the domain: the tracking domain name,
        created_at / last_tested_at timestamps (UTC, YYYY-MM-DD HH:MM:SS),
        a cname struct describing the domain's CNAME record (valid,
        valid_after, error), and a valid_tracking flag indicating whether
        the domain can be used as a tracking domain for email.
    Raises:
        InvalidKeyError: the provided API key is not a valid Mandrill API key
        Error: a general Mandrill error has occurred
    """
    payload = {'domain': domain}
    return self.master.call('urls/add-tracking-domain', payload)
def check_tracking_domain(self, domain):
    """Check the CNAME settings for a tracking domain.

    The domain must already have been added with the add-tracking-domain
    call.

    Args:
        domain (string): an existing tracking domain name
    Returns:
        struct. information about the tracking domain: the domain name,
        created_at / last_tested_at timestamps (UTC, YYYY-MM-DD HH:MM:SS),
        a cname struct describing the domain's CNAME record (valid,
        valid_after, error), and a valid_tracking flag indicating whether
        the domain can be used as a tracking domain for email.
    Raises:
        InvalidKeyError: the provided API key is not a valid Mandrill API key
        UnknownTrackingDomainError: the provided tracking domain does not exist
        Error: a general Mandrill error has occurred
    """
    payload = {'domain': domain}
    return self.master.call('urls/check-tracking-domain', payload)
class Webhooks(object):
    """Wrapper for the Mandrill /webhooks API endpoints.

    Each method builds the request parameters and delegates the HTTP call
    to the master API client. The webhook structs returned by the API
    contain: id (integer, unique identifier), url (string, the URL event
    data is POSTed to), description (string), auth_key (string, the key
    used to sign requests for this webhook), events (array of event names:
    send, hard_bounce, soft_bounce, open, click, spam, unsub, or reject),
    created_at / last_sent_at (UTC strings, YYYY-MM-DD HH:MM:SS),
    batches_sent (integer), events_sent (integer), and last_error (string,
    the last error seen when posting to this webhook, if any).
    """

    def __init__(self, master):
        # master is the API client object that performs the actual calls.
        self.master = master

    def list(self):
        """Get the list of all webhooks defined on the account.

        Returns:
            array. the webhooks associated with the account, one webhook
            struct per entry (see the class docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('webhooks/list', {})

    def add(self, url, description=None, events=None):
        """Add a new webhook.

        Args:
            url (string): the URL to POST batches of events to
            description (string): an optional description of the webhook
            events (array): an optional list of event names that will be
                posted to the webhook; defaults to no events
        Returns:
            struct. the information saved about the new webhook (see the
            class docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        # `events=None` avoids the mutable-default-argument pitfall; the
        # API payload still receives an empty list when nothing is given,
        # matching the previous behavior.
        _params = {'url': url,
                   'description': description,
                   'events': events if events is not None else []}
        return self.master.call('webhooks/add', _params)

    def info(self, id):
        """Given the ID of an existing webhook, return the data about it.

        Args:
            id (integer): the unique identifier of a webhook belonging to
                this account
        Returns:
            struct. the information about the webhook (see the class
            docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            UnknownWebhookError: the requested webhook does not exist
            Error: a general Mandrill error has occurred
        """
        return self.master.call('webhooks/info', {'id': id})

    def update(self, id, url, description=None, events=None):
        """Update an existing webhook.

        Args:
            id (integer): the unique identifier of a webhook belonging to
                this account
            url (string): the URL to POST batches of events to
            description (string): an optional description of the webhook
            events (array): an optional list of event names that will be
                posted to the webhook; defaults to no events
        Returns:
            struct. the information for the updated webhook (see the class
            docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            UnknownWebhookError: the requested webhook does not exist
            Error: a general Mandrill error has occurred
        """
        # Same mutable-default fix as in add(); the wire payload is unchanged.
        _params = {'id': id,
                   'url': url,
                   'description': description,
                   'events': events if events is not None else []}
        return self.master.call('webhooks/update', _params)

    def delete(self, id):
        """Delete an existing webhook.

        Args:
            id (integer): the unique identifier of a webhook belonging to
                this account
        Returns:
            struct. the information for the deleted webhook (see the class
            docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            UnknownWebhookError: the requested webhook does not exist
            Error: a general Mandrill error has occurred
        """
        return self.master.call('webhooks/delete', {'id': id})
class Senders(object):
    """Wrapper for the Mandrill /senders API endpoints.

    Each method assembles the request parameters and hands the HTTP call
    off to the master API client.
    """

    def __init__(self, master):
        # master is the API client object that performs the actual calls.
        self.master = master

    def list(self):
        """Return the senders that have tried to use this account.

        Returns:
            array. one struct per sending address, containing the address,
            a created_at timestamp (UTC, YYYY-MM-DD HH:MM:SS), and aggregate
            counts: sent, hard_bounces, soft_bounces, rejects, complaints,
            unsubs, opens, clicks, unique_opens, unique_clicks.
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('senders/list', {})

    def domains(self):
        """Return the sender domains that have been added to this account.

        Returns:
            array. one struct per sending domain, containing the domain
            name, created_at / last_tested_at / verified_at timestamps
            (UTC, YYYY-MM-DD HH:MM:SS), spf and dkim structs describing the
            domain's DNS records (valid, valid_after, error), and a
            valid_signing flag indicating whether the domain can be used to
            authenticate mail.
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('senders/domains', {})

    def add_domain(self, domain):
        """Add a sender domain to your account.

        Sender domains are added automatically as you send, but this call
        can be used to add them ahead of time.

        Args:
            domain (string): a domain name
        Returns:
            struct. information about the domain, with the same fields as
            the entries returned by domains().
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('senders/add-domain', {'domain': domain})

    def check_domain(self, domain):
        """Check the SPF and DKIM settings for a domain.

        If the domain has not already been added to the account, it is
        added automatically.

        Args:
            domain (string): a domain name
        Returns:
            struct. information about the sender domain, with the same
            fields as the entries returned by domains().
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('senders/check-domain', {'domain': domain})

    def verify_domain(self, domain, mailbox):
        """Send a verification email in order to verify ownership of a domain.

        Domain verification is an optional step to confirm ownership of a
        domain. Once a domain has been verified in a Mandrill account,
        other accounts may not have their messages signed by that domain
        unless they also verify the domain.

        Args:
            domain (string): a domain name at which you can receive email
            mailbox (string): a mailbox at the domain where the
                verification email should be sent
        Returns:
            struct. information about the verification that was sent:
            status ("sent" or "already_verified"), domain, and email (the
            address the verification email was sent to).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        payload = {'domain': domain, 'mailbox': mailbox}
        return self.master.call('senders/verify-domain', payload)

    def info(self, address):
        """Return detailed information about a single sender.

        Args:
            address (string): the email address of the sender
        Returns:
            struct. the sender's address, created_at timestamp, lifetime
            counts (sent, hard_bounces, soft_bounces, rejects, complaints,
            unsubs, opens, clicks), and a stats struct with per-period
            aggregates (today, last_7_days, last_30_days, last_60_days,
            last_90_days), each holding sent, hard_bounces, soft_bounces,
            rejects, complaints, unsubs, opens, unique_opens, clicks, and
            unique_clicks counts.
        Raises:
            UnknownSenderError: the requested sender does not exist
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('senders/info', {'address': address})

    def time_series(self, address):
        """Return hourly stats for the last 30 days for a sender.

        Args:
            address (string): the email address of the sender
        Returns:
            array. one struct per hour, with the hour as a UTC date string
            (YYYY-MM-DD HH:MM:SS) and counts of sent, hard_bounces,
            soft_bounces, rejects, complaints, opens, unique_opens, clicks,
            and unique_clicks during that hour.
        Raises:
            UnknownSenderError: the requested sender does not exist
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('senders/time-series', {'address': address})
class Metadata(object):
    """Wrapper for the Mandrill /metadata API endpoints.

    Custom metadata fields are described by structs containing: name (the
    unique identifier of the field), state (one of "active", "delete", or
    "index"), and view_template (a Mustache template controlling how the
    metadata is rendered in the activity log).
    """

    def __init__(self, master):
        # master is the API client object that performs the actual calls.
        self.master = master

    def list(self):
        """Get the list of custom metadata fields indexed for the account.

        Returns:
            array. one metadata-field struct per entry (see the class
            docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            Error: a general Mandrill error has occurred
        """
        return self.master.call('metadata/list', {})

    def add(self, name, view_template=None):
        """Add a new custom metadata field to be indexed for the account.

        Args:
            name (string): a unique identifier for the metadata field
            view_template (string): optional Mustache template to control
                how the metadata is rendered in the activity log
        Returns:
            struct. the information saved about the new metadata field
            (see the class docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            MetadataFieldLimitError: custom metadata field limit reached
            Error: a general Mandrill error has occurred
        """
        payload = {'name': name, 'view_template': view_template}
        return self.master.call('metadata/add', payload)

    def update(self, name, view_template):
        """Update an existing custom metadata field.

        Args:
            name (string): the unique identifier of the metadata field
            view_template (string): Mustache template to control how the
                metadata is rendered in the activity log
        Returns:
            struct. the information for the updated metadata field (see
            the class docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            UnknownMetadataFieldError: the provided metadata field name does not exist
            Error: a general Mandrill error has occurred
        """
        payload = {'name': name, 'view_template': view_template}
        return self.master.call('metadata/update', payload)

    def delete(self, name):
        """Delete an existing custom metadata field.

        Deletion is not instantaneous; /metadata/list will continue to
        return the field until the asynchronous deletion process completes.

        Args:
            name (string): the unique identifier of the metadata field
        Returns:
            struct. the information for the deleted metadata field (see
            the class docstring for the fields).
        Raises:
            InvalidKeyError: the provided API key is not a valid Mandrill API key
            UnknownMetadataFieldError: the provided metadata field name does not exist
            Error: a general Mandrill error has occurred
        """
        return self.master.call('metadata/delete', {'name': name})
| gpl-2.0 |
ville-k/tensorflow | tensorflow/contrib/slim/python/slim/evaluation_test.py | 62 | 9668 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import time
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.slim.python.slim import evaluation
from tensorflow.contrib.training.python.training import evaluation as evaluation_lib
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import input
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
FLAGS = flags.FLAGS
def GenerateTestData(num_classes, batch_size):
  """Generate a deterministic random batch of inputs and integer labels.

  Args:
    num_classes: Number of classes (also the input feature dimension).
    batch_size: Number of examples to generate.

  Returns:
    A tuple `(inputs, labels)` where `inputs` has shape
    `(batch_size, num_classes)` and `labels` has shape `(batch_size,)` with
    values in `[0, num_classes)`.
  """
  # Seed before *any* sampling so both inputs and labels are reproducible
  # across runs; previously the seed was set after `inputs` was drawn, so
  # only the labels were deterministic.
  np.random.seed(0)
  inputs = np.random.rand(batch_size, num_classes)
  labels = np.random.randint(low=0, high=num_classes, size=(batch_size,))
  return inputs, labels
def TestModel(inputs):
  """Build a trivial model: scale the inputs and take the row-wise argmax.

  Returns a `(predictions, scale)` pair, where `scale` is the (non-trainable)
  scaling variable.
  """
  gain = variables.Variable(1.0, trainable=False)
  # Multiplying by a positive scalar leaves the argmax unchanged.
  scaled = math_ops.multiply(inputs, gain)
  return math_ops.argmax(scaled, 1), gain
def GroundTruthAccuracy(inputs, labels, batch_size):
  """Return the fraction of rows whose argmax matches the corresponding label."""
  hits = (np.argmax(inputs, axis=1) == labels).sum()
  return float(hits) / batch_size
class EvaluationTest(test.TestCase):
  """Tests for `evaluation.evaluation_loop` and the checkpoint-wait helpers."""

  def setUp(self):
    """Build a small deterministic model and its ground-truth accuracy."""
    super(EvaluationTest, self).setUp()

    num_classes = 8
    batch_size = 16
    inputs, labels = GenerateTestData(num_classes, batch_size)
    self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)

    self._global_step = variables_lib.get_or_create_global_step()
    self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
    self._labels = constant_op.constant(labels, dtype=dtypes.int64)
    self._predictions, self._scale = TestModel(self._inputs)

  def testFinalOpsOnEvaluationLoop(self):
    """The loop must run `final_op`, return its value, and call hook `end()`."""
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)
    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    # Create checkpoint and log directories:
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to a checkpoint directory:
    saver = saver_lib.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Plain mutable holder so the hook can record that it ran.
    class Object(object):

      def __init__(self):
        self.hook_was_run = False

    obj = Object()

    # Create a custom session run hook.
    class CustomHook(session_run_hook.SessionRunHook):

      def __init__(self, obj):
        self.obj = obj

      def end(self, session):
        # Flip the flag so the test can verify the hook's end() was invoked.
        self.obj.hook_was_run = True

    # Now, run the evaluation loop:
    accuracy_value = evaluation.evaluation_loop(
        '',
        chkpt_dir,
        logdir,
        eval_op=update_op,
        final_op=value_op,
        hooks=[CustomHook(obj)],
        max_number_of_evaluations=1)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)

    # Validate that custom hook ran.
    self.assertTrue(obj.hook_was_run)

  def _create_names_to_metrics(self, predictions, labels):
    """Return two dicts (values, update ops) for a pair of accuracy metrics."""
    accuracy0, update_op0 = metric_ops.streaming_accuracy(predictions, labels)
    accuracy1, update_op1 = metric_ops.streaming_accuracy(predictions + 1,
                                                          labels)

    names_to_values = {'Accuracy': accuracy0, 'Another_accuracy': accuracy1}
    names_to_updates = {'Accuracy': update_op0, 'Another_accuracy': update_op1}
    return names_to_values, names_to_updates

  def _verify_summaries(self, output_dir, names_to_values):
    """Verifies that the given `names_to_values` are found in the summaries.

    Args:
      output_dir: An existing directory where summaries are found.
      names_to_values: A dictionary of strings to values.
    """
    # Check that the results were saved. The events file may have additional
    # entries, e.g. the event version stamp, so have to parse things a bit.
    output_filepath = glob.glob(os.path.join(output_dir, '*'))
    self.assertEqual(len(output_filepath), 1)

    events = summary_iterator.summary_iterator(output_filepath[0])
    summaries = [e.summary for e in events if e.summary.value]
    values = []
    for summary in summaries:
      for value in summary.value:
        values.append(value)
    saved_results = {v.tag: v.simple_value for v in values}
    for name in names_to_values:
      self.assertAlmostEqual(names_to_values[name], saved_results[name])

  def testLatestCheckpointReturnsNoneAfterTimeout(self):
    """wait_for_new_checkpoint must give up (None) once the timeout expires."""
    start = time.time()
    ret = evaluation_lib.wait_for_new_checkpoint(
        '/non-existent-dir', 'foo', timeout=1.0, seconds_to_sleep=0.5)
    end = time.time()
    self.assertIsNone(ret)

    # We've waited one time.
    self.assertGreater(end, start + 0.5)

    # The timeout kicked in.
    self.assertLess(end, start + 1.1)

  def testMonitorCheckpointsLoopTimeout(self):
    """checkpoints_iterator with timeout=0 must terminate immediately."""
    ret = list(
        evaluation_lib.checkpoints_iterator(
            '/non-existent-dir', timeout=0))
    self.assertEqual(ret, [])

  def testWithEpochLimit(self):
    """Evaluation must stop at the epoch boundary even with a huge num_evals."""
    predictions_limited = input.limit_epochs(self._predictions, num_epochs=1)
    labels_limited = input.limit_epochs(self._labels, num_epochs=1)

    value_op, update_op = metric_ops.streaming_accuracy(
        predictions_limited, labels_limited)

    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    # Create checkpoint and log directories:
    chkpt_dir = os.path.join(self.get_temp_dir(), 'tmp_logs/')
    gfile.MakeDirs(chkpt_dir)
    logdir = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
    gfile.MakeDirs(logdir)

    # Save initialized variables to a checkpoint directory:
    saver = saver_lib.Saver()
    with self.test_session() as sess:
      init_op.run()
      saver.save(sess, os.path.join(chkpt_dir, 'chkpt'))

    # Now, run the evaluation loop:
    accuracy_value = evaluation.evaluation_loop(
        '', chkpt_dir, logdir, eval_op=update_op, final_op=value_op,
        max_number_of_evaluations=1, num_evals=10000)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
class SingleEvaluationTest(test.TestCase):
  """Tests for `evaluation.evaluate_once` (single-shot evaluation)."""

  def setUp(self):
    """Build a small deterministic model and its ground-truth accuracy."""
    super(SingleEvaluationTest, self).setUp()

    num_classes = 8
    batch_size = 16
    inputs, labels = GenerateTestData(num_classes, batch_size)
    self._expected_accuracy = GroundTruthAccuracy(inputs, labels, batch_size)

    self._global_step = variables_lib.get_or_create_global_step()
    self._inputs = constant_op.constant(inputs, dtype=dtypes.float32)
    self._labels = constant_op.constant(labels, dtype=dtypes.int64)
    self._predictions, self._scale = TestModel(self._inputs)

  def testErrorRaisedIfCheckpointDoesntExist(self):
    """evaluate_once must raise NotFoundError for a missing checkpoint."""
    checkpoint_path = os.path.join(self.get_temp_dir(),
                                   'this_file_doesnt_exist')
    log_dir = os.path.join(self.get_temp_dir(), 'error_raised')
    with self.assertRaises(errors.NotFoundError):
      evaluation.evaluate_once('', checkpoint_path, log_dir)

  def testRestoredModelPerformance(self):
    """A model restored from checkpoint must reproduce the expected accuracy."""
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt')
    log_dir = os.path.join(self.get_temp_dir(), 'log_dir1/')

    # First, save out the current model to a checkpoint:
    init_op = control_flow_ops.group(variables.global_variables_initializer(),
                                     variables.local_variables_initializer())
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
    with self.test_session() as sess:
      sess.run(init_op)
      saver.save(sess, checkpoint_path)

    # Next, determine the metric to evaluate:
    value_op, update_op = metric_ops.streaming_accuracy(self._predictions,
                                                        self._labels)

    # Run the evaluation and verify the results:
    accuracy_value = evaluation.evaluate_once(
        '', checkpoint_path, log_dir, eval_op=update_op, final_op=value_op)
    self.assertAlmostEqual(accuracy_value, self._expected_accuracy)
# Run the full test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
gangadhar-kadam/sapphite_lib | webnotes/widgets/moduleview.py | 3 | 1363 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, json
from webnotes.widgets import reportview
@webnotes.whitelist()
def get_data(module, doctypes='[]'):
	"""Return the new-style reports and per-doctype record counts for a module.

	`doctypes` is a JSON-encoded list of doctype names."""
	doctype_list = json.loads(doctypes)
	reports = get_report_list(module)
	counts = get_count(doctype_list)
	return {"reports": reports, "item_count": counts}
def get_count(doctypes):
	"""Map each doctype the current user can read to its row count.

	Doctypes the user cannot read are silently skipped."""
	can_read = webnotes.user.get_can_read()
	return {d: get_doctype_count_from_table(d)
		for d in doctypes if d in can_read}
def get_doctype_count_from_table(doctype):
try:
count = reportview.execute(doctype, fields=["count(*)"], as_list=True)[0][0]
except Exception, e:
if e.args[0]==1146:
count = None
else:
raise e
return count
def get_report_list(module):
	"""return list on new style reports for modules"""
	# Joins Report to DocType to find all enabled, non-cancelled reports whose
	# reference doctype belongs to `module`; flags query/script reports.
	# NOTE(review): `docstatus in (0, NULL)` never matches NULL rows in SQL
	# (NULL comparisons inside IN are not true); presumably this was meant to
	# be ifnull(docstatus, 0) = 0 -- confirm before changing.
	return webnotes.conn.sql("""
		select distinct tabReport.name, tabReport.ref_doctype as doctype,
			if((tabReport.report_type='Query Report' or
				tabReport.report_type='Script Report'), 1, 0) as is_query_report
		from `tabReport`, `tabDocType`
		where tabDocType.module=%s
			and tabDocType.name = tabReport.ref_doctype
			and tabReport.docstatus in (0, NULL)
			and ifnull(tabReport.is_standard, "No")="No"
			and ifnull(tabReport.disabled,0) != 1
			order by tabReport.name""", module, as_dict=True)
marcelometal/pyvows | pyvows/runner/gevent.py | 1 | 10537 | # -*- coding: utf-8 -*-
'''The GEvent implementation of PyVows runner.'''
# pyvows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann heynemann@gmail.com
from __future__ import absolute_import
import inspect
import sys
import time
import StringIO
try:
from colorama.ansitowin32 import AnsiToWin32
except ImportError:
def AnsiToWin32(*args, **kwargs):
return args[0]
from gevent.pool import Pool
import gevent.local
from pyvows.async_topic import VowsAsyncTopic, VowsAsyncTopicValue
from pyvows.runner.utils import get_topics_for
from pyvows.result import VowsResult
from pyvows.utils import elapsed
from pyvows.runner.abc import VowsRunnerABC, VowsTopicError
from pyvows.runner import SkipTest
#-----------------------------------------------------------------------------
class _LocalOutput(gevent.local.local):
    """Greenlet-local stdout/stderr capture buffers.

    Each greenlet sees its own pair of StringIO buffers, so concurrently
    running contexts do not interleave their captured output.
    """
    def __init__(self):
        # Attributes are written through self.__dict__ directly — presumably
        # to bypass gevent.local's __setattr__ machinery during init; confirm
        # against gevent.local documentation before changing.
        self.__dict__['stdout'] = StringIO.StringIO()
        self.__dict__['stderr'] = StringIO.StringIO()
class _StreamCapture(object):
    """Proxy object that forwards every attribute access to the named
    greenlet-local stream on ``VowsParallelRunner.output``."""

    def __init__(self, streamName):
        self.__stream_name = streamName

    def __getattr__(self, name):
        local_stream = getattr(VowsParallelRunner.output, self.__stream_name)
        return getattr(local_stream, name)
class VowsParallelRunner(VowsRunnerABC):
    """Parallel vows runner backed by a gevent greenlet pool.

    Instantiated from `pyvows.core:Vows.run()`, which is called from
    `pyvows.cli.run()`.
    """

    # Greenlet-local capture buffers plus the original process-wide streams,
    # shared by all instances (class attributes on purpose).
    output = _LocalOutput()
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr

    def __init__(self, *args, **kwargs):
        super(VowsParallelRunner, self).__init__(*args, **kwargs)
        self.pool = Pool(1000)

    def run(self):
        """Spawn every planned batch of every suite into the pool, wait for
        completion, and return the populated VowsResult.

        Called from `pyvows.core:Vows.run()`, which is called from
        `pyvows.cli.run()`.
        """
        start_time = time.time()
        result = VowsResult()

        if self.capture_output:
            self._capture_streams(self.capture_output)
        try:
            for suiteName, suitePlan in self.execution_plan.iteritems():
                batches = [batch for batch in self.suites[suiteName] if batch.__name__ in suitePlan['contexts']]
                for batch in batches:
                    self.pool.spawn(
                        self.run_context,
                        result.contexts,
                        batch.__name__,
                        batch(None),
                        suitePlan['contexts'][batch.__name__],
                        index=-1,
                        suite=suiteName
                    )

            self.pool.join()
        finally:
            # Always restore the real streams, even if a context blew up.
            self._capture_streams(False)

        result.elapsed_time = elapsed(start_time)

        return result

    def run_context(self, ctx_collection, ctx_name, ctx_obj, execution_plan, index=-1, suite=None, skipReason=None):
        """Run one context: setup, topic, vows, subcontexts, then teardown.

        Appends a result dict to `ctx_collection`; subcontexts and vows are
        spawned into the pool and joined before teardown runs.
        """

        #-----------------------------------------------------------------------
        # Local variables and defs
        #-----------------------------------------------------------------------
        ctx_result = {
            'filename': suite or inspect.getsourcefile(ctx_obj.__class__),
            'name': ctx_name,
            'tests': [],
            'contexts': [],
            'topic_elapsed': 0,
            'error': None,
            'skip': skipReason
        }
        ctx_collection.append(ctx_result)
        ctx_obj.index = index
        ctx_obj.pool = self.pool
        # Greenlets (vows and subcontexts) that must finish before teardown.
        teardown_blockers = []

        def _run_setup_and_topic(ctx_obj, index):
            # If we're already mid-skip, don't run anything
            if skipReason:
                raise skipReason

            # Run setup function
            try:
                ctx_obj.setup()
            except Exception:
                raise VowsTopicError('setup', sys.exc_info())

            # Find & run topic function
            if not hasattr(ctx_obj, 'topic'):  # ctx_obj has no topic
                return ctx_obj._get_first_available_topic(index)

            try:
                topic_func = ctx_obj.topic
                topic_list = get_topics_for(topic_func, ctx_obj)

                start_time = time.time()

                if topic_func is None:
                    return None

                topic = topic_func(*topic_list)
                ctx_result['topic_elapsed'] = elapsed(start_time)
                return topic
            except SkipTest:
                # Skips propagate untouched; everything else becomes a topic error.
                raise
            except Exception:
                raise VowsTopicError('topic', sys.exc_info())

        def _run_tests(topic):
            def _run_with_topic(topic):
                def _run_vows_and_subcontexts(topic, index=-1, enumerated=False):
                    # methods
                    for vow_name, vow in vows:
                        if skipReason:
                            # Record the vow as skipped without executing it.
                            skipped_result = self.get_vow_result(vow, topic, ctx_obj, vow_name, enumerated)
                            skipped_result['skip'] = skipReason
                            ctx_result['tests'].append(skipped_result)
                        else:
                            vow_greenlet = self._run_vow(
                                ctx_result['tests'],
                                topic,
                                ctx_obj,
                                vow,
                                vow_name,
                                enumerated=enumerated)
                            teardown_blockers.append(vow_greenlet)

                    # classes
                    for subctx_name, subctx in subcontexts:
                        # resolve user-defined Context classes
                        if not issubclass(subctx, self.context_class):
                            subctx = type(ctx_name, (subctx, self.context_class), {})

                        subctx_obj = subctx(ctx_obj)
                        subctx_obj.pool = self.pool

                        subctx_greenlet = self.pool.spawn(
                            self.run_context,
                            ctx_result['contexts'],
                            subctx_name,
                            subctx_obj,
                            execution_plan['contexts'][subctx_name],
                            index=index,
                            suite=suite or ctx_result['filename'],
                            skipReason=skipReason
                        )
                        teardown_blockers.append(subctx_greenlet)

                # setup generated topics if needed
                is_generator = inspect.isgenerator(topic)
                if is_generator:
                    try:
                        ctx_obj.generated_topic = True
                        topic = ctx_obj.topic_value = list(topic)
                    except Exception:
                        # Actually getting the values from the generator may raise exception
                        raise VowsTopicError('topic', sys.exc_info())
                else:
                    ctx_obj.topic_value = topic

                # Generated topics run the vows once per yielded value.
                if is_generator:
                    for index, topic_value in enumerate(topic):
                        _run_vows_and_subcontexts(topic_value, index=index, enumerated=True)
                else:
                    _run_vows_and_subcontexts(topic)

            vows = set((vow_name, getattr(type(ctx_obj), vow_name)) for vow_name in execution_plan['vows'])
            subcontexts = set((subctx_name, getattr(type(ctx_obj), subctx_name)) for subctx_name in execution_plan['contexts'])

            # Async topics deliver their value through a callback instead of a
            # return value; run the vows from inside that callback.
            if not isinstance(topic, VowsAsyncTopic):
                _run_with_topic(topic)
            else:
                def handle_callback(*args, **kw):
                    _run_with_topic(VowsAsyncTopicValue(args, kw))
                topic(handle_callback)

        def _run_teardown():
            try:
                # Wait for all vows and subcontexts before tearing down.
                for blocker in teardown_blockers:
                    blocker.join()
                ctx_obj.teardown()
            except Exception:
                raise VowsTopicError('teardown', sys.exc_info())

        def _update_execution_plan():
            '''Since Context.ignore can modify the ignored_members during setup or topic,
            update the execution_plan to reflect the new ignored_members'''
            for name in ctx_obj.ignored_members:
                if name in execution_plan['vows']:
                    execution_plan['vows'].remove(name)
                if name in execution_plan['contexts']:
                    del execution_plan['contexts'][name]

        #-----------------------------------------------------------------------
        # Begin
        #-----------------------------------------------------------------------
        try:
            try:
                topic = _run_setup_and_topic(ctx_obj, index)
                _update_execution_plan()
            except SkipTest, se:
                # Propagate the skip to every vow/subcontext below this one.
                ctx_result['skip'] = se
                skipReason = se
                topic = None
            except VowsTopicError, e:
                # A failed setup/topic marks the context and skips its children.
                ctx_result['error'] = e
                skipReason = SkipTest('topic dependency failed')
                topic = None
            _run_tests(topic)
            if not ctx_result['error']:
                try:
                    _run_teardown()
                except Exception, e:
                    ctx_result['error'] = e
        finally:
            # Attach whatever this greenlet printed during the run.
            ctx_result['stdout'] = VowsParallelRunner.output.stdout.getvalue()
            ctx_result['stderr'] = VowsParallelRunner.output.stderr.getvalue()

    def _capture_streams(self, capture):
        """Redirect (capture=True) or restore (capture=False) the process-wide
        stdout/stderr through the greenlet-local capture buffers."""
        if capture:
            sys.stdout = AnsiToWin32(_StreamCapture('stdout'), convert=False, strip=True)
            sys.stderr = AnsiToWin32(_StreamCapture('stderr'), convert=False, strip=True)
        else:
            sys.stdout = VowsParallelRunner.orig_stdout
            sys.stderr = VowsParallelRunner.orig_stderr

    def _run_vow(self, tests_collection, topic, ctx_obj, vow, vow_name, enumerated=False):
        """Spawn a single vow into the pool so it runs concurrently."""
        return self.pool.spawn(self.run_vow, tests_collection, topic, ctx_obj, vow, vow_name, enumerated)

    def run_vow(self, tests_collection, topic, ctx_obj, vow, vow_name, enumerated=False):
        """Run a vow via the base implementation and attach captured output."""
        results = super(VowsParallelRunner, self).run_vow(tests_collection, topic, ctx_obj, vow, vow_name, enumerated=enumerated)
        results['stdout'] = VowsParallelRunner.output.stdout.getvalue()
        results['stderr'] = VowsParallelRunner.output.stderr.getvalue()
| mit |
Jorge-Rodriguez/ansible | lib/ansible/executor/action_write_locks.py | 140 | 1911 | # (c) 2016 - Red Hat, Inc. <info@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from multiprocessing import Lock
from ansible.module_utils.facts.system.pkg_mgr import PKG_MGRS
# Module-level, one-shot initialization of the per-module write locks used to
# serialize concurrent writes of module files by action plugins.
if 'action_write_locks' not in globals():
    # Do not initialize this more than once because it seems to bash
    # the existing one. multiprocessing must be reloading the module
    # when it forks?
    action_write_locks = dict()

    # Below is a Lock for use when we weren't expecting a named module. It gets used when an action
    # plugin invokes a module whose name does not match with the action's name. Slightly less
    # efficient as all processes with unexpected module names will wait on this lock
    action_write_locks[None] = Lock()

    # These plugins are known to be called directly by action plugins with names differing from the
    # action plugin name. We precreate them here as an optimization.
    # If a list of service managers is created in the future we can do the same for them.
    mods = set(p['name'] for p in PKG_MGRS)

    mods.update(('copy', 'file', 'setup', 'slurp', 'stat'))
    for mod_name in mods:
        action_write_locks[mod_name] = Lock()
| gpl-3.0 |
closeio/nylas | migrations/versions/182_add_data_processing_cache_table.py | 3 | 1311 | """add data processing cache table
Revision ID: 3857f395fb1d
Revises: 10da2e0bc3bb
Create Date: 2015-06-17 17:53:13.049138
"""
# revision identifiers, used by Alembic.
revision = '3857f395fb1d'
down_revision = '10da2e0bc3bb'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the `dataprocessingcache` table (at most one row per namespace,
    enforced by the unique constraint; rows cascade-delete with the namespace)."""
    op.create_table(
        'dataprocessingcache',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('updated_at', sa.DateTime(), nullable=False),
        sa.Column('deleted_at', sa.DateTime(), nullable=True),
        sa.Column('namespace_id', sa.Integer(), nullable=False),
        sa.Column('contact_rankings', mysql.MEDIUMBLOB(), nullable=True),
        sa.Column('contact_rankings_last_updated', sa.DateTime(),
                  nullable=True),
        sa.Column('contact_groups', mysql.MEDIUMBLOB(), nullable=True),
        sa.Column('contact_groups_last_updated', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['namespace_id'], [u'namespace.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('namespace_id')
    )
def downgrade():
    """Drop the `dataprocessingcache` table created by `upgrade`."""
    op.drop_table('dataprocessingcache')
| agpl-3.0 |
terbolous/SickRage | lib/guessit/rules/properties/crc.py | 34 | 2273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
crc and uuid properties
"""
from rebulk.remodule import re
from rebulk import Rebulk
from ..common.validators import seps_surround
def crc():
    """
    Builder for rebulk object.

    :return: Created Rebulk object
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE)
    rebulk.defaults(validator=seps_surround)

    # Exactly 8 hex characters (surrounded by separators) -> a CRC32 checksum;
    # wins over episode/season matches at the same position.
    rebulk.regex('(?:[a-fA-F]|[0-9]){8}', name='crc32',
                 conflict_solver=lambda match, other: match
                 if other.name in ['episode', 'season']
                 else '__default__')

    # Longer alphanumeric runs are classified heuristically (guess_idnumber)
    # as generic ids; same conflict policy as crc32.
    rebulk.functional(guess_idnumber, name='uuid',
                      conflict_solver=lambda match, other: match
                      if other.name in ['episode', 'season']
                      else '__default__')
    return rebulk
# Character-kind tags used by the id-number heuristic below.
_DIGIT = 0
_LETTER = 1
_OTHER = 2

# Candidate ids: runs of 20+ alphanumeric characters (hyphens allowed).
_idnum = re.compile(r'(?P<uuid>[a-zA-Z0-9-]{20,})')


def guess_idnumber(string):
    """
    Guess id number function

    :param string:
    :type string:
    :return:
    :rtype:
    """
    spans = []
    for match in _idnum.finditer(string):
        candidate = match.groupdict()['uuid']
        transitions = 0          # digit<->letter<->other kind changes
        letter_changes = 0       # consecutive distinct letters
        letters_seen = 0
        previous_letter = None
        previous_kind = _LETTER
        for char in candidate:
            if char in '0123456789':
                kind = _DIGIT
            elif char in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
                kind = _LETTER
                if char != previous_letter:
                    letter_changes += 1
                previous_letter = char
                letters_seen += 1
            else:
                kind = _OTHER
            if kind != previous_kind:
                transitions += 1
            previous_kind = kind
        # only return the result as probable if we alternate often between
        # char types (more likely for hash values than for common words)
        transition_ratio = float(transitions) / len(candidate)
        letter_ratio = (float(letter_changes) / letters_seen) if letters_seen > 0 else 1
        if transition_ratio > 0.4 and letter_ratio > 0.4:
            spans.append(match.span())
    return spans
| gpl-3.0 |
psiinon/addons-server | src/olympia/amo/monitors.py | 1 | 5255 | import os
import io
import socket
import traceback
from django.conf import settings
import requests
from kombu import Connection
from PIL import Image
import olympia.core.logger
from olympia.amo import search
from olympia.amo.templatetags.jinja_helpers import user_media_path
monitor_log = olympia.core.logger.getLogger('z.monitor')
def memcache():
    """Check TCP connectivity to every configured memcached server.

    Returns:
        (status, results): `status` is '' when healthy, otherwise a message
        describing the last problem found; `results` is a list of
        (ip, port, reachable) tuples, one per configured host.
    """
    memcache = getattr(settings, 'CACHES', {}).get('default')
    memcache_results = []
    status = ''
    if memcache and 'memcache' in memcache['BACKEND']:
        hosts = memcache['LOCATION']
        using_twemproxy = False
        if not isinstance(hosts, (tuple, list)):
            hosts = [hosts]
        for host in hosts:
            ip, port = host.split(':')

            if ip == '127.0.0.1':
                using_twemproxy = True

            # Create the socket *before* the try block: if socket() itself
            # raised, the previous code's `finally: s.close()` hit a NameError
            # on the first host (or closed the prior iteration's socket).
            s = socket.socket()
            try:
                s.connect((ip, int(port)))
            except Exception as e:
                result = False
                status = 'Failed to connect to memcached (%s): %s' % (host, e)
                monitor_log.critical(status)
            else:
                result = True
            finally:
                s.close()

            memcache_results.append((ip, port, result))

        if not using_twemproxy and len(memcache_results) < 2:
            status = ('2+ memcache servers are required.'
                      '%s available') % len(memcache_results)
            monitor_log.warning(status)

    if not memcache_results:
        status = 'Memcache is not configured'
        monitor_log.info(status)

    return status, memcache_results
def libraries():
    """Check that the imaging stack works (PIL with JPEG support).

    Returns:
        (status, results): `status` is '' when everything imported and worked;
        `results` is a list of (library, ok, message) tuples.
    """
    results = []
    try:
        # Round-trip a tiny image through an in-memory JPEG encode.
        Image.new('RGB', (16, 16)).save(io.BytesIO(), 'JPEG')
        results.append(('PIL+JPEG', True, 'Got it!'))
    except Exception as e:
        results.append(('PIL+JPEG', False, "Failed to create a jpeg image: %s" % e))

    broken = [name for name, ok, _msg in results if not ok]
    status = 'missing libs: %s' % ",".join(broken) if broken else ''
    return status, results
def elastic():
    """Ping the Elasticsearch cluster and report its health.

    Returns:
        (status, results): `status` is 'ES is red' when the cluster reports
        red health, '' otherwise; `results` is the health dict, or a dict
        holding the formatted traceback if the check itself failed.
    """
    status = ''
    try:
        health = search.get_es().cluster.health()
        if health['status'] == 'red':
            status = 'ES is red'
        results = health
    except Exception:
        results = {'exception': traceback.format_exc()}

    return status, results
def path():
    """Check that the expected media/locale paths exist with the right perms.

    Returns:
        (status, results): `status` is '' when everything checks out,
        otherwise a pointer to the status page; `results` is a list of
        (path, exists, has_perms, notes) tuples.
    """
    # Check file paths / permissions
    read_and_write = (
        settings.TMP_PATH,
        settings.MEDIA_ROOT,
        user_media_path('addons'),
        user_media_path('guarded_addons'),
        user_media_path('addon_icons'),
        user_media_path('previews'),
        user_media_path('userpics'),)
    read_only = [os.path.join(settings.ROOT, 'locale')]

    filepaths = [(path, os.R_OK | os.W_OK, 'We want read + write')
                 for path in read_and_write]
    filepaths += [(path, os.R_OK, 'We want read') for path in read_only]
    filepath_results = []
    filepath_status = True

    for path, perms, notes in filepaths:
        path_exists = os.path.exists(path)
        path_perms = os.access(path, perms)
        filepath_status = filepath_status and path_exists and path_perms

        if not isinstance(path, bytes):
            notes += ' / should be a bytestring!'

        filepath_results.append((path, path_exists, path_perms, notes))

    # The previous code assigned `status = filepath_status` and then
    # immediately overwrote it with '' — a dead store; only the string form
    # is ever returned, so initialize it to '' directly.
    status = ''
    if not filepath_status:
        status = 'check main status page for broken perms / values'

    return status, filepath_results
def rabbitmq():
    """Try to open a connection to the configured Celery broker.

    Returns:
        (status, results): `status` is '' on success, otherwise an error
        message; `results` is a list of (hostname, reachable) tuples.
    """
    results = []
    status = ''
    with Connection(settings.CELERY_BROKER_URL, connect_timeout=2) as broker:
        host = broker.hostname
        try:
            broker.connect()
        except Exception as e:
            results.append((host, False))
            status = 'Failed to chat with rabbitmq %s: %s' % (host, e)
            monitor_log.critical(status)
        else:
            results.append((host, True))

    return status, results
def signer():
    """Check that the Autograph signing service heartbeat endpoint responds.

    Returns:
        (status, results): `status` is '' when the service answered with
        HTTP 200, otherwise an error message; `results` is True on success
        and False on any failure (including a missing server_url).
    """
    # Check Signing Server Endpoint
    signer_results = None
    status = ''

    autograph_url = settings.AUTOGRAPH_CONFIG['server_url']
    if autograph_url:
        try:
            response = requests.get(
                '{host}/__heartbeat__'.format(host=autograph_url),
                timeout=settings.SIGNING_SERVER_MONITORING_TIMEOUT)
            if response.status_code != 200:
                status = (
                    'Failed to chat with signing service. '
                    'Invalid HTTP response code.')
                monitor_log.critical(status)
                signer_results = False
            else:
                signer_results = True
        except Exception as exc:
            status = 'Failed to chat with signing service: %s' % exc
            monitor_log.critical(status)
            signer_results = False
    else:
        status = 'server_url in AUTOGRAPH_CONFIG is not set'
        monitor_log.critical(status)
        signer_results = False

    return status, signer_results
| bsd-3-clause |
lrq3000/author-detector | authordetector/run.py | 1 | 33417 | #!/usr/bin/env python
# encoding: utf-8
#
# AuthorDetector
# Copyright (C) 2013 Larroque Stephen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from auxlib import *
from authordetector.configparser import ConfigParser
import os, sys, StringIO
import pandas as pd
import time
import traceback
from collections import OrderedDict
json = import_module('ujson')
if json is None:
json = import_module('json')
if json is None:
raise RuntimeError('Unable to find a json implementation')
class Runner:
rootdir = 'authordetector'
## @var vars contain a dynamical dict of variables used for data mining, and will be passed to every other computational function
vars = {} # we create a reference at startup so that this dict can be passed as a reference to children objects
## Initialize a runner object, with all constructs necessary to use the algorithms and process data according to the provided configuration file and commandline arguments
# @param args recognized and processed commandline arguments
# @param extras extra commandline arguments that are not explicitly recognized (but will nevertheless be appended to the config file, so that you can overwrite pretty much any configuration parameter you want at commandline)
def init(self, args, extras):
self.vars = dict()
#-- Loading config
self.config = ConfigParser()
configfile = args['config']; del args['config'] # delete the config argument, which is at best a self reference
self.config.init(configfile)
self.config.load(args, extras, comments=True)
#-- Loading classes
for (submod, classname) in self.config.config["classes"].iteritems(): # for each item/module specified in classes
localname = submod
if self.config.get('classes_alias') and self.config.get('classes_alias').get(localname):
submod = self.config.get('classes_alias').get(localname)
else:
submod = localname
# if it is a list of classes, we load all the classes into a local list of classes
if type(classname) == type(list()):
#self.__dict__[localname] = {} # initializing the local list of classes
# for each class in the list
for oneclassname in classname:
# we add the class
self.addclass(localname, submod, oneclassname, True)
# if a special string "all" is supplied, we load all the classes into a local list of classes (if a class with a similar name to the filename exists - the classname can have mixed case, the case doesn't matter if the filename corresponds to the classname.lower())
elif classname == 'all':
#self.__dict__[localname] = {} # initializing the local list of classes
modlist = os.listdir(os.path.join(self.rootdir, submod)) # loading the list of files/submodules
modlist = list(set([os.path.splitext(mod)[0] for mod in modlist])) # strip out the extension + get only unique values (else we will get .py and .pyc filenames, which are in fact the same module)
# Trim out the base class and __init__
# Remove all base modules (modules whose names starts with 'base')
[modlist.remove(mod) for mod in modlist if mod.startswith('base')]
# Remove all __init__ modules (these are only used by the python interpreter)
if '__init__' in modlist:
modlist.remove('__init__')
# For each submodule
for classname2 in modlist:
full_package = '.'.join([self.rootdir, submod, classname2.lower()])
mod = import_module(full_package) # we need to load the package before being able to list the classes contained inside
# We list all objects contained in this module (normally we expect only one class, but nevermind)
for iclass, iclassname in [(obj, obj.__name__) for obj in [getattr(mod, name) for name in dir(mod)] if isinstance(obj, type)]:
# If the object is a class, and the class name is the same as the filename, we add an instance of this class!
if iclassname.lower() == classname2.lower():
# we add the class
self.addclass(localname, submod, iclassname, True)
# if we just have a string, we add the class that corresponds to this string
# Note: the format must be "submodule": "ClassName", where submodule is the folder containing the subsubmodule, and "ClassName" both the class name, and the subsubmodule filename ("classname.py")
else:
self.addclass(localname, submod, classname)
return True
## Instanciate dynamically a class and add it to the local dict
# @param name key property under which name the module will be accessible (eg: if name='reader', you will have self.reader) - can be set = submod in most cases
# @param submod name of the subfolder/submodule where the subsubmodule/class resides
# @param classname both the subsubmodule filename (eg: classname.py) and the class name (eg: ClassName)
# @param listofclasses if True, instanciate this class in a list of classes, instead of just directly a property of the current object (eg: instead of self.reader, you will get: self.reader["firstparser"], self.reader["secondparser"], etc...)
def addclass(self, name, submod, classname, listofclasses=False):
try:
# Import the class dynamically
aclass = import_class('.'.join([self.rootdir, submod, classname.lower()]), classname)
# If we have several classes, we append all of them in a subdict of the attribute assigned for this module ( eg: runner.MyModule[firstclass] )
if listofclasses:
# Create the attribute as an OrderedDict if it does not exist yet
if self.__dict__.get(name) is None: self.__dict__[name] = OrderedDict() # keep the order in which the items were given (all iterators will also follow the order). This allows to set in config the order in which we want the items to be executed.
# Managing several calls to the same class: if we try to call twice the same class (eg: DropEmptyFeatures) for example at the beginning of PreOptimization and also at the end, there will be only one call because we are using a dict to store the classes. Here we try to avoid that by renaming the class by appending a number at the end.
# If the class does not already exist in the dict, we can use the name as-is
if not classname.lower() in self.__dict__[name]:
self.__dict__[name][classname.lower()] = aclass(config=self.config, parent=self)
# Else the class already exists (and is called before in the routine), and thus we have to rename this one
else:
count = 2
# Try to rename and increment the counter until we can store the class
while 1:
# Ok, this class name + current counter does not exist, we can store it
newclassname = "%s_%s" % (classname.lower(), str(count))
if not newclassname in self.__dict__[name]:
self.__dict__[name][newclassname] = aclass(config=self.config, parent=self)
break # exit the While loop
# Else we increment the counter and continue
else:
count += 1
# Else we have only one class, we store it straight away in an attribute
else:
self.__dict__[name] = aclass(config=self.config, parent=self)
return True
except Exception, e:
package_full = '.'.join([self.rootdir, submod, classname.lower()])
print("CRITICAL ERROR: importing a class failed: classname: %s package: %s\nException: %s" % (package_full, classname, e.__repr__()))
traceback.print_exc() # print traceback
raise RuntimeError('Unable to import a class/module')
## Update the local dict vars of variables
#
# This function is used as a proxy to accept any arbitrary number of returned arguments from functions, and will store them locally, and then the vars dict will be passed onto other children objects
def updatevars(self, dictofvars):
# Create the local vars dict if it does not exist
if not hasattr(self, 'vars'):
self.vars = {}
# Update/add the values inside dictofvars (if it is a dictionary of variables)
if type(dictofvars) == type(dict()):
self.vars.update(dictofvars) # add new variables from dict and merge updated values for already existing variables
# Else, it may be a list or an object or just a scalar (this means the function is not conforming to the dev standards), then can't know where to put those results and we just memorize them inside a "lastout" entry as-is.
# In summary: unnamed variables gets stored as temporary variables which may be overwritten at any time by subsequent functions
else:
# Delete the previous output
if self.vars.get("lastout", None): del self.vars["lastout"]
# Save this output
self.vars.update({"lastout": dictofvars})
## Generically call one object and its method (if obj is a list, it will call the method of each and every one of the modules in the list)
# @param obj Object or list of objects
# @param method Method to call in the object(s) (as string)
# @param args Optional arguments to pass to the method (must be a dictionary, with they keys being the name of the variables)
# @param return_vars Return a value instead of updating the local vars dict
# @param verbose Print more details about the executed routine
# TODO: reduce the number of maintained dictionaries (there are 4: self.vars, allvars, dictofvars and args)
# TODO: fix return_vars, it does work, but sometimes there is a bleeding effect (mixing up local variables and arguments variables. The best would be to use local variables where needed, but use argument variables foremost, and keep tracking of argument variables that are changed)
    def generic_call(self, obj, method, args=None, return_vars=False, verbose=False):
        """Call `method` on one module, or on every module of a dict of modules.

        obj -- a single module instance, or a dict/OrderedDict of module instances
               (all of them will be called in insertion order)
        method -- name of the method to call on the module(s), as a string
        args -- optional dict of extra keyword arguments for the method(s)
        return_vars -- if True, return the merged variables to the caller instead
                       of storing them locally via self.updatevars()
        verbose -- if True, print each module name before calling it

        NOTE(review): when return_vars is True the called methods are assumed to
        return a dict of variables (dictofvars.update() would fail otherwise) -- confirm.
        """
        # Create the local dict of vars
        allvars = dict() # input dict of vars
        if return_vars: dictofvars = dict() # output dict of vars (if return_vars is True)
        # Append the optional arguments to pass to methods
        if args is not None and type(args) == dict:
            allvars.update(args)
            if return_vars: dictofvars.update(args) # args and dictofvars must have ascendance over everything else when using return_vars
        # If we have a list of modules to call, we call the method of each and every one of those modules
        if isinstance(obj, (dict, OrderedDict)):
            # For every module in the list
            for submodule in obj.itervalues():
                # Print infos
                if verbose:
                    print("Routine: Calling module %s..." % submodule.__class__.__name__)
                    sys.stdout.flush()
                # Update the local dict of vars (self.vars wins over args here,
                # then previously returned vars win over self.vars)
                allvars.update(self.vars)
                if return_vars: allvars.update(dictofvars)
                # Get the callable object's method
                fullfunc = getattr(submodule, method)
                # Call the specified function for the specified module
                if not return_vars:
                    # By default we store in the local dict
                    self.updatevars(fullfunc(**allvars))
                else:
                    # Else we update a temporary dictofvars and we return it at the end
                    dictofvars.update(fullfunc(**allvars))
                # Force flushing the text into the terminal
                sys.stdout.flush()
            # Return the dictofvars at the end of the loop if the user wants to return the variables to the caller instead of storing them locally
            if return_vars:
                allvars.update(dictofvars) # return the input vars updated with the outputvars
                return allvars
        # Else if it is an object (thus only one module to call), we directly call its method
        else:
            # Print infos
            if verbose: print("Routine: Calling module %s..." % obj.__class__.__name__)
            # Get the callable object's method
            fullfunc = getattr(obj, method)
            # Update the local dict of vars
            allvars.update(self.vars)
            if return_vars: allvars.update(dictofvars)
            # Call the specified function for the specified module
            if not return_vars:
                self.updatevars(fullfunc(**allvars))
            else:
                allvars.update(fullfunc(**allvars)) # return the input vars updated with the outputvars
                return allvars
            # Force flushing the text into the terminal
            sys.stdout.flush()
## Execute a routine: call any module(s) given a list of dicts containing {"submodule name": "method of the class to call"}
# @param executelist A list containing the sequence of modules to launch (Note: the order of the contained elements matters!)
# @param verbose Print more details about the executed routine
    def execute(self, executelist, verbose=False):
        """Execute a routine: call module(s) from a list of {"module": "method"} items.

        executelist -- ordered sequence of steps; each step is either a one-entry
                       dict {"moduletype": "method"}, a plain module-category
                       string (its default `publicmethod` is used), or a sublist
                       of modules; any other type is silently skipped
        verbose -- forwarded to generic_call to print each called module

        Returns False if the constraints check fails, True otherwise.
        """
        # Checking constraints first
        if not self.check_constraints():
            print("FATAL ERROR while checking constraints. Please check your configuration. Exiting.")
            return False
        # Loop through all modules in run_learn list
        for mod in executelist:
            # Catch exceptions: if a module fails, we continue onto the next one - TODO: try to set this option ("robust") in a config variable: for dev we want exceptions, in production maybe not (just a warning and then pass).
            #try:
            # Special case: this is a sublist, we run all the modules in the list in parallel
            # NOTE(review): generic_call() requires a 'method' argument, but this call
            # passes only the sublist -- looks like it would raise a TypeError; confirm.
            if type(mod) == type(list()):
                self.generic_call(mod, verbose=verbose) # TODO: launch each submodule in parallel (using subprocess or threading, but be careful: Python's threads aren't efficient so this is not useful at all, and subprocess creates a new object, so how to communicate the computed/returned variables efficiently in memory?)
            else:
                # If it's a dict (specifying the module type and the method to call, format: {"moduletype":"method"})
                if isinstance(mod, dict):
                    # Unpacking the dict (Python 2: keys()/values() return lists)
                    module = mod.keys()[0]
                    func = mod.values()[0]
                # Else if it's a string, thus there's only the module type, we will call the default public method
                elif isinstance(mod, basestring):
                    module = mod # it's just a string, the name of the category of modules to call
                    # For the method it's a bit more tricky: we try to get the publicmethod, declared in the base class of each category of modules (and thus inherited by modules)
                    # Special case: we defined multiples modules to load in "classes" config for this category of modules, so we just get publicmethod from the first module in the dict
                    if isinstance (self.__dict__[module], (dict, OrderedDict)):
                        func = self.__dict__[module].itervalues().next().publicmethod
                    # Else it's a single module, we can get the publicmethod right away
                    else:
                        func = self.__dict__[module].publicmethod
                # Else it's not a recognized format, we pass
                else:
                    continue
                # Call the module's method
                self.generic_call(self.__dict__[module], func, verbose=verbose)
            #except Exception, e:
                #print "Exception when executing the routine: %s" % str(e)
            # Force flushing the text into the terminal
            sys.stdout.flush()
        return True
## Write down the parameters into a file
# Format of the file: json structure consisting of a dict where the keys are the names of the vars, and the values are strings encoding the data in csv format
# TODO: replace by pandas.to_json() when the feature will be put back in the main branch?
    @staticmethod
    def save_vars(jsonfile, dictofvars, exclude=None):
        """Write the variables down into a JSON file.

        File format: a json dict where keys are variable names and values are
        either plain JSON values, or strings holding CSV-encoded pandas objects.

        jsonfile -- path of the output JSON file
        dictofvars -- dict (or list/tuple) of variables to serialize
        exclude -- optional list of keys to skip (only checked at every level
                   of the recursion against the top-level exclude list)

        Returns True on success, False if writing the file failed.
        NOTE(review): convert_vars assumes its argument is a dict or list/tuple;
        any other type would leave `iter`/`finaldict` unbound (NameError) -- confirm
        callers always pass a container.
        """
        ## Simple function to insert an item in either a dict or a list
        def addtolistordict(finaldict, key, item):
            if isinstance(finaldict, (dict)):
                finaldict[key] = item
            elif isinstance(finaldict, (list, tuple)):
                finaldict.insert(key, item)
        ## Recursively convert variables into a json intelligible format
        def convert_vars(dictofvars, exclude=None):
            # Loading the correct generator depending on the type of dictofvars
            # If it's a dict we iter over items
            if (isinstance(dictofvars, dict)):
                iter = dictofvars.iteritems()
                finaldict = dict()
            # If it's a list we enumerate it
            elif (isinstance(dictofvars, (list, tuple))):
                iter = enumerate(dictofvars)
                finaldict = list()
            # For each object in our dict of variables
            for (key, item) in iter:
                try:
                    # If this variable is in the exclude list, we skip it
                    if exclude and key in exclude:
                        continue
                    # Try to save the pandas object as CSV
                    #try:
                    # Only try if there is a method to_csv()
                    # TODO: replace by pandas.to_json() when the feature will be put back in the main branch?
                    if (hasattr(item, 'to_csv')):
                        out = StringIO.StringIO()
                        item.to_csv(out)
                        addtolistordict(finaldict, key, out.getvalue())
                    # Else it is probably not a Pandas object since this method is not available, we just save the value as-is or recursively convert pandas objects if possible
                    else:
                        # If possible, try to convert the item to a list
                        if (hasattr(item, 'tolist')):
                            item = item.tolist()
                        # If this is a recursive object, try to convert the variables inside (they may be pandas objects)
                        if (isinstance(item, (list, dict, tuple)) and not isinstance(item, basestring)):
                            addtolistordict(finaldict, key, convert_vars(item))
                        # Else just save the item as-is
                        else:
                            addtolistordict(finaldict, key, item)
                # On any conversion error, fall back to saving the raw value as-is
                except Exception, e:
                    addtolistordict(finaldict, key, item)
                    print("Notice: couldn't correctly convert the value for the key %s. The value will be saved as-is. Error: %s" % (key, e))
                    pass
            return finaldict
        # Convert recursively the dict of vars
        finaldict = convert_vars(dictofvars, exclude)
        # Save the dict of csv data as a JSON file
        try:
            f = open(jsonfile, 'wb') # open in binary mode to avoid line returns translation (else the reading will be flawed!). We have to do it both at saving and at reading.
            f.write( json.dumps(finaldict, sort_keys=True, indent=4) ) # write the file as a json serialized string, but beautified to be more human readable
            f.close()
            return True
        except Exception, e:
            print("Exception while trying to save the parameters into the parameters file: %s. The parameters have not been saved!" % e)
            return False
## Load the parameters from a file
# Format of the file: json structure consisting of a dict where the keys are the names of the vars, and the values are strings encoding the data in csv format
# TODO: replace by pandas.from_json() when the feature will be put back in the main branch?
# @param jsonfile Path to the json file containing the variables to load
# @param prefixkey A prefix to prepend to the root keys (only for the root variables!)
    @staticmethod
    def load_vars(jsonfile, prefixkey=None):
        """Load the parameters previously saved by save_vars() from a JSON file.

        File format: a json dict where keys are variable names and values are
        either plain JSON values, or strings holding CSV-encoded pandas objects
        (which are decoded back into Series/DataFrame when possible).

        jsonfile -- path to the json file containing the variables to load
        prefixkey -- optional prefix prepended to the ROOT keys only (nested
                     keys are left untouched)

        Returns the reconstructed dict (or list) of variables.
        """
        ## Simple function to insert an item in either a dict or a list
        def addtolistordict(finaldict, key, item):
            if isinstance(finaldict, (dict)):
                finaldict[key] = item
            elif isinstance(finaldict, (list, tuple)):
                finaldict.insert(key, item)
        ## Convert back variables and returns a dict
        # This is mainly because we need to convert back pandas objects, because pandas does not provide a to_json() function anymore
        # TODO: replace all this by a simple to_json() when it will be fixed in Pandas?
        # @param d Can be either a dict or a list
        # @param prefixkey A prefix to prepend to the root keys (only for the root variables!)
        def convert_vars(d, prefixkey=None, level=0):
            # Loading the correct generator depending on the type of dictofvars
            # If it's a dict we iter over items
            if (isinstance(d, dict)):
                iter = d.iteritems()
                dictofvars = dict()
            # If it's a list we enumerate it
            elif (isinstance(d, (list, tuple))):
                iter = enumerate(d)
                dictofvars = list()
            # For each item in the json
            for key, item in iter:
                # Prepend the prefix to key if specified, and if we are at the root (we don't prefix below)
                if prefixkey and isinstance(prefixkey, (basestring, str)) and level == 0:
                    key = prefixkey + key
                # TODO: Pandas objects are stored in a string for the moment because to_json() was removed. Fix this with a more reliable way to decode those structures in the future.
                if (isinstance(item, basestring)):
                    # Try to load a pandas object (Series or DataFrame)
                    try:
                        buf = StringIO.StringIO(item)
                        df = pd.read_csv(buf, index_col=0, header=0) # by default, load as a DataFrame
                        # if in fact it's a Series (a vector), we reload as a Series
                        # TODO: replace all this by pd.read_csv(buf, squeeze=True) when squeeze will work!
                        if df.shape[1] == 1:
                            buf.seek(0)
                            addtolistordict(dictofvars, key, pd.Series.from_csv(buf))
                            # Failsafe: in case we tried to load a Series but it didn't work well (pandas will failsafe and return the original string), we finally set as a DataFrame
                            if (type(dictofvars[key]) != type(pd.Series()) and type(dictofvars[key]) != type(pd.DataFrame()) or dictofvars[key].dtype == object ): # if it's neither a Series nor DataFrame, we expect the item to be a DataFrame and not a Series
                                addtolistordict(dictofvars, key, df)
                        # Else if it is really a DataFrame, we set it as DataFrame
                        else:
                            if (not df.empty):
                                addtolistordict(dictofvars, key, df)
                            # In the case it is really a string (the resulting pandas object is empty), we just store the string as-is
                            else:
                                addtolistordict(dictofvars, key, item)
                    # If it didn't work well, we load the object as-is (maybe it's simply a string)
                    except Exception, e:
                        addtolistordict(dictofvars, key, item)
                        print("Exception: couldn't correctly load the value for the key %s. Error: %s. This item will be skipped." % (key, e))
                        pass
                # Else it is already a converted Python object (eg: a list, a dict, a number, etc...), we just use it as-is
                else:
                    if isinstance(item, (dict, list, tuple)) and not isinstance(item, basestring):
                        addtolistordict(dictofvars, key, convert_vars(item, level=level+1))
                    else:
                        addtolistordict(dictofvars, key, item)
            return dictofvars
        # Open the file (binary mode to match how save_vars wrote it)
        with open(jsonfile, 'rb') as f:
            filecontent = f.read()
        # Load the json tree
        jsontree = json.loads(filecontent)
        # Convert recursively the dict of vars (for pandas objects)
        dictofvars = convert_vars(jsontree, prefixkey)
        # Return the list of variables/parameters
        return dictofvars
## Check constraints integrity based on classes' definitions
# This will NOT stop execution, but rather display a warning that integrity might not be safe and thus errors can be encountered in execution. But that might not be the case if the user knows what s/he's doing.
    def check_constraints(self):
        """Check constraints integrity based on submodules' declared constraints.

        Walks the configured workflow in order and, for each submodule that
        declares a constraints dict with an 'after' entry, verifies that the
        required module appears earlier in the workflow. Violations only print
        a WARNING (execution is never stopped: the user may know what s/he is
        doing). Always returns True.
        """
        def constraint_error(submodname, constraint):
            # Report (but do not fail on) an unsatisfied constraint
            print("WARNING: in submodule %s constraint %s is not satisfied! Please check your config." % (submodname, constraint))
        print("Checking constraints...")
        sys.stdout.flush()
        #== Checking workflow
        print("Checking constraints in workflow...")
        # Pick the routine matching the current mode
        if self.vars['Mode'] == 'Learning':
            routine = self.config.get('workflow_learn')
        else:
            routine = self.config.get('workflow')
        prevmod = list() # list of the past modules
        # Iterate over all modules categories
        for mod in routine:
            # mod is an item of the routine, and it can either be a string (module category name), or a dict (modname + method)
            if isinstance(mod, (dict, OrderedDict)): # in this case, we need to unpack the name
                modname = mod.iterkeys().next() # unpack the key
            else: # else it's just a string, it's directly the name
                modname = mod
            # Get the module object
            module = self.__dict__[modname]
            # Little trick to do a for each loop in any case (in case we have only one submodule for this category of modules, or if we have a dict of submodules)
            if (isinstance(module, (dict, OrderedDict))):
                submods = module
            else: # only one submodule, we convert it to a dict
                classname = module.__class__.__name__.lower()
                submods = {classname: module}
            # For each submodule
            for submodname, submod in submods.iteritems():
                # If some constraints are set for this submodule
                if getattr(submod, 'constraints', None) is not None:
                    #-- Checking "after" constraint
                    if submod.constraints.get('after') is not None:
                        # If this submodule must be launched after another module, but this other module was not set before in the workflow, then warning
                        if submod.constraints['after'] not in prevmod:
                            constraint_error(submodname, "%s:%s" % ('after', submod.constraints['after']))
                # Add current submodule name into the list of past modules
                prevmod.append(submodname)
                # Flush output
                sys.stdout.flush()
            # Add current module category into the list of past modules
            prevmod.append(modname)
        return True
## Learning routine: Train the system to learn how to detect cheating
    def learn(self, executelist=None):
        """Learning routine: train the system on the labeled texts.

        executelist -- optional explicit workflow (list of steps for execute());
                       falls back to config 'workflow_learn', then to a default
                       preprocessing/features/patterns/merger/postprocessing chain.

        Returns False if the routine execution failed, True otherwise. At the
        end, the learned variables are saved to config 'parametersfile' if set
        (except the big X/Y matrices and mode-related entries).
        """
        # Specify the mode
        self.updatevars({'Mode': 'Learning'})
        self.config.update({'Mode': 'Learning'})
        # Reload the texts config
        if self.__dict__.get('reader', None):
            self.reader.reloadconfig() # make sure the textreader updates the list of texts it must load (depending on Mode)
        # We can pass an execution list either as an argument (used for recursion) or in the configuration
        if not executelist:
            executelist = self.config.get('workflow_learn', None)
        # Standard learning routine
        # If no routine is given, then we execute the standard learning routine
        if not executelist:
            executelist = []
            if self.__dict__.get('preprocessing', None):
                executelist.append({"preprocessing": "process"})
            executelist.append({"featuresextractor": "extract"})
            if self.__dict__.get('patternsextractor', None):
                executelist.append({"patternsextractor": "extract"})
            if self.__dict__.get('merger', None):
                executelist.append({"merger": "merge"})
            if self.__dict__.get('postprocessing', None):
                executelist.append({"postprocessing": "process"})
        # Initialization, do various stuff
        print("Initializing, this can take a few moments, please wait..."); sys.stdout.flush()
        # Execute all modules of the routine (either of config['workflow_learn'] or the standard routine)
        if not self.execute(executelist, verbose=True): # We generally prefer to print all infos when learning
            return False
        print('All done!')
        # End of learning, we save the parameters if a parametersfile was specified
        if self.config.get('parametersfile', None):
            Runner.save_vars(self.config.get('parametersfile'), self.vars, ['X', 'Y', 'X_raw', 'Weights', 'Mode']) # save all vars but X and Y (which may be VERY big and aren't parameters anyway)
            print('Learned parameters saved in: %s' % self.config.get('parametersfile'))
        return True
## Detection routine: identify the labels for the unlabeled texts
def run(self, executelist=None):
# Specify the mode
self.updatevars({'Mode': 'Detection'})
self.config.update({'Mode': 'Detection'})
if self.__dict__.get('reader', None):
self.reader.reloadconfig() # make sure the textreader updates the list of texts it must load (depending on Mode)
# Load the parameters if a file is specified
if self.config.get('parametersfile', None):
self.updatevars(Runner.load_vars(self.config.config['parametersfile'], prefixkey='L_'))
# We can pass an execution list either as an argument (used for recursion) or in the configuration
if not executelist:
executelist = self.config.get('workflow', None)
# Standard detection routine
# If no routine is given, then we execute the standard detection routine
if not executelist:
executelist = []
if self.__dict__.get('preprocessing', None):
executelist.append({"preprocessing": "process"})
executelist.append({"featuresextractor": "extract"})
if self.__dict__.get('patternsextractor', None):
executelist.append({"patternsextractor": "extract"})
if self.__dict__.get('postprocessing', None):
executelist.append({"postprocessing": "process"})
executelist.append({"detector": "detect"})
# Execute all modules of the routine (either of config['workflow'] or the standard routine)
if not self.execute(executelist, verbose=True): # We generally prefer to print all infos
return False
# End of identification, we save the results in a file if specified
if self.config.get('resultsfile', None):
Runner.save_vars(self.config.get('resultsfile'), {'Result': self.vars.get('Result'), 'Result_details': self.vars.get('Result_details')}) # save the Result and Result_details variable
print('Identification results saved in: %s' % self.config.get('resultsfile'))
return True
if __name__ == '__main__':
runner = Runner()
runner.init()
runner.run() | gpl-3.0 |
gravyboat/streamlink | src/streamlink/plugins/n13tv.py | 4 | 4752 | import logging
import re
from urllib.parse import urljoin, urlunparse
from streamlink.exceptions import PluginError
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class N13TV(Plugin):
    """Streamlink plugin for the Israeli 13tv.co.il site (live channel and VOD)."""
    # Matches both the live page (/live) and any VOD page path; group 1 is
    # "live" for the live channel, otherwise part of the VOD path
    url_re = re.compile(r"https?://(?:www\.)?13tv\.co\.il/(live|.*?/)")
    api_url = "https://13tv-api.oplayer.io/api/getlink/"
    # Locates the site's fingerprinted main JS bundle inside the page HTML
    main_js_url_re = re.compile(r'type="text/javascript" src="(.*?main\..+\.js)"')
    # The user/account id is embedded in the main JS bundle
    user_id_re = re.compile(r'"data-ccid":"(.*?)"')
    # The VOD item name is embedded in the page HTML
    video_name_re = re.compile(r'"videoRef":"(.*?)"')
    # Splits "host/path" into (host, /path)
    server_addr_re = re.compile(r'(.*[^/])(/.*)')
    # Splits "name.ext..." into (name, .ext...)
    media_file_re = re.compile(r'(.*)(\.[^\.].*)')
    # Live API response: a one-element list of {"Link": <playlist url>}
    live_schema = validate.Schema(validate.all(
        [{'Link': validate.url()}],
        validate.get(0),
        validate.get('Link')
    ))
    # VOD API response: a one-element list describing the media location;
    # the protocol has its "://" stripped and the token its leading "?"
    vod_schema = validate.Schema(validate.all([{
        'ShowTitle': validate.text,
        'ProtocolType': validate.all(
            validate.text,
            validate.transform(lambda x: x.replace("://", ""))
        ),
        'ServerAddress': validate.text,
        'MediaRoot': validate.text,
        'MediaFile': validate.text,
        'Bitrates': validate.text,
        'StreamingType': validate.text,
        'Token': validate.all(
            validate.text,
            validate.transform(lambda x: x.lstrip("?"))
        )
    }], validate.get(0)))
    @classmethod
    def can_handle_url(cls, url):
        """Return True if *url* is a 13tv.co.il page this plugin handles."""
        return cls.url_re.match(url) is not None
    def _get_live(self, user_id):
        """Query the getlink API for the live channel and return its HLS streams."""
        res = self.session.http.get(
            self.api_url,
            params=dict(
                userId=user_id,
                serverType="web",
                ch=1,
                cdnName="casttime"
            )
        )
        url = self.session.http.json(res, schema=self.live_schema)
        log.debug("URL={0}".format(url))
        return HLSStream.parse_variant_playlist(self.session, url)
    def _get_vod(self, user_id, video_name):
        """Resolve a VOD item by name via the API and return its HLS streams.

        Rebuilds the playlist URL from the API's split components
        (protocol, server, media root/file, bitrates, token). Implicitly
        returns None when the API's ShowTitle does not match *video_name*.
        """
        res = self.session.http.get(
            urljoin(self.api_url, "getVideoByFileName"),
            params=dict(
                userId=user_id,
                videoName=video_name,
                serverType="web",
                callback="x"
            )
        )
        vod_data = self.session.http.json(res, schema=self.vod_schema)
        if video_name == vod_data['ShowTitle']:
            host, base_path = self.server_addr_re.search(
                vod_data['ServerAddress']
            ).groups()
            if not host or not base_path:
                raise PluginError("Could not split 'ServerAddress' components")
            base_file, file_ext = self.media_file_re.search(
                vod_data['MediaFile']
            ).groups()
            if not base_file or not file_ext:
                raise PluginError("Could not split 'MediaFile' components")
            # Reassemble the media path in the order the CDN expects
            media_path = "{0}{1}{2}{3}{4}{5}".format(
                base_path,
                vod_data['MediaRoot'],
                base_file,
                vod_data['Bitrates'],
                file_ext,
                vod_data['StreamingType']
            )
            log.debug("Media path={0}".format(media_path))
            # (scheme, netloc, path, params, query, fragment) -- token is the query
            vod_url = urlunparse((
                vod_data['ProtocolType'],
                host,
                media_path,
                '',
                vod_data['Token'],
                ''
            ))
            log.debug("URL={0}".format(vod_url))
            return HLSStream.parse_variant_playlist(self.session, vod_url)
    def _get_streams(self):
        """Entry point: scrape user id (and VOD name) then delegate to live/VOD."""
        m = self.url_re.match(self.url)
        url_type = m and m.group(1)
        log.debug("URL type={0}".format(url_type))
        res = self.session.http.get(self.url)
        # VOD pages carry the video name in the page HTML
        if url_type != "live":
            m = self.video_name_re.search(res.text)
            video_name = m and m.group(1)
            if not video_name:
                raise PluginError('Could not determine video_name')
            log.debug("Video name={0}".format(video_name))
        # The user id is only present in the fingerprinted main JS bundle
        m = self.main_js_url_re.search(res.text)
        main_js_path = m and m.group(1)
        if not main_js_path:
            raise PluginError('Could not determine main_js_path')
        log.debug("Main JS path={0}".format(main_js_path))
        res = self.session.http.get(urljoin(self.url, main_js_path))
        m = self.user_id_re.search(res.text)
        user_id = m and m.group(1)
        if not user_id:
            raise PluginError('Could not determine user_id')
        log.debug("User ID={0}".format(user_id))
        if url_type == "live":
            return self._get_live(user_id)
        else:
            return self._get_vod(user_id, video_name)
__plugin__ = N13TV
| bsd-2-clause |
AndreaCrotti/ansible | contrib/inventory/rax.py | 98 | 15875 | #!/usr/bin/env python
# (c) 2013, Jesse Keating <jesse.keating@rackspace.com,
# Paul Durivage <paul.durivage@rackspace.com>,
# Matt Martz <matt@sivel.net>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Rackspace Cloud Inventory
Authors:
Jesse Keating <jesse.keating@rackspace.com,
Paul Durivage <paul.durivage@rackspace.com>,
Matt Martz <matt@sivel.net>
Description:
Generates inventory that Ansible can understand by making API request to
Rackspace Public Cloud API
When run against a specific host, this script returns variables similar to:
rax_os-ext-sts_task_state
rax_addresses
rax_links
rax_image
rax_os-ext-sts_vm_state
rax_flavor
rax_id
rax_rax-bandwidth_bandwidth
rax_user_id
rax_os-dcf_diskconfig
rax_accessipv4
rax_accessipv6
rax_progress
rax_os-ext-sts_power_state
rax_metadata
rax_status
rax_updated
rax_hostid
rax_name
rax_created
rax_tenant_id
rax_loaded
Configuration:
rax.py can be configured using a rax.ini file or via environment
variables. The rax.ini file should live in the same directory along side
this script.
The section header for configuration values related to this
inventory plugin is [rax]
[rax]
creds_file = ~/.rackspace_cloud_credentials
regions = IAD,ORD,DFW
env = prod
meta_prefix = meta
access_network = public
access_ip_version = 4
Each of these configurations also has a corresponding environment variable.
An environment variable will override a configuration file value.
creds_file:
Environment Variable: RAX_CREDS_FILE
An optional configuration that points to a pyrax-compatible credentials
file.
If not supplied, rax.py will look for a credentials file
at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
and therefore requires a file formatted per the SDK's specifications.
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
regions:
Environment Variable: RAX_REGION
An optional environment variable to narrow inventory search
scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
datacenter) and optionally accepts a comma-separated list.
environment:
Environment Variable: RAX_ENV
A configuration that will use an environment as configured in
~/.pyrax.cfg, see
https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
meta_prefix:
Environment Variable: RAX_META_PREFIX
Default: meta
A configuration that changes the prefix used for meta key/value groups.
For compatibility with ec2.py set to "tag"
access_network:
Environment Variable: RAX_ACCESS_NETWORK
Default: public
A configuration that will tell the inventory script to use a specific
server network to determine the ansible_ssh_host value. If no address
is found, ansible_ssh_host will not be set. Accepts a comma-separated
list of network names, the first found wins.
access_ip_version:
Environment Variable: RAX_ACCESS_IP_VERSION
Default: 4
A configuration related to "access_network" that will attempt to
determine the ansible_ssh_host value for either IPv4 or IPv6. If no
address is found, ansible_ssh_host will not be set.
Acceptable values are: 4 or 6. Values other than 4 or 6
will be ignored, and 4 will be used. Accepts a comma-separated list,
the first found wins.
Examples:
List server instances
$ RAX_CREDS_FILE=~/.raxpub rax.py --list
List servers in ORD datacenter only
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
List servers in ORD and DFW datacenters
$ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
Get server details for server named "server.example.com"
$ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
Use the instance private IP to connect (instead of public IP)
$ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
"""
import os
import re
import sys
import argparse
import warnings
import collections
import ConfigParser
from six import iteritems
from ansible.constants import get_config, mk_boolean
try:
import json
except ImportError:
import simplejson as json
try:
import pyrax
from pyrax.utils import slugify
except ImportError:
print('pyrax is required for this module')
sys.exit(1)
from time import time
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
def load_config_file():
    """Parse the rax.ini file sitting next to this script.

    Returns the populated ConfigParser, or None if the file is unparsable.
    """
    parser = ConfigParser.ConfigParser()
    ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'rax.ini')
    try:
        parser.read(ini_path)
    except ConfigParser.Error:
        return None
    return parser
# Module-level configuration parsed from rax.ini (None if missing/unparsable)
p = load_config_file()
def rax_slugify(value):
    """Return *value* normalized into a safe variable name prefixed with 'rax_'.

    Every character that is not alphanumeric, underscore or hyphen is replaced
    by an underscore; the result is lowercased and leading underscores stripped.
    """
    # Raw string: '[^\w-]' without the r-prefix is an invalid escape sequence
    # (a DeprecationWarning/SyntaxWarning on modern Python versions)
    return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
def to_dict(obj):
    """Collect *obj*'s public, non-callable attributes into a dict.

    Keys are the attribute names slugified with a 'rax_' prefix; private
    attributes (leading underscore) and callables are skipped.
    """
    result = {}
    for attr in dir(obj):
        val = getattr(obj, attr)
        if not attr.startswith('_') and isinstance(val, NON_CALLABLES):
            result[rax_slugify(attr)] = val
    return result
def host(regions, hostname):
    """Print (as JSON) the inventory hostvars for one server name.

    regions -- iterable of Rackspace region names to search
    hostname -- exact server name to look up

    Searches every region; if several servers share the name, the last match
    wins. The server's public IPv4 is exposed as ansible_ssh_host.
    """
    hostvars = {}
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        for server in cs.servers.list():
            if server.name == hostname:
                for key, value in to_dict(server).items():
                    hostvars[key] = value
                # And finally, add an IP address
                hostvars['ansible_ssh_host'] = server.accessIPv4
    print(json.dumps(hostvars, sort_keys=True, indent=4))
def _list_into_cache(regions):
    """Build the full inventory for `regions` and write it to the cache file.

    Groups servers by region, user metadata, instance id, flavor and image,
    collects per-server hostvars (including boot-from-volume image data and
    the preferred ansible_ssh_host address), then JSON-dumps the groups dict
    to get_cache_file_path(regions).
    """
    groups = collections.defaultdict(list)
    hostvars = collections.defaultdict(dict)
    images = {}
    cbs_attachments = collections.defaultdict(dict)

    prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')

    networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK',
                          'public', islist=True)
    try:
        ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
                                          'RAX_ACCESS_IP_VERSION', 4,
                                          islist=True))
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`: only a missing (None) or
        # non-numeric config value should fall back to IPv4.
        ip_versions = [4]
    else:
        ip_versions = [v for v in ip_versions if v in [4, 6]]
        if not ip_versions:
            ip_versions = [4]

    # Go through all the regions looking for servers
    for region in regions:
        # Connect to the region
        cs = pyrax.connect_to_cloudservers(region=region)
        if cs is None:
            warnings.warn(
                'Connecting to Rackspace region "%s" has caused Pyrax to '
                'return None. Is this a valid region?' % region,
                RuntimeWarning)
            continue
        for server in cs.servers.list():
            # Create a group on region
            groups[region].append(server.name)

            # Check if group metadata key in servers' metadata
            group = server.metadata.get('group')
            if group:
                groups[group].append(server.name)

            for extra_group in server.metadata.get('groups', '').split(','):
                if extra_group:
                    groups[extra_group].append(server.name)

            # Add host metadata
            for key, value in to_dict(server).items():
                hostvars[server.name][key] = value

            hostvars[server.name]['rax_region'] = region

            for key, value in iteritems(server.metadata):
                groups['%s_%s_%s' % (prefix, key, value)].append(server.name)

            groups['instance-%s' % server.id].append(server.name)
            groups['flavor-%s' % server.flavor['id']].append(server.name)

            # Handle boot from volume: such servers have an empty image, so
            # look the image up through the bootable block-storage volume
            # attached to them (one CBS scan per region, cached).
            if not server.image:
                if not cbs_attachments[region]:
                    cbs = pyrax.connect_to_cloud_blockstorage(region)
                    for vol in cbs.list():
                        if mk_boolean(vol.bootable):
                            for attachment in vol.attachments:
                                metadata = vol.volume_image_metadata
                                server_id = attachment['server_id']
                                cbs_attachments[region][server_id] = {
                                    'id': metadata['image_id'],
                                    'name': slugify(metadata['image_name'])
                                }
                image = cbs_attachments[region].get(server.id)
                if image:
                    server.image = {'id': image['id']}
                    hostvars[server.name]['rax_image'] = server.image
                    hostvars[server.name]['rax_boot_source'] = 'volume'
                    images[image['id']] = image['name']
            else:
                hostvars[server.name]['rax_boot_source'] = 'local'

            # NOTE(review): if server.image is still empty here (boot-from-
            # volume server with no matching attachment), server.image['id']
            # raises TypeError, which this KeyError handler will not catch.
            try:
                imagegroup = 'image-%s' % images[server.image['id']]
                groups[imagegroup].append(server.name)
                groups['image-%s' % server.image['id']].append(server.name)
            except KeyError:
                try:
                    image = cs.images.get(server.image['id'])
                except cs.exceptions.NotFound:
                    groups['image-%s' % server.image['id']].append(server.name)
                else:
                    images[image.id] = image.human_id
                    groups['image-%s' % image.human_id].append(server.name)
                    groups['image-%s' % server.image['id']].append(server.name)

            # And finally, add an IP address
            ansible_ssh_host = None
            # use accessIPv[46] instead of looping address for 'public'
            for network_name in networks:
                if ansible_ssh_host:
                    break
                if network_name == 'public':
                    for version_name in ip_versions:
                        if ansible_ssh_host:
                            break
                        if version_name == 6 and server.accessIPv6:
                            ansible_ssh_host = server.accessIPv6
                        elif server.accessIPv4:
                            ansible_ssh_host = server.accessIPv4
                if not ansible_ssh_host:
                    addresses = server.addresses.get(network_name, [])
                    for address in addresses:
                        for version_name in ip_versions:
                            if ansible_ssh_host:
                                break
                            if address.get('version') == version_name:
                                ansible_ssh_host = address.get('addr')
                                break
            if ansible_ssh_host:
                hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host

    if hostvars:
        groups['_meta'] = {'hostvars': hostvars}

    with open(get_cache_file_path(regions), 'w') as cache_file:
        json.dump(groups, cache_file)
def get_cache_file_path(regions):
    """Return the per-user, per-region-set cache path under ~/.ansible/tmp.

    The directory is created on demand; the filename embeds the pyrax
    identity username and a dot-joined, lower-cased region list.
    """
    region_key = '.'.join(r.strip().lower() for r in regions)
    cache_dir = os.path.join(os.path.expanduser('~'), '.ansible', 'tmp')
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    filename = 'ansible-rax-%s-%s.cache' % (pyrax.identity.username,
                                            region_key)
    return os.path.join(cache_dir, filename)
def _list(regions, refresh_cache=True):
    """Print the inventory for `regions` as JSON, refreshing the cache
    when it is missing, stale (older than 10 minutes), or when
    `refresh_cache` is set.
    """
    # Hoisted: the original computed get_cache_file_path(regions) three
    # times, re-stat'ing (and re-mkdir'ing) the cache directory each time.
    cache_path = get_cache_file_path(regions)
    if (refresh_cache or
            not os.path.exists(cache_path) or
            (time() - os.stat(cache_path)[-1]) > 600):
        # Cache file doesn't exist or older than 10m or refresh cache requested
        _list_into_cache(regions)
    with open(cache_path, 'r') as cache_file:
        groups = json.load(cache_file)
    print(json.dumps(groups, sort_keys=True, indent=4))
def parse_args():
    """Parse command-line arguments for the inventory script.

    Exactly one of --list / --host is required (argparse enforces the
    mutual exclusion).

    Returns:
        argparse.Namespace with `list`, `host` and `refresh_cache`.
    """
    parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
                                                 'inventory module')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--list', action='store_true',
                       help='List active servers')
    group.add_argument('--host', help='List details about the specific host')
    parser.add_argument('--refresh-cache', action='store_true', default=False,
                        # Fixed implicit-concatenation typo: the two pieces
                        # previously joined as "...requests toRackSpace...".
                        help=('Force refresh of cache, making API requests '
                              'to RackSpace (default: False - use cache '
                              'files)'))
    return parser.parse_args()
def setup():
    """Authenticate against Rackspace via pyrax and return the region list.

    Credential resolution order: RAX_CREDS_FILE / rax.ini creds_file,
    then ~/.rackspace_cloud_credentials, then the pyrax keyring user.
    Exits the process with status 1 on any authentication failure.
    """
    default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
    env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
    if env:
        pyrax.set_environment(env)
    keyring_username = pyrax.get_setting('keyring_username')
    # Attempt to grab credentials from environment first
    creds_file = get_config(p, 'rax', 'creds_file',
                            'RAX_CREDS_FILE', None)
    if creds_file is not None:
        creds_file = os.path.expanduser(creds_file)
    else:
        # But if that fails, use the default location of
        # ~/.rackspace_cloud_credentials
        if os.path.isfile(default_creds_file):
            creds_file = default_creds_file
        elif not keyring_username:
            sys.stderr.write('No value in environment variable %s and/or no '
                             'credentials file at %s\n'
                             % ('RAX_CREDS_FILE', default_creds_file))
            sys.exit(1)
    # Default to 'rackspace' auth unless pyrax already has an identity type.
    identity_type = pyrax.get_setting('identity_type')
    pyrax.set_setting('identity_type', identity_type or 'rackspace')
    region = pyrax.get_setting('region')
    try:
        if keyring_username:
            pyrax.keyring_auth(keyring_username, region=region)
        else:
            pyrax.set_credential_file(creds_file, region=region)
    except Exception as e:
        # NOTE(review): e.message is Python 2 only; this whole file is Py2.
        sys.stderr.write("%s: %s\n" % (e, e.message))
        sys.exit(1)
    # Build the effective region list: an explicit pyrax region wins;
    # otherwise honor rax.ini/RAX_REGION ('all' expands to every region).
    regions = []
    if region:
        regions.append(region)
    else:
        region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
                                 islist=True)
        for region in region_list:
            region = region.strip().upper()
            if region == 'ALL':
                regions = pyrax.regions
                break
            elif region not in pyrax.regions:
                # NOTE(review): message lacks a trailing newline.
                sys.stderr.write('Unsupported region %s' % region)
                sys.exit(1)
            elif region not in regions:
                regions.append(region)
    return regions
def main():
    """Entry point: authenticate, then emit --list or --host output.

    Always exits the process with status 0 on the normal path.
    """
    options = parse_args()
    active_regions = setup()
    if options.list:
        _list(active_regions, refresh_cache=options.refresh_cache)
    elif options.host:
        host(active_regions, options.host)
    sys.exit(0)
if __name__ == '__main__':
    main()
| gpl-3.0 |
sambitgaan/nupic | examples/opf/clients/hotgym/anomaly/model_params.py | 35 | 8548 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# NOTE(review): config consumed by the hotgym anomaly example; structure
# follows the OPF "CLA" model configuration format, version 1.
MODEL_PARAMS = {
    # Type of model that the rest of these parameters apply to.
    'model': "CLA",
    # Version that specifies the format of the config.
    'version': 1,
    # Intermediate variables used to compute fields in modelParams and also
    # referenced from the control section.
    'aggregationInfo': { 'days': 0,
                         'fields': [(u'c1', 'sum'), (u'c0', 'first')],
                         'hours': 1,
                         'microseconds': 0,
                         'milliseconds': 0,
                         'minutes': 0,
                         'months': 0,
                         'seconds': 0,
                         'weeks': 0,
                         'years': 0},
    'predictAheadTime': None,
    # Model parameter dictionary.
    'modelParams': {
        # The type of inference that this model will perform
        'inferenceType': 'TemporalAnomaly',
        'sensorParams': {
            # Sensor diagnostic output verbosity control;
            # if > 0: sensor region will print out on screen what it's sensing
            # at each step 0: silent; >=1: some info; >=2: more info;
            # >=3: even more info (see compute() in py/regions/RecordSensor.py)
            'verbosity' : 0,
            # Example:
            #     dsEncoderSchema = [
            #       DeferredDictLookup('__field_name_encoder'),
            #     ],
            #
            # (value generated from DS_ENCODER_SCHEMA)
            'encoders': {
                u'timestamp_timeOfDay': {
                    'fieldname': u'timestamp',
                    'name': u'timestamp_timeOfDay',
                    'timeOfDay': (21, 9.5),
                    'type': 'DateEncoder'
                },
                u'timestamp_dayOfWeek': None,
                u'timestamp_weekend': None,
                u'consumption': {
                    'clipInput': True,
                    'fieldname': u'consumption',
                    'maxval': 100.0,
                    'minval': 0.0,
                    'n': 50,
                    'name': u'consumption',
                    'type': 'ScalarEncoder',
                    'w': 21
                },
            },
            # A dictionary specifying the period for automatically-generated
            # resets from a RecordSensor;
            #
            # None = disable automatically-generated resets (also disabled if
            # all of the specified values evaluate to 0).
            # Valid keys is the desired combination of the following:
            #   days, hours, minutes, seconds, milliseconds, microseconds, weeks
            #
            # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
            'sensorAutoReset' : None,
        },
        'spEnable': True,
        'spParams': {
            # SP diagnostic output verbosity control;
            # 0: silent; >=1: some info; >=2: more info;
            'spVerbosity' : 0,
            # Spatial Pooler implementation selector.
            # Options: 'py', 'cpp' (speed optimized, new)
            'spatialImp' : 'cpp',
            'globalInhibition': 1,
            # Number of columns in the SP (must be same as in TP)
            'columnCount': 2048,
            'inputWidth': 0,
            # SP inhibition control (absolute value);
            # Maximum number of active columns in the SP region's output (when
            # there are more, the weaker ones are suppressed)
            'numActiveColumnsPerInhArea': 40,
            'seed': 1956,
            # potentialPct
            # What percent of the columns's receptive field is available
            # for potential synapses.
            'potentialPct': 0.8,
            # The default connected threshold. Any synapse whose
            # permanence value is above the connected threshold is
            # a "connected synapse", meaning it can contribute to the
            # cell's firing. Typical value is 0.10.
            'synPermConnected': 0.1,
            'synPermActiveInc': 0.0001,
            'synPermInactiveDec': 0.0005,
            'maxBoost': 1.0,
        },
        # Controls whether TP is enabled or disabled;
        # TP is necessary for making temporal predictions, such as predicting
        # the next inputs.  Without TP, the model is only capable of
        # reconstructing missing sensor inputs (via SP).
        'tpEnable' : True,
        'tpParams': {
            # TP diagnostic output verbosity control;
            # 0: silent; [1..6]: increasing levels of verbosity
            # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
            'verbosity': 0,
            # Number of cell columns in the cortical region (same number for
            # SP and TP)
            # (see also tpNCellsPerCol)
            'columnCount': 2048,
            # The number of cells (i.e., states), allocated per column.
            'cellsPerColumn': 32,
            'inputWidth': 2048,
            'seed': 1960,
            # Temporal Pooler implementation selector (see _getTPClass in
            # CLARegion.py).
            'temporalImp': 'cpp',
            # New Synapse formation count
            # NOTE: If None, use spNumActivePerInhArea
            #
            # TODO: need better explanation
            'newSynapseCount': 20,
            # Maximum number of synapses per segment
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSynapsesPerSegment': 32,
            # Maximum number of segments per cell
            #  > 0 for fixed-size CLA
            # -1 for non-fixed-size CLA
            #
            # TODO: for Ron: once the appropriate value is placed in TP
            # constructor, see if we should eliminate this parameter from
            # description.py.
            'maxSegmentsPerCell': 128,
            # Initial Permanence
            # TODO: need better explanation
            'initialPerm': 0.21,
            # Permanence Increment
            'permanenceInc': 0.1,
            # Permanence Decrement
            # If set to None, will automatically default to tpPermanenceInc
            # value.
            'permanenceDec' : 0.1,
            'globalDecay': 0.0,
            'maxAge': 0,
            # Minimum number of active synapses for a segment to be considered
            # during search for the best-matching segments.
            # None=use default
            # Replaces: tpMinThreshold
            'minThreshold': 9,
            # Segment activation threshold.
            # A segment is active if it has >= tpSegmentActivationThreshold
            # connected synapses that are active due to infActiveState
            # None=use default
            # Replaces: tpActivationThreshold
            'activationThreshold': 12,
            'outputType': 'normal',
            # "Pay Attention Mode" length. This tells the TP how many new
            # elements to append to the end of a learned sequence at a time.
            # Smaller values are better for datasets with short sequences,
            # higher values are better for datasets with long sequences.
            'pamLength': 3,
        },
        # Don't create the classifier since we don't need predictions.
        'clEnable': False,
        'clParams': None,
        # Anomaly-detection tuning; None presumably falls back to the
        # library default for that parameter -- TODO confirm.
        'anomalyParams': { u'anomalyCacheRecords': None,
                           u'autoDetectThreshold': None,
                           u'autoDetectWaitRecords': 2184},
        'trainSPNetOnlyIfRequested': False,
    },
}
| agpl-3.0 |
sudheesh001/oh-mainline | mysite/search/migrations/0057_rename_wanna_helper_note.py | 17 | 18485 | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
    """South schema migration renaming NoteThatSomeoneWantsToHelpAProject
    to WannaHelperNote.

    Forwards creates the new `search_wannahelpernote` table (with a
    project/person unique constraint) and drops the old table; backwards
    reverses both steps.  The `models` attribute is South's frozen ORM
    snapshot at the time this migration was generated -- do not edit it
    by hand.
    """
    def forwards(self, orm):
        """Apply the rename: create the new table, drop the old one."""
        # Adding model 'WannaHelperNote'
        db.create_table('search_wannahelpernote', (
            ('id', orm['search.wannahelpernote:id']),
            ('created_date', orm['search.wannahelpernote:created_date']),
            ('modified_date', orm['search.wannahelpernote:modified_date']),
            ('person', orm['search.wannahelpernote:person']),
            ('project', orm['search.wannahelpernote:project']),
        ))
        db.send_create_signal('search', ['WannaHelperNote'])
        # Deleting model 'notethatsomeonewantstohelpaproject'
        db.delete_table('search_notethatsomeonewantstohelpaproject')
        # Creating unique_together for [project, person] on WannaHelperNote.
        db.create_unique('search_wannahelpernote', ['project_id', 'person_id'])
    def backwards(self, orm):
        """Revert the rename: drop the new table, recreate the old one."""
        # Deleting unique_together for [project, person] on WannaHelperNote.
        db.delete_unique('search_wannahelpernote', ['project_id', 'person_id'])
        # Deleting model 'WannaHelperNote'
        db.delete_table('search_wannahelpernote')
        # Adding model 'notethatsomeonewantstohelpaproject'
        db.create_table('search_notethatsomeonewantstohelpaproject', (
            ('modified_date', orm['search.wannahelpernote:modified_date']),
            ('project', orm['search.wannahelpernote:project']),
            ('person', orm['search.wannahelpernote:person']),
            ('created_date', orm['search.wannahelpernote:created_date']),
            ('id', orm['search.wannahelpernote:id']),
        ))
        db.send_create_signal('search', ['notethatsomeonewantstohelpaproject'])
    # Frozen ORM model definitions (auto-generated by South).
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'customs.webresponse': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response_headers': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'url': ('django.db.models.fields.TextField', [], {})
        },
        'profile.dataimportattempt': {
            'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 4, 6, 18, 13, 18, 64050)'}),
            'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
            'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.WebResponse']", 'null': 'True'})
        },
        'profile.person': {
            'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'blacklisted_repository_committers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profile.RepositoryCommitter']"}),
            'contact_blurb': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dont_guess_my_location': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'expand_next_steps': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'homepage_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interested_in_working_on': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
            'location_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'location_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
            'photo_thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
            'photo_thumbnail_30px_wide': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
            'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'profile.repositorycommitter': {
            'Meta': {'unique_together': "(('project', 'data_import_attempt'),)"},
            'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
        },
        'search.answer': {
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
        },
        'search.bug': {
            'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'unique': 'True'}),
            'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
            'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
            'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'search.bugalert': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
            'how_many_bugs_at_time_of_request': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'query_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
        },
        'search.epoch': {
            'class_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'search.hitcountcache': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'hit_count': ('django.db.models.fields.IntegerField', [], {}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'search.project': {
            'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'people_who_wanna_help': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profile.Person']"})
        },
        'search.projectinvolvementquestion': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'key_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'search.wannahelpernote': {
            'Meta': {'unique_together': "[('project', 'person')]"},
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
        },
        'search.wrongicon': {
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
            'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
        }
    }
    complete_apps = ['search']
| agpl-3.0 |
cdsgroup/qcdb | data/nu_build_pickles.py | 2 | 4359 | import os
import sys
import time
#try:
import cPickle as pickle
#except ImportError:
# import pickle
sys.path.append('/Users/loriab/linux/qcdb')
sys.path.append('/Users/loriab/linux/qcdb/databases')
#import qcdb
import qcdb.dbwrap
# Directory where the generated pickle files are written.
homewrite = '.'
# Map of database name -> list of project suffixes to load and pickle.
# All entries are currently commented out; uncomment the ones to rebuild.
dbnet = {}
#dbnet['S22'] = ['dft', 'saptone', 'pt2']
#dbnet['NBC10'] = ['dft', 'saptone', 'pt2']
#dbnet['HBC6'] = ['dft', 'saptone', 'pt2']
#dbnet['HSG'] = ['dft', 'saptone', 'pt2', 'bfdbmm']
#dbnet['SSI'] = ['saptmisc', 'bfdbmm', 'dfit', 'bfdbdft', 'pt2misc', 'ccmisc', 'dfitm', 'bfdbdftm', 'dftmisc']  #, 'efp']
#dbnet['BBI'] = ['saptmisc', 'bfdbmm', 'dfit', 'bfdbdft', 'pt2misc', 'ccmisc', 'dfitm', 'bfdbdftm', 'dftmisc']
#dbnet['PCONF'] = ['dfit', 'dfitm']
#dbnet['SCONF'] = ['dfit', 'dfitm']
#dbnet['ACONF'] = ['dfit', 'dfitm']
#dbnet['CYCONF'] = ['dfit', 'dfitm']
#dbnet['NBC10ext'] = ['saptmisc', 'dfit']
#dbnet['ACHC'] = ['saptmisc', 'dfit']
#dbnet['UBQ'] = ['saptmisc', 'bfdbmm']
#dbnet['S22by7'] = ['saptmisc']
#dbnet['S66'] = ['saptmisc']
#dbnet['A24'] = ['saptmisc', 'dilabio']
#dbnet['JSCH'] = ['saptmisc']
#dbnet[''] = []
# Maintenance log:
# [1-4 Aug 2017] LAB
#  * added merz3 & 1hsg citations to HSG_bfdbmm.py
#  * added merz3 & 1ubq citations to UBQ_bfdbmm.py
#  * anon's allowed to remain b/c not dist. in qcdb conda: JSCH, S66, S22by7
#  * updated dfit, 1hsg, merz3, 1ubq, bfdbefp Citations
#  * so regen ACHC, *CONF, NBC10ext just in case
#  * added merz3 citations to BBI_bfdbdftm.py, BBI_bfdbdft.py (was bfdbdft), BBI_bfdbmm.py (some were bfdbmm), BBI_dftmisc.py, BBI_ccmisc.py, BBI_pt2misc.py, BBI_saptmisc.py
#  * added merz3 citations to SSI_bfdbdftm.py, SSI_bfdbdft.py (was bfdbdft), BBI_bfdbmm.py (some were bfdbmm), SSI_dftmisc.py, SSI_ccmisc.py, SSI_pt2misc.py, SSI_saptmisc.py
#  * added bfdbefp citation to SSI_efp.py
#  * added merz3 citation to UBQ_saptmisc.py as well as updating it to alpha0
#  * added cdsgroup (internal) citation to S66_saptmisc.py and JSCH_saptmisc.py
#  * added lots more citations & tagl so pretty much regen all
#  * added dfit citation to S22by7_saptmisc.py
#dbse = 'SSI'  # UNCOMMENT for local WDb
for db, lproj in dbnet.iteritems():
print '\n<<< %s >>>' % (db)
t0 = time.time()
asdf = qcdb.dbwrap.WrappedDatabase(db) # COMMENT for local WDb
dbse = asdf.dbse # COMMENT for local WDb
t1 = time.time()
print '%-70s %8.1f' % ('database.py --> WrappedDatabase', t1-t0)
WDbfilename = homewrite + '/' + db + '_WDb.pickle'
with open(WDbfilename, 'wb') as handle: # COMMENT for local WDb
pickle.dump(asdf, handle, pickle.HIGHEST_PROTOCOL) # COMMENT for local WDb
t2 = time.time()
print '%-70s %8.1f' % ('* WrappedDatabase --> database.pickle', t2-t1)
for pj in lproj:
print ' * ' + pj
t3 = time.time()
with open(WDbfilename, 'rb') as handle:
qwer = pickle.load(handle)
t4 = time.time()
qwer.load_qcdata_byproject(pj)
#qwer.load_qcdata_hdf5_trusted(project=pj)
print '%-70s %8.1f' % (' * database.pickle --> WrappedDatabase', t4-t3)
t5 = time.time()
hrxnfilename = homewrite + '/' + dbse + '_hrxn_' + pj + '.pickle'
print '%-70s %8.1f' % (' * WrappedDatabase --> WrappedDatabase + project', t5-t4)
guts = {}
for rxn, orxn in qwer.hrxn.iteritems():
guts[rxn] = orxn.data
with open(hrxnfilename, 'wb') as handle:
#pickle.dump(qwer.hrxn, handle, pickle.HIGHEST_PROTOCOL)
pickle.dump(guts, handle, pickle.HIGHEST_PROTOCOL)
t6 = time.time()
print '%-70s %8.1f' % (' * WrappedDatabase+Project --> database_hrxn_project.pickle', t6-t5)
print ' * Simulation'
t7 = time.time()
zxcv = qcdb.Database(db, loadfrompickle=True, path=homewrite)
t8 = time.time()
print '%-70s %8.1f' % (' * database.pickle --> Database', t8-t7)
for pj in lproj:
t9 = time.time()
zxcv.load_qcdata_hrxn_byproject(pj, path=homewrite)
t10 = time.time()
print '%-70s %8.1f' % (' * ' + pj + 'database_hrxn_project.pickle --> Database + project', t10-t9)
t11 = time.time()
print '%-70s %8.1f' % (' * pickles --> Database + all projects', t11-t7)
nmc = len(zxcv.fancy_mcs().keys())
t12 = time.time()
print '%-70s %8.1f' % (' * Access all projects ' + str(nmc), t12-t7)
| lgpl-3.0 |
willcast/kernel_d851 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Shared virtual root node; assigned once in main().
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        # func:   function name for this node
        # time:   timestamp string from the trace, or None for synthetic nodes
        # parent: caller's node; None means "attach under the class-level ROOT"
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        # Walk up toward ROOT until an ancestor frame named `func` is found.
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            # No such ancestor: register `func` as a new top-level call.
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Recursively render this node and its children as an ASCII tree.
        # `branch` is the accumulated prefix of " |" column markers.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            # Drop the last column marker: no sibling follows below this node.
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch + " |", False)
            else:
                # NOTE(review): the last child also receives a " |" suffix here;
                # the upstream kernel script passes plain spaces for the final
                # child, so this literal may be mangled in this copy — confirm.
                s += "%s" % self._children[i].__toString(branch + " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """Raised for a trace line left incomplete by the pipe breakage.

    The caller stops processing and discards the partial line.
    """
class CommentLineException(Exception):
    """Raised for comment lines (such as the trace-file header).

    The caller simply skips the offending line.
    """
def parseLine(line):
    """Split one 'function' tracer line into (calltime, callee, caller).

    Expected format (after the bracketed cpu field):
        "... ] <time>: <callee> <-<caller>"

    Raises CommentLineException for '#' comment lines and
    BrokenLineException for lines that do not match the format
    (e.g. the truncated final line of a broken pipe).
    """
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    # Raw string avoids the double-backslash escaping of the original pattern.
    m = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))
def main():
    # Seed the tree with a synthetic root so parentless calls have a home.
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated final line (broken pipe): stop processing entirely.
            break
        except CommentLineException:
            # Header/comment lines in the trace are simply skipped.
            continue
        # Attach the callee beneath the nearest ancestor frame named `caller`.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # Python 2 print statement: emits the drawn tree via CallTree.__repr__.
    print CallTree.ROOT

if __name__ == "__main__":
    main()
| gpl-2.0 |
diyclassics/cltk | src/cltk/sentence/grc.py | 4 | 1212 | """Code for sentences tokenization: Greek.
Sentence tokenization for Ancient Greek is available using a regular-expression based tokenizer.
>>> from cltk.sentence.grc import GreekRegexSentenceTokenizer
>>> from cltk.languages.example_texts import get_example_text
>>> splitter = GreekRegexSentenceTokenizer()
>>> sentences = splitter.tokenize(get_example_text("grc"))
>>> sentences[:2]
['ὅτι μὲν ὑμεῖς, ὦ ἄνδρες Ἀθηναῖοι, πεπόνθατε ὑπὸ τῶν ἐμῶν κατηγόρων, οὐκ οἶδα: ἐγὼ δ᾽ οὖν καὶ αὐτὸς ὑπ᾽ αὐτῶν ὀλίγου ἐμαυτοῦ ἐπελαθόμην, οὕτω πιθανῶς ἔλεγον.', 'καίτοι ἀληθές γε ὡς ἔπος εἰπεῖν οὐδὲν εἰρήκασιν.']
>>> len(sentences)
9
"""
__author__ = ["Patrick J. Burns <patrick@diyclassics.org>"]
from cltk.sentence.sentence import RegexSentenceTokenizer
# Sentence terminators for Ancient Greek: period, Greek question mark (;),
# and ano teleia (·).
sent_end_chars = [".", ";", "·"]
class GreekRegexSentenceTokenizer(RegexSentenceTokenizer):
    """Regex-based sentence tokenizer for Ancient Greek.

    Splits text on the module-level ``sent_end_chars`` terminators.
    """

    def __init__(self: object):
        """Configure the base regex tokenizer for Greek."""
        super().__init__(
            language="greek",
            sent_end_chars=sent_end_chars,
        )
| mit |
mmpagani/oq-hazardlib | openquake/hazardlib/tests/gsim/toro_1997_test.py | 4 | 1688 | # The Hazard Library
# Copyright (C) 2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.toro_1997 import (
ToroEtAl1997MblgNSHMP2008,
ToroEtAl1997MwNSHMP2008
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# Test data generated from subroutine 'getToro' in hazgridXnga2.f
class ToroEtAl1997MblgNSHMPTestCase(BaseGSIMTestCase):
    """Regression tests for the Toro et al. (1997) Mblg NSHMP 2008 GSIM.

    Expected values come from subroutine 'getToro' in hazgridXnga2.f.
    """

    GSIM_CLASS = ToroEtAl1997MblgNSHMP2008

    def _check_table(self, fname):
        # All tables are verified to within 0.1% discrepancy.
        self.check(fname, max_discrep_percentage=0.1)

    def test_mean(self):
        self._check_table('TORO97NSHMP/T097MblgNSHMP_MEAN.csv')

    def test_std_total(self):
        self._check_table('TORO97NSHMP/T097MblgNSHMP_STD_TOTAL.csv')
class ToroEtAl1997NSHMPTestCase(BaseGSIMTestCase):
    """Regression tests for the Toro et al. (1997) Mw NSHMP 2008 GSIM.

    Expected values come from subroutine 'getToro' in hazgridXnga2.f.
    """

    GSIM_CLASS = ToroEtAl1997MwNSHMP2008

    def _check_table(self, fname):
        # All tables are verified to within 0.1% discrepancy.
        self.check(fname, max_discrep_percentage=0.1)

    def test_mean(self):
        self._check_table('TORO97NSHMP/T097MwNSHMP_MEAN.csv')

    def test_std_total(self):
        self._check_table('TORO97NSHMP/T097MwNSHMP_STD_TOTAL.csv')
| agpl-3.0 |
futurecoin1/futurecoin1 | contrib/linearize/linearize-hashes.py | 105 | 2762 | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
# Parsed key=value options from the config file; populated in __main__ below.
settings = {}

class BitcoinRPC:
    """Minimal JSON-RPC-over-HTTP client for bitcoind (Python 2)."""

    # Class-level seed for the per-instance JSON-RPC request-id counter.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Pre-compute the HTTP Basic auth header from user:password.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # httplib.HTTPConnection(host, port, strict=False, timeout=30);
        # plain HTTP, no TLS.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC 1.1 call; return result, an error dict, or None."""
        # First increment creates an instance attribute shadowing the class
        # seed, giving each instance its own id sequence starting at 2.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # Server-reported errors are returned to the caller, not raised.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblock(self, hash, verbose=True):
        # Note: parameter name 'hash' shadows the builtin (pre-existing).
        return self.rpc('getblock', [hash, verbose])

    def getblockhash(self, index):
        return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
    """Print the block hash for every height in [min_height, max_height].

    Connects to bitcoind with the RPC credentials in `settings` and writes
    one hash per line to stdout.
    """
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])
    # xrange's upper bound is exclusive, hence +1 to include max_height itself.
    for height in xrange(settings['min_height'], settings['max_height']+1):
        # Renamed from 'hash', which shadowed the builtin of the same name.
        block_hash = rpc.getblockhash(height)
        print(block_hash)
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: linearize-hashes.py CONFIG-FILE"
        sys.exit(1)

    # Read key=value pairs from the config file into the global `settings`.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for anything the config file did not supply.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 15715
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 319000
    # RPC credentials have no safe default; refuse to run without them.
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; coerce numeric ones before use.
    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])
    get_block_hashes(settings)
| mit |
meredith-digops/ansible | lib/ansible/utils/module_docs_fragments/backup.py | 427 | 1071 | # Copyright (c) 2015 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # Standard documentation fragment
    # Injected into module docs via `extends_documentation_fragment: backup`;
    # the DOCUMENTATION string is parsed as YAML by Ansible's doc tooling.
    # NOTE(review): the YAML's leading indentation appears to have been lost
    # in this copy (keys should nest under `options:`) — confirm against the
    # upstream file before relying on it.
    DOCUMENTATION = '''
options:
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
'''
| gpl-3.0 |
areitz/pants | src/python/pants/backend/codegen/targets/java_thrift_library.py | 12 | 3264 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TargetDefinitionException
class JavaThriftLibrary(JvmTarget):
    """Generates a stub Java or Scala library from thrift IDL files."""

    # TODO(John Sirois): Tasks should register the values they support in a
    # plugin-registration goal. In general a plugin will contribute a target and
    # a task, but in this case we have a shared target that can be used by at
    # least 2 tasks - ThriftGen and ScroogeGen. This is likely not uncommon
    # (gcc & clang) so the arrangement needs to be cleaned up and supported well.
    _COMPILERS = frozenset(['thrift', 'scrooge'])
    _LANGUAGES = frozenset(['java', 'scala', 'android'])
    _RPC_STYLES = frozenset(['sync', 'finagle', 'ostrich'])

    def __init__(self,
                 compiler=None,
                 language=None,
                 rpc_style=None,
                 namespace_map=None,
                 thrift_linter_strict=None,
                 **kwargs):
        """
        :param compiler: Thrift compiler to use; when unset, falls back to the
          global option ``--thrift-default-compiler``.
        :param language: Output language to generate; when unset, falls back to
          the global option ``--thrift-default-language``.
        :param rpc_style: Optional rpc style for generated service stubs; when
          unset, falls back to the global option ``--thrift-default-rpc-style``.
        :param namespace_map: Optional ``{old: new}`` dictionary remapping
          thrift namespaces.
        :param thrift_linter_strict: If True, fail if thrift linter produces any warnings.
        """
        super(JavaThriftLibrary, self).__init__(**kwargs)

        # TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
        self.add_labels('codegen')

        def validated(arg_name, candidate, allowed):
            # Reject any non-empty value outside the allowed set; pass through
            # None/empty unchanged so globals can supply the default later.
            if candidate and candidate not in allowed:
                raise TargetDefinitionException(self, "{} may only be set to {} ('{}' not valid)"
                                                .format(arg_name, ', or '.join(map(repr, allowed)),
                                                        candidate))
            return candidate

        # The following fields are only added to the fingerprint via
        # FingerprintStrategy when their values impact the outcome of the task.
        # See JavaThriftLibraryFingerprintStrategy.
        self._compiler = validated('compiler', compiler, self._COMPILERS)
        self._language = validated('language', language, self._LANGUAGES)
        self._rpc_style = validated('rpc_style', rpc_style, self._RPC_STYLES)
        self.namespace_map = namespace_map
        self.thrift_linter_strict = thrift_linter_strict

    @property
    def compiler(self):
        # Validated compiler choice, or None (global default applies).
        return self._compiler

    @property
    def language(self):
        # Validated language choice, or None (global default applies).
        return self._language

    @property
    def rpc_style(self):
        # Validated rpc style choice, or None (global default applies).
        return self._rpc_style

    # TODO(Eric Ayers) As of 2/5/2015 this call is DEPRECATED and should be removed soon
    @property
    def is_thrift(self):
        return True
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.