text stringlengths 0 1.05M | meta dict |
|---|---|
## An implementation of the credential scheme based on an algebraic
## MAC proposed by Chase, Meiklejohn and Zaverucha in "Algebraic MACs and
## Keyed-Verification Anonymous Credentials", at ACM CCS 2014. The credential
## scheme is based on the GGM-based aMAC (see section 4.2, pages 8-9).
from amacs import *
from genzkp import ZKEnv, ZKProof, ConstGen, Gen, Sec, ConstPub, Pub
from petlib.bn import Bn
def cred_setup():
    """Return the group parameters (G, g, h, o) of the algebraic MAC scheme."""
    return setup_ggm()
def cred_CredKeyge(params, n):
    """ Generates keys and parameters for the credential issuer """
    # params is the (G, g, h, o) tuple from cred_setup(); n is the number
    # of attributes the credentials will carry.
    _, g, h, o = params
    # GGM-aMAC secret key sk = (x0..xn) and public issuer parameters Xi.
    sk, iparams = keyGen_ggm(params, n)
    # Pedersen-style commitment to x0: Cx0 = x0*g + x0_bar*h.
    x0_bar = o.random()
    Cx0 = sk[0] * g + x0_bar * h
    # Returns ((public values), (secret values)).
    return (Cx0, iparams), (sk, x0_bar)
def cred_UserKeyge(params):
    """Generate an EC El-Gamal keypair (priv, pub) for the credential user."""
    _, base, _, order = params
    secret = order.random()
    # The public key is simply the secret scalar times the group generator.
    return (secret, secret * base)
def secret_proof(params, n):
    """ Builds a proof of correct El-Gamal encryption for a number of secret attributes. """
    G, _, _, _ = params

    # Contruct the proof
    zk = ZKProof(G)

    # Constants known to both sides, and the user's secret key.
    pub, g, h = zk.get(ConstGen, ["pub", "g", "h"])
    priv = zk.get(Sec, "priv")

    # Per-attribute secrets: randomizers ri and attributes attri, plus the
    # (public) El-Gamal ciphertext components (sKi, Ci).
    ris = zk.get_array(Sec, "ri", n)
    attrs = zk.get_array(Sec, "attri", n)
    sKis = zk.get_array(ConstGen, "sKi", n)
    Cis = zk.get_array(ConstGen, "Ci", n)

    # Statements: pub = priv*g, and for each attribute a correct
    # encryption sKi = ri*g, Ci = ri*pub + attri*g.
    zk.add_proof(pub, priv * g)

    for (Ci, sKi, ri, attr) in zip(Cis, sKis, ris, attrs):
        zk.add_proof(sKi, ri * g)
        zk.add_proof(Ci, ri * pub + attr * g)

    return zk
def cred_secret_issue_user(params, keypair, attrib):
    """ Encodes a number of secret attributes to be issued.

    Each attribute is El-Gamal encrypted under the user's public key, and
    a zero-knowledge proof of knowledge of the plaintexts (secret_proof)
    is attached.

    Returns:
        (pub, (sKis, Cis), sig): the public key, the ciphertext halves,
        and the proof.
    """
    G, g, h, o = params
    priv, pub = keypair

    # Encrypt each attribute: sKi = ri*g, Ci = ri*pub + attr*g.
    # (Comprehensions replace the previous loop with an unused index.)
    ris = [o.random() for _ in attrib]
    sKis = [ri * g for ri in ris]
    Cis = [ri * pub + attr * g for (ri, attr) in zip(ris, attrib)]

    # Instantiate the proof of correct encryption with the real values.
    zk = secret_proof(params, len(attrib))
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.pub = pub
    env.priv = priv
    env.ri = ris
    env.attri = attrib
    env.sKi = sKis
    env.Ci = Cis

    ## Extract the proof
    sig = zk.build_proof(env.get())
    return (pub, (sKis, Cis), sig)
def _check_enc(params, keypair, EGenc, attrib):
    """Debug helper: assert each ciphertext decrypts to its attribute.

    EGenc is the (sKis, Cis) pair; El-Gamal decryption is b - priv*a,
    which must equal atr*g for each attribute.
    """
    G, g, h, o = params
    priv, pub = keypair
    for (a, b, atr) in zip(EGenc[0], EGenc[1], attrib):
        assert (b - (priv * a)) == (atr * g)
def cred_secret_issue_user_check(params, pub, EGenc, sig):
    """ Check the encrypted attributes of a user are well formed.
    """
    G, g, h, o = params
    (sKis, Cis) = EGenc

    ## First check the inputs (EG ciphertexts) are well formed.
    assert len(sKis) == len(Cis)

    # Rebuild the statement the user proved, bind only the public values
    # (the verifier holds no secrets), and verify the proof.
    zk = secret_proof(params, len(Cis))

    ## Run the proof
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.pub = pub
    env.sKi = sKis
    env.Ci = Cis

    ## Extract the proof
    if not zk.verify_proof(env.get(), sig):
        raise Exception("Proof of knowledge of plaintexts failed.")

    return True
def cred_secret_issue_proof(params, num_privs, num_pubs):
    """ The proof that the mixed public / private credential issuing is correct """
    G, _, _, _ = params
    # Total number of attributes (private + public).
    n = num_privs + num_pubs

    # Contruct the proof
    zk = ZKProof(G)

    ## The variables
    bCx0 = zk.get(Gen, "bCx_0")
    u, g, h, Cx0, pub = zk.get(ConstGen, ["u", "g", "h", "Cx_0", "pub"])
    b, x0, x0_bar, bx0, bx0_bar = zk.get(Sec, ["b", "x_0", "x_0_bar", "bx_0", "bx_0_bar"])
    # The trailing "1" offsets array naming (presumably 1-based, matching
    # the paper's x_i notation — confirm against genzkp.get_array).
    xis = zk.get_array(Sec, "xi", n, 1)
    bxis = zk.get_array(Sec, "bxi", n, 1)
    Xis = zk.get_array(ConstGen, "Xi", n, 1)
    bXis = zk.get_array(Gen, "bXi", n, 1)

    ## Proof of knowing the secret of MAC, and of its b-blinded forms,
    ## plus u = b*g linking the blinding factor to the credential base.
    zk.add_proof(Cx0, x0 * g + x0_bar * h)
    zk.add_proof(bCx0, b * Cx0)
    zk.add_proof(bCx0, bx0 * g + bx0_bar * h)
    zk.add_proof(u, b * g)

    ## Proof of correct Xi's (and their blinded counterparts bXi = b*Xi).
    for (xi, Xi, bXi, bxi) in zip(xis, Xis, bXis, bxis):
        zk.add_proof(Xi, xi * h)
        zk.add_proof(bXi, b * Xi)
        zk.add_proof(bXi, bxi * h)

    # Proof of correct Credential Ciphertext
    mis = zk.get_array(ConstPub, "mi", num_pubs)
    CredA, CredB = zk.get(ConstGen, ["CredA", "CredB"])
    EGa = zk.get_array(ConstGen, "EGai", num_privs)
    EGb = zk.get_array(ConstGen, "EGbi", num_privs)
    r_prime = zk.get(Sec, "r_prime")

    # Recompute the expected El-Gamal credential ciphertext (A, B) from the
    # blinded keys, the public messages and the encrypted attributes,
    # mirroring the computation in cred_secret_issue().
    A = r_prime * g
    B = r_prime * pub + bx0 * g

    # Public attributes use the first num_pubs blinded keys ...
    for mi, bxi in zip(mis, bxis[:num_pubs]):
        B = B + bxi * (mi * g)

    # ... and the encrypted private attributes use the remainder.
    bxis_sec = bxis[num_pubs:num_pubs + num_privs]
    for eg_a, eg_b, bxi in zip(EGa, EGb, bxis_sec):
        A = A + bxi * eg_a
        B = B + bxi * eg_b

    zk.add_proof(CredA, A)
    zk.add_proof(CredB, B)

    return zk
def cred_secret_issue(params, pub, EGenc, publics, secrets, messages):
    """ Encode a mixture of secret (EGenc) and public (messages) attributes.

    The issuer blinds its MAC secrets by a random b, homomorphically
    evaluates the MAC on the El-Gamal encrypted secret attributes and the
    clear public attributes, and proves the computation correct.

    Returns:
        (u, (EG_a, EG_b), sig): the blinded base u = b*g, an El-Gamal
        encryption of the MAC tag, and the proof of correct issuing.
    """
    # Parse variables
    G, g, h, o = params
    sk, x0_bar = secrets
    Cx0, iparams = publics
    (sKis, Cis) = EGenc

    assert len(sKis) == len(Cis)
    assert len(iparams) == len(messages) + len(Cis)

    # Get a blinding b, and blind all secrets and issuer values by it.
    b = o.random()
    u = b * g
    bx0_bar = b.mod_mul(x0_bar, o)
    bsk = [b.mod_mul(xi, o) for xi in sk]
    bCx0 = b * Cx0
    bXi = [b * Xi for Xi in iparams]

    # Split the blinded keys: x0, then keys for public, then secret attrs.
    bsk0 = bsk[0]
    open_bsk = bsk[1:len(messages)+1]
    sec_bsk = bsk[len(messages)+1:len(messages)+1+len(Cis)]
    assert [bsk0] + open_bsk + sec_bsk == bsk

    # First build a proto-credential in clear using all public attribs
    r_prime = o.random()
    EG_a = r_prime * g
    EG_b = r_prime * pub + bsk0 * g

    for mi, bxi in zip(messages, open_bsk):
        EG_b = EG_b + (bxi.mod_mul(mi, o) * g)

    # Fold in the encrypted secret attributes homomorphically.
    for (eg_ai, eg_bi, bxi) in zip(sKis, Cis, sec_bsk):
        EG_a = EG_a + bxi * eg_ai
        EG_b = EG_b + bxi * eg_bi

    # Now build an epic proof for all this.
    zk = cred_secret_issue_proof(params, len(Cis), len(messages))
    env = ZKEnv(zk)
    env.pub = pub
    env.g, env.h = g, h
    env.u = u
    env.b = b

    # These relate to the proof of x0 ...
    env.x_0 = sk[0]
    env.bx_0 = bsk0
    env.x_0_bar = x0_bar
    env.bx_0_bar = bx0_bar  # reuse the value computed above (was recomputed)
    env.Cx_0 = Cx0
    env.bCx_0 = bCx0

    # These relate to the knowledge of Xi, xi ...
    env.xi = sk[1:]
    env.Xi = iparams
    env.bxi = bsk[1:]
    env.bXi = bXi

    # These relate to the knowledge of the plaintext ...
    env.r_prime = r_prime
    env.mi = messages
    env.CredA = EG_a
    env.CredB = EG_b
    env.EGai = sKis
    env.EGbi = Cis

    ## Extract the proof
    sig = zk.build_proof(env.get())
    if __debug__:
        assert zk.verify_proof(env.get(), sig, strict=False)

    return u, (EG_a, EG_b), sig
def _internal_ckeck(keypair, u, EncE, secrets, all_attribs):
    """ Check the invariant that the ciphertexts are the encrypted attributes """
    # NOTE(review): "ckeck" is a typo for "check"; kept because callers in
    # this file use this exact name.

    ## First do decryption
    priv, pub = keypair
    (a, b) = EncE
    Cred = b - (priv * a)

    # Recompute the aMAC tag v = Hx(sk, attribs); the decrypted
    # credential must equal v * u.
    sk, _ = secrets
    v = Hx(sk, all_attribs)
    assert Cred == v * u
def cred_secret_issue_user_decrypt(params, keypair, u, EncE, publics, messages, EGab, sig):
    """ Decrypts the private / public credential and checks the proof of its correct generation """
    G, g, h, _ = params
    Cx0, iparams = publics

    # El-Gamal decrypt the MAC tag: uprime = EG_b - priv*EG_a.
    priv, pub = keypair
    (EG_a, EG_b) = EncE
    uprime = EG_b - (priv * EG_a)

    # The user's own ciphertexts, needed to re-instantiate the statement.
    sKis, Cis = EGab

    # Rebuild the issuing proof, bind the public values only, and verify.
    zk = cred_secret_issue_proof(params, len(Cis), len(messages))
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.u = u
    env.Cx_0 = Cx0
    env.pub = pub
    env.Xi = iparams
    env.mi = messages
    env.CredA = EG_a
    env.CredB = EG_b
    env.EGai = sKis
    env.EGbi = Cis

    ## Extract the proof
    if not zk.verify_proof(env.get(), sig):
        raise Exception("Decryption of credential failed.")

    # The decrypted credential is the aMAC pair (u, u').
    return (u, uprime)
def cred_issue_proof(params, n):
    """ The proof of public credential generation """
    G, _, _, _ = params

    # Contruct the proof
    zk = ZKProof(G)

    ## The variables
    u, up, g, h, Cx0 = zk.get(ConstGen, ["u", "up", "g", "h", "Cx0"])
    x0, x0_bar = zk.get(Sec, ["x0", "x0_bar"])
    xis = zk.get_array(Sec, "xi", n)
    mis = zk.get_array(ConstPub, "mi", n)
    Xis = zk.get_array(ConstGen, "Xi", n)

    ## Proof of correct MAC: up = (x0 + sum_i xi*mi) * u
    Prod = x0 * u
    for (xi, mi) in zip(xis, mis):
        Prod = Prod + xi*(mi * u)
    zk.add_proof(up, Prod)

    ## Proof of knowing the secret of MAC
    zk.add_proof(Cx0, x0 * g + x0_bar * h)

    ## Proof of correct Xi's
    for (xi, Xi) in zip(xis, Xis):
        zk.add_proof(Xi, xi * h)

    return zk
def cred_issue(params, publics, secrets, messages):
    """Issue a credential (aMAC) on all-public attributes, together with
    a proof of its correct generation."""
    # Parse variables
    G, g, h, _ = params
    sk, x0_bar = secrets
    Cx0, iparams = publics

    # MAC the attributes with the GGM aMAC.
    (u, uprime) = mac_ggm(params, sk, messages)

    # Build the proof and associate real variables
    n = len(messages)
    zk = cred_issue_proof(params, n)
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.u, env.up = u, uprime
    env.x0 = sk[0]
    env.x0_bar = x0_bar
    env.Cx0 = Cx0
    env.xi = sk[1:]
    env.mi = messages
    env.Xi = iparams

    ## Extract the proof
    sig = zk.build_proof(env.get())
    if __debug__:
        # Self-check of the freshly built proof.
        assert zk.verify_proof(env.get(), sig, strict=False)

    ## Return the credential (MAC) and proof of correctness
    return (u, uprime), sig
def cred_issue_check(params, publics, mac, sig, messages):
    """Verify the issuer's proof that a public-attribute credential is
    correctly formed. Returns True / False."""
    # Parse public variables
    G, g, h, _ = params
    Cx0, iparams = publics
    (u, uprime) = mac

    # Build the proof and assign public variables
    n = len(messages)
    zk = cred_issue_proof(params, n)
    env = ZKEnv(zk)
    env.g, env.h = g, h
    env.u, env.up = u, uprime
    env.Cx0 = Cx0
    env.mi = messages
    env.Xi = iparams

    # Return the result of the verification
    return zk.verify_proof(env.get(), sig)
def cred_show_proof(params, n):
    """The ZK statement for showing a credential over n hidden attributes."""
    G, _, _, _ = params

    # Contruct the proof
    zk = ZKProof(G)

    ## The variables
    u, g, h = zk.get(ConstGen, ["u", "g", "h"])
    V = zk.get(ConstGen, "V")
    # -1 is supplied as a public constant (presumably because the proof
    # language expresses V as a linear combination — confirm in genzkp).
    minus_one = zk.get(ConstPub, "minus1")
    r = zk.get(Sec, "r")
    zis = zk.get_array(Sec, "zi", n)
    mis = zk.get_array(Sec, "mi", n)
    Xis = zk.get_array(ConstGen, "Xi", n)
    Cmis = zk.get_array(ConstGen, "Cmi", n)

    # Define the relations to prove:
    # V = -r*g + sum_i zi*Xi (matches the V the verifier recomputes).
    Vp = r * (minus_one * g)
    for zi, Xi in zip(zis, Xis):
        Vp = Vp + (zi * Xi)
    zk.add_proof(V, Vp)

    # Each commitment opens as Cmi = mi*u + zi*h.
    for (Cmi, mi, zi) in zip(Cmis, mis, zis):
        zk.add_proof(Cmi, mi*u + zi*h)

    return zk
def cred_show(params, publics, mac, sig, messages, cred_show_proof=cred_show_proof, xenv=None, export_zi=False):
    """Show (prove possession of) a credential.

    Re-randomizes the MAC, commits to each attribute and proves in ZK
    that the commitments open to a valid MAC.  A custom cred_show_proof
    and xenv pair may extend the statement (see test_creds_custom_show).

    Returns (cred, sig), plus the blinding values zis if export_zi.
    """
    ## Parse and re-randomize
    G, g, h, o = params
    Cx0, iparams = publics

    ## WARNING: this step not in paper description of protocol
    # Checked correctness with Sarah Meiklejohn.
    u, uprime = rerandomize_sig_ggm(params, mac)

    n = len(messages)

    ## Blinding variables for the proof
    r = o.random()
    zis = [o.random() for _ in range(n)]

    # Blinded tag and per-attribute Pedersen-style commitments.
    Cup = uprime + r * g
    Cmis = [mi * u + zi * h for (mi, zi) in zip(messages, zis)]

    cred = (u, Cmis, Cup)

    # V = -r*g + sum zi*Xi; the verifier recomputes the same value from
    # its secret keys, so the proof on V shows the MAC is valid.
    V = r * ( (-1) * g)
    for zi, Xi in zip(zis, iparams):
        V = V + zi * Xi

    # Define the proof, and instanciate it with variables
    zk = cred_show_proof(params, n)
    env = ZKEnv(zk)
    env.u = u
    env.g, env.h = g, h
    env.V = V
    env.r = r
    env.minus1 = -Bn(1)
    env.zi = zis
    env.mi = messages
    env.Xi = iparams
    env.Cmi = Cmis

    # Allow the caller to bind extra variables of a custom statement.
    if xenv:
        xenv(env)

    sig = zk.build_proof(env.get())

    ## Just a sanity check
    if __debug__:
        assert zk.verify_proof(env.get(), sig, strict=False)

    if export_zi:
        return cred, sig, zis
    else:
        return cred, sig
def cred_show_check(params, publics, secrets, creds, sig, cred_show_proof=cred_show_proof, xenv=None):
    """Verify a credential "show" proof using the issuer's secret keys.

    Arguments mirror cred_show(); `secrets` is the issuer key material
    (sk, x0_bar).  The xenv default was a mutable {} — replaced with
    None, which is behaviourally identical since xenv is only
    truth-tested and then called, and now matches cred_show().

    Returns the boolean result of the proof verification.
    """
    # Parse the inputs
    G, g, h, _ = params
    sk, _ = secrets
    Cx0, iparams = publics
    (u, Cmis, Cup) = creds

    n = len(iparams)

    ## Recompute a V from the issuer secrets; it equals the prover's
    ## V = -r*g + sum zi*Xi exactly when the MAC is valid.
    V = sk[0] * u + (- Cup)
    for xi, Cmi in zip(sk[1:], Cmis):
        V = V + xi * Cmi

    # Define the proof, and instanciate it with variables
    zk = cred_show_proof(params, n)
    env = ZKEnv(zk)
    env.u = u
    env.g, env.h = g, h
    env.V = V
    env.minus1 = -Bn(1)
    env.Xi = iparams
    env.Cmi = Cmis

    if xenv:
        xenv(env)

    # Return the result of the verification
    return zk.verify_proof(env.get(), sig)
def time_it_all(repetitions=1000):
    """Benchmark every step of the credential protocols.

    Times each phase over `repetitions` iterations and prints the mean
    per-operation cost in milliseconds.

    Fix: time.clock() was deprecated in Python 3.3 and removed in 3.8;
    time.perf_counter() is the documented replacement for interval
    timing.
    """
    import time
    print("Timings of operations (%s repetitions)" % repetitions)

    # Baseline: pure loop overhead.
    t0 = time.perf_counter()
    for _ in range(repetitions):
        i = 0
    T = time.perf_counter() - t0
    print("%.3f ms\tIdle" % (1000 * T / repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        ## Setup from credential issuer.
        params = cred_setup()
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Group Setup" % (1000 * T / repetitions))

    G, _, _, o = params

    ## Attriutes we want to encode
    public_attr = [o.random(), o.random()]
    private_attr = [o.random(), o.random()]
    n = len(public_attr) + len(private_attr)

    t0 = time.perf_counter()
    for _ in range(repetitions):
        ipub, isec = cred_CredKeyge(params, n)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Key generation" % (1000 * T / repetitions))

    ## User generates keys and encrypts some secret attributes
    t0 = time.perf_counter()
    for _ in range(repetitions):
        keypair = cred_UserKeyge(params)
    T = time.perf_counter() - t0
    print("%.3f ms\tUser Key generation" % (1000 * T / repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        pub, EGenc, sig = cred_secret_issue_user(params, keypair, private_attr)
    T = time.perf_counter() - t0
    print("%.3f ms\tUser Key generation (proof)" % (1000 * T / repetitions))

    if __debug__:
        _check_enc(params, keypair, EGenc, private_attr)

    ## The issuer checks the secret attributes and encrypts a amac
    t0 = time.perf_counter()
    for _ in range(repetitions):
        if not cred_secret_issue_user_check(params, pub, EGenc, sig):
            raise Exception("User key generation invalid")
    T = time.perf_counter() - t0
    print("%.3f ms\tUser Key generation (verification)" % (1000 * T / repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        u, EncE, sig = cred_secret_issue(params, pub, EGenc, ipub, isec, public_attr)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential issuing" % (1000 * T / repetitions))

    if __debug__:
        _internal_ckeck(keypair, u, EncE, isec, public_attr + private_attr)

    ## The user decrypts the amac
    t0 = time.perf_counter()
    for _ in range(repetitions):
        mac = cred_secret_issue_user_decrypt(params, keypair, u, EncE, ipub, public_attr, EGenc, sig)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential decryption & verification" % (1000 * T / repetitions))

    ## The show protocol using the decrypted amac
    t0 = time.perf_counter()
    for _ in range(repetitions):
        (creds, sig) = cred_show(params, ipub, mac, sig, public_attr + private_attr)
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Show (proof)" % (1000 * T / repetitions))

    t0 = time.perf_counter()
    for _ in range(repetitions):
        if not cred_show_check(params, ipub, isec, creds, sig):
            raise Exception("Credential show failed.")
    T = time.perf_counter() - t0
    print("%.3f ms\tCredential Show (verification)" % (1000 * T / repetitions))
def test_creds():
    """End-to-end test: issue and show an all-public-attribute credential."""
    ## Setup from credential issuer.
    params = cred_setup()
    ipub, isec = cred_CredKeyge(params, 2)

    ## Credential issuing and checking
    mac, sig = cred_issue(params, ipub, isec, [10, 20])
    assert cred_issue_check(params, ipub, mac, sig, [10, 20])

    ## The show protocol
    (creds, sig) = cred_show(params, ipub, mac, sig, [10, 20])
    assert cred_show_check(params, ipub, isec, creds, sig)
def test_creds_custom_show():
    """Test attaching custom proofs to the show protocol for the
    credential scheme. This should work with both all-public and
    partly secret attributes."""
    ## Setup from credential issuer. Can also setup with secrets (see test_secret_creds)
    params = cred_setup()
    ipub, isec = cred_CredKeyge(params, 2)

    ## Credential issuing and checking
    mac, sig = cred_issue(params, ipub, isec, [10, 20])
    assert cred_issue_check(params, ipub, mac, sig, [10, 20])

    ## Custom proofs require two things:
    #  - cred_show_proof_custom: a custom "cred_show_proof" with additional statements
    #    to prove on the Commitements Cmi = mi * u + zi * h
    #  - xenv: a custom function that instanciates the values of the proof, either
    #    public secret or constant.

    # Example: Prove that the second attribute is double the first
    def cred_show_proof_custom(params, n):
        # Start from the standard show statement and extend it.
        zk = cred_show_proof(params, n)

        u, g, h = zk.get(ConstGen, ["u", "g", "h"])
        zis = zk.get_array(Sec, "zi", n)
        mis = zk.get_array(Sec, "mi", n)
        Cmis = zk.get_array(ConstGen, "Cmi", n)
        twou = zk.get(ConstGen, "twou")

        # Statement that proves Cmi1 = (2 * m0) * u + z1 * h
        zk.add_proof(Cmis[1], mis[0]*twou + zis[1]*h)

        return zk

    def xenv(env):
        # Ensure the constant 2u is correct, both ends.
        env.twou = 2 * env.u

    ## The show protocol -- note the use of "cred_show_proof_custom" and "xenv"
    (creds, sig) = cred_show(params, ipub, mac, sig, [10, 20], cred_show_proof_custom, xenv)
    assert cred_show_check(params, ipub, isec, creds, sig, cred_show_proof_custom, xenv)
def test_secret_creds():
    """End-to-end test with a mix of public and user-secret attributes."""
    ## Setup from credential issuer.
    params = cred_setup()

    ## Attriutes we want to encode
    public_attr = [30, 40]
    private_attr = [10, 20]
    n = len(public_attr) + len(private_attr)

    ipub, isec = cred_CredKeyge(params, n)

    ## User generates keys and encrypts some secret attributes
    # the secret attributes are [10, 20]
    keypair = cred_UserKeyge(params)
    pub, EGenc, sig = cred_secret_issue_user(params, keypair, private_attr)

    if __debug__:
        _check_enc(params, keypair, EGenc, private_attr)

    ## The issuer checks the secret attributes and encrypts a amac
    # It also includes some public attributes, namely [30, 40].
    assert cred_secret_issue_user_check(params, pub, EGenc, sig)
    u, EncE, sig = cred_secret_issue(params, pub, EGenc, ipub, isec, public_attr)

    if __debug__:
        _internal_ckeck(keypair, u, EncE, isec, public_attr + private_attr)

    ## The user decrypts the amac
    mac = cred_secret_issue_user_decrypt(params, keypair, u, EncE, ipub, public_attr, EGenc, sig)

    ## The show protocol using the decrypted amac
    # The proof just proves knowledge of the attributes, but any other
    # ZK statement is also possible by augmenting the proof.
    (creds, sig) = cred_show(params, ipub, mac, sig, public_attr + private_attr)
    assert cred_show_check(params, ipub, isec, creds, sig)
if __name__ == "__main__":
    # Benchmark the full protocol suite, then print the ZK statement
    # proved by each of the four proofs, for inspection.
    time_it_all(repetitions=100)

    params = cred_setup()

    print("Proof of secret attributes")
    zk1 = secret_proof(params, 2)
    print(zk1.render_proof_statement())

    print("Proof of secret issuing")
    zk2 = cred_secret_issue_proof(params, 2, 2)
    print(zk2.render_proof_statement())

    print("Proof of public issuing")
    zk3 = cred_issue_proof(params, 2)
    print(zk3.render_proof_statement())

    print("Proof of credential show")
    zk4 = cred_show_proof(params, 4)
    print(zk4.render_proof_statement())
| {
"repo_name": "moullos/UnlimitID",
"path": "UnlimitID/IdP/amacscreds/amacscreds.py",
"copies": "3",
"size": "20237",
"license": "bsd-2-clause",
"hash": -3776869563205313500,
"line_mean": 27.3828892006,
"line_max": 112,
"alpha_frac": 0.5888718684,
"autogenerated": false,
"ratio": 2.852692416126304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9896499543661842,
"avg_score": 0.009012948172892324,
"num_lines": 713
} |
"""An implementation of the CYK algorithm.
The CYK algorithm was chosen because the Google
docstring format allows for ambiguous representations,
which CYK can handle without devolving into a terrible
complexity. (It has a worst case of O(n^3).)
There are faster, on average, algorithms, which might
be better suited to the average task of Darglint.
However, CYK is relatively simple, and is well documented.
(Others, like chart parsing, are much more difficult
to find examples of.)
This representation was based directly on the wikipedia
article, https://en.wikipedia.org/wiki/CYK_algorithm.
"""
from typing import (
Optional,
List,
)
from .grammar import (
BaseGrammar,
)
from ..token import (
Token,
)
from ..node import (
CykNode,
)
def parse(grammar, tokens):
    # type: (BaseGrammar, List[Token]) -> Optional[CykNode]
    """Parse `tokens` against `grammar` using the CYK algorithm.

    Returns the root CykNode of the best (highest-weight) parse tree, or
    None if the token list is empty or not derivable from the start
    symbol.
    """
    if not tokens:
        return None
    n = len(tokens)
    r = len(grammar.productions)
    # P[length - 1][start][symbol]: best parse of the `length`-token span
    # beginning at `start`, for production index `symbol`.
    P = [
        [[None for _ in range(r)] for _ in range(n)]
        for _ in range(n)
    ]  # type: List[List[List[Optional[CykNode]]]]
    lookup = grammar.get_symbol_lookup()
    # Base case: fill in length-1 spans from terminal derivations.
    for s, token in enumerate(tokens):
        for v, production in enumerate(grammar.productions):
            for rhs in production.rhs:
                if len(rhs) > 2:
                    continue
                # TODO: Cast to a TerminalDerivation?
                token_type, weight = rhs  # type: ignore
                if token.token_type == token_type:
                    P[0][s][v] = CykNode(
                        production.lhs,
                        value=token,
                        weight=weight,
                    )
    # Inductive step: build spans of length l from a split at position p.
    for l in range(2, n + 1):
        for s in range(n - l + 2):
            for p in range(l):
                for a, production in enumerate(grammar.productions):
                    for derivation in production.rhs:
                        is_terminal_derivation = len(derivation) <= 2
                        if is_terminal_derivation:
                            continue
                        # TODO: Cast the derivation to a NonTerminalDerivation?
                        annotations, B, C, weight = derivation  # type: ignore
                        b = lookup[B]
                        c = lookup[C]
                        # NOTE(review): several of these indices can be -1
                        # (e.g. s - 1 when s == 0) and wrap around in
                        # Python; this mirrors the 1-based Wikipedia
                        # pseudocode — confirm intent before refactoring.
                        lchild = P[p - 1][s - 1][b]
                        rchild = P[l - p - 1][s + p - 1][c]
                        if lchild and rchild:
                            # Keep only the highest-weight derivation.
                            old = P[l - 1][s - 1][a]
                            if old and old.weight > weight:
                                continue
                            P[l - 1][s - 1][a] = CykNode(
                                production.lhs,
                                lchild,
                                rchild,
                                annotations=annotations,
                                weight=weight,
                            )
    return P[n - 1][0][lookup[grammar.start]]
| {
"repo_name": "terrencepreilly/darglint",
"path": "darglint/parse/cyk.py",
"copies": "1",
"size": "2961",
"license": "mit",
"hash": 7560341049837262000,
"line_mean": 33.4302325581,
"line_max": 79,
"alpha_frac": 0.4981425194,
"autogenerated": false,
"ratio": 4.2727272727272725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5270869792127273,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the Finite State Machine.
This module can be used to build and describe finite-state automata.
Author: Slawek Ligus <root@ooz.ie>
Overview of classes:
State -- a class representing a state which can be used in a finite state
machine of any type.
FiniteStateMachine -- a semiautomaton base for all following classes.
This class implements the process() method which takes an iterator
as input and processes it.
http://en.wikipedia.org/wiki/Semiautomaton
Acceptor -- an acceptor returning either True or False from the process()
method depending on whether its final state is classified as accepting
or not.
http://en.wikipedia.org/wiki/Finite_state_machine#Acceptors_and_recognizers
Transducer -- a transducer class extends FiniteStateMachine by implementing
an output() method which takes an input value passed to the current
state and returns the current state's name.
http://en.wikipedia.org/wiki/Finite-state_machine#Transducers
MooreMachine -- a specialized transducer. Its output() method returns
an output value stored in the current state.
http://en.wikipedia.org/wiki/Moore_machine
MealyMachine -- another specialized transducer. Its output() method returns
a value assigned to the transition cause by the input value.
http://en.wikipedia.org/wiki/Mealy_machine
"""
__version__ = '0.01'
MACHINES = dict()
NOOP = lambda: None
NOOP_ARG = lambda arg: None
class FSMError(Exception):
    """Base FSM exception."""
    # Consistency fix: the docstring alone suffices as the class body, so
    # the redundant `pass` statements (present on two of the three
    # exceptions) are removed everywhere.


class TransitionError(FSMError):
    """Transition exception."""


class StateError(FSMError):
    """State manipulation error."""
class FiniteStateMachine(object):
    """Generic Finite State Machine.

    A semiautomaton: it holds states and follows transitions for each
    input symbol, producing no output.  Every constructed machine
    registers itself in the module-level MACHINES dict under its name
    (and, optionally, under 'default').
    """

    # Default DOT rendering attributes used by get_graph().
    DOT_ATTRS = {
        'directed': True,
        'strict': False,
        'rankdir': 'LR',
        'ratio': '0.3'
    }

    def __init__(self, name, default=True):
        """Construct a FSM.

        Args:
            name: machine name; also its key in the MACHINES registry.
            default: if True, also register as 'default', making this
                machine the implicit owner of newly created States.
        """
        self.name = name
        # Call the base _setup explicitly AND the (possibly overridden)
        # subclass _setup, so subclasses get the shared attributes too.
        FiniteStateMachine._setup(self)
        self._setup()
        self.current_state = None
        MACHINES[name] = self
        if default:
            MACHINES['default'] = MACHINES[name]

    def _setup(self):
        """Setup a FSM."""
        # All finite state machines share the following attributes.
        self.inputs = list()
        self.states = list()
        self.init_state = None

    @property
    def all_transitions(self):
        """Get transitions from states.

        Returns:
            List of three element tuples each consisting of
            (source state, input, destination state)
        """
        transitions = list()
        for src_state in self.states:
            for input_value, dst_state in src_state.items():
                transitions.append((src_state, input_value, dst_state))
        return transitions

    def transition(self, input_value):
        """Transition to the next state.

        Falls back to the state's default_transition when the input has
        no explicit edge.  Raises TransitionError if the machine has not
        been reset() or no destination exists.

        NOTE(review): this uses dict.get(), which does NOT trigger the
        entry/exit/input callbacks wired through State.__getitem__ —
        confirm which lookup path is intended.
        """
        current = self.current_state
        if current is None:
            raise TransitionError('Current state not set.')
        destination_state = current.get(input_value, current.default_transition)
        if destination_state is None:
            raise TransitionError('Cannot transition from state %r'
                                  ' on input %r.' % (current.name, input_value))
        else:
            self.current_state = destination_state

    def reset(self):
        """Enter the Finite State Machine (return to the initial state)."""
        self.current_state = self.init_state

    def process(self, input_data):
        """Process input data, transitioning once per input symbol."""
        self.reset()
        for item in input_data:
            self.transition(item)
class Acceptor(FiniteStateMachine):
    """Acceptor machine.

    process() returns True iff the machine halts in an accepting state.
    """

    def _setup(self):
        """Setup an acceptor."""
        # Only the acceptor-specific attribute is set here; the shared
        # attributes come from the explicit base _setup call in __init__.
        self.accepting_states = list()

    def process(self, input_data):
        """Process input data and report acceptance of the final state."""
        self.reset()
        for item in input_data:
            self.transition(item)
        # Compare by identity so equal-but-distinct dict states don't match.
        return id(self.current_state) in [id(s) for s in self.accepting_states]
class Transducer(FiniteStateMachine):
    """A semiautomaton transducer.

    Adds an output() method; process() becomes a generator yielding one
    output per input symbol (before the corresponding transition).
    """

    def _setup(self):
        """Setup a transducer."""
        self.outputs = list()

    def output(self, input_value):
        """Return state's name as output."""
        return self.current_state.name

    def process(self, input_data, yield_none=True):
        """Process input data, yielding the output for each symbol.

        Args:
            input_data: iterable of input symbols.
            yield_none: when False, None outputs are suppressed.
        """
        self.reset()
        for item in input_data:
            # Compute the output once per item; the previous version
            # called output() twice when yield_none was False.
            out = self.output(item)
            if yield_none or out is not None:
                yield out
            self.transition(item)
class MooreMachine(Transducer):
    """Moore Machine: output depends only on the current state."""

    def output(self, input_value):
        """Return output value assigned to the current state."""
        # output_values[0] is the (None, output) pair set at construction.
        return self.current_state.output_values[0][1]
class MealyMachine(Transducer):
    """Mealy Machine: output depends on the current state AND the input."""

    def output(self, input_value):
        """Return output for a given state transition."""
        # output_values holds (input, output) pairs recorded when edges
        # were added; inputs without a recorded output map to None.
        return dict(self.current_state.output_values).get(input_value)
class State(dict):
    """State class.

    A state is a dict mapping input values to destination states.
    Optional callbacks fire when the state is left via indexing (see
    the NOTE in __getitem__).
    """

    # DOT attributes for rendering this state in get_graph().
    DOT_ATTRS = {
        'shape': 'circle',
        'height': '1.2',
    }
    DOT_ACCEPTING = 'doublecircle'

    def __init__(self, name, initial=False, accepting=False, output=None,
                 on_entry=NOOP, on_exit=NOOP, on_input=NOOP_ARG,
                 on_transition=NOOP_ARG, machine=None, default=None):
        """Construct a state.

        Args:
            name: state label (also used in DOT output).
            initial: register as the machine's init_state.
            accepting: register in machine.accepting_states; raises
                StateError for machines without accepting-state support.
            output: output value for Moore/Mealy machines.
            on_entry / on_exit / on_input / on_transition: callbacks.
            machine: owning machine; defaults to MACHINES['default'].
            default: fallback destination for inputs with no edge.
        """
        dict.__init__(self)
        self.name = name
        self.entry_action = on_entry
        self.exit_action = on_exit
        self.input_action = on_input
        self.transition_action = on_transition
        # List of (input_value, output_value); slot 0 is the Moore output.
        self.output_values = [(None, output)]
        self.default_transition = default
        if machine is None:
            # Fall back to the most recently registered default machine.
            try:
                machine = MACHINES['default']
            except KeyError:
                pass
        if machine:
            machine.states.append(self)
            if accepting:
                try:
                    machine.accepting_states.append(self)
                except AttributeError:
                    raise StateError('The %r %s does not support accepting '
                                     'states.' % (machine.name,
                                                  machine.__class__.__name__))
            if initial:
                machine.init_state = self

    def __getitem__(self, input_value):
        """Make a transition to the next state.

        NOTE(review): the entry/exit/input/transition callbacks fire only
        on this indexing path; FiniteStateMachine.transition() uses
        dict.get() and bypasses them — confirm intended behaviour.
        """
        next_state = dict.__getitem__(self, input_value)
        self.input_action(input_value)
        self.exit_action()
        self.transition_action(next_state)
        next_state.entry_action()
        return next_state

    def __setitem__(self, input_value, next_state):
        """Set a transition to a new state.

        input_value may be an (input, output) tuple — the Mealy form —
        in which case the output part is recorded in output_values.
        """
        if not isinstance(next_state, State):
            raise StateError('A state must transition to another state,'
                             ' got %r instead.' % next_state)
        if isinstance(input_value, tuple):
            input_value, output_value = input_value
            self.output_values.append((input_value, output_value))
        dict.__setitem__(self, input_value, next_state)

    def __repr__(self):
        """Represent the object in a string."""
        return '<%r %s @ 0x%x>' % (self.name, self.__class__.__name__, id(self))
def get_graph(fsm, title=None):
    """Generate a DOT graph of `fsm` with pygraphviz.

    Args:
        fsm: the machine to render.
        title: graph title; None uses fsm.name, False means no title.

    Returns:
        A pygraphviz.AGraph representing the machine.

    Raises:
        ImportError: if pygraphviz is not installed.  (The previous
            version set pgv to None and crashed later with
            AttributeError.)
    """
    try:
        import pygraphviz as pgv
    except ImportError as err:
        raise ImportError('pygraphviz is required to draw FSM graphs.') from err

    if title is None:
        title = fsm.name
    elif title is False:
        title = ''

    # (A stray, discarded pgv.AGraph() call was removed here.)
    fsm_graph = pgv.AGraph(title=title, **fsm.DOT_ATTRS)
    fsm_graph.node_attr.update(State.DOT_ATTRS)

    # Draw states; accepting states (when the machine has any) get a
    # double circle.
    for state in [fsm.init_state] + fsm.states:
        shape = State.DOT_ATTRS['shape']
        if hasattr(fsm, 'accepting_states'):
            if id(state) in [id(s) for s in fsm.accepting_states]:
                shape = state.DOT_ACCEPTING
        fsm_graph.add_node(n=state.name, shape=shape)

    # Invisible entry node pointing at the initial state.
    fsm_graph.add_node('null', shape='plaintext', label=' ')
    fsm_graph.add_edge('null', fsm.init_state.name)

    # Edges; Mealy machines label edges as "input / output".
    for src, input_value, dst in fsm.all_transitions:
        label = str(input_value)
        if isinstance(fsm, MealyMachine):
            label += ' / %s' % dict(src.output_values).get(input_value)
        fsm_graph.add_edge(src.name, dst.name, label=label)

    # Default ("else") transitions.
    for state in fsm.states:
        if state.default_transition is not None:
            fsm_graph.add_edge(state.name, state.default_transition.name,
                               label='else')
    return fsm_graph
| {
"repo_name": "lnerit/ktailFSM",
"path": "build/lib.linux-x86_64-2.7/fsm.py",
"copies": "1",
"size": "8758",
"license": "bsd-3-clause",
"hash": 875990444219504800,
"line_mean": 30.6173285199,
"line_max": 80,
"alpha_frac": 0.5926010505,
"autogenerated": false,
"ratio": 4.100187265917603,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5192788316417603,
"avg_score": null,
"num_lines": null
} |
# An implementation of the greedy algorithm for decomposing a fraction into an
# Egyptian fraction (a sum of distinct unit fractions). Egyptian fractions
# are a representation of fractions that dates back at least 3500 years (the
# Rhind Mathematical Papyrus contains a table of fractions written out this
# way). A number is expressed as an Egyptian fraction if it is written as a
# sum of unit fractions (fractions whose numerators are all zero) such that no
# fraction is duplicated. For example, 1/2 is already an Egyptian fraction,
# but 2/3 is not. However, 2/3 = 1/2 + 1/6 is an Egyptian fraction, though
# 2/3 = 1/3 + 1/3 is not because 1/3 is duplicated.
#
# Any rational number has at least one representation as an Egyptian fraction,
# and one simple algorithm for finding an Egyptian fraction representation of
# a rational number is given in Fibonacci's Liber Abaci (the same text that
# contains the eponymous Fibonacci sequence and the introduction of Hindu-
# Arabic numerals to Europe). This algorithm is a greedy algorithm that works
# as follows: if the rational number already has a numerator of 1, then we are
# done. Otherwise, subtract out the largest possible unit fraction from the
# number and repeat this process. For example, to compute an Egyptian fraction
# representation of 42/137, we would write
#
# 42/137 = 1/4 + 31/548
# = 1/4 + 1/18 + 5/4932
# = 1/4 + 1/18 + 1/987 + 1/1622628
# Python3 program to print a fraction
# in Egyptian Form using Greedy
# Algorithm
# import math package to use
# ceiling function
import math
# define a function egyptianFraction
# which receive parameter nr as
# numerator and dr as denominator
def egyptianFraction(nr, dr):
    """Print and return the greedy Egyptian-fraction expansion of nr/dr.

    Uses Fibonacci's greedy algorithm: repeatedly subtract the largest
    unit fraction 1/ceil(dr/nr) until the remainder is zero.  Assumes
    nr and dr are positive integers.

    Returns:
        The list of unit-fraction denominators.  (The previous version
        returned None; returning the list is backward-compatible and
        makes the function testable.)
    """
    print("The Egyptian Fraction " +
          "Representation of {0}/{1} is".
          format(nr, dr), end="\n")

    # Denominators of the expansion.
    ef = []

    # Loop until the remaining fraction nr/dr reaches zero.
    while nr != 0:
        # Integer ceiling of dr/nr.  The previous math.ceil(dr / nr) went
        # through float division, which loses precision for large
        # integers; pure integer arithmetic is exact.
        x = (dr + nr - 1) // nr
        ef.append(x)
        # Subtract 1/x:  nr/dr - 1/x = (x*nr - dr) / (dr*x).
        nr = x * nr - dr
        dr = dr * x

    # printing the values
    for i in range(len(ef)):
        if i != len(ef) - 1:
            print(" 1/{0} +".format(ef[i]), end=" ")
        else:
            print(" 1/{0}".format(ef[i]), end=" ")

    return ef
# Demo: print the Egyptian fraction expansion of 4/17.
egyptianFraction(4, 17)
| {
"repo_name": "saisankargochhayat/algo_quest",
"path": "greedy/egyptian_fraction.py",
"copies": "1",
"size": "2562",
"license": "apache-2.0",
"hash": 2052203973146058500,
"line_mean": 34.5833333333,
"line_max": 79,
"alpha_frac": 0.643637783,
"autogenerated": false,
"ratio": 3.5632823365785815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.929411083420181,
"avg_score": 0.08256185707535436,
"num_lines": 72
} |
# An implementation of the Indexer interface using XDS. This depends on the
# XDS wrappers to actually implement the functionality.
import logging
import os
import math
import shutil
from cctbx.array_family import flex
from scitbx import matrix
# wrappers for programs that this needs
from xia2.Wrappers.XDS.XDSXycorr import XDSXycorr as _Xycorr
from xia2.Wrappers.XDS.XDSInit import XDSInit as _Init
from xia2.Wrappers.XDS.XDSColspot import XDSColspot as _Colspot
from xia2.Wrappers.XDS.XDSIdxref import XDSIdxref as _Idxref
# helper functions
from xia2.Wrappers.XDS.XDS import XDSException
from xia2.Modules.Indexer.XDSCheckIndexerSolution import xds_check_indexer_solution
# interfaces that this must implement to be an indexer
from xia2.Schema.Interfaces.Indexer import IndexerSingleSweep
# odds and sods that are needed
from xia2.lib.bits import auto_logfiler
from xia2.Handlers.Flags import Flags
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Files import FileHandler
from xia2.Wrappers.Dials.Spotfinder import Spotfinder
from xia2.Wrappers.Dials.ExportSpotXDS import ExportSpotXDS
from dxtbx.serialize.xds import to_xds
import dxtbx
from dxtbx.model import Experiment, ExperimentList
from iotbx.xds import spot_xds
from dxtbx.serialize.xds import to_crystal
logger = logging.getLogger("xia2.Modules.Indexer.XDSIndexer")
class XDSIndexer(IndexerSingleSweep):
    """An implementation of the Indexer interface using XDS."""

    def __init__(self):
        """Initialise the indexer and verify the XDS programs are available."""
        super().__init__()
        # check that the programs exist - this will raise an exception if
        # they do not...
        _Idxref()
        # (first, last) image range used by INIT for background estimation;
        # may remain None, in which case the first indexing wedge is used
        self._background_images = None
        # image-selection strategy suffix: "i" dispatches to
        # _index_select_images_i in _index_prepare
        self._index_select_images = "i"
        # place to store working data
        self._data_files = {}
# factory functions
def Xycorr(self):
xycorr = _Xycorr()
xycorr.set_working_directory(self.get_working_directory())
xycorr.setup_from_imageset(self.get_imageset())
if self.get_distance():
xycorr.set_distance(self.get_distance())
if self.get_wavelength():
xycorr.set_wavelength(self.get_wavelength())
auto_logfiler(xycorr, "XYCORR")
return xycorr
def Init(self):
init = _Init(params=PhilIndex.params.xds.init)
init.set_working_directory(self.get_working_directory())
init.setup_from_imageset(self.get_imageset())
if self.get_distance():
init.set_distance(self.get_distance())
if self.get_wavelength():
init.set_wavelength(self.get_wavelength())
auto_logfiler(init, "INIT")
return init
def Colspot(self):
colspot = _Colspot(params=PhilIndex.params.xds.colspot)
colspot.set_working_directory(self.get_working_directory())
colspot.setup_from_imageset(self.get_imageset())
if self.get_distance():
colspot.set_distance(self.get_distance())
if self.get_wavelength():
colspot.set_wavelength(self.get_wavelength())
auto_logfiler(colspot, "COLSPOT")
return colspot
def DialsSpotfinder(self):
spotfinder = Spotfinder(params=PhilIndex.params.dials.find_spots)
spotfinder.set_working_directory(self.get_working_directory())
spotfinder.setup_from_imageset(self.get_imageset())
auto_logfiler(spotfinder, "SPOTFINDER")
return spotfinder
def DialsExportSpotXDS(self):
export = ExportSpotXDS()
export.set_working_directory(self.get_working_directory())
return export
    def Idxref(self):
        """Build and configure an IDXREF wrapper, applying any refined
        geometry supplied via command-line Flags."""
        idxref = _Idxref(params=PhilIndex.params.xds.index)
        idxref.set_working_directory(self.get_working_directory())
        idxref.setup_from_imageset(self.get_imageset())
        if self.get_distance():
            idxref.set_distance(self.get_distance())
        if self.get_wavelength():
            idxref.set_wavelength(self.get_wavelength())
        # if we have a refined set of parameters to apply, apply these
        if Flags.get_xparm():
            idxref.set_refined_origin(Flags.get_xparm_origin())
            idxref.set_refined_beam_vector(Flags.get_xparm_beam_vector())
            idxref.set_refined_rotation_axis(Flags.get_xparm_rotation_axis())
            idxref.set_refined_distance(Flags.get_xparm_distance())
        # hacks for Jira 493: allow the real-space cell axes to be forced
        if Flags.get_xparm_a():
            idxref.set_a_axis(Flags.get_xparm_a())
        if Flags.get_xparm_b():
            idxref.set_b_axis(Flags.get_xparm_b())
        if Flags.get_xparm_c():
            idxref.set_c_axis(Flags.get_xparm_c())
        auto_logfiler(idxref, "IDXREF")
        return idxref
# helper functions
def _index_remove_masked_regions(self):
if not PhilIndex.params.xia2.settings.untrusted_rectangle_indexing:
return
untrusted_rectangle_indexing = (
PhilIndex.params.xia2.settings.untrusted_rectangle_indexing
)
spot_xds = []
removed = 0
lines = open(self._indxr_payload["SPOT.XDS"], "rb").readlines()
for record in lines:
if not record.strip():
continue
remove = False
x, y, phi, i = list(map(float, record.split()[:4]))
for limits in untrusted_rectangle_indexing:
if x > limits[0] and x < limits[1] and y > limits[2] and y < limits[3]:
removed += 1
remove = True
break
if not remove:
spot_xds.append("%s" % record)
logger.debug("Removed %d peaks from SPOT.XDS", removed)
masked_spot_xds = (
os.path.splitext(self._indxr_payload["SPOT.XDS"])[0] + "_masked.XDS"
)
with open(masked_spot_xds, "w") as f:
f.writelines(spot_xds)
self._indxr_payload["SPOT.XDS"] = masked_spot_xds
    def _index_select_images_i(self):
        """Select image wedges for indexing based on image headers.

        Returns a list of wedges: bare image numbers for a two-image
        reference pair, otherwise (first, last) tuples of image blocks.
        """
        phi_width = self.get_phi_width()
        images = self.get_matching_images()
        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block?
        wedges = []
        if len(images) < 3:
            # work on the assumption that this is a reference pair
            wedges.append(images[0])
            if len(images) > 1:
                wedges.append(images[1])
        else:
            # cap the wedge size using whichever of the degree-based and
            # image-count-based PHIL limits is tighter
            max_wedge_size_degrees = PhilIndex.params.xds.index.max_wedge_size_degrees
            max_wedge_size = PhilIndex.params.xds.index.max_wedge_size
            if max_wedge_size_degrees is not None:
                n = int(math.floor(max_wedge_size_degrees / self.get_phi_width()))
                if max_wedge_size is not None:
                    max_wedge_size = min(max_wedge_size, max(n, 1))
                else:
                    max_wedge_size = n
            logger.debug("Using max_wedge_size: %d", max_wedge_size)
            block_size = min(len(images), max_wedge_size)
            logger.debug(
                "Adding images for indexer: %d -> %d", images[0], images[block_size - 1]
            )
            wedges.append((images[0], images[block_size - 1]))
            if int(90.0 / phi_width) + block_size in images:
                # assume we can add a wedge around 45 degrees as well...
                logger.debug(
                    "Adding images for indexer: %d -> %d",
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                )
                logger.debug(
                    "Adding images for indexer: %d -> %d",
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                )
                wedges.append(
                    (
                        int(45.0 / phi_width) + images[0],
                        int(45.0 / phi_width) + images[0] + block_size - 1,
                    )
                )
                wedges.append(
                    (
                        int(90.0 / phi_width) + images[0],
                        int(90.0 / phi_width) + images[0] + block_size - 1,
                    )
                )
            else:
                # add some half-way anyway
                first = (len(images) // 2) - (block_size // 2) + images[0] - 1
                if first > wedges[0][1]:
                    last = first + block_size - 1
                    logger.debug("Adding images for indexer: %d -> %d", first, last)
                    wedges.append((first, last))
                if len(images) > block_size:
                    logger.debug(
                        "Adding images for indexer: %d -> %d",
                        images[-block_size],
                        images[-1],
                    )
                    wedges.append((images[-block_size], images[-1]))
        return wedges
# do-er functions
    def _index_prepare(self):
        """Prepare to do autoindexing - in XDS terms this will mean
        calling xycorr, init and colspot on the input images."""
        # decide on images to work with
        logger.debug("XDS INDEX PREPARE:")
        logger.debug("Wavelength: %.6f", self.get_wavelength())
        logger.debug("Distance: %.2f", self.get_distance())
        if self._indxr_images == []:
            # no wedges chosen yet - dispatch to the configured strategy,
            # e.g. _index_select_images_i when _index_select_images == "i"
            _select_images_function = getattr(
                self, "_index_select_images_%s" % self._index_select_images
            )
            wedges = _select_images_function()
            for wedge in wedges:
                self.add_indexer_image_wedge(wedge)
            self.set_indexer_prepare_done(True)
        all_images = self.get_matching_images()
        first = min(all_images)
        last = max(all_images)
        # next start to process these - first xycorr
        xycorr = self.Xycorr()
        xycorr.set_data_range(first, last)
        xycorr.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        xycorr.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
        for block in self._indxr_images:
            xycorr.add_spot_range(block[0], block[1])
        # FIXME need to set the origin here
        xycorr.run()
        for file in ["X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf"]:
            self._indxr_payload[file] = xycorr.get_output_data_file(file)
        # next start to process these - then init
        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            imageset = self._indxr_imagesets[0]
            masker = (
                imageset.get_format_class()
                .get_instance(imageset.paths()[0])
                .get_masker()
            )
            if masker is None:
                # disable dynamic_shadowing
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False
        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            # find the region of the scan with the least predicted shadow
            # to use for background determination in XDS INIT step
            from dxtbx.model.experiment_list import ExperimentListFactory

            imageset = self._indxr_imagesets[0]
            xsweep = self._indxr_sweeps[0]
            sweep_filename = os.path.join(
                self.get_working_directory(), "%s_indexed.expt" % xsweep.get_name()
            )
            ExperimentListFactory.from_imageset_and_crystal(imageset, None).as_file(
                sweep_filename
            )
            from xia2.Wrappers.Dials.ShadowPlot import ShadowPlot

            shadow_plot = ShadowPlot()
            shadow_plot.set_working_directory(self.get_working_directory())
            auto_logfiler(shadow_plot)
            shadow_plot.set_sweep_filename(sweep_filename)
            shadow_plot.set_json_filename(
                os.path.join(
                    self.get_working_directory(),
                    "%s_shadow_plot.json" % shadow_plot.get_xpid(),
                )
            )
            shadow_plot.run()
            results = shadow_plot.get_results()
            fraction_shadowed = flex.double(results["fraction_shadowed"])
            if flex.max(fraction_shadowed) == 0:
                # no predicted shadowing anywhere - disable the correction
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False
            else:
                scan_points = flex.double(results["scan_points"])
                scan = imageset.get_scan()
                oscillation = scan.get_oscillation()
                if self._background_images is not None:
                    bg_images = self._background_images
                    bg_range_deg = (
                        scan.get_angle_from_image_index(bg_images[0]),
                        scan.get_angle_from_image_index(bg_images[1]),
                    )
                    bg_range_width = bg_range_deg[1] - bg_range_deg[0]
                    # fraction_shadowed values are <= 1, so 100 acts as +inf
                    min_shadow = 100
                    best_bg_range = bg_range_deg
                    from libtbx.utils import frange

                    # slide a window of the same width across the scan and
                    # keep the range with the smallest mean predicted shadow
                    for bg_range_start in frange(
                        flex.min(scan_points),
                        flex.max(scan_points) - bg_range_width,
                        step=oscillation[1],
                    ):
                        bg_range_deg = (bg_range_start, bg_range_start + bg_range_width)
                        sel = (scan_points >= bg_range_deg[0]) & (
                            scan_points <= bg_range_deg[1]
                        )
                        mean_shadow = flex.mean(fraction_shadowed.select(sel))
                        if mean_shadow < min_shadow:
                            min_shadow = mean_shadow
                            best_bg_range = bg_range_deg
                    self._background_images = (
                        scan.get_image_index_from_angle(best_bg_range[0]),
                        scan.get_image_index_from_angle(best_bg_range[1]),
                    )
                    logger.debug(
                        "Setting background images: %s -> %s" % self._background_images
                    )
        init = self.Init()
        for file in ["X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf"]:
            init.set_input_data_file(file, self._indxr_payload[file])
        init.set_data_range(first, last)
        if self._background_images:
            init.set_background_range(
                self._background_images[0], self._background_images[1]
            )
        else:
            init.set_background_range(
                self._indxr_images[0][0], self._indxr_images[0][1]
            )
        for block in self._indxr_images:
            init.add_spot_range(block[0], block[1])
        init.run()
        # at this stage, need to (perhaps) modify the BKGINIT.cbf image
        # to mark out the back stop
        if PhilIndex.params.xds.backstop_mask:
            logger.debug("Applying mask to BKGINIT.pck")
            # copy the original file
            cbf_old = os.path.join(init.get_working_directory(), "BKGINIT.cbf")
            cbf_save = os.path.join(init.get_working_directory(), "BKGINIT.sav")
            shutil.copyfile(cbf_old, cbf_save)
            # modify the file to give the new mask
            from xia2.Toolkit.BackstopMask import BackstopMask

            mask = BackstopMask(PhilIndex.params.xds.backstop_mask)
            mask.apply_mask_xds(self.get_header(), cbf_save, cbf_old)
            init.reload()
        for file in ["BLANK.cbf", "BKGINIT.cbf", "GAIN.cbf"]:
            self._indxr_payload[file] = init.get_output_data_file(file)
        if PhilIndex.params.xia2.settings.developmental.use_dials_spotfinder:
            # developmental path: find spots with DIALS then export to XDS
            spotfinder = self.DialsSpotfinder()
            for block in self._indxr_images:
                spotfinder.add_spot_range(block[0], block[1])
            spotfinder.run()
            export = self.DialsExportSpotXDS()
            export.set_input_data_file(
                "observations.refl",
                spotfinder.get_output_data_file("observations.refl"),
            )
            export.run()
            for file in ["SPOT.XDS"]:
                self._indxr_payload[file] = export.get_output_data_file(file)
        else:
            # next start to process these - then colspot
            colspot = self.Colspot()
            for file in (
                "X-CORRECTIONS.cbf",
                "Y-CORRECTIONS.cbf",
                "BLANK.cbf",
                "BKGINIT.cbf",
                "GAIN.cbf",
            ):
                colspot.set_input_data_file(file, self._indxr_payload[file])
            colspot.set_data_range(first, last)
            colspot.set_background_range(
                self._indxr_images[0][0], self._indxr_images[0][1]
            )
            for block in self._indxr_images:
                colspot.add_spot_range(block[0], block[1])
            colspot.run()
            for file in ["SPOT.XDS"]:
                self._indxr_payload[file] = colspot.get_output_data_file(file)
        # that should be everything prepared... all of the important
        # files should be loaded into memory to be able to cope with
        # integration happening somewhere else
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""
        idxref = self.Idxref()
        # edit SPOT.XDS to remove reflections in untrusted regions of the detector
        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])
        idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])
        # set the phi start etc correctly (from the first wedge only)
        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(starting_frame)
            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)
            idxref.add_spot_range(block[0], block[1])
        for block in self._indxr_images[1:]:
            idxref.add_spot_range(block[0], block[1])
        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)
        # original_cell records any target cell so the except handler below
        # can compare IDXREF's result against it
        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)
            logger.debug("Set lattice: %s", self._indxr_input_lattice)
            logger.debug("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)
            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.
        done = False
        while not done:
            try:
                done = idxref.run()
                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!
            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it
                if "solution is inaccurate" in str(e):
                    logger.debug("XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif (
                    "insufficient percentage (< 70%)" in str(e)
                    or "insufficient percentage (< 50%)" in str(e)
                ) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions FIXME should use xds_cell_deviation
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (
                            math.fabs((cell[j] - original_cell[j]) / original_cell[j])
                            > 0.02
                            and check
                        ):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (
                            math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                            and check
                        ):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                    logger.debug("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                    e
                ) or "insufficient percentage (< 50%)" in str(e):
                    logger.debug("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e
        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )
        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)
        # need to get the indexing solutions out somehow...
        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()
        (
            self._indxr_lattice,
            self._indxr_cell,
            self._indxr_mosaic,
        ) = idxref.get_indexing_solution()
        # rebuild dxtbx models from the refined XPARM.XDS
        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)
        # this information gets lost when re-creating the models from the
        # XDS results - however is not refined so can simply copy from the
        # input - https://github.com/xia2/xia2/issues/372
        models.get_detector()[0].set_thickness(
            converter.get_detector()[0].get_thickness()
        )
        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )
        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)
        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()
    def _index_finish(self):
        """Perform the indexer post-processing as required."""
        # ok, in here now ask if this solution was sensible!
        if not self.get_indexer_user_input_lattice():
            lattice = self._indxr_lattice
            cell = self._indxr_cell
            # independent centring analysis of the indexed spots
            lattice2, cell2 = xds_check_indexer_solution(
                os.path.join(self.get_working_directory(), "XPARM.XDS"),
                os.path.join(self.get_working_directory(), "SPOT.XDS"),
            )
            logger.debug("Centring analysis: %s => %s", lattice, lattice2)
            doubled_lattice = False
            for j in range(3):
                if int(round(cell2[j] / cell[j])) == 2:
                    doubled_lattice = True
                    axes = "A", "B", "C"
                    logger.debug("Lattice axis doubled: %s", axes[j])
            if (
                self._idxref_subtree_problem and (lattice2 != lattice)
            ) or doubled_lattice:
                # hmm.... looks like we don't agree on the correct result...
                # update the putative correct result as input
                logger.debug("Detected pseudocentred lattice")
                logger.debug(
                    "Inserting solution: %s " % lattice2
                    + "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f" % cell2
                )
                self._indxr_replace(lattice2, cell2)
                logger.debug("Set lattice: %s", lattice2)
                logger.debug("Set cell: %f %f %f %f %f %f" % cell2)
                # then rerun
                self.set_indexer_done(False)
                return
        # finally read through SPOT.XDS and XPARM.XDS to get an estimate
        # of the low resolution limit - this should be pretty straightforward
        # since what I want is the resolution of the lowest resolution indexed
        # spot..
        spot_file = os.path.join(self.get_working_directory(), "SPOT.XDS")
        experiment = self.get_indexer_experiment_list()[0]
        crystal_model = experiment.crystal
        spot_xds_handle = spot_xds.reader()
        spot_xds_handle.read_file(spot_file)
        miller_indices = flex.miller_index(spot_xds_handle.miller_index)
        # only those reflections that were actually indexed
        miller_indices = miller_indices.select(miller_indices != (0, 0, 0))
        ub = matrix.sqr(crystal_model.get_A())
        # d-spacing of the lowest-resolution indexed spot plus a 5% margin
        dmax = 1.05 * flex.max(1 / (ub.elems * miller_indices.as_vec3_double()).norms())
        logger.debug("Low resolution limit assigned as: %.2f", dmax)
        self._indxr_low_resolution = dmax
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Indexer/XDSIndexer.py",
"copies": "1",
"size": "26100",
"license": "bsd-3-clause",
"hash": -6747512125613562000,
"line_mean": 35.4016736402,
"line_max": 88,
"alpha_frac": 0.554559387,
"autogenerated": false,
"ratio": 3.836542701749228,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4891102088749228,
"avg_score": null,
"num_lines": null
} |
# An implementation of the Integrater interface using Dials. This depends on the
# Dials wrappers to actually implement the functionality.
import logging
import math
import os
import xia2.Wrappers.Dials.Integrate
from dxtbx.serialize import load
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.lib.bits import auto_logfiler
from xia2.lib.SymmetryLib import lattice_to_spacegroup
from xia2.Schema.Interfaces.Integrater import Integrater
from xia2.Wrappers.Dials.anvil_correction import anvil_correction as _anvil_correction
from xia2.Wrappers.Dials.ExportMtz import ExportMtz as _ExportMtz
from xia2.Wrappers.Dials.ExportXDSASCII import ExportXDSASCII
from xia2.Wrappers.Dials.Report import Report as _Report
logger = logging.getLogger("xia2.Modules.Integrater.DialsIntegrater")
class DialsIntegrater(Integrater):
    """A class to implement the Integrater interface using *only* DIALS
    programs."""

    def __init__(self):
        """Initialise the integrater and verify dials.integrate is available."""
        super().__init__()
        # check that the programs exist - this will raise an exception if
        # they do not...
        xia2.Wrappers.Dials.Integrate.Integrate()
        # place to store working data
        self._data_files = {}
        # internal parameters to pass around
        self._integrate_parameters = {}
        # integration outputs, populated by _integrate()/_integrate_finish()
        self._intgr_integrated_filename = None
        self._intgr_integrated_reflections = None
        self._intgr_experiments_filename = None
        # Check whether to do diamond anvil cell attenuation correction.
        self.high_pressure = PhilIndex.params.dials.high_pressure.correction
    # overload these methods as we don't want the resolution range
    # feeding back... aha - but we may want to assign them
    # from outside!
    def set_integrater_resolution(self, dmin, dmax, user=False):
        """Set both resolution limits, but only when user-supplied."""
        if user:
            Integrater.set_integrater_resolution(self, dmin, dmax, user)

    def set_integrater_high_resolution(self, dmin, user=False):
        """Set the high resolution limit, but only when user-supplied."""
        if user:
            Integrater.set_integrater_high_resolution(self, dmin, user)

    def set_integrater_low_resolution(self, dmax, user=False):
        """Set the low resolution limit unconditionally.

        NOTE(review): unlike the two setters above, this one ignores the
        ``user`` flag and always assigns - presumably deliberate, since the
        low limit is harmless to feed back; confirm against the base class.
        """
        self._intgr_reso_low = dmax
    # admin functions
    def get_integrated_experiments(self):
        """Ensure integration has run, then return the experiments (.expt) path."""
        self.integrate()
        return self._intgr_experiments_filename

    def get_integrated_filename(self):
        """Ensure integration has run, then return the integrated MTZ path."""
        self.integrate()
        return self._intgr_integrated_filename

    def get_integrated_reflections(self):
        """Ensure integration has run, then return the reflections (.refl) path."""
        self.integrate()
        return self._intgr_integrated_reflections
def set_integrated_experiments(self, filename):
Integrater.set_integrated_experiments = filename
def set_integrated_reflections(self, filename):
Integrater.set_integrated_reflections = filename
    # factory functions
    def Integrate(self):
        """Build and configure a dials.integrate wrapper from PHIL parameters."""
        params = PhilIndex.params.dials.integrate
        integrate = xia2.Wrappers.Dials.Integrate.Integrate()
        integrate.set_phil_file(params.phil_file)
        if params.mosaic == "new":
            integrate.set_new_mosaic()
        if PhilIndex.params.dials.fast_mode:
            # fast mode: skip profile fitting entirely
            integrate.set_profile_fitting(False)
        else:
            profile_fitting = PhilIndex.params.xia2.settings.integration.profile_fitting
            integrate.set_profile_fitting(profile_fitting)
        # Options for profile modelling.
        integrate.set_scan_varying_profile(params.scan_varying_profile)
        high_pressure = PhilIndex.params.dials.high_pressure.correction
        integrate.set_profile_params(
            params.min_spots.per_degree, params.min_spots.overall, high_pressure
        )
        integrate.set_background_outlier_algorithm(params.background_outlier_algorithm)
        integrate.set_background_algorithm(params.background_algorithm)
        integrate.set_working_directory(self.get_working_directory())
        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        auto_logfiler(integrate, "INTEGRATE")
        return integrate
def Report(self):
report = _Report()
report.set_working_directory(self.get_working_directory())
report.set_experiments_filename(self._intgr_experiments_filename)
report.set_reflections_filename(self._intgr_integrated_reflections)
auto_logfiler(report, "REPORT")
return report
    def ExportMtz(self):
        """Build and configure a dials.export wrapper producing MTZ output."""
        params = PhilIndex.params.dials.integrate
        export = _ExportMtz()
        pname, xname, _ = self.get_integrater_project_info()
        export.crystal_name = xname
        export.project_name = pname
        export.set_working_directory(self.get_working_directory())
        export.set_experiments_filename(self._intgr_experiments_filename)
        export.set_combine_partials(params.combine_partials)
        export.set_partiality_threshold(params.partiality_threshold)
        if len(self.get_matching_images()) == 1:
            # single-image data: relax the partiality cut-off
            export.set_partiality_threshold(0.1)
        if (
            len(self.get_matching_images()) == 1
            or PhilIndex.params.dials.fast_mode
            or not PhilIndex.params.xia2.settings.integration.profile_fitting
        ):
            # With no profiles available have to rely on summation alone
            export.set_intensity_choice("sum")
        auto_logfiler(export, "EXPORTMTZ")
        return export
    # now some real functions, which do useful things
    def _integrater_reset_callback(self):
        """Delete all results on a reset."""
        logger.debug("Deleting all stored results.")
        self._data_files = {}
        self._integrate_parameters = {}
    def _integrate_prepare(self):
        """Prepare for integration - in XDS terms this may mean rerunning
        IDXREF to get the XPARM etc. DEFPIX is considered part of the full
        integration as it is resolution dependent."""
        Citations.cite("dials")
        # decide what images we are going to process, if not already
        # specified
        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))
        logger.debug("DIALS INTEGRATE PREPARE:")
        logger.debug("Wavelength: %.6f" % self.get_wavelength())
        logger.debug("Distance: %.2f" % self.get_distance())
        if not self.get_integrater_low_resolution():
            # inherit the low resolution limit from the indexing step
            dmax = self._intgr_refiner.get_indexer_low_resolution(
                self.get_integrater_epoch()
            )
            self.set_integrater_low_resolution(dmax)
            logger.debug(
                "Low resolution set to: %s" % self.get_integrater_low_resolution()
            )
        ## copy the data across
        refiner = self.get_integrater_refiner()
        # For multi-sweep refinement, get the split experiments from after refinement.
        if PhilIndex.params.xia2.settings.multi_sweep_refinement:
            self._intgr_experiments_filename = refiner.get_refiner_payload(
                f"{self._intgr_sweep._name}_models.expt"
            )
            self._intgr_indexed_filename = refiner.get_refiner_payload(
                f"{self._intgr_sweep._name}_observations.refl"
            )
        # Otherwise, there should only be a single experiment list and reflection table.
        else:
            self._intgr_experiments_filename = refiner.get_refiner_payload(
                "models.expt"
            )
            self._intgr_indexed_filename = refiner.get_refiner_payload(
                "observations.refl"
            )
        experiments = load.experiment_list(self._intgr_experiments_filename)
        experiment = experiments[0]
        # this is the result of the cell refinement
        self._intgr_cell = experiment.crystal.get_unit_cell().parameters()
        logger.debug("Files available at the end of DIALS integrate prepare:")
        for f in self._data_files:
            logger.debug("%s" % f)
        # propagate the refined models onto this integrater
        self.set_detector(experiment.detector)
        self.set_beam_obj(experiment.beam)
        self.set_goniometer(experiment.goniometer)
    def _integrate(self):
        """Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections."""
        integrate = self.Integrate()
        # decide what images we are going to process, if not already
        # specified
        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))
        imageset = self.get_imageset()
        beam = imageset.get_beam()
        detector = imageset.get_detector()
        # the detector geometry imposes a hard limit on achievable resolution
        d_min_limit = detector.get_max_resolution(beam.get_s0())
        if (
            d_min_limit > self._intgr_reso_high
            or PhilIndex.params.xia2.settings.resolution.keep_all_reflections
        ):
            logger.debug(
                "Overriding high resolution limit: %f => %f"
                % (self._intgr_reso_high, d_min_limit)
            )
            self._intgr_reso_high = d_min_limit
        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        # explicit PHIL d_max/d_min take precedence over the stored limits
        if PhilIndex.params.dials.integrate.d_max:
            integrate.set_d_max(PhilIndex.params.dials.integrate.d_max)
        else:
            integrate.set_d_max(self._intgr_reso_low)
        if PhilIndex.params.dials.integrate.d_min:
            integrate.set_d_min(PhilIndex.params.dials.integrate.d_min)
        else:
            integrate.set_d_min(self._intgr_reso_high)
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE",
            integrate.get_log_file(),
        )
        integrate.run()
        self._intgr_experiments_filename = integrate.get_integrated_experiments()
        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...
        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])
        # FIXME (i) record the log file, (ii) get more information out from the
        # integration log on the quality of the data and (iii) the mosaic spread
        # range observed and R.M.S. deviations.
        self._intgr_integrated_reflections = integrate.get_integrated_reflections()
        if not os.path.isfile(self._intgr_integrated_reflections):
            raise RuntimeError(
                "Integration failed: %s does not exist."
                % self._intgr_integrated_reflections
            )
        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        logger.info(self.show_per_image_statistics())
        # generate the dials.report HTML summary for this sweep
        report = self.Report()
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.integrate.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run(wait_for_completion=True)
        FileHandler.record_html_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE", html_filename
        )
        experiments = load.experiment_list(self._intgr_experiments_filename)
        profile = experiments.profiles()[0]
        mosaic = profile.sigma_m()
        try:
            # sigma_m may be a flex array (scan-varying) or a scalar
            m_min, m_max, m_mean = mosaic.min_max_mean().as_tuple()
            self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)
        except AttributeError:
            self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)
        logger.info(
            "Mosaic spread: %.3f < %.3f < %.3f"
            % self.get_integrater_mosaic_min_mean_max()
        )
        # If running in high-pressure mode, run dials.anvil_correction to
        # correct for the attenuation of the incident and diffracted beams by the
        # diamond anvils.
        if self.high_pressure:
            self._anvil_correction()
        return self._intgr_integrated_reflections
    def _integrate_finish(self):
        """
        Finish off the integration.

        If in high-pressure mode run dials.anvil_correction.
        Run dials.export.
        """
        # FIXME - do we want to export every time we call this method
        # (the file will not have changed) and also (more important) do
        # we want a different exported MTZ file every time (I do not think
        # that we do; these can be very large) - was exporter.get_xpid() ->
        # now dials
        if self._output_format == "hkl":
            # MTZ output path: export, count reflections, then maybe reindex
            exporter = self.ExportMtz()
            exporter.set_reflections_filename(self._intgr_integrated_reflections)
            mtz_filename = os.path.join(
                self.get_working_directory(), "%s_integrated.mtz" % "dials"
            )
            exporter.set_mtz_filename(mtz_filename)
            exporter.run()
            self._intgr_integrated_filename = mtz_filename
            # record integrated MTZ file
            pname, xname, dname = self.get_integrater_project_info()
            sweep = self.get_integrater_sweep_name()
            FileHandler.record_more_data_file(
                f"{pname} {xname} {dname} {sweep} INTEGRATE", mtz_filename
            )
            from iotbx.reflection_file_reader import any_reflection_file

            miller_arrays = any_reflection_file(
                self._intgr_integrated_filename
            ).as_miller_arrays()
            # look for profile-fitted intensities
            intensities = [
                ma for ma in miller_arrays if ma.info().labels == ["IPR", "SIGIPR"]
            ]
            if len(intensities) == 0:
                # look instead for summation-integrated intensities
                intensities = [
                    ma for ma in miller_arrays if ma.info().labels == ["I", "SIGI"]
                ]
                assert len(intensities)
            self._intgr_n_ref = intensities[0].size()
            if not os.path.isfile(self._intgr_integrated_filename):
                raise RuntimeError(
                    "dials.export failed: %s does not exist."
                    % self._intgr_integrated_filename
                )
            # no reindex operator and spacegroup already matches the refined
            # lattice - nothing further to do
            if (
                self._intgr_reindex_operator is None
                and self._intgr_spacegroup_number
                == lattice_to_spacegroup(
                    self.get_integrater_refiner().get_refiner_lattice()
                )
            ):
                logger.debug(
                    "Not reindexing to spacegroup %d (%s)"
                    % (self._intgr_spacegroup_number, self._intgr_reindex_operator)
                )
                return mtz_filename
            # no reindex operator and no spacegroup assigned - also a no-op
            if (
                self._intgr_reindex_operator is None
                and self._intgr_spacegroup_number == 0
            ):
                logger.debug(
                    "Not reindexing to spacegroup %d (%s)"
                    % (self._intgr_spacegroup_number, self._intgr_reindex_operator)
                )
                return mtz_filename
            logger.debug(
                "Reindexing to spacegroup %d (%s)"
                % (self._intgr_spacegroup_number, self._intgr_reindex_operator)
            )
            hklin = mtz_filename
            from xia2.Wrappers.CCP4.Reindex import Reindex

            reindex = Reindex()
            reindex.set_working_directory(self.get_working_directory())
            auto_logfiler(reindex)
            reindex.set_operator(self._intgr_reindex_operator)
            if self._intgr_spacegroup_number:
                reindex.set_spacegroup(self._intgr_spacegroup_number)
            else:
                reindex.set_spacegroup(
                    lattice_to_spacegroup(
                        self.get_integrater_refiner().get_refiner_lattice()
                    )
                )
            hklout = "%s_reindex.mtz" % hklin[:-4]
            reindex.set_hklin(hklin)
            reindex.set_hklout(hklout)
            reindex.reindex()
            self._intgr_integrated_filename = hklout
            self._intgr_cell = reindex.get_cell()
            pname, xname, dname = self.get_integrater_project_info()
            sweep = self.get_integrater_sweep_name()
            FileHandler.record_more_data_file(
                f"{pname} {xname} {dname} {sweep}",
                self.get_integrated_experiments(),
            )
            FileHandler.record_more_data_file(
                f"{pname} {xname} {dname} {sweep}",
                self.get_integrated_reflections(),
            )
            return hklout
        elif self._output_format == "pickle":
            # reflection-table output path: only a DIALS reindex may be needed
            if (
                self._intgr_reindex_operator is None
                and self._intgr_spacegroup_number
                == lattice_to_spacegroup(
                    self.get_integrater_refiner().get_refiner_lattice()
                )
            ):
                logger.debug(
                    "Not reindexing to spacegroup %d (%s)"
                    % (self._intgr_spacegroup_number, self._intgr_reindex_operator)
                )
                return self._intgr_integrated_reflections
            if (
                self._intgr_reindex_operator is None
                and self._intgr_spacegroup_number == 0
            ):
                logger.debug(
                    "Not reindexing to spacegroup %d (%s)"
                    % (self._intgr_spacegroup_number, self._intgr_reindex_operator)
                )
                return self._intgr_integrated_reflections
            logger.debug(
                "Reindexing to spacegroup %d (%s)"
                % (self._intgr_spacegroup_number, self._intgr_reindex_operator)
            )
            from xia2.Wrappers.Dials.Reindex import Reindex

            reindex = Reindex()
            reindex.set_working_directory(self.get_working_directory())
            auto_logfiler(reindex)
            reindex.set_cb_op(self._intgr_reindex_operator)
            if self._intgr_spacegroup_number:
                reindex.set_space_group(self._intgr_spacegroup_number)
            else:
                reindex.set_space_group(
                    lattice_to_spacegroup(
                        self.get_integrater_refiner().get_refiner_lattice()
                    )
                )
            reindex.set_experiments_filename(self.get_integrated_experiments())
            reindex.set_indexed_filename(self.get_integrated_reflections())
            reindex.run()
            self._intgr_integrated_reflections = (
                reindex.get_reindexed_reflections_filename()
            )
            self._intgr_integrated_filename = (
                reindex.get_reindexed_reflections_filename()
            )
            self._intgr_experiments_filename = (
                reindex.get_reindexed_experiments_filename()
            )
            pname, xname, dname = self.get_integrater_project_info()
            sweep = self.get_integrater_sweep_name()
            FileHandler.record_more_data_file(
                f"{pname} {xname} {dname} {sweep}",
                self.get_integrated_experiments(),
            )
            FileHandler.record_more_data_file(
                f"{pname} {xname} {dname} {sweep}",
                self.get_integrated_reflections(),
            )
            return None  # this will be set to intgr_hklout - better to cause failure
            # due to it being none than it be set wrong and not knowing?
def _integrate_select_images_wedges(self):
    """Select correct images based on image headers.

    Returns a list of wedges to index from: bare image numbers when only
    a reference pair is available, otherwise (first, last) image tuples.
    """
    phi_width = self.get_phi_width()
    images = self.get_matching_images()
    # characterise the images - are there just two (e.g. dna-style
    # reference images) or is there a full block?
    wedges = []
    if len(images) < 3:
        # work on the assumption that this is a reference pair
        # NOTE(review): this branch appends bare image numbers while the
        # else branch appends (first, last) tuples - callers apparently
        # accept both; confirm before unifying the shapes.
        wedges.append(images[0])
        if len(images) > 1:
            wedges.append(images[1])
    else:
        # wedge of ~5 degrees of data, capped at the number of images
        block_size = min(len(images), int(math.ceil(5 / phi_width)))
        logger.debug(
            "Adding images for indexer: %d -> %d"
            % (images[0], images[block_size - 1])
        )
        wedges.append((images[0], images[block_size - 1]))
        if int(90.0 / phi_width) + block_size in images:
            # assume we can add a wedge around 45 degrees as well...
            logger.debug(
                "Adding images for indexer: %d -> %d"
                % (
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                )
            )
            logger.debug(
                "Adding images for indexer: %d -> %d"
                % (
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                )
            )
            wedges.append(
                (
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                )
            )
            wedges.append(
                (
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                )
            )
        else:
            # add some half-way anyway
            first = (len(images) // 2) - (block_size // 2) + images[0] - 1
            # only add the mid-sweep wedge if it does not overlap the first
            if first > wedges[0][1]:
                last = first + block_size - 1
                logger.debug("Adding images for indexer: %d -> %d" % (first, last))
                wedges.append((first, last))
            if len(images) > block_size:
                logger.debug(
                    "Adding images for indexer: %d -> %d"
                    % (images[-block_size], images[-1])
                )
                wedges.append((images[-block_size], images[-1]))
    return wedges
def get_integrater_corrected_intensities(self):
    """Export the integrated data as an XDS-format HKL file.

    Runs integration if not already done, converts the DIALS output with
    the XDS ASCII exporter and returns the path of the written file.
    """
    self.integrate()

    export_job = ExportXDSASCII()
    export_job.set_working_directory(self.get_working_directory())
    export_job.set_experiments_filename(self.get_integrated_experiments())
    export_job.set_reflections_filename(self.get_integrated_reflections())
    auto_logfiler(export_job)

    # Name the output after the job's xpid so repeated exports don't collide.
    hklout = os.path.join(
        self.get_working_directory(), "%i_DIALS.HKL" % export_job.get_xpid()
    )
    self._intgr_corrected_hklout = hklout
    export_job.set_hkl_filename(hklout)
    export_job.run()

    # The exporter is expected to have written the file; fail loudly if not.
    assert os.path.exists(hklout)
    return hklout
def _anvil_correction(self):
    """Correct for attenuation in a diamond anvil pressure cell."""
    logger.info(
        "Rescaling integrated reflections for attenuation in the diamond anvil "
        "cell."
    )
    params = PhilIndex.params.dials.high_pressure
    # NOTE(review): this instantiates the method's own name, which would
    # recurse infinitely at runtime; it is almost certainly a mangled
    # reference to the dials.anvil_correction wrapper class - confirm
    # against the upstream xia2 source before running this path.
    anvil_correct = _anvil_correction()
    # Take the filenames of the last integration step as input.
    anvil_correct.experiments_filenames.append(self._intgr_experiments_filename)
    anvil_correct.reflections_filenames.append(self._intgr_integrated_reflections)
    # The output reflections have a filename appended with '_corrected'.
    output_reflections = "_corrected".join(
        os.path.splitext(self._intgr_integrated_reflections)
    )
    anvil_correct.output_reflections_filename = output_reflections
    # Set the user-specified parameters from the PHIL scope.
    anvil_correct.density = params.anvil.density
    anvil_correct.thickness = params.anvil.thickness
    anvil_correct.normal = params.anvil.normal
    # Run dials.anvil_correction with the parameters as set above.
    anvil_correct.set_working_directory(self.get_working_directory())
    auto_logfiler(anvil_correct)
    anvil_correct.run()
    # Downstream steps should consume the corrected reflections.
    self._intgr_integrated_reflections = output_reflections
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Integrater/DialsIntegrater.py",
"copies": "1",
"size": "24379",
"license": "bsd-3-clause",
"hash": -1982380141785672400,
"line_mean": 37.0921875,
"line_max": 88,
"alpha_frac": 0.5848065958,
"autogenerated": false,
"ratio": 3.931462667311724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5016269263111723,
"avg_score": null,
"num_lines": null
} |
# An implementation of the Integrater interface using XDS. This depends on the
# XDS wrappers to actually implement the functionality.
#
# This will "wrap" the XDS programs DEFPIX and INTEGRATE - CORRECT is
# considered to be a part of the scaling - see XDSScaler.py.
import copy
import inspect
import logging
import math
import os
import shutil
import time
import scitbx.matrix
from dials.array_family import flex
from iotbx.xds import xparm
from xia2.Experts.SymmetryExpert import (
lattice_to_spacegroup_number,
mat_to_symop,
r_to_rt,
rt_to_r,
)
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.lib.bits import auto_logfiler
from xia2.Modules.Indexer.XDSIndexer import XDSIndexer
from xia2.Schema.Exceptions.BadLatticeError import BadLatticeError
from xia2.Schema.Interfaces.Integrater import Integrater
from xia2.Wrappers.CCP4.CCP4Factory import CCP4Factory
from xia2.Wrappers.CCP4.Reindex import Reindex
from xia2.Wrappers.Dials.ImportXDS import ImportXDS
from xia2.Wrappers.XDS.XDSCorrect import XDSCorrect as _Correct
from xia2.Wrappers.XDS.XDSDefpix import XDSDefpix as _Defpix
from xia2.Wrappers.XDS.XDSIntegrate import XDSIntegrate as _Integrate
logger = logging.getLogger("xia2.Modules.Integrater.XDSIntegrater")
class XDSIntegrater(Integrater):
    """A class to implement the Integrater interface using *only* XDS
    programs."""

    def __init__(self):
        super().__init__()
        # check that the programs exist - this will raise an exception if
        # they do not...
        _Integrate()
        # place to store working data: XDS auxiliary files keyed by filename
        self._xds_data_files = {}
        self._intgr_experiments_filename = None
        # internal parameters to pass around between INTEGRATE runs
        self._xds_integrate_parameters = {}
        # factory for pointless - used for converting INTEGRATE.HKL to .mtz
        self._factory = CCP4Factory()
def to_dict(self):
    """Serialise this integrater, including all private ``_xds_*`` state."""
    serialised = Integrater.to_dict(self)
    members = inspect.getmembers(self, lambda m: not inspect.isroutine(m))
    # Fold the XDS-specific working data into the base-class serialisation.
    serialised.update(
        (name, value) for name, value in members if name.startswith("_xds_")
    )
    return serialised
@classmethod
def from_dict(cls, obj):
    """Rebuild an integrater from its serialised form."""
    instance = super().from_dict(obj)
    # The CCP4 factory is not serialisable, so recreate it afresh.
    instance._factory = CCP4Factory()
    return instance
# overload these methods as we don't want the resolution range
# feeding back... aha - but we may want to assign them
# from outside!
def set_integrater_resolution(self, dmin, dmax, user=False):
    """Set both resolution limits, honoured only when user-supplied.

    Automatic (non-user) limits are deliberately ignored so that feedback
    from later processing cannot narrow the integration resolution range.
    """
    if user:
        Integrater.set_integrater_resolution(self, dmin, dmax, user)
def set_integrater_high_resolution(self, dmin, user=False):
    """Set the high resolution limit, honoured only when user-supplied."""
    if user:
        Integrater.set_integrater_high_resolution(self, dmin, user)
def set_integrater_low_resolution(self, dmax, user=False):
    """Set the low resolution limit (always honoured, unlike dmin/dmax)."""
    self._intgr_reso_low = dmax
def get_integrater_corrected_intensities(self):
    """Return the CORRECT-scaled intensities file, integrating first if needed."""
    self.integrate()
    return self._intgr_corrected_hklout
# admin functions
def _set_integrater_reindex_operator_callback(self):
    """If a REMOVE.HKL file exists in the working directory, remove it.

    A stale REMOVE.HKL (alien-reflection list from an earlier CORRECT
    run) is no longer valid once the reindex operator changes.
    """
    # Compute the path once instead of three times (was repeated inline).
    remove_hkl = os.path.join(self.get_working_directory(), "REMOVE.HKL")
    if os.path.exists(remove_hkl):
        os.remove(remove_hkl)
        logger.debug("Deleting REMOVE.HKL as reindex op set.")
# factory functions
def Defpix(self):
    """Create a configured XDS DEFPIX wrapper for this sweep.

    Geometry comes from the imageset; explicit distance/wavelength
    overrides and the PHIL trusted-pixel range are layered on top.
    """
    defpix = _Defpix()
    defpix.set_working_directory(self.get_working_directory())
    defpix.setup_from_imageset(self.get_imageset())
    # Apply explicit overrides only when set (falsy means "use the header").
    if self.get_distance():
        defpix.set_distance(self.get_distance())
    if self.get_wavelength():
        defpix.set_wavelength(self.get_wavelength())
    value_range_for_trusted_detector_pixels = (
        PhilIndex.params.xds.defpix.value_range_for_trusted_detector_pixels
    )
    if value_range_for_trusted_detector_pixels is not None:
        defpix.set_value_range_for_trusted_detector_pixels(
            value_range_for_trusted_detector_pixels
        )
    auto_logfiler(defpix, "DEFPIX")
    return defpix
def Integrate(self):
    """Create a configured XDS INTEGRATE wrapper for this sweep."""
    integrate = _Integrate(params=PhilIndex.params.xds.integrate)
    integrate.set_working_directory(self.get_working_directory())
    integrate.setup_from_imageset(self.get_imageset())
    # Apply explicit overrides only when set (falsy means "use the header").
    if self.get_distance():
        integrate.set_distance(self.get_distance())
    if self.get_wavelength():
        integrate.set_wavelength(self.get_wavelength())
    auto_logfiler(integrate, "INTEGRATE")
    return integrate
def Correct(self):
    """Create a configured XDS CORRECT wrapper for this sweep.

    Carries across geometry plus ice-ring/excluded-region/anomalous
    settings and the low resolution limit from the integrater state.
    """
    correct = _Correct(params=PhilIndex.params.xds.correct)
    correct.set_working_directory(self.get_working_directory())
    correct.setup_from_imageset(self.get_imageset())
    # Apply explicit overrides only when set (falsy means "use the header").
    if self.get_distance():
        correct.set_distance(self.get_distance())
    if self.get_wavelength():
        correct.set_wavelength(self.get_wavelength())
    if self.get_integrater_ice():
        correct.set_ice(self.get_integrater_ice())
    if self.get_integrater_excluded_regions():
        correct.set_excluded_regions(self.get_integrater_excluded_regions())
    if self.get_integrater_anomalous():
        correct.set_anomalous(True)
    if self.get_integrater_low_resolution() > 0.0:
        logger.debug(
            "Using low resolution limit: %.2f"
            % self.get_integrater_low_resolution()
        )
        # high limit 0.0 means "no high resolution cutoff" to CORRECT
        correct.set_resolution_high(0.0)
        correct.set_resolution_low(self.get_integrater_low_resolution())
    auto_logfiler(correct, "CORRECT")
    return correct
# now some real functions, which do useful things
def _integrater_reset_callback(self):
    """Forget all cached XDS files and parameters when the integrater resets."""
    logger.debug("Deleting all stored results.")
    # Drop both the data-file cache and the carried-over INTEGRATE updates.
    self._xds_data_files, self._xds_integrate_parameters = {}, {}
def _integrate_prepare(self):
    """Prepare for integration - in XDS terms this may mean rerunning
    IDXREF to get the XPARM etc. DEFPIX is considered part of the full
    integration as it is resolution dependent."""
    Citations.cite("xds")
    # decide what images we are going to process, if not already
    # specified
    if not self._intgr_wedge:
        images = self.get_matching_images()
        self.set_integrater_wedge(
            min(images) + self.get_frame_offset(),
            max(images) + self.get_frame_offset(),
        )
    logger.debug("XDS INTEGRATE PREPARE:")
    logger.debug("Wavelength: %.6f" % self.get_wavelength())
    logger.debug("Distance: %.2f" % self.get_distance())
    idxr = self._intgr_refiner.get_refiner_indexer(self.get_integrater_epoch())
    if idxr is None:
        # no indexer yet for this epoch - create and configure a fresh one
        # NOTE(review): indentation reconstructed from a whitespace-mangled
        # source; the following setup is assumed to configure only the
        # newly created indexer - confirm against upstream xia2.
        idxr = XDSIndexer()
        self._intgr_refiner.add_refiner_indexer(self.get_integrater_epoch(), idxr)
        self.set_integrater_prepare_done(False)
        # self.set_integrater_indexer()
        idxr.set_indexer_sweep(self.get_integrater_sweep())
        idxr.set_working_directory(self.get_working_directory())
        idxr.setup_from_imageset(self.get_imageset())
        if self.get_frame_wedge():
            wedge = self.get_frame_wedge()
            logger.debug("Propogating wedge limit: %d %d" % wedge)
            idxr.set_frame_wedge(wedge[0], wedge[1], apply_offset=False)
        # this needs to be set up from the contents of the
        # Integrater frame processer - wavelength &c.
        if self.get_beam_centre():
            idxr.set_beam_centre(self.get_beam_centre())
        if self.get_distance():
            idxr.set_distance(self.get_distance())
        if self.get_wavelength():
            idxr.set_wavelength(self.get_wavelength())
    # get the unit cell from this indexer to initiate processing
    # if it is new... and also copy out all of the information for
    # the XDS indexer if not...
    # copy the data across
    self._xds_data_files = copy.deepcopy(
        self._intgr_refiner.get_refiner_payload(self.get_integrater_epoch())
    )
    if self._xds_data_files is None:
        self._xds_data_files = {}
    logger.debug("Files available at the end of XDS integrate prepare:")
    for f in self._xds_data_files:
        logger.debug("%s" % f)
    experiment = self._intgr_refiner.get_refined_experiment_list(
        self.get_integrater_epoch()
    )[0]
    # copy across the trusted_range - it got lost along the way
    old_detector = self.get_detector()
    self.set_detector(experiment.detector)
    for p1, p2 in zip(old_detector, self.get_detector()):
        p2.set_trusted_range(p1.get_trusted_range())
    self.set_beam_obj(experiment.beam)
    self.set_goniometer(experiment.goniometer)
    # set a low resolution limit (which isn't really used...)
    # this should perhaps be done more intelligently from an
    # analysis of the spot list or something...?
    if not self.get_integrater_low_resolution():
        dmax = self._intgr_refiner.get_indexer_low_resolution(
            self.get_integrater_epoch()
        )
        self.set_integrater_low_resolution(dmax)
        logger.debug(
            "Low resolution set to: %s" % self.get_integrater_low_resolution()
        )
    # delete things we should not know e.g. the postrefined cell from
    # CORRECT - c/f bug # 2695
    self._intgr_cell = None
    self._intgr_spacegroup_number = None
def _integrate(self):
    """Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.

    Returns the path of the INTEGRATE.HKL output file.
    """
    experiment = self._intgr_refiner.get_refined_experiment_list(
        self.get_integrater_epoch()
    )[0]
    crystal_model = experiment.crystal
    self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()
    defpix = self.Defpix()
    # pass in the correct data
    for file in (
        "X-CORRECTIONS.cbf",
        "Y-CORRECTIONS.cbf",
        "BKGINIT.cbf",
        "XPARM.XDS",
    ):
        defpix.set_input_data_file(file, self._xds_data_files[file])
    defpix.set_data_range(
        self._intgr_wedge[0] + self.get_frame_offset(),
        self._intgr_wedge[1] + self.get_frame_offset(),
    )
    # user-supplied high resolution limit takes precedence; otherwise only
    # the low limit is propagated (high limit 0.0 = no cutoff)
    if (
        self.get_integrater_high_resolution() > 0.0
        and self.get_integrater_user_resolution()
    ):
        logger.debug(
            "Setting resolution limit in DEFPIX to %.2f"
            % self.get_integrater_high_resolution()
        )
        defpix.set_resolution_high(self.get_integrater_high_resolution())
        defpix.set_resolution_low(self.get_integrater_low_resolution())
    elif self.get_integrater_low_resolution():
        logger.debug(
            "Setting low resolution limit in DEFPIX to %.2f"
            % self.get_integrater_low_resolution()
        )
        defpix.set_resolution_high(0.0)
        defpix.set_resolution_low(self.get_integrater_low_resolution())
    defpix.run()
    # and gather the result files
    for file in ("BKGPIX.cbf", "ABS.cbf"):
        self._xds_data_files[file] = defpix.get_output_data_file(file)
    integrate = self.Integrate()
    if self._xds_integrate_parameters:
        integrate.set_updates(self._xds_integrate_parameters)
    # decide what images we are going to process, if not already
    # specified
    if not self._intgr_wedge:
        images = self.get_matching_images()
        self.set_integrater_wedge(min(images), max(images))
    integrate.set_data_range(
        self._intgr_wedge[0] + self.get_frame_offset(),
        self._intgr_wedge[1] + self.get_frame_offset(),
    )
    for file in (
        "X-CORRECTIONS.cbf",
        "Y-CORRECTIONS.cbf",
        "BLANK.cbf",
        "BKGPIX.cbf",
        "GAIN.cbf",
    ):
        integrate.set_input_data_file(file, self._xds_data_files[file])
    # prefer globally refined parameters (GXPARM) from an earlier pass
    if "GXPARM.XDS" in self._xds_data_files:
        logger.debug("Using globally refined parameters")
        integrate.set_input_data_file(
            "XPARM.XDS", self._xds_data_files["GXPARM.XDS"]
        )
        integrate.set_refined_xparm()
    else:
        integrate.set_input_data_file(
            "XPARM.XDS", self._xds_data_files["XPARM.XDS"]
        )
    integrate.run()
    self._intgr_per_image_statistics = integrate.get_per_image_statistics()
    logger.info(self.show_per_image_statistics())
    # record the log file -
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file(
        f"{pname} {xname} {dname} {sweep} INTEGRATE",
        os.path.join(self.get_working_directory(), "INTEGRATE.LP"),
    )
    # and copy the first pass INTEGRATE.HKL...
    lattice = self._intgr_refiner.get_refiner_lattice()
    if not os.path.exists(
        os.path.join(self.get_working_directory(), "INTEGRATE-%s.HKL" % lattice)
    ):
        here = self.get_working_directory()
        shutil.copyfile(
            os.path.join(here, "INTEGRATE.HKL"),
            os.path.join(here, "INTEGRATE-%s.HKL" % lattice),
        )
    # record INTEGRATE.HKL
    FileHandler.record_more_data_file(
        f"{pname} {xname} {dname} {sweep} INTEGRATE",
        os.path.join(self.get_working_directory(), "INTEGRATE.HKL"),
    )
    # should the existence of these require that I rerun the
    # integration or can we assume that the application of a
    # sensible resolution limit will achieve this??
    self._xds_integrate_parameters = integrate.get_updates()
    # record the mosaic spread &c.
    m_min, m_mean, m_max = integrate.get_mosaic()
    self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)
    logger.info(
        "Mosaic spread: %.3f < %.3f < %.3f"
        % self.get_integrater_mosaic_min_mean_max()
    )
    return os.path.join(self.get_working_directory(), "INTEGRATE.HKL")
def _integrate_finish(self):
    """Finish off the integration by running correct.

    Runs CORRECT (optionally benchmarked against a P1 run for lattice
    rejection), handles reindexing and alien-reflection removal, then
    converts INTEGRATE.HKL to MTZ and returns the MTZ path.

    NOTE(review): indentation in this method was reconstructed from a
    whitespace-mangled source - verify nesting against upstream xia2.
    """
    # first run the postrefinement etc with spacegroup P1
    # and the current unit cell - this will be used to
    # obtain a benchmark rmsd in pixels / phi and also
    # cell deviations (this is working towards spotting bad
    # indexing solutions) - only do this if we have no
    # reindex matrix... and no postrefined cell...
    p1_deviations = None
    # fix for bug # 3264 -
    # if we have not run integration with refined parameters, make it so...
    # erm? shouldn't this therefore return if this is the principle, or
    # set the flag after we have tested the lattice?
    if (
        "GXPARM.XDS" not in self._xds_data_files
        and PhilIndex.params.xds.integrate.reintegrate
    ):
        logger.debug("Resetting integrater, to ensure refined orientation is used")
        self.set_integrater_done(False)
    if (
        not self.get_integrater_reindex_matrix()
        and not self._intgr_cell
        and PhilIndex.params.xia2.settings.lattice_rejection
        and not self.get_integrater_sweep().get_user_lattice()
    ):
        correct = self.Correct()
        correct.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset(),
        )
        if self.get_polarization() > 0.0:
            correct.set_polarization(self.get_polarization())
        # FIXME should this be using the correctly transformed
        # cell or are the results ok without it?!
        correct.set_spacegroup_number(1)
        correct.set_cell(self._intgr_refiner_cell)
        correct.run()
        cell = correct.get_result("cell")
        cell_esd = correct.get_result("cell_esd")
        logger.debug("Postrefinement in P1 results:")
        logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % tuple(cell))
        logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % tuple(cell_esd))
        logger.debug(
            "Deviations: %.2f pixels %.2f degrees"
            % (correct.get_result("rmsd_pixel"), correct.get_result("rmsd_phi"))
        )
        p1_deviations = (
            correct.get_result("rmsd_pixel"),
            correct.get_result("rmsd_phi"),
        )
    # next run the postrefinement etc with the given
    # cell / lattice - this will be the assumed result...
    integrate_hkl = os.path.join(self.get_working_directory(), "INTEGRATE.HKL")
    if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
        from dxtbx.serialize import load
        from dials.algorithms.shadowing.filter import filter_shadowed_reflections

        experiments_json = xparm_xds_to_experiments_json(
            os.path.join(self.get_working_directory(), "XPARM.XDS"),
            self.get_working_directory(),
        )
        experiments = load.experiment_list(experiments_json, check_format=True)
        imageset = experiments[0].imageset
        masker = (
            imageset.get_format_class()
            .get_instance(imageset.paths()[0])
            .get_masker()
        )
        if masker is not None:
            # identify reflections occluded by the goniometer shadow and
            # write them to FILTER.HKL so XDS can ignore them
            integrate_filename = integrate_hkl_to_reflection_file(
                integrate_hkl, experiments_json, self.get_working_directory()
            )
            reflections = flex.reflection_table.from_file(integrate_filename)
            t0 = time.time()
            sel = filter_shadowed_reflections(experiments, reflections)
            shadowed = reflections.select(sel)
            t1 = time.time()
            logger.debug(
                "Filtered %i reflections in %.1f seconds"
                % (sel.count(True), t1 - t0)
            )
            filter_hkl = os.path.join(self.get_working_directory(), "FILTER.HKL")
            with open(filter_hkl, "wb") as f:
                detector = experiments[0].detector
                for ref in shadowed:
                    p = detector[ref["panel"]]
                    ox, oy = p.get_raw_image_offset()
                    h, k, l = ref["miller_index"]
                    x, y, z = ref["xyzcal.px"]
                    dx, dy, dz = (2, 2, 2)
                    print(
                        "%i %i %i %.1f %.1f %.1f %.1f %.1f %.1f"
                        % (h, k, l, x + ox, y + oy, z, dx, dy, dz),
                        file=f,
                    )
            t2 = time.time()
            logger.debug("Written FILTER.HKL in %.1f seconds" % (t2 - t1))
    correct = self.Correct()
    correct.set_data_range(
        self._intgr_wedge[0] + self.get_frame_offset(),
        self._intgr_wedge[1] + self.get_frame_offset(),
    )
    if self.get_polarization() > 0.0:
        correct.set_polarization(self.get_polarization())
    # BUG # 2695 probably comes from here - need to check...
    # if the pointless interface comes back with a different
    # crystal setting then the unit cell stored in self._intgr_cell
    # needs to be set to None...
    if self.get_integrater_spacegroup_number():
        correct.set_spacegroup_number(self.get_integrater_spacegroup_number())
        if not self._intgr_cell:
            raise RuntimeError("no unit cell to recycle")
        correct.set_cell(self._intgr_cell)
    # BUG # 3113 - new version of XDS will try and figure the
    # best spacegroup out from the intensities (and get it wrong!)
    # unless we set the spacegroup and cell explicitly
    if not self.get_integrater_spacegroup_number():
        cell = self._intgr_refiner_cell
        lattice = self._intgr_refiner.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup_number(lattice)
        # this should not prevent the postrefinement from
        # working correctly, else what is above would not
        # work correctly (the postrefinement test)
        correct.set_spacegroup_number(spacegroup_number)
        correct.set_cell(cell)
        logger.debug("Setting spacegroup to: %d" % spacegroup_number)
        logger.debug("Setting cell to: %.2f %.2f %.2f %.2f %.2f %.2f" % tuple(cell))
    if self.get_integrater_reindex_matrix():
        # bug! if the lattice is not primitive the values in this
        # reindex matrix need to be multiplied by a constant which
        # depends on the Bravais lattice centering.
        lattice = self._intgr_refiner.get_refiner_lattice()
        matrix = self.get_integrater_reindex_matrix()
        matrix = scitbx.matrix.sqr(matrix).transpose().elems
        matrix = r_to_rt(matrix)
        if lattice[1] == "P":
            mult = 1
        elif lattice[1] == "C" or lattice[1] == "I":
            mult = 2
        elif lattice[1] == "R":
            mult = 3
        elif lattice[1] == "F":
            mult = 4
        else:
            raise RuntimeError("unknown multiplier for lattice %s" % lattice)
        logger.debug("REIDX multiplier for lattice %s: %d" % (lattice, mult))
        mult_matrix = [mult * m for m in matrix]
        logger.debug(
            "REIDX set to %d %d %d %d %d %d %d %d %d %d %d %d" % tuple(mult_matrix)
        )
        correct.set_reindex_matrix(mult_matrix)
    correct.run()
    # record the log file -
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file(
        f"{pname} {xname} {dname} {sweep} CORRECT",
        os.path.join(self.get_working_directory(), "CORRECT.LP"),
    )
    FileHandler.record_more_data_file(
        f"{pname} {xname} {dname} {sweep} CORRECT",
        os.path.join(self.get_working_directory(), "XDS_ASCII.HKL"),
    )
    # erm. just to be sure
    if self.get_integrater_reindex_matrix() and correct.get_reindex_used():
        raise RuntimeError("Reindex panic!")
    # get the reindex operation used, which may be useful if none was
    # set but XDS decided to apply one, e.g. #419.
    if not self.get_integrater_reindex_matrix() and correct.get_reindex_used():
        # convert this reindex operation to h, k, l form: n.b. this
        # will involve dividing through by the lattice centring multiplier
        matrix = rt_to_r(correct.get_reindex_used())
        matrix = scitbx.matrix.sqr(matrix).transpose().elems
        lattice = self._intgr_refiner.get_refiner_lattice()
        # NOTE(review): unlike the branch above, there is no else clause
        # here, so an unexpected centring letter would leave `mult`
        # unbound and raise NameError below - confirm this is intended.
        if lattice[1] == "P":
            mult = 1.0
        elif lattice[1] == "C" or lattice[1] == "I":
            mult = 2.0
        elif lattice[1] == "R":
            mult = 3.0
        elif lattice[1] == "F":
            mult = 4.0
        matrix = [m / mult for m in matrix]
        reindex_op = mat_to_symop(matrix)
        # assign this to self: will this reset?! make for a leaky
        # abstraction and just assign this...
        # self.set_integrater_reindex_operator(reindex)
        self._intgr_reindex_operator = reindex_op
        # record the log file -
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            f"{pname} {xname} {dname} {sweep} CORRECT",
            os.path.join(self.get_working_directory(), "CORRECT.LP"),
        )
    # should get some interesting stuff from the XDS correct file
    # here, for instance the resolution range to use in integration
    # (which should be fed back if not fast) and so on...
    self._intgr_corrected_hklout = os.path.join(
        self.get_working_directory(), "XDS_ASCII.HKL"
    )
    # also record the batch range - needed for the analysis of the
    # radiation damage in chef...
    self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])
    # FIXME perhaps I should also feedback the GXPARM file here??
    for file in ["GXPARM.XDS"]:
        self._xds_data_files[file] = correct.get_output_data_file(file)
    # record the postrefined cell parameters
    self._intgr_cell = correct.get_result("cell")
    self._intgr_n_ref = correct.get_result("n_ref")
    logger.debug('Postrefinement in "correct" spacegroup results:')
    logger.debug(
        "%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % tuple(correct.get_result("cell"))
    )
    logger.debug(
        "%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f"
        % tuple(correct.get_result("cell_esd"))
    )
    logger.debug(
        "Deviations: %.2f pixels %.2f degrees"
        % (correct.get_result("rmsd_pixel"), correct.get_result("rmsd_phi"))
    )
    logger.debug(
        "Error correction parameters: A=%.3f B=%.3f"
        % correct.get_result("sdcorrection")
    )
    # compute misorientation of axes
    xparm_file = os.path.join(self.get_working_directory(), "GXPARM.XDS")
    handle = xparm.reader()
    handle.read_file(xparm_file)
    rotn = handle.rotation_axis
    beam = handle.beam_vector
    # angle between rotation axis and beam, expressed as deviation from 90
    dot = sum(rotn[j] * beam[j] for j in range(3))
    r = math.sqrt(sum(rotn[j] * rotn[j] for j in range(3)))
    b = math.sqrt(sum(beam[j] * beam[j] for j in range(3)))
    rtod = 180.0 / math.pi
    angle = rtod * math.fabs(0.5 * math.pi - math.acos(dot / (r * b)))
    logger.debug("Axis misalignment %.2f degrees" % angle)
    correct_deviations = (
        correct.get_result("rmsd_pixel"),
        correct.get_result("rmsd_phi"),
    )
    if p1_deviations:
        # compare and reject if both > 50% higher - though adding a little
        # flexibility - 0.5 pixel / osc width slack.
        pixel = p1_deviations[0]
        phi = math.sqrt(0.05 * 0.05 + p1_deviations[1] * p1_deviations[1])
        threshold = PhilIndex.params.xia2.settings.lattice_rejection_threshold
        logger.debug("RMSD ratio: %.2f" % (correct_deviations[0] / pixel))
        logger.debug("RMSPhi ratio: %.2f" % (correct_deviations[1] / phi))
        if (
            correct_deviations[0] / pixel > threshold
            and correct_deviations[1] / phi > threshold
        ):
            logger.info("Eliminating this indexing solution as postrefinement")
            logger.info("deviations rather high relative to triclinic")
            raise BadLatticeError("high relative deviations in postrefinement")
    if (
        not PhilIndex.params.dials.fast_mode
        and not PhilIndex.params.xds.keep_outliers
    ):
        # check for alien reflections and perhaps recycle - removing them
        correct_remove = correct.get_remove()
        if correct_remove:
            current_remove = set()
            final_remove = []
            # first ensure that there are no duplicate entries...
            if os.path.exists(
                os.path.join(self.get_working_directory(), "REMOVE.HKL")
            ):
                with open(
                    os.path.join(self.get_working_directory(), "REMOVE.HKL")
                ) as fh:
                    for line in fh.readlines():
                        h, k, l = list(map(int, line.split()[:3]))
                        z = float(line.split()[3])
                        if (h, k, l, z) not in current_remove:
                            current_remove.add((h, k, l, z))
                for c in correct_remove:
                    if c in current_remove:
                        continue
                    final_remove.append(c)
                logger.debug(
                    "%d alien reflections are already removed"
                    % (len(correct_remove) - len(final_remove))
                )
            else:
                # we want to remove all of the new dodgy reflections
                final_remove = correct_remove
            z_min = PhilIndex.params.xds.z_min
            rejected = 0
            with open(
                os.path.join(self.get_working_directory(), "REMOVE.HKL"), "w"
            ) as remove_hkl:
                # write in the old reflections
                for remove in current_remove:
                    z = remove[3]
                    if z >= z_min:
                        remove_hkl.write("%d %d %d %f\n" % remove)
                    else:
                        rejected += 1
                logger.debug(
                    "Wrote %d old reflections to REMOVE.HKL"
                    % (len(current_remove) - rejected)
                )
                logger.debug("Rejected %d as z < %f" % (rejected, z_min))
                # and the new reflections
                rejected = 0
                used = 0
                for remove in final_remove:
                    z = remove[3]
                    if z >= z_min:
                        used += 1
                        remove_hkl.write("%d %d %d %f\n" % remove)
                    else:
                        rejected += 1
                logger.debug(
                    "Wrote %d new reflections to REMOVE.HKL"
                    % (len(final_remove) - rejected)
                )
                logger.debug("Rejected %d as z < %f" % (rejected, z_min))
            # we want to rerun the finishing step so...
            # unless we have added no new reflections... or unless we
            # have not confirmed the point group (see SCI-398)
            if used and self.get_integrater_reindex_matrix():
                self.set_integrater_finish_done(False)
    else:
        logger.debug(
            "Going quickly so not removing %d outlier reflections..."
            % len(correct.get_remove())
        )
    # Convert INTEGRATE.HKL to MTZ format and reapply any reindexing operations
    # spacegroup changes to allow use with CCP4 / Aimless for scaling
    hklout = os.path.splitext(integrate_hkl)[0] + ".mtz"
    self._factory.set_working_directory(self.get_working_directory())
    pointless = self._factory.Pointless()
    pointless.set_xdsin(integrate_hkl)
    pointless.set_hklout(hklout)
    pointless.xds_to_mtz()
    integrate_mtz = hklout
    if (
        self.get_integrater_reindex_operator()
        or self.get_integrater_spacegroup_number()
    ):
        logger.debug("Reindexing things to MTZ")
        reindex = Reindex()
        reindex.set_working_directory(self.get_working_directory())
        auto_logfiler(reindex)
        if self.get_integrater_reindex_operator():
            reindex.set_operator(self.get_integrater_reindex_operator())
        if self.get_integrater_spacegroup_number():
            reindex.set_spacegroup(self.get_integrater_spacegroup_number())
        hklout = "%s_reindex.mtz" % os.path.splitext(integrate_mtz)[0]
        reindex.set_hklin(integrate_mtz)
        reindex.set_hklout(hklout)
        reindex.reindex()
        integrate_mtz = hklout
    experiments_json = xparm_xds_to_experiments_json(
        self._xds_data_files["GXPARM.XDS"], self.get_working_directory()
    )
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_more_data_file(
        f"{pname} {xname} {dname} {sweep}", experiments_json
    )
    FileHandler.record_more_data_file(
        f"{pname} {xname} {dname} {sweep} INTEGRATE", integrate_mtz
    )
    self._intgr_experiments_filename = experiments_json
    return integrate_mtz
def get_integrated_experiments(self):
    """Return the path to the experiments JSON produced by integration."""
    return self._intgr_experiments_filename
def integrate_hkl_to_reflection_file(
    integrate_hkl, experiments_json, working_directory
):
    """Convert an XDS INTEGRATE.HKL into a DIALS reflection file.

    Runs the dials XDS importer in *working_directory* and returns the
    filename of the resulting reflection table.
    """
    import_job = ImportXDS()
    import_job.set_working_directory(working_directory)
    auto_logfiler(import_job)
    import_job.set_experiments_json(experiments_json)
    import_job.set_integrate_hkl(integrate_hkl)
    import_job.run()
    return import_job.get_reflection_filename()
def xparm_xds_to_experiments_json(xparm_xds, working_directory):
    """Convert an XDS (G)XPARM.XDS file into a DIALS experiments JSON.

    Runs the dials XDS importer in *working_directory* and returns the
    filename of the generated experiments JSON.
    """
    import_job = ImportXDS()
    import_job.set_working_directory(working_directory)
    auto_logfiler(import_job)
    import_job.set_xparm_xds(xparm_xds)
    import_job.run()
    return import_job.get_experiments_json()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Integrater/XDSIntegrater.py",
"copies": "1",
"size": "33839",
"license": "bsd-3-clause",
"hash": 1583534125715426800,
"line_mean": 35.8215451578,
"line_max": 88,
"alpha_frac": 0.5706137888,
"autogenerated": false,
"ratio": 3.7804714557032733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4851085244503273,
"avg_score": null,
"num_lines": null
} |
# Package metadata for pyld-signatures.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# `from setuptools import setup` would be a drop-in replacement here.
from distutils.core import setup
setup(
    name='pyld-signatures',
    version='0.1dev',
    packages=['pyld_sig',],
    license='License :: OSI Approved :: BSD License',
    author='Spec-Ops',
    install_requires=[
        "cryptography",
        "isodate",
        "pyld",
        "pytz",
        # NOTE(review): pytest is a test-only dependency; listing it in
        # install_requires forces it on all users - confirm before moving
        # it to a test extra.
        "pytest",
    ],
)
| {
"repo_name": "Spec-Ops/pyld-signatures",
"path": "setup.py",
"copies": "1",
"size": "2021",
"license": "bsd-3-clause",
"hash": 7780679977858535000,
"line_mean": 39.42,
"line_max": 78,
"alpha_frac": 0.7377535873,
"autogenerated": false,
"ratio": 4.4126637554585155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008742035423925815,
"num_lines": 50
} |
import base64
import copy
import json
import isodate
from datetime import datetime
from pyld import jsonld
import pytz
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.exceptions import InvalidSignature
SECURITY_CONTEXT_URL = 'https://w3id.org/security/v1'
# Bundled copy of the security-v1 JSON-LD context, so documents can be
# compacted/expanded against it without a network fetch (see the default
# document loader below).
SECURITY_CONTEXT = {
    "@context": {
        "id": "@id",
        "type": "@type",
        "dc": "http://purl.org/dc/terms/",
        "sec": "https://w3id.org/security#",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
        "EcdsaKoblitzSignature2016": "sec:EcdsaKoblitzSignature2016",
        "EncryptedMessage": "sec:EncryptedMessage",
        "GraphSignature2012": "sec:GraphSignature2012",
        "LinkedDataSignature2015": "sec:LinkedDataSignature2015",
        "LinkedDataSignature2016": "sec:LinkedDataSignature2016",
        "CryptographicKey": "sec:Key",
        "authenticationTag": "sec:authenticationTag",
        "canonicalizationAlgorithm": "sec:canonicalizationAlgorithm",
        "cipherAlgorithm": "sec:cipherAlgorithm",
        "cipherData": "sec:cipherData",
        "cipherKey": "sec:cipherKey",
        "created": {"@id": "dc:created", "@type": "xsd:dateTime"},
        "creator": {"@id": "dc:creator", "@type": "@id"},
        "digestAlgorithm": "sec:digestAlgorithm",
        "digestValue": "sec:digestValue",
        "domain": "sec:domain",
        "encryptionKey": "sec:encryptionKey",
        "expiration": {"@id": "sec:expiration", "@type": "xsd:dateTime"},
        "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"},
        "initializationVector": "sec:initializationVector",
        "iterationCount": "sec:iterationCount",
        "nonce": "sec:nonce",
        "normalizationAlgorithm": "sec:normalizationAlgorithm",
        "owner": {"@id": "sec:owner", "@type": "@id"},
        "password": "sec:password",
        "privateKey": {"@id": "sec:privateKey", "@type": "@id"},
        "privateKeyPem": "sec:privateKeyPem",
        "publicKey": {"@id": "sec:publicKey", "@type": "@id"},
        "publicKeyPem": "sec:publicKeyPem",
        "publicKeyService": {"@id": "sec:publicKeyService", "@type": "@id"},
        "revoked": {"@id": "sec:revoked", "@type": "xsd:dateTime"},
        "salt": "sec:salt",
        "signature": "sec:signature",
        "signatureAlgorithm": "sec:signingAlgorithm",
        "signatureValue": "sec:signatureValue"}}
IDENTITY_CONTEXT_URL = 'https://w3id.org/identity/v1'
# Bundled copy of the identity-v1 JSON-LD context (superset vocabulary
# covering credentials, schema.org identity terms and the security terms
# above), served from memory by the default document loader.
IDENTITY_CONTEXT = {
    "@context": {
        "id": "@id",
        "type": "@type",
        "cred": "https://w3id.org/credentials#",
        "dc": "http://purl.org/dc/terms/",
        "identity": "https://w3id.org/identity#",
        "perm": "https://w3id.org/permissions#",
        "ps": "https://w3id.org/payswarm#",
        "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "sec": "https://w3id.org/security#",
        "schema": "http://schema.org/",
        "xsd": "http://www.w3.org/2001/XMLSchema#",
        "Group": "https://www.w3.org/ns/activitystreams#Group",
        "claim": {"@id": "cred:claim", "@type": "@id"},
        "credential": {"@id": "cred:credential", "@type": "@id"},
        "issued": {"@id": "cred:issued", "@type": "xsd:dateTime"},
        "issuer": {"@id": "cred:issuer", "@type": "@id"},
        "recipient": {"@id": "cred:recipient", "@type": "@id"},
        "Credential": "cred:Credential",
        "CryptographicKeyCredential": "cred:CryptographicKeyCredential",
        "about": {"@id": "schema:about", "@type": "@id"},
        "address": {"@id": "schema:address", "@type": "@id"},
        "addressCountry": "schema:addressCountry",
        "addressLocality": "schema:addressLocality",
        "addressRegion": "schema:addressRegion",
        "comment": "rdfs:comment",
        "created": {"@id": "dc:created", "@type": "xsd:dateTime"},
        "creator": {"@id": "dc:creator", "@type": "@id"},
        "description": "schema:description",
        "email": "schema:email",
        "familyName": "schema:familyName",
        "givenName": "schema:givenName",
        "image": {"@id": "schema:image", "@type": "@id"},
        "label": "rdfs:label",
        "name": "schema:name",
        "postalCode": "schema:postalCode",
        "streetAddress": "schema:streetAddress",
        "title": "dc:title",
        "url": {"@id": "schema:url", "@type": "@id"},
        "Person": "schema:Person",
        "PostalAddress": "schema:PostalAddress",
        "Organization": "schema:Organization",
        "identityService": {"@id": "identity:identityService", "@type": "@id"},
        "idp": {"@id": "identity:idp", "@type": "@id"},
        "Identity": "identity:Identity",
        "paymentProcessor": "ps:processor",
        "preferences": {"@id": "ps:preferences", "@type": "@vocab"},
        "cipherAlgorithm": "sec:cipherAlgorithm",
        "cipherData": "sec:cipherData",
        "cipherKey": "sec:cipherKey",
        "digestAlgorithm": "sec:digestAlgorithm",
        "digestValue": "sec:digestValue",
        "domain": "sec:domain",
        "expires": {"@id": "sec:expiration", "@type": "xsd:dateTime"},
        "initializationVector": "sec:initializationVector",
        "member": {"@id": "schema:member", "@type": "@id"},
        "memberOf": {"@id": "schema:memberOf", "@type": "@id"},
        "nonce": "sec:nonce",
        "normalizationAlgorithm": "sec:normalizationAlgorithm",
        "owner": {"@id": "sec:owner", "@type": "@id"},
        "password": "sec:password",
        "privateKey": {"@id": "sec:privateKey", "@type": "@id"},
        "privateKeyPem": "sec:privateKeyPem",
        "publicKey": {"@id": "sec:publicKey", "@type": "@id"},
        "publicKeyPem": "sec:publicKeyPem",
        "publicKeyService": {"@id": "sec:publicKeyService", "@type": "@id"},
        "revoked": {"@id": "sec:revoked", "@type": "xsd:dateTime"},
        "signature": "sec:signature",
        "signatureAlgorithm": "sec:signatureAlgorithm",
        "signatureValue": "sec:signatureValue",
        "CryptographicKey": "sec:Key",
        "EncryptedMessage": "sec:EncryptedMessage",
        "GraphSignature2012": "sec:GraphSignature2012",
        "LinkedDataSignature2015": "sec:LinkedDataSignature2015",
        "accessControl": {"@id": "perm:accessControl", "@type": "@id"},
        "writePermission": {"@id": "perm:writePermission", "@type": "@id"}
    }
}
# Convenience handles onto pyld's JSON-LD property helpers.
_get_values = jsonld.JsonLdProcessor.get_values
_has_value = jsonld.JsonLdProcessor.has_value


def _get_value(obj, key):
    """Return the first JSON-LD value for *key* on *obj*.

    Raises KeyError when the key has no values — more accurate than the
    bare IndexError we would otherwise get, since we are pulling a value
    out of a specific key and nothing exists for it.
    """
    values = _get_values(obj, key)
    if not values:
        raise KeyError(key)
    return values[0]
def _make_simple_loader(url_map, load_unknown_urls=True,
                        cache_externally_loaded=True):
    """Build a pyld document loader backed by an in-memory url->doc map.

    - url_map: mapping of URL -> already-parsed JSON-LD document.
    - load_unknown_urls: when False, a URL missing from the map raises
      jsonld.JsonLdError instead of being fetched over the network.
    NOTE: cache_externally_loaded is currently unused; fetched documents
    are always cached.
    """
    def _wrap(url, doc):
        # Shape a documentLoader is expected to return.
        return {
            "contextUrl": None,
            "documentUrl": url,
            "document": doc}

    known = {url: _wrap(url, doc) for url, doc in dict(url_map).items()}

    def loader(url):
        if url in known:
            return known[url]
        if not load_unknown_urls:
            raise jsonld.JsonLdError(
                "url not found and loader set to not load unknown URLs.",
                {'url': url})
        doc = jsonld.load_document(url)
        # @@: Is this optimization safe in all cases?
        if isinstance(doc["document"], str):
            doc["document"] = json.loads(doc["document"])
        known[url] = doc
        return doc
    return loader
# Default document loader: serves the bundled security/identity contexts
# from memory and falls back to fetching any other URL over the network.
_security_context_loader = _make_simple_loader(
    {SECURITY_CONTEXT_URL: SECURITY_CONTEXT,
     IDENTITY_CONTEXT_URL: IDENTITY_CONTEXT})
# @@: Shouldn't this be a mapping from these names to their actual
# functionality? Seems kludgy to have all these if-elif-else things
# as interspersed through the document...
# Okay, answer is yes
# TODO: Make these JsonLdErrors
# class LdsError(jsonld.JsonLdError): pass
# class LdsTypeError(LdsError, TypeError): pass
class LdsError(Exception):
    """Base error for linked-data signature signing/verification."""


class LdsTypeError(LdsError, TypeError):
    """An LdsError raised when an option value has the wrong type."""
def is_valid_uri(obj):
    """Best-effort URI check.

    We cannot fully validate, so we do the best check we can: *obj* must
    be a string and must contain at least one ':' character.
    """
    if not isinstance(obj, str):
        return False
    return ":" in obj
def sign(document, options):
    """
    Signs a JSON-LD document using a digital signature.
    - document: the JSON-LD document to be signed (not mutated; a signed
      copy is returned).
    - options: options to use:
        [privateKeyPem] A PEM-encoded private key.
        [creator] the URL to the paired public key.
        [date] an optional date to override the signature date with.
            If provided, must have an "aware" timezone
            (.tzinfo not None)
        [domain] an optional domain to include in the signature.
        [nonce] an optional nonce to include in the signature.
        [algorithm] the algorithm to use, eg: 'GraphSignature2012',
            'LinkedDataSignature2015' (default: 'GraphSignature2012').
    """
    options = copy.deepcopy(options)
    # TODO: The spec says privateKey, but in jsonld-signatures.js there are
    # these two separate fields...
    # Default the signing date to "now" (timezone-aware UTC).
    options["date"] = options.get("date") or datetime.now(pytz.utc)
    options.setdefault("algorithm", "GraphSignature2012")
    if not options["algorithm"] in SUITES:
        raise LdsError(
            ("[jsig.sign] Unsupported algorithm '%s'; options.algorithm must "
             "be one of: %s") % (options["algorithm"], SUITES.keys()))
    suite = SUITES[options["algorithm"]]
    # Validates creator/domain/nonce and stringifies the date;
    # raises LdsTypeError on bad values.
    options = suite.signature_munge_verify_options(options)
    # @@: Do we need this in the sign thing?
    sig_options = {
        "date": options["date"]
    }
    if "nonce" in options:
        sig_options["nonce"] = options["nonce"]
    if "domain" in options:
        sig_options["domain"] = options["domain"]
    # Canonicalize document + signature options into the bytes to sign.
    formatted = suite.format_for_signature(document, sig_options, options)
    sig_val = suite.sign_formatted(formatted, options)
    signature = {
        "@context": SECURITY_CONTEXT_URL,
        "type": options["algorithm"],
        "creator": options["creator"],
        "created": options["date"],
        "signatureValue": sig_val}
    if "domain" in options:
        signature["domain"] = options["domain"]
    if "nonce" in options:
        signature["nonce"] = options["nonce"]
    # Compact the signature node into the document's own context so the
    # attached property name matches the document's vocabulary.
    ctx = _get_values(document, "@context")
    compacted = jsonld.compact(
        {"https://w3id.org/security#signature": signature},
        ctx, options={
            "documentLoader": _security_context_loader})
    del compacted["@context"]
    output = copy.deepcopy(document)
    # @@: Wow, this seems like a terribly kludgy way to get that key,
    # but that's what's done in jsonld-signatures.js. I mean,
    # I guess it should work. I guess this is to avoid that the name may
    # be either expanded or compacted at this point
    signature_key = list(compacted.keys())[0]
    # TODO: support multiple signatures.
    # Same warning as in jsonld-signatures.js! ;P
    # We could put this in the suite option?
    output[signature_key] = compacted[signature_key]
    return output
def _basic_rsa_signature(formatted, options):
    """RSASSA-PSS / SHA-256 sign *formatted* bytes.

    Loads the PEM private key from options["privateKeyPem"] and returns
    the signature base64-encoded as a text string.
    """
    private_key = serialization.load_pem_private_key(
        options["privateKeyPem"],
        password=None,
        backend=default_backend())
    # I'm guessing this is the right padding function...?
    pss_padding = padding.PSS(
        mgf=padding.MGF1(hashes.SHA256()),
        salt_length=padding.PSS.MAX_LENGTH)
    raw_signature = private_key.sign(formatted, pss_padding, hashes.SHA256())
    return base64.b64encode(raw_signature).decode("utf-8")
def _getDataToHash_2012_2015(input, sig_options, options):
    """Assemble the byte string to hash/sign for the 2012/2015 suites.

    GraphSignature2012 concatenates [nonce] + date + input + ["@" + domain];
    LinkedDataSignature2015 prepends "key: value" header lines (sorted
    lexicographically, None values skipped) to the input.
    Returns UTF-8 encoded bytes.
    """
    # TODO: These are two separate algorithms, so we should separate them
    if options["algorithm"] == "GraphSignature2012":
        parts = []
        if "nonce" in sig_options:
            parts.append(sig_options["nonce"])
        parts.append(sig_options["date"])
        parts.append(input)
        if "domain" in sig_options:
            parts.append("@" + sig_options["domain"])
        to_hash = "".join(parts)
    else:
        headers = {
            "http://purl.org/dc/elements/1.1/created": sig_options.get("date"),
            "https://w3id.org/security#domain": sig_options.get("domain"),
            "https://w3id.org/security#nonce": sig_options.get("nonce")}
        # add headers in lexicographical order
        header_lines = ["%s: %s\n" % (key, headers[key])
                        for key in sorted(headers)
                        if headers[key] is not None]
        to_hash = "".join(header_lines) + input
    return to_hash.encode("utf-8")
def _w3c_date(dt):
    """Render *dt* as an ISO 8601 / W3C datetime string in UTC.

    NOTE(review): a naive datetime is converted via astimezone(), which
    interprets it as local time — callers are expected to pass aware
    datetimes (see sign()'s docstring).
    """
    utc = pytz.utc
    if dt.tzinfo is not utc:
        # We may need to convert it to UTC
        dt = dt.astimezone(utc)
    return isodate.datetime_isoformat(dt)
# Verification
def verify(signed_document, options):
    """
    Verifies the digital signature on a JSON-LD document.
    Args:
    - signed_document: the JSON-LD document to be verified.
    - options:
      # TODO: Not all these are implemented yet, and some may be algorithm
      # specific
      Options:
      - publicKey(signature, options): A procedure which, if present, is called
        to retrieve the public key. Must do all validation that ownership
        correctly aligns.
      - checkNonce(nonce, options): a procedure to check if the nonce (null
        if none) used in the signature is valid.
      - checkDomain(domain, options): a procedure to check if the domain used
        (null if none) is valid.
      - checkKey(key, options): a procedure to check if the key used to sign the
        message is trusted.
      - checkKeyOwner(owner, key, options): a procedure to check if the key's
        owner is trusted.
      - checkTimestamp: check signature timestamp (default: false).
      - maxTimestampDelta: signature must be created within a window of
        this many seconds (default: 15 minutes).
      - documentLoader(url): the document loader.
      - id: the ID (full URL) of the node to check the signature of, if
        the input contains multiple signed nodes.

    Returns the suite's verification result (True/False for the RSA
    suites).
    """
    # Shallow copy suffices: options are only read here.
    options = copy.copy(options)
    loader = options.get("documentLoader", _security_context_loader)
    options.setdefault("algorithm", "GraphSignature2012")
    # Here's a TODO copy-pasta'ed from jsonld-signatures.js:
    # TODO: frame before getting signature, not just compact? considerations:
    #   should the assumption be that the signature is on the top-level object
    #   and thus framing is unnecessary?
    compacted = jsonld.compact(
        signed_document, SECURITY_CONTEXT_URL, options={
            "documentLoader": loader})
    try:
        signature = _get_values(compacted, "signature")[0]
    except IndexError:
        raise LdsError('[jsigs.verify] No signature found.')
    try:
        suite_name = _get_values(signature, "type")[0]
    except IndexError:
        suite_name = ""
    if not suite_name in SUITES:
        raise LdsError(
            ("[jsigs.verify] Unsupported signature algorithm \"%s\"; "
             "supported algorithms are: %s") % (suite_name,
                                                SUITES.keys()))
    suite = SUITES[suite_name]
    # TODO: Should we be framing here? According to my talks with Dave Longley
    # we probably should, though I don't know how well pyld supports framing
    # and I need to wrap my head around it better
    # @@: So here we have to extract the signature
    # @@: 3 before 1 and 2? Well we need it in 1 and 2 :P
    # SPEC (3): Remove any signature nodes from the default graph in
    # document and save it as signature.
    # @@: This isn't recursive, should it be? Also it just handles
    # one value for now.
    # SPEC (2): Let document be a copy of signed document.
    document = copy.deepcopy(compacted)
    signature = document.pop("signature")
    # SPEC (1): Get the public key by dereferencing its URL identifier
    # in the signature node of the default graph of signed document.
    # @@: Rest of SPEC(1) in _get_public_key
    get_public_key = options.get("publicKey", _get_public_key)
    public_key = get_public_key(signature, options)
    # SPEC (5): Create a value tbv that represents the data to be
    # verified, and set it to the result of running the Create Verify
    # Hash Algorithm, passing the information in signature.
    # TODO: This doesn't look like the same verification step
    # being done in the signature step as ported from jsonld-signatures.js
    # It looks like what step we do here should be farmed out depending
    # on the signature suite used.
    # @@: Maybe sig_options should be munged by the suite?
    sig_options = {}
    if "publicKeyPem" in public_key:
        sig_options["publicKeyPem"] = _get_value(public_key, "publicKeyPem")
    if "publicKeyWif" in public_key:
        sig_options["publicKeyWif"] = _get_value(public_key, "publicKeyWif")
    if "nonce" in signature:
        sig_options["nonce"] = _get_value(signature, "nonce")
    if "domain" in signature:
        sig_options["domain"] = _get_value(signature, "domain")
    # @@: Why isn't this also "created"?
    sig_options["date"] = _get_value(signature, "created")
    tbv = suite.format_for_signature(document, sig_options, options)
    # SPEC (6): Pass the signatureValue, tbv, and the public key to
    # the signature algorithm (e.g. JSON Web Signature using
    # RSASSA-PKCS1-v1_5 algorithm). Return the resulting boolean
    # value.
    return suite.verify_formatted(signature, tbv, public_key, options)
def _get_public_key(signature, options):
    """Dereference and validate the public key named by
    signature["creator"].

    Fetches the creator document, follows its "publicKey" link, checks
    the bidirectional owner link back to the creator, and runs the
    optional options["checkKeyOwner"] hook.  Raises LdsError on any
    failure; returns the security-compacted public key document.
    """
    def _id_of(obj):
        # Accept either a bare IRI string or a node object ("@id"/"id").
        if isinstance(obj, str):
            return obj
        return obj.get("@id") or obj.get("id")
    creator_id = _id_of(_get_value(signature, "creator"))
    if not creator_id:
        raise LdsError(
            '[jsigs.verify] creator not found on signature.')
    creator = _get_security_compacted_jsonld(creator_id, options)
    if not "publicKey" in creator:
        raise LdsError(
            '[jsigs.verify] publicKey not found on creator object')
    # @@: What if it's a fragment identifier on an embedded object?
    public_key_id = _get_value(creator, "publicKey")
    public_key = _get_security_compacted_jsonld(
        public_key_id, options)
    owners = _get_values(public_key, "owner")
    # SPEC (1): Confirm that the linked data document that describes
    # the public key specifies its owner and that its owner's URL
    # identifier can be dereferenced to reveal a bi-directional link
    # back to the key.
    if not creator_id in owners:
        raise LdsError(
            '[jsigs.verify] The public key is not owned by its declared owner.')
    # SPEC (1): Ensure that the key's owner is a trusted entity before
    # proceeding to the next step.
    check_key_owner = options.get("checkKeyOwner")
    if check_key_owner and not check_key_owner(signature, public_key, options):
        raise LdsError(
            '[jsigs.verify] The owner of the public key is not trusted.')
    return public_key
def _security_compact(document, options):
    """Compact *document* against the security-v1 context."""
    jsonld_options = {
        "documentLoader": options.get("documentLoader",
                                      _security_context_loader)}
    return jsonld.compact(document, SECURITY_CONTEXT_URL,
                          options=jsonld_options)
def _get_jsonld(id, options):
    """Fetch the JSON-LD document identified by *id*.

    - id: a URL string, or a node object whose "id"/"@id" holds the URL.
    - options: may carry a "documentLoader"; defaults to the module's
      security-context loader.
    Raises ValueError when no usable identifier can be extracted.
    """
    requested = id  # keep the original argument for error reporting
    if isinstance(id, dict):
        id = id.get("id") or id.get("@id")
    if not id:
        # Bug fix: the original formatted `id` here, but by this point it
        # has been overwritten with the missing/falsy extracted value, so
        # the message never showed the offending object.
        raise ValueError("Tried to fetch object with no id: %s" % (requested,))
    loader = options.get("documentLoader", _security_context_loader)
    return loader(id)["document"]
def _get_security_compacted_jsonld(id, options):
    # Fetch the document at `id` and compact it against the security
    # context in one step.
    return _security_compact(_get_jsonld(id, options), options)
# TODO: Are we actually passing in multiple algorithms for message
# canonicalization *and* message digest?
def create_verify_hash(document, suite, options,
                       options_to_canonicalize):
    """
    The "Create Verify Hash" algorithm from the Linked Data Signatures
    specification: canonicalize the signature options and the document,
    digest each, then digest the concatenation.

    NOTE(review): no suite in this module currently defines
    message_digest — confirm before relying on this function.
    """
    normalized_input = suite.normalize_jsonld(document, options)
    # SPEC (1): Let options be a copy of input options.
    options_to_canonicalize = copy.deepcopy(options_to_canonicalize)
    # SPEC (2): If type, id, or signatureValue exists in options,
    # remove the entry.
    # @@: Well since we're specifically passing these in to this procedure
    # I guess we don't need to do that...
    # SPEC (3): If created does not exist in options, add an entry
    # with a value that is an ISO8601 combined date and time string
    # containing the current date and time accurate to at least one
    # second, in Universal Time Code format. For example:
    # 2017-11-13T20:21:34Z.
    if not "created" in options_to_canonicalize:
        options_to_canonicalize["created"] = _w3c_date(datetime.now(pytz.utc))
    # SPEC (4): Generate output by:
    # SPEC (4.1): Creating a canonicalized options document by
    # canonicalizing options according to the canonicalization
    # algorithm (e.g. the GCA2015 [RDF-DATASET-NORMALIZATION]
    # algorithm).
    # Well, we need to add the context first:
    options_to_canonicalize["@context"] = SECURITY_CONTEXT_URL
    canonical_options = suite.normalize_jsonld(
        options_to_canonicalize, options)
    # SPEC (4.2): Hash canonicalized options document using the
    # message digest algorithm (e.g. SHA-256) and set output to the
    # result.
    output = suite.message_digest(canonical_options, options)
    # SPEC (4.3): Hash canonicalized document using the message digest
    # algorithm (e.g. SHA-256) and append it to output.
    output += suite.message_digest(normalized_input, options)
    # SPEC (5): Hash output using the message digest algorithm
    # (e.g. SHA-256) and replace it with the result.
    output = suite.message_digest(output, options)
    # SPEC (6): Return output.
    return output
def _rsa_verify_sig(sig_value, formatted, public_key_jsonld):
    """
    Verify an RSASSA-PSS / SHA-256 signature.

    - sig_value: the base64-encoded signature string to check
    - formatted: the bytes that were allegedly signed ("to be verified")
    - public_key_jsonld: JSON-LD object carrying the signer's
      "publicKeyPem"
    Returns True when the signature matches, False otherwise.
    (Docstring fixed: the original mislabeled these parameters.)
    """
    # TODO: Support other formats than just PEM
    public_key = serialization.load_pem_public_key(
        _get_value(public_key_jsonld, "publicKeyPem").encode("utf-8"),
        backend=default_backend())
    try:
        public_key.verify(
            base64.b64decode(sig_value.encode("utf-8")), formatted,
            padding.PSS(
                mgf=padding.MGF1(hashes.SHA256()),
                salt_length=padding.PSS.MAX_LENGTH),
            hashes.SHA256())
        return True
    except InvalidSignature:
        return False
# In the future, we'll be doing a lot more work based on what suite is
# selected.
def signature_common_munge_verify(options):
    """Validate and normalize the options shared by all suites.

    Checks creator (must be a URI string), domain/nonce (must be plain
    strings), and stringifies a datetime "date" via _w3c_date.
    Raises LdsTypeError on invalid values; returns the options dict.
    """
    if not is_valid_uri(options["creator"]):
        raise LdsTypeError(
            "[jsig.sign] options.creator must be a URL string.")
    # Bug fix: domain and nonce only need to be strings — the error text
    # already said so, and jsonld-signatures.js checks `typeof ===
    # 'string'`.  Requiring a ':' (as is_valid_uri does) would wrongly
    # reject ordinary domains such as "example.org" and hex nonces.
    if "domain" in options and not isinstance(options["domain"], str):
        raise LdsTypeError(
            "[jsig.sign] options.domain must be a string.")
    if "nonce" in options and not isinstance(options["nonce"], str):
        raise LdsTypeError(
            "[jsig.sign] options.nonce must be a string.")
    if not isinstance(options["date"], str):
        options["date"] = _w3c_date(options["date"])
    return options
class SignatureSuite():
    """Abstract base for a linked-data signature algorithm ("suite").

    Subclasses set `name` (the value of a signature's "type") and
    override the classmethods below; see GraphSignature2012.
    """
    name = None

    @classmethod
    def signature_munge_verify_options(cls, options):
        # Shared option validation; suites may extend this.
        options = signature_common_munge_verify(options)
        return options

    @classmethod
    def normalize_jsonld(cls, document, options):
        """Canonicalize *document* to N-Quads text."""
        raise NotImplementedError()

    @classmethod
    def format_for_signature(cls, document, sig_options, options):
        """Produce the exact bytes to be signed/verified."""
        raise NotImplementedError()

    @classmethod
    def sign_formatted(cls, formatted, options):
        """Sign the formatted bytes; return an encoded signature value."""
        raise NotImplementedError()

    @classmethod
    def verify_formatted(cls, signature, formatted, public_key_jsonld,
                         options):
        """Check *formatted* against the signature node's value.

        Bug fix: the original stub took only (formatted, options), which
        matched neither the call in verify() — suite.verify_formatted(
        signature, tbv, public_key, options) — nor the implementation in
        GraphSignature2012; a suite missing an override would die with a
        TypeError instead of NotImplementedError.
        """
        raise NotImplementedError()
def _format_gs_2012_ld_2015(suite, document, sig_options, options):
    """Shared format_for_signature body for the 2012/2015 suites.

    Canonicalizes *document* with the suite's normalizer and hands the
    result to _getDataToHash_2012_2015.  An empty canonicalization is
    treated as an error (usually a missing "@context").
    """
    normalized = suite.normalize_jsonld(document, options)
    if not normalized:
        raise LdsError(
            ('[jsig.sign] '
             'The data to sign is empty. This error may be because a '
             '"@context" was not supplied in the input thereby causing '
             'any terms or prefixes to be undefined. '
             'Input: %s') % (json.dumps(document)))
    return _getDataToHash_2012_2015(normalized, sig_options, options)
class GraphSignature2012(SignatureSuite):
    """GraphSignature2012: URGNA2012 canonicalization + RSA-PSS/SHA-256."""
    name = "GraphSignature2012"

    @classmethod
    def format_for_signature(cls, document, sig_options, options):
        return _format_gs_2012_ld_2015(cls, document, sig_options, options)

    @classmethod
    def normalize_jsonld(cls, document, options):
        # Consistency fix: the first parameter was mis-named `self` even
        # though this is a @classmethod; renamed to `cls` to match every
        # other method here (no caller-visible change).
        return jsonld.normalize(
            document,
            {"algorithm": "URGNA2012",
             "format": "application/nquads",
             "documentLoader": options.get("documentLoader",
                                           _security_context_loader)})

    @classmethod
    def sign_formatted(cls, formatted, options):
        return _basic_rsa_signature(formatted, options)

    @classmethod
    def verify_formatted(cls, signature, formatted, public_key_jsonld,
                         options):
        return _rsa_verify_sig(
            _get_value(signature, "signatureValue"),
            formatted, public_key_jsonld)
class LinkedDataSignature2015(SignatureSuite):
    """LinkedDataSignature2015: URDNA2015 canonicalization + RSA-PSS/SHA-256."""
    name = "LinkedDataSignature2015"

    @classmethod
    def normalize_jsonld(cls, document, options):
        # NOTE(review): unlike GraphSignature2012 this does not forward
        # options["documentLoader"] to jsonld.normalize — confirm whether
        # that is intentional before changing it.
        return jsonld.normalize(
            document, {"algorithm": "URDNA2015",
                       "format": "application/nquads"})

    @classmethod
    def format_for_signature(cls, document, sig_options, options):
        return _format_gs_2012_ld_2015(cls, document, sig_options, options)

    @classmethod
    def sign_formatted(cls, formatted, options):
        return _basic_rsa_signature(formatted, options)

    @classmethod
    def verify_formatted(cls, signature, formatted, public_key_jsonld,
                         options):
        # Bug fix: this override was missing, so verify() on a
        # LinkedDataSignature2015 document fell through to the abstract
        # base and failed.  Signing uses the same RSA scheme as
        # GraphSignature2012, so verification mirrors it.
        return _rsa_verify_sig(
            _get_value(signature, "signatureValue"),
            formatted, public_key_jsonld)
class EcdsaKoblitzSignature2016(SignatureSuite):
    """Koblitz-curve ECDSA suite — option checks only.

    Signing/verification are not implemented and the suite is left out
    of the SUITES registry below.
    """
    name = "EcdsaKoblitzSignature2016"

    @classmethod
    def signature_munge_verify_options(cls, options):
        options = signature_common_munge_verify(options)
        # Bug fix: the original called isinstance() with a single
        # argument — isinstance(options.get("privateKeyWif", str)) — and
        # therefore raised TypeError unconditionally.  Restored semantics
        # (mirroring jsonld-signatures.js): when privateKeyWif is
        # supplied it must be a string; otherwise a privateKeyPem string
        # is required.  TODO(review): confirm against the JS library.
        if "privateKeyWif" in options:
            if not isinstance(options["privateKeyWif"], str):
                raise LdsTypeError(
                    "[jsig.sign] options.privateKeyWif must be a base 58 "
                    "formatted string.")
        elif not isinstance(options.get("privateKeyPem"), str):
            raise LdsTypeError(
                "[jsig.sign] options.privateKeyPem must be a PEM "
                "formatted string.")
        return options
class LinkedDataSignature2016(SignatureSuite):
    # Placeholder suite: inherits every operation from SignatureSuite
    # (all NotImplementedError) and is not registered in SUITES below.
    name = "LinkedDataSignature2016"
# Registry mapping suite names (as they appear in a signature's "type"
# and in options["algorithm"]) to their implementing classes.
# EcdsaKoblitzSignature2016 stays disabled until it is implemented.
SUITES = {
    s.name: s
    for s in [GraphSignature2012,
              LinkedDataSignature2015,
              # EcdsaKoblitzSignature2016,
              ]}
| {
"repo_name": "Spec-Ops/pyld-signatures",
"path": "pyld_sig/__init__.py",
"copies": "1",
"size": "28940",
"license": "bsd-3-clause",
"hash": -3593398744881655300,
"line_mean": 37.535286285,
"line_max": 80,
"alpha_frac": 0.6350034554,
"autogenerated": false,
"ratio": 3.816934845687154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49519383010871537,
"avg_score": null,
"num_lines": null
} |
# An implementation of the Longest Common Subsequence Problem
# Takes two command line arguments, the two sequences, and prints out the
# length of their longest common subsequence
from copy import deepcopy
import sys
def lcs(prev, current, a, b):
    """Return the length of the longest common subsequence of a and b.

    Classic two-row dynamic programming: `prev` and `current` are
    caller-allocated, zero-filled scratch rows of length len(b) + 1;
    after processing row i, current[j] holds the LCS length of a[:i]
    and b[:j].  Prints the result and returns it.
    """
    rows, columns = len(a), len(b)
    for i in range(1, rows + 1):
        for j in range(1, columns + 1):  # column 0 stays 0 (empty prefix)
            if a[i - 1] == b[j - 1]:  # decrement since the table is 1-based
                current[j] = prev[j - 1] + 1
            else:
                current[j] = max(prev[j], current[j - 1])
        # A shallow copy suffices for a flat list of ints; the original
        # deepcopy was needless work.  Per-iteration debug prints removed.
        prev = current[:]
    print("Max value is", current[columns])
    return current[columns]
# Script entry: expects exactly two command line arguments, the two
# sequences to compare.
if len(sys.argv) != 3:
    print("There can only be two arguments")
    quit()  # may be unsafe
# Initialize two rows of the array, our previous, and the one we are using.
# Rows correspond to sys.argv[1], columns to sys.argv[2]; both rows are
# sized len(second argument) + 1 for the zero-filled "perimeter" column.
prev = [0 for i in range(len(sys.argv[2]) + 1)]
current = [0 for i in range(len(sys.argv[2]) + 1)]
lcs(prev, current, sys.argv[1], sys.argv[2])
| {
"repo_name": "pybae/etc",
"path": "Algorithms/lcs.py",
"copies": "1",
"size": "1204",
"license": "mit",
"hash": 2470287357003485700,
"line_mean": 35.4848484848,
"line_max": 75,
"alpha_frac": 0.6411960133,
"autogenerated": false,
"ratio": 3.4597701149425286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46009661282425285,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the model from:
Model-based dissection of CD95 signaling dynamics reveals both a pro- and
antiapoptotic role of c-FLIPL. Fricker N, Beaudouin J, Richter P, Eils R,
Krammer PH, Lavrik IN. J Cell Biol. 2010 Aug 9;190(3):377-89.
doi:10.1083/jcb.201002060
http://jcb.rupress.org/content/190/3/377.long
Implemented by: Jeremie Roux, Will Chen, Jeremy Muhlich
"""
from __future__ import print_function
from pysb import *
# Instantiate the (implicitly global) PySB model; subsequent Parameter/
# Monomer/Rule calls register themselves on it.
Model()

# Non-zero initial conditions (in molecules per cell):
Parameter('L_0', 1500e3);  # baseline level of ligand for most experiments (corresponding to 50 ng/ml SuperKiller TRAIL)
Parameter('pR_0', 170.999e3);  # TRAIL receptor (for experiments not involving siRNA)
Parameter('FADD_0', 133.165e3);
Parameter('flipL_0', 0.49995e3);  # FlipL 1X = 0.49995e3
Parameter('flipS_0', 0.422e3);  # Flip
Parameter('pC8_0', 200.168e3);  # procaspase-8 (pro-C8)
Parameter('Bid_0', 100e3);  # Bid

# Monomers: sites b/rf/fe/ee are binding sites; D384 and D400 are
# cleavage-state sites ('U' uncleaved, 'C' cleaved).
Monomer('L', ['b'])
Monomer('pR', ['b', 'rf'])
Monomer('FADD', ['rf', 'fe'])
Monomer('flipL', ['b', 'fe', 'ee', 'D384'],
        {'D384': ['U', 'C']}
        )
Monomer('flipS', ['b', 'fe', 'ee'])
Monomer('pC8', ['fe', 'ee', 'D384', 'D400'],
        {'D384': ['U', 'C'],
         'D400': ['U', 'C']}
        )
Monomer('Bid')  # called "apoptosis substrate" in Lavrik's model
Monomer('tBid')
flip_monomers = (flipL, flipS);
# --- Receptor/DISC assembly rules ---

# L + R <--> L:R
Parameter('kf1', 70.98e-03)  # 70.98e-03
Parameter('kr1', 0)
Rule('R_L_Binding', L(b=None) + pR(b=None, rf=None) >> L(b=1) % pR(b=1, rf=None), kf1)

# FADD binds
Parameter('kf29', 84.4211e-03)  # 84.4211e-03
Rule('RL_FADD_Binding', pR(b=ANY, rf=None) + FADD(rf=None, fe=None) >> pR(b=ANY, rf=2) % FADD(rf=2, fe=None), kf29)

# C8 binds to L:R:FADD
Parameter('kf30', 3.19838e-03)  # 3.19838e-03
Parameter('kr30', 0.1)  # 0.1
Rule('RLFADD_C8_Binding', FADD(rf=ANY, fe=None) + pC8(fe=None, ee=None, D384='U') | FADD(rf=ANY, fe=1) % pC8(fe=1, ee=None, D384='U'), kf30, kr30)

# FLIP(variants) bind to L:R:FADD
Parameter('kf31', 69.3329e-03)
Parameter('kr31', 0.0)
Parameter('kf32', 69.4022e-03)
Parameter('kr32', 0.08)
# FIXME: this pattern requires a dummy kr31 which is ultimately ignored
# (flipL binding is irreversible, so its reverse rate is stripped below).
for flip_m, kf, kr, reversible in (zip(flip_monomers, (kf31, kf32), (kr31, kr32), (False, True))):
    rule = Rule('RLFADD_%s_Binding' % flip_m.name, FADD(rf=ANY, fe=None) + flip_m(fe=None, ee=None) | FADD(rf=ANY, fe=1) % flip_m(fe=1, ee=None), kf, kr)
    if reversible is False:
        # Downgrade the reversible rule to a forward-only one.
        rule.is_reversible = False
        rule.rule_expression.is_reversible = False
        rule.rate_reverse = None

# Reusable complex patterns: pC8/pC8 homodimers and pC8/flipL
# heterodimers, in uncleaved (pC8_*) and D384-cleaved (p43_*) forms.
pC8_HomoD = pC8(fe=ANY, ee=1, D384='U') % pC8(fe=ANY, ee=1, D384='U')
pC8_HeteroD = pC8(fe=ANY, ee=1, D384='U') % flipL(fe=ANY, ee=1, D384='U')
p43_HomoD = pC8(fe=ANY, ee=1, D384='C') % pC8(fe=ANY, ee=1, D384='C')
p43_HeteroD = pC8(fe=ANY, ee=1, D384='C') % flipL(fe=ANY, ee=1, D384='C')

# L:R:FADD:C8 dimerizes
Parameter('kf33', 2.37162)
Parameter('kr33', 0.1)
Parameter('kc33', 1e-05)
Rule('RLFADD_C8_C8_Binding', pC8(fe=ANY, ee=None, D384='U') + pC8(fe=ANY, ee=None, D384='U') | pC8_HomoD, kf33, kr33)

# L:R:FADD:C8 L:R:FADD:FLIP(variants) dimerizes
Parameter('kf34', 4.83692)
Parameter('kr34', 0)
Parameter('kf35', 2.88545)
Parameter('kr35', 1)
# FIXME: this pattern requires a dummy kr34 which is ultimately ignored
# (the flipL dimerization is irreversible; see the loop below).
for flip_m, kf, kr, reversible in (zip(flip_monomers, (kf34, kf35), (kr34, kr35), (False, True))):
    rule = Rule('RLFADD_C8_%s_Binding' % flip_m.name, pC8(fe=ANY, ee=None, D384='U') + flip_m(fe=ANY, ee=None) | pC8(fe=ANY, ee=1, D384='U') % flip_m(fe=ANY, ee=1), kf, kr)
    if reversible is False:
        rule.is_reversible = False
        rule.rule_expression.is_reversible = False
        rule.rate_reverse = None
Parameter('kc36', 0.223046e-3)
#Homodimer catalyses Homodimer ?: no p18 is released. Only this "cleaved" p43 homoD is the product that will transform into a p18 + L:R:FADD in later reaction.
Rule('HomoD_cat_HomoD', pC8_HomoD + pC8_HomoD >> pC8_HomoD + p43_HomoD, kc36)
#Homodimer catalyses Heterodimer ?????
Rule('HomoD_cat_HeteroD', pC8_HomoD + pC8_HeteroD >> pC8_HomoD + p43_HeteroD, kc36)
Parameter('kc37', 0.805817e-3)
#Heterodimer catalyses Heterodimer ?????
Rule('HeteroD_cat_HeteroD', pC8_HeteroD + pC8_HeteroD >> pC8_HeteroD + p43_HeteroD, kc37)
#Heterodimer catalyses Homodimer ?????
Rule('HeteroD_cat_HomoD', pC8_HeteroD + pC8_HomoD >> pC8_HeteroD + p43_HomoD, kc37)
Parameter('kc38', 1.4888e-3)
#Cleaved Homodimer catalyses Homodimer ?????
Rule('Cl_HomoD_cat_HomoD', p43_HomoD + pC8_HomoD >> p43_HomoD + p43_HomoD, kc38)
#Cleaved HomoD catalyses Heterodimer ?????
# Cleaved (p43) homodimer catalyses cleavage within a procaspase-8/FLIP-L heterodimer.
Rule('Cl_HomoD_cat_HeteroD', p43_HomoD + pC8_HeteroD >> p43_HomoD + p43_HeteroD, kc38)
Parameter('kc39', 13.098e-3)
# Cleaved heterodimer catalyses cleavage of a homodimer.
# NOTE(review): '?????' in the original marked unverified biology -- confirm
# against the Fricker 2010 supplement (reaction Rp43heterodimerCleaveC8homodimer).
Rule('Cl_heteroD_cat_HomoD', p43_HeteroD + pC8_HomoD >> p43_HeteroD + p43_HomoD, kc39)
# Cleaved heterodimer catalyses cleavage of another heterodimer (same rate kc39).
# NOTE(review): unverified in the original ('?????').
Rule('Cl_heteroD_cat_HeteroD', p43_HeteroD + pC8_HeteroD >> p43_HeteroD + p43_HeteroD, kc39)
# Cleaved homodimer catalyses a second cleavage (D400) of another cleaved
# homodimer, producing p18 (D384='C', D400='C') and releasing the two FADD
# molecules (fe=None) from the DISC.
Parameter('kc40', 0.999273e-3)
Rule('Cl_HomoD_cat_Cl_HomoD', pC8 (fe=ANY, ee=1, D384='C', D400='U') % pC8 (fe=ANY, ee=1, D384='C', D400='U') +
FADD (rf=ANY, fe=2) % pC8 (fe=2, ee=3, D384='C', D400='U') % FADD (rf=ANY, fe=4) % pC8 (fe=4, ee=3, D384='C', D400='U') >>
pC8 (fe=ANY, ee=1, D384='C', D400='U') % pC8 (fe=ANY, ee=1, D384='C', D400='U') +
FADD (rf=ANY, fe=None) + FADD (rf=ANY, fe=None) + pC8 (fe=None, ee=1, D384='C',D400='C') % pC8 (fe=None, ee=1, D384='C',D400='C'),
kc40)
# Cleaved heterodimer catalyses the same D400 cleavage of a cleaved homodimer,
# also yielding p18 and freeing the FADD molecules.
Parameter('kc41', 0.982109e-3)
Rule('Cl_HeteroD_cat_Cl_HomoD', pC8 (fe=ANY, ee=1, D384='C', D400='U') % flipL (fe=ANY, ee=1, D384='C') +
FADD (rf=ANY, fe=2) % pC8 (fe=2, ee=3, D384='C', D400='U') % FADD (rf=ANY, fe=4) % pC8 (fe=4, ee=3, D384='C', D400='U') >>
pC8 (fe=ANY, ee=1, D384='C', D400='U') % flipL (fe=ANY, ee=1, D384='C') +
FADD (rf=ANY, fe=None) + FADD (rf=ANY, fe=None) + pC8 (fe=None, ee=1, D384='C',D400='C') % pC8 (fe=None, ee=1, D384='C',D400='C'),
kc41)
# Cleaved homodimer cleaves Bid to tBid (apoptosis substrate in the paper).
# NOTE(review): unverified in the original ('?????').
Parameter('kc42', 0.0697394e-3)
Rule('Cl_Homo_cat_Bid', pC8 (fe=ANY, ee=1, D384='C', D400='U') % pC8 (fe=ANY, ee=1, D384='C', D400='U') + Bid () >>
pC8 (fe=ANY, ee=1, D384='C', D400='U') % pC8 (fe=ANY, ee=1, D384='C', D400='U') + tBid (), kc42)
# Cleaved heterodimer cleaves Bid to tBid.
# NOTE(review): unverified in the original ('?????').
Parameter('kc43', 0.0166747e-3)
Rule('Cl_Hetero_cat_Bid', pC8 (fe=ANY, ee=1, D384='C', D400='U') % flipL (fe=ANY, ee=1, D384='C') + Bid () >>
pC8 (fe=ANY, ee=1, D384='C', D400='U') % flipL (fe=ANY, ee=1, D384='C') + tBid (), kc43)
# p18 (fully cleaved caspase-8 dimer) cleaves Bid to tBid.
# NOTE(review): unverified in the original ('?????').
Parameter('kc44', 0.0000479214e-3)
Rule('p18_Bid_cat', pC8 (fe=None, ee=1, D384='C',D400='C') % pC8 (fe=None, ee=1, D384='C',D400='C') + Bid () >>
pC8 (fe=None, ee=1, D384='C',D400='C') % pC8 (fe=None, ee=1, D384='C',D400='C') + tBid (), kc44)
# Observables reproduced in Fig 4B of the paper.
Observable('p18', pC8(fe=None, ee=1, D384='C',D400='C') % pC8(fe=None, ee=1, D384='C',D400='C'))
Observable('tBid_total', tBid() )
# generate initial conditions from _0 parameter naming convention
for monomer in model.monomers:
    initial_amount = model.parameters.get('%s_0' % monomer.name, None)
    if initial_amount is None:
        # No "<name>_0" parameter declared for this monomer -- no initial condition.
        continue
    # Default every site: the first declared state for state sites, unbound (None)
    # for plain binding sites.
    site_config = {
        site: (monomer.site_states[site][0] if site in monomer.site_states else None)
        for site in monomer.sites
    }
    Initial(monomer(site_config), initial_amount)
####
# Diagnostic entry point: prints the module docstring and the assembled model.
if __name__ == '__main__':
    print(__doc__, "\n", model)
    print("""
NOTE: This model code is designed to be imported and programatically
manipulated, not executed directly. The above output is merely a
diagnostic aid.""")
####
# some of the model definition from the supplemental materials, for reference:
# ********** MODEL STATES
# %% Protein amounts are given in thousand molecules per cell.
# CD95L(0) = 1,500%% amount ligand
# CD95R(0) = 170.999%% amount CD95
# FADD(0) = 133.165%% amount FADD
# C8(0) = 200.168%% amount Procaspase-8
# FL(0) = 0.49995%% amount FLIP-Long
# FS(0) = 0.422%% amount FLIP-Short
# CD95RL(0) = 0%% amount of CD95-CD95L complexes
# CD95FADD(0) = 0%% amount of CD95-FADD complexes
# FADDC8(0) = 0%% amount Procaspase-8 bound to FADD
# FADDFL(0) = 0%% amount c-FLIPL bound to FADD
# FADDFS(0) = 0%% amount c-FLIPS bound to FADD
# C8heterodimer(0) = 0%% amount Procaspase-8/c-FLIPL heterodimers
# C8homodimer(0) = 0%% amount Procaspase-8 homodimers
# C8FSdimer(0) =0%% amount Procaspase-8/c-FLIPS heterodimers
# p43heterodimer(0) = 0%% amount p43/p41-Procaspase-8/p43-FLIP heterodimers
# p43homodimer(0) = 0%% amount p43/p41-Procaspase-8 homodimers
# p18(0)=0%% amount p18 formed
# apoptosissubstrate(0)=100
# cleavedsubstrate(0) = 0%% amount cleaved apoptosis substrate
# ********** MODEL VARIABLES
# p18total = 2 x p18
# p43Casp8total = 2 x p43homodimer + p43heterodimer
# procaspase8total = C8 + FADDC8 + C8heterodimer + 2 x C8homodimer + C8FSdimer
# c8total = p43Casp8total + procaspase8total + 2 x p18
# cleavedC8 = c8total - procaspase8total
# celldeath = cleavedsubstrate / 0.10875%% Model readout: percentage of dead cells
# ********** MODEL REACTIONS
# RCD95LBindCD95R = 7.0980e-002 x CD95L x CD95R
# RFADDBindCD95RL = 0.0844211 x CD95RL x FADD
# RC8BindCD95FADD = 0.00319838 x CD95FADD x C8
# RFLBindCD95FADD = 0.0693329 x CD95FADD x FL
# RFSBindCD95FADD = 0.0694022 x CD95FADD x FS
# RFADDC8Dissociate = 0.1 x FADDC8
# RFADDFSDissociate = 0.08 x FADDFS
# RFADDC8BindFADDC8 = 1.18581 x FADDC8 x FADDC8
# RFADDFLBindFADDC8 = 4.83692 x FADDC8 x FADDFL
# RFADDFSBindFADDC8 = 2.88545 x FADDC8 x FADDFS
# RC8FSdimerDissociate = 1 x C8FSdimer
# RC8homodimerDissociate = 0.1 x C8homodimer
# RC8homodimerCleaveC8homodimer = 0.000223046 x C8homodimer x C8homodimer
# RC8homodimerCleaveC8heterodimer = 0.000223046 x C8homodimer x C8heterodimer
# RC8heterodimerCleaveC8heterodimer = 0.000805817 x C8heterodimer x C8heterodimer
# RC8heterodimerCleaveC8homodimer = 0.000805817 x C8heterodimer x C8homodimer
# Rp43homodimerCleaveC8homodimer = 0.0014888 x p43homodimer x C8homodimer
# Rp43homodimerCleaveC8heterodimer = 0.0014888 x p43homodimer x C8heterodimer
# Rp43heterodimerCleaveC8homodimer = 0.013098 x p43heterodimer x C8homodimer
# Rp43heterodimerCleaveC8heterodimer = 0.013098 x p43heterodimer x C8heterodimer
# Rp43homodimerCleavep43homodimer = 0.000999273 x p43homodimer x p43homodimer
# Rp43heterodimerCleavep43homodimer = 0.000982109 x p43heterodimer x p43homodimer
# Rp43heterodimerCleaveApoptosisSubstrate = 1.66747e-005 x p43heterodimer x apoptosissubstrate
# Rp43homodimerCleaveApoptosisSubstrate = 6.97394e-005 x p43homodimer x apoptosissubstrate
# Rp18CleaveApoptosisSubstrate = 4.79214e-08 x p18 x apoptosissubstrate
| {
"repo_name": "pysb/pysb",
"path": "pysb/examples/fricker_2010_apoptosis.py",
"copies": "5",
"size": "10795",
"license": "bsd-2-clause",
"hash": -4203956628892056000,
"line_mean": 44.1673640167,
"line_max": 176,
"alpha_frac": 0.6638258453,
"autogenerated": false,
"ratio": 2.2057621577441764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0132555188800874,
"num_lines": 239
} |
"""An implementation of the OpenID Provider Authentication Policy
Extension 1.0, Draft 5
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
from __future__ import unicode_literals
import re
import warnings
import six
from openid.extension import Extension
__all__ = [
'Request',
'Response',
'ns_uri',
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
'LEVELS_NIST',
'LEVELS_JISA',
]
# Namespace URI identifying the PAPE extension in OpenID messages.
ns_uri = "http://specs.openid.net/extensions/pape/1.0"
# Authentication policy URIs defined by the PAPE specification.
AUTH_MULTI_FACTOR_PHYSICAL = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
    'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
# Special "no policies" URI; sent when a Response carries no auth policies.
AUTH_NONE = \
    'http://schemas.openid.net/pape/policies/2007/06/none'
# Matches RFC3339 UTC timestamps of the exact form YYYY-MM-DDThh:mm:ssZ.
TIME_VALIDATOR = re.compile(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
# Well-known authentication level namespace URIs (get default aliases below).
LEVELS_NIST = 'http://csrc.nist.gov/publications/nistpubs/800-63/SP800-63V1_0_2.pdf'
LEVELS_JISA = 'http://www.jisa.or.jp/spec/auth_level.html'
class PAPEExtension(Extension):
    """Shared base for PAPE Request and Response.

    Maintains the mapping from namespace aliases to authentication level
    URIs used when (de)serializing ``auth_level.*`` message arguments.
    """
    # Well-known auth level URIs that always have a stable alias.
    _default_auth_level_aliases = {
        'nist': LEVELS_NIST,
        'jisa': LEVELS_JISA,
    }

    def __init__(self):
        # Copy so per-instance additions never mutate the class-level default.
        self.auth_level_aliases = self._default_auth_level_aliases.copy()

    def _addAuthLevelAlias(self, auth_level_uri, alias=None):
        """Add an auth level URI alias to this request.

        @param auth_level_uri: The auth level URI to send in the
            request.
        @param alias: The namespace alias to use for this auth level
            in this message. May be None if the alias is not
            important.
        @raises KeyError: if the alias is already bound to a different
            auth level URI.
        """
        if alias is None:
            try:
                alias = self._getAlias(auth_level_uri)
            except KeyError:
                alias = self._generateAlias()
        else:
            existing_uri = self.auth_level_aliases.get(alias)
            if existing_uri is not None and existing_uri != auth_level_uri:
                # Bug fix: the original passed the format string and its
                # arguments as separate KeyError args (logging style), so the
                # %r placeholders were never interpolated into the message.
                raise KeyError('Attempting to redefine alias %r from %r to %r'
                               % (alias, existing_uri, auth_level_uri))
        self.auth_level_aliases[alias] = auth_level_uri

    def _generateAlias(self):
        """Return an unused auth level alias"""
        for i in range(1000):
            alias = 'cust%d' % (i,)
            if alias not in self.auth_level_aliases:
                return alias
        raise RuntimeError('Could not find an unused alias (tried 1000!)')

    def _getAlias(self, auth_level_uri):
        """Return the alias for the specified auth level URI.

        @raises KeyError: if no alias is defined
        """
        for (alias, existing_uri) in self.auth_level_aliases.items():
            if auth_level_uri == existing_uri:
                return alias
        raise KeyError(auth_level_uri)
class Request(PAPEExtension):
    """A Provider Authentication Policy request, sent from a relying
    party to a provider
    @ivar preferred_auth_policies: The authentication policies that
        the relying party prefers
    @type preferred_auth_policies: List[six.text_type]
    @ivar max_auth_age: The maximum time, in seconds, that the relying
        party wants to allow to have elapsed before the user must
        re-authenticate
    @type max_auth_age: int or NoneType
    @ivar preferred_auth_level_types: Ordered list of authentication
        level namespace URIs
    @type preferred_auth_level_types: List[six.text_type]
    """
    ns_alias = 'pape'
    def __init__(self, preferred_auth_policies=None, max_auth_age=None,
                 preferred_auth_level_types=None):
        """Initialize the request; auth level URIs are registered via
        addAuthLevel so each receives a namespace alias."""
        super(Request, self).__init__()
        if preferred_auth_policies is None:
            preferred_auth_policies = []
        self.preferred_auth_policies = preferred_auth_policies
        self.max_auth_age = max_auth_age
        self.preferred_auth_level_types = []
        if preferred_auth_level_types is not None:
            for auth_level in preferred_auth_level_types:
                self.addAuthLevel(auth_level)
    def __bool__(self):
        """True if the request carries any PAPE information at all."""
        return bool(self.preferred_auth_policies or self.max_auth_age is not None or self.preferred_auth_level_types)
    def __nonzero__(self):
        # Python 2 alias for __bool__.
        return self.__bool__()
    def addPolicyURI(self, policy_uri):
        """Add an acceptable authentication policy URI to this request
        This method is intended to be used by the relying party to add
        acceptable authentication types to the request.
        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-05.html#auth_policies
        """
        if policy_uri not in self.preferred_auth_policies:
            self.preferred_auth_policies.append(policy_uri)
    def addAuthLevel(self, auth_level_uri, alias=None):
        """Register a preferred auth level URI (and optional alias),
        keeping the preference list free of duplicates."""
        self._addAuthLevelAlias(auth_level_uri, alias)
        if auth_level_uri not in self.preferred_auth_level_types:
            self.preferred_auth_level_types.append(auth_level_uri)
    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}
        """
        ns_args = {
            'preferred_auth_policies': ' '.join(self.preferred_auth_policies),
        }
        if self.max_auth_age is not None:
            ns_args['max_auth_age'] = six.text_type(self.max_auth_age)
        if self.preferred_auth_level_types:
            preferred_types = []
            for auth_level_uri in self.preferred_auth_level_types:
                # Each auth level URI is declared under auth_level.ns.<alias>
                # and referenced by alias in preferred_auth_level_types.
                alias = self._getAlias(auth_level_uri)
                ns_args['auth_level.ns.%s' % (alias,)] = auth_level_uri
                preferred_types.append(alias)
            ns_args['preferred_auth_level_types'] = ' '.join(preferred_types)
        return ns_args
    @classmethod
    def fromOpenIDRequest(cls, request):
        """Instantiate a Request object from the arguments in a
        C{checkid_*} OpenID message
        """
        self = cls()
        args = request.message.getArgs(self.ns_uri)
        is_openid1 = request.message.isOpenID1()
        # No PAPE arguments at all means no PAPE request was made.
        if args == {}:
            return None
        self.parseExtensionArgs(args, is_openid1)
        return self
    def parseExtensionArgs(self, args, is_openid1, strict=False):
        """Set the state of this request to be that expressed in these
        PAPE arguments
        @param args: The PAPE arguments without a namespace
        @param strict: Whether to raise an exception if the input is
            out of spec or otherwise malformed. If strict is false,
            malformed input will be ignored.
        @param is_openid1: Whether the input should be treated as part
            of an OpenID1 request
        @rtype: None
        @raises ValueError: When the max_auth_age is not parseable as
            an integer
        """
        # preferred_auth_policies is a space-separated list of policy URIs
        self.preferred_auth_policies = []
        policies_str = args.get('preferred_auth_policies')
        if policies_str:
            for uri in policies_str.split(' '):
                if uri not in self.preferred_auth_policies:
                    self.preferred_auth_policies.append(uri)
        # max_auth_age is base-10 integer number of seconds
        max_auth_age_str = args.get('max_auth_age')
        self.max_auth_age = None
        if max_auth_age_str:
            try:
                self.max_auth_age = int(max_auth_age_str)
            except ValueError:
                # Non-strict mode silently ignores an unparseable value.
                if strict:
                    raise
        # Parse auth level information
        preferred_auth_level_types = args.get('preferred_auth_level_types')
        if preferred_auth_level_types:
            aliases = preferred_auth_level_types.strip().split()
            for alias in aliases:
                key = 'auth_level.ns.%s' % (alias,)
                try:
                    uri = args[key]
                except KeyError:
                    # OpenID1 messages may rely on the well-known default
                    # aliases instead of declaring a namespace.
                    if is_openid1:
                        uri = self._default_auth_level_aliases.get(alias)
                    else:
                        uri = None
                if uri is None:
                    if strict:
                        raise ValueError('preferred auth level %r is not '
                                         'defined in this message' % (alias,))
                else:
                    self.addAuthLevel(uri, alias)
    def preferredTypes(self, supported_types):
        """Given a list of authentication policy URIs that a provider
        supports, this method returns the subsequence of those types
        that are preferred by the relying party.
        @param supported_types: A sequence of authentication policy
            type URIs that are supported by a provider
        @returns: The sub-sequence of the supported types that are
            preferred by the relying party. This list will be ordered
            in the order that the types appear in the supported_types
            sequence, and may be empty if the provider does not prefer
            any of the supported authentication types.
        @returntype: List[six.text_type]
        """
        return [i for i in supported_types if i in self.preferred_auth_policies]
# Bind the module-level PAPE namespace URI as a class attribute;
# fromOpenIDRequest reads self.ns_uri when fetching the message arguments.
Request.ns_uri = ns_uri
class Response(PAPEExtension):
    """A Provider Authentication Policy response, sent from a provider
    to a relying party
    @ivar auth_policies: List of authentication policies conformed to
        by this OpenID assertion, represented as policy URIs
    """
    ns_alias = 'pape'
    def __init__(self, auth_policies=None, auth_time=None,
                 auth_levels=None):
        """Initialize the response; auth levels are registered via
        setAuthLevel so each URI receives a namespace alias."""
        super(Response, self).__init__()
        if auth_policies:
            self.auth_policies = auth_policies
        else:
            self.auth_policies = []
        self.auth_time = auth_time
        self.auth_levels = {}
        if auth_levels is None:
            auth_levels = {}
        for uri, level in auth_levels.items():
            self.setAuthLevel(uri, level)
    def setAuthLevel(self, level_uri, level, alias=None):
        """Set the value for the given auth level type.
        @param level: string representation of an authentication level
            valid for level_uri
        @param alias: An optional namespace alias for the given auth
            level URI. May be omitted if the alias is not
            significant. The library will use a reasonable default for
            widely-used auth level types.
        """
        self._addAuthLevelAlias(level_uri, alias)
        self.auth_levels[level_uri] = level
    def getAuthLevel(self, level_uri):
        """Return the auth level for the specified auth level
        identifier
        @returns: A string that should map to the auth levels defined
            for the auth level type
        @raises KeyError: If the auth level type is not present in
            this message
        """
        return self.auth_levels[level_uri]
    @property
    def nist_auth_level(self):
        """Backward-compatibility accessor for the NIST auth level."""
        # Returns None (rather than raising) when no NIST level was asserted.
        try:
            return int(self.getAuthLevel(LEVELS_NIST))
        except KeyError:
            return None
    def addPolicyURI(self, policy_uri):
        """Add a authentication policy to this response
        This method is intended to be used by the provider to add a
        policy that the provider conformed to when authenticating the user.
        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        @raises RuntimeError: if AUTH_NONE is passed; "no policies" is
            expressed by leaving the response empty.
        """
        if policy_uri == AUTH_NONE:
            raise RuntimeError(
                'To send no policies, do not set any on the response.')
        if policy_uri not in self.auth_policies:
            self.auth_policies.append(policy_uri)
    @classmethod
    def fromSuccessResponse(cls, success_response):
        """Create a C{L{Response}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message
        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
        @rtype: Response or None
        @returns: A provider authentication policy response from the
            data that was supplied with the C{id_res} response or None
            if the provider sent no signed PAPE response arguments.
        """
        self = cls()
        # PAPE requires that the args be signed.
        args = success_response.getSignedNS(self.ns_uri)
        is_openid1 = success_response.isOpenID1()
        # Only try to construct a PAPE response if the arguments were
        # signed in the OpenID response. If not, return None.
        if args is not None:
            self.parseExtensionArgs(args, is_openid1)
            return self
        else:
            return None
    def parseExtensionArgs(self, args, is_openid1, strict=False):
        """Parse the provider authentication policy arguments into the
        internal state of this object
        @param args: unqualified provider authentication policy
            arguments
        @param strict: Whether to raise an exception when bad data is
            encountered
        @returns: None. The data is parsed into the internal fields of
            this object.
        """
        policies_str = args.get('auth_policies')
        if policies_str:
            auth_policies = policies_str.split(' ')
        elif strict:
            raise ValueError('Missing auth_policies')
        else:
            auth_policies = []
        # AUTH_NONE must not be combined with real policies.
        if (len(auth_policies) > 1 and strict and AUTH_NONE in auth_policies):
            raise ValueError('Got some auth policies, as well as the special '
                             '"none" URI: %r' % (auth_policies,))
        # Bare 'none' was used by pre-draft-5 implementations.
        if 'none' in auth_policies:
            msg = '"none" used as a policy URI (see PAPE draft < 5)'
            if strict:
                raise ValueError(msg)
            else:
                warnings.warn(msg, stacklevel=2)
        auth_policies = [u for u in auth_policies
                         if u not in ['none', AUTH_NONE]]
        self.auth_policies = auth_policies
        for (key, val) in six.iteritems(args):
            if key.startswith('auth_level.'):
                # len('auth_level.') == 11
                alias = key[11:]
                # skip the already-processed namespace declarations
                if alias.startswith('ns.'):
                    continue
                try:
                    uri = args['auth_level.ns.%s' % (alias,)]
                except KeyError:
                    # OpenID1 messages may rely on the well-known default
                    # aliases instead of declaring a namespace.
                    if is_openid1:
                        uri = self._default_auth_level_aliases.get(alias)
                    else:
                        uri = None
                if uri is None:
                    if strict:
                        raise ValueError(
                            'Undefined auth level alias: %r' % (alias,))
                else:
                    self.setAuthLevel(uri, val, alias)
        auth_time = args.get('auth_time')
        if auth_time:
            if TIME_VALIDATOR.match(auth_time):
                self.auth_time = auth_time
            elif strict:
                raise ValueError("auth_time must be in RFC3339 format")
    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}
        """
        # An empty policy list is serialized as the special AUTH_NONE URI.
        if len(self.auth_policies) == 0:
            ns_args = {
                'auth_policies': AUTH_NONE,
            }
        else:
            ns_args = {
                'auth_policies': ' '.join(self.auth_policies),
            }
        for level_type, level in self.auth_levels.items():
            alias = self._getAlias(level_type)
            ns_args['auth_level.ns.%s' % (alias,)] = level_type
            ns_args['auth_level.%s' % (alias,)] = six.text_type(level)
        if self.auth_time is not None:
            if not TIME_VALIDATOR.match(self.auth_time):
                raise ValueError('auth_time must be in RFC3339 format')
            ns_args['auth_time'] = self.auth_time
        return ns_args
# Bind the module-level PAPE namespace URI as a class attribute;
# fromSuccessResponse reads self.ns_uri when fetching the signed arguments.
Response.ns_uri = ns_uri
| {
"repo_name": "openid/python-openid",
"path": "openid/extensions/pape.py",
"copies": "1",
"size": "16350",
"license": "apache-2.0",
"hash": 5797540045245743000,
"line_mean": 33.2767295597,
"line_max": 117,
"alpha_frac": 0.5927828746,
"autogenerated": false,
"ratio": 4.2074112197632525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5300194094363252,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the OpenID Provider Authentication Policy
Extension 1.0, Draft 5
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
__all__ = [
'Request',
'Response',
'ns_uri',
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
'LEVELS_NIST',
'LEVELS_JISA',
]
from openid.extension import Extension
import warnings
import re
# Namespace URI identifying the PAPE extension in OpenID messages.
ns_uri = "http://specs.openid.net/extensions/pape/1.0"
# Authentication policy URIs defined by the PAPE specification.
AUTH_MULTI_FACTOR_PHYSICAL = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
    'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
# Special "no policies" URI; sent when a Response carries no auth policies.
AUTH_NONE = \
    'http://schemas.openid.net/pape/policies/2007/06/none'
# Matches RFC3339 UTC timestamps of the exact form YYYY-MM-DDThh:mm:ssZ.
# Bug fix: use a raw string -- '\d' in a plain string literal is an invalid
# escape sequence (DeprecationWarning since 3.6, SyntaxWarning in 3.12+).
TIME_VALIDATOR = re.compile(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
# Well-known authentication level namespace URIs.
LEVELS_NIST = 'http://csrc.nist.gov/publications/nistpubs/800-63/SP800-63V1_0_2.pdf'
LEVELS_JISA = 'http://www.jisa.or.jp/spec/auth_level.html'
class PAPEExtension(Extension):
    """Shared base for PAPE Request and Response.

    Maintains the mapping from namespace aliases to authentication level
    URIs used when (de)serializing ``auth_level.*`` message arguments.
    """
    # Well-known auth level URIs that always have a stable alias.
    _default_auth_level_aliases = {
        'nist': LEVELS_NIST,
        'jisa': LEVELS_JISA,
    }

    def __init__(self):
        # Copy so per-instance additions never mutate the class-level default.
        self.auth_level_aliases = self._default_auth_level_aliases.copy()

    def _addAuthLevelAlias(self, auth_level_uri, alias=None):
        """Add an auth level URI alias to this request.

        @param auth_level_uri: The auth level URI to send in the
            request.
        @param alias: The namespace alias to use for this auth level
            in this message. May be None if the alias is not
            important.
        @raises KeyError: if the alias is already bound to a different
            auth level URI.
        """
        if alias is None:
            try:
                alias = self._getAlias(auth_level_uri)
            except KeyError:
                alias = self._generateAlias()
        else:
            existing_uri = self.auth_level_aliases.get(alias)
            if existing_uri is not None and existing_uri != auth_level_uri:
                # Bug fix: the original passed the format string and its
                # arguments as separate KeyError args (logging style), so the
                # %r placeholders were never interpolated into the message.
                raise KeyError('Attempting to redefine alias %r from %r to %r'
                               % (alias, existing_uri, auth_level_uri))
        self.auth_level_aliases[alias] = auth_level_uri

    def _generateAlias(self):
        """Return an unused auth level alias"""
        for i in range(1000):
            alias = 'cust%d' % (i,)
            if alias not in self.auth_level_aliases:
                return alias
        raise RuntimeError('Could not find an unused alias (tried 1000!)')

    def _getAlias(self, auth_level_uri):
        """Return the alias for the specified auth level URI.

        @raises KeyError: if no alias is defined
        """
        for (alias, existing_uri) in self.auth_level_aliases.items():
            if auth_level_uri == existing_uri:
                return alias
        raise KeyError(auth_level_uri)
class Request(PAPEExtension):
    """A Provider Authentication Policy request, sent from a relying
    party to a provider
    @ivar preferred_auth_policies: The authentication policies that
        the relying party prefers
    @type preferred_auth_policies: [str]
    @ivar max_auth_age: The maximum time, in seconds, that the relying
        party wants to allow to have elapsed before the user must
        re-authenticate
    @type max_auth_age: int or NoneType
    @ivar preferred_auth_level_types: Ordered list of authentication
        level namespace URIs
    @type preferred_auth_level_types: [str]
    """
    ns_alias = 'pape'
    def __init__(self, preferred_auth_policies=None, max_auth_age=None,
                 preferred_auth_level_types=None):
        """Initialize the request; auth level URIs are registered via
        addAuthLevel so each receives a namespace alias."""
        super(Request, self).__init__()
        if preferred_auth_policies is None:
            preferred_auth_policies = []
        self.preferred_auth_policies = preferred_auth_policies
        self.max_auth_age = max_auth_age
        self.preferred_auth_level_types = []
        if preferred_auth_level_types is not None:
            for auth_level in preferred_auth_level_types:
                self.addAuthLevel(auth_level)
    def __bool__(self):
        """True if the request carries any PAPE information at all."""
        return bool(self.preferred_auth_policies or
                    self.max_auth_age is not None or
                    self.preferred_auth_level_types)
    def addPolicyURI(self, policy_uri):
        """Add an acceptable authentication policy URI to this request
        This method is intended to be used by the relying party to add
        acceptable authentication types to the request.
        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-05.html#auth_policies
        """
        if policy_uri not in self.preferred_auth_policies:
            self.preferred_auth_policies.append(policy_uri)
    def addAuthLevel(self, auth_level_uri, alias=None):
        """Register a preferred auth level URI (and optional alias),
        keeping the preference list free of duplicates."""
        self._addAuthLevelAlias(auth_level_uri, alias)
        if auth_level_uri not in self.preferred_auth_level_types:
            self.preferred_auth_level_types.append(auth_level_uri)
    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}
        """
        ns_args = {
            'preferred_auth_policies':' '.join(self.preferred_auth_policies),
        }
        if self.max_auth_age is not None:
            ns_args['max_auth_age'] = str(self.max_auth_age)
        if self.preferred_auth_level_types:
            preferred_types = []
            for auth_level_uri in self.preferred_auth_level_types:
                # Each auth level URI is declared under auth_level.ns.<alias>
                # and referenced by alias in preferred_auth_level_types.
                alias = self._getAlias(auth_level_uri)
                ns_args['auth_level.ns.%s' % (alias,)] = auth_level_uri
                preferred_types.append(alias)
            ns_args['preferred_auth_level_types'] = ' '.join(preferred_types)
        return ns_args
    def fromOpenIDRequest(cls, request):
        """Instantiate a Request object from the arguments in a
        C{checkid_*} OpenID message
        """
        self = cls()
        args = request.message.getArgs(self.ns_uri)
        is_openid1 = request.message.isOpenID1()
        # No PAPE arguments at all means no PAPE request was made.
        if args == {}:
            return None
        self.parseExtensionArgs(args, is_openid1)
        return self
    # Pre-decorator classmethod idiom, kept for parity with the upstream source.
    fromOpenIDRequest = classmethod(fromOpenIDRequest)
    def parseExtensionArgs(self, args, is_openid1, strict=False):
        """Set the state of this request to be that expressed in these
        PAPE arguments
        @param args: The PAPE arguments without a namespace
        @param strict: Whether to raise an exception if the input is
            out of spec or otherwise malformed. If strict is false,
            malformed input will be ignored.
        @param is_openid1: Whether the input should be treated as part
            of an OpenID1 request
        @rtype: None
        @raises ValueError: When the max_auth_age is not parseable as
            an integer
        """
        # preferred_auth_policies is a space-separated list of policy URIs
        self.preferred_auth_policies = []
        policies_str = args.get('preferred_auth_policies')
        if policies_str:
            # The value may arrive as bytes; normalize to text before splitting.
            if isinstance(policies_str, bytes):
                policies_str = str(policies_str, encoding="utf-8")
            for uri in policies_str.split(' '):
                if uri not in self.preferred_auth_policies:
                    self.preferred_auth_policies.append(uri)
        # max_auth_age is base-10 integer number of seconds
        max_auth_age_str = args.get('max_auth_age')
        self.max_auth_age = None
        if max_auth_age_str:
            try:
                self.max_auth_age = int(max_auth_age_str)
            except ValueError:
                # Non-strict mode silently ignores an unparseable value.
                if strict:
                    raise
        # Parse auth level information
        preferred_auth_level_types = args.get('preferred_auth_level_types')
        if preferred_auth_level_types:
            aliases = preferred_auth_level_types.strip().split()
            for alias in aliases:
                key = 'auth_level.ns.%s' % (alias,)
                try:
                    uri = args[key]
                except KeyError:
                    # OpenID1 messages may rely on the well-known default
                    # aliases instead of declaring a namespace.
                    if is_openid1:
                        uri = self._default_auth_level_aliases.get(alias)
                    else:
                        uri = None
                if uri is None:
                    if strict:
                        raise ValueError('preferred auth level %r is not '
                                         'defined in this message' % (alias,))
                else:
                    self.addAuthLevel(uri, alias)
    def preferredTypes(self, supported_types):
        """Given a list of authentication policy URIs that a provider
        supports, this method returns the subsequence of those types
        that are preferred by the relying party.
        @param supported_types: A sequence of authentication policy
            type URIs that are supported by a provider
        @returns: The sub-sequence of the supported types that are
            preferred by the relying party. This list will be ordered
            in the order that the types appear in the supported_types
            sequence, and may be empty if the provider does not prefer
            any of the supported authentication types.
        @returntype: [str]
        """
        return list(filter(self.preferred_auth_policies.__contains__,
                           supported_types))
# Bind the module-level PAPE namespace URI as a class attribute;
# fromOpenIDRequest reads self.ns_uri when fetching the message arguments.
Request.ns_uri = ns_uri
class Response(PAPEExtension):
    """A Provider Authentication Policy response, sent from a provider
    to a relying party
    @ivar auth_policies: List of authentication policies conformed to
        by this OpenID assertion, represented as policy URIs
    """
    ns_alias = 'pape'
    def __init__(self, auth_policies=None, auth_time=None,
                 auth_levels=None):
        """Initialize the response; auth levels are registered via
        setAuthLevel so each URI receives a namespace alias."""
        super(Response, self).__init__()
        if auth_policies:
            self.auth_policies = auth_policies
        else:
            self.auth_policies = []
        self.auth_time = auth_time
        self.auth_levels = {}
        if auth_levels is None:
            auth_levels = {}
        for uri, level in auth_levels.items():
            self.setAuthLevel(uri, level)
    def setAuthLevel(self, level_uri, level, alias=None):
        """Set the value for the given auth level type.
        @param level: string representation of an authentication level
            valid for level_uri
        @param alias: An optional namespace alias for the given auth
            level URI. May be omitted if the alias is not
            significant. The library will use a reasonable default for
            widely-used auth level types.
        """
        self._addAuthLevelAlias(level_uri, alias)
        self.auth_levels[level_uri] = level
    def getAuthLevel(self, level_uri):
        """Return the auth level for the specified auth level
        identifier
        @returns: A string that should map to the auth levels defined
            for the auth level type
        @raises KeyError: If the auth level type is not present in
            this message
        """
        return self.auth_levels[level_uri]
    def _getNISTAuthLevel(self):
        # Returns None (rather than raising) when no NIST level was asserted.
        try:
            return int(self.getAuthLevel(LEVELS_NIST))
        except KeyError:
            return None
    nist_auth_level = property(
        _getNISTAuthLevel,
        doc="Backward-compatibility accessor for the NIST auth level")
    def addPolicyURI(self, policy_uri):
        """Add a authentication policy to this response
        This method is intended to be used by the provider to add a
        policy that the provider conformed to when authenticating the user.
        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        @raises RuntimeError: if AUTH_NONE is passed; "no policies" is
            expressed by leaving the response empty.
        """
        if policy_uri == AUTH_NONE:
            raise RuntimeError(
                'To send no policies, do not set any on the response.')
        if policy_uri not in self.auth_policies:
            self.auth_policies.append(policy_uri)
    def fromSuccessResponse(cls, success_response):
        """Create a C{L{Response}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message
        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
        @rtype: Response or None
        @returns: A provider authentication policy response from the
            data that was supplied with the C{id_res} response or None
            if the provider sent no signed PAPE response arguments.
        """
        self = cls()
        # PAPE requires that the args be signed.
        args = success_response.getSignedNS(self.ns_uri)
        is_openid1 = success_response.isOpenID1()
        # Only try to construct a PAPE response if the arguments were
        # signed in the OpenID response. If not, return None.
        if args is not None:
            self.parseExtensionArgs(args, is_openid1)
            return self
        else:
            return None
    def parseExtensionArgs(self, args, is_openid1, strict=False):
        """Parse the provider authentication policy arguments into the
        internal state of this object
        @param args: unqualified provider authentication policy
            arguments
        @param strict: Whether to raise an exception when bad data is
            encountered
        @returns: None. The data is parsed into the internal fields of
            this object.
        """
        policies_str = args.get('auth_policies')
        if policies_str:
            auth_policies = policies_str.split(' ')
        elif strict:
            raise ValueError('Missing auth_policies')
        else:
            auth_policies = []
        # AUTH_NONE must not be combined with real policies.
        if (len(auth_policies) > 1 and strict and AUTH_NONE in auth_policies):
            raise ValueError('Got some auth policies, as well as the special '
                             '"none" URI: %r' % (auth_policies,))
        # Bare 'none' was used by pre-draft-5 implementations.
        if 'none' in auth_policies:
            msg = '"none" used as a policy URI (see PAPE draft < 5)'
            if strict:
                raise ValueError(msg)
            else:
                warnings.warn(msg, stacklevel=2)
        auth_policies = [u for u in auth_policies
                         if u not in ['none', AUTH_NONE]]
        self.auth_policies = auth_policies
        for (key, val) in args.items():
            if key.startswith('auth_level.'):
                # len('auth_level.') == 11
                alias = key[11:]
                # skip the already-processed namespace declarations
                if alias.startswith('ns.'):
                    continue
                try:
                    uri = args['auth_level.ns.%s' % (alias,)]
                except KeyError:
                    # OpenID1 messages may rely on the well-known default
                    # aliases instead of declaring a namespace.
                    if is_openid1:
                        uri = self._default_auth_level_aliases.get(alias)
                    else:
                        uri = None
                if uri is None:
                    if strict:
                        raise ValueError(
                            'Undefined auth level alias: %r' % (alias,))
                else:
                    self.setAuthLevel(uri, val, alias)
        auth_time = args.get('auth_time')
        if auth_time:
            if TIME_VALIDATOR.match(auth_time):
                self.auth_time = auth_time
            elif strict:
                raise ValueError("auth_time must be in RFC3339 format")
    # Pre-decorator classmethod idiom, kept for parity with the upstream source.
    fromSuccessResponse = classmethod(fromSuccessResponse)
    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}
        """
        # An empty policy list is serialized as the special AUTH_NONE URI.
        if len(self.auth_policies) == 0:
            ns_args = {
                'auth_policies': AUTH_NONE,
            }
        else:
            ns_args = {
                'auth_policies':' '.join(self.auth_policies),
            }
        for level_type, level in self.auth_levels.items():
            alias = self._getAlias(level_type)
            ns_args['auth_level.ns.%s' % (alias,)] = level_type
            ns_args['auth_level.%s' % (alias,)] = str(level)
        if self.auth_time is not None:
            if not TIME_VALIDATOR.match(self.auth_time):
                raise ValueError('auth_time must be in RFC3339 format')
            ns_args['auth_time'] = self.auth_time
        return ns_args
# Bind the module-level PAPE namespace URI as a class attribute;
# fromSuccessResponse reads self.ns_uri when fetching the signed arguments.
Response.ns_uri = ns_uri
| {
"repo_name": "ddayguerrero/blogme",
"path": "flask/lib/python3.4/site-packages/openid/extensions/draft/pape5.py",
"copies": "12",
"size": "16494",
"license": "mit",
"hash": -5452355694426009000,
"line_mean": 33.5786163522,
"line_max": 111,
"alpha_frac": 0.5906390202,
"autogenerated": false,
"ratio": 4.240102827763496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007780302168258203,
"num_lines": 477
} |
"""An implementation of the OpenID Provider Authentication Policy
Extension 1.0
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
from __future__ import unicode_literals
import re
import warnings
import six
from openid.extension import Extension
__all__ = [
'Request',
'Response',
'ns_uri',
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
]
# This draft module is superseded; importing it emits a DeprecationWarning.
warnings.warn("Module 'openid.extensions.draft.pape2' is deprecated. Use 'openid.extensions.pape' instead.",
              DeprecationWarning)
# Namespace URI identifying the PAPE extension in OpenID messages.
ns_uri = "http://specs.openid.net/extensions/pape/1.0"
# Authentication policy URIs defined by the PAPE specification.
AUTH_MULTI_FACTOR_PHYSICAL = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
    'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
# Matches RFC3339 UTC timestamps of the exact form YYYY-MM-DDThh:mm:ssZ.
TIME_VALIDATOR = re.compile(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
class Request(Extension):
    """A Provider Authentication Policy (PAPE) request, sent from a
    relying party to a provider.

    @ivar preferred_auth_policies: The authentication policy URIs the
        relying party prefers.
    @type preferred_auth_policies: List[six.text_type]
    @ivar max_auth_age: Maximum number of seconds the relying party
        allows to have elapsed since the last authentication, or None
        for no limit.
    @type max_auth_age: int or NoneType
    """
    ns_alias = 'pape'

    def __init__(self, preferred_auth_policies=None, max_auth_age=None):
        super(Request, self).__init__()
        if preferred_auth_policies:
            self.preferred_auth_policies = preferred_auth_policies
        else:
            self.preferred_auth_policies = []
        self.max_auth_age = max_auth_age

    def __bool__(self):
        has_policies = bool(self.preferred_auth_policies)
        return has_policies or self.max_auth_age is not None

    def __nonzero__(self):
        # Python 2 truth protocol delegates to the Python 3 hook.
        return self.__bool__()

    def addPolicyURI(self, policy_uri):
        """Add an acceptable authentication policy URI to this request,
        ignoring duplicates.

        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        """
        if policy_uri in self.preferred_auth_policies:
            return
        self.preferred_auth_policies.append(policy_uri)

    def getExtensionArgs(self):
        """Serialize this request to unqualified PAPE arguments.

        @see: C{L{Extension.getExtensionArgs}}
        """
        args = {
            'preferred_auth_policies': ' '.join(self.preferred_auth_policies)
        }
        if self.max_auth_age is not None:
            args['max_auth_age'] = six.text_type(self.max_auth_age)
        return args

    @classmethod
    def fromOpenIDRequest(cls, request):
        """Instantiate a Request from the arguments in a C{checkid_*}
        OpenID message; return None when no PAPE arguments are present.
        """
        instance = cls()
        args = request.message.getArgs(instance.ns_uri)
        if args == {}:
            return None
        instance.parseExtensionArgs(args)
        return instance

    def parseExtensionArgs(self, args):
        """Reset this request to the state expressed in *args*.

        @param args: The PAPE arguments without a namespace
        @rtype: None
        """
        # Space-separated list of policy URIs; duplicates are dropped
        # while preserving first-seen order.
        uris = []
        policies_str = args.get('preferred_auth_policies')
        if policies_str:
            for uri in policies_str.split(' '):
                if uri not in uris:
                    uris.append(uri)
        self.preferred_auth_policies = uris
        # Base-10 integer number of seconds; unparseable values are
        # silently ignored.
        self.max_auth_age = None
        age_str = args.get('max_auth_age')
        if age_str:
            try:
                self.max_auth_age = int(age_str)
            except ValueError:
                pass

    def preferredTypes(self, supported_types):
        """Return the subsequence of *supported_types* preferred by the
        relying party, in *supported_types* order (possibly empty).

        @returntype: List[six.text_type]
        """
        preferred = self.preferred_auth_policies
        return [policy for policy in supported_types if policy in preferred]
Request.ns_uri = ns_uri  # Bind the PAPE namespace URI to the class after definition.
class Response(Extension):
    """A Provider Authentication Policy response, sent from a provider
    to a relying party

    @ivar auth_policies: Policy URIs the provider applied when
        authenticating the user.
    @ivar auth_time: Most recent authentication time as an RFC3339
        string, or None if not reported.
    @ivar nist_auth_level: NIST assurance level (0-4), or None.
    """
    ns_alias = 'pape'

    def __init__(self, auth_policies=None, auth_time=None,
                 nist_auth_level=None):
        super(Response, self).__init__()
        # Note: a caller-supplied list is stored as-is (aliased), not copied.
        if auth_policies:
            self.auth_policies = auth_policies
        else:
            self.auth_policies = []
        self.auth_time = auth_time
        self.nist_auth_level = nist_auth_level

    def addPolicyURI(self, policy_uri):
        """Add a authentication policy to this response
        This method is intended to be used by the provider to add a
        policy that the provider conformed to when authenticating the user.
        @param policy_uri: The identifier for the preferred type of
            authentication.  Duplicates are ignored.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        """
        if policy_uri not in self.auth_policies:
            self.auth_policies.append(policy_uri)

    @classmethod
    def fromSuccessResponse(cls, success_response):
        """Create a C{L{Response}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message
        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}
        @rtype: Response or None
        @returns: A provider authentication policy response from the
            data that was supplied with the C{id_res} response or None
            if the provider sent no signed PAPE response arguments.
        """
        self = cls()
        # PAPE requires that the args be signed.
        args = success_response.getSignedNS(self.ns_uri)
        # Only try to construct a PAPE response if the arguments were
        # signed in the OpenID response. If not, return None.
        if args is not None:
            self.parseExtensionArgs(args)
            return self
        else:
            return None

    def parseExtensionArgs(self, args, strict=False):
        """Parse the provider authentication policy arguments into the
        internal state of this object
        @param args: unqualified provider authentication policy
            arguments
        @param strict: Whether to raise an exception when bad data is
            encountered
        @returns: None. The data is parsed into the internal fields of
            this object.
        """
        # 'none' is the wire encoding for "no policies were satisfied".
        policies_str = args.get('auth_policies')
        if policies_str and policies_str != 'none':
            self.auth_policies = policies_str.split(' ')
        nist_level_str = args.get('nist_auth_level')
        if nist_level_str:
            try:
                nist_level = int(nist_level_str)
            except ValueError:
                # Non-integer level: error in strict mode, else dropped.
                if strict:
                    raise ValueError('nist_auth_level must be an integer between '
                                     'zero and four, inclusive')
                else:
                    self.nist_auth_level = None
            else:
                # Out-of-range integer levels are silently discarded,
                # even in strict mode.
                if 0 <= nist_level < 5:
                    self.nist_auth_level = nist_level
        auth_time = args.get('auth_time')
        if auth_time:
            if TIME_VALIDATOR.match(auth_time):
                self.auth_time = auth_time
            elif strict:
                raise ValueError("auth_time must be in RFC3339 format")

    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}

        Serializes this response; raises ValueError for an out-of-range
        nist_auth_level or a malformed auth_time.
        """
        if len(self.auth_policies) == 0:
            ns_args = {
                'auth_policies': 'none',
            }
        else:
            ns_args = {
                'auth_policies': ' '.join(self.auth_policies),
            }
        if self.nist_auth_level is not None:
            if self.nist_auth_level not in range(0, 5):
                raise ValueError('nist_auth_level must be an integer between '
                                 'zero and four, inclusive')
            ns_args['nist_auth_level'] = six.text_type(self.nist_auth_level)
        if self.auth_time is not None:
            if not TIME_VALIDATOR.match(self.auth_time):
                raise ValueError('auth_time must be in RFC3339 format')
            ns_args['auth_time'] = self.auth_time
        return ns_args
Response.ns_uri = ns_uri  # Bind the PAPE namespace URI to the class after definition.
| {
"repo_name": "openid/python-openid",
"path": "openid/extensions/draft/pape2.py",
"copies": "1",
"size": "9517",
"license": "apache-2.0",
"hash": 8009195472549468000,
"line_mean": 32.1602787456,
"line_max": 111,
"alpha_frac": 0.6130083009,
"autogenerated": false,
"ratio": 4.143230300391815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022116208828662967,
"num_lines": 287
} |
"""An implementation of the OpenID Provider Authentication Policy
Extension 1.0
@see: http://openid.net/developers/specs/
@since: 2.1.0
"""
# Names exported by `from ... import *`.
__all__ = [
    'Request',
    'Response',
    'ns_uri',
    'AUTH_PHISHING_RESISTANT',
    'AUTH_MULTI_FACTOR',
    'AUTH_MULTI_FACTOR_PHYSICAL',
]
from openid.extension import Extension
import re
# Namespace URI identifying the PAPE extension in OpenID messages.
ns_uri = "http://specs.openid.net/extensions/pape/1.0"
# Authentication policy URIs defined by the PAPE specification.
AUTH_MULTI_FACTOR_PHYSICAL = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor-physical'
AUTH_MULTI_FACTOR = \
    'http://schemas.openid.net/pape/policies/2007/06/multi-factor'
AUTH_PHISHING_RESISTANT = \
    'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
# Matches UTC timestamps in RFC3339 form, e.g. 2008-01-01T00:00:00Z.
# Fixed: use a raw string so '\d' is a regex digit class rather than an
# invalid string escape (SyntaxWarning on modern Python).
TIME_VALIDATOR = re.compile(r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ$')
class Request(Extension):
    """A Provider Authentication Policy request, sent from a relying
    party to a provider
    @ivar preferred_auth_policies: The authentication policies that
        the relying party prefers
    @type preferred_auth_policies: [str]
    @ivar max_auth_age: The maximum time, in seconds, that the relying
        party wants to allow to have elapsed before the user must
        re-authenticate
    @type max_auth_age: int or NoneType
    """
    ns_alias = 'pape'

    def __init__(self, preferred_auth_policies=None, max_auth_age=None):
        super(Request, self).__init__()
        # Replace any falsy value (None, empty list) with a fresh list so
        # instances never share a default.
        if not preferred_auth_policies:
            preferred_auth_policies = []
        self.preferred_auth_policies = preferred_auth_policies
        self.max_auth_age = max_auth_age

    # NOTE(review): only __bool__ is defined; under Python 2 truth-testing
    # uses __nonzero__, so this hook would be ignored there -- confirm the
    # intended Python version for this copy.
    def __bool__(self):
        return bool(self.preferred_auth_policies or
                    self.max_auth_age is not None)

    def addPolicyURI(self, policy_uri):
        """Add an acceptable authentication policy URI to this request
        This method is intended to be used by the relying party to add
        acceptable authentication types to the request.  Duplicates are
        ignored.
        @param policy_uri: The identifier for the preferred type of
            authentication.
        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        """
        if policy_uri not in self.preferred_auth_policies:
            self.preferred_auth_policies.append(policy_uri)

    def getExtensionArgs(self):
        """@see: C{L{Extension.getExtensionArgs}}

        Returns the unqualified PAPE argument dict; max_auth_age is
        included only when set.
        """
        ns_args = {
            'preferred_auth_policies':' '.join(self.preferred_auth_policies)
        }
        if self.max_auth_age is not None:
            ns_args['max_auth_age'] = str(self.max_auth_age)
        return ns_args

    def fromOpenIDRequest(cls, request):
        """Instantiate a Request object from the arguments in a
        C{checkid_*} OpenID message

        Returns None when the message carries no PAPE arguments.
        """
        self = cls()
        args = request.message.getArgs(self.ns_uri)
        if args == {}:
            return None
        self.parseExtensionArgs(args)
        return self
    # Pre-decorator idiom: rebind as a classmethod after definition.
    fromOpenIDRequest = classmethod(fromOpenIDRequest)

    def parseExtensionArgs(self, args):
        """Set the state of this request to be that expressed in these
        PAPE arguments
        @param args: The PAPE arguments without a namespace
        @rtype: None

        Note: an unparseable max_auth_age is silently ignored; no
        exception propagates from this method.
        """
        # preferred_auth_policies is a space-separated list of policy URIs
        self.preferred_auth_policies = []
        policies_str = args.get('preferred_auth_policies')
        if policies_str:
            # Tolerate bytes input by decoding as UTF-8.
            if isinstance(policies_str, bytes):
                policies_str = str(policies_str, encoding="utf-8")
            for uri in policies_str.split(' '):
                if uri not in self.preferred_auth_policies:
                    self.preferred_auth_policies.append(uri)
        # max_auth_age is base-10 integer number of seconds
        max_auth_age_str = args.get('max_auth_age')
        self.max_auth_age = None
        if max_auth_age_str:
            try:
                self.max_auth_age = int(max_auth_age_str)
            except ValueError:
                pass

    def preferredTypes(self, supported_types):
        """Given a list of authentication policy URIs that a provider
        supports, this method returns the subsequence of those types
        that are preferred by the relying party.
        @param supported_types: A sequence of authentication policy
            type URIs that are supported by a provider
        @returns: The sub-sequence of the supported types that are
            preferred by the relying party. This list will be ordered
            in the order that the types appear in the supported_types
            sequence, and may be empty if the provider does not prefer
            any of the supported authentication types.
        @returntype: [str]
        """
        return list(filter(self.preferred_auth_policies.__contains__,
                           supported_types))
Request.ns_uri = ns_uri  # Bind the PAPE namespace URI to the class after definition.
class Response(Extension):
    """A PAPE response: the provider's statement of which authentication
    policies it applied, when the user last authenticated, and at what
    NIST assurance level.
    """
    ns_alias = 'pape'

    def __init__(self, auth_policies=None, auth_time=None,
                 nist_auth_level=None):
        super(Response, self).__init__()
        # A caller-supplied list is stored as-is; otherwise start empty.
        self.auth_policies = auth_policies if auth_policies else []
        self.auth_time = auth_time
        self.nist_auth_level = nist_auth_level

    def addPolicyURI(self, policy_uri):
        """Record *policy_uri* as a policy the provider conformed to
        when authenticating the user.  Duplicates are ignored.

        @see: http://openid.net/specs/openid-provider-authentication-policy-extension-1_0-01.html#auth_policies
        """
        if policy_uri in self.auth_policies:
            return
        self.auth_policies.append(policy_uri)

    @classmethod
    def fromSuccessResponse(cls, success_response):
        """Build a C{Response} from a successful consumer
        SuccessResponse.

        @returns: a Response parsed from the signed PAPE arguments, or
            None when the provider sent no signed PAPE arguments.
        """
        instance = cls()
        # PAPE requires the arguments to be signed; unsigned data is
        # ignored entirely.
        signed_args = success_response.getSignedNS(instance.ns_uri)
        if signed_args is None:
            return None
        instance.parseExtensionArgs(signed_args)
        return instance

    def parseExtensionArgs(self, args, strict=False):
        """Populate this object from unqualified PAPE response
        arguments.

        @param args: unqualified provider authentication policy arguments
        @param strict: raise ValueError on malformed values instead of
            ignoring them
        @returns: None
        """
        # 'none' is the wire encoding for "no policies were satisfied".
        policies = args.get('auth_policies')
        if policies and policies != 'none':
            self.auth_policies = policies.split(' ')
        level_str = args.get('nist_auth_level')
        if level_str:
            try:
                level = int(level_str)
            except ValueError:
                if strict:
                    raise ValueError(
                        'nist_auth_level must be an integer between '
                        'zero and four, inclusive')
                self.nist_auth_level = None
            else:
                # Out-of-range integer levels are dropped silently.
                if 0 <= level < 5:
                    self.nist_auth_level = level
        auth_time = args.get('auth_time')
        if auth_time:
            if TIME_VALIDATOR.match(auth_time):
                self.auth_time = auth_time
            elif strict:
                raise ValueError("auth_time must be in RFC3339 format")

    def getExtensionArgs(self):
        """Serialize this response to unqualified PAPE arguments.

        @see: C{L{Extension.getExtensionArgs}}
        @raises ValueError: for an out-of-range nist_auth_level or a
            malformed auth_time
        """
        if self.auth_policies:
            ns_args = {
                'auth_policies': ' '.join(self.auth_policies),
            }
        else:
            ns_args = {
                'auth_policies': 'none',
            }
        if self.nist_auth_level is not None:
            if self.nist_auth_level not in range(0, 5):
                raise ValueError('nist_auth_level must be an integer between '
                                 'zero and four, inclusive')
            ns_args['nist_auth_level'] = str(self.nist_auth_level)
        if self.auth_time is not None:
            if not TIME_VALIDATOR.match(self.auth_time):
                raise ValueError('auth_time must be in RFC3339 format')
            ns_args['auth_time'] = self.auth_time
        return ns_args
Response.ns_uri = ns_uri  # Bind the PAPE namespace URI to the class after definition.
| {
"repo_name": "bbozhev/flask-test",
"path": "flask/lib/python2.7/site-packages/openid/extensions/draft/pape2.py",
"copies": "12",
"size": "9469",
"license": "mit",
"hash": -9045294505862824000,
"line_mean": 32.6975088968,
"line_max": 111,
"alpha_frac": 0.6076671243,
"autogenerated": false,
"ratio": 4.189823008849557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012220397702918113,
"num_lines": 281
} |
""" An implementation of the paper "A Neural Algorithm of Artistic Style"
by Gatys et al. in TensorFlow.
Author: Chip Huyen (huyenn@stanford.edu)
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import vgg_model
import utils
# parameters to manage experiments
STYLE = 'guernica'
CONTENT = 'deadpool'
STYLE_IMAGE = 'styles/' + STYLE + '.jpg'
CONTENT_IMAGE = 'content/' + CONTENT + '.jpg'
IMAGE_HEIGHT = 250
IMAGE_WIDTH = 333
NOISE_RATIO = 0.6 # percentage of weight of the noise for intermixing with the content image
# Relative weights of the two loss terms in the total loss.
CONTENT_WEIGHT = 1
STYLE_WEIGHT = 20
learning_rate = 1e-4
# NOTE(review): both `learning_rate` (used by the Adam optimizer in main)
# and `LR` below (unused in this copy) are defined -- confirm which value
# is intended.
# Layers used for style features. You can change this.
STYLE_LAYERS = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
W = [0.5, 1.0, 1.5, 3.0, 4.0] # give more weights to deeper layers.
# Layer used for content features. You can change this.
CONTENT_LAYER = 'conv4_2'
ITERS = 300
LR = 2.0
MEAN_PIXELS = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
""" MEAN_PIXELS is defined according to description on their github:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8
'In the paper, the model is denoted as the configuration D trained with scale jittering.
The input images should be zero-centered by mean pixel (rather than mean image) subtraction.
Namely, the following BGR values should be subtracted: [103.939, 116.779, 123.68].'
"""
# VGG-19 parameters file
VGG_DOWNLOAD_LINK = 'http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat'
VGG_MODEL = 'imagenet-vgg-verydeep-19.mat'
EXPECTED_BYTES = 534904783
def _create_content_loss(p, f):
    """Content loss between the content image's feature map *p* (a
    constant array) and the generated image's feature tensor *f*,
    normalized by 4 * p.size as in the assignment handout (not the
    paper's 0.5 coefficient).
    """
    diff = p - f
    return tf.reduce_sum(diff ** 2) / (4.0 * p.size)
def _gram_matrix(F, N, M):
    """Gram matrix of F after flattening to shape (M, N).

    NOTE(review): this multiplies F · F^T of the (M, N) reshape, giving
    an (M, M) matrix; the usual channel-correlation gram is F^T · F
    (N, N) -- confirm which is intended.
    """
    flat = tf.reshape(F, (M, N))
    return tf.matmul(flat, tf.transpose(flat))
def _single_style_loss(a, g):
    """Style loss E_l at one layer.

    a, g are the feature representations (not gram matrices) of the
    real (style) image and the generated image at that layer; the
    coefficient 1 / (2NM)^2 follows the paper.
    """
    n_filters = a.shape[3]
    map_size = a.shape[1] * a.shape[2]
    gram_style = _gram_matrix(a, n_filters, map_size)
    gram_gen = _gram_matrix(g, n_filters, map_size)
    denom = (2 * n_filters * map_size) ** 2
    return tf.reduce_sum((gram_style - gram_gen) ** 2) / denom
def _create_style_loss(A, model):
    """Return the total style loss.

    A: per-layer feature maps of the style image, one entry per layer
       in STYLE_LAYERS.
    model: dict mapping layer names to their tensors in the VGG graph.
    """
    n_layers = len(STYLE_LAYERS)
    # E[i] is the per-layer style loss (E_l in Gatys et al.).
    E = [_single_style_loss(A[i], model[STYLE_LAYERS[i]]) for i in range(n_layers)]
    ###############################
    ## TO DO: return total style loss
    # Weighted sum over layers; W gives deeper layers more influence.
    # (Removed an unused local `w = np.asarray(...)` that was never read.)
    return tf.reduce_sum([W[i]*E[i] for i in range(n_layers)])
    ###############################
def _create_losses(model, input_image, content_image, style_image):
    """Build and return (content_loss, style_loss, total_loss).

    Two short-lived sessions evaluate the *constant* feature maps of
    the content and style images (by assigning each to input_image);
    the losses are then defined against the model's tensors, which
    depend on whatever is assigned to input_image during training.
    """
    with tf.variable_scope('loss') as scope:
        with tf.Session() as sess:
            sess.run(input_image.assign(content_image)) # assign content image to the input variable
            p = sess.run(model[CONTENT_LAYER])
        content_loss = _create_content_loss(p, model[CONTENT_LAYER])
        with tf.Session() as sess:
            sess.run(input_image.assign(style_image))
            A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS])
        style_loss = _create_style_loss(A, model)
        ##########################################
        ## TO DO: create total loss.
        ## Hint: don't forget the content loss and style loss weights
        total_loss = CONTENT_WEIGHT*content_loss + STYLE_WEIGHT*style_loss
        ##########################################
        return content_loss, style_loss, total_loss
def _create_summary(model):
    """Create scalar summaries for the three losses and return the
    merged summary op.  The total loss is tagged "loss"."""
    for tag, key in (("loss", "total_loss"),
                     ("content_loss", "content_loss"),
                     ("style_loss", "style_loss")):
        tf.summary.scalar(tag, model[key])
    return tf.summary.merge_all()
def train(model, generated_image, initial_image):
    """ Train your model.
    Don't forget to create folders for checkpoints and outputs.

    Optimizes the pixels of generated_image starting from
    initial_image, resuming from the latest checkpoint if one exists,
    and periodically logs summaries and saves intermediate images.
    """
    skip_step = 1
    with tf.Session() as sess:
        saver = tf.train.Saver()
        ###############################
        ## TO DO:
        ## 1. initialize your variables
        ## 2. create writer to write your graph
        init = tf.global_variables_initializer()
        sess.run(init)
        writer = tf.summary.FileWriter('graphs/',sess.graph)
        ###############################
        sess.run(generated_image.assign(initial_image))
        # Resume from the most recent checkpoint, if any.
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = model['global_step'].eval()
        start_time = time.time()
        for index in range(initial_step, ITERS):
            # Report/save less frequently as training progresses.
            if index >= 5 and index < 20:
                skip_step = 10
            elif index >= 20:
                skip_step = 20
            sess.run(model['optimizer'])
            if (index + 1) % skip_step == 0:
                ###############################
                ## TO DO: obtain generated image and loss
                gen_image, total_loss, summary = sess.run([generated_image,model['total_loss'],model['summary_op']])
                ###############################
                # Undo the mean-pixel centering applied in main().
                gen_image = gen_image + MEAN_PIXELS
                writer.add_summary(summary, global_step=index)
                print('Step {}\n   Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))
                print('   Loss: {:5.1f}'.format(total_loss))
                print('   Time: {}'.format(time.time() - start_time))
                start_time = time.time()
                filename = 'outputs/%d.png' % (index)
                utils.save_image(filename, gen_image)
                if (index + 1) % 20 == 0:
                    saver.save(sess, 'checkpoints/style_transfer', index)
def main():
    """Assemble the graph (input variable, VGG model, losses, optimizer,
    summaries) and run training from a noise-mixed initial image."""
    with tf.variable_scope('input') as scope:
        # use variable instead of placeholder because we're training the intial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    # Center both reference images by the VGG mean pixel.
    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS
    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model,
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    model['optimizer'] = tf.train.AdamOptimizer(learning_rate).minimize(model['total_loss'],
                                                    global_step=model['global_step'])
    ###############################
    model['summary_op'] = _create_summary(model)
    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image)
# Run the full style-transfer pipeline when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "najeeb97khan/Tensorflow-CS20SI",
"path": "Assignment_2/style_transfer/style_transfer.py",
"copies": "1",
"size": "8564",
"license": "mit",
"hash": -4277097788352153000,
"line_mean": 38.465437788,
"line_max": 116,
"alpha_frac": 0.6020551144,
"autogenerated": false,
"ratio": 3.625740897544454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4727796011944454,
"avg_score": null,
"num_lines": null
} |
""" An implementation of the paper "A Neural Algorithm of Artistic Style"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import vgg_model
import utils
# parameters to manage experiments
STYLE = 'guernica'
CONTENT = 'deadpool'
STYLE_IMAGE = 'styles/' + STYLE + '.jpg'
CONTENT_IMAGE = 'content/' + CONTENT + '.jpg'
IMAGE_HEIGHT = 250
IMAGE_WIDTH = 333
NOISE_RATIO = 0.6 # percentage of weight of the noise for intermixing with the content image
# Relative weights of the two loss terms in the total loss.
CONTENT_WEIGHT = 0.01
STYLE_WEIGHT = 1
# Layers used for style features. You can change this.
STYLE_LAYERS = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
W = [0.5, 1.0, 1.5, 3.0, 4.0] # give more weights to deeper layers.
# Layer used for content features. You can change this.
CONTENT_LAYER = 'conv4_2'
ITERS = 300
LR = 2.0  # learning rate passed to the Adam optimizer in main()
MEAN_PIXELS = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
""" MEAN_PIXELS is defined according to description on their github:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8
'In the paper, the model is denoted as the configuration D trained with scale jittering.
The input images should be zero-centered by mean pixel (rather than mean image) subtraction.
Namely, the following BGR values should be subtracted: [103.939, 116.779, 123.68].'
"""
# VGG-19 parameters file
VGG_DOWNLOAD_LINK = 'http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat'
VGG_MODEL = 'imagenet-vgg-verydeep-19.mat'
EXPECTED_BYTES = 534904783
def _create_content_loss(p, f):
    """Content loss: sum of squared differences between the generated
    image's features *f* and the content image's features *p*, scaled
    by 1 / (4 * p.size) per the assignment handout (not the paper's
    0.5 coefficient).
    """
    squared_error = (f - p) ** 2
    return tf.reduce_sum(squared_error) / (4.0 * p.size)
def _gram_matrix(F, N, M):
    """(N, N) gram matrix of F: reshape to (M, N) -- rows are spatial
    positions, columns are channels -- then compute F^T · F."""
    flat = tf.reshape(F, (M, N))
    flat_t = tf.transpose(flat)
    return tf.matmul(flat_t, flat)
def _single_style_loss(a, g):
    """Style loss E_l at one layer.

    a, g: feature representations (not gram matrices) of the real
    (style) image and the generated image; the 1 / (2NM)^2 coefficient
    follows the paper.
    """
    num_filters = a.shape[3]              # N: number of filters
    map_area = a.shape[1] * a.shape[2]    # M: height * width of the map
    gram_real = _gram_matrix(a, num_filters, map_area)
    gram_gen = _gram_matrix(g, num_filters, map_area)
    return tf.reduce_sum((gram_gen - gram_real) ** 2 / ((2 * num_filters * map_area) ** 2))
def _create_style_loss(A, model):
    """Total style loss: per-layer losses weighted by W (deeper layers
    weigh more)."""
    layer_losses = [_single_style_loss(A[i], model[name])
                    for i, name in enumerate(STYLE_LAYERS)]
    ###############################
    ## TO DO: return total style loss
    return sum(W[i] * layer_losses[i] for i in range(len(layer_losses)))
    ###############################
def _create_losses(model, input_image, content_image, style_image):
    """Build and return (content_loss, style_loss, total_loss).

    Two short-lived sessions evaluate the *constant* feature maps of
    the content and style images (by assigning each to input_image);
    the losses are then defined against the model's tensors, which
    depend on whatever is assigned to input_image during training.
    """
    with tf.variable_scope('loss') as scope:
        with tf.Session() as sess:
            sess.run(input_image.assign(content_image)) # assign content image to the input variable
            p = sess.run(model[CONTENT_LAYER])
        content_loss = _create_content_loss(p, model[CONTENT_LAYER])
        with tf.Session() as sess:
            sess.run(input_image.assign(style_image))
            A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS])
        style_loss = _create_style_loss(A, model)
        ##########################################
        ## TO DO: create total loss.
        ## Hint: don't forget the content loss and style loss weights
        total_loss = CONTENT_WEIGHT * content_loss + STYLE_WEIGHT * style_loss
        ##########################################
        return content_loss, style_loss, total_loss
def _create_summary(model):
    """ Create summary ops necessary
    Hint: don't forget to merge them

    Emits scalar and histogram summaries for the content, style and
    total losses, then returns the merged summary op.
    """
    with tf.name_scope('summaries'):
        tf.summary.scalar('content loss', model['content_loss'])
        tf.summary.scalar('style loss', model['style_loss'])
        tf.summary.scalar('total loss', model['total_loss'])
        tf.summary.histogram('histogram content loss', model['content_loss'])
        tf.summary.histogram('histogram style loss', model['style_loss'])
        tf.summary.histogram('histogram total loss', model['total_loss'])
        return tf.summary.merge_all()
def train(model, generated_image, initial_image):
    """ Train the model: optimize the pixels of generated_image
    starting from initial_image, resuming from the latest checkpoint
    if one exists, periodically logging summaries and saving
    intermediate images.

    Don't forget to create folders for checkpoints and outputs.
    """
    skip_step = 1
    with tf.Session() as sess:
        saver = tf.train.Saver()
        ###############################
        # Initialize variables and create the graph writer.
        # Fixed: removed a duplicate tf.train.Saver() and replaced the
        # undefined EXP constant (which raised NameError at runtime)
        # with a literal log directory.
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('graphs', sess.graph)
        ###############################
        sess.run(generated_image.assign(initial_image))
        # Resume from the most recent checkpoint, if any.
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        initial_step = model['global_step'].eval()
        start_time = time.time()
        for index in range(initial_step, ITERS):
            # Report/save less frequently as training progresses.
            if index >= 5 and index < 20:
                skip_step = 10
            elif index >= 20:
                skip_step = 20
            sess.run(model['optimizer'])
            if (index + 1) % skip_step == 0:
                gen_image, total_loss, summary = sess.run([generated_image, model['total_loss'],
                                                           model['summary_op']])
                # Undo the mean-pixel centering applied in main().
                gen_image = gen_image + MEAN_PIXELS
                writer.add_summary(summary, global_step=index)
                print('Step {}\n   Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))
                print('   Loss: {:5.1f}'.format(total_loss))
                print('   Time: {}'.format(time.time() - start_time))
                start_time = time.time()
                filename = 'outputs/%d.png' % (index)
                utils.save_image(filename, gen_image)
                if (index + 1) % 20 == 0:
                    saver.save(sess, 'checkpoints/style_transfer', index)
def main():
    """Assemble the graph (input variable, VGG model, losses, optimizer,
    summaries) and run training from a noise-mixed initial image."""
    with tf.variable_scope('input') as scope:
        # use variable instead of placeholder because we're training the intial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    # Center both reference images by the VGG mean pixel.
    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS
    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model,
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'],
                                                    global_step=model['global_step'])
    ###############################
    model['summary_op'] = _create_summary(model)
    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image)
# Run the full style-transfer pipeline when executed as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "infilect/ml-course1",
"path": "week3/assignments/style_transfer/style_transfer_sols.py",
"copies": "2",
"size": "8613",
"license": "mit",
"hash": 2502604371116722000,
"line_mean": 39.6273584906,
"line_max": 106,
"alpha_frac": 0.5934053175,
"autogenerated": false,
"ratio": 3.7125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5305905317499999,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
# Skips "non-vowels, then vowels, then one non-vowel"; group 1 starts at
# the Porter2 region boundary (see get_r1/get_r2).
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
# Two-letter word that is vowel + non-vowel (short syllable at word start).
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
# Word ending non-vowel, vowel, non-vowel where the final letter is not
# w, x or Y (the other "ends with a short syllable" case).
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
# A 'y' immediately preceded by a vowel (candidate for capitalization).
ccy_exp = re.compile(r"([aeiouy])y")
# A vowel followed by any character: tests for a vowel before the final
# letter (step 1a).
s1a_exp = re.compile(r"[aeiouy].")
# Any vowel at all (step 1b).
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
    """Return the start index of region R1 for *word*.

    R1 is the region after the first non-vowel that follows a vowel,
    with hard-coded exceptions for a few prefixes as specified by the
    Porter2 algorithm; len(word) when no such region exists.
    """
    # Exceptional prefixes defined by the algorithm.
    for prefix, r1_start in (('gener', 5), ('commun', 6), ('arsen', 5)):
        if word.startswith(prefix):
            return r1_start
    # Normal form: group 1 of the pattern begins at R1.
    match = r_exp.match(word)
    return match.start(1) if match else len(word)
def get_r2(word):
    """Return the start index of region R2 (R1 computed again, starting
    from the beginning of R1); len(word) when empty."""
    match = r_exp.match(word, get_r1(word))
    return match.start(1) if match else len(word)
def ends_with_short_syllable(word):
    """True when *word* ends with a short syllable per the Porter2
    definition (either form matched by the two patterns)."""
    if len(word) == 2 and ewss_exp1.match(word):
        return True
    return bool(ewss_exp2.match(word))
def is_short_word(word):
    """True when the word is "short": it ends with a short syllable and
    region R1 is empty."""
    return ends_with_short_syllable(word) and get_r1(word) == len(word)
def remove_initial_apostrophe(word):
    """Strip a single leading apostrophe, if present."""
    return word[1:] if word.startswith("'") else word
def capitalize_consonant_ys(word):
    """Mark consonant-role 'y's by uppercasing them.

    A 'y' at the start of the word or directly after a vowel acts as a
    consonant; it is temporarily written as 'Y' so later steps can tell
    the two roles apart (undone by normalize_ys).
    """
    if word.startswith('y'):
        word = 'Y' + word[1:]
    # Raw string: without it, '\g' is an invalid escape sequence — a
    # SyntaxWarning on modern Python and slated to become a SyntaxError.
    return ccy_exp.sub(r'\g<1>Y', word)
def step_0(word):
    """Remove a trailing apostrophe-s suffix: \"'s'\", \"'s\" or \"'\"."""
    # Longest suffix first, mirroring the spec's ordering.
    for suffix in ("'s'", "'s", "'"):
        if word.endswith(suffix):
            return word[:-len(suffix)]
    return word
def step_1a(word):
    """Handle plural endings: 'sses', 'ied'/'ies', and a bare 's'."""
    if word.endswith('sses'):
        return word[:-4] + 'ss'
    if word.endswith(('ied', 'ies')):
        trunk = word[:-3]
        # Longer words take 'i'; very short ones keep 'ie'.
        return trunk + ('i' if len(word) > 4 else 'ie')
    if word.endswith(('us', 'ss')):
        return word
    if word.endswith('s'):
        preceding = word[:-1]
        # Drop the 's' only when a vowel occurs before the final letter.
        if s1a_exp.search(preceding):
            return preceding
    return word
# Consonant pairs whose doubling is undone in step 1b.
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
    """Return True if *word* ends in one of the doubled consonants."""
    # str.endswith accepts a tuple of alternatives.
    return word.endswith(doubles)
def step_1b_helper(word):
    """Fix up a stem after an 'ed'/'ing' style suffix was removed."""
    if word.endswith(('at', 'bl', 'iz')):
        return word + 'e'
    if ends_with_double(word):
        return word[:-1]
    if is_short_word(word):
        return word + 'e'
    return word
# 'ed'/'ing' style suffixes removed in step 1b.
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
    """Strip 'eed(ly)' and 'ed'/'ing' style suffixes (step 1b)."""
    if word.endswith('eedly'):
        # Reduce to 'ee' only when the suffix lies within R1.
        return word[:-3] if len(word) - 5 >= r1 else word
    if word.endswith('eed'):
        return word[:-1] if len(word) - 3 >= r1 else word
    for suffix in s1b_suffixes:
        if word.endswith(suffix):
            trunk = word[:-len(suffix)]
            # Only strip when a vowel remains in the preceding part.
            if s1b_exp.search(trunk):
                return step_1b_helper(trunk)
            return word
    return word
def step_1c(word):
    """Replace a final 'y'/'Y' preceded by a consonant with 'i'.

    Only applies when the word has more than two letters, per the Porter2
    spec ("preceded by a non-vowel which is not the first letter").

    Bug fix: the length guard is checked before indexing word[-2], so a
    one-character word ending in 'y'/'Y' (reachable via earlier steps,
    e.g. "y's" -> "Y") no longer raises IndexError.
    """
    if word.endswith(('y', 'Y')) and len(word) > 2:
        if word[-2] not in 'aeiouy':
            return word[:-1] + 'i'
    return word
def step_2_helper(word, r1, end, repl, prev):
    """Try one step-2 substitution.

    Returns the substituted word, the unchanged word when the suffix
    matches but the conditions fail, or None when *end* does not match
    at all (so the caller keeps trying shorter suffixes).
    """
    if not word.endswith(end):
        return None
    if len(word) - len(end) < r1:
        return word
    trunk = word[:-len(end)]
    # An empty *prev* list means "no preceding-letter requirement".
    if not prev or any(trunk.endswith(p) for p in prev):
        return trunk + repl
    return word
# (suffix, replacement, required-preceding-letters) triples for step 2,
# ordered longest suffix first so the greediest match wins.
s2_triples = (('ization', 'ize', []),
              ('ational', 'ate', []),
              ('fulness', 'ful', []),
              ('ousness', 'ous', []),
              ('iveness', 'ive', []),
              ('tional', 'tion', []),
              ('biliti', 'ble', []),
              ('lessli', 'less', []),
              ('entli', 'ent', []),
              ('ation', 'ate', []),
              ('alism', 'al', []),
              ('aliti', 'al', []),
              ('ousli', 'ous', []),
              ('iviti', 'ive', []),
              ('fulli', 'ful', []),
              ('enci', 'ence', []),
              ('anci', 'ance', []),
              ('abli', 'able', []),
              ('izer', 'ize', []),
              ('ator', 'ate', []),
              ('alli', 'al', []),
              ('bli', 'ble', []),
              ('ogi', 'og', ['l']),
              ('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
    """Apply the first step-2 suffix substitution that matches."""
    for end, repl, prev in s2_triples:
        candidate = step_2_helper(word, r1, end, repl, prev)
        # Truthiness check (not "is None") preserved: an empty result
        # falls through to the next triple, as in the original.
        if candidate:
            return candidate
    return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
    """Try one step-3 substitution; None means *end* did not match."""
    if not word.endswith(end):
        return None
    if len(word) - len(end) >= r1:
        # Some suffixes additionally require the match to lie within R2.
        if not r2_necessary or len(word) - len(end) >= r2:
            return word[:-len(end)] + repl
    return word
# (suffix, replacement, requires-R2) triples for step 3, longest first.
s3_triples = (('ational', 'ate', False),
              ('tional', 'tion', False),
              ('alize', 'al', False),
              ('icate', 'ic', False),
              ('iciti', 'ic', False),
              ('ative', '', True),
              ('ical', 'ic', False),
              ('ness', '', False),
              ('ful', '', False))
def step_3(word, r1, r2):
    """Apply the first step-3 suffix substitution that matches."""
    for end, repl, needs_r2 in s3_triples:
        candidate = step_3_helper(word, r1, r2, end, repl, needs_r2)
        if candidate:
            return candidate
    return word
# Residual derivational suffixes deleted outright in step 4 (within R2).
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
                  'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
    """Delete a residual suffix lying within R2."""
    for end in s4_delete_list:
        if word.endswith(end):
            # First match decides; keep the word if it is not inside R2.
            return word[:-len(end)] if len(word) - len(end) >= r2 else word
    # 'ion' is deleted only when preceded by 's' or 't'.
    if word.endswith(('sion', 'tion')) and len(word) - 3 >= r2:
        return word[:-3]
    return word
def step_5(word, r1, r2):
    """Remove a final 'e', or the second 'l' of a final double 'l'."""
    if word.endswith('l'):
        # Length is tested first, short-circuiting the word[-2] index.
        if len(word) - 1 >= r2 and word[-2] == 'l':
            return word[:-1]
        return word
    if word.endswith('e'):
        trimmed = word[:-1]
        if len(trimmed) >= r2:
            return trimmed
        if len(trimmed) >= r1 and not ends_with_short_syllable(trimmed):
            return trimmed
    return word
def normalize_ys(word):
    """Turn the temporary consonant markers 'Y' back into plain 'y'."""
    return word.replace('Y', 'y')
# Irregular words stemmed by table lookup before the algorithm runs;
# identity entries (e.g. 'sky', 'news') stop the normal steps from
# mangling them.
exceptional_forms = {'skis': 'ski',
                     'skies': 'sky',
                     'dying': 'die',
                     'lying': 'lie',
                     'tying': 'tie',
                     'idly': 'idl',
                     'gently': 'gentl',
                     'ugly': 'ugli',
                     'early': 'earli',
                     'only': 'onli',
                     'singly': 'singl',
                     'sky': 'sky',
                     'news': 'news',
                     'howe': 'howe',
                     'atlas': 'atlas',
                     'cosmos': 'cosmos',
                     'bias': 'bias',
                     'andes': 'andes'}
# Words returned unchanged if encountered right after step 1a.
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
                                            'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
    """Return the Porter2 (Snowball English) stem of *word*.

    Words of one or two letters are returned unchanged.
    """
    if len(word) <= 2:
        return word
    word = remove_initial_apostrophe(word)
    # Table-driven irregular forms bypass the algorithm entirely.
    special = exceptional_forms.get(word)
    if special is not None:
        return special
    word = capitalize_consonant_ys(word)
    # Regions are computed once, on the 'Y'-marked word, before any
    # suffix stripping.
    region1 = get_r1(word)
    region2 = get_r2(word)
    word = step_1a(step_0(word))
    # A second exception table applies right after step 1a.
    if word in exceptional_early_exit_post_1a:
        return word
    word = step_1b(word, region1)
    word = step_1c(word)
    word = step_2(word, region1)
    word = step_3(word, region1, region2)
    word = step_4(word, region2)
    word = step_5(word, region1, region2)
    return normalize_ys(word)
| {
"repo_name": "HeadCow/ARPS",
"path": "tmsvm/dependence/porter2.py",
"copies": "21",
"size": "8289",
"license": "mit",
"hash": 6390124163143646000,
"line_mean": 28.1866197183,
"line_max": 87,
"alpha_frac": 0.4793099288,
"autogenerated": false,
"ratio": 3.3763747454175155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.04399440268992363,
"num_lines": 284
} |
"""An implementation of the Python Database API Specification v2.0
using Teradata ODBC."""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import ctypes
import logging
import threading
import atexit
import platform
import re
import collections
from . import util, datatypes
from .api import * # @UnusedWildImport # noqa
logger = logging.getLogger(__name__)
# ODBC Constants
SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC2, SQL_OV_ODBC3 = 200, 2, 3
SQL_ATTR_QUERY_TIMEOUT, SQL_ATTR_AUTOCOMMIT = 0, 102
SQL_NULL_HANDLE, SQL_HANDLE_ENV, SQL_HANDLE_DBC, SQL_HANDLE_STMT = 0, 1, 2, 3
SQL_SUCCESS, SQL_SUCCESS_WITH_INFO = 0, 1,
SQL_ERROR, SQL_INVALID_HANDLE = -1, -2
SQL_NEED_DATA, SQL_NO_DATA = 99, 100
SQL_CLOSE, SQL_UNBIND, SQL_RESET_PARAMS = 0, 2, 3
SQL_PARAM_TYPE_UNKNOWN = 0
SQL_PARAM_INPUT, SQL_PARAM_INPUT_OUTPUT, SQL_PARAM_OUTPUT = 1, 2, 4
SQL_ATTR_PARAM_BIND_TYPE = 18
SQL_ATTR_PARAMS_PROCESSED_PTR, SQL_ATTR_PARAM_STATUS_PTR = 21, 20
SQL_ATTR_PARAMSET_SIZE = 22
SQL_PARAM_BIND_BY_COLUMN = 0
SQL_NULL_DATA, SQL_NTS = -1, -3
SQL_IS_POINTER, SQL_IS_UINTEGER, SQL_IS_INTEGER = -4, -5, -6
SQL_C_BINARY, SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY = -2, -2, -3, -4
SQL_C_WCHAR, SQL_WVARCHAR, SQL_WLONGVARCHAR = -8, -9, -10
SQL_FLOAT = 6
SQL_C_FLOAT = SQL_REAL = 7
SQL_C_DOUBLE = SQL_DOUBLE = 8
SQL_DESC_TYPE_NAME = 14
SQL_COMMIT, SQL_ROLLBACK = 0, 1
SQL_STATE_DATA_TRUNCATED = '01004'
SQL_STATE_CONNECTION_NOT_OPEN = '08003'
SQL_STATE_INVALID_TRANSACTION_STATE = '25000'
# ctypes aliases mirroring the ODBC C type names.
SQLLEN = ctypes.c_ssize_t
SQLULEN = ctypes.c_size_t
SQLUSMALLINT = ctypes.c_ushort
SQLSMALLINT = ctypes.c_short
SQLINTEGER = ctypes.c_int
SQLFLOAT = ctypes.c_float
SQLDOUBLE = ctypes.c_double
SQLBYTE = ctypes.c_ubyte
SQLWCHAR = ctypes.c_wchar
SQLRETURN = SQLSMALLINT
SQLPOINTER = ctypes.c_void_p
SQLHANDLE = ctypes.c_void_p
# Shorthand for the ctypes by-reference / pointer-type helpers.
ADDR = ctypes.byref
PTR = ctypes.POINTER
# 4 KiB scratch buffer for names/messages; 1 MiB buffer for row data.
SMALL_BUFFER_SIZE = 2 ** 12
LARGE_BUFFER_SIZE = 2 ** 20
TRUE = 1
FALSE = 0
# Module-level singletons: the loaded ODBC library and its environment
# handle, both created lazily by init() under `lock`.
odbc = None
hEnv = None
lock = threading.Lock()
pyVer = sys.version_info[0]
osType = platform.system()
# The amount of seconds to wait when submitting non-user defined SQL (e.g.
# set query bands, etc).
QUERY_TIMEOUT = 120
if pyVer > 2:
    unicode = str  # @ReservedAssignment
# String-marshalling helpers differ per platform: Windows and Mac driver
# managers take wide (unicode) buffers, while on other systems strings are
# passed as UTF-8 byte buffers — NOTE(review): this assumes a unixODBC
# build using narrow UTF-8 strings; confirm for the target driver manager.
if osType == "Darwin" or osType == "Windows":
    # Mac OSx and Windows
    _createBuffer = lambda l: ctypes.create_unicode_buffer(l)
    _inputStr = lambda s, l = None: None if s is None else \
        ctypes.create_unicode_buffer((s if util.isString(s) else str(s)), l)
    _outputStr = lambda s: s.value
    _convertParam = lambda s: None if s is None else (
        s if util.isString(s) else str(s))
else:
    # Unix/Linux
    _createBuffer = lambda l: ctypes.create_string_buffer(l)
    _inputStr = lambda s, l = None: None if s is None else \
        ctypes.create_string_buffer((s if util.isString(s) else str(s)).encode(
            'utf8'), l)
    _outputStr = lambda s: unicode(s.raw.partition(b'\00')[0], 'utf8')
    _convertParam = lambda s: None if s is None else (
        (s if util.isString(s) else str(s)).encode('utf8'))
    # Narrow characters on Unix — overrides the wide alias above.
    SQLWCHAR = ctypes.c_char
# All currently-open OdbcConnection instances (for atexit cleanup).
connections = []
def cleanupConnections():
    """Close any connections still open at interpreter exit.

    Registered via atexit by initOdbcEnv(); well-behaved clients close
    their connections themselves, so anything found here is warned about.
    """
    if connections:
        # Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(
            "%s open connections found on exit, attempting to close...",
            len(connections))
        # Iterate over a copy: conn.close() removes itself from the list.
        for conn in list(connections):
            conn.close()
def getDiagnosticInfo(handle, handleType=SQL_HANDLE_STMT):
    """Gets diagnostic information associated with ODBC calls, particularly
    when errors occur.

    Calls SQLGetDiagRecW repeatedly until SQL_NO_DATA and returns a list
    of (sqlState, message, nativeErrorCode) tuples, one per record.
    Raises InterfaceError if the diagnostic call itself fails.
    """
    info = []
    infoNumber = 1
    sqlState = _createBuffer(6)  # 5-char SQLSTATE plus terminator
    nativeError = SQLINTEGER()
    messageBuffer = _createBuffer(SMALL_BUFFER_SIZE)
    messageLength = SQLSMALLINT()
    while True:
        rc = odbc.SQLGetDiagRecW(handleType, handle, infoNumber, sqlState,
                                 ADDR(nativeError), messageBuffer,
                                 len(messageBuffer), ADDR(messageLength))
        if rc == SQL_SUCCESS_WITH_INFO and \
                messageLength.value > ctypes.sizeof(messageBuffer):
            # Resize buffer to fit entire message.
            messageBuffer = _createBuffer(messageLength.value)
            # Retry the same record number with the bigger buffer.
            continue
        if rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO:
            # NOTE(review): abs() normalizes the native error code's sign —
            # presumably some drivers report negative codes; confirm.
            info.append(
                (_outputStr(sqlState), _outputStr(messageBuffer),
                 abs(nativeError.value)))
            infoNumber += 1
        elif rc == SQL_NO_DATA:
            # No more diagnostic records.
            return info
        elif rc == SQL_INVALID_HANDLE:
            raise InterfaceError(
                'SQL_INVALID_HANDLE',
                "Invalid handle passed to SQLGetDiagRecW.")
        elif rc == SQL_ERROR:
            raise InterfaceError(
                "SQL_ERROR", "SQL_ERROR returned from SQLGetDiagRecW.")
        else:
            raise InterfaceError(
                "UNKNOWN_RETURN_CODE",
                "SQLGetDiagRecW returned an unknown return code: %s", rc)
def checkStatus(rc, hEnv=SQL_NULL_HANDLE, hDbc=SQL_NULL_HANDLE,
                hStmt=SQL_NULL_HANDLE, method="Method", ignore=None):
    """ Check return status code and log any information or error messages.
    If error is returned, raise exception.

    Diagnostics are pulled from the most specific handle supplied
    (statement, then connection, then environment).  Returns the list of
    SQLSTATE codes seen; raises DatabaseError on a non-success code whose
    SQLSTATE is not in *ignore*.
    """
    sqlState = []
    logger.trace("%s returned status code %s", method, rc)
    if rc not in (SQL_SUCCESS, SQL_NO_DATA):
        if hStmt != SQL_NULL_HANDLE:
            info = getDiagnosticInfo(hStmt, SQL_HANDLE_STMT)
        elif hDbc != SQL_NULL_HANDLE:
            info = getDiagnosticInfo(hDbc, SQL_HANDLE_DBC)
        else:
            info = getDiagnosticInfo(hEnv, SQL_HANDLE_ENV)
        for i in info:
            sqlState.append(i[0])
            if rc == SQL_SUCCESS_WITH_INFO:
                # Informational only — log and keep going.
                logger.debug(
                    u"{} succeeded with info: [{}] {}".format(method,
                                                              i[0], i[1]))
            elif not ignore or i[0] not in ignore:
                logger.debug((u"{} returned non-successful error code "
                              u"{}: [{}] {}").format(method, rc, i[0], i[1]))
                raise DatabaseError(i[2], u"[{}] {}".format(i[0], i[1]), i[0])
            else:
                logger.debug(
                    u"Ignoring return of {} from {}: [{}] {}".format(rc,
                                                                     method,
                                                                     i[0],
                                                                     i[1]))
                # Breaking here because this error is ignored and info could
                # contain older error messages.
                # E.g. if error was SQL_STATE_CONNECTION_NOT_OPEN, the next
                # error would be the original connection error.
                break
        if not info:
            logger.info(
                "No information associated with return code %s from %s",
                rc, method)
    return sqlState
def prototype(func, *args):
    """Declare the ctypes signature of one ODBC entry point."""
    func.argtypes = args
    func.restype = SQLRETURN
def initFunctionPrototypes():
    """Initialize function prototypes for ODBC calls.

    Declares argument/return types for every ODBC entry point this module
    uses; the 'W' suffixed functions are the wide-character variants.
    """
    prototype(odbc.SQLAllocHandle, SQLSMALLINT, SQLHANDLE, PTR(SQLHANDLE))
    prototype(odbc.SQLGetDiagRecW, SQLSMALLINT, SQLHANDLE, SQLSMALLINT,
              PTR(SQLWCHAR), PTR(SQLINTEGER), PTR(SQLWCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT))
    prototype(odbc.SQLSetEnvAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLDriverConnectW, SQLHANDLE, SQLHANDLE,
              PTR(SQLWCHAR), SQLSMALLINT, PTR(SQLWCHAR), SQLSMALLINT,
              PTR(SQLSMALLINT), SQLUSMALLINT)
    prototype(odbc.SQLFreeHandle, SQLSMALLINT, SQLHANDLE)
    prototype(odbc.SQLExecDirectW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
    prototype(odbc.SQLNumResultCols, SQLHANDLE, PTR(SQLSMALLINT))
    prototype(odbc.SQLDescribeColW, SQLHANDLE, SQLUSMALLINT, PTR(SQLWCHAR),
              SQLSMALLINT, PTR(SQLSMALLINT), PTR(SQLSMALLINT), PTR(SQLULEN),
              PTR(SQLSMALLINT), PTR(SQLSMALLINT))
    prototype(odbc.SQLColAttributeW, SQLHANDLE, SQLUSMALLINT,
              SQLUSMALLINT, SQLPOINTER, SQLSMALLINT, PTR(SQLSMALLINT),
              PTR(SQLLEN))
    prototype(odbc.SQLFetch, SQLHANDLE)
    prototype(odbc.SQLGetData, SQLHANDLE, SQLUSMALLINT,
              SQLSMALLINT, SQLPOINTER, SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLFreeStmt, SQLHANDLE, SQLUSMALLINT)
    prototype(odbc.SQLPrepareW, SQLHANDLE, PTR(SQLWCHAR), SQLINTEGER)
    prototype(odbc.SQLNumParams, SQLHANDLE, PTR(SQLSMALLINT))
    prototype(odbc.SQLDescribeParam, SQLHANDLE, SQLUSMALLINT, PTR(
        SQLSMALLINT), PTR(SQLULEN), PTR(SQLSMALLINT), PTR(SQLSMALLINT))
    prototype(odbc.SQLBindParameter, SQLHANDLE, SQLUSMALLINT, SQLSMALLINT,
              SQLSMALLINT, SQLSMALLINT, SQLULEN, SQLSMALLINT, SQLPOINTER,
              SQLLEN, PTR(SQLLEN))
    prototype(odbc.SQLExecute, SQLHANDLE)
    prototype(odbc.SQLSetStmtAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLMoreResults, SQLHANDLE)
    prototype(odbc.SQLDisconnect, SQLHANDLE)
    prototype(odbc.SQLSetConnectAttr, SQLHANDLE,
              SQLINTEGER, SQLPOINTER, SQLINTEGER)
    prototype(odbc.SQLEndTran, SQLSMALLINT, SQLHANDLE, SQLSMALLINT)
    prototype(odbc.SQLRowCount, SQLHANDLE, PTR(SQLLEN))
def initOdbcLibrary(odbcLibPath=None):
    """Load the platform's ODBC driver-manager library (idempotent)."""
    global odbc
    if odbc is not None:
        return
    if osType == "Windows":
        odbc = ctypes.windll.odbc32
        return
    if not odbcLibPath:
        # iODBC on Mac OSX, otherwise the conventional unixODBC name.
        odbcLibPath = "libiodbc.dylib" if osType == "Darwin" else 'libodbc.so'
    logger.info("Loading ODBC Library: %s", odbcLibPath)
    odbc = ctypes.cdll.LoadLibrary(odbcLibPath)
def initOdbcEnv():
    """Initialize ODBC environment handle.

    Idempotent: allocates the module-global hEnv once, registers the
    atexit cleanup hooks, and selects ODBC 3.0 behavior.
    """
    global hEnv
    if hEnv is None:
        hEnv = SQLPOINTER()
        rc = odbc.SQLAllocHandle(SQL_HANDLE_ENV, SQL_NULL_HANDLE, ADDR(hEnv))
        checkStatus(rc, hEnv=hEnv)
        # atexit runs hooks in LIFO order, so connections are closed
        # before the environment handle is freed.
        atexit.register(cleanupOdbcEnv)
        atexit.register(cleanupConnections)
        # Set the ODBC environment's compatibility level to ODBC 3.0
        rc = odbc.SQLSetEnvAttr(hEnv, SQL_ATTR_ODBC_VERSION, SQL_OV_ODBC3, 0)
        checkStatus(rc, hEnv=hEnv)
def cleanupOdbcEnv():
    """Free the global ODBC environment handle, if one was allocated."""
    if not hEnv:
        return
    odbc.SQLFreeHandle(SQL_HANDLE_ENV, hEnv)
def init(odbcLibPath=None):
    """Initialize the ODBC library, prototypes and environment.

    Thread-safe and safe to call repeatedly: initOdbcLibrary and
    initOdbcEnv are no-ops once their module globals exist.
    """
    # 'with' is the idiomatic equivalent of acquire/try/finally/release.
    with lock:
        initOdbcLibrary(odbcLibPath)
        initFunctionPrototypes()
        initOdbcEnv()
class OdbcConnection:
    """Represents a Connection to Teradata using ODBC.

    Implements the DB API 2.0 connection interface (cursor/commit/
    rollback/close) and is usable as a context manager.
    """
    def __init__(self, dbType="Teradata", system=None,
                 username=None, password=None, autoCommit=False,
                 transactionMode=None, queryBands=None, odbcLibPath=None,
                 dataTypeConverter=datatypes.DefaultDataTypeConverter(),
                 **kwargs):
        # NOTE(review): the default converter instance is created once at
        # class-definition time and shared by all connections that don't
        # pass their own — assumed stateless; confirm before relying on it.
        """Creates an ODBC connection."""
        self.hDbc = SQLPOINTER()
        self.cursorCount = 0
        self.sessionno = 0
        self.cursors = []
        self.dbType = dbType
        self.converter = dataTypeConverter
        # Track for atexit cleanup; close() removes the entry.
        connections.append(self)
        # Build connect string
        extraParams = set(k.lower() for k in kwargs)
        connectParams = collections.OrderedDict()
        # An explicit DSN keyword takes precedence over DRIVER.
        if "dsn" not in extraParams:
            connectParams["DRIVER"] = dbType
        if system:
            connectParams["DBCNAME"] = system
        if username:
            connectParams["UID"] = username
        if password:
            connectParams["PWD"] = password
        if transactionMode:
            connectParams["SESSIONMODE"] = "Teradata" \
                if transactionMode == "TERA" else transactionMode
        connectParams.update(kwargs)
        connectString = u";".join(u"{}={}".format(key, value)
                                  for key, value in connectParams.items())
        # Initialize connection handle
        init(odbcLibPath)
        rc = odbc.SQLAllocHandle(SQL_HANDLE_DBC, hEnv, ADDR(self.hDbc))
        checkStatus(rc, hEnv=hEnv, method="SQLAllocHandle")
        # Create connection
        # The password is masked before logging.
        logger.debug("Creating connection using ODBC ConnectString: %s",
                     re.sub("PWD=.*?(;|$)", "PWD=XXX;", connectString))
        try:
            lock.acquire()
            rc = odbc.SQLDriverConnectW(self.hDbc, 0, _inputStr(connectString),
                                        SQL_NTS, None, 0, None, 0)
        finally:
            lock.release()
        checkStatus(rc, hDbc=self.hDbc, method="SQLDriverConnectW")
        # Setup autocommit, query bands, etc.
        try:
            logger.debug("Setting AUTOCOMMIT to %s",
                         "True" if util.booleanValue(autoCommit) else "False")
            rc = odbc.SQLSetConnectAttr(
                self.hDbc, SQL_ATTR_AUTOCOMMIT,
                TRUE if util.booleanValue(autoCommit) else FALSE, 0)
            checkStatus(
                rc, hDbc=self.hDbc,
                method="SQLSetConnectAttr - SQL_ATTR_AUTOCOMMIT")
            if dbType == "Teradata":
                with self.cursor() as c:
                    self.sessionno = c.execute(
                        "SELECT SESSION",
                        queryTimeout=QUERY_TIMEOUT).fetchone()[0]
                    logger.debug("SELECT SESSION returned %s", self.sessionno)
                    if queryBands:
                        c.execute(u"SET QUERY_BAND = '{};' FOR SESSION".format(
                            u";".join(u"{}={}".format(util.toUnicode(k),
                                                      util.toUnicode(v))
                                      for k, v in queryBands.items())),
                            queryTimeout=QUERY_TIMEOUT)
                self.commit()
            logger.debug("Created session %s.", self.sessionno)
        except Exception:
            # Don't leak the connection handle if session setup fails.
            self.close()
            raise
    def close(self):
        """Closes an ODBC Connection.  Idempotent: hDbc is cleared."""
        if self.hDbc:
            if self.sessionno:
                logger.debug("Closing session %s...", self.sessionno)
            # Copy: each cursor.close() removes itself from self.cursors.
            for cursor in list(self.cursors):
                cursor.close()
            rc = odbc.SQLDisconnect(self.hDbc)
            sqlState = checkStatus(
                rc, hDbc=self.hDbc, method="SQLDisconnect",
                ignore=[SQL_STATE_CONNECTION_NOT_OPEN,
                        SQL_STATE_INVALID_TRANSACTION_STATE])
            if SQL_STATE_INVALID_TRANSACTION_STATE in sqlState:
                # An open transaction blocks disconnect; roll it back and
                # try once more.
                logger.warning("Rolling back open transaction for session %s "
                               "so it can be closed.", self.sessionno)
                rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
                checkStatus(
                    rc, hDbc=self.hDbc,
                    method="SQLEndTran - SQL_ROLLBACK - Disconnect")
                rc = odbc.SQLDisconnect(self.hDbc)
                checkStatus(rc, hDbc=self.hDbc, method="SQLDisconnect")
            rc = odbc.SQLFreeHandle(SQL_HANDLE_DBC, self.hDbc)
            if rc != SQL_INVALID_HANDLE:
                checkStatus(rc, hDbc=self.hDbc, method="SQLFreeHandle")
            connections.remove(self)
            self.hDbc = None
            if self.sessionno:
                logger.debug("Session %s closed.", self.sessionno)
    def commit(self):
        """Commits a transaction."""
        logger.debug("Committing transaction...")
        rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_COMMIT)
        checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_COMMIT")
    def rollback(self):
        """Rollsback a transaction."""
        logger.debug("Rolling back transaction...")
        rc = odbc.SQLEndTran(SQL_HANDLE_DBC, self.hDbc, SQL_ROLLBACK)
        checkStatus(rc, hDbc=self.hDbc, method="SQLEndTran - SQL_ROLLBACK")
    def cursor(self):
        """Returns a cursor."""
        cursor = OdbcCursor(
            self, self.dbType, self.converter, self.cursorCount)
        self.cursorCount += 1
        return cursor
    def __del__(self):
        # Best-effort cleanup; close() is a no-op once hDbc is None.
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, t, value, traceback):
        self.close()
    def __repr__(self):
        return "OdbcConnection(sessionno={})".format(self.sessionno)
# DB API 2.0 module-level `connect` entry point.
connect = OdbcConnection
class OdbcCursor (util.Cursor):
    """Represents an ODBC Cursor.

    Wraps one ODBC statement handle.  Fix in this revision: in
    _handleResults the return code of SQLColAttributeW was discarded, so
    checkStatus re-checked the stale rc from the preceding
    SQLDescribeColW call; the return code is now captured.
    """
    def __init__(self, connection, dbType, converter, num):
        util.Cursor.__init__(self, connection, dbType, converter)
        self.num = num
        self.moreResults = None
        if num > 0:
            logger.debug(
                "Creating cursor %s for session %s.", self.num,
                self.connection.sessionno)
        self.hStmt = SQLPOINTER()
        rc = odbc.SQLAllocHandle(
            SQL_HANDLE_STMT, connection.hDbc, ADDR(self.hStmt))
        checkStatus(rc, hStmt=self.hStmt)
        connection.cursors.append(self)
    def callproc(self, procname, params, queryTimeout=0):
        """Call a stored procedure and return its output parameters."""
        query = "CALL {} (".format(procname)
        for i in range(0, len(params)):
            if i > 0:
                query += ", "
            query += "?"
        query += ")"
        logger.debug("Executing Procedure: %s", query)
        self.execute(query, params, queryTimeout=queryTimeout)
        return util.OutParams(params, self.dbType, self.converter)
    def close(self):
        """Free the statement handle.  Idempotent: hStmt is cleared."""
        if self.hStmt:
            if self.num > 0:
                logger.debug(
                    "Closing cursor %s for session %s.", self.num,
                    self.connection.sessionno)
            rc = odbc.SQLFreeHandle(SQL_HANDLE_STMT, self.hStmt)
            checkStatus(rc, hStmt=self.hStmt)
            self.connection.cursors.remove(self)
            self.hStmt = None
    def _setQueryTimeout(self, queryTimeout):
        # Timeout is passed by value via a pointer-sized attribute.
        rc = odbc.SQLSetStmtAttr(
            self.hStmt, SQL_ATTR_QUERY_TIMEOUT, SQLPOINTER(queryTimeout),
            SQL_IS_UINTEGER)
        checkStatus(
            rc, hStmt=self.hStmt,
            method="SQLSetStmtStmtAttr - SQL_ATTR_QUERY_TIMEOUT")
    def execute(self, query, params=None, queryTimeout=0):
        """Execute a query; with params, delegates to executemany."""
        if params:
            self.executemany(query, [params, ], queryTimeout)
        else:
            if self.connection.sessionno:
                logger.debug(
                    "Executing query on session %s using SQLExecDirectW: %s",
                    self.connection.sessionno, query)
            self._free()
            self._setQueryTimeout(queryTimeout)
            rc = odbc.SQLExecDirectW(
                self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
            checkStatus(rc, hStmt=self.hStmt, method="SQLExecDirectW")
            self._handleResults()
        return self
    def executemany(self, query, params, batch=False, queryTimeout=0):
        """Execute a prepared query once per parameter set.

        With batch=True all parameter sets are bound as column-wise
        arrays and sent in a single SQLExecute.
        """
        self._free()
        # Prepare the query
        rc = odbc.SQLPrepareW(
            self.hStmt, _inputStr(_convertLineFeeds(query)), SQL_NTS)
        checkStatus(rc, hStmt=self.hStmt, method="SQLPrepare")
        self._setQueryTimeout(queryTimeout)
        # Get the number of parameters in the SQL statement.
        numParams = SQLSMALLINT()
        rc = odbc.SQLNumParams(self.hStmt, ADDR(numParams))
        checkStatus(rc, hStmt=self.hStmt, method="SQLNumParams")
        numParams = numParams.value
        # The argument types.
        dataTypes = []
        for paramNum in range(0, numParams):
            dataType = SQLSMALLINT()
            parameterSize = SQLULEN()
            decimalDigits = SQLSMALLINT()
            nullable = SQLSMALLINT()
            rc = odbc.SQLDescribeParam(
                self.hStmt, paramNum + 1, ADDR(dataType), ADDR(parameterSize),
                ADDR(decimalDigits), ADDR(nullable))
            checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeParams")
            dataTypes.append(dataType.value)
        if batch:
            logger.debug(
                "Executing query on session %s using batched SQLExecute: %s",
                self.connection.sessionno, query)
            self._executeManyBatch(params, numParams, dataTypes)
        else:
            logger.debug(
                "Executing query on session %s using SQLExecute: %s",
                self.connection.sessionno, query)
            rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMSET_SIZE, 1, 0)
            checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
            paramSetNum = 0
            for p in params:
                paramSetNum += 1
                logger.trace("ParamSet %s: %s", paramSetNum, p)
                if len(p) != numParams:
                    raise InterfaceError(
                        "PARAMS_MISMATCH", "The number of supplied parameters "
                        "({}) does not match the expected number of "
                        "parameters ({}).".format(len(p), numParams))
                # paramArray keeps the ctypes objects alive until execute.
                paramArray = []
                lengthArray = []
                for paramNum in range(0, numParams):
                    val = p[paramNum]
                    inputOutputType = _getInputOutputType(val)
                    valueType, paramType = _getParamValueType(
                        dataTypes[paramNum])
                    param, length = _getParamValue(val, valueType, False)
                    paramArray.append(param)
                    if param is not None:
                        if valueType == SQL_C_BINARY:
                            bufSize = SQLLEN(length)
                            lengthArray.append(SQLLEN(length))
                            columnSize = SQLULEN(length)
                        elif valueType == SQL_C_DOUBLE:
                            bufSize = SQLLEN(length)
                            lengthArray.append(SQLLEN(length))
                            columnSize = SQLULEN(length)
                            # Doubles are bound by reference.
                            param = ADDR(param)
                        else:
                            bufSize = SQLLEN(ctypes.sizeof(param))
                            lengthArray.append(SQLLEN(SQL_NTS))
                            columnSize = SQLULEN(length)
                    else:
                        # NULL parameter.
                        bufSize = SQLLEN(0)
                        columnSize = SQLULEN(0)
                        lengthArray.append(SQLLEN(SQL_NULL_DATA))
                    logger.trace("Binding parameter %s...", paramNum + 1)
                    rc = odbc.SQLBindParameter(
                        self.hStmt, paramNum + 1, inputOutputType, valueType,
                        paramType, columnSize, 0, param, bufSize,
                        ADDR(lengthArray[paramNum]))
                    checkStatus(
                        rc, hStmt=self.hStmt, method="SQLBindParameter")
                logger.debug("Executing prepared statement.")
                rc = odbc.SQLExecute(self.hStmt)
                # Propagate actual output sizes back to OutParam values.
                for paramNum in range(0, numParams):
                    val = p[paramNum]
                    if isinstance(val, OutParam):
                        val.size = lengthArray[paramNum].value
                checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
        self._handleResults()
        return self
    def _executeManyBatch(self, params, numParams, dataTypes):
        """Bind all parameter sets column-wise and execute once."""
        # Get the number of parameter sets.
        paramSetSize = len(params)
        # Set the SQL_ATTR_PARAM_BIND_TYPE statement attribute to use
        # column-wise binding.
        rc = odbc.SQLSetStmtAttr(
            self.hStmt, SQL_ATTR_PARAM_BIND_TYPE, SQL_PARAM_BIND_BY_COLUMN, 0)
        checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
        # Specify the number of elements in each parameter array.
        rc = odbc.SQLSetStmtAttr(
            self.hStmt, SQL_ATTR_PARAMSET_SIZE, paramSetSize, 0)
        checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
        # Specify a PTR to get the number of parameters processed.
        # paramsProcessed = SQLULEN()
        # rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAMS_PROCESSED_PTR,
        # ADDR(paramsProcessed), SQL_IS_POINTER)
        # checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
        # Specify a PTR to get the status of the parameters processed.
        # paramsStatus = (SQLUSMALLINT * paramSetSize)()
        # rc = odbc.SQLSetStmtAttr(self.hStmt, SQL_ATTR_PARAM_STATUS_PTR,
        # ADDR(paramsStatus), SQL_IS_POINTER)
        # checkStatus(rc, hStmt=self.hStmt, method="SQLSetStmtAttr")
        # Bind the parameters.
        paramArrays = []
        lengthArrays = []
        paramSetSize = len(params)
        paramSetNum = 0
        # Validate every parameter set up front.
        for p in params:
            paramSetNum += 1
            logger.debug("ParamSet %s: %s", paramSetNum, p)
            if len(p) != numParams:
                raise InterfaceError(
                    "PARAMS_MISMATCH", "The number of supplied parameters "
                    "({}) does not match the expected number of parameters "
                    "({}).".format(len(p), numParams))
        for paramNum in range(0, numParams):
            # p collects this parameter's value across all sets (a column).
            p = []
            valueType, paramType = _getParamValueType(dataTypes[paramNum])
            maxLen = 0
            for paramSetNum in range(0, paramSetSize):
                param, length = _getParamValue(
                    params[paramSetNum][paramNum], valueType, True)
                if length > maxLen:
                    maxLen = length
                p.append(param)
            logger.debug(
                "Max length for parameter %s is %s.", paramNum + 1, maxLen)
            if valueType == SQL_C_BINARY:
                valueSize = SQLLEN(maxLen)
                paramArrays.append((SQLBYTE * (paramSetSize * maxLen))())
            elif valueType == SQL_C_DOUBLE:
                valueSize = SQLLEN(maxLen)
                paramArrays.append((SQLDOUBLE * paramSetSize)())
            else:
                # Extra slot for the string terminator.
                maxLen += 1
                valueSize = SQLLEN(ctypes.sizeof(SQLWCHAR) * maxLen)
                paramArrays.append(_createBuffer(paramSetSize * maxLen))
            lengthArrays.append((SQLLEN * paramSetSize)())
            for paramSetNum in range(0, paramSetSize):
                index = paramSetNum * maxLen
                if p[paramSetNum] is not None:
                    if valueType == SQL_C_DOUBLE:
                        paramArrays[paramNum][paramSetNum] = p[paramSetNum]
                    else:
                        for c in p[paramSetNum]:
                            paramArrays[paramNum][index] = c
                            index += 1
                        if valueType == SQL_C_BINARY:
                            lengthArrays[paramNum][
                                paramSetNum] = len(p[paramSetNum])
                        else:
                            lengthArrays[paramNum][
                                paramSetNum] = SQLLEN(SQL_NTS)
                            paramArrays[paramNum][
                                index] = _convertParam("\x00")[0]
                else:
                    lengthArrays[paramNum][paramSetNum] = SQLLEN(SQL_NULL_DATA)
                    if valueType == SQL_C_WCHAR:
                        paramArrays[paramNum][index] = _convertParam("\x00")[0]
            logger.trace("Binding parameter %s...", paramNum + 1)
            rc = odbc.SQLBindParameter(self.hStmt, paramNum + 1,
                                       SQL_PARAM_INPUT, valueType, paramType,
                                       SQLULEN(maxLen), 0,
                                       paramArrays[paramNum], valueSize,
                                       lengthArrays[paramNum])
            checkStatus(rc, hStmt=self.hStmt, method="SQLBindParameter")
        # Execute the SQL statement.
        logger.debug("Executing prepared statement.")
        rc = odbc.SQLExecute(self.hStmt)
        checkStatus(rc, hStmt=self.hStmt, method="SQLExecute")
    def _handleResults(self):
        """Read result-set metadata and set up the row iterator."""
        # Reset cursor attributes.
        self.description = None
        self.rowcount = -1
        self.rownumber = None
        self.columns = {}
        self.types = []
        self.moreResults = None
        # Get column count in result set.
        columnCount = SQLSMALLINT()
        rc = odbc.SQLNumResultCols(self.hStmt, ADDR(columnCount))
        checkStatus(rc, hStmt=self.hStmt, method="SQLNumResultCols")
        rowCount = SQLLEN()
        rc = odbc.SQLRowCount(self.hStmt, ADDR(rowCount))
        checkStatus(rc, hStmt=self.hStmt, method="SQLRowCount")
        self.rowcount = rowCount.value
        # Get column meta data and create row iterator.
        if columnCount.value > 0:
            self.description = []
            nameBuf = _createBuffer(SMALL_BUFFER_SIZE)
            nameLength = SQLSMALLINT()
            dataType = SQLSMALLINT()
            columnSize = SQLULEN()
            decimalDigits = SQLSMALLINT()
            nullable = SQLSMALLINT()
            for col in range(0, columnCount.value):
                rc = odbc.SQLDescribeColW(
                    self.hStmt, col + 1, nameBuf, len(nameBuf),
                    ADDR(nameLength), ADDR(dataType), ADDR(columnSize),
                    ADDR(decimalDigits), ADDR(nullable))
                checkStatus(rc, hStmt=self.hStmt, method="SQLDescribeColW")
                columnName = _outputStr(nameBuf)
                # Bug fix: the return code was previously discarded, so
                # checkStatus validated the stale SQLDescribeColW rc.
                rc = odbc.SQLColAttributeW(
                    self.hStmt, col + 1, SQL_DESC_TYPE_NAME, ADDR(nameBuf),
                    len(nameBuf), None, None)
                checkStatus(rc, hStmt=self.hStmt, method="SQLColAttributeW")
                typeName = _outputStr(nameBuf)
                typeCode = self.converter.convertType(self.dbType, typeName)
                self.columns[columnName.lower()] = col
                self.types.append((typeName, typeCode))
                self.description.append((
                    columnName, typeCode, None, columnSize.value,
                    decimalDigits.value, None, nullable.value))
            self.iterator = rowIterator(self)
    def nextset(self):
        """Advance to the next result set; True if one exists, else None."""
        if self.moreResults is None:
            self._checkForMoreResults()
        if self.moreResults:
            self._handleResults()
            return True
    def _checkForMoreResults(self):
        rc = odbc.SQLMoreResults(self.hStmt)
        checkStatus(rc, hStmt=self.hStmt, method="SQLMoreResults")
        self.moreResults = rc == SQL_SUCCESS or rc == SQL_SUCCESS_WITH_INFO
        return self.moreResults
    def _free(self):
        """Close any open result set and unbind parameters."""
        rc = odbc.SQLFreeStmt(self.hStmt, SQL_CLOSE)
        checkStatus(rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_CLOSE")
        rc = odbc.SQLFreeStmt(self.hStmt, SQL_RESET_PARAMS)
        checkStatus(
            rc, hStmt=self.hStmt, method="SQLFreeStmt - SQL_RESET_PARAMS")
def _convertLineFeeds(query):
    # Re-join the query's lines with '\r' — presumably the line separator
    # the Teradata ODBC driver expects; NOTE(review): confirm against the
    # driver documentation.
    return "\r".join(util.linesplit(query))
def _getInputOutputType(val):
    """Return the ODBC parameter direction constant for *val*."""
    # NOTE(review): InOutParam is tested before OutParam — presumably it
    # derives from OutParam so the order matters; confirm in the api module.
    if isinstance(val, InOutParam):
        return SQL_PARAM_INPUT_OUTPUT
    if isinstance(val, OutParam):
        return SQL_PARAM_OUTPUT
    return SQL_PARAM_INPUT
def _getParamValueType(dataType):
    """Map an ODBC SQL data type to the (C value type, SQL parameter
    type) pair used when binding."""
    if dataType in (SQL_BINARY, SQL_VARBINARY, SQL_LONGVARBINARY):
        return SQL_C_BINARY, dataType
    if dataType in (SQL_FLOAT, SQL_DOUBLE, SQL_REAL):
        return SQL_C_DOUBLE, SQL_DOUBLE
    if dataType == SQL_WLONGVARCHAR:
        return SQL_C_WCHAR, SQL_WLONGVARCHAR
    # Everything else is transferred as a wide-character string.
    return SQL_C_WCHAR, SQL_WVARCHAR
def _getParamValue(val, valueType, batch):
    """Convert *val* into a bindable ctypes value.

    Returns (param, length).  For OutParam/InOutParam values a
    setValueFunc closure is installed so the caller can read the result
    back after execution.  *batch* selects the raw (column-array) form
    instead of an individually bound buffer.
    """
    length = 0
    if val is None:
        # NULL: bound with SQL_NULL_DATA by the caller.
        param = None
    elif valueType == SQL_C_BINARY:
        ba = val
        if isinstance(val, InOutParam):
            ba = val.inValue
        elif isinstance(val, OutParam):
            # Output-only: allocate a writable buffer of the requested size.
            ba = bytearray(SMALL_BUFFER_SIZE if val.size is None else val.size)
        if not isinstance(ba, bytearray):
            raise InterfaceError("Expected bytearray for BINARY parameter.")
        length = len(ba)
        if batch:
            param = ba
        else:
            # Wrap the bytearray without copying it.
            byteArr = SQLBYTE * length
            param = byteArr.from_buffer(ba)
            if isinstance(val, OutParam):
                val.setValueFunc(lambda: ba[:val.size])
    elif valueType == SQL_C_DOUBLE:
        f = val
        if isinstance(val, InOutParam):
            f = val.inValue
        elif isinstance(val, OutParam):
            f = float(0)
        param = SQLDOUBLE(f if not util.isString(f) else float(f))
        length = ctypes.sizeof(param)
        if isinstance(val, OutParam):
            val.setValueFunc(lambda: param.value)
    else:
        # Character data.
        if batch:
            param = _convertParam(val)
            length = len(param)
        elif isinstance(val, InOutParam):
            length = SMALL_BUFFER_SIZE if val.size is None else val.size
            param = _inputStr(val.inValue, length)
            val.setValueFunc(lambda: _outputStr(param))
        elif isinstance(val, OutParam):
            length = SMALL_BUFFER_SIZE if val.size is None else val.size
            param = _createBuffer(length)
            val.setValueFunc(lambda: _outputStr(param))
        else:
            param = _inputStr(val)
            length = len(param)
    return param, length
def rowIterator(cursor):
    """ Generator function for iterating over the rows in a result set. """
    # One shared fetch buffer, reused for every column of every row.
    buf = _createBuffer(LARGE_BUFFER_SIZE)
    bufSize = ctypes.sizeof(buf)
    length = SQLLEN()
    # cursor.description becomes None once the result set is exhausted/freed.
    while cursor.description is not None:
        rc = odbc.SQLFetch(cursor.hStmt)
        checkStatus(rc, hStmt=cursor.hStmt, method="SQLFetch")
        if rc == SQL_NO_DATA:
            break
        values = []
        # Get each column in the row.
        for col in range(1, len(cursor.description) + 1):
            val = None
            # Default to wide characters; switch to binary or double based
            # on the column's declared type.
            dataType = SQL_C_WCHAR
            if cursor.description[col - 1][1] == BINARY:
                dataType = SQL_C_BINARY
            elif cursor.types[col - 1][0] in datatypes.FLOAT_TYPES:
                dataType = SQL_C_DOUBLE
            rc = odbc.SQLGetData(
                cursor.hStmt, col, dataType, buf, bufSize, ADDR(length))
            sqlState = checkStatus(rc, hStmt=cursor.hStmt, method="SQLGetData")
            # SQL_NULL_DATA in the length indicator means a NULL column.
            if length.value != SQL_NULL_DATA:
                if SQL_STATE_DATA_TRUNCATED in sqlState:
                    logger.debug(
                        "Data truncated. Calling SQLGetData to get next part "
                        "of data for column %s of size %s.",
                        col, length.value)
                    if dataType == SQL_C_BINARY:
                        # length.value is the total remaining size, so one
                        # more SQLGetData call into the tail of the buffer
                        # completes the value.
                        val = bytearray(length.value)
                        val[0:bufSize] = (
                            ctypes.c_ubyte * bufSize).from_buffer(buf)
                        newBufSize = len(val) - bufSize
                        newBuffer = (ctypes.c_ubyte * newBufSize).from_buffer(
                            val, bufSize)
                        rc = odbc.SQLGetData(
                            cursor.hStmt, col, dataType, newBuffer,
                            newBufSize, ADDR(length))
                        checkStatus(
                            rc, hStmt=cursor.hStmt, method="SQLGetData2")
                    else:
                        # Character data: keep fetching chunks until the
                        # driver stops reporting truncation, then join.
                        val = [_outputStr(buf), ]
                        while SQL_STATE_DATA_TRUNCATED in sqlState:
                            rc = odbc.SQLGetData(
                                cursor.hStmt, col, dataType, buf, bufSize,
                                ADDR(length))
                            sqlState = checkStatus(
                                rc, hStmt=cursor.hStmt, method="SQLGetData2")
                            val.append(_outputStr(buf))
                        val = "".join(val)
                else:
                    if dataType == SQL_C_BINARY:
                        val = bytearray(
                            (ctypes.c_ubyte * length.value).from_buffer(buf))
                    elif dataType == SQL_C_DOUBLE:
                        val = ctypes.c_double.from_buffer(buf).value
                    else:
                        val = _outputStr(buf)
            values.append(val)
        yield values
    # Advance to the next result set, or release the statement handle when
    # there are no more results.
    if not cursor._checkForMoreResults():
        cursor._free()
| {
"repo_name": "fxstein/PyTd",
"path": "teradata/tdodbc.py",
"copies": "1",
"size": "37550",
"license": "mit",
"hash": -3962073777640271400,
"line_mean": 40.9084821429,
"line_max": 79,
"alpha_frac": 0.5679360852,
"autogenerated": false,
"ratio": 3.999360954308233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5067297039508233,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the Python Database API Specification v2.0 using
Teradata REST."""
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import atexit
import base64
import json
import logging
import ssl
import sys
import time
from . import pulljson, util, datatypes
from .api import * # @UnusedWildImport # noqa
if sys.version_info[0] == 2:
import httplib as httplib # @UnresolvedImport #@UnusedImport
else:
import http.client as httplib # @UnresolvedImport @UnusedImport @Reimport
unicode = str
logger = logging.getLogger(__name__)
REST_ERROR = "REST_ERROR"
HTTP_STATUS_DATABASE_ERROR = 420
ERROR_USER_GENERATED_TRANSACTION_ABORT = 3514
MAX_CONNECT_RETRIES = 5
# Module-level registry of open explicit-session connections; entries are
# appended on session creation and removed by RestConnection.close().
connections = []


def cleanup():
    """Close any connections still open at interpreter exit.

    Iterates over a snapshot of ``connections`` because each ``close()``
    call removes the connection from the list; mutating the list while
    iterating it directly would skip every other connection.
    """
    for conn in list(connections):
        conn.close()


atexit.register(cleanup)
class RestConnection:
    """ Represents a Connection to Teradata using the REST API for
    Teradata Database """
    def __init__(self, dbType="Teradata", host=None, system=None,
                 username=None, password=None, protocol='http', port=None,
                 webContext='/tdrest', autoCommit=False, implicit=False,
                 transactionMode='TERA', queryBands=None, charset=None,
                 verifyCerts=True, sslContext=None,
                 dataTypeConverter=datatypes.DefaultDataTypeConverter()):
        """Create a connection, opening an explicit REST session unless
        *implicit* is True.

        NOTE(review): the ``dataTypeConverter`` default is a single shared
        instance evaluated at import time -- confirm the converter is
        stateless before sharing it across connections.
        """
        self.dbType = dbType
        self.system = system
        self.sessionId = None
        self.implicit = implicit
        self.transactionMode = transactionMode
        self.dataTypeConverter = dataTypeConverter
        # Cursors register themselves here so close() can close them too.
        self.cursors = []
        # Support TERA and Teradata as transaction mode to be consistent with
        # ODBC.
        if transactionMode == "Teradata":
            self.transactionMode = "TERA"
        self.autoCommit = False
        # Default REST service ports for each protocol.
        if port is None:
            if protocol == 'http':
                port = 1080
            elif protocol == 'https':
                port = 1443
            else:
                raise InterfaceError(
                    CONFIG_ERROR, "Unsupported protocol: {}".format(protocol))
        self.template = RestTemplate(
            protocol, host, port, webContext, username, password,
            accept='application/vnd.com.teradata.rest-v1.0+json',
            verifyCerts=util.booleanValue(verifyCerts), sslContext=sslContext)
        with self.template.connect() as conn:
            if not self.implicit:
                # Explicit sessions are created up front and reused for all
                # queries; implicit mode lets the service manage sessions.
                options = {}
                options['autoCommit'] = autoCommit
                options['transactionMode'] = transactionMode
                if queryBands:
                    options['queryBands'] = queryBands
                if charset:
                    options['charSet'] = charset
                try:
                    session = conn.post(
                        '/systems/{0}/sessions'.format(self.system),
                        options).readObject()
                    self.sessionId = session['sessionId']
                    # Register for cleanup() at interpreter exit.
                    connections.append(self)
                    logger.info("Created explicit session: %s", session)
                except (pulljson.JSONParseError) as e:
                    raise InterfaceError(
                        e.code, "Error reading JSON response: " + e.msg)
    def close(self):
        """ Closes an Explicit Session using the REST API for Teradata
        Database """
        # hasattr guard: __del__ may run before __init__ finished.
        if hasattr(self, 'sessionId') and self.sessionId is not None:
            with self.template.connect() as conn:
                try:
                    conn.delete(
                        '/systems/{0}/sessions/{1}'.format(
                            self.system, self.sessionId))
                except InterfaceError as e:
                    # Ignore if the session is already closed.
                    if e.code != 404:
                        raise
            logger.info("Closing session: %s", self.sessionId)
            self.sessionId = None
            connections.remove(self)
        for cursor in list(self.cursors):
            cursor.close()
    def commit(self):
        """Commit the open transaction (ANSI COMMIT or Teradata ET)."""
        with self.cursor() as cursor:
            if self.transactionMode == 'ANSI':
                cursor.execute("COMMIT")
            else:
                cursor.execute("ET")
    def rollback(self):
        """Roll back the open transaction, ignoring the expected
        user-generated abort error raised by the ROLLBACK itself."""
        with self.cursor() as cursor:
            try:
                cursor.execute("ROLLBACK")
            except DatabaseError as e:
                if e.code != ERROR_USER_GENERATED_TRANSACTION_ABORT:
                    raise
    def cursor(self):
        """Return a new RestCursor bound to this connection."""
        return RestCursor(self)
    def __del__(self):
        self.close()
    def __enter__(self):
        return self
    def __exit__(self, t, value, traceback):
        self.close()
connect = RestConnection
class RestCursor (util.Cursor):
    """Cursor implementation on top of the Teradata REST API.

    Results are streamed through a pulljson.JSONPullParser rather than
    being fully materialized.
    """
    def __init__(self, connection):
        self.conn = None
        util.Cursor.__init__(
            self, connection, connection.dbType, connection.dataTypeConverter)
        # A dedicated HTTP connection per cursor.
        self.conn = connection.template.connect()
        connection.cursors.append(self)
    def callproc(self, procname, params, queryTimeout=None):
        """Call a stored procedure, returning an OutParams wrapper.

        NOTE(review): if *params* is None, ``len(outparams)`` below raises
        TypeError -- callers appear to always pass a parameter list; verify.
        """
        inparams = None
        outparams = None
        count = 0
        # Build "CALL proc (?, ?, ...)" with one marker per parameter.
        query = "CALL {} (".format(procname)
        if params is not None:
            inparams = [[]]
            outparams = []
            for p in params:
                if count > 0:
                    query += ", "
                if isinstance(p, InOutParam):
                    inparams[0].append(p.inValue)
                    # outparams.append(p.inValue)
                elif isinstance(p, OutParam):
                    outparams.append(None)
                else:
                    inparams[0].append(p)
                count += 1
                query += "?"
        query += ")"
        outparams = self._handleResults(self._execute(
            query, inparams, outparams, queryTimeout=queryTimeout),
            len(outparams) > 0)
        return util.OutParams(params, self.dbType, self.converter, outparams)
    def close(self):
        """Release the cursor's HTTP connection."""
        if self.conn:
            self.conn.close()
    def execute(self, query, params=None, queryTimeout=None):
        """Execute a single statement; *params* is one parameter set."""
        if params is not None:
            # Wrap the single set so _execute always sees a list of sets.
            params = [params]
        self._handleResults(
            self._execute(query, params, queryTimeout=queryTimeout))
        return self
    def executemany(self, query, params, batch=False, queryTimeout=None):
        """Execute a statement for each parameter set in *params*."""
        self._handleResults(
            self._execute(query, params, batch=batch,
                          queryTimeout=queryTimeout))
        return self
    def _handleResults(self, results, hasOutParams=False):
        """Parse the response envelope and position on the first result set.

        Returns the stored-procedure output parameters when
        *hasOutParams* is True, else None.
        """
        self.results = results
        try:
            results.expectObject()
            self.queueDuration = results.expectField(
                "queueDuration", pulljson.NUMBER)
            self.queryDuration = results.expectField(
                "queryDuration", pulljson.NUMBER)
            logger.debug("Durations reported by REST service: Queue Duration: "
                         "%s, Query Duration: %s", self.queueDuration,
                         self.queryDuration)
            results.expectField("results", pulljson.ARRAY)
            results.expectObject()
            return self._handleResultSet(results, hasOutParams)
        except (pulljson.JSONParseError) as e:
            raise InterfaceError(
                e.code, "Error reading JSON response: " + e.msg)
    def _execute(self, query, params=None, outParams=None, batch=False,
                 queryTimeout=None):
        """POST the query to the REST service, returning the raw parser."""
        options = {}
        options['query'] = query
        options['format'] = 'array'
        options['includeColumns'] = 'true'
        options['rowLimit'] = 0
        if params is not None:
            # Convert every value of every parameter set to its JSON form.
            options['params'] = list(
                list(_convertParam(p) for p in paramSet)
                for paramSet in params)
            options['batch'] = batch
        if outParams is not None:
            options['outParams'] = outParams
        if not self.connection.implicit:
            options['session'] = int(self.connection.sessionId)
        if queryTimeout is not None:
            options['queryTimeout'] = queryTimeout
            options['queueTimeout'] = queryTimeout
        return self.conn.post('/systems/{0}/queries'.format(
            self.connection.system), options)
    def _handleResultSet(self, results, hasOutParams=False):
        """Read one result-set object, populating description/types/iterator.

        Returns the output parameters array when *hasOutParams* is True.
        """
        outParams = None
        if hasOutParams:
            outParams = results.expectField(
                "outParams", pulljson.ARRAY, readAll=True)
            self.resultSet = None
        else:
            try:
                self.resultSet = results.expectField(
                    "resultSet", pulljson.BOOLEAN)
            except pulljson.JSONParseError:
                # Workaround for Batch mode and Stored procedures which don't
                # include a resultSet.
                self.resultSet = None
        if self.resultSet:
            index = 0
            self.columns = {}
            self.description = []
            self.types = []
            self.rowcount = -1
            self.rownumber = None
            # Build DB-API description tuples from the column metadata.
            for column in results.expectField("columns", pulljson.ARRAY):
                self.columns[column["name"].lower()] = index
                type_code = self.converter.convertType(
                    self.dbType, column["type"])
                self.types.append((column["type"], type_code))
                self.description.append(
                    (column["name"], type_code, None, None, None, None, None))
                index += 1
            # Rows are streamed lazily from the "data" array.
            self.iterator = results.expectField("data", pulljson.ARRAY)
        else:
            self.columns = None
            self.description = None
            self.rownumber = None
            self.rowcount = -1
            if self.resultSet is not None:
                # Non-select statements report an affected-row count instead.
                self.rowcount = results.expectField("count")
        return outParams
    def nextset(self):
        """Advance to the next result set, draining the current one first."""
        for row in self:  # @UnusedVariable
            pass
        for event in self.results:
            if event.type == pulljson.START_OBJECT:
                self._handleResultSet(self.results)
                return True
def _convertParam(p):
    """Convert a query parameter to its JSON-transportable form.

    Strings and None pass through unchanged, bytearrays become lowercase
    hex strings, and everything else is stringified.
    """
    if p is None or util.isString(p):
        return p
    if isinstance(p, bytearray):
        return ''.join('{:02x}'.format(byte) for byte in p)
    return unicode(p)
class RestTemplate:
    """Immutable bundle of connection settings and HTTP headers shared by
    every HttpConnection created from it."""
    def __init__(self, protocol, host, port, webContext, username, password,
                 sslContext=None, verifyCerts=True, accept=None):
        self.protocol = protocol
        self.host = host
        self.port = port
        self.webContext = webContext
        # Pre-compute the Basic auth token once.
        credentials = (username + ":" + password).encode('utf_8')
        token = base64.b64encode(credentials).decode('ascii')
        headers = {}
        headers['Content-Type'] = 'application/json'
        if accept is not None:
            headers['Accept'] = accept
        headers['Authorization'] = 'Basic ' + token
        self.headers = headers
        if sslContext is None and not verifyCerts:
            # Caller explicitly opted out of certificate verification.
            sslContext = ssl.create_default_context()
            sslContext.check_hostname = False
            sslContext.verify_mode = ssl.CERT_NONE
        self.sslContext = sslContext
    def connect(self):
        """Open a new HTTP(S) connection using this template's settings."""
        return HttpConnection(self)
class HttpConnection:
    """A single HTTP(S) connection to the REST service, configured from a
    RestTemplate.

    The initial connect is retried up to MAX_CONNECT_RETRIES times when an
    intermittent SSL "EOF occurred in violation of protocol" error occurs.
    """
    def __init__(self, template):
        self.template = template
        if template.protocol.lower() == "http":
            self.conn = httplib.HTTPConnection(template.host, template.port)
        elif template.protocol.lower() == "https":
            self.conn = httplib.HTTPSConnection(
                template.host, template.port, context=template.sslContext)
        else:
            raise InterfaceError(
                REST_ERROR, "Unknown protocol: %s" % template.protocol)
        failureCount = 0
        while True:
            try:
                self.conn.connect()
                break
            except Exception as e:
                # Only the spurious SSL EOF error is worth retrying; any
                # other failure (or too many retries) is surfaced at once.
                eofError = "EOF occurred in violation of protocol" in str(e)
                failureCount += 1
                if not eofError or failureCount > MAX_CONNECT_RETRIES:
                    raise InterfaceError(
                        REST_ERROR,
                        "Error accessing {}:{}. ERROR: {}".format(
                            template.host, template.port, e))
                else:
                    logger.debug(
                        "Received an \"EOF occurred in violation of "
                        "protocol\" error, retrying connection.")
    def close(self):
        if self.conn:
            self.conn.close()
    def post(self, uri, data=None):
        """POST *data* (JSON-serialized) to *uri*.

        The default was a mutable ``{}``; ``None`` is equivalent because
        both are falsy and produce an empty request body in send().
        """
        return self.send(uri, 'POST', data)
    def delete(self, uri):
        """DELETE *uri*; returns the response parser like post()/get()."""
        return self.send(uri, 'DELETE', None)
    def get(self, uri):
        """GET *uri*."""
        return self.send(uri, 'GET', None)
    def __enter__(self):
        return self
    def __exit__(self, t, value, traceback):
        self.close()
    def send(self, uri, method, data):
        """Issue *method* on *uri*, returning a JSONPullParser over the body.

        Raises DatabaseError for database errors reported by the service
        (HTTP 420) and InterfaceError for any other failure.
        """
        response = None
        url = self.template.webContext + uri
        try:
            start = time.time()
            payload = json.dumps(data).encode('utf8') if data else None
            logger.trace("%s: %s, %s", method, url, payload)
            self.conn.request(method, url, payload, self.template.headers)
            response = self.conn.getresponse()
            duration = time.time() - start
            logger.debug("Roundtrip Duration: %.3f seconds", duration)
        except Exception as e:
            raise InterfaceError(
                REST_ERROR, 'Error accessing {}. ERROR: {}'.format(url, e))
        if response.status < 300:
            return pulljson.JSONPullParser(response)
        if response.status < 400:
            # Fixed: the status code was never interpolated into the message
            # (the original string lacked the .format() call).
            raise InterfaceError(
                response.status,
                "HTTP Status: {}. ERROR: Redirection not supported.".format(
                    response.status))
        else:
            msg = response.read().decode("utf8")
            try:
                errorDetails = json.loads(msg)
            except Exception:
                # Body was not JSON; report it verbatim.
                raise InterfaceError(
                    response.status, "HTTP Status: " + str(response.status) +
                    ", URL: " + url + ", Details: " + str(msg))
            if response.status == HTTP_STATUS_DATABASE_ERROR:
                raise DatabaseError(
                    int(errorDetails['error']), errorDetails['message'])
            else:
                raise InterfaceError(response.status, "HTTP Status: " + str(
                    response.status) + ", URL: " + url +
                    ", Details: " + str(errorDetails))
| {
"repo_name": "fxstein/PyTd",
"path": "teradata/tdrest.py",
"copies": "1",
"size": "15604",
"license": "mit",
"hash": -5716982777036947000,
"line_mean": 36.0641330166,
"line_max": 79,
"alpha_frac": 0.5640861318,
"autogenerated": false,
"ratio": 4.551925320886815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5616011452686815,
"avg_score": null,
"num_lines": null
} |
'''An implementation of the Raft consensus algorithm.
References:
[1] Ongaro, Diego and Ousterhout, John. "In Search of an
Understandable Consensus Algorithm (Extended Version)". May 2014
<https://ramcloud.stanford.edu/raft.pdf>
[2] Howard, Heidi. "ARC: Analysis of Raft Consensus." July 2014.
<http://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-857.pdf>
'''
from kontiki.persist import MatchAfterTooHigh
from kontiki.fundamentals import majorityMedian
from kontiki.rpc_objects import LogEntry
from twisted.python import log
from twisted.internet import reactor, defer, task
import random
class State(object):
    '''A Raft participant.
    `peers`: identities for the server's peers
    `persister`: an object that implements the Persist protocol and
    can save and restore to stable storage
    `applyCommand`: callable invoked with the command to apply
    '''
    def __init__(self, server, identity, peers, persister, applyCommand,
                 electionTimeoutRange, commitIndex=0, lastApplied=0):
        self.server = server
        self.identity = identity
        self.peers = peers
        self.persister = persister
        self.applyCommand = applyCommand
        self.electionTimeoutRange = electionTimeoutRange
        self.commitIndex = commitIndex
        self.lastApplied = lastApplied
        # Outstanding deferreds, cancelled en masse on a state change.
        self.pending = set()
    @classmethod
    def fromState(cls, electionTimeoutRange, server, state):
        """Construct this state from an existing one, cancelling the old
        state's pending work and carrying over its volatile/shared fields."""
        state.cancelAll()
        return cls(electionTimeoutRange=electionTimeoutRange,
                   server=server,
                   identity=state.identity,
                   peers=state.peers,
                   applyCommand=state.applyCommand,
                   persister=state.persister,
                   commitIndex=state.commitIndex,
                   lastApplied=state.lastApplied)
    def logFailure(self, failure, call=None):
        """Errback that logs cancellations.

        Failure.trap re-raises the failure unless it is a CancelledError,
        so only cancellations reach the log call; other errors propagate.
        """
        failure.trap(defer.CancelledError)
        log.msg('Failure in call %s: %s' % (call or "<Unknown>", failure))
    def track(self, deferred):
        """Register *deferred* in self.pending so cancelAll() can reach it;
        it removes itself on completion."""
        self.pending.add(deferred)
        def removeFromPending(result):
            if deferred in self.pending:
                self.pending.remove(deferred)
            return result
        deferred.addBoth(removeFromPending)
        return deferred
    def cancelAll(self):
        """Cancel every tracked deferred."""
        # iterate over a shallow copy self.pending as the cancellation
        # will remove deferreds from it
        for pending in self.pending.copy():
            pending.cancel()
    def begin(self):
        """Entry point invoked when this state becomes active."""
        return self.applyCommitted()
    def applyCommitted(self):
        """Apply all committed-but-unapplied log entries to the state
        machine, bumping lastApplied once per successful command."""
        def apply(logEntries):
            def incrementLastApplied(ignored):
                self.lastApplied += 1
            commandDeferreds = []
            for entry in logEntries:
                cmd = defer.maybeDeferred(self.applyCommand, entry.command)
                cmd.addCallback(incrementLastApplied)
                cmd.addErrback(self.logFailure)
                commandDeferreds.append(cmd)
            return defer.DeferredList(commandDeferreds)
        d = self.track(self.persister.committableLogEntries(self.lastApplied,
                                                            self.commitIndex))
        d.addCallback(apply)
        d.addErrback(self.logFailure)
        return d
    def becomeFollower(self, term):
        """Persist *term*, clear the vote, and switch the server to the
        Follower state.  Fires with the new term."""
        prep = [self.persister.setCurrentTerm(term),
                self.persister.voteFor(None)]
        resultsDeferred = defer.gatherResults(prep)
        def changeServerStateToFollower(results):
            newTerm, identity = results
            assert identity is None
            changeStateDeferred = self.server.changeState(Follower)
            changeStateDeferred.addCallback(lambda ignore: newTerm)
            return changeStateDeferred
        resultsDeferred.addCallback(changeServerStateToFollower)
        return resultsDeferred
    def willBecomeFollower(self, term, currentTerm=None):
        """Become a follower iff *term* is newer than our current term.

        Fires with (resultTerm, becameFollower).  *currentTerm* may be
        passed to skip the persister lookup.
        """
        if currentTerm is None:
            currentTermDeferred = self.persister.getCurrentTerm()
        else:
            currentTermDeferred = defer.succeed(currentTerm)
        def judge(currentTerm):
            if term > currentTerm:
                d = self.becomeFollower(term)
                result = True
            else:
                d = defer.succeed(currentTerm)
                result = False
            def formatResult(resultTerm):
                log.msg('willBecomeFollower (currentTerm %s) '
                        'for term %s: %s' % (resultTerm, term, result))
                return resultTerm, result
            d.addCallback(formatResult)
            return d
        currentTermDeferred.addCallback(judge)
        currentTermDeferred.addErrback(self.logFailure)
        return currentTermDeferred
    def candidateIdOK(self, candidateId):
        """True when we have not voted this term or already voted for
        *candidateId* (section 5.2 of [1])."""
        votedForDeferred = self.persister.votedFor()
        def judgeCandidateId(votedFor):
            ok = votedFor is None or votedFor == candidateId
            log.msg('candidateIdOK: %s' % ok)
            return ok
        votedForDeferred.addCallback(judgeCandidateId)
        votedForDeferred.addErrback(self.logFailure)
        return votedForDeferred
    def candidateLogUpToDate(self, lastLogIndex, lastLogTerm):
        """True when the candidate's log is at least as up to date as ours."""
        # Section 5.4.1
        currentTermDeferred = self.persister.getCurrentTerm()
        def gotCurrentTerm(currentTerm):
            if currentTerm == lastLogTerm:
                # Same last term: the longer log wins.
                def compareLastIndices(lastIndex):
                    ok = lastIndex <= lastLogIndex
                    log.msg('candidateLogUpToDate: terms equal, '
                            'logs ok: %s' % ok)
                    return ok
                judgementDeferred = self.persister.getLastIndex()
                judgementDeferred.addCallback(compareLastIndices)
            else:
                judgementDeferred = self.persister.lastIndexLETerm(lastLogTerm)
                def report(result):
                    log.msg('candidateLogUpToDate: term is greater, '
                            'logs ok: %s' % result)
                    return result
                judgementDeferred.addCallback(report)
            return judgementDeferred
        currentTermDeferred.addCallback(gotCurrentTerm)
        return currentTermDeferred
    def appendEntries(self, term, leaderId, prevLogIndex,
                      prevLogTerm, entries, leaderCommit):
        """AppendEntries RPC in a non-follower state: defer to a newer
        leader by becoming a follower and re-dispatching, else reject."""
        d = self.willBecomeFollower(term)
        def rerunOrReturn(termAndBecameFollower):
            newTerm, becameFollower = termAndBecameFollower
            if becameFollower:
                # Re-dispatch to the (new) Follower state's handler.
                return self.server.state.appendEntries(newTerm,
                                                       leaderId,
                                                       prevLogIndex,
                                                       prevLogTerm,
                                                       entries,
                                                       leaderCommit)
            else:
                log.msg("Rejecting appendEntries from %s because "
                        "term %s <= currentTerm %s"
                        % (leaderId, term, newTerm))
                return newTerm, False
        d.addCallback(rerunOrReturn)
        d.addErrback(self.logFailure)
        return d
    def requestVote(self, term, candidateId, lastLogIndex, lastLogTerm):
        """RequestVote RPC handler; fires with (term, voteGranted)."""
        # RPC
        currentTermDeferred = self.persister.getCurrentTerm()
        def considerTerm(currentTerm):
            if term < currentTerm:
                # Stale candidate.
                return currentTerm, False
            else:
                criteria = [self.candidateIdOK(candidateId),
                            self.candidateLogUpToDate(lastLogIndex,
                                                      lastLogTerm)]
                resultsDeferred = defer.gatherResults(criteria)
                def determineVote(results):
                    if all(results) and term > currentTerm:
                        log.msg("requestVote: candidate %s is OK"
                                % candidateId)
                        setVote = self.persister.voteFor(candidateId)
                        def becomeFollower(vote):
                            assert vote == candidateId
                            return self.becomeFollower(term)
                        def concedeVote(newTerm):
                            assert newTerm == term, (newTerm, term)
                            return newTerm, True
                        setVote.addCallback(becomeFollower)
                        setVote.addCallback(concedeVote)
                        return setVote
                    else:
                        log.msg('requestVote: candidate %s is not OK, '
                                'results: %s, '
                                'term (%d) > currentTerm (%d): %r, '
                                'May become follower ' % (candidateId,
                                                          results,
                                                          term, currentTerm,
                                                          term > currentTerm))
                        return self.willBecomeFollower(term,
                                                       currentTerm=currentTerm)
                resultsDeferred.addCallback(determineVote)
                return resultsDeferred
        currentTermDeferred.addCallback(considerTerm)
        return currentTermDeferred
class StartsElection(State):
    """State that schedules a transition to Candidate when no traffic
    arrives within a randomized election timeout (section 5.2 of [1])."""
    # Pending IDelayedCall for the election timeout, if any.
    becomeCandidateTimeout = None
    # Class attribute so tests can substitute a deterministic clock.
    clock = reactor
    def begin(self):
        self.rng = random.Random()
        startupDeferred = super(StartsElection, self).begin()
        startupDeferred.addCallback(self.resetElectionTimeout)
        return startupDeferred
    def cancelBecomeCandidateTimeout(self):
        """Cancel the scheduled election timeout if it is still pending."""
        if (self.becomeCandidateTimeout is not None
                and self.becomeCandidateTimeout.active()):
            self.becomeCandidateTimeout.cancel()
    def cancelAll(self):
        self.cancelBecomeCandidateTimeout()
        super(StartsElection, self).cancelAll()
    def resetElectionTimeout(self, ignored=None):
        """Restart the randomized countdown to becoming a Candidate."""
        self.cancelBecomeCandidateTimeout()
        self.electionTimeout = self.rng.uniform(*self.electionTimeoutRange)
        def timeoutOccured():
            log.msg('Election timeout occurred')
            return self.server.changeState(Candidate)
        d = self.clock.callLater(self.electionTimeout, timeoutOccured)
        self.becomeCandidateTimeout = d
class Follower(StartsElection):
    '''A Raft follower.'''
    # Identity of the leader we last heard from; used to forward commands.
    leaderId = None
    def begin(self):
        log.msg('Became follower')
        return super(Follower, self).begin()
    def appendEntries(self, term, leaderId, prevLogIndex, prevLogTerm,
                      entries, leaderCommit):
        """AppendEntries RPC handler; fires with (currentTerm, success).

        Implements receiver rules 1-5 of Figure 2 in [1]: reject stale
        terms or mismatched logs, otherwise append, advance commitIndex
        and apply newly committed entries.
        """
        # RPC
        # 1 & 2
        criteria = [self.persister.getCurrentTerm(),
                    self.persister.indexMatchesTerm(index=prevLogIndex,
                                                    term=prevLogTerm)]
        def evaluateCriteria(results):
            currentTerm, indexMatchesTerm = results
            if term < currentTerm or not indexMatchesTerm:
                log.msg('Rejecting appendEntries:'
                        ' term < currentTerm %s indexMatchesTerm %s'
                        % (term < currentTerm, indexMatchesTerm))
                return defer.succeed((currentTerm, False))
            # Valid leader traffic: postpone the election timeout.
            self.resetElectionTimeout()
            self.leaderId = leaderId
            if term > currentTerm:
                updateDeferred = self.persister.setCurrentTerm(term)
            elif term == currentTerm:
                updateDeferred = defer.succeed(currentTerm)
            else:
                assert False, "Why is term not >= currentTerm here?"
            def handleEntries(currentTerm):
                assert currentTerm == term
                d = self.persister.matchAndAppendNewLogEntries(
                    matchAfter=prevLogIndex, entries=entries)
                d.addCallback(lambda ignore: currentTerm)
                return d
            updateDeferred.addCallback(handleEntries)
            def getLastCommitIndex(currentTerm):
                d = self.persister.getLastIndex()
                def formatResult(lastCommitIndex):
                    return currentTerm, lastCommitIndex
                d.addCallback(formatResult)
                return d
            updateDeferred.addCallback(getLastCommitIndex)
            def updateCommitIndexAndApplyCommitted(currentTermAndlastIndex):
                currentTerm, lastIndex = currentTermAndlastIndex
                # Rule 5: commitIndex = min(leaderCommit, last new index).
                if leaderCommit > self.commitIndex:
                    self.commitIndex = min(leaderCommit, lastIndex)
                stateMachineDeferred = self.applyCommitted()
                def returnSuccess(ignored):
                    return currentTerm, True
                stateMachineDeferred.addCallback(returnSuccess)
                return stateMachineDeferred
            updateDeferred.addCallback(updateCommitIndexAndApplyCommitted)
            def returnFalseOnMatchAfterTooHigh(failure):
                # Log gap: report failure so the leader decrements nextIndex.
                failure.trap(MatchAfterTooHigh)
                return currentTerm, False
            updateDeferred.addErrback(returnFalseOnMatchAfterTooHigh)
            return updateDeferred
        resultsDeferred = defer.gatherResults(criteria)
        resultsDeferred.addCallback(evaluateCriteria)
        return resultsDeferred
    def command(self, command):
        """Forward a client command to the current leader."""
        d = self.track(self.peers[self.leaderId].callRemote('command',
                                                            command))
        d.addErrback(self.logFailure, call="command")
        return d
class Candidate(StartsElection):
    """A Raft candidate: increments the term, votes for itself, solicits
    votes from its peers and becomes Leader on a majority (section 5.2)."""
    def begin(self):
        log.msg('Became candidate')
        # TODO: Should bother to apply committable entries?
        startupDeferred = super(Candidate, self).begin()
        startupDeferred.addCallback(self.conductElection)
        return startupDeferred
    def prepareForElection(self):
        """Reset the timeout, bump the persisted term and vote for self.

        Fires with (currentTerm, lastLogIndex, lastLogTerm) once all
        persistent state has been updated.
        """
        self.resetElectionTimeout()
        self.votes = 0
        # these are purely for inspection
        self.cachedCurrentTerm = None
        self.cachedLastLogIndex = None
        self.cachedLastLogTerm = None
        prep = [self.persister.setCurrentTerm(None, increment=True),
                self.persister.getLastIndex(),
                self.persister.getLastTerm(),
                self.persister.voteFor(self.identity)]
        def cacheAndProcess(results):
            assert results[-1] == self.identity
            # Count our own vote.
            self.votes += 1
            processed = (self.cachedCurrentTerm,
                         self.cachedLastLogIndex,
                         self.cachedLastLogTerm) = results[:-1]
            return processed
        waitForAllPreparations = defer.gatherResults(prep)
        waitForAllPreparations.addCallback(cacheAndProcess)
        return waitForAllPreparations
    def willBecomeLeader(self, votesSoFar):
        """Switch to the Leader state once a majority of votes is in.

        Fires with True when the state change happened, False otherwise.
        Fixed: the original discarded the changeState deferred and always
        returned succeed(False), so the transition's result (and any error
        in it) never propagated to the caller.
        """
        majority = len(self.peers) / 2 + 1
        log.msg('willBecomeLeader: votesSoFar: %d, majority: %s' % (votesSoFar,
                                                                    majority))
        if votesSoFar >= majority:
            changeStateDeferred = self.server.changeState(Leader)
            changeStateDeferred.addCallback(lambda ignore: True)
            return changeStateDeferred
        return defer.succeed(False)
    def completeRequestVote(self, result, peer):
        """Handle one RequestVote reply: step down on a newer term,
        otherwise tally the vote and maybe become leader."""
        term, voteGranted = result
        log.msg('completeRequestVote from %r: term: %r, voteGranted %r'
                % (peer.identity, term, voteGranted))
        becameFollowerDeferred = self.willBecomeFollower(term)
        def shouldBecomeLeader(termAndBecameFollower):
            _, becameFollower = termAndBecameFollower
            if not becameFollower and voteGranted:
                self.votes += 1
                return self.willBecomeLeader(self.votes)
            else:
                log.msg('Lost Election')
        return becameFollowerDeferred.addCallback(shouldBecomeLeader)
    def sendRequestVote(self, peer, term, lastLogIndex, lastLogTerm):
        """Issue the RequestVote RPC to a single peer."""
        log.msg('sendRequestVote to %s; currentTerm %s' % (peer.identity,
                                                           term))
        d = self.track(peer.callRemote('requestVote',
                                       term,
                                       self.identity,
                                       lastLogIndex,
                                       lastLogTerm))
        d.addCallback(self.completeRequestVote, peer)
        d.addErrback(self.logFailure, call="requestVote")
        return d
    def broadcastRequestVote(self, cached, returnDeferred=False):
        """Ask every peer for its vote; *cached* is the tuple produced by
        prepareForElection()."""
        peerPoll = [self.sendRequestVote(peer, *cached)
                    for peer in self.peers.values()]
        resultsDeferred = defer.gatherResults(peerPoll)
        if returnDeferred:
            # this is for unit testing, where we want to make sure all
            # deferreds are accessible for clean up by trial.
            # generally we do NOT want to return this, as it will be
            # sent downward through self.conductElection, through
            # self.begin to the server's begin, which is guarded by
            # the RPC DeferredLock. if this deferred DID reach that
            # lock, then no RPC calls could be received during an
            # election!
            return resultsDeferred
    def conductElection(self, ignored=None, *args, **kwargs):
        """Prepare persistent state, then poll the peers for votes."""
        log.msg('Conducting election')
        preparationDeferred = self.prepareForElection()
        preparationDeferred.addCallback(self.broadcastRequestVote,
                                        *args, **kwargs)
        return preparationDeferred
    def command(self, command):
        # TODO: a deferred that lasts until an election has been won?
        return False
class Leader(State):
    '''A Raft leader: replicates log entries to followers via periodic
    AppendEntries heartbeats and advances the commit index on majority
    acknowledgement (sections 5.3-5.4 of [1]).'''
    heartbeatLoopingCall = None
    # Upper cap on the heartbeat interval, in seconds.
    lowerBound = 0.02
    def __init__(self, *args, **kwargs):
        log.msg('Became leader')
        super(Leader, self).__init__(*args, **kwargs)
        etRange = self.electionTimeoutRange
        self.heartbeatInterval = self.calculateHeartbeatInterval(etRange)
    def calculateHeartbeatInterval(self, electionTimeoutRange,
                                   lowerBound=None):
        """Pick a heartbeat interval well below the election timeout.

        Fixed: the original ignored the electionTimeoutRange argument and
        always read self.electionTimeoutRange; the only call site passes
        that same value, so behavior there is unchanged.
        """
        if lowerBound is None:
            lowerBound = self.lowerBound
        lowerTimeoutBound, _ = electionTimeoutRange
        return min(lowerTimeoutBound / 10.0, lowerBound)
    def begin(self, lowerBound=None):
        startupDeferred = super(Leader, self).begin()
        startupDeferred.addCallback(self.postElection)
        return startupDeferred
    def cancelAll(self):
        if self.heartbeatLoopingCall and self.heartbeatLoopingCall.running:
            self.heartbeatLoopingCall.stop()
        super(Leader, self).cancelAll()
    def postElection(self, ignored=None):
        """Initialize per-peer nextIndex/matchIndex and start heartbeats."""
        # Fixed: replaced a leftover debug ``print`` statement with log.msg.
        log.msg('Heartbeat interval: %s' % (self.heartbeatInterval,))
        d = task.LoopingCall(self.broadcastAppendEntries)
        self.heartbeatLoopingCall = d
        lastLogIndexDeferred = self.persister.getLastIndex()
        def getLastLogIndex(lastLogIndex):
            self.nextIndex = dict.fromkeys(self.peers, lastLogIndex + 1)
            self.matchIndex = dict.fromkeys(self.peers, 0)
            self.matchIndex[self.identity] = lastLogIndex
        lastLogIndexDeferred.addCallback(getLastLogIndex)
        def start(ignored):
            self.heartbeatLoopingCall.start(self.heartbeatInterval)
        lastLogIndexDeferred.addCallback(start)
        return lastLogIndexDeferred
    def updateCommitIndex(self):
        """Advance commitIndex to the majority-replicated index; True when
        it moved."""
        newCommitIndex = majorityMedian(self.matchIndex.values())
        if newCommitIndex > self.commitIndex:
            self.commitIndex = newCommitIndex
            return True
        return False
    def completeAppendEntries(self, result, identity, lastLogIndex):
        """Handle one AppendEntries reply: step down on a newer term, back
        off nextIndex on rejection, otherwise advance replication state."""
        term, success = result
        getCurrentTermDeferred = self.persister.getCurrentTerm()
        def compareTerm(currentTerm):
            if currentTerm < term:
                return self.willBecomeFollower(term)
            elif not success:
                # Follower's log did not match; retry further back.
                self.nextIndex[identity] -= 1
                # explicit retry maybe?
                # return self.sendAppendEntries(identity, self.peers[identity])
            else:
                self.nextIndex[identity] = lastLogIndex + 1
                self.matchIndex[identity] = lastLogIndex
                if self.updateCommitIndex():
                    # TODO: the leader can't commit any log entries
                    # with terms < currentTerm unless it's committing
                    # an entry from currentTerm. this may result in a
                    # livelock (see [2], pg 52)
                    return self.applyCommitted()
        return getCurrentTermDeferred.addCallback(compareTerm)
    def sendAppendEntries(self, identity, perspective):
        """Send the entries after the peer's nextIndex to one peer.

        The remote call is fire-and-forget: the returned deferred fires
        once the request has been issued, not when it completes.
        """
        prevLogIndex = self.nextIndex[identity] - 1
        viewDeferred = self.persister.appendEntriesView(prevLogIndex)
        def sendPeerEntries(result):
            currentTerm, lastLogIndex, prevLogTerm, entries = result
            commitIndex = self.commitIndex
            d = self.track(perspective.callRemote('appendEntries',
                                                  term=currentTerm,
                                                  leaderId=self.identity,
                                                  prevLogIndex=prevLogIndex,
                                                  prevLogTerm=prevLogTerm,
                                                  entries=entries,
                                                  leaderCommit=commitIndex))
            d.addCallback(self.completeAppendEntries,
                          identity=identity,
                          lastLogIndex=lastLogIndex)
            d.addErrback(self.logFailure, call="appendEntries")
        viewDeferred.addCallback(sendPeerEntries)
        return viewDeferred
    def broadcastAppendEntries(self, ignored=None):
        """Heartbeat: replicate to every peer."""
        for identity, perspective in self.peers.items():
            self.sendAppendEntries(identity, perspective)
    def command(self, command):
        """Append a client command to the log and replicate it.

        Fires with True once the entry is persisted and the broadcast has
        been initiated.  Fixed: the original never returned addDeferred
        from addEntries, so the returned deferred fired with None before
        the entry was persisted and the True result was lost.
        """
        newEntriesDeferred = self.persister.getCurrentTerm()
        def addEntries(term):
            entries = [LogEntry(term=term,
                                command=command)]
            addDeferred = self.persister.addNewEntries(entries)
            def updateMyMatchIndex(matchIndex):
                self.matchIndex[self.identity] = matchIndex
            addDeferred.addCallback(updateMyMatchIndex)
            addDeferred.addCallback(self.broadcastAppendEntries)
            addDeferred.addCallback(lambda _: True)
            return addDeferred
        return newEntriesDeferred.addCallback(addEntries)
| {
"repo_name": "matthewnorman/kon_tiki",
"path": "kontiki/raft.py",
"copies": "2",
"size": "22848",
"license": "bsd-3-clause",
"hash": -5710742842367078000,
"line_mean": 37.0166389351,
"line_max": 79,
"alpha_frac": 0.584602591,
"autogenerated": false,
"ratio": 4.751091703056769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 601
} |
# An implementation of the scaler interface for dials.scale
import logging
import math
import os
import bz2
from orderedset import OrderedSet
import libtbx
import numpy as np
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import FileHandler
from xia2.lib.bits import auto_logfiler
from xia2.Handlers.Phil import PhilIndex
from xia2.lib.SymmetryLib import sort_lattices
from xia2.Handlers.Streams import banner
from xia2.Handlers.CIF import CIF, mmCIF
from xia2.Modules.Scaler.CommonScaler import CommonScaler as Scaler
from xia2.Wrappers.Dials.Scale import DialsScale
from xia2.Wrappers.Dials.Merge import DialsMerge
from xia2.Wrappers.CCP4.CCP4Factory import CCP4Factory
from xia2.Modules.AnalyseMyIntensities import AnalyseMyIntensities
from xia2.Modules.Scaler.CCP4ScalerHelpers import (
SweepInformationHandler,
mosflm_B_matrix,
)
from xia2.Wrappers.Dials.Symmetry import DialsSymmetry
from xia2.Wrappers.Dials.Reindex import Reindex as DialsReindex
from xia2.Wrappers.Dials.AssignUniqueIdentifiers import DialsAssignIdentifiers
from xia2.Wrappers.Dials.SplitExperiments import SplitExperiments
from xia2.Wrappers.Dials.ExportMtz import ExportMtz
from xia2.Wrappers.Dials.ExportMMCIF import ExportMMCIF
from xia2.Wrappers.Dials.TwoThetaRefine import TwoThetaRefine
from xia2.Handlers.Syminfo import Syminfo
from dxtbx.serialize import load
from dials.util.batch_handling import calculate_batch_offsets
from dials.util.export_mtz import match_wavelengths
from dials.algorithms.scaling.plots import plot_absorption_surface
from dials.array_family import flex
import dials.util.version
from cctbx.sgtbx import lattice_symmetry_group
from iotbx import mtz
import iotbx.cif
from iotbx.scalepack import no_merge_original_index
from iotbx.scalepack.merge import write as merge_scalepack_write
logger = logging.getLogger("xia2.Modules.Scaler.DialsScaler")
class DialsScaler(Scaler):
def __init__(self, *args, **kwargs):
    """Initialise the DIALS scaler and its helper/factory delegates."""
    super().__init__(*args, **kwargs)
    # per-wavelength merged mtz filenames, filled in by _scale()
    self._scalr_scaled_refl_files = {}
    self._scalr_statistics = {}
    self._factory = CCP4Factory()  # allows lots of post-scaling calculations
    self._helper = DialsScalerHelper()
    # dials.scale wrapper; created fresh for each scaling round
    self._scaler = None
    # output of the most recent scaling run, used to resume scaling
    self._scaled_experiments = None
    self._scaled_reflections = None
    self._no_times_scaled = 0
    self._scaler_symmetry_check_count = 0
    # SweepInformation objects, populated in _scale()
    self.sweep_infos = []
def set_working_directory(self, working_directory):
    """Set the working directory here and on the delegate objects."""
    self._working_directory = working_directory
    # propagate to the objects that run jobs on our behalf
    for delegate in (self._factory, self._helper):
        delegate.set_working_directory(working_directory)
def _updated_dials_scaler(self):
    """Configure ``self._scaler`` from the PhilIndex parameters.

    Applies resolution limits, intensity choice, full-matrix / outlier /
    error-model options, then the options specific to the selected
    scaling model ("physical", "dose_decay", "KB" or "array").

    Returns:
        The configured scaler (also retained as ``self._scaler``).
    """
    # Sets the relevant parameters from the PhilIndex
    resolution = PhilIndex.params.xia2.settings.resolution
    self._scaler.set_resolution(d_min=resolution.d_min, d_max=resolution.d_max)

    self._scaler.set_intensities(PhilIndex.params.dials.scale.intensity_choice)

    full_matrix = PhilIndex.params.dials.scale.full_matrix
    if full_matrix in (libtbx.Auto, "auto", None):
        # Full-matrix minimisation is expensive, so only auto-enable it
        # for a small number of sweeps.
        full_matrix = len(self.sweep_infos) <= 4
    self._scaler.set_full_matrix(full_matrix)

    self._scaler.set_outlier_rejection(
        PhilIndex.params.dials.scale.outlier_rejection
    )
    self._scaler.set_outlier_zmax(PhilIndex.params.dials.scale.outlier_zmax)
    self._scaler.set_error_model(PhilIndex.params.dials.scale.error_model)
    self._scaler.set_error_model_grouping_method(
        PhilIndex.params.dials.scale.error_model_grouping
    )
    if PhilIndex.params.dials.scale.error_model_group:
        self._scaler.set_error_model_groups(
            PhilIndex.params.dials.scale.error_model_group
        )
    self._scaler.set_partiality_cutoff(
        PhilIndex.params.dials.scale.partiality_threshold
    )

    if self.get_scaler_anomalous():
        self._scaler.set_anomalous()

    # Derive default smoothing intervals from the first sweep's geometry.
    exp = load.experiment_list(self.sweep_infos[0].get_experiments())[0]
    scale_interval, decay_interval = scaling_model_auto_rules(exp)

    # Model handling
    autos = (None, "auto", libtbx.Auto)
    if PhilIndex.params.dials.scale.model in autos:
        PhilIndex.params.dials.scale.model = "physical"
    self._scaler.set_model(PhilIndex.params.dials.scale.model)

    if PhilIndex.params.dials.scale.rotation_spacing:
        scale_interval = PhilIndex.params.dials.scale.rotation_spacing

    if PhilIndex.params.dials.scale.model == "physical":
        if PhilIndex.params.dials.scale.physical_model.Bfactor_spacing:
            decay_interval = (
                PhilIndex.params.dials.scale.physical_model.Bfactor_spacing
            )
        self._scaler.set_spacing(scale_interval)
        if PhilIndex.params.dials.scale.Bfactor:
            self._scaler.set_bfactor(True, decay_interval)
        else:
            self._scaler.set_bfactor(False)
        if PhilIndex.params.dials.scale.absorption:
            self._scaler.set_absorption_correction(True)
            if PhilIndex.params.dials.scale.physical_model.absorption_level:
                self._scaler.set_absorption_level(
                    PhilIndex.params.dials.scale.physical_model.absorption_level
                )
            if PhilIndex.params.dials.scale.physical_model.lmax not in autos:
                self._scaler.set_lmax(
                    PhilIndex.params.dials.scale.physical_model.lmax
                )
            if (
                PhilIndex.params.dials.scale.physical_model.surface_weight
                not in autos
            ):
                self._scaler.set_surface_weight(
                    PhilIndex.params.dials.scale.physical_model.surface_weight
                )
        else:
            self._scaler.set_absorption_correction(False)
    elif PhilIndex.params.dials.scale.model == "dose_decay":
        self._scaler.set_spacing(scale_interval)
        if PhilIndex.params.dials.scale.absorption:
            self._scaler.set_absorption_correction(True)
            # NOTE(review): lmax is taken from decay_model here, unlike the
            # physical branch which reads physical_model.lmax — confirm
            # this is the intended PHIL scope.
            self._scaler.set_lmax(PhilIndex.params.dials.scale.decay_model.lmax)
        else:
            self._scaler.set_absorption_correction(False)
        if PhilIndex.params.dials.scale.dose_decay_model.share.decay is not None:
            # BUGFIX: was self._scale.set_shared_decay(...). self._scale is
            # the _scale() *method*, so the original raised AttributeError.
            self._scaler.set_shared_decay(
                PhilIndex.params.dials.scale.dose_decay_model.share.decay
            )
        if PhilIndex.params.dials.scale.dose_decay_model.resolution_dependence:
            # BUGFIX: was self._scale.set_resolution_dependence(
            #     PhilIndex.dials.scale.dose_decay.resolution_dependence)
            # — wrong object (the _scale method, not the scaler) and a
            # non-existent PHIL path (missing .params, wrong scope name;
            # the guard above reads dose_decay_model).
            self._scaler.set_resolution_dependence(
                PhilIndex.params.dials.scale.dose_decay_model.resolution_dependence
            )
    elif PhilIndex.params.dials.scale.model == "KB":
        # For KB model, want both Bfactor and scale terms
        self._scaler.set_bfactor(True)
    elif PhilIndex.params.dials.scale.model == "array":
        if PhilIndex.params.dials.scale.Bfactor:
            self._scaler.set_bfactor(True, scale_interval)
            self._scaler.set_decay_bins(
                PhilIndex.params.dials.scale.array_model.resolution_bins
            )
        else:
            self._scaler.set_bfactor(False)
        if PhilIndex.params.dials.scale.absorption:
            self._scaler.set_absorption_correction(True)
            self._scaler.set_array_absorption_bins(
                PhilIndex.params.dials.scale.array_model.absorption_bins
            )
        else:
            self._scaler.set_absorption_correction(False)
    return self._scaler
def _do_prescale_kb(self, experiments, reflections):
    """Pre-scale with a simple KB model so that all experiments are on a
    common scale before dials.symmetry is run.

    Returns the (scaled_experiments, scaled_reflections) filename pair.
    """
    scaler = DialsScale()
    self._scaler = scaler
    scaler.set_model("KB")
    scaler.set_full_matrix(False)
    scaler.set_error_model(None)
    scaler.set_intensities("profile")
    for expt_file, refl_file in zip(experiments, reflections):
        scaler.add_experiments_json(expt_file)
        scaler.add_reflections_file(refl_file)
    scaler.set_working_directory(self.get_working_directory())
    auto_logfiler(scaler)
    scaler.scale()
    result = (
        scaler.get_scaled_experiments(),
        scaler.get_scaled_reflections(),
    )
    # drop the temporary KB scaler so the real scaling starts afresh
    self._scaler = None
    return result
def _do_multisweep_symmetry_analysis(self):
    """KB-prescale all sweeps, then run a multi-sweep dials.symmetry.

    Returns the tuple (pointgroup, reindex_op, ntr, pt, reind_refl,
    reind_exp, reindex_initial) produced by the symmetry jiffy.
    """
    experiments, reflections, refiners = [], [], []
    for epoch in self._sweep_handler.get_epochs():
        intgr = self._sweep_handler.get_sweep_information(epoch).get_integrater()
        experiments.append(intgr.get_integrated_experiments())
        reflections.append(intgr.get_integrated_reflections())
        refiners.append(intgr.get_integrater_refiner())

    # put everything on a common scale before symmetry determination
    prescaled_experiments, prescaled_reflections = self._do_prescale_kb(
        experiments, reflections
    )

    logger.debug("Running multisweep dials.symmetry for %d sweeps", len(refiners))
    result = self._helper.dials_symmetry_indexer_jiffy(
        [prescaled_experiments], [prescaled_reflections], refiners, multisweep=True
    )
    (
        pointgroup,
        reindex_op,
        ntr,
        pt,
        reind_refl,
        reind_exp,
        reindex_initial,
    ) = result

    FileHandler.record_temporary_file(reind_refl)
    FileHandler.record_temporary_file(reind_exp)
    return pointgroup, reindex_op, ntr, pt, reind_refl, reind_exp, reindex_initial
def _multi_sweep_scale_prepare(self):
    """Determine the pointgroup across all sweeps with a single
    multi-sweep dials.symmetry run and apply the result.

    Returns True when preparation must be repeated (symmetry suggested
    a different lattice), otherwise False.
    """
    (
        pointgroup,
        reindex_op,
        ntr,
        _,
        reind_refl,
        reind_exp,
        reindex_initial,
    ) = self._do_multisweep_symmetry_analysis()

    if ntr:
        # A different lattice was suggested: reset the reindex operators
        # and signal that the whole preparation step must be redone.
        for epoch in self._sweep_handler.get_epochs():
            sweep_info = self._sweep_handler.get_sweep_information(epoch)
            sweep_info.get_integrater().integrater_reset_reindex_operator()
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return True

    self._scalr_likely_spacegroups = [pointgroup]
    if reindex_initial:
        for epoch in self._sweep_handler.get_epochs():
            sweep_info = self._sweep_handler.get_sweep_information(epoch)
            self._helper.reindex_jiffy(sweep_info, pointgroup, reindex_op=reindex_op)
        # integrater reset reindex op and update in si.
    else:
        self._sweep_handler = self._helper.split_experiments(
            reind_exp, reind_refl, self._sweep_handler
        )
    return False
def _input_pointgroup_scale_prepare(self):
    """Reindex every sweep with the user-supplied pointgroup (or
    spacegroup) using the identity operator, bypassing symmetry
    determination."""
    if self._scalr_input_spacegroup:
        # an explicit spacegroup takes precedence over a bare pointgroup
        pointgroup = self._scalr_input_spacegroup
        self._scalr_likely_spacegroups = [pointgroup]
    else:
        pointgroup = self._scalr_input_pointgroup
    logger.debug("Using input pointgroup: %s", pointgroup)
    for epoch in self._sweep_handler.get_epochs():
        sweep_info = self._sweep_handler.get_sweep_information(epoch)
        self._helper.reindex_jiffy(sweep_info, pointgroup, "h,k,l")
def _standard_scale_prepare(self):
    """Run dials.symmetry per sweep, reconcile lattices and pointgroups
    across sweeps, then reindex every sweep to the agreed pointgroup.

    Returns True when preparation must be repeated (a lattice was
    reassigned or twinning forced a pointgroup choice), else False.
    """
    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False
    need_to_return = False
    lattices = []
    # First check for the existence of multiple lattices. If only one
    # epoch, then this gives the necessary data for proceeding straight
    # to the point group check.
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        experiment = intgr.get_integrated_experiments()
        reflections = intgr.get_integrated_reflections()
        refiner = intgr.get_integrater_refiner()
        (
            pointgroup,
            reindex_op,
            ntr,
            pt,
            _,
            __,
            ___,
        ) = self._helper.dials_symmetry_indexer_jiffy(
            [experiment], [reflections], [refiner]
        )
        lattice = Syminfo.get_lattice(pointgroup)
        if lattice not in lattices:
            lattices.append(lattice)
        if ntr:
            # symmetry suggested a new lattice: reset and redo preparation
            si.get_integrater().integrater_reset_reindex_operator()
            need_to_return = True
        if pt:
            probably_twinned = True
        # cache per-epoch results for the consistency check below
        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op
        logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)
    if len(lattices) > 1:
        # Check consistency of lattices if more than one. If not, then
        # can proceed to straight to checking point group consistency
        # using the cached results.
        correct_lattice = sort_lattices(lattices)[0]
        logger.info("Correct lattice asserted to be %s", correct_lattice)
        # transfer this information back to the indexers
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            refiner = si.get_integrater().get_integrater_refiner()
            _tup = (correct_lattice, si.get_sweep_name())
            state = refiner.set_refiner_asserted_lattice(correct_lattice)
            if state == refiner.LATTICE_CORRECT:
                logger.info("Lattice %s ok for sweep %s" % _tup)
            elif state == refiner.LATTICE_IMPOSSIBLE:
                raise RuntimeError("Lattice %s impossible for %s" % _tup)
            elif state == refiner.LATTICE_POSSIBLE:
                # refinement must be redone with the asserted lattice
                logger.info("Lattice %s assigned for sweep %s" % _tup)
                need_to_return = True
    if need_to_return:
        return need_to_return
    need_to_return = False
    pointgroup_set = {pointgroups[e] for e in pointgroups}
    if len(pointgroup_set) > 1 and not probably_twinned:
        # disagreement with no twinning to explain it: hard failure
        raise RuntimeError(
            "non uniform pointgroups: %s" % str(list(pointgroup_set))
        )
    if len(pointgroup_set) > 1:
        # twinned data: fall back to the lowest-symmetry pointgroup
        logger.debug(
            "Probably twinned, pointgroups: %s",
            " ".join(p.replace(" ", "") for p in pointgroup_set),
        )
        numbers = [Syminfo.spacegroup_name_to_number(s) for s in pointgroup_set]
        overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup
        logger.info("Twinning detected, assume pointgroup %s", overall_pointgroup)
        need_to_return = True
    else:
        overall_pointgroup = pointgroup_set.pop()
    self._scalr_likely_spacegroups = [overall_pointgroup]
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        self._helper.reindex_jiffy(si, overall_pointgroup, reindex_ops[epoch])
    return need_to_return
def _scale_prepare(self):
    """Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via dials.symmetry) and generally tidy
    things up."""
    Citations.cite("dials.scale")
    # AIM discover symmetry and reindex with dials.symmetry, and set the correct
    # reflections in si.reflections, si.experiments
    self._helper.set_working_directory(self.get_working_directory())
    self._factory.set_working_directory(self.get_working_directory())
    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)
    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x
    self._helper.set_pname_xname(p, x)
    # First do stuff to work out if excluding any data
    # Note - does this actually work? I couldn't seem to get it to work
    # in either this pipeline or the standard dials pipeline
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        _, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        exclude_sweep = False
        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break
        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            logger.debug("Excluding sweep %s", sname)
        else:
            logger.debug("%-30s %s/%s/%s", "adding data from:", xname, dname, sname)
    # If multiple files, want to run symmetry to check for consistent indexing
    # also
    # try to reproduce what CCP4ScalerA is doing
    # first assign identifiers to avoid dataset-id collisions
    # Idea is that this should be called anytime you get data anew from the
    # integrater, to intercept and assign unique ids, then set in the
    # sweep_information (si) and always use si.set_reflections/
    # si.get_reflections as we process.
    # self._sweep_handler = self._helper.assign_and_return_datasets(
    #     self._sweep_handler
    # ) symmetry now sorts out identifiers.
    # Dispatch to the appropriate symmetry-determination strategy.
    need_to_return = False
    if self._scalr_input_pointgroup:
        self._input_pointgroup_scale_prepare()
    elif (
        len(self._sweep_handler.get_epochs()) > 1
        and PhilIndex.params.xia2.settings.multi_sweep_indexing
    ):
        need_to_return = self._multi_sweep_scale_prepare()
    else:
        need_to_return = self._standard_scale_prepare()
    if need_to_return:
        # preparation must run again (lattice/pointgroup changed)
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return
    ### After this point, point group is good and only need to
    ### reindex to consistent setting. Don't need to call back to the
    ### integator, just use the data in the sweep info.
    # First work out if we're going to reindex against external reference
    param = PhilIndex.params.xia2.settings.scale
    using_external_references = False
    reference_refl = None
    reference_expt = None
    if param.reference_reflection_file:
        if not param.reference_experiment_file:
            logger.info(
                """
No DIALS reference experiments file provided, reference reflection file will
not be used. Reference mtz files for reindexing not currently supported for
pipeline=dials (supported for pipeline=dials-aimless).
"""
            )
        else:
            reference_refl = param.reference_reflection_file
            reference_expt = param.reference_experiment_file
            using_external_references = True
            logger.debug("Using reference reflections %s", reference_refl)
            logger.debug("Using reference experiments %s", reference_expt)
    if len(self._sweep_handler.get_epochs()) > 1:
        if PhilIndex.params.xia2.settings.unify_setting:
            self.unify_setting()
        if PhilIndex.params.xia2.settings.use_brehm_diederichs:
            self.brehm_diederichs_reindexing()
        # If not using Brehm-deidrichs reindexing, set reference as first
        # sweep, unless using external reference.
        elif not using_external_references:
            logger.debug("First sweep will be used as reference for reindexing")
            first = self._sweep_handler.get_epochs()[0]
            si = self._sweep_handler.get_sweep_information(first)
            reference_expt = si.get_experiments()
            reference_refl = si.get_reflections()
    # Now reindex to be consistent with first dataset - run reindex on each
    # dataset with reference (unless did brehm diederichs and didn't supply
    # a reference file)
    if reference_refl and reference_expt:
        exp = load.experiment_list(reference_expt)
        reference_cell = exp[0].crystal.get_unit_cell().parameters()
        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------
        logger.info("Reindexing all datasets to common reference")
        if using_external_references:
            epochs = self._sweep_handler.get_epochs()
        else:
            # first sweep is the reference, so only reindex the rest
            epochs = self._sweep_handler.get_epochs()[1:]
        for epoch in epochs:
            # if we are working with unified UB matrix then this should not
            # be a problem here (note, *if*; *should*)
            # what about e.g. alternative P1 settings?
            # see JIRA MXSW-904
            if PhilIndex.params.xia2.settings.unify_setting:
                continue
            reindexer = DialsReindex()
            reindexer.set_working_directory(self.get_working_directory())
            auto_logfiler(reindexer)
            si = self._sweep_handler.get_sweep_information(epoch)
            reindexer.set_reference_filename(reference_expt)
            reindexer.set_reference_reflections(reference_refl)
            reindexer.set_indexed_filename(si.get_reflections())
            reindexer.set_experiments_filename(si.get_experiments())
            reindexer.run()
            # At this point, CCP4ScalerA would reset in integrator so that
            # the integrater calls reindex, no need to do that here as
            # have access to the files and will never need to reintegrate.
            si.set_reflections(reindexer.get_reindexed_reflections_filename())
            si.set_experiments(reindexer.get_reindexed_experiments_filename())
            # FIXME how to get some indication of the reindexing used?
            exp = load.experiment_list(
                reindexer.get_reindexed_experiments_filename()
            )
            cell = exp[0].crystal.get_unit_cell().parameters()
            # Note - no lattice check as this will already be caught by reindex
            logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
            logger.debug("Ref: %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)
            # sanity check: each cell edge/angle within 10% of the reference
            for j in range(6):
                if (
                    math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                    > 0.1
                ):
                    raise RuntimeError(
                        "unit cell parameters differ in %s and %s"
                        % (reference_expt, si.get_reflections())
                    )
    # Now make sure all batches ok before finish preparing
    # This should be made safer, currently after dials.scale there is no
    # concept of 'batch', dials.export uses the calculate_batch_offsets
    # to assign batches, giving the same result as below.
    experiments_to_rebatch = []
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        experiment = si.get_experiments()
        experiments_to_rebatch.append(load.experiment_list(experiment)[0])
    offsets = calculate_batch_offsets(experiments_to_rebatch)
    for i, epoch in enumerate(self._sweep_handler.get_epochs()):
        si = self._sweep_handler.get_sweep_information(epoch)
        r = si.get_batch_range()
        si.set_batch_offset(offsets[i])
        si.set_batches([r[0] + offsets[i], r[1] + offsets[i]])
def _scale(self):
    """Perform all of the operations required to deliver the scaled
    data.

    Runs dials.scale over all sweeps (resuming from a previous round if
    one exists), applies resolution limits, optionally checks systematic
    absences, refines the unit cell, then exports merged/unmerged mtz,
    sca and mmCIF files (per wavelength for MAD data) and records
    statistics and CIF metadata.
    """
    self.sweep_infos = [
        self._sweep_handler.get_sweep_information(e)
        for e in self._sweep_handler.get_epochs()
    ]
    ### Set the parameters and datafiles for dials.scale
    self._scaler = DialsScale()
    self._scaler = self._updated_dials_scaler()
    if self._scaled_experiments and self._scaled_reflections:
        # going to continue-where-left-off
        self._scaler.add_experiments_json(self._scaled_experiments)
        self._scaler.add_reflections_file(self._scaled_reflections)
    else:
        for si in self.sweep_infos:
            self._scaler.add_experiments_json(si.get_experiments())
            self._scaler.add_reflections_file(si.get_reflections())
        # ensure we start with a clean slate in case we pre-scaled the data
        # before running dials.symmetry
        self._scaler.set_overwrite_existing_models(True)
    self._scalr_scaled_reflection_files = {"mtz_unmerged": {}, "mtz": {}}
    ### Set the resolution limit if applicable
    user_resolution_limits = {}
    highest_resolution = 100.0  # sentinel: "no limit found yet"
    for si in self.sweep_infos:
        dname = si.get_project_info()[2]
        sname = si.get_sweep_name()
        intgr = si.get_integrater()
        if intgr.get_integrater_user_resolution():
            # record user resolution here but don't use it until later - why?
            dmin = intgr.get_integrater_high_resolution()
            if (dname, sname) not in user_resolution_limits:
                user_resolution_limits[(dname, sname)] = dmin
            elif dmin < user_resolution_limits[(dname, sname)]:
                user_resolution_limits[(dname, sname)] = dmin
        if (dname, sname) in self._scalr_resolution_limits:
            d_min, _ = self._scalr_resolution_limits[(dname, sname)]
            if d_min < highest_resolution:
                highest_resolution = d_min
    if highest_resolution < 99.9:
        self._scaler.set_resolution(d_min=highest_resolution)
    ### Setup final job details and run scale
    self._scaler.set_working_directory(self.get_working_directory())
    auto_logfiler(self._scaler)
    FileHandler.record_log_file(
        f"{self._scalr_pname} {self._scalr_xname} SCALE",
        self._scaler.get_log_file(),
    )
    self._scaler.scale()
    FileHandler.record_html_file(
        f"{self._scalr_pname} {self._scalr_xname} SCALE",
        self._scaler.get_html(),
    )
    self._scaled_experiments = self._scaler.get_scaled_experiments()
    self._scaled_reflections = self._scaler.get_scaled_reflections()
    # make it so that only scaled.expt and scaled.refl are
    # the files that dials.scale knows about, so that if scale is called again,
    # scaling resumes from where it left off.
    self._scaler.clear_datafiles()
    ### Calculate the resolution limit and set done False if applicable
    highest_suggested_resolution = self.assess_resolution_limits(
        hklin=None,
        user_resolution_limits=user_resolution_limits,
        reflections=self._scaled_reflections,
        experiments=self._scaled_experiments,
    )
    if not self.get_scaler_done():
        # reset for when resolution limit applied
        logger.debug("Returning as scaling not finished...")
        return
    ### Want to do space group check after scaling. So run dials.symmetry
    ### with absences only before exporting merged and unmerged files
    ### again in correct s.g.
    if (
        not PhilIndex.params.xia2.settings.small_molecule
        and not self._scalr_input_spacegroup
    ):
        logger.notice(banner("Systematic absences check"))
        symmetry = DialsSymmetry()
        symmetry.set_experiments_filename(self._scaled_experiments)
        symmetry.set_reflections_filename(self._scaled_reflections)
        symmetry.set_working_directory(self.get_working_directory())
        symmetry.set_mode_absences_only()
        auto_logfiler(symmetry)
        symmetry.decide_pointgroup()  # bad name - actually running absences here
        self._scaled_experiments = symmetry.get_output_experiments_filename()
        sg = load.experiment_list(self._scaled_experiments)[
            0
        ].crystal.get_space_group()
        logger.info("Most likely space group: %s", sg.info())
        self._scalr_likely_spacegroups = [sg.type().lookup_symbol()]
    FileHandler.record_more_data_file(
        f"{self._scalr_pname} {self._scalr_xname} scaled",
        self._scaled_experiments,
    )
    FileHandler.record_more_data_file(
        f"{self._scalr_pname} {self._scalr_xname} scaled",
        self._scaled_reflections,
    )
    # Run twotheta refine
    self._update_scaled_unit_cell_from_scaled_data()
    ### Now export and merge so that mtz files in correct space group.
    ### For MAD case, need to generate individual merged and unmerged mtz
    ### files. First split experiments on wavelength, then run dials.export
    ### and dials.merge on each
    # Find number of dnames (i.e. number of wavelengths)
    dnames_set = OrderedSet()
    experiments = load.experiment_list(self._scaled_experiments)
    wavelengths = flex.double(
        match_wavelengths(experiments)
    )  # in experiments order
    for si in self.sweep_infos:
        dnames_set.add(
            si.get_project_info()[2]
        )  # sweep info in same order as experiments
    assert len(wavelengths) == len(dnames_set)
    scaled_unmerged_mtz_path = os.path.join(
        self.get_working_directory(),
        f"{self._scalr_pname}_{self._scalr_xname}_scaled_unmerged.mtz",
    )
    if len(dnames_set) > 1:
        self._scalr_scaled_refl_files = {}
        logger.debug("Splitting experiments by wavelength")
        # first split by wavelength
        splitter = SplitExperiments()
        splitter.add_experiments(self._scaled_experiments)
        splitter.add_reflections(self._scaled_reflections)
        splitter.set_by_wavelength(True)
        splitter.set_working_directory(self.get_working_directory())
        auto_logfiler(splitter)
        splitter.run()
        nn = len(dnames_set)
        # zero-padded index format matching dials.split_experiments output
        fmt = "%%0%dd" % (math.log10(nn) + 1)
        wl_sort = flex.sort_permutation(wavelengths)
        sorted_dnames_by_wl = [dnames_set[i] for i in wl_sort]
        for i, dname in enumerate(sorted_dnames_by_wl):
            # need to sort by wavelength from low to high
            nums = fmt % i
            exporter = ExportMtz()
            exporter.set_working_directory(self.get_working_directory())
            expt_name = os.path.join(
                self.get_working_directory(), "split_%s.expt" % nums
            )
            refl_name = os.path.join(
                self.get_working_directory(), "split_%s.refl" % nums
            )
            FileHandler.record_temporary_file(expt_name)
            FileHandler.record_temporary_file(refl_name)
            exporter.crystal_name = self._scalr_xname
            exporter.project_name = self._scalr_pname
            exporter.set_experiments_filename(expt_name)
            exporter.set_reflections_filename(refl_name)
            exporter.set_intensity_choice("scale")
            exporter.set_partiality_threshold(
                PhilIndex.params.dials.scale.partiality_threshold
            )  # 0.4 default
            auto_logfiler(exporter)
            # NOTE(review): scaled_unmerged_mtz_path is already absolute, so
            # this os.path.join simply returns the second argument; also
            # rstrip(".mtz") strips a *character set*, not the suffix —
            # works only while the stem does not end in '.', 'm', 't' or
            # 'z'. Confirm before changing (see _write_mmcif_output).
            mtz_filename = os.path.join(
                self.get_working_directory(),
                scaled_unmerged_mtz_path.rstrip(".mtz") + "_%s.mtz" % dname,
            )
            exporter.set_mtz_filename(mtz_filename)
            self._scalr_scaled_reflection_files["mtz_unmerged"][
                dname
            ] = mtz_filename
            logger.debug("Exporting %s", mtz_filename)
            exporter.run()
            FileHandler.record_data_file(mtz_filename)
            # Export an mmCIF file using dials.export.
            exporter = ExportMMCIF()
            exporter.set_working_directory(self.get_working_directory())
            exporter.set_experiments_filename(expt_name)
            exporter.set_reflections_filename(refl_name)
            exporter.set_compression("bz2")
            exporter.set_pdb_version(
                PhilIndex.params.xia2.settings.output.mmcif.pdb_version
            )
            exporter.set_partiality_threshold(
                PhilIndex.params.dials.scale.partiality_threshold
            )  # 0.4 default
            # _write_mmcif_output recomputes this same path; keep in sync
            mmcif_path = mtz_filename.rstrip(".mtz") + ".mmcif"
            exporter.set_filename(mmcif_path)
            auto_logfiler(exporter)
            logger.debug("Exporting %s", mmcif_path)
            exporter.run()
            FileHandler.record_temporary_file(mmcif_path)
            # now convert to .sca format
            convert_mtz_to_sca(mtz_filename)
            merger = DialsMerge()  # merge but don't truncate
            merger.set_working_directory(self.get_working_directory())
            merger.set_experiments_filename(expt_name)
            merger.set_reflections_filename(refl_name)
            merger.set_project_name(self._scalr_pname)
            merger.set_crystal_names(self._scalr_xname)
            merger.set_dataset_names(dname)
            merger.set_partiality_threshold(
                PhilIndex.params.dials.scale.partiality_threshold
            )
            auto_logfiler(merger)
            mtz_filename = os.path.join(
                self.get_working_directory(),
                "%s_%s_scaled_%s.mtz"
                % (self._scalr_pname, self._scalr_xname, dname),
            )
            self._scalr_scaled_refl_files[dname] = mtz_filename
            self._scalr_scaled_reflection_files["mtz"][dname] = mtz_filename
            merger.set_mtz_filename(mtz_filename)
            logger.debug("Merging %s", mtz_filename)
            merger.run()
            FileHandler.record_data_file(mtz_filename)
            # now convert to .sca format
            convert_mtz_to_sca(mtz_filename)
    ### For non-MAD case, run dials.export and dials.merge on scaled data.
    else:
        exporter = ExportMtz()
        exporter.crystal_name = self._scalr_xname
        exporter.project_name = self._scalr_pname
        exporter.set_working_directory(self.get_working_directory())
        exporter.set_experiments_filename(self._scaled_experiments)
        exporter.set_reflections_filename(self._scaled_reflections)
        exporter.set_intensity_choice("scale")
        exporter.set_partiality_threshold(
            PhilIndex.params.dials.scale.partiality_threshold
        )  # 0.4 default
        auto_logfiler(exporter)
        exporter.set_mtz_filename(scaled_unmerged_mtz_path)
        logger.debug("Exporting %s", scaled_unmerged_mtz_path)
        exporter.run()
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {
            dnames_set[0]: scaled_unmerged_mtz_path
        }
        FileHandler.record_data_file(scaled_unmerged_mtz_path)
        # now convert to .sca format
        convert_mtz_to_sca(scaled_unmerged_mtz_path)
        merger = DialsMerge()
        merger.set_working_directory(self.get_working_directory())
        merger.set_experiments_filename(self._scaled_experiments)
        merger.set_reflections_filename(self._scaled_reflections)
        merger.set_project_name(self._scalr_pname)
        merger.set_crystal_names(self._scalr_xname)
        merger.set_dataset_names(dnames_set[0])
        merger.set_partiality_threshold(
            PhilIndex.params.dials.scale.partiality_threshold
        )
        auto_logfiler(merger)
        mtz_filename = os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
        )
        self._scalr_scaled_refl_files[dnames_set[0]] = mtz_filename
        self._scalr_scaled_reflection_files["mtz"][dnames_set[0]] = mtz_filename
        merger.set_mtz_filename(mtz_filename)
        logger.debug("Merging %s", mtz_filename)
        merger.run()
        FileHandler.record_data_file(mtz_filename)
        # now export to sca format
        convert_mtz_to_sca(mtz_filename)
        # Export an mmCIF file using dials.export.
        exporter = ExportMMCIF()
        exporter.set_working_directory(self.get_working_directory())
        exporter.set_experiments_filename(self._scaled_experiments)
        exporter.set_reflections_filename(self._scaled_reflections)
        exporter.set_compression("bz2")
        exporter.set_pdb_version(
            PhilIndex.params.xia2.settings.output.mmcif.pdb_version
        )
        exporter.set_partiality_threshold(
            PhilIndex.params.dials.scale.partiality_threshold
        )  # 0.4 default
        # NOTE(review): rstrip(".mtz") strips a character set, not the
        # suffix — same construction as _write_mmcif_output; keep in sync.
        mmcif_path = scaled_unmerged_mtz_path.rstrip(".mtz") + ".mmcif"
        exporter.set_filename(mmcif_path)
        auto_logfiler(exporter)
        logger.debug("Exporting %s", mmcif_path)
        exporter.run()
        FileHandler.record_temporary_file(mmcif_path)
    # Also export just integrated data.
    for si in self.sweep_infos:
        exporter = ExportMtz()
        exporter.crystal_name = self._scalr_xname
        exporter.project_name = self._scalr_pname
        exporter.set_reflections_filename(si.get_reflections())
        exporter.set_experiments_filename(si.get_experiments())
        exporter.set_intensity_choice("profile+sum")
        pname, xname, dname = si.get_project_info()
        sweep = si.get_integrater().get_integrater_sweep_name()
        tag = f"{pname} {xname} {dname} {sweep} INTEGRATE"
        mtz_filename = os.path.join(
            self.get_working_directory(), "%s_integrated.mtz" % sweep
        )
        exporter.set_mtz_filename(mtz_filename)
        # NOTE(review): unlike the exports above, no working directory or
        # logfiler is set on this exporter — confirm the defaults suffice.
        exporter.run()
        FileHandler.record_more_data_file(tag, mtz_filename)
    if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
        for key in self._scalr_scaled_refl_files:
            stats = self._compute_scaler_statistics(
                self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                selected_band=(highest_suggested_resolution, None),
                wave=key,
            )
            self._scalr_statistics[
                (self._scalr_pname, self._scalr_xname, key)
            ] = stats
    # add CIF data
    expts = load.experiment_list(self._scaled_experiments)
    overall_absmin = 1.0
    for expt in expts:
        if (expt.scaling_model.id_ == "physical") and (
            "absorption" in expt.scaling_model.components
        ):
            surface_plot = plot_absorption_surface(expt.scaling_model)
            correction = np.array(
                surface_plot["absorption_surface"]["data"][0]["z"]
            )
            # correction is a 2D numpy array
            absmin = np.min(correction) / np.max(correction)
            if absmin > 0:  # hope should always happen!
                overall_absmin = min(absmin, overall_absmin)
    dials_version = dials.util.version.dials_version()
    block = CIF.get_block("xia2")
    mmblock = mmCIF.get_block("xia2")
    mmblock["_exptl.entry_id"] = "xia2"
    mmblock["_exptl.method"] = "X-RAY DIFFRACTION"
    block["_exptl_absorpt_correction_T_min"] = mmblock[
        "_exptl.absorpt_correction_T_min"
    ] = overall_absmin  # = scaled relative to 1
    block["_exptl_absorpt_correction_T_max"] = mmblock[
        "_exptl.absorpt_correction_T_max"
    ] = 1.0  #
    block["_exptl_absorpt_correction_type"] = mmblock[
        "_exptl.absorpt_correction_type"
    ] = "empirical"
    block["_exptl_absorpt_process_details"] = mmblock[
        "_exptl.absorpt_process_details"
    ] = (
        """
%s
Scaling & analysis of unmerged intensities, absorption correction using spherical harmonics
"""
        % dials_version
    )
def _write_mmcif_output(self):
    """Migrate mmcif data generated by dials.export"""

    def _add_to_block(blockname, mmcif_path):
        # dials.export was configured with bz2 compression, so the file
        # on disk carries an extra .bz2 suffix
        mmcif_file_object = bz2.open(mmcif_path + ".bz2")
        mmblock_dials = iotbx.cif.reader(file_object=mmcif_file_object).model()
        # give the entry a consistent unique name in all fields
        if PhilIndex.params.xia2.settings.output.mmcif.pdb_version == "v5_next":
            mmblock_dials["dials"]["_pdbx_diffrn_data_section.id"] = blockname
        mmblock_dials["dials"]["_entry.id"] = blockname
        for key in mmblock_dials["dials"].keys():
            if key.endswith("entry_id"):
                mmblock_dials["dials"][key] = blockname
        mmCIF.get_block(blockname).update(mmblock_dials["dials"])

    # one block per wavelength/dataset name
    dnames = OrderedSet([si.get_project_info()[2] for si in self.sweep_infos])
    for dname in dnames:
        mtz_path = self._scalr_scaled_reflection_files["mtz_unmerged"][dname]
        # NOTE(review): rstrip(".mtz") strips a trailing *character set*,
        # not the literal suffix. It mirrors the identical construction in
        # _scale (so the paths agree), but both would mangle a name ending
        # in '.', 'm', 't' or 'z' — fix in both places together, e.g. with
        # removesuffix/splitext.
        mmcif_path = mtz_path.rstrip(".mtz") + ".mmcif"
        blockname = f"{self._scalr_pname}_{self._scalr_xname}_{dname}"
        _add_to_block(blockname, mmcif_path)
def _update_scaled_unit_cell_from_scaled_data(self):
    """Determine the final unit cell and record it in self._scalr_cell.

    Two routes:
    * If integrating with DIALS, not in fast mode, and two-theta refinement
      is enabled: run dials.two_theta_refine (per wavelength group when
      several are present, then on the combined scaled data) and write the
      refined cell/esds into the CIF/mmCIF output blocks.
    * Otherwise: average the unit cells of the scaled reflection files and
      record the average (with no esds) in the CIF output.
    """

    params = PhilIndex.params
    fast_mode = params.dials.fast_mode
    if (
        params.xia2.settings.integrater == "dials"
        and not fast_mode
        and params.xia2.settings.scale.two_theta_refine
    ):
        logger.notice(banner("Unit cell refinement"))

        # Collect a list of all sweeps, grouped by project, crystal, wavelength
        groups_list = []
        groups = {}
        self._scalr_cell_dict = {}
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            pi = "_".join(si.get_project_info())  # pname, xname, dname
            groups_list.append(pi)

        p4p_file = os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}.p4p",
        )
        # More than one pname/xname/dname combination: refine each group
        # separately first (requires per-sweep experiments/reflections).
        if len(set(groups_list)) > 1:
            # need to split up experiments and reflections
            self._sweep_handler = self._helper.split_experiments(
                self._scaled_experiments,
                self._scaled_reflections,
                self._sweep_handler,
            )
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                pi = "_".join(si.get_project_info())  # pname, xname, dname
                groups[pi] = groups.get(pi, []) + [
                    (si.get_experiments(), si.get_reflections())
                ]  # if key exists, add another 2-tuple to the list.
            for pi in groups:
                # Run twothetarefine on each group
                tt_grouprefiner = TwoThetaRefine()
                tt_grouprefiner.set_working_directory(self.get_working_directory())
                auto_logfiler(tt_grouprefiner)
                # transpose [(expt, refl), ...] -> ([expts], [refls])
                args = list(zip(*groups[pi]))
                tt_grouprefiner.set_experiments(args[0])
                tt_grouprefiner.set_reflection_files(args[1])
                tt_grouprefiner.set_output_p4p(p4p_file)
                tt_grouprefiner.run()
                logger.info(
                    "%s: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f"
                    % tuple(
                        ["".join(pi.split("_")[2:])]
                        + list(tt_grouprefiner.get_unit_cell())
                    )
                )
                self._scalr_cell_dict[pi] = (
                    tt_grouprefiner.get_unit_cell(),
                    tt_grouprefiner.get_unit_cell_esd(),
                    tt_grouprefiner.import_cif(),
                    tt_grouprefiner.import_mmcif(),
                )
                # Per-group CIF/mmCIF blocks only make sense when there is
                # more than one group.
                if len(groups_list) > 1:
                    cif_in = tt_grouprefiner.import_cif()
                    cif_out = CIF.get_block(pi)
                    for key in sorted(cif_in.keys()):
                        cif_out[key] = cif_in[key]
                    mmcif_in = tt_grouprefiner.import_mmcif()
                    mmcif_out = mmCIF.get_block(pi)
                    # reset the entry id to be the name of the new block
                    mmcif_out["_entry.id"] = pi
                    for key in sorted(mmcif_in.keys()):
                        if key.endswith("entry_id"):
                            mmcif_out[key] = pi
                        else:
                            mmcif_out[key] = mmcif_in[key]

        # now do two theta refine on combined scaled data.
        tt_refiner = TwoThetaRefine()
        tt_refiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_refiner)
        tt_refiner.set_experiments([self._scaled_experiments])
        tt_refiner.set_reflection_files([self._scaled_reflections])  # needs a list
        tt_refiner.set_output_p4p(p4p_file)
        tt_refiner.run()
        self._scaled_experiments = tt_refiner.get_output_experiments()
        FileHandler.record_more_data_file(
            f"{self._scalr_pname} {self._scalr_xname} scaled",
            self._scaled_experiments,
        )
        self._scalr_cell = tt_refiner.get_unit_cell()
        logger.info(
            "Overall: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f"
            % tt_refiner.get_unit_cell()
        )
        self._scalr_cell_esd = tt_refiner.get_unit_cell_esd()
        cif_in = tt_refiner.import_cif()
        mmcif_in = tt_refiner.import_mmcif()
        if params.xia2.settings.small_molecule:
            FileHandler.record_data_file(p4p_file)

        # Write the overall refined cell into the top-level "xia2" blocks.
        cif_out = CIF.get_block("xia2")
        mmcif_out = mmCIF.get_block("xia2")
        for key in sorted(cif_in.keys()):
            cif_out[key] = cif_in[key]
        for key in sorted(mmcif_in.keys()):
            if key.endswith("entry_id"):
                mmcif_out[key] = "xia2"
            else:
                mmcif_out[key] = mmcif_in[key]

        logger.debug("Unit cell obtained by two-theta refinement")
    else:
        # Fallback: average the cells of the scaled reflection files.
        ami = AnalyseMyIntensities()
        ami.set_working_directory(self.get_working_directory())

        average_unit_cell, _ = ami.compute_average_cell(
            [
                self._scalr_scaled_refl_files[key]
                for key in self._scalr_scaled_refl_files
            ]
        )

        logger.debug("Computed average unit cell (will use in all files)")
        self._scalr_cell = average_unit_cell
        self._scalr_cell_esd = None

        # Write average unit cell to .cif
        cif_out = CIF.get_block("xia2")
        cif_out[  # pylint: disable=E1137
            "_computing_cell_refinement"
        ] = "AIMLESS averaged unit cell"
        for cell, cifname in zip(
            self._scalr_cell,
            [
                "length_a",
                "length_b",
                "length_c",
                "angle_alpha",
                "angle_beta",
                "angle_gamma",
            ],
        ):
            cif_out["_cell_%s" % cifname] = cell  # pylint: disable=E1137

    logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % self._scalr_cell)
def apply_reindex_operator_to_sweep_info(self, si, reindex_operator, reason):
    """Reindex the data held in a sweep-information object.

    Runs dials.reindex with the given change-of-basis operator on the
    experiments/reflections from *si*, then stores the reindexed output
    files back into *si*, logging *reason* for the audit trail.
    """
    wrapper = DialsReindex()
    wrapper.set_working_directory(self.get_working_directory())
    auto_logfiler(wrapper)

    wrapper.set_experiments_filename(si.get_experiments())
    wrapper.set_indexed_filename(si.get_reflections())
    wrapper.set_cb_op(reindex_operator)
    wrapper.run()

    si.set_reflections(wrapper.get_reindexed_reflections_filename())
    si.set_experiments(wrapper.get_reindexed_experiments_filename())

    logger.debug(
        "Reindexed with operator %s, reason is %s", reindex_operator, reason
    )
def get_UBlattsymm_from_sweep_info(self, sweep_info):
    """Return (U matrix, mosflm B matrix, lattice symmetry group) for a sweep.

    The values are derived from the crystal model of the first experiment
    in the sweep's experiment list.
    """
    crystal = load.experiment_list(sweep_info.get_experiments())[0].crystal
    unit_cell = crystal.get_unit_cell()
    symmetry = lattice_symmetry_group(unit_cell, max_delta=0.0)
    return tuple(crystal.get_U()), mosflm_B_matrix(unit_cell), symmetry
def get_mtz_data_from_sweep_info(self, sweep_info):
    """Get the data in mtz form.

    Need to run dials.export to convert the data from experiment list
    and reflection table to mtz form.

    Returns:
        The path to the exported mtz file.
    """
    return self.export_to_mtz(sweep_info)
def export_to_mtz(self, sweep_info):
    """Export a sweep's integrated data to mtz via dials.export.

    Partiality handling follows the dials.integrate phil parameters; a
    single-image sweep (still shot) gets a relaxed partiality threshold
    and summation intensities, since no profiles are available.

    Returns:
        The path of the written mtz file.
    """
    integrate_params = PhilIndex.params.dials.integrate

    exporter = ExportMtz()
    exporter.crystal_name = self._scalr_xname
    exporter.project_name = self._scalr_pname
    exporter.set_working_directory(self.get_working_directory())
    exporter.set_experiments_filename(sweep_info.get_experiments())
    exporter.set_reflections_filename(sweep_info.get_reflections())
    exporter.set_combine_partials(integrate_params.combine_partials)
    exporter.set_partiality_threshold(integrate_params.partiality_threshold)

    single_batch = len(sweep_info.get_batches()) == 1
    if single_batch:
        exporter.set_partiality_threshold(0.1)
    if (
        single_batch
        or PhilIndex.params.dials.fast_mode
        or not PhilIndex.params.xia2.settings.integration.profile_fitting
    ):
        # With no profiles available have to rely on summation alone
        exporter.set_intensity_choice("sum")

    auto_logfiler(exporter, "EXPORTMTZ")
    mtz_filename = os.path.join(
        self.get_working_directory(), "%s.mtz" % sweep_info.get_sweep_name()
    )
    exporter.set_mtz_filename(mtz_filename)
    exporter.run()
    return mtz_filename
class DialsScalerHelper:
    """A class to help the DIALS Scaler along a little."""

    def __init__(self):
        # Working directory for all wrapped DIALS programs; pname/xname
        # are set later via set_pname_xname.
        self._working_directory = os.getcwd()
        self._scalr_xname = None
        self._scalr_pname = None

    def set_pname_xname(self, pname, xname):
        self._scalr_xname = xname
        self._scalr_pname = pname

    def set_working_directory(self, working_directory):
        self._working_directory = working_directory

    def get_working_directory(self):
        return self._working_directory

    def assign_dataset_identifiers(self, experiments, reflections):
        """Assign unique identifiers to the datasets"""
        assigner = DialsAssignIdentifiers()
        assigner.set_working_directory(self.get_working_directory())
        auto_logfiler(assigner)
        for (exp, refl) in zip(experiments, reflections):
            assigner.add_experiments(exp)
            assigner.add_reflections(refl)
        assigner.assign_identifiers()
        # Return the wrapper so callers can fetch the output filenames.
        return assigner

    def split_experiments(self, experiment, reflection, sweep_handler):
        """Split a multi-experiment dataset into individual datasets and set in the
        sweep handler."""
        splitter = SplitExperiments()
        splitter.add_experiments(experiment)
        splitter.add_reflections(reflection)
        splitter.set_working_directory(self.get_working_directory())
        auto_logfiler(splitter)
        splitter.run()
        # Build a zero-padded format matching the width of the highest
        # split-file index, e.g. "%02d" for 10-100 sweeps.
        nn = len(sweep_handler.get_epochs()) - 1
        fmt = "%%0%dd" % len(str(nn))
        # NOTE(review): assumes dials.split_experiments writes files named
        # split_<padded-index>.{refl,expt} in the working directory, in
        # epoch order - confirm against the wrapper.
        for i, epoch in enumerate(sweep_handler.get_epochs()):
            si = sweep_handler.get_sweep_information(epoch)
            nums = fmt % i
            si.set_reflections(
                os.path.join(self.get_working_directory(), "split_%s.refl" % nums)
            )
            si.set_experiments(
                os.path.join(self.get_working_directory(), "split_%s.expt" % nums)
            )
            FileHandler.record_temporary_file(
                os.path.join(self.get_working_directory(), "split_%s.refl" % nums)
            )
            FileHandler.record_temporary_file(
                os.path.join(self.get_working_directory(), "split_%s.expt" % nums)
            )
        return sweep_handler

    def assign_and_return_datasets(self, sweep_handler):
        """Assign unique identifiers to all integrated experiments & reflections,
        and set these in the sweep_information for each epoch."""
        experiments = []
        reflections = []
        for epoch in sweep_handler.get_epochs():
            si = sweep_handler.get_sweep_information(epoch)
            integrater = si.get_integrater()
            experiments.append(integrater.get_integrated_experiments())
            reflections.append(integrater.get_integrated_reflections())
        assigner = self.assign_dataset_identifiers(experiments, reflections)
        # Split the combined, identifier-assigned output back into
        # per-sweep files stored on the sweep handler.
        sweep_handler = self.split_experiments(
            assigner.get_output_experiments_filename(),
            assigner.get_output_reflections_filename(),
            sweep_handler,
        )
        return sweep_handler

    def dials_symmetry_indexer_jiffy(
        self, experiments, reflections, refiners, multisweep=False
    ):
        """A jiffy to centralise the interactions between dials.symmetry
        and the Indexer, multisweep edition.

        Returns a 7-tuple: (pointgroup, reindex_op, need_to_return,
        probably_twinned, reindexed_reflections, reindexed_experiments,
        reindex_initial).
        """
        # First check format of input against expected input
        assert len(experiments) == len(
            reflections
        ), """
Unequal number of experiments/reflections passed to dials_symmetry_indexer_jiffy"""
        if len(experiments) > 1:
            assert multisweep, """
Passing multple datasets to indexer_jiffy but not set multisweep=True"""

        reindex_initial = False

        symmetry_analyser = self.dials_symmetry_decide_pointgroup(
            experiments, reflections
        )
        possible = symmetry_analyser.get_possible_lattices()

        logger.debug("Possible lattices (dials.symmetry):")
        logger.debug(" ".join(possible))

        # all refiners contain the same indexer link, so any good here.
        (
            correct_lattice,
            rerun_symmetry,
            need_to_return,
        ) = decide_correct_lattice_using_refiner(possible, refiners[0])

        if need_to_return and multisweep:
            if (
                PhilIndex.params.xia2.settings.integrate_p1
                and not PhilIndex.params.xia2.settings.reintegrate_correct_lattice
            ):
                # Integrating in P1 without reintegration: no need to loop
                # back, just rerun symmetry with the corrected lattice.
                need_to_return = False
                rerun_symmetry = True
            else:
                # Reset the remaining refiners so all sweeps are redone.
                for refiner in refiners[1:]:
                    refiner.refiner_reset()

        if rerun_symmetry:
            # don't actually need to rerun, just set correct solution - this
            # call updates the relevant info in the Wrapper - but will need to reindex later
            symmetry_analyser.set_correct_lattice(correct_lattice)
            reindex_initial = True
            # rather than reindexing here, just set the reindex_inital and let the
            # scaler manage this as necessary

        logger.debug(
            "Symmetry analysis of %s", " ".join(experiments) + " ".join(reflections)
        )

        pointgroup = symmetry_analyser.get_pointgroup()
        reindex_op = symmetry_analyser.get_reindex_operator()
        probably_twinned = symmetry_analyser.get_probably_twinned()
        reindexed_reflections = symmetry_analyser.get_output_reflections_filename()
        reindexed_experiments = symmetry_analyser.get_output_experiments_filename()

        logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)

        return (
            pointgroup,
            reindex_op,
            need_to_return,
            probably_twinned,
            reindexed_reflections,
            reindexed_experiments,
            reindex_initial,
        )

    def dials_symmetry_decide_pointgroup(self, experiments, reflections):
        """Run the symmetry analyser and return it for later inspection."""
        symmetry_analyser = DialsSymmetry()
        symmetry_analyser.set_working_directory(self.get_working_directory())
        auto_logfiler(symmetry_analyser)

        FileHandler.record_log_file(
            f"{self._scalr_pname} {self._scalr_xname} SYMMETRY",
            symmetry_analyser.get_log_file(),
        )

        for (exp, refl) in zip(experiments, reflections):
            symmetry_analyser.add_experiments(exp)
            symmetry_analyser.add_reflections(refl)
        symmetry_analyser.decide_pointgroup()

        return symmetry_analyser

    @staticmethod
    def reindex_jiffy(si, pointgroup, reindex_op):
        """Add data from si and reindex, setting back in si"""
        integrater = si.get_integrater()
        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup)
        )
        integrater.set_integrater_reindex_operator(
            reindex_op, reason="setting point group"
        )
        integrater.set_output_format("pickle")
        integrater.get_integrater_intensities()
        # ^ This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrated_reflections())
        si.set_experiments(integrater.get_integrated_experiments())
def decide_correct_lattice_using_refiner(possible_lattices, refiner):
    """Use the refiner to determine which of the possible lattices is the
    correct one.

    Returns a (correct_lattice, rerun_symmetry, need_to_return) tuple;
    need_to_return signals that reprocessing is required, and rerun_symmetry
    that the symmetry decision should be revisited.
    """
    correct_lattice = None
    rerun_symmetry = False
    need_to_return = False

    for candidate in possible_lattices:
        outcome = refiner.set_refiner_asserted_lattice(candidate)
        if outcome == refiner.LATTICE_CORRECT:
            logger.debug("Agreed lattice %s", candidate)
            correct_lattice = candidate
            break
        if outcome == refiner.LATTICE_POSSIBLE:
            logger.debug("Accepted lattice %s, will reprocess", candidate)
            correct_lattice = candidate
            need_to_return = True
            break
        if outcome == refiner.LATTICE_IMPOSSIBLE:
            logger.debug("Rejected lattice %s", candidate)
            rerun_symmetry = True

    if correct_lattice is None:
        # None of the candidates were acceptable: fall back to whatever the
        # refiner itself settled on.
        correct_lattice = refiner.get_refiner_lattice()
        rerun_symmetry = True
        logger.debug("No solution found: assuming lattice from refiner")

    return correct_lattice, rerun_symmetry, need_to_return
def convert_mtz_to_sca(mtz_filename):
    """Convert an mtz file to .sca format and write it out.

    The output filename is the input path with its extension replaced by
    ".sca".  Unmerged (I/SIGI) data are written with the no-merge-original-
    index writer; merged (IMEAN/SIGIMEAN) data with the scalepack merge
    writer.

    Raises:
        KeyError: if neither intensity column pair is found in the mtz file.
    """
    # Replace only the file extension.  The previous
    # mtz_filename.replace("mtz", "sca") rewrote *every* occurrence of
    # "mtz" in the path (e.g. a directory named "mtzfiles").
    sca_filename = os.path.splitext(mtz_filename)[0] + ".sca"
    m = mtz.object(mtz_filename)
    for ma in m.as_miller_arrays(merge_equivalents=False, anomalous=False):
        if ma.info().labels == ["I", "SIGI"]:
            no_merge_original_index.writer(ma, file_name=sca_filename)
            FileHandler.record_data_file(sca_filename)
            break
        elif ma.info().labels == ["IMEAN", "SIGIMEAN"]:
            merge_scalepack_write(miller_array=ma, file_name=sca_filename)
            FileHandler.record_data_file(sca_filename)
            break
    else:
        # for/else: no break above means no intensity columns were found.
        raise KeyError("Intensity column labels not found in MTZ file")
def scaling_model_auto_rules(experiment):
    """Use dials.scale rules for determining suitable parameters.

    Maps the experiment's scan width (degrees) to a pair of
    (scale_interval, decay_interval) values: narrower scans get finer
    smoothing intervals.
    """
    start, end = experiment.scan.get_oscillation_range()
    scan_width = end - start

    # (upper bound on scan width, (scale interval, decay interval))
    thresholds = (
        (5.0, (1.0, 1.5)),
        (10.0, (2.0, 3.0)),
        (25.0, (4.0, 5.0)),
        (90.0, (8.0, 10.0)),
    )
    for limit, intervals in thresholds:
        if scan_width < limit:
            return intervals
    return 15.0, 20.0
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Scaler/DialsScaler.py",
"copies": "1",
"size": "61842",
"license": "bsd-3-clause",
"hash": -5741158566559380000,
"line_mean": 41.6496551724,
"line_max": 92,
"alpha_frac": 0.5888231299,
"autogenerated": false,
"ratio": 3.8571695877253167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9942674002017511,
"avg_score": 0.0006637431215611025,
"num_lines": 1450
} |
# An implementation of the Scaler interface using CCP4 programs and Aimless.
import copy
import logging
import math
import os
import re
from xia2.Handlers.CIF import CIF, mmCIF
from xia2.Handlers.Citations import Citations
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Syminfo import Syminfo
from xia2.lib.bits import is_mtz_file, nifty_power_of_ten, transpose_loggraph
from xia2.lib.SymmetryLib import sort_lattices
from xia2.Modules import MtzUtils
from xia2.Modules.Scaler.CCP4ScalerHelpers import (
CCP4ScalerHelper,
SweepInformationHandler,
_prepare_pointless_hklin,
ersatz_resolution,
get_umat_bmat_lattice_symmetry_from_mtz,
)
from xia2.Modules.Scaler.CommonScaler import CommonScaler as Scaler
from xia2.Modules.Scaler.rebatch import rebatch
from xia2.Toolkit.AimlessSurface import (
evaluate_1degree,
generate_map,
scrape_coefficients,
)
from xia2.Wrappers.CCP4.CCP4Factory import CCP4Factory
logger = logging.getLogger("xia2.Modules.Scaler.CCP4ScalerA")
class CCP4ScalerA(Scaler):
"""An implementation of the Scaler interface using CCP4 programs."""
def __init__(self, *args, **kwargs):
    """Initialise the CCP4/Aimless scaler state on top of the base Scaler."""
    super().__init__(*args, **kwargs)
    # Per-run state, populated during scaling.
    self._sweep_handler = None
    self._scalr_scaled_refl_files = {}
    self._wavelengths_in_order = []

    # flags to keep track of the corrections we will be applying
    model = PhilIndex.params.xia2.settings.scale.model
    self._scalr_correct_absorption = "absorption" in model
    self._scalr_correct_decay = "decay" in model
    self._scalr_corrections = True

    # useful handles...!
    self._prepared_reflections = None  # set once reflections are prepared
    self._reference = None  # reference reflection file for consistent indexing
    self._factory = CCP4Factory()  # creates configured CCP4 program wrappers
    self._helper = CCP4ScalerHelper()
# overloaded from the Scaler interface... to plumb in the factory
def to_dict(self):
    """Serialise the scaler state, extending the base-class dictionary."""
    state = super().to_dict()
    handler = self._sweep_handler
    if handler is not None:
        state["_sweep_handler"] = handler.to_dict()
    state["_prepared_reflections"] = self._prepared_reflections
    return state
@classmethod
def from_dict(cls, obj):
    """Rebuild a scaler instance from its to_dict() representation."""
    scaler = super().from_dict(obj)
    handler_state = scaler._sweep_handler
    if handler_state is not None:
        # The base class left the raw dict in place; inflate it.
        scaler._sweep_handler = SweepInformationHandler.from_dict(handler_state)
    scaler._prepared_reflections = obj["_prepared_reflections"]
    return scaler
def set_working_directory(self, working_directory):
    """Set the working directory for the scaler and its collaborators."""
    # Keep the factory and helper in step so the wrappers they create run
    # in the same directory as the scaler.
    self._working_directory = working_directory
    self._factory.set_working_directory(working_directory)
    self._helper.set_working_directory(working_directory)
# this is an overload from the factory - it returns Aimless wrapper set up
# with the desired corrections
def _updated_aimless(self):
    """Generate a correctly configured Aimless wrapper.

    Applies the scale model, spacing, B-factor, surface/secondary and
    intensity settings from the PHIL parameters; small-molecule mode
    overrides the spacing and B-factor defaults.
    """
    params = PhilIndex.params.ccp4.aimless
    settings = PhilIndex.params.xia2.settings

    # Build the wrapper with only the corrections we decided to apply
    # (the redundant "aimless = None" pre-assignment has been dropped).
    if self._scalr_corrections:
        aimless = self._factory.Aimless(
            absorption_correction=self._scalr_correct_absorption,
            decay_correction=self._scalr_correct_decay,
        )
    else:
        aimless = self._factory.Aimless()

    aimless.set_mode(settings.scale.scales)
    aimless.set_spacing(params.rotation.spacing)
    aimless.set_bfactor(brotation=params.brotation.spacing)

    if settings.small_molecule:
        # Small-molecule data: coarser scale spacing and an explicit B-factor.
        aimless.set_spacing(15.0)
        aimless.set_bfactor(bfactor=settings.small_molecule_bfactor)

    aimless.set_surface_tie(params.surface_tie)
    aimless.set_surface_link(params.surface_link)

    # "camera" frame maps to the secondary beam correction; otherwise use
    # the absorption correction.
    if params.secondary.frame == "camera":
        secondary = "secondary"
    else:
        secondary = "absorption"
    aimless.set_secondary(secondary, params.secondary.lmax)

    if settings.multi_crystal:
        aimless.set_surface_link(False)

    # if profile fitting off use summation intensities
    if settings.integration.profile_fitting:
        aimless.set_intensities(params.intensities)
    else:
        aimless.set_intensities("summation")

    return aimless
def _pointless_indexer_jiffy(self, hklin, refiner):
    """Delegate single-sweep pointless/indexer reconciliation to the helper."""
    return self._helper.pointless_indexer_jiffy(hklin, refiner)
def _pointless_indexer_multisweep(self, hklin, refiners):
    """Delegate multi-sweep pointless/indexer reconciliation to the helper."""
    return self._helper.pointless_indexer_multisweep(hklin, refiners)
def _scale_prepare(self):
"""Perform all of the preparation required to deliver the scaled
data. This should sort together the reflection files, ensure that
they are correctly indexed (via pointless) and generally tidy
things up."""
# acknowledge all of the programs we are about to use...
Citations.cite("pointless")
Citations.cite("aimless")
Citations.cite("ccp4")
# ---------- GATHER ----------
self._sweep_handler = SweepInformationHandler(self._scalr_integraters)
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
pname, xname, dname = si.get_project_info()
sname = si.get_sweep_name()
exclude_sweep = False
for sweep in PhilIndex.params.xia2.settings.sweep:
if sweep.id == sname and sweep.exclude:
exclude_sweep = True
break
if exclude_sweep:
self._sweep_handler.remove_epoch(epoch)
logger.debug("Excluding sweep %s", sname)
else:
logger.debug("%-30s %s/%s/%s", "adding data from:", xname, dname, sname)
# gather data for all images which belonged to the parent
# crystal - allowing for the fact that things could go wrong
# e.g. epoch information not available, exposure times not in
# headers etc...
for e in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(e)
assert is_mtz_file(si.get_reflections()), repr(si.get_reflections())
p, x = self._sweep_handler.get_project_info()
self._scalr_pname = p
self._scalr_xname = x
# verify that the lattices are consistent, calling eliminate if
# they are not N.B. there could be corner cases here
need_to_return = False
multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
# START OF if more than one epoch
if len(self._sweep_handler.get_epochs()) > 1:
# if we have multi-sweep-indexing going on then logic says all should
# share common lattice & UB definition => this is not used here?
# START OF if multi_sweep indexing and not input pg
if multi_sweep_indexing and not self._scalr_input_pointgroup:
pointless_hklins = []
max_batches = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
batches = MtzUtils.batches_from_mtz(hklin)
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
logger.debug("Biggest sweep has %d batches", max_batches)
max_batches = nifty_power_of_ten(max_batches)
counter = 0
refiners = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
refiners.append(refiner)
hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_%s_prepointless.mtz"
% (pname, xname, dname, si.get_sweep_name()),
)
# we will want to delete this one exit
FileHandler.record_temporary_file(hklout)
first_batch = min(si.get_batches())
si.set_batch_offset(counter * max_batches - first_batch + 1)
rebatch(
hklin,
hklout,
first_batch=counter * max_batches + 1,
pname=pname,
xname=xname,
dname=dname,
)
pointless_hklins.append(hklout)
# update the counter & recycle
counter += 1
# SUMMARY - have added all sweeps to pointless_hklins
s = self._factory.Sortmtz()
pointless_hklin = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_sorted.mtz"
% (self._scalr_pname, self._scalr_xname),
)
s.set_hklout(pointless_hklin)
for hklin in pointless_hklins:
s.add_hklin(hklin)
s.sort()
# FIXME xia2-51 in here look at running constant scaling on the
# pointless hklin to put the runs on the same scale. Ref=[A]
pointless_const = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_const.mtz"
% (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(pointless_const)
aimless_const = self._factory.Aimless()
aimless_const.set_hklin(pointless_hklin)
aimless_const.set_hklout(pointless_const)
aimless_const.const()
pointless_const = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_const_unmerged.mtz"
% (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(pointless_const)
pointless_hklin = pointless_const
# FIXME xia2-51 in here need to pass all refiners to ensure that the
# information is passed back to all of them not just the last one...
logger.debug(
"Running multisweep pointless for %d sweeps", len(refiners)
)
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
pointless_hklin, refiners
)
logger.debug("X1698: %s: %s", pointgroup, reindex_op)
lattices = [Syminfo.get_lattice(pointgroup)]
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
intgr = si.get_integrater()
hklin = si.get_reflections()
refiner = intgr.get_integrater_refiner()
if ntr:
intgr.integrater_reset_reindex_operator()
need_to_return = True
# SUMMARY - added all sweeps together into an mtz, ran
# _pointless_indexer_multisweep on this, made a list of one lattice
# and potentially reset reindex op?
# END OF if multi_sweep indexing and not input pg
# START OF if not multi_sweep, or input pg given
else:
lattices = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
intgr = si.get_integrater()
hklin = si.get_reflections()
refiner = intgr.get_integrater_refiner()
if self._scalr_input_pointgroup:
pointgroup = self._scalr_input_pointgroup
reindex_op = "h,k,l"
ntr = False
else:
pointless_hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
pointless_hklin, refiner
)
logger.debug("X1698: %s: %s", pointgroup, reindex_op)
lattice = Syminfo.get_lattice(pointgroup)
if lattice not in lattices:
lattices.append(lattice)
if ntr:
intgr.integrater_reset_reindex_operator()
need_to_return = True
# SUMMARY do pointless_indexer on each sweep, get lattices and make a list
# of unique lattices, potentially reset reindex op.
# END OF if not multi_sweep, or input pg given
# SUMMARY - still within if more than one epoch, now have a list of number
# of lattices
# START OF if multiple-lattices
if len(lattices) > 1:
# why not using pointless indexer jiffy??!
correct_lattice = sort_lattices(lattices)[0]
logger.info("Correct lattice asserted to be %s", correct_lattice)
# transfer this information back to the indexers
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
refiner = si.get_integrater().get_integrater_refiner()
sname = si.get_sweep_name()
state = refiner.set_refiner_asserted_lattice(correct_lattice)
if state == refiner.LATTICE_CORRECT:
logger.info(
"Lattice %s ok for sweep %s", correct_lattice, sname
)
elif state == refiner.LATTICE_IMPOSSIBLE:
raise RuntimeError(
f"Lattice {correct_lattice} impossible for {sname}"
)
elif state == refiner.LATTICE_POSSIBLE:
logger.info(
"Lattice %s assigned for sweep %s", correct_lattice, sname
)
need_to_return = True
# END OF if multiple-lattices
# SUMMARY - forced all lattices to be same and hope its okay.
# END OF if more than one epoch
# if one or more of them was not in the lowest lattice,
# need to return here to allow reprocessing
if need_to_return:
self.set_scaler_done(False)
self.set_scaler_prepare_done(False)
return
# ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------
# all should share the same pointgroup, unless twinned... in which
# case force them to be...
pointgroups = {}
reindex_ops = {}
probably_twinned = False
need_to_return = False
multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
# START OF if multi-sweep and not input pg
if multi_sweep_indexing and not self._scalr_input_pointgroup:
pointless_hklins = []
max_batches = 0
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
batches = MtzUtils.batches_from_mtz(hklin)
if 1 + max(batches) - min(batches) > max_batches:
max_batches = max(batches) - min(batches) + 1
logger.debug("Biggest sweep has %d batches", max_batches)
max_batches = nifty_power_of_ten(max_batches)
counter = 0
refiners = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
refiners.append(refiner)
hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
hklout = os.path.join(
self.get_working_directory(),
"%s_%s_%s_%s_prepointless.mtz"
% (pname, xname, dname, si.get_sweep_name()),
)
# we will want to delete this one exit
FileHandler.record_temporary_file(hklout)
first_batch = min(si.get_batches())
si.set_batch_offset(counter * max_batches - first_batch + 1)
rebatch(
hklin,
hklout,
first_batch=counter * max_batches + 1,
pname=pname,
xname=xname,
dname=dname,
)
pointless_hklins.append(hklout)
# update the counter & recycle
counter += 1
# FIXME related to xia2-51 - this looks very very similar to the logic
# in [A] above - is this duplicated logic?
s = self._factory.Sortmtz()
pointless_hklin = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_sorted.mtz"
% (self._scalr_pname, self._scalr_xname),
)
s.set_hklout(pointless_hklin)
for hklin in pointless_hklins:
s.add_hklin(hklin)
s.sort()
pointless_const = os.path.join(
self.get_working_directory(),
f"{self._scalr_pname}_{self._scalr_xname}_prepointless_const.mtz",
)
FileHandler.record_temporary_file(pointless_const)
aimless_const = self._factory.Aimless()
aimless_const.set_hklin(pointless_hklin)
aimless_const.set_hklout(pointless_const)
aimless_const.const()
pointless_const = os.path.join(
self.get_working_directory(),
"%s_%s_prepointless_const_unmerged.mtz"
% (self._scalr_pname, self._scalr_xname),
)
FileHandler.record_temporary_file(pointless_const)
pointless_hklin = pointless_const
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
pointless_hklin, refiners
)
for epoch in self._sweep_handler.get_epochs():
pointgroups[epoch] = pointgroup
reindex_ops[epoch] = reindex_op
# SUMMARY ran pointless multisweep on combined mtz and made a dict
# of pointgroups and reindex_ops (all same)
# END OF if multi-sweep and not input pg
# START OF if not mulit-sweep or pg given
else:
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
integrater = si.get_integrater()
refiner = integrater.get_integrater_refiner()
if self._scalr_input_pointgroup:
logger.debug(
"Using input pointgroup: %s", self._scalr_input_pointgroup
)
pointgroup = self._scalr_input_pointgroup
reindex_op = "h,k,l"
pt = False
else:
pointless_hklin = self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
pointless_hklin, refiner
)
logger.debug("X1698: %s: %s", pointgroup, reindex_op)
if ntr:
integrater.integrater_reset_reindex_operator()
need_to_return = True
if pt and not probably_twinned:
probably_twinned = True
logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)
pointgroups[epoch] = pointgroup
reindex_ops[epoch] = reindex_op
# SUMMARY - for each sweep, run indexer jiffy and get reindex operators
# and pointgroups dictionaries (could be different between sweeps)
# END OF if not mulit-sweep or pg given
overall_pointgroup = None
pointgroup_set = {pointgroups[e] for e in pointgroups}
if len(pointgroup_set) > 1 and not probably_twinned:
raise RuntimeError(
"non uniform pointgroups: %s" % str(list(pointgroup_set))
)
if len(pointgroup_set) > 1:
logger.debug(
"Probably twinned, pointgroups: %s",
" ".join(p.replace(" ", "") for p in pointgroup_set),
)
numbers = (Syminfo.spacegroup_name_to_number(ps) for ps in pointgroup_set)
overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
self._scalr_input_pointgroup = overall_pointgroup
logger.info("Twinning detected, assume pointgroup %s", overall_pointgroup)
need_to_return = True
else:
overall_pointgroup = pointgroup_set.pop()
# SUMMARY - Have handled if different pointgroups & chosen an overall_pointgroup
# which is the lowest symmetry
# Now go through sweeps and do reindexing
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
integrater = si.get_integrater()
integrater.set_integrater_spacegroup_number(
Syminfo.spacegroup_name_to_number(overall_pointgroup)
)
integrater.set_integrater_reindex_operator(
reindex_ops[epoch], reason="setting point group"
)
# This will give us the reflections in the correct point group
si.set_reflections(integrater.get_integrater_intensities())
if need_to_return:
self.set_scaler_done(False)
self.set_scaler_prepare_done(False)
return
# in here now optionally work through the data files which should be
# indexed with a consistent point group, and transform the orientation
# matrices by the lattice symmetry operations (if possible) to get a
# consistent definition of U matrix modulo fixed rotations
if PhilIndex.params.xia2.settings.unify_setting:
self.unify_setting()
if self.get_scaler_reference_reflection_file():
self._reference = self.get_scaler_reference_reflection_file()
logger.debug("Using HKLREF %s", self._reference)
elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
self._reference = (
PhilIndex.params.xia2.settings.scale.reference_reflection_file
)
logger.debug("Using HKLREF %s", self._reference)
params = PhilIndex.params
use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
self.brehm_diederichs_reindexing()
# If not Brehm-Diederichs, set reference as first sweep
elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:
first = self._sweep_handler.get_epochs()[0]
si = self._sweep_handler.get_sweep_information(first)
self._reference = si.get_reflections()
# Now reindex to be consistent with first dataset - run pointless on each
# dataset with reference
if self._reference:
md = self._factory.Mtzdump()
md.set_hklin(self._reference)
md.dump()
datasets = md.get_datasets()
# then get the unit cell, lattice etc.
reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
reference_cell = md.get_dataset_info(datasets[0])["cell"]
# then compute the pointgroup from this...
# ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------
for epoch in self._sweep_handler.get_epochs():
# if we are working with unified UB matrix then this should not
# be a problem here (note, *if*; *should*)
# what about e.g. alternative P1 settings?
# see JIRA MXSW-904
if PhilIndex.params.xia2.settings.unify_setting:
continue
pl = self._factory.Pointless()
si = self._sweep_handler.get_sweep_information(epoch)
hklin = si.get_reflections()
pl.set_hklin(
self._prepare_pointless_hklin(
hklin, si.get_integrater().get_phi_width()
)
)
hklout = os.path.join(
self.get_working_directory(),
"%s_rdx2.mtz" % os.path.split(hklin)[-1][:-4],
)
# we will want to delete this on exit
FileHandler.record_temporary_file(hklout)
# now set the initial reflection set as a reference...
pl.set_hklref(self._reference)
# https://github.com/xia2/xia2/issues/115 - should ideally iteratively
# construct a reference or a tree of correlations to ensure correct
# reference setting - however if small molecule assume has been
# multi-sweep-indexed so can ignore "fatal errors" - temporary hack
pl.decide_pointgroup(
ignore_errors=PhilIndex.params.xia2.settings.small_molecule
)
logger.debug("Reindexing analysis of %s", pl.get_hklin())
pointgroup = pl.get_pointgroup()
reindex_op = pl.get_reindex_operator()
logger.debug("Operator: %s", reindex_op)
# apply this...
integrater = si.get_integrater()
integrater.set_integrater_reindex_operator(
reindex_op, reason="match reference"
)
integrater.set_integrater_spacegroup_number(
Syminfo.spacegroup_name_to_number(pointgroup)
)
si.set_reflections(integrater.get_integrater_intensities())
md = self._factory.Mtzdump()
md.set_hklin(si.get_reflections())
md.dump()
datasets = md.get_datasets()
if len(datasets) > 1:
raise RuntimeError(
"more than one dataset in %s" % si.get_reflections()
)
# then get the unit cell, lattice etc.
lattice = Syminfo.get_lattice(md.get_spacegroup())
cell = md.get_dataset_info(datasets[0])["cell"]
if lattice != reference_lattice:
raise RuntimeError(
"lattices differ in %s and %s"
% (self._reference, si.get_reflections())
)
logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
logger.debug("Ref: %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)
for j in range(6):
if (
math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
> 0.1
):
raise RuntimeError(
"unit cell parameters differ in %s and %s"
% (self._reference, si.get_reflections())
)
# ---------- SORT TOGETHER DATA ----------
self._sort_together_data_ccp4()
self._scalr_resolution_limits = {}
# store central resolution limit estimates
batch_ranges = [
self._sweep_handler.get_sweep_information(epoch).get_batch_range()
for epoch in self._sweep_handler.get_epochs()
]
self._resolution_limit_estimates = ersatz_resolution(
self._prepared_reflections, batch_ranges
)
def _scale(self):
    """Perform all of the operations required to deliver the scaled data.

    Runs aimless several times: a test round (to derive resolution limits
    and catch failing sweeps), a final scaling round, a scalepack-output
    round, and a chef (unmerged) round for radiation-damage analysis.
    Results are stashed on self._scalr_* attributes.
    """
    epochs = self._sweep_handler.get_epochs()

    # First (test) scaling round.
    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_chef_unmerged(True)
    sc.set_new_scales_file("%s.scales" % self._scalr_xname)

    # Collect user-imposed resolution limits, keeping the highest
    # resolution (smallest d-min) per (dataset, sweep).
    user_resolution_limits = {}

    for epoch in epochs:
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        intgr = si.get_integrater()

        if intgr.get_integrater_user_resolution():
            dmin = intgr.get_integrater_high_resolution()

            if (dname, sname) not in user_resolution_limits:
                user_resolution_limits[(dname, sname)] = dmin
            elif dmin < user_resolution_limits[(dname, sname)]:
                user_resolution_limits[(dname, sname)] = dmin

        start, end = si.get_batch_range()

        if (dname, sname) in self._scalr_resolution_limits:
            resolution, _ = self._scalr_resolution_limits[(dname, sname)]
            sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
        else:
            sc.add_run(start, end, name=sname)

    sc.set_hklout(
        os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_scaled_test.mtz",
        )
    )

    if self.get_scaler_anomalous():
        sc.set_anomalous()

    # what follows, sucks
    failover = PhilIndex.params.xia2.settings.failover

    if failover:
        try:
            sc.scale()
        except RuntimeError as e:
            es = str(e)
            if (
                "bad batch" in es
                or "negative scales run" in es
                or "no observations" in es
            ):
                # first ID the sweep from the batch no
                batch = int(es.split()[-1])
                epoch = self._identify_sweep_epoch(batch)
                sweep = self._scalr_integraters[epoch].get_integrater_sweep()

                # then remove it from my parent xcrystal
                self.get_scaler_xcrystal().remove_sweep(sweep)

                # then remove it from the scaler list of integraters
                # - this should really be a scaler interface method
                del self._scalr_integraters[epoch]

                # then tell the user what is happening
                logger.info(
                    "Sweep %s gave negative scales - removing", sweep.get_name()
                )

                # then reset the prepare, do, finish flags
                self.set_scaler_prepare_done(False)
                self.set_scaler_done(False)
                self.set_scaler_finish_done(False)

                # and return
                return
            else:
                raise e
    else:
        sc.scale()

    # then gather up all of the resulting reflection files
    # and convert them into the required formats (.sca, .mtz.)
    loggraph = sc.parse_ccp4_loggraph()

    resolution_info = {}

    reflection_files = sc.get_scaled_reflection_files()
    for dataset in reflection_files:
        FileHandler.record_temporary_file(reflection_files[dataset])

    for key in loggraph:
        if "Analysis against resolution" in key:
            dataset = key.split(",")[-1].strip()
            resolution_info[dataset] = transpose_loggraph(loggraph[key])

    # check in here that there is actually some data to scale..!
    if not resolution_info:
        raise RuntimeError("no resolution info")

    highest_suggested_resolution = self.assess_resolution_limits(
        sc.get_unmerged_reflection_file(), user_resolution_limits
    )

    if not self.get_scaler_done():
        logger.debug("Returning as scaling not finished...")
        return

    batch_info = {}

    for key in loggraph:
        if "Analysis against Batch" in key:
            dataset = key.split(",")[-1].strip()
            batch_info[dataset] = transpose_loggraph(loggraph[key])

    # Final scaling round with the decided resolution limits.
    sc = self._updated_aimless()

    FileHandler.record_log_file(
        f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_log_file()
    )

    sc.set_hklin(self._prepared_reflections)
    sc.set_new_scales_file("%s_final.scales" % self._scalr_xname)

    for epoch in epochs:
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        start, end = si.get_batch_range()

        resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

        # NOTE(review): the other add_run calls in this method pass
        # name=sname; name=xname here looks inconsistent - confirm intended.
        sc.add_run(
            start, end, exclude=False, resolution=resolution_limit, name=xname
        )

    sc.set_hklout(
        os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
        )
    )

    if self.get_scaler_anomalous():
        sc.set_anomalous()

    sc.scale()

    FileHandler.record_xml_file(
        f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_xmlout()
    )

    data = sc.get_summary()
    scales_file = sc.get_new_scales_file()
    loggraph = sc.parse_ccp4_loggraph()

    standard_deviation_info = {}

    for key in loggraph:
        if "standard deviation v. Intensity" in key:
            dataset = key.split(",")[-1].strip()
            standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])

    resolution_info = {}

    for key in loggraph:
        if "Analysis against resolution" in key:
            dataset = key.split(",")[-1].strip()
            resolution_info[dataset] = transpose_loggraph(loggraph[key])

    batch_info = {}

    for key in loggraph:
        if "Analysis against Batch" in key:
            dataset = key.split(",")[-1].strip()
            batch_info[dataset] = transpose_loggraph(loggraph[key])

    # finally put all of the results "somewhere useful"
    self._scalr_statistics = data

    self._scalr_scaled_refl_files = copy.deepcopy(sc.get_scaled_reflection_files())

    # Re-run with the final scales applied to produce scalepack output.
    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_scales_file(scales_file)

    self._wavelengths_in_order = []

    for epoch in epochs:
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        start, end = si.get_batch_range()

        resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

        sc.add_run(
            start, end, exclude=False, resolution=resolution_limit, name=sname
        )

        if dname not in self._wavelengths_in_order:
            self._wavelengths_in_order.append(dname)

    sc.set_hklout(
        os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
        )
    )

    sc.set_scalepack()

    if self.get_scaler_anomalous():
        sc.set_anomalous()
    sc.scale()

    self._update_scaled_unit_cell()

    self._scalr_scaled_reflection_files = {}
    self._scalr_scaled_reflection_files["sca"] = {}
    self._scalr_scaled_reflection_files["sca_unmerged"] = {}
    self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

    for key in self._scalr_scaled_refl_files:
        hklout = self._scalr_scaled_refl_files[key]

        scaout = "%s.sca" % hklout[:-4]
        self._scalr_scaled_reflection_files["sca"][key] = scaout
        FileHandler.record_data_file(scaout)

        scalepack = os.path.join(
            os.path.split(hklout)[0],
            os.path.split(hklout)[1]
            .replace("_scaled", "_scaled_unmerged")
            .replace(".mtz", ".sca"),
        )
        self._scalr_scaled_reflection_files["sca_unmerged"][key] = scalepack
        FileHandler.record_data_file(scalepack)

        mtz_unmerged = os.path.splitext(scalepack)[0] + ".mtz"
        self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
        FileHandler.record_data_file(mtz_unmerged)

        if self._scalr_cell_esd is not None:
            # patch .mtz and overwrite unit cell information
            import xia2.Modules.Scaler.tools as tools

            override_cell = self._scalr_cell_dict.get(
                f"{self._scalr_pname}_{self._scalr_xname}_{key}"
            )[0]

            tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
            tools.patch_mtz_unit_cell(hklout, override_cell)

            self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
            FileHandler.record_data_file(mtz_unmerged)

    if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
        for key in self._scalr_scaled_refl_files:
            stats = self._compute_scaler_statistics(
                self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                selected_band=(highest_suggested_resolution, None),
                wave=key,
            )
            self._scalr_statistics[
                (self._scalr_pname, self._scalr_xname, key)
            ] = stats

    # Chef round: unmerged output for radiation-damage analysis.
    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_scales_file(scales_file)

    self._wavelengths_in_order = []

    for epoch in epochs:
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        start, end = si.get_batch_range()

        resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

        sc.add_run(
            start, end, exclude=False, resolution=resolution_limit, name=sname
        )

        if dname not in self._wavelengths_in_order:
            self._wavelengths_in_order.append(dname)

    sc.set_hklout(
        os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_chef.mtz",
        )
    )
    sc.set_chef_unmerged(True)

    if self.get_scaler_anomalous():
        sc.set_anomalous()
    sc.scale()

    if not PhilIndex.params.dials.fast_mode:
        try:
            self._generate_absorption_map(sc)
        except Exception as e:
            # Map generation may fail for a number of reasons, e.g. matplotlib broken
            logger.debug("Could not generate absorption map (%s)", e)
def _generate_absorption_map(self, scaler):
    """Record the absorption correction range in CIF/mmCIF and, when the
    spherical-harmonic surface is informative, plot it to LogFiles."""
    output = scaler.get_all_output()

    # Pull the AIMLESS version banner out of the log, if one is present.
    aimless = "AIMLESS, CCP4"
    banner_re = re.compile(" +#+ *CCP4.*#+")
    for line in output:
        if banner_re.search(line):
            aimless = re.sub(r"\s\s+", ", ", line.strip("\t\n #"))
            break

    coefficients = scrape_coefficients(log=output)
    if coefficients:
        absmap = evaluate_1degree(coefficients)
        absmin, absmax = absmap.min(), absmap.max()
    else:
        absmin = absmax = 1.0

    block = CIF.get_block("xia2")
    mmblock = mmCIF.get_block("xia2")
    mmblock["_exptl.entry_id"] = "xia2"
    mmblock["_exptl.method"] = "X-RAY DIFFRACTION"

    # Normalise so the maximum transmission is exactly 1.
    t_min = absmin / absmax  # = scaled
    t_max = absmax / absmax  # = 1
    block["_exptl_absorpt_correction_T_min"] = t_min
    mmblock["_exptl.absorpt_correction_T_min"] = t_min
    block["_exptl_absorpt_correction_T_max"] = t_max
    mmblock["_exptl.absorpt_correction_T_max"] = t_max
    block["_exptl_absorpt_correction_type"] = "empirical"
    mmblock["_exptl.absorpt_correction_type"] = "empirical"

    details = (
        """
%s
Scaling & analysis of unmerged intensities, absorption correction using spherical harmonics
"""
        % aimless
    )
    block["_exptl_absorpt_process_details"] = details
    mmblock["_exptl.absorpt_process_details"] = details

    log_directory = self._base_path / "LogFiles"
    if absmax - absmin > 0.000001:
        log_directory.mkdir(parents=True, exist_ok=True)
        mapfile = log_directory / "absorption_surface.png"
        generate_map(absmap, str(mapfile))
    else:
        logger.debug(
            "Cannot create absorption surface: map is too flat (min: %f, max: %f)",
            absmin,
            absmax,
        )
def _identify_sweep_epoch(self, batch):
"""Identify the sweep epoch a given batch came from - N.B.
this assumes that the data are rebatched, will raise an exception if
more than one candidate is present."""
epochs = []
for epoch in self._sweep_handler.get_epochs():
si = self._sweep_handler.get_sweep_information(epoch)
if batch in si.get_batches():
epochs.append(epoch)
if len(epochs) > 1:
raise RuntimeError("batch %d found in multiple sweeps" % batch)
return epochs[0]
def _prepare_pointless_hklin(self, hklin, phi_width):
    """Delegate to the module-level helper, supplying our working directory."""
    workdir = self.get_working_directory()
    return _prepare_pointless_hklin(workdir, hklin, phi_width)
def get_batch_to_dose(self):
    """Map every batch number in every sweep to its accumulated dose."""
    # Gather dose-per-epoch from every sample on the parent xcrystal.
    epoch_to_dose = {}
    for xsample in self.get_scaler_xcrystal()._samples.values():
        epoch_to_dose.update(xsample.get_epoch_to_dose())

    batch_to_dose = {}
    for e0 in self._sweep_handler._sweep_information:
        si = self._sweep_handler._sweep_information[e0]
        batch_offset = si.get_batch_offset()
        logged = False
        for b in range(si.get_batches()[0], si.get_batches()[1] + 1):
            if not epoch_to_dose:
                # backwards compatibility 2015-12-11
                batch_to_dose[b] = b
                continue
            # when handling Eiger data this table appears to be somewhat broken
            # see https://github.com/xia2/xia2/issues/90 - proper fix should be
            # to work out why the epochs are not set correctly in first place...
            image_epoch = si._image_to_epoch[b - batch_offset]
            if image_epoch in epoch_to_dose:
                if not logged:
                    logger.debug("Epoch found; all good")
                    logged = True
                batch_to_dose[b] = epoch_to_dose[image_epoch]
            else:
                if not logged:
                    logger.debug("Epoch not found; using offset %f", e0)
                    logged = True
                batch_to_dose[b] = epoch_to_dose[image_epoch - e0]
    return batch_to_dose
def get_UBlattsymm_from_sweep_info(self, sweep_info):
    """Return U, B, lattice symmetry from the data (i.e. mtz file)."""
    reflections = sweep_info.get_reflections()
    return get_umat_bmat_lattice_symmetry_from_mtz(reflections)
def apply_reindex_operator_to_sweep_info(self, sweep_info, reindex_op, reason):
    """Apply the reindex operator to the data.

    The actual reindexing is delegated to the sweep's integrater; the
    sweep information is then refreshed with the reindexed intensities.
    """
    integrater = sweep_info.get_integrater()
    integrater.set_integrater_reindex_operator(reindex_op, reason=reason)
    reindexed = integrater.get_integrater_intensities()
    sweep_info.set_reflections(reindexed)
def get_mtz_data_from_sweep_info(self, sweep_info):
    """Get the data in mtz form.

    Trivial for CCP4ScalerA: the integrater always generates a fresh mtz
    when reindexing, so the current reflection file is already mtz.
    """
    reflections = sweep_info.get_reflections()
    return reflections
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Scaler/CCP4ScalerA.py",
"copies": "1",
"size": "45837",
"license": "bsd-3-clause",
"hash": 1758730570044993500,
"line_mean": 35.995157385,
"line_max": 91,
"alpha_frac": 0.544232825,
"autogenerated": false,
"ratio": 4.041350731793335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5085583556793335,
"avg_score": null,
"num_lines": null
} |
# An implementation of the segsieve algorithm
# Finds the number of primes in range [a, b] in O(n log log b)
# time and O(sqrt(b)) space
from math import *
p = []

# Generate list of important primes
def sieve(delta):
    """Append all odd primes <= delta to the module-level list p.

    2 is deliberately skipped; callers account for it separately.
    """
    flags = [None] * (delta + 1)
    for i in range(3, delta + 1, 2):
        if not flags[i]:
            p.append(i)
            # Mark every multiple of i as composite.  The original code
            # wrote flags[i] = 1 here (flagging the prime itself), so
            # composites were never marked and p collected every odd
            # number rather than just the primes.
            for j in range(i * i, delta + 1, i):
                flags[j] = 1
def segsieve(a, b):
    """Return the number of primes in the inclusive range [a, b].

    Segmented sieve of Eratosthenes: only O(sqrt(b)) flag storage is used
    per segment, with the base primes coming from sieve().

    Fix: removed a leftover debug ``print(ans)`` (fired every 10000 primes)
    and a dead pre-loop flags initialization that was immediately
    overwritten on the first iteration.
    """
    delta = ceil(sqrt(b))
    # Initialize the number of primes.
    # If the range includes 2, add 1, since we're skipping 2 with the sieve.
    ans = 1 if (a <= 2 and b >= 2) else 0
    # Generate the base primes up to sqrt(b).
    sieve(delta)
    # Loop through segments of width delta.
    for low in range(a, b + 1, delta):
        flags = [0] * (delta + 1)
        # Endpoint of this segment: a full window, or the end of the range.
        high = min(low + delta - 1, b)
        # Sieve the segment with each base prime.
        for prime in p:
            # First multiple of this prime at or above low.
            first = floor(low // prime) * prime
            if first < low:
                first += prime
            for i in range(first, high + 1, prime):
                # Don't flag the prime itself, only proper multiples.
                if i != prime:
                    # Normalize into the segment-local index range [0, delta].
                    flags[i - low] = 1
        # Count the unflagged odd numbers in the segment (2 was handled
        # above; start from an odd number >= 3, in jumps of 2).
        for i in range(max(3, low + 1 if (low % 2 == 0) else low), high + 1, 2):
            if flags[i - low] == 0:
                ans += 1
    return ans
# Script entry point: read "a b" (two integers on one line) from stdin.
a, b = map(int, input().split())
print("Number of primes in range: %d" % segsieve(a, b))
| {
"repo_name": "ruar18/competitive-programming",
"path": "algorithms-data-structures/number-theory/primes/segmented-sieve.py",
"copies": "1",
"size": "1871",
"license": "mit",
"hash": 358089324921771500,
"line_mean": 29.6721311475,
"line_max": 98,
"alpha_frac": 0.5328701229,
"autogenerated": false,
"ratio": 3.543560606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4576430728960606,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a
framework-neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.::
O
|
V
STOPPING --> STOPPED --> EXITING -> X
A A |
| \___ |
| \ |
| V V
STARTED <-- STARTING
"""
import atexit
import os
import sys
import threading
import time
import traceback as _traceback
import warnings
from cherrypy._cpcompat import set
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run.  This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
# NOTE: captured once at import time - import this module before any chdir().
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
    """Exception raised when errors occur in a listener during Bus.publish().

    Collects the individual listener exceptions so publish() can keep
    calling the remaining listeners and report all failures at once.
    """

    # Separator used between listener exception reprs in str(self).
    delimiter = '\n'

    def __init__(self, *args, **kwargs):
        # Don't use 'super' here; Exceptions are old-style in Py2.4.
        # See https://bitbucket.org/cherrypy/cherrypy/issue/959
        Exception.__init__(self, *args, **kwargs)
        self._exceptions = []

    def handle_exception(self):
        """Append the current exception to self."""
        self._exceptions.append(sys.exc_info()[1])

    def get_instances(self):
        """Return a list of seen exception instances."""
        return list(self._exceptions)

    def __str__(self):
        return self.delimiter.join(repr(e) for e in self.get_instances())

    __repr__ = __str__

    def __bool__(self):
        return len(self._exceptions) > 0

    __nonzero__ = __bool__
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return "states.%s" % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
# Singleton registry of bus states; assigning a State instance also stamps
# the attribute name onto it (see _StateEnum.__setattr__).
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
try:
    import fcntl
except ImportError:
    # No fcntl (e.g. Windows): disable the CLOEXEC sweep in _set_cloexec.
    max_files = 0
else:
    try:
        max_files = os.sysconf('SC_OPEN_MAX')
    except AttributeError:
        # os.sysconf unavailable on this platform; use a conservative default.
        max_files = 1024
class Bus(object):
    """Process state-machine and messenger for HTTP site deployment.

    All listeners for a given channel are guaranteed to be called even
    if others at the same channel fail. Each failure is logged, but
    execution proceeds on to the next listener. The only way to stop all
    processing from inside a listener is to raise SystemExit and stop the
    whole server.
    """

    states = states
    state = states.STOPPED
    execv = False
    max_cloexec_files = max_files

    def __init__(self):
        self.execv = False
        self.state = states.STOPPED
        # Built-in channels; frameworks may add their own via subscribe().
        self.listeners = dict(
            [(channel, set()) for channel
             in ('start', 'stop', 'exit', 'graceful', 'log', 'main')])
        self._priorities = {}

    def subscribe(self, channel, callback, priority=None):
        """Add the given callback at the given channel (if not present)."""
        if channel not in self.listeners:
            self.listeners[channel] = set()
        self.listeners[channel].add(callback)

        if priority is None:
            # Callbacks may carry their own 'priority' attribute; default 50.
            priority = getattr(callback, 'priority', 50)
        self._priorities[(channel, callback)] = priority

    def unsubscribe(self, channel, callback):
        """Discard the given callback (if present)."""
        listeners = self.listeners.get(channel)
        if listeners and callback in listeners:
            listeners.discard(callback)
            del self._priorities[(channel, callback)]

    def publish(self, channel, *args, **kwargs):
        """Return output of all subscribers for the given channel."""
        if channel not in self.listeners:
            return []

        exc = ChannelFailures()
        output = []

        # Call listeners in ascending priority order.
        items = [(self._priorities[(channel, listener)], listener)
                 for listener in self.listeners[channel]]
        try:
            items.sort(key=lambda item: item[0])
        except TypeError:
            # Python 2.3 had no 'key' arg, but that doesn't matter
            # since it could sort dissimilar types just fine.
            items.sort()
        for priority, listener in items:
            try:
                output.append(listener(*args, **kwargs))
            except KeyboardInterrupt:
                raise
            except SystemExit:
                e = sys.exc_info()[1]
                # If we have previous errors ensure the exit code is non-zero
                if exc and e.code == 0:
                    e.code = 1
                raise
            except:
                # Record the failure but keep calling the other listeners.
                exc.handle_exception()
                if channel == 'log':
                    # Assume any further messages to 'log' will fail.
                    pass
                else:
                    self.log("Error in %r listener %r" % (channel, listener),
                             level=40, traceback=True)
        if exc:
            raise exc
        return output

    def _clean_exit(self):
        """An atexit handler which asserts the Bus is not running."""
        if self.state != states.EXITING:
            warnings.warn(
                "The main thread is exiting, but the Bus is in the %r state; "
                "shutting it down automatically now. You must either call "
                "bus.block() after start(), or call bus.exit() before the "
                "main thread exits." % self.state, RuntimeWarning)
            self.exit()

    def start(self):
        """Start all services."""
        atexit.register(self._clean_exit)
        self.state = states.STARTING
        self.log('Bus STARTING')
        try:
            self.publish('start')
            self.state = states.STARTED
            self.log('Bus STARTED')
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.log("Shutting down due to error in start listener:",
                     level=40, traceback=True)
            e_info = sys.exc_info()[1]
            try:
                self.exit()
            except:
                # Any stop/exit errors will be logged inside publish().
                pass
            # Re-raise the original error
            raise e_info

    def exit(self):
        """Stop all services and prepare to exit the process."""
        exitstate = self.state
        try:
            self.stop()

            self.state = states.EXITING
            self.log('Bus EXITING')
            self.publish('exit')
            # This isn't strictly necessary, but it's better than seeing
            # "Waiting for child threads to terminate..." and then nothing.
            self.log('Bus EXITED')
        except:
            # This method is often called asynchronously (whether thread,
            # signal handler, console handler, or atexit handler), so we
            # can't just let exceptions propagate out unhandled.
            # Assume it's been logged and just die.
            os._exit(70)  # EX_SOFTWARE

        if exitstate == states.STARTING:
            # exit() was called before start() finished, possibly due to
            # Ctrl-C because a start listener got stuck. In this case,
            # we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os.exit here.
            os._exit(70)  # EX_SOFTWARE

    def restart(self):
        """Restart the process (may close connections).

        This method does not restart the process from the calling thread;
        instead, it stops the bus and asks the main thread to call execv.
        """
        self.execv = True
        self.exit()

    def graceful(self):
        """Advise all services to reload."""
        self.log('Bus graceful')
        self.publish('graceful')

    def block(self, interval=0.1):
        """Wait for the EXITING state, KeyboardInterrupt or SystemExit.

        This function is intended to be called only by the main thread.
        After waiting for the EXITING state, it also waits for all threads
        to terminate, and then calls os.execv if self.execv is True. This
        design allows another thread to call bus.restart, yet have the main
        thread perform the actual execv call (required on some platforms).
        """
        try:
            self.wait(states.EXITING, interval=interval, channel='main')
        except (KeyboardInterrupt, IOError):
            # The time.sleep call might raise
            # "IOError: [Errno 4] Interrupted function call" on KBInt.
            self.log('Keyboard Interrupt: shutting down bus')
            self.exit()
        except SystemExit:
            self.log('SystemExit raised: shutting down bus')
            self.exit()
            raise

        # Waiting for ALL child threads to finish is necessary on OS X.
        # See https://bitbucket.org/cherrypy/cherrypy/issue/581.
        # It's also good to let them all shut down before allowing
        # the main thread to call atexit handlers.
        # See https://bitbucket.org/cherrypy/cherrypy/issue/751.
        self.log("Waiting for child threads to terminate...")
        for t in threading.enumerate():
            if t != threading.currentThread() and t.isAlive():
                # Note that any dummy (external) threads are always daemonic.
                if hasattr(threading.Thread, "daemon"):
                    # Python 2.6+
                    d = t.daemon
                else:
                    d = t.isDaemon()
                if not d:
                    self.log("Waiting for thread %s." % t.getName())
                    t.join()

        if self.execv:
            self._do_execv()

    def wait(self, state, interval=0.1, channel=None):
        """Poll for the given state(s) at intervals; publish to channel."""
        if isinstance(state, (tuple, list)):
            states = state
        else:
            states = [state]

        def _wait():
            while self.state not in states:
                time.sleep(interval)
                self.publish(channel)

        # From http://psyco.sourceforge.net/psycoguide/bugs.html:
        # "The compiled machine code does not include the regular polling
        # done by Python, meaning that a KeyboardInterrupt will not be
        # detected before execution comes back to the regular Python
        # interpreter. Your program cannot be interrupted if caught
        # into an infinite Psyco-compiled loop."
        try:
            sys.modules['psyco'].cannotcompile(_wait)
        except (KeyError, AttributeError):
            pass

        _wait()

    def _do_execv(self):
        """Re-execute the current process.

        This must be called from the main thread, because certain platforms
        (OS X) don't allow execv to be called in a child thread very well.
        """
        args = sys.argv[:]
        self.log('Re-spawning %s' % ' '.join(args))

        if sys.platform[:4] == 'java':
            from _systemrestart import SystemRestart
            raise SystemRestart
        else:
            args.insert(0, sys.executable)
            if sys.platform == 'win32':
                args = ['"%s"' % arg for arg in args]

            # Restore the directory captured at import time (see
            # _startup_cwd) so a relative sys.executable still resolves.
            os.chdir(_startup_cwd)
            if self.max_cloexec_files:
                self._set_cloexec()
            os.execv(sys.executable, args)

    def _set_cloexec(self):
        """Set the CLOEXEC flag on all open files (except stdin/out/err).

        If self.max_cloexec_files is an integer (the default), then on
        platforms which support it, it represents the max open files setting
        for the operating system. This function will be called just before
        the process is restarted via os.execv() to prevent open files
        from persisting into the new process.

        Set self.max_cloexec_files to 0 to disable this behavior.
        """
        for fd in range(3, self.max_cloexec_files):  # skip stdin/out/err
            try:
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            except IOError:
                # fd is not open; nothing to flag.
                continue
            fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)

    def stop(self):
        """Stop all services."""
        self.state = states.STOPPING
        self.log('Bus STOPPING')
        self.publish('stop')
        self.state = states.STOPPED
        self.log('Bus STOPPED')

    def start_with_callback(self, func, args=None, kwargs=None):
        """Start 'func' in a new thread T, then start self (and return T)."""
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        args = (func,) + args

        def _callback(func, *a, **kw):
            # Don't run func until the bus has fully started.
            self.wait(states.STARTED)
            func(*a, **kw)
        t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
        t.setName('Bus Callback ' + t.getName())
        t.start()

        self.start()

        return t

    def log(self, msg="", level=20, traceback=False):
        """Log the given message. Append the last traceback if requested."""
        if traceback:
            msg += "\n" + "".join(_traceback.format_exception(*sys.exc_info()))
        self.publish('log', msg, level)
# The process-wide singleton bus, shared by all frameworks and containers.
bus = Bus()
| {
"repo_name": "bmbove/omxremote",
"path": "cherrypy/process/wspbus.py",
"copies": "1",
"size": "16029",
"license": "bsd-3-clause",
"hash": 4870785982507429000,
"line_mean": 36.1041666667,
"line_max": 80,
"alpha_frac": 0.6047788384,
"autogenerated": false,
"ratio": 4.427900552486188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014157983912593648,
"num_lines": 432
} |
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/~danwang/Papers/dsl97/dsl97-abstract.html.
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import traceback
import spark
class Token(object):
    """Generic lexer token.

    The spark parser dispatches on each token's ``type`` attribute, so
    every token (and subclass) must carry one.
    """

    def __init__(self, type, lineno):
        self.type, self.lineno = type, lineno

    def __str__(self):
        return self.type

    def __repr__(self):
        return self.__str__()
class Id(Token):
    """An identifier token; ``value`` holds the identifier's text."""

    def __init__(self, value, lineno):
        self.type = 'Id'
        self.value, self.lineno = value, lineno

    def __str__(self):
        return self.value
class String(Token):
    """A string-literal token; ``value`` keeps the quoted source text."""

    def __init__(self, value, lineno):
        self.type = 'String'
        self.value, self.lineno = value, lineno
class ASDLSyntaxError(Exception):
    """A parse error, formatted from either an explicit message or a token."""

    def __init__(self, lineno, token=None, msg=None):
        self.lineno = lineno
        self.token = token
        self.msg = msg

    def __str__(self):
        if self.msg is not None:
            return "%s, line %d" % (self.msg, self.lineno)
        return "Error at '%s', line %d" % (self.token, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
    """Tokenizer for ASDL source.

    spark builds the scanner from the regular expressions stored in the
    t_* method docstrings; those docstrings are patterns, not prose, and
    must not be edited.
    """
    def tokenize(self, input):
        self.rv = []        # accumulated Token objects, returned to caller
        self.lineno = 1
        super(ASDLScanner, self).tokenize(input)
        return self.rv
    def t_id(self, s):
        r"[\w\.]+"
        # XXX doesn't distinguish upper vs. lower, which is
        # significant for ASDL.
        self.rv.append(Id(s, self.lineno))
    def t_string(self, s):
        r'"[^"]*"'
        self.rv.append(String(s, self.lineno))
    def t_xxx(self, s): # not sure what this production means
        r"<="
        self.rv.append(Token(s, self.lineno))
    def t_punctuation(self, s):
        r"[\{\}\*\=\|\(\)\,\?\:]"
        self.rv.append(Token(s, self.lineno))
    def t_comment(self, s):
        r"\-\-[^\n]*"
        pass                # comments are discarded
    def t_newline(self, s):
        r"\n"
        self.lineno += 1    # tracked only for error reporting
    def t_whitespace(self, s):
        r"[ \t]+"
        pass
    def t_default(self, s):
        r" . +"
        raise ValueError, "unmatched input: %s" % `s`
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, (module, name, version, _0, _1)):
" module ::= Id Id version { } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, (module, name, version, _0, definitions, _1)):
" module ::= Id Id version { definitions } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_version(self, (version, V)):
"version ::= Id String"
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, (definition,)):
" definitions ::= definition "
return definition
def p_definition_1(self, (definitions, definition)):
" definitions ::= definition definitions "
return definitions + definition
def p_definition(self, (id, _, type)):
" definition ::= Id = type "
return [Type(id, type)]
def p_type_0(self, (product,)):
" type ::= product "
return product
def p_type_1(self, (sum,)):
" type ::= sum "
return Sum(sum)
def p_type_2(self, (sum, id, _0, attributes, _1)):
" type ::= sum Id ( fields ) "
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, (_0, fields, _1)):
" product ::= ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Product(fields)
def p_sum_0(self, (constructor,)):
" sum ::= constructor "
return [constructor]
def p_sum_1(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_sum_2(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_constructor_0(self, (id,)):
" constructor ::= Id "
return Constructor(id)
def p_constructor_1(self, (id, _0, fields, _1)):
" constructor ::= Id ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, (field,)):
" fields ::= field "
return [field]
def p_fields_1(self, (field, _, fields)):
" fields ::= field , fields "
return fields + [field]
def p_field_0(self, (type,)):
" field ::= Id "
return Field(type)
def p_field_1(self, (type, name)):
" field ::= Id Id "
return Field(type, name)
def p_field_2(self, (type, _, name)):
" field ::= Id * Id "
return Field(type, name, seq=True)
def p_field_3(self, (type, _, name)):
" field ::= Id ? Id "
return Field(type, name, opt=True)
def p_field_4(self, (type, _)):
" field ::= Id * "
return Field(type, seq=True)
def p_field_5(self, (type, _)):
" field ::= Id ? "
return Field(type, opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
    # Marker base class: every node of the parsed ASDL description
    # derives from it, and VisitorBase asserts it before dispatching.
    pass # a marker class
class Module(AST):
    """A parsed ASDL module: its name, definitions, and version string."""

    def __init__(self, name, dfns, version):
        self.name = name
        # BUG FIX: the parser's empty-module production builds
        # Module(name, None, version); normalize None to [] so the
        # index loop below (and later visitors) do not crash.
        self.dfns = dfns or []
        self.version = version
        self.types = {} # maps type name to value (from dfns)
        for type in self.dfns:
            self.types[type.name.value] = type.value

    def __repr__(self):
        return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
    """One top-level definition: a name bound to a sum or product value."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
    """A sum alternative: a constructor name plus zero or more fields."""

    def __init__(self, name, fields=None):
        self.name = name
        self.fields = fields or []

    def __repr__(self):
        return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
    """A single field: a type, an optional name, and ``*``/``?`` flags."""

    def __init__(self, type, name=None, seq=False, opt=False):
        self.type = type
        self.name = name
        self.seq = seq    # sequence field (Id *)
        self.opt = opt    # optional field (Id ?)

    def __repr__(self):
        extra = ", seq=True" if self.seq else (", opt=True" if self.opt else "")
        if self.name is None:
            return "Field(%s%s)" % (self.type, extra)
        return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
    """A sum type: a list of constructors plus optional shared attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        self.attributes = attributes or []

    def __repr__(self):
        # BUG FIX: __init__ normalizes attributes to a list, so the old
        # "is None" test never fired and attribute-less Sums printed a
        # spurious ", []".  Test for emptiness instead.
        if not self.attributes:
            return "Sum(%s)" % self.types
        return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
    """A product type: simply an ordered collection of fields."""

    def __init__(self, fields):
        self.fields = fields

    def __repr__(self):
        return "Product(%s)" % (self.fields,)
class VisitorBase(object):
    """Base visitor; dispatches to visit<ClassName> methods on AST nodes.

    With skip=True, nodes lacking a matching visit method are silently
    ignored; otherwise the missing method raises AttributeError.
    """
    def __init__(self, skip=False):
        self.cache = {}   # maps node class -> bound visit method (or None)
        self.skip = skip
    def visit(self, object, *args):
        meth = self._dispatch(object)
        if meth is None:
            return
        try:
            meth(object, *args)
        except Exception, err:
            print "Error visiting", repr(object)
            print err
            traceback.print_exc()
            # XXX hack
            if hasattr(self, 'file'):
                self.file.flush()
            # Hard abort so partially written output is not trusted.
            os._exit(1)
    def _dispatch(self, object):
        assert isinstance(object, AST), repr(object)
        klass = object.__class__
        meth = self.cache.get(klass)
        if meth is None:
            methname = "visit" + klass.__name__
            if self.skip:
                meth = getattr(self, methname, None)
            else:
                meth = getattr(self, methname)
            self.cache[klass] = meth
        return meth
class Check(VisitorBase):
    """Validate a parsed module: unique constructor names, defined types."""
    def __init__(self):
        super(Check, self).__init__(skip=True)
        self.cons = {}    # constructor name -> name of the defining type
        self.errors = 0   # number of problems found so far
        self.types = {}   # field type name -> list of places it is used
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type):
        self.visit(type.value, str(type.name))
    def visitSum(self, sum, name):
        for t in sum.types:
            self.visit(t, name)
    def visitConstructor(self, cons, name):
        key = str(cons.name)
        conflict = self.cons.get(key)
        if conflict is None:
            self.cons[key] = name
        else:
            print "Redefinition of constructor %s" % key
            print "Defined in %s and %s" % (conflict, name)
            self.errors += 1
        for f in cons.fields:
            self.visit(f, key)
    def visitField(self, field, name):
        key = str(field.type)
        l = self.types.setdefault(key, [])
        l.append(name)
    def visitProduct(self, prod, name):
        for f in prod.fields:
            self.visit(f, name)
def check(mod):
    """Check the parsed module, printing problems; return True when clean."""
    v = Check()
    v.visit(mod)
    for t in v.types:
        if t not in mod.types and not t in builtin_types:
            v.errors += 1
            uses = ", ".join(v.types[t])
            print "Undefined type %s, used in %s" % (t, uses)
    return not v.errors
def parse(file):
    """Scan and parse one ASDL file; return a Module, or None on error."""
    scanner = ASDLScanner()
    parser = ASDLParser()
    buf = open(file).read()
    tokens = scanner.tokenize(buf)
    try:
        return parser.parse(tokens)
    except ASDLSyntaxError, err:
        # Report the error plus the offending source line, then fall
        # through (implicitly returning None).
        print err
        lines = buf.split("\n")
        print lines[err.lineno - 1] # lines starts at 0, files at 1
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
print file
mod = parse(file)
print "module", mod.name
print len(mod.dfns), "definitions"
if not check(mod):
print "Check failed"
else:
for dfn in mod.dfns:
print dfn.type
| {
"repo_name": "CamelBackNotation/CarnotKE",
"path": "jyhton/ast/asdl.py",
"copies": "7",
"size": "11335",
"license": "apache-2.0",
"hash": 6655011558011426000,
"line_mean": 26.4455205811,
"line_max": 77,
"alpha_frac": 0.5389501544,
"autogenerated": false,
"ratio": 3.723718791064389,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002479235940948759,
"num_lines": 413
} |
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import sys
import traceback
import spark
def output(string):
    """Write *string* plus a trailing newline to standard output."""
    line = string + "\n"
    sys.stdout.write(line)
class Token(object):
    """Generic lexer token.

    The spark parser dispatches on each token's ``type`` attribute, so
    every token (and subclass) must carry one.
    """

    def __init__(self, type, lineno):
        self.type, self.lineno = type, lineno

    def __str__(self):
        return self.type

    def __repr__(self):
        return self.__str__()
class Id(Token):
    """An identifier token; ``value`` holds the identifier's text."""

    def __init__(self, value, lineno):
        self.type = 'Id'
        self.value, self.lineno = value, lineno

    def __str__(self):
        return self.value
class String(Token):
    """A string-literal token; ``value`` keeps the quoted source text."""

    def __init__(self, value, lineno):
        self.type = 'String'
        self.value, self.lineno = value, lineno
class ASDLSyntaxError(Exception):
    """A parse error, formatted from either an explicit message or a token."""

    def __init__(self, lineno, token=None, msg=None):
        self.lineno = lineno
        self.token = token
        self.msg = msg

    def __str__(self):
        if self.msg is not None:
            return "%s, line %d" % (self.msg, self.lineno)
        return "Error at '%s', line %d" % (self.token, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
    """Tokenizer for ASDL source.

    spark builds the scanner from the regular expressions stored in the
    t_* method docstrings; those docstrings are patterns, not prose, and
    must not be edited.
    """
    def tokenize(self, input):
        self.rv = []        # accumulated Token objects, returned to caller
        self.lineno = 1
        super(ASDLScanner, self).tokenize(input)
        return self.rv
    def t_id(self, s):
        r"[\w\.]+"
        # XXX doesn't distinguish upper vs. lower, which is
        # significant for ASDL.
        self.rv.append(Id(s, self.lineno))
    def t_string(self, s):
        r'"[^"]*"'
        self.rv.append(String(s, self.lineno))
    def t_xxx(self, s): # not sure what this production means
        r"<="
        self.rv.append(Token(s, self.lineno))
    def t_punctuation(self, s):
        r"[\{\}\*\=\|\(\)\,\?\:]"
        self.rv.append(Token(s, self.lineno))
    def t_comment(self, s):
        r"\-\-[^\n]*"
        pass                # comments are discarded
    def t_newline(self, s):
        r"\n"
        self.lineno += 1    # tracked only for error reporting
    def t_whitespace(self, s):
        r"[ \t]+"
        pass
    def t_default(self, s):
        r" . +"
        raise ValueError("unmatched input: %r" % s)
class ASDLParser(spark.GenericParser, object):
    """Grammar for ASDL itself.

    spark builds the parser from the grammar productions embedded in the
    p_* method docstrings; those docstrings are grammar rules, not prose,
    and must not be reworded.  Each handler receives the matched symbols
    as a sequence.
    """

    def __init__(self):
        # "module" is the grammar's start symbol.
        super(ASDLParser, self).__init__("module")

    def typestring(self, tok):
        return tok.type

    def error(self, tok):
        raise ASDLSyntaxError(tok.lineno, tok)

    def p_module_0(self, info):
        " module ::= Id Id version { } "
        module, name, version, _0, _1 = info
        if module.value != "module":
            raise ASDLSyntaxError(module.lineno,
                                  msg="expected 'module', found %s" % module)
        return Module(name, None, version)

    def p_module(self, info):
        " module ::= Id Id version { definitions } "
        module, name, version, _0, definitions, _1 = info
        if module.value != "module":
            raise ASDLSyntaxError(module.lineno,
                                  msg="expected 'module', found %s" % module)
        return Module(name, definitions, version)

    def p_version(self, info):
        "version ::= Id String"
        version, V = info
        if version.value != "version":
            # BUG FIX: the format string was a bare "%", which raises
            # ValueError at error-reporting time; "%s" was intended.
            raise ASDLSyntaxError(version.lineno,
                                  msg="expected 'version', found %s" % version)
        return V

    def p_definition_0(self, definition):
        " definitions ::= definition "
        return definition[0]

    def p_definition_1(self, definitions):
        " definitions ::= definition definitions "
        return definitions[0] + definitions[1]

    def p_definition(self, info):
        " definition ::= Id = type "
        id, _, type = info
        return [Type(id, type)]

    def p_type_0(self, product):
        " type ::= product "
        return product[0]

    def p_type_1(self, sum):
        " type ::= sum "
        return Sum(sum[0])

    def p_type_2(self, info):
        " type ::= sum Id ( fields ) "
        sum, id, _0, attributes, _1 = info
        if id.value != "attributes":
            raise ASDLSyntaxError(id.lineno,
                                  msg="expected attributes, found %s" % id)
        if attributes:
            attributes.reverse()
        return Sum(sum, attributes)

    def p_product(self, info):
        " product ::= ( fields ) "
        _0, fields, _1 = info
        # Fields are collected right-to-left by the grammar; restore
        # source order here.
        fields.reverse()
        return Product(fields)

    def p_sum_0(self, constructor):
        " sum ::= constructor "
        return [constructor[0]]

    def p_sum_1(self, info):
        " sum ::= constructor | sum "
        constructor, _, sum = info
        return [constructor] + sum

    def p_sum_2(self, info):
        " sum ::= constructor | sum "
        constructor, _, sum = info
        return [constructor] + sum

    def p_constructor_0(self, id):
        " constructor ::= Id "
        return Constructor(id[0])

    def p_constructor_1(self, info):
        " constructor ::= Id ( fields ) "
        id, _0, fields, _1 = info
        # Fields are collected right-to-left; restore source order.
        fields.reverse()
        return Constructor(id, fields)

    def p_fields_0(self, field):
        " fields ::= field "
        return [field[0]]

    def p_fields_1(self, info):
        " fields ::= field , fields "
        field, _, fields = info
        return fields + [field]

    def p_field_0(self, type_):
        " field ::= Id "
        return Field(type_[0])

    def p_field_1(self, info):
        " field ::= Id Id "
        type, name = info
        return Field(type, name)

    def p_field_2(self, info):
        " field ::= Id * Id "
        type, _, name = info
        return Field(type, name, seq=True)

    def p_field_3(self, info):
        " field ::= Id ? Id "
        type, _, name = info
        return Field(type, name, opt=True)

    def p_field_4(self, type_):
        " field ::= Id * "
        return Field(type_[0], seq=True)

    def p_field_5(self, type_):
        " field ::= Id ? "
        # BUG FIX: subscripted the builtin ``type`` instead of the
        # ``type_`` parameter, breaking unnamed optional fields.
        return Field(type_[0], opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
    # Marker base class: every node of the parsed ASDL description
    # derives from it, and VisitorBase asserts it before dispatching.
    pass # a marker class
class Module(AST):
    """A parsed ASDL module: its name, definitions, and version string."""

    def __init__(self, name, dfns, version):
        self.name = name
        # BUG FIX: the parser's empty-module production builds
        # Module(name, None, version); normalize None to [] so the
        # index loop below (and later visitors) do not crash.
        self.dfns = dfns or []
        self.version = version
        self.types = {} # maps type name to value (from dfns)
        for type in self.dfns:
            self.types[type.name.value] = type.value

    def __repr__(self):
        return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
    """One top-level definition: a name bound to a sum or product value."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
    """A sum alternative: a constructor name plus zero or more fields."""

    def __init__(self, name, fields=None):
        self.name = name
        self.fields = fields or []

    def __repr__(self):
        return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
    """A single field: a type, an optional name, and ``*``/``?`` flags."""

    def __init__(self, type, name=None, seq=False, opt=False):
        self.type = type
        self.name = name
        self.seq = seq    # sequence field (Id *)
        self.opt = opt    # optional field (Id ?)

    def __repr__(self):
        extra = ", seq=True" if self.seq else (", opt=True" if self.opt else "")
        if self.name is None:
            return "Field(%s%s)" % (self.type, extra)
        return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
    """A sum type: a list of constructors plus optional shared attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        self.attributes = attributes or []

    def __repr__(self):
        # BUG FIX: __init__ normalizes attributes to a list, so the old
        # "is None" test never fired and attribute-less Sums printed a
        # spurious ", []".  Test for emptiness instead.
        if not self.attributes:
            return "Sum(%s)" % self.types
        return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
    """A product type: simply an ordered collection of fields."""

    def __init__(self, fields):
        self.fields = fields

    def __repr__(self):
        return "Product(%s)" % (self.fields,)
class VisitorBase(object):
    """Base visitor; dispatches to visit<ClassName> methods on AST nodes.

    With skip=True, nodes lacking a matching visit method are silently
    ignored; otherwise the missing method raises AttributeError.
    """
    def __init__(self, skip=False):
        self.cache = {}   # maps node class -> bound visit method (or None)
        self.skip = skip
    def visit(self, object, *args):
        meth = self._dispatch(object)
        if meth is None:
            return
        try:
            meth(object, *args)
        except Exception:
            output("Error visiting" + repr(object))
            output(str(sys.exc_info()[1]))
            traceback.print_exc()
            # XXX hack
            if hasattr(self, 'file'):
                self.file.flush()
            # Hard abort so partially written output is not trusted.
            os._exit(1)
    def _dispatch(self, object):
        assert isinstance(object, AST), repr(object)
        klass = object.__class__
        meth = self.cache.get(klass)
        if meth is None:
            methname = "visit" + klass.__name__
            if self.skip:
                meth = getattr(self, methname, None)
            else:
                meth = getattr(self, methname)
            self.cache[klass] = meth
        return meth
class Check(VisitorBase):
    """Validate a parsed module: unique constructor names, defined types."""

    def __init__(self):
        super(Check, self).__init__(skip=True)
        self.cons = {}    # constructor name -> name of the defining type
        self.errors = 0   # number of problems reported so far
        self.types = {}   # field type name -> list of places it is used

    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)

    def visitType(self, type):
        self.visit(type.value, str(type.name))

    def visitSum(self, sum, name):
        for variant in sum.types:
            self.visit(variant, name)

    def visitConstructor(self, cons, name):
        key = str(cons.name)
        previous = self.cons.get(key)
        if previous is not None:
            output("Redefinition of constructor %s" % key)
            output("Defined in %s and %s" % (previous, name))
            self.errors += 1
        else:
            self.cons[key] = name
        for f in cons.fields:
            self.visit(f, key)

    def visitField(self, field, name):
        self.types.setdefault(str(field.type), []).append(name)

    def visitProduct(self, prod, name):
        for f in prod.fields:
            self.visit(f, name)
def check(mod):
    """Run Check over *mod* and report undefined types; True when clean."""
    v = Check()
    v.visit(mod)
    for name in v.types:
        if name not in mod.types and name not in builtin_types:
            v.errors += 1
            uses = ", ".join(v.types[name])
            output("Undefined type %s, used in %s" % (name, uses))
    return not v.errors
def parse(file):
    """Scan and parse one ASDL file; return a Module, or None on error."""
    scanner = ASDLScanner()
    parser = ASDLParser()
    buf = open(file).read()
    tokens = scanner.tokenize(buf)
    try:
        return parser.parse(tokens)
    except ASDLSyntaxError:
        # Report the error plus the offending source line, then fall
        # through (implicitly returning None).
        err = sys.exc_info()[1]
        output(str(err))
        lines = buf.split("\n")
        output(lines[err.lineno - 1]) # lines starts at 0, files at 1
if __name__ == "__main__":
    # Parse and check every ASDL file given on the command line, or all
    # test fixtures under tests/ when none are given.
    import glob
    import sys
    if len(sys.argv) > 1:
        files = sys.argv[1:]
    else:
        testdir = "tests"
        files = glob.glob(testdir + "/*.asdl")
    for file in files:
        output(file)
        mod = parse(file)
        if not mod:
            break
        # BUG FIX: output() takes a single string argument; the old
        # calls passed two arguments and raised TypeError.
        output("module %s" % mod.name)
        output("%d definitions" % len(mod.dfns))
        if not check(mod):
            output("Check failed")
        else:
            for dfn in mod.dfns:
                # NOTE(review): Type defines .name/.value, not .type --
                # this looks like it was meant to be dfn.name; confirm.
                output(str(dfn.type))
| {
"repo_name": "imsparsh/python-for-android",
"path": "python3-alpha/python3-src/Parser/asdl.py",
"copies": "47",
"size": "11726",
"license": "apache-2.0",
"hash": -3072529063225658400,
"line_mean": 26.0184331797,
"line_max": 78,
"alpha_frac": 0.5368412076,
"autogenerated": false,
"ratio": 3.7403508771929825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import traceback
import spark
class Token(object):
    """Generic lexer token.

    The spark parser dispatches on each token's ``type`` attribute, so
    every token (and subclass) must carry one.
    """

    def __init__(self, type, lineno):
        self.type, self.lineno = type, lineno

    def __str__(self):
        return self.type

    def __repr__(self):
        return self.__str__()
class Id(Token):
    """An identifier token; ``value`` holds the identifier's text."""

    def __init__(self, value, lineno):
        self.type = 'Id'
        self.value, self.lineno = value, lineno

    def __str__(self):
        return self.value
class String(Token):
    """A string-literal token; ``value`` keeps the quoted source text."""

    def __init__(self, value, lineno):
        self.type = 'String'
        self.value, self.lineno = value, lineno
class ASDLSyntaxError(Exception):
    """A parse error, formatted from either an explicit message or a token."""

    def __init__(self, lineno, token=None, msg=None):
        self.lineno = lineno
        self.token = token
        self.msg = msg

    def __str__(self):
        if self.msg is not None:
            return "%s, line %d" % (self.msg, self.lineno)
        return "Error at '%s', line %d" % (self.token, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
    """Tokenizer for ASDL source.

    spark builds the scanner from the regular expressions stored in the
    t_* method docstrings; those docstrings are patterns, not prose, and
    must not be edited.
    """
    def tokenize(self, input):
        self.rv = []        # accumulated Token objects, returned to caller
        self.lineno = 1
        super(ASDLScanner, self).tokenize(input)
        return self.rv
    def t_id(self, s):
        r"[\w\.]+"
        # XXX doesn't distinguish upper vs. lower, which is
        # significant for ASDL.
        self.rv.append(Id(s, self.lineno))
    def t_string(self, s):
        r'"[^"]*"'
        self.rv.append(String(s, self.lineno))
    def t_xxx(self, s): # not sure what this production means
        r"<="
        self.rv.append(Token(s, self.lineno))
    def t_punctuation(self, s):
        r"[\{\}\*\=\|\(\)\,\?\:]"
        self.rv.append(Token(s, self.lineno))
    def t_comment(self, s):
        r"\-\-[^\n]*"
        pass                # comments are discarded
    def t_newline(self, s):
        r"\n"
        self.lineno += 1    # tracked only for error reporting
    def t_whitespace(self, s):
        r"[ \t]+"
        pass
    def t_default(self, s):
        r" . +"
        raise ValueError, "unmatched input: %s" % `s`
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, (module, name, version, _0, _1)):
" module ::= Id Id version { } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, (module, name, version, _0, definitions, _1)):
" module ::= Id Id version { definitions } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_version(self, (version, V)):
"version ::= Id String"
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, (definition,)):
" definitions ::= definition "
return definition
def p_definition_1(self, (definitions, definition)):
" definitions ::= definition definitions "
return definitions + definition
def p_definition(self, (id, _, type)):
" definition ::= Id = type "
return [Type(id, type)]
def p_type_0(self, (product,)):
" type ::= product "
return product
def p_type_1(self, (sum,)):
" type ::= sum "
return Sum(sum)
def p_type_2(self, (sum, id, _0, attributes, _1)):
" type ::= sum Id ( fields ) "
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, (_0, fields, _1)):
" product ::= ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Product(fields)
def p_sum_0(self, (constructor,)):
" sum ::= constructor "
return [constructor]
def p_sum_1(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_sum_2(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_constructor_0(self, (id,)):
" constructor ::= Id "
return Constructor(id)
def p_constructor_1(self, (id, _0, fields, _1)):
" constructor ::= Id ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, (field,)):
" fields ::= field "
return [field]
def p_fields_1(self, (field, _, fields)):
" fields ::= field , fields "
return fields + [field]
def p_field_0(self, (type,)):
" field ::= Id "
return Field(type)
def p_field_1(self, (type, name)):
" field ::= Id Id "
return Field(type, name)
def p_field_2(self, (type, _, name)):
" field ::= Id * Id "
return Field(type, name, seq=True)
def p_field_3(self, (type, _, name)):
" field ::= Id ? Id "
return Field(type, name, opt=True)
def p_field_4(self, (type, _)):
" field ::= Id * "
return Field(type, seq=True)
def p_field_5(self, (type, _)):
" field ::= Id ? "
return Field(type, opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
    # Marker base class: every node of the parsed ASDL description
    # derives from it, and VisitorBase asserts it before dispatching.
    pass # a marker class
class Module(AST):
    """A parsed ASDL module: its name, definitions, and version string."""

    def __init__(self, name, dfns, version):
        self.name = name
        # BUG FIX: the parser's empty-module production builds
        # Module(name, None, version); normalize None to [] so the
        # index loop below (and later visitors) do not crash.
        self.dfns = dfns or []
        self.version = version
        self.types = {} # maps type name to value (from dfns)
        for type in self.dfns:
            self.types[type.name.value] = type.value

    def __repr__(self):
        return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
    """One top-level definition: a name bound to a sum or product value."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
    """A sum alternative: a constructor name plus zero or more fields."""

    def __init__(self, name, fields=None):
        self.name = name
        self.fields = fields or []

    def __repr__(self):
        return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
    """A single field: a type, an optional name, and ``*``/``?`` flags."""

    def __init__(self, type, name=None, seq=False, opt=False):
        self.type = type
        self.name = name
        self.seq = seq    # sequence field (Id *)
        self.opt = opt    # optional field (Id ?)

    def __repr__(self):
        extra = ", seq=True" if self.seq else (", opt=True" if self.opt else "")
        if self.name is None:
            return "Field(%s%s)" % (self.type, extra)
        return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
    """A sum type: a list of constructors plus optional shared attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        self.attributes = attributes or []

    def __repr__(self):
        # BUG FIX: __init__ normalizes attributes to a list, so the old
        # "is None" test never fired and attribute-less Sums printed a
        # spurious ", []".  Test for emptiness instead.
        if not self.attributes:
            return "Sum(%s)" % self.types
        return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
    """A product type: simply an ordered collection of fields."""

    def __init__(self, fields):
        self.fields = fields

    def __repr__(self):
        return "Product(%s)" % (self.fields,)
class VisitorBase(object):
    """Base visitor; dispatches to visit<ClassName> methods on AST nodes.

    With skip=True, nodes lacking a matching visit method are silently
    ignored; otherwise the missing method raises AttributeError.
    """
    def __init__(self, skip=False):
        self.cache = {}   # maps node class -> bound visit method (or None)
        self.skip = skip
    def visit(self, object, *args):
        meth = self._dispatch(object)
        if meth is None:
            return
        try:
            meth(object, *args)
        except Exception, err:
            print "Error visiting", repr(object)
            print err
            traceback.print_exc()
            # XXX hack
            if hasattr(self, 'file'):
                self.file.flush()
            # Hard abort so partially written output is not trusted.
            os._exit(1)
    def _dispatch(self, object):
        assert isinstance(object, AST), repr(object)
        klass = object.__class__
        meth = self.cache.get(klass)
        if meth is None:
            methname = "visit" + klass.__name__
            if self.skip:
                meth = getattr(self, methname, None)
            else:
                meth = getattr(self, methname)
            self.cache[klass] = meth
        return meth
class Check(VisitorBase):
    """Validate a parsed module: unique constructor names, defined types."""
    def __init__(self):
        super(Check, self).__init__(skip=True)
        self.cons = {}    # constructor name -> name of the defining type
        self.errors = 0   # number of problems found so far
        self.types = {}   # field type name -> list of places it is used
    def visitModule(self, mod):
        for dfn in mod.dfns:
            self.visit(dfn)
    def visitType(self, type):
        self.visit(type.value, str(type.name))
    def visitSum(self, sum, name):
        for t in sum.types:
            self.visit(t, name)
    def visitConstructor(self, cons, name):
        key = str(cons.name)
        conflict = self.cons.get(key)
        if conflict is None:
            self.cons[key] = name
        else:
            print "Redefinition of constructor %s" % key
            print "Defined in %s and %s" % (conflict, name)
            self.errors += 1
        for f in cons.fields:
            self.visit(f, key)
    def visitField(self, field, name):
        key = str(field.type)
        l = self.types.setdefault(key, [])
        l.append(name)
    def visitProduct(self, prod, name):
        for f in prod.fields:
            self.visit(f, name)
def check(mod):
    """Check the parsed module, printing problems; return True when clean."""
    v = Check()
    v.visit(mod)
    for t in v.types:
        if t not in mod.types and not t in builtin_types:
            v.errors += 1
            uses = ", ".join(v.types[t])
            print "Undefined type %s, used in %s" % (t, uses)
    return not v.errors
def parse(file):
    """Scan and parse one ASDL file; return a Module, or None on error."""
    scanner = ASDLScanner()
    parser = ASDLParser()
    buf = open(file).read()
    tokens = scanner.tokenize(buf)
    try:
        return parser.parse(tokens)
    except ASDLSyntaxError, err:
        # Report the error plus the offending source line, then fall
        # through (implicitly returning None).
        print err
        lines = buf.split("\n")
        print lines[err.lineno - 1] # lines starts at 0, files at 1
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
print file
mod = parse(file)
print "module", mod.name
print len(mod.dfns), "definitions"
if not check(mod):
print "Check failed"
else:
for dfn in mod.dfns:
print dfn.type
| {
"repo_name": "google/google-ctf",
"path": "third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Parser/asdl.py",
"copies": "6",
"size": "11733",
"license": "apache-2.0",
"hash": -5132164861358181000,
"line_mean": 26.4092009685,
"line_max": 77,
"alpha_frac": 0.5196454445,
"autogenerated": false,
"ratio": 3.8532019704433496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.737284741494335,
"avg_score": null,
"num_lines": null
} |
"""An implementation of Unix's 'tee' command in Python
Implementing 'tee' in Python allows the ease of writing output once, which will
be written to many file/file-like objects.
Benefits:
* Cross-platform (e.g. a pipe to the actual tee command won't work on Windows)
* The ability to write to more than just files. Any file-like object that
implements write() and flush() is acceptable.
Sample Usage:
import pytee
tee = pytee.create_tee([ '/tmp/tee-test-1', '/tmp/tee-test-2' ], mode='a')
print >>tee, "a" * 100000,
tee.close()
# Need to wait for the child process to finish writing to the file(s)
# before we can measure the amount of data written to the file(s).
os.wait()
for filename in files:
with open(filename, 'r') as fh:
chars = len(fh.read())
print "File '%s' has %d chars" % (filename, chars)
"""
from __future__ import print_function
import sys
import os
__author__ = 'Brandon Sandrowicz <brandon@sandrowicz.org>'
__version__ = '0.1'
valid_modes = ['a','w']
def create_tee(files, mode, buffer_size=128):
    """Get a file object that will mirror writes across multiple files objs

    Options:

        files       A list of files and/or file objects. All strings will be
                    treated as file paths and opened for writing. Everything
                    else is assumed to be a file-like object that implements
                    both the write() and flush() methods.

        mode        Which mode to use when opening new files. Valid values
                    are 'a' (append) and 'w' (overwrite).

        buffer_size
                    Control the size of the buffer between writes to the
                    resulting file object and the list of files.

    Requires os.fork()/os.pipe(), i.e. a POSIX platform.
    """
    if mode not in valid_modes:
        raise IOError("Only valid modes to create_tee() are: %s" % ', '.join(valid_modes))

    tee_list = []
    for target in files:
        # BUG FIX: was `type(file) == str`; isinstance also accepts str
        # subclasses, and `target` no longer shadows the builtin `file`.
        if isinstance(target, str):
            tee_list.append(open(target, mode))
        else:
            tee_list.append(target)

    pipe_read, pipe_write = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child -- read chunks from the pipe and fan them out to every
        # tee target until the writing end is closed.
        try:
            # Close parent's end of the pipe.
            os.close(pipe_write)
            data = os.read(pipe_read, buffer_size)
            # NOTE(review): os.read() returns bytes on Python 3 while the
            # targets are opened in text mode -- looks Python-2-only; confirm.
            while(data):
                for f in tee_list:
                    f.write(data)
                    f.flush()
                    # TODO maybe add in fsync() here if the fileno() method
                    # exists on file
                data = os.read(pipe_read, buffer_size)
        except:
            # Deliberate best-effort: the child must never survive this
            # block, so any failure just falls through to _exit below.
            pass
        finally:
            os._exit(255)
    else:
        # Parent -- return a file object wrapper around the pipe to the
        # child.
        return os.fdopen(pipe_write, 'w')
if __name__ == '__main__':
    # Smoke test: tee one large write into two files and report how many
    # characters each received (requires os.fork, i.e. a POSIX system).
    files = [ '/tmp/tee-test-1', '/tmp/tee-test-2' ]
    num_chars = 100000

    print("Writing %d chars to files (using create_tee):" % num_chars)
    for file in files:
        print("  %s" % file)
    print()

    tee = create_tee(files,mode='a')
    print("a" * num_chars, file=tee)
    tee.close()
    # Wait for the child process to finish flushing before measuring.
    os.wait()

    for filename in files:
        with open(filename, 'r') as fh:
            chars = len(fh.read())
            print("File '%s' has %d chars" % (filename, chars))
| {
"repo_name": "bsandrow/pytee",
"path": "pytee.py",
"copies": "1",
"size": "3625",
"license": "bsd-3-clause",
"hash": -296607030111853200,
"line_mean": 31.9545454545,
"line_max": 94,
"alpha_frac": 0.5376551724,
"autogenerated": false,
"ratio": 4.014396456256922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027642257960211695,
"num_lines": 110
} |
"""An implementation of Unix's 'tee' command in Python
Implementing 'tee' in Python allows the ease of writing output once, which will
be written to many file/file-like objects.
Benefits:
* Cross-platform (e.g. a pipe to the actual tee command won't work on Windows)
* The ability to write to more than just files. Any file-like object that
implements write() and flush() is acceptable.
Sample Usage:
import pytee
tee = pytee.create_tee([ '/tmp/tee-test-1', '/tmp/tee-test-2' ], mode='a')
print >>tee, "a" * 100000,
tee.close()
# Need to wait for the child process to finish writing to the file(s)
# before we can measure the amount of data written to the file(s).
os.wait()
for filename in files:
with open(filename, 'r') as fh:
chars = len(fh.read())
print "File '%s' has %d chars" % (filename, chars)
"""
import sys
import os
import subprocess
from netsa.util.shell import *
import os
from string import Template
__author__ = 'Brandon Sandrowicz <brandon@sandrowicz.org>'
__version__ = '0.1'
valid_modes = ['a', 'w']
def create_tee(files, mode, buffer_size=128):
    """Get a file object that will mirror writes across multiple file objects.

    A child process is forked to pump data read from a pipe into every
    target, so the returned object can be used like a regular writable
    file by the parent.

    Options:

        files       A list of files and/or file objects. All strings will be
                    treated as file paths and opened for writing. Everything
                    else is assumed to be a file-like object that implements
                    both the write() and flush() methods.

        mode        Which mode to use when opening new files. Valid values
                    are 'a' (append) and 'w' (overwrite).

        buffer_size
                    Control the size of the buffer between writes to the
                    resulting file object and the list of files.

    Raises IOError if `mode` is not one of the supported modes.
    """
    if mode not in valid_modes:
        raise IOError("Only valid modes to create_tee() are: %s" %
                      ', '.join(valid_modes))

    # Open string paths; anything else is assumed to be file-like already.
    tee_list = []
    for target in files:
        if isinstance(target, str):
            tee_list.append(open(target, mode))
        else:
            tee_list.append(target)

    pipe_read, pipe_write = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child -- Read bytes from the pipe and write them to the specified
        # files.
        try:
            # Close parent's end of the pipe
            os.close(pipe_write)

            # NOTE(review): os.read() returns bytes, but string targets are
            # opened in text mode above -- on Python 3 write() would raise
            # TypeError for those. Confirm intended interpreter/mode.
            data = os.read(pipe_read, buffer_size)
            while data:
                for sink in tee_list:
                    sink.write(data)
                    sink.flush()
                    # TODO maybe add in fsync() here if the fileno() method
                    # exists on file
                data = os.read(pipe_read, buffer_size)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are not silently swallowed. Errors are still ignored because
            # the child has nowhere useful to report them.
            pass
        finally:
            # Always terminate the child without running parent cleanup.
            os._exit(255)
    else:
        # Parent -- Return a file object wrapper around the pipe to the
        # child.
        return os.fdopen(pipe_write, 'w')
| {
"repo_name": "Novartis/yap",
"path": "bin/pytee.py",
"copies": "1",
"size": "2764",
"license": "apache-2.0",
"hash": 5628525631498017000,
"line_mean": 27.7916666667,
"line_max": 79,
"alpha_frac": 0.6255426918,
"autogenerated": false,
"ratio": 3.6657824933687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9788069976835367,
"avg_score": 0.0006510416666666666,
"num_lines": 96
} |
# An implementation of Cross-Input Neighborhood Differences from http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Ahmed_An_Improved_Deep_2015_CVPR_paper.pdf in sonnet
import tensorflow as tf
import sonnet as snt
class CrossInputNeighborhoodDifferences(snt.AbstractModule):
    """Cross-input neighborhood differences over a pair of feature maps.

    Each position of one map is compared against the 5x5 neighbourhood of
    the other map (Ahmed et al., CVPR 2015).
    """

    def __init__(self, name="Cross_Input_Neighborhood_Differences"):
        super(CrossInputNeighborhoodDifferences, self).__init__(name=name)

    def _build(self, inputs):
        """Take a tuple of exactly two tensors of identical dimension and
        compute the cross-input neighbourhood difference for each."""
        first, second = inputs[0], inputs[1]
        # Every (i, j) shift of each map; padding order is
        # [above, below] / [left, right].
        offsets = [(i, j) for i in range(0, 5) for j in range(0, 5)]
        shifted_first = [
            tf.pad(first, [[0, 0], [i, 4 - i], [j, 4 - j], [0, 0]])
            for (i, j) in offsets
        ]
        shifted_second = [
            tf.pad(second, [[0, 0], [i, 4 - i], [j, 4 - j], [0, 0]])
            for (i, j) in offsets
        ]
        # Stack all 25 shifted copies along the channel axis.
        pad1 = tf.concat(shifted_first, 3)
        pad2 = tf.concat(shifted_second, 3)
        # Centre each map and repeat it 25 times so it lines up with the
        # 25 shifted copies of the other map.
        tile1 = tf.tile(tf.pad(first, [[0, 0], [2, 2], [2, 2], [0, 0]]), [1, 1, 1, 25])
        tile2 = tf.tile(tf.pad(second, [[0, 0], [2, 2], [2, 2], [0, 0]]), [1, 1, 1, 25])
        return tf.concat([tile1 - pad2, tile2 - pad1], 3)
## for testing
if __name__ == '__main__':
    # Tiny smoke test: two 2x2 single-channel feature maps.
    left = tf.reshape(tf.constant([1.0, 2.0, 3.0, 4.0]), [1, 2, 2, 1])
    right = tf.reshape(tf.constant([2.0, 2.0, 4.0, 4.0]), [1, 2, 2, 1])
    layer = CrossInputNeighborhoodDifferences()
    out = layer((left, right))
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        print(session.run(out))
| {
"repo_name": "thesilencelies/SonnetConvs",
"path": "CrossInputNeighborhoodDifferences.py",
"copies": "1",
"size": "1536",
"license": "apache-2.0",
"hash": -2169105244998783700,
"line_mean": 39.4210526316,
"line_max": 181,
"alpha_frac": 0.630859375,
"autogenerated": false,
"ratio": 2.762589928057554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8608016005928143,
"avg_score": 0.05708665942588207,
"num_lines": 38
} |
"""An importer for 3D Studio files.
"""
# Author: Prabhu Ramachandran <prabhu at aero dot iitb dot ac dot in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import basename
# Enthought imports.
from tvtk.api import tvtk
from traits.api import Instance
# Local imports
from mayavi.sources.vrml_importer import VRMLImporter
######################################################################
# `ThreeDSImporter` class.
######################################################################
class ThreeDSImporter(VRMLImporter):
    """Importer for 3D Studio (.3ds) scene files.

    Reuses all the actor-management machinery of `VRMLImporter`; only the
    tvtk reader class and the displayed name differ.
    """

    # The tvtk 3DS importer; normals are computed so imported meshes shade
    # correctly.
    reader = Instance(tvtk.ThreeDSImporter, args=(),
                      kw={'compute_normals':True},
                      allow_none=False, record=True)
    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True
    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port
    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_name_changed(self, value):
        # Traits static handler: runs whenever `file_name` changes.
        # This hack is necessary since for some reason the importer
        # does not clear out the earlier actors.
        self.reader = reader = tvtk.ThreeDSImporter(compute_normals=True)
        reader.file_name = value
        if self.scene is not None:
            self.reader.render_window = self.scene.render_window
        # Rebuild the display name, preserving any '[Hidden]' marker.
        name = "3DStudio file (%s)"%basename(self.file_name)
        if '[Hidden]' in self.name:
            name += ' [Hidden]'
        self.name = name
        # Persist the path, then re-run the import/render cycle.
        self._file_path.set(value)
        self._update_reader()
        self.render()
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/sources/three_ds_importer.py",
"copies": "3",
"size": "1973",
"license": "bsd-3-clause",
"hash": -1874059362194500000,
"line_mean": 32.4406779661,
"line_max": 74,
"alpha_frac": 0.4956918398,
"autogenerated": false,
"ratio": 4.653301886792453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6648993726592453,
"avg_score": null,
"num_lines": null
} |
"""An importer for VRML files.
"""
# Author: Prabhu Ramachandran <prabhu at aero dot iitb dot ac dot in>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import basename
# Enthought imports.
from tvtk.api import tvtk
from traits.api import Instance, Str
from traitsui.api import View, Item, FileEditor
from apptools.persistence.file_path import FilePath
from apptools.persistence.state_pickler import set_state
# Local imports
from mayavi.core.source import Source
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `VRMLImporter` class.
######################################################################
class VRMLImporter(Source):
    """Mayavi source that imports a VRML scene.

    The tvtk `VRMLImporter` adds its actors directly to the render window;
    this class tracks which actors were added (by diffing the scene's actor
    list around the read) so they can be shown, hidden and removed like any
    other pipeline object.
    """

    __version__ = 0
    # The file name.
    file_name = Str('', enter_set=True, auto_set=False,
                    desc='the VRML file name')
    # The VRML importer.
    reader = Instance(tvtk.VRMLImporter, args=(), allow_none=False,
                      record=True)
    # This source produces no datasets for downstream filters.
    output_info = PipelineInfo(datasets=['none'])
    ###############
    # Private traits.
    # Our file path used for persistence
    _file_path = Instance(FilePath, args=())
    # Our View.
    view = View(Item(name='file_name', editor=FileEditor()))
    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        d = super(VRMLImporter, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('reader', 'file_name'):
            d.pop(name)
        return d
    def __set_pure_state__(self, state):
        # The reader has its own file_name which needs to be fixed.
        # NOTE: `abs_pth` is the attribute name used by apptools' FilePath.
        fname = state._file_path.abs_pth
        # Now call the parent class to setup everything.
        self.initialize(fname)
        # Setup the rest of the state.
        set_state(self, state, ignore=['_file_path'])
    def initialize(self, file_name):
        # Setting the trait fires _file_name_changed, which does the work.
        self.file_name = file_name
    ######################################################################
    # `PipelineBase` interface.
    ######################################################################
    def add_actors(self):
        """Adds `self.actors` to the scene.
        """
        if not self._actors_added:
            # The importer adds actors itself during the read, so hook up
            # the render window and re-read rather than adding directly.
            self.reader.render_window = self.scene.render_window
            self._update_reader()
            self._actors_added = True
            if not self.visible:
                self._visible_changed(self.visible)
            self.scene.render()
    def remove_actors(self):
        """Removes `self.actors` from the scene.
        """
        if self._actors_added:
            self.scene.remove_actors(self.actors)
            self._actors_added = False
            self.scene.render()
    def has_output_port(self):
        """ Return True as the reader has output port."""
        return True
    def get_output_object(self):
        """ Return the reader output port."""
        return self.reader.output_port
    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_name_changed(self, value):
        # Traits static handler: runs whenever `file_name` changes.
        reader = self.reader
        reader.file_name = value
        self._file_path.set(value)
        self._update_reader()
        self.render()
        # Rebuild the display name, preserving any '[Hidden]' marker.
        name = "VRML file (%s)"%basename(self.file_name)
        if '[Hidden]' in self.name:
            name += ' [Hidden]'
        self.name = name
    def _update_reader(self):
        reader = self.reader
        if self.scene is None or reader.file_name is None \
               or len(reader.file_name) == 0:
            return
        # Snapshot the scene's actors before and after the read; the
        # difference is the set of actors this importer owns.
        actors1 = [x for x in self.scene.renderer.actors]
        reader.read()
        self.scene.render()
        actors2 = [x for x in self.scene.renderer.actors]
        self.actors = [x for x in actors2 if x not in actors1]
        # If these are the first actors on scene reset the view.
        if len(actors1) == 0:
            self.scene.reset_zoom()
    def _scene_changed(self, old, new):
        # Move our actors from the old scene to the new one.
        if self._actors_added:
            old.remove_actors(self.actors)
            reader = self.reader
            reader.render_window = new.render_window
            self._update_reader()
    def _actors_changed(self, old, new):
        if self._actors_added:
            self.scene.remove_actors(old)
            # The actors are added automatically when the importer
            # does a read.
            self.scene.render()
    def _actors_items_changed(self, list_event):
        if self._actors_added:
            self.scene.remove_actors(list_event.removed)
            # The actors are added automatically when the importer
            # does a read.
            self.scene.render()
    def _visible_changed(self, value):
        if value:
            if not self._actors_added:
                self.scene.add_actors(self.actors)
                self._actors_added = True
        super(VRMLImporter, self)._visible_changed(value)
| {
"repo_name": "liulion/mayavi",
"path": "mayavi/sources/vrml_importer.py",
"copies": "3",
"size": "5191",
"license": "bsd-3-clause",
"hash": 1398754739321764000,
"line_mean": 32.7077922078,
"line_max": 74,
"alpha_frac": 0.5270660759,
"autogenerated": false,
"ratio": 4.322231473771857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6349297549671857,
"avg_score": null,
"num_lines": null
} |
"""An improved base task implementing easy (and explicit) saving of outputs."""
import os
import logging
from inspect import getfullargspec
import numpy as np
from caput import pipeline, config, memh5
class MPILogFilter(logging.Filter):
    """Filter log entries by MPI rank.

    Optionally stamps records with the MPI rank/size of the emitting
    process, and always adds an elapsed-time field (in seconds).

    Parameters
    ----------
    add_mpi_info : boolean, optional
        Add MPI rank/size info to log records that don't already have it.
    level_rank0 : int
        Log level for messages from rank=0.
    level_all : int
        Log level for messages from all other ranks.
    """

    def __init__(
        self, add_mpi_info=True, level_rank0=logging.INFO, level_all=logging.WARN
    ):
        from mpi4py import MPI

        self.comm = MPI.COMM_WORLD
        self.add_mpi_info = add_mpi_info
        self.level_rank0 = level_rank0
        self.level_all = level_all

    def filter(self, record):
        # Stamp the record with rank/size unless it already carries them.
        if not hasattr(record, "mpi_rank"):
            if self.add_mpi_info:
                record.mpi_rank = self.comm.rank
                record.mpi_size = self.comm.size

        # Elapsed time since logging started, as seconds (float).
        record.elapsedTime = record.relativeCreated * 1e-3

        # Rank 0 and the other ranks each get their own threshold.
        if record.mpi_rank == 0:
            return record.levelno >= self.level_rank0
        return record.levelno >= self.level_all
def _log_level(x):
    """Interpret the input as a logging level.

    Parameters
    ----------
    x : int or str
        Explicit integer logging level or one of 'DEBUG', 'INFO', 'WARN',
        'ERROR' or 'CRITICAL' (case-insensitive).

    Returns
    -------
    level : int

    Raises
    ------
    ValueError
        If `x` is neither an int nor a recognised level name.
    """
    level_dict = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARN": logging.WARN,
        "WARNING": logging.WARN,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }

    if isinstance(x, int):
        return x
    # Normalise before the membership test. The original tested
    # `x in level_dict` on the raw string but looked up `x.upper()`, so
    # lower-case names like "info" were wrongly rejected with ValueError.
    if isinstance(x, str) and x.upper() in level_dict:
        return level_dict[x.upper()]
    raise ValueError("Logging level %s not understood" % repr(x))
class SetMPILogging(pipeline.TaskBase):
    """A task used to configure MPI aware logging.

    Attributes
    ----------
    level_rank0, level_all : int or str
        Log level for rank=0, and other ranks respectively.
    """

    level_rank0 = config.Property(proptype=_log_level, default=logging.INFO)
    level_all = config.Property(proptype=_log_level, default=logging.WARN)
    def __init__(self):
        from mpi4py import MPI
        import math
        logging.captureWarnings(True)
        # Number of digits needed to print any rank of this communicator,
        # so the "[MPI r/s]" prefix is fixed-width.
        rank_length = int(math.log10(MPI.COMM_WORLD.size)) + 1
        mpi_fmt = "[MPI %%(mpi_rank)%id/%%(mpi_size)%id]" % (rank_length, rank_length)
        filt = MPILogFilter(level_all=self.level_all, level_rank0=self.level_rank0)
        # This uses the fact that caput.pipeline.Manager has already
        # attempted to set up the logging. We just insert our custom filter
        # NOTE(review): assumes the root logger already has at least one
        # handler; raises IndexError otherwise -- confirm Manager always
        # installs one first.
        root_logger = logging.getLogger()
        ch = root_logger.handlers[0]
        ch.addFilter(filt)
        # Prefix every message with the elapsed time and the MPI rank/size
        # fields added by MPILogFilter.
        formatter = logging.Formatter(
            "%(elapsedTime)8.1fs "
            + mpi_fmt
            + " - %(levelname)-8s %(name)s: %(message)s"
        )
        ch.setFormatter(formatter)
class LoggedTask(pipeline.TaskBase):
    """A task with logger support."""

    log_level = config.Property(proptype=_log_level, default=None)

    def __init__(self):
        # One logger per concrete task class, namespaced by its module.
        logger_name = "%s.%s" % (self.__module__, self.__class__.__name__)
        self._log = logging.getLogger(logger_name)

        # Apply an explicit per-task level when one was configured.
        if self.log_level is not None:
            self.log.setLevel(self.log_level)

    @property
    def log(self):
        """The logger object for this task."""
        return self._log
class MPITask(pipeline.TaskBase):
    """Base class for MPI using tasks. Just ensures that the task gets a `comm`
    attribute.
    """

    # Communicator used by the task; assigned in __init__.
    comm = None

    def __init__(self):
        from mpi4py import MPI

        # Default every task to the world communicator.
        self.comm = MPI.COMM_WORLD
class _AddRankLogAdapter(logging.LoggerAdapter):
    """Add the rank of the logging process to a log message.

    Attributes
    ----------
    calling_obj : object
        An object with a `comm` property that will be queried for the rank.
    """

    calling_obj = None

    def process(self, msg, kwargs):
        # Ensure the `extra` mapping exists, then stamp rank/size onto it.
        extra = kwargs.setdefault("extra", {})
        extra["mpi_rank"] = self.calling_obj.comm.rank
        extra["mpi_size"] = self.calling_obj.comm.size
        return msg, kwargs
class MPILoggedTask(MPITask, LoggedTask):
    """A task base that has MPI aware logging."""

    def __init__(self):
        # Initialise both bases explicitly (their __init__s are disjoint).
        MPITask.__init__(self)
        LoggedTask.__init__(self)

        # Wrap the plain logger in an adapter that stamps every message
        # with this task's MPI rank and communicator size.
        adapter = _AddRankLogAdapter(self._log, None)
        adapter.calling_obj = self
        self._log = adapter
class SingleTask(MPILoggedTask, pipeline.BasicContMixin):
    """Process a task with at most one input and output.

    Both input and output are expected to be :class:`memh5.BasicCont`
    objects. This class allows writing of the output when requested.

    Tasks inheriting from this class should override `process` and
    optionally :meth:`setup` or :meth:`finish`. They should not override
    :meth:`next`.

    If the value of :attr:`input_root` is anything other than the string
    "None" then the input will be read (using :meth:`read_input`) from the
    file ``self.input_root + self.input_filename``. If the input is
    specified both as a filename and as a product key in the pipeline
    configuration, an error will be raised upon initialization.

    If the value of :attr:`output_root` is anything other than the string
    "None" then the output will be written (using :meth:`write_output`) to
    the file ``self.output_root + self.output_filename``.

    Attributes
    ----------
    save : bool
        Whether to save the output to disk or not.
    output_name : string
        A python format string used to construct the filename. Valid
        identifiers are:
          - `count`: an integer giving which iteration of the task this is.
          - `tag`: a string identifier for the output derived from the
            container's `tag` attribute. If that attribute is not present
            `count` is used instead.
          - `key`: the name of the output key.
          - `task`: the (unqualified) name of the task.
          - `output_root`: the value of the output root argument. This is
            deprecated and is just used for legacy support. The default
            value of `output_name` means the previous behaviour works.
    output_root : string
        Pipeline settable parameter giving the first part of the output
        path. Deprecated in favour of `output_name`.
    nan_check : bool
        Check the output for NaNs (and infs) logging if they are present.
    nan_dump : bool
        If NaN's are found, dump the container to disk.
    nan_skip : bool
        If NaN's are found, don't pass on the output.
    versions : dict
        Keys are module names (str) and values are their version strings.
        This is attached to output metadata.
    pipeline_config : dict
        Global pipeline configuration. This is attached to output metadata.

    Raises
    ------
    `caput.pipeline.PipelineRuntimeError`
        If this is used as a baseclass to a task overriding `self.process`
        with variable length or optional arguments.
    """
    save = config.Property(default=False, proptype=bool)
    output_root = config.Property(default="", proptype=str)
    output_name = config.Property(default="{output_root}{tag}.h5", proptype=str)
    nan_check = config.Property(default=True, proptype=bool)
    nan_skip = config.Property(default=True, proptype=bool)
    nan_dump = config.Property(default=True, proptype=bool)
    # Metadata to get attached to the output
    versions = config.Property(default={}, proptype=dict)
    pipeline_config = config.Property(default={}, proptype=dict)
    # Internal iteration counter (used for `count` in output_name).
    _count = 0
    done = False
    _no_input = False
    def __init__(self):
        super(SingleTask, self).__init__()
        # Inspect the `process` method to see how many arguments it takes.
        pro_argspec = getfullargspec(self.process)
        n_args = len(pro_argspec.args) - 1
        if pro_argspec.varargs or pro_argspec.varkw or pro_argspec.defaults:
            msg = (
                "`process` method may not have variable length or optional"
                " arguments."
            )
            raise pipeline.PipelineRuntimeError(msg)
        if n_args == 0:
            self._no_input = True
        else:
            self._no_input = False
    def next(self, *input):
        """Should not need to override. Implement `process` instead."""
        self.log.info("Starting next for task %s" % self.__class__.__name__)
        self.comm.Barrier()
        # This should only be called once.
        # NOTE(review): `done` is a class attribute (False), so accessing
        # `self.done` never raises AttributeError and the except branch
        # below looks unreachable -- confirm whether it is vestigial.
        try:
            if self.done:
                raise pipeline.PipelineStopIteration()
        except AttributeError:
            self.done = True
        # Process input and fetch output
        if self._no_input:
            if len(input) > 0:
                # This should never happen. Just here to catch bugs.
                raise RuntimeError("Somehow `input` was set.")
            output = self.process()
        else:
            output = self.process(*input)
        # Return immediately if output is None to skip writing phase.
        if output is None:
            return
        # Set a tag in output if needed
        if "tag" not in output.attrs and len(input) > 0 and "tag" in input[0].attrs:
            output.attrs["tag"] = input[0].attrs["tag"]
        # Check for NaN's etc
        output = self._nan_process_output(output)
        # Write the output if needed
        self._save_output(output)
        # Increment internal counter
        self._count = self._count + 1
        self.log.info("Leaving next for task %s" % self.__class__.__name__)
        # Return the output for the next task
        return output
    def finish(self):
        """Should not need to override. Implement `process_finish` instead."""
        class_name = self.__class__.__name__
        self.log.info(f"Starting finish for task {class_name}")
        if not hasattr(self, "process_finish"):
            self.log.info(f"No finish for task {class_name}")
            return
        output = self.process_finish()
        # Check for NaN's etc
        output = self._nan_process_output(output)
        # Write the output if needed
        self._save_output(output)
        self.log.info(f"Leaving finish for task {class_name}")
        return output
    def _save_output(self, output):
        # Routine to write output if needed.
        if self.save and output is not None:
            # add metadata to output
            metadata = {"versions": self.versions, "config": self.pipeline_config}
            for key, value in metadata.items():
                output.add_history(key, value)
            # Create a tag for the output file name
            tag = output.attrs["tag"] if "tag" in output.attrs else self._count
            # Construct the filename
            # NOTE(review): `_out_keys` is not defined in this class --
            # presumably set by the caput pipeline framework; confirm.
            name_parts = {
                "tag": tag,
                "count": self._count,
                "task": self.__class__.__name__,
                "key": self._out_keys[0] if self._out_keys else "",
                "output_root": self.output_root,
            }
            outfile = self.output_name.format(**name_parts)
            # Expand any variables in the path
            outfile = os.path.expanduser(outfile)
            outfile = os.path.expandvars(outfile)
            self.log.debug("Writing output %s to disk.", outfile)
            self.write_output(outfile, output)
    def _nan_process_output(self, output):
        # Process the output to check for NaN's
        # Returns the output or, None if it should be skipped
        if self.nan_check:
            nan_found = self._nan_check_walk(output)
            if nan_found and self.nan_dump:
                # Construct the filename
                tag = output.attrs["tag"] if "tag" in output.attrs else self._count
                outfile = "nandump_" + self.__class__.__name__ + "_" + str(tag) + ".h5"
                self.log.debug("NaN found. Dumping %s", outfile)
                self.write_output(outfile, output)
            if nan_found and self.nan_skip:
                self.log.debug("NaN found. Skipping output.")
                return None
        return output
    def _nan_check_walk(self, cont):
        # Walk through a memh5 container and check for NaN's and Inf's.
        # Logs any issues found and returns True if there were any found.
        from mpi4py import MPI
        if isinstance(cont, memh5.MemDiskGroup):
            cont = cont._data
        stack = [cont]
        found = False
        # Walk over the container tree...
        while stack:
            n = stack.pop()
            # Check the dataset for non-finite numbers
            if isinstance(n, memh5.MemDataset):
                # Try to test for NaN's and infs. This will fail for compound datatypes...
                # casting to ndarray, bc MPI ranks may fall out of sync, if a nan or inf are found
                arr = n[:].view(np.ndarray)
                try:
                    total_nan = np.isnan(arr).sum()
                    total_inf = np.isinf(arr).sum()
                except TypeError:
                    continue
                if total_nan > 0:
                    self.log.info(
                        "NaN's found in dataset %s [%i of %i elements]",
                        n.name,
                        total_nan,
                        arr.size,
                    )
                    found = True
                if total_inf > 0:
                    self.log.info(
                        "Inf's found in dataset %s [%i of %i elements]",
                        n.name,
                        total_inf,
                        arr.size,
                    )
                    found = True
            elif isinstance(n, (memh5.MemGroup, memh5.MemDiskGroup)):
                for item in n.values():
                    stack.append(item)
        # All ranks need to know if any rank found a NaN/Inf
        found = self.comm.allreduce(found, op=MPI.MAX)
        return found
class ReturnLastInputOnFinish(SingleTask):
    """Workaround for `caput.pipeline` issues.

    Caches whatever `process` receives, overwriting on every call, and
    hands the most recent input back when the pipeline finishes.
    """

    x = None

    def process(self, x):
        """Take a reference to the input.

        Parameters
        ----------
        x : object
        """
        self.x = x

    def process_finish(self):
        """Return the last input to process.

        Returns
        -------
        x : object
            Last input to process.
        """
        return self.x
class ReturnFirstInputOnFinish(SingleTask):
    """Workaround for `caput.pipeline` issues.

    Caches the very first input seen by `process` (later inputs are
    ignored) and hands it back when the pipeline finishes.
    """

    x = None

    def process(self, x):
        """Take a reference to the input.

        Parameters
        ----------
        x : object
        """
        # Only the first input is kept; subsequent calls are no-ops.
        if self.x is None:
            self.x = x

    def process_finish(self):
        """Return the cached first input.

        Returns
        -------
        x : object
            First input passed to `process`.
        """
        return self.x
class Delete(SingleTask):
    """Delete pipeline products to free memory."""

    def process(self, x):
        """Delete the input and collect garbage.

        Parameters
        ----------
        x : object
            The object to be deleted.
        """
        import gc

        self.log.info("Deleting %s" % type(x))

        # Drop our reference and force a collection pass so large
        # containers are reclaimed immediately.
        del x
        gc.collect()

        return None
def group_tasks(*tasks):
    """Create a Task that groups a bunch of tasks together.

    This method creates a class that inherits from all the subtasks, and
    calls each `process` method in sequence, passing the output of one to
    the input of the next.

    This should be used like:

    >>> class SuperTask(group_tasks(SubTask1, SubTask2)):
    >>>     pass

    At the moment if the ensemble has more than one setup method, the
    SuperTask will need to implement an override that correctly calls each.
    """
    class TaskGroup(*tasks):
        # TODO: figure out how to make the setup work at the moment it just picks the first in MRO
        # def setup(self, x): pass
        def process(self, x):
            # Each subtask's `process` is called unbound with this single
            # shared instance as `self`; outputs chain into the next input.
            for t in tasks:
                self.log.debug("Calling process for subtask %s", t.__name__)
                x = t.process(self, x)
            return x
    return TaskGroup
| {
"repo_name": "radiocosmology/draco",
"path": "draco/core/task.py",
"copies": "1",
"size": "17398",
"license": "mit",
"hash": 3233856631904754700,
"line_mean": 29.0483592401,
"line_max": 118,
"alpha_frac": 0.5795493735,
"autogenerated": false,
"ratio": 4.262126408623224,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5341675782123223,
"avg_score": null,
"num_lines": null
} |
"""An improved scikit-learn-like interface of XGBoost
About
-----
scikit-learn is machine learning library for Python.
XGBoost is a new and useful gradient boosting library,
which provides a customized Python interface as well as
a simplified scikit-learn-like interface.
This repo contains a slightly improved and customized
scikit-learn-like interface of XGBoost, heavily based on
the official codes, with some small modifications.
Installation
------------
Install scikit-learn and xgboost,
download this repo and place it into your projects.
License
-------
The code in this repo follows Apache License version 2.0.
scikit-learn follows New BSD License.
XGBoost follows Apache License version 2.0.
Reference
---------
[1] Scikit-learn: Machine Learning in Python, Pedregosa et al., JMLR 12, pp. 2825-2830, 2011.
http://scikit-learn.org/stable/
[2] XGBoost: eXtreme Gradient Boosting
https://github.com/dmlc/xgboost
"""
from __future__ import print_function, division
from tempfile import NamedTemporaryFile
import os
import numpy as np
import scipy as sp
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.metrics import accuracy_score, roc_auc_score, log_loss
import xgboost as xgb
__all__ = ['XGBEstimator', 'XGBClassifier', 'XGBRegressor']
class XGBEstimator(BaseEstimator):
    """A scikit-learn-like interface wrapping xgboost's native training API.

    Parameters
    ----------
    n_iter : integer, optional (default=1000)
        The num_boost_round in original xgboost.
    n_jobs : integer, optional (default=-1)
        The nthread in original xgboost.
        When it is -1, the estimator would use all cores on the machine.
    learning_rate : float, optional (default=0.3)
        The eta in original xgboost.
    gamma : float, optional (default=0)
    max_depth : int, optional (default=6)
    min_child_weight : int, optional (default=1)
    max_delta_step : float, optional (default=0)
    subsample : float, optional (default=1)
        It ranges in (0, 1].
    colsample_bytree : float, optional (default=1)
        It ranges in (0, 1].
    base_score : float, optional (default=0.5)
    random_state : int or None, optional (default=None)
    early_stopping_rounds : int, optional (default=100)
    num_parallel_tree : int or None, optional (default=None)
        Experimental. If set to an int, `n_iter` is forced to 1 internally
        (random-forest style training).
    verbose : bool, optional (default=True)
    objective : string, optional (default='reg:linear')
    eval_metric : string, optional (default='rmse')
    **kwargs : optional
        Extra attributes set directly on the estimator
        (one possible value: num_class).

    Attributes
    ----------
    bst_ : the xgboost boosted object
    """
    def __init__(self,
                 n_iter=1000,
                 n_jobs=-1,
                 learning_rate=0.3,
                 gamma=0,
                 max_depth=6,
                 min_child_weight=1,
                 max_delta_step=0,
                 subsample=1,
                 colsample_bytree=1,
                 base_score=0.5,
                 random_state=None,
                 early_stopping_rounds=100,
                 num_parallel_tree=None,
                 verbose=True,
                 objective='reg:linear',
                 eval_metric='rmse',
                 **kwargs):
        self.n_iter = n_iter
        self.n_jobs = n_jobs
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.max_depth = max_depth
        self.min_child_weight = min_child_weight
        self.max_delta_step = max_delta_step
        self.subsample = subsample
        self.colsample_bytree = colsample_bytree
        self.base_score = base_score
        self.random_state = random_state
        self.early_stopping_rounds = early_stopping_rounds
        self.num_parallel_tree = num_parallel_tree
        self.verbose = verbose
        self.objective = objective
        self.eval_metric = eval_metric
        # Any extra keyword arguments become plain attributes (e.g.
        # num_class for multiclass objectives).
        # Fixed: was `self.setattr(parameter, value)`, which raises
        # AttributeError because objects have no `setattr` method.
        for parameter, value in kwargs.items():
            setattr(self, parameter, value)
    def __getstate__(self):
        # can't pickle ctypes pointers so save bst_ directly
        this = self.__dict__.copy()  # don't modify in place
        # delete = False for x-platform compatibility
        # https://bugs.python.org/issue14243
        with NamedTemporaryFile(mode="wb", delete=False) as tmp:
            this["bst_"].save_model(tmp.name)
            tmp.close()
            # Fixed: the file handle from the bare open().read() was never
            # closed; use a context manager so the fd is released promptly.
            with open(tmp.name, "rb") as fh:
                booster = fh.read()
            os.remove(tmp.name)
        this.update({"bst_": booster})
        return this
    def __setstate__(self, state):
        # Round-trip the raw model bytes through a temp file so xgboost can
        # reload the native booster.
        with NamedTemporaryFile(mode="wb", delete=False) as tmp:
            tmp.write(state["bst_"])
            tmp.close()
            booster = xgb.Booster(model_file=tmp.name)
            os.remove(tmp.name)
            state["bst_"] = booster
        self.__dict__.update(state)
    def get_xgb_params(self):
        """Get the params for xgboost

        Returns
        -------
        xgb_params : dict
            The suitable params for xgboost.
        """
        xgb_params = {
            'eta': self.learning_rate,
            'gamma': self.gamma,
            'max_depth': int(self.max_depth),
            'min_child_weight': int(self.min_child_weight),
            'max_delta_step': self.max_delta_step,
            'subsample': self.subsample,
            'colsample_bytree': self.colsample_bytree,
            'early_stopping_rounds': int(self.early_stopping_rounds),
            'objective': self.objective,
            'eval_metric': self.eval_metric
        }
        if not self.verbose:
            xgb_params['silent'] = 1
        # Draw a fresh seed per call unless an explicit one was given.
        if self.random_state is None:
            xgb_params['seed'] = np.random.randint(0, 2**32)
        else:
            xgb_params['seed'] = int(self.random_state)
        if self.n_jobs > 0:
            xgb_params['nthread'] = int(self.n_jobs)
        if hasattr(self, 'num_class'):
            xgb_params['num_class'] = int(self.num_class)
        if not (self.num_parallel_tree is None):
            # then we are using random forest!
            # NOTE: this deliberately mutates self.n_iter so only a single
            # boosting round of num_parallel_tree trees is trained.
            self.n_iter = 1
            xgb_params['num_parallel_tree'] = int(self.num_parallel_tree)
        return xgb_params
    def _ready_to_fit(self, X, y):
        """do nothing in BaseEstimator"""
        return X, y
    def fit(self, X, y, X_valid=None, y_valid=None, sample_weight=None):
        """Fit training data.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples.
        y : array-like, shape=(n_samples,)
        X_valid : array-like or sparse matrix, shape=(n_valid_samples, n_features)
            The validation samples.
        y_valid : array-like, shape=(n_valid_samples,)
        sample_weight : array-like, shape = [n_samples], optional

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = self._ready_to_fit(X, y)
        xgb_params = self.get_xgb_params()
        # xgboost accepts dense, csc, csr
        if isinstance(X, sp.sparse.coo_matrix):
            X = X.tocsc()
        if sample_weight is not None:
            xg_train = xgb.DMatrix(X, label=y, weight=sample_weight)
        else:
            xg_train = xgb.DMatrix(X, label=y)
        watchlist = [(xg_train, 'train')]
        if X_valid is not None:
            if isinstance(X_valid, sp.sparse.coo_matrix):
                X_valid = X_valid.tocsc()
            # NOTE(review): the *training* sample_weight is reused for the
            # validation DMatrix; if the two sets differ in length xgboost
            # will reject it -- confirm intended behaviour.
            if sample_weight is not None:
                xg_valid = xgb.DMatrix(X_valid, label=y_valid, weight=sample_weight)
            else:
                xg_valid = xgb.DMatrix(X_valid, label=y_valid)
            watchlist = [(xg_train, 'train'), (xg_valid, 'valid')]
        if self.verbose:
            # with watchlist
            self.bst_ = xgb.train(params=xgb_params, dtrain=xg_train, num_boost_round=int(self.n_iter), evals=watchlist, early_stopping_rounds=int(self.early_stopping_rounds))
        else:
            # without watchlist
            # early stopping is not available
            self.bst_ = xgb.train(params=xgb_params, dtrain=xg_train, num_boost_round=int(self.n_iter))
        return self
    def predict(self, X):
        """Predict y for X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]

        Returns
        -------
        y : array
            The predicted values.
        """
        xg_test = xgb.DMatrix(X)
        y = self.bst_.predict(xg_test)
        return y
class XGBClassifier(XGBEstimator, ClassifierMixin):
    """A classifier interface of xgboost for good intention.
    Parameters
    ----------
    n_iter : integer, optional (default=1000)
        The num_boost_round in original xgboost.
    n_jobs : integer, optional (default=-1)
        The nthread in original xgboost.
        When it is -1, the estimator would use all cores on the machine.
    learning_rate : float, optional (default=0.3)
        The eta in original xgboost.
    gamma : float, optional (default=0)
    max_depth : int, optional (default=6)
    min_child_weight : int, optional (default=1)
    max_delta_step : float, optional (default=0)
    subsample: float, optional (default=1)
        It ranges in (0, 1].
    colsample_bytree : float, optional (default=1)
        It ranges in (0, 1].
    base_score : float, optional (default=0.5)
    random_state : int or None, optional (default=None)
    early_stopping_rounds : int, optional (default=100)
    num_parallel_tree : int or None, optional (default=None)
        This is in experience. If it is set to an int, n_estimators would be set to 0 internally.
    verbose : bool, optional (default=True)
    Attributes
    ----------
    classes_ : list of classes
    bst_ : the xgboost boosted object
    """
    def __init__(self,
                 n_iter = 1000,
                 n_jobs=-1,
                 learning_rate=0.3,
                 gamma=0,
                 max_depth=6,
                 min_child_weight=1,
                 max_delta_step=0,
                 subsample=1,
                 colsample_bytree=1,
                 base_score=0.5,
                 random_state=None,
                 early_stopping_rounds=100,
                 num_parallel_tree=None,
                 verbose=True):
        # Multi-class objective is the default here; get_xgb_params()
        # switches to binary:logistic once fit() has seen only 2 classes.
        super(XGBClassifier, self).__init__(n_iter,
                                            n_jobs,
                                            learning_rate,
                                            gamma,
                                            max_depth,
                                            min_child_weight,
                                            max_delta_step,
                                            subsample,
                                            colsample_bytree,
                                            base_score,
                                            random_state,
                                            early_stopping_rounds,
                                            num_parallel_tree,
                                            verbose,
                                            objective='multi:softprob',
                                            eval_metric='mlogloss')
    def get_xgb_params(self):
        """Get the params for xgboost
        Returns
        -------
        xgb_params : dict
            The suitable params for xgboost.
        """
        xgb_params = {
            'eta': self.learning_rate,
            'gamma': self.gamma,
            'max_depth': int(self.max_depth),
            'min_child_weight': int(self.min_child_weight),
            'max_delta_step': self.max_delta_step,
            'subsample': self.subsample,
            'colsample_bytree': self.colsample_bytree,
            # NOTE(review): early stopping is actually driven by the
            # early_stopping_rounds kwarg of xgb.train in fit();
            # presumably this dict entry is ignored by xgboost — confirm.
            'early_stopping_rounds': int(self.early_stopping_rounds)
        }
        if not self.verbose:
            xgb_params['silent'] = 1
        if self.random_state is None:
            # NOTE(review): np.random.randint's upper bound is exclusive
            # and must fit the platform default int; 2**32 may raise on
            # 32-bit platforms — confirm.
            xgb_params['seed'] = np.random.randint(0, 2**32)
        else:
            xgb_params['seed'] = int(self.random_state)
        if self.n_jobs > 0:
            xgb_params['nthread'] = int(self.n_jobs)
        # we have to figure out the num_classes here. :-(
        # that is why we want to accept y in this function
        # (classes_ only exists after _ready_to_fit has seen y)
        if hasattr(self, 'classes_'):
            num_class = len(self.classes_)
            if num_class > 2:
                xgb_params['objective'] = 'multi:softprob'
                xgb_params['eval_metric'] = 'mlogloss'
                xgb_params['num_class'] = num_class
            elif num_class == 2:
                # binary problem: single-output logistic + AUC metric
                xgb_params['objective'] = 'binary:logistic'
                xgb_params['eval_metric'] = 'auc'
        if not (self.num_parallel_tree is None):
            # then we are using random forest!
            # NOTE(review): side effect — this getter mutates self.n_iter
            # so that fit() runs a single boosting round.
            self.n_iter = 1
            xgb_params['num_parallel_tree'] = int(self.num_parallel_tree)
        return xgb_params
    def _ready_to_fit(self, X, y):
        """
        Check out the classes of y
        """
        # store the distinct labels and re-encode y as 0..n_classes-1
        self.classes_, y = np.unique(y, return_inverse=True)
        return X, y
    def predict(self, X):
        """Predict class for X.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        p = self.predict_proba(X)
        # argmax over the last axis: p.ndim-1 copes with the 1-d array
        # predict_proba returns for a single squeezed sample
        y = self.classes_[np.argmax(p, axis=p.ndim-1)]
        return y
    def decision_function(self, X):
        """Same as predict_proba()
        """
        return self.predict_proba(X)
    def predict_proba(self, X):
        """Predict class probabilities for X.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        xg_test = xgb.DMatrix(X)
        # multi classes
        if len(self.classes_) > 2:
            p = self.bst_.predict(xg_test)
        # 2 classes
        # p is a 1 dimensional array-like if using binary:logistic
        elif len(self.classes_) == 2:
            # rebuild the two-column [P(class0), P(class1)] matrix
            pred = self.bst_.predict(xg_test)
            another_pred = 1 - pred
            p = np.array([another_pred, pred]).T
        # squeeze the singleton row for a single input sample
        if p.shape[0] == 1:
            p = p[0, :]
        return p
    def predict_log_proba(self, X):
        """Predict log of class probabilities for X.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        return np.log(self.predict_proba(X))
    def score(self, X, y, sample_weight=None, score_type='auto'):
        """Returns the goodness of fit on the given test data and labels.
        In original sklearn, it returns the mean accuracy.
        But in this implemention, it is possible to choose different types: 'n_mlogloss', 'mean_acc', 'auto'
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples) or (n_samples, n_outputs)
            True labels for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.
        score_type : 'n_mlogloss' or 'auto' or 'mean_acc' (default='auto')
            If 'auto',
                the function would return Area Under Curve if binary classification,
                or negative of multi-class log loss if more than 2 classes.
            If 'n_mlogloss',
                the function would return the negative value of multi-class log loss.
            If 'mean_acc',
                the function would return the mean accuracy.
        Returns
        -------
        score : float
            The higher, the better.
        """
        # unknown score_type silently falls back to 'auto'
        if not score_type in ['n_mlogloss', 'auto', 'mean_acc']:
            score_type = 'auto'
        if score_type == 'mean_acc':
            return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
        if score_type == 'n_mlogloss':
            # we want higher better, so use negative value
            return -log_loss(y, self.predict_proba(X), sample_weight=sample_weight)
        if score_type == 'auto':
            if len(self.classes_)==2:
                # binary: AUC on the positive-class probability column
                return roc_auc_score(y, self.predict_proba(X)[:, 1], sample_weight=sample_weight)
            else:
                return -log_loss(y, self.predict_proba(X), sample_weight=sample_weight)
class XGBRegressor(XGBEstimator, RegressorMixin):
    """A regressor interface of xgboost for good intention.
    Parameters
    ----------
    n_iter : integer, optional (default=1000)
        The num_boost_round in original xgboost.
    n_jobs : integer, optional (default=-1)
        The nthread in original xgboost.
        When it is -1, the estimator would use all cores on the machine.
    learning_rate : float, optional (default=0.3)
        The eta in original xgboost.
    gamma : float, optional (default=0)
    max_depth : int, optional (default=6)
    min_child_weight : int, optional (default=1)
    max_delta_step : float, optional (default=0)
    subsample: float, optional (default=1)
        It ranges in (0, 1].
    colsample_bytree: float, optional (default=1)
        It ranges in (0, 1].
    base_score: float, optional (default=0.5)
    random_state: int or None, optional (default=None)
    early_stopping_rounds: int, optional (default=100)
    num_parallel_tree: int or None, optional (default=None)
        This is in experience. If it is set to an int, n_estimators would be set to 0 internally.
    verbose: bool, optional (default=True)
    Attributes
    ----------
    bst_ : the xgboost boosted object
    """
    def __init__(self,
                 n_iter=1000,
                 n_jobs=-1,
                 learning_rate=0.3,
                 gamma=0,
                 max_depth=6,
                 min_child_weight=1,
                 max_delta_step=0,
                 subsample=1,
                 colsample_bytree=1,
                 base_score=0.5,
                 random_state=None,
                 early_stopping_rounds=100,
                 num_parallel_tree=None,
                 verbose=True):
        # BUG FIX: this previously read ``super(XGBRegression, self)``;
        # ``XGBRegression`` is not defined anywhere, so constructing an
        # XGBRegressor always raised NameError.
        super(XGBRegressor, self).__init__(n_iter,
                                           n_jobs,
                                           learning_rate,
                                           gamma,
                                           max_depth,
                                           min_child_weight,
                                           max_delta_step,
                                           subsample,
                                           colsample_bytree,
                                           base_score,
                                           random_state,
                                           early_stopping_rounds,
                                           num_parallel_tree,
                                           verbose,
                                           objective='reg:linear',
                                           eval_metric='rmse')
| {
"repo_name": "fyears/sklearn_xgb",
"path": "sklearn_xgb/__init__.py",
"copies": "1",
"size": "18515",
"license": "apache-2.0",
"hash": 3177853271446869500,
"line_mean": 27.7947122862,
"line_max": 175,
"alpha_frac": 0.570510397,
"autogenerated": false,
"ratio": 3.784750613246116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4855261010246116,
"avg_score": null,
"num_lines": null
} |
""" An improvement over env.py (hopefully) """
import glob
from .. import gcc_utils
import env_uno
import env_due
class ArduinoEnv():
    """ Base arduino environment class.

    Wraps two gcc_utils.GccEnv instances: one for building the Arduino
    core static library and one for the user's project sources, and
    produces doit-style task dictionaries for build and upload.
    """
    # -----------------------------
    # public
    def __init__(self, proj_name, build_dir, arduino_path, hardware):
        # hardware-specific compiler/linker settings shared by both envs
        hardware_env = get_hardware_env(arduino_path, hardware)
        # environment for compiling the Arduino core into core.a
        self.arduino_core_env = gcc_utils.GccEnv(build_dir + '/core')
        self.arduino_core_env.variables.update(hardware_env)
        self.arduino_core_env.variables[
            'core lib output path'] = build_dir + '/core/core.a'
        # environment for the user's own sources and final outputs
        self.user_env = gcc_utils.GccEnv(build_dir)
        self.user_env.variables['project name'] = proj_name
        self.user_env.variables.update(hardware_env)
        self.user_env.variables['c source files'] = []
        self.user_env.variables['c++ source files'] = []
        self.user_env.variables['arduino path'] = arduino_path
        self.user_env.variables['elf output'] = build_dir + '/' + proj_name + '.elf'
        self.user_env.variables['bin output'] = build_dir + '/' + proj_name + '.bin'
    def set_c_source_files(self, sources):
        """Replace the list of user C source files."""
        self.user_env.variables['c source files'] = sources
    def set_cpp_source_files(self, sources):
        """Replace the list of user C++ source files."""
        self.user_env.variables['c++ source files'] = sources
    def add_include_dirs(self, dirs):
        """Append header search paths for both C and C++ compilation."""
        self.user_env.variables['c header search paths'] += dirs
        self.user_env.variables['c++ header search paths'] += dirs
    def set_serial_port(self, serial_port):
        """Set the serial port used by the upload task."""
        self.user_env.variables['serial_port'] = serial_port
    def get_build_tasks(self):
        """Return the doit task list: core build, user compiles, link,
        and binary conversion."""
        elf_output = self.user_env.variables['elf output']
        bin_output = self.user_env.variables['bin output']
        tasks = self._get_build_core_tasks()
        tasks += self.user_env.get_c_compile_tasks()
        tasks += self.user_env.get_cpp_compile_tasks()
        tasks += [self._get_due_link_task(elf_output)]
        tasks += [self._get_create_binary_task(elf_output, bin_output)]
        return tasks
    def get_upload_tasks(self):
        """Return the doit task list for uploading to the board.
        NOTE(review): always uses the Due upload procedure — confirm for
        other hardware types."""
        return [self._get_due_upload_task()]
    # -----------------------------
    # private
    def _get_build_core_tasks(self):
        # compile the Arduino core sources, then archive them into core.a
        tasks = self.arduino_core_env.get_c_compile_tasks()
        tasks += self.arduino_core_env.get_cpp_compile_tasks()
        tasks += [self._get_archive_core_task()]
        return tasks
    def _get_archive_core_task(self):
        # ar rcs core.a <all core objects>
        objs = self.arduino_core_env.get_all_objs()
        archiver = self.arduino_core_env.variables['archiver']
        output = self.arduino_core_env.variables['core lib output path']
        archive_command = archiver + ' rcs ' + output + ' ' + ' '.join(objs)
        return {
            'name': output,
            'actions': [archive_command],
            'targets': [output],
            'file_dep': objs,
            'clean': True
        }
    def _get_due_link_task(self, output):
        # Can't use GccEnv link task as the arduino Due link command is rather
        # complicated
        arduino_path = self.user_env.variables['arduino path']
        output_dir = self.user_env.variables['build directory']
        linker = self.user_env.variables['linker']
        flags = self.user_env.variables['linker flags']
        script = self.user_env.variables['linker script']
        core = self.arduino_core_env.variables['core lib output path']
        link_map = output_dir + '/' + self.user_env.variables['project name'] + '.map'
        cmd_args = [linker] + flags
        cmd_args += [
            '-T' + script,
            '-Wl,-Map,' + link_map,
            '-o',
            output,
        ]
        # --start/--end-group lets the linker resolve circular references
        # between the user objects, libsam and the core archive
        cmd_args += ['-Wl,--start-group']
        cmd_args += glob.glob(self.arduino_core_env.variables['build directory'] + '/obj/syscalls*.o')
        cmd_args += self.user_env.get_all_objs()
        cmd_args += [arduino_path + '/hardware/arduino/sam/variants/arduino_due_x/libsam_sam3x8e_gcc_rel.a']
        cmd_args += [core]
        cmd_args += ['-Wl,--end-group']
        link_cmd = ' '.join(cmd_args)
        return {
            'name': output,
            'actions': [link_cmd],
            'file_dep': self.user_env.get_all_objs(),
            'targets': [output],
            'clean': True
        }
    def _get_create_binary_task(self, input, output):
        # objcopy the linked .elf into a raw .bin for upload
        objcopy = self.user_env.variables['objcopy']
        cmd = ' '.join([objcopy, '-O binary', input, output])
        return {
            'name': output,
            'actions': [cmd],
            'file_dep': [input],
            'targets': [output],
            'clean': True
        }
    def _get_due_upload_task(self):
        # touch the port at 1200 baud to put the Due into bootloader mode,
        # then flash with bossac
        arduino_path = self.user_env.variables['arduino path']
        uploader = arduino_path + '/hardware/tools/bossac.exe'
        serial_port = self.user_env.variables['serial_port']
        cmd_args = [
            uploader,
            '--port=' + serial_port,
            '-U false -e -w -v -b',
            self.user_env.variables['bin output'],
            '-R'
        ]
        return {
            'name': 'upload',
            # TODO: the following is Windows-only.
            # Linux version is:
            # stty -F /dev/${PORT} raw ispeed 1200 ospeed 1200
            # then bossac
            'actions': ['mode ' + serial_port + ':1200,n,8,1',
                        'timeout /T 2',
                        ' '.join(cmd_args)],
            'file_dep': [self.user_env.variables['bin output']],
            'verbosity': 2
        }
    def _add_arduino_library_source(self):
        """ Search user source code for arduino libraries to
            include into the build environment
        """
        # TODO: this
        pass
    def _get_incl_sys_headers(self, path):
        """ Return a list of <> includes in the given file """
        # TODO: needed for adding arduino libraries
        pass
def get_hardware_env(arduino_path, hardware):
    """Return the gcc environment variables for the given board.

    Parameters
    ----------
    arduino_path : root directory of the Arduino installation
    hardware : board name, 'uno' or 'due' (case-insensitive)

    Raises
    ------
    ValueError : for an unrecognised board name.  ValueError subclasses
        Exception, so callers catching the previous bare Exception keep
        working.
    """
    board = hardware.lower()
    if board == 'uno':
        return env_uno.get_env(arduino_path)
    if board == 'due':
        return env_due.get_env(arduino_path)
    raise ValueError('Unknown hardware type: ' + hardware)
| {
"repo_name": "uozuAho/doit_helpers",
"path": "doit_helpers/arduino/env2.py",
"copies": "1",
"size": "6182",
"license": "mit",
"hash": 2677083226785102300,
"line_mean": 34.5287356322,
"line_max": 108,
"alpha_frac": 0.5580718214,
"autogenerated": false,
"ratio": 3.5734104046242776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4631482226024278,
"avg_score": null,
"num_lines": null
} |
"""An incredibly simple HTTP basic auth implementation."""
from base64 import b64decode, b64encode
from six.moves.urllib.parse import quote, unquote
class DecodeError(Exception):
pass
class EncodeError(Exception):
pass
def encode(username, password):
    """Build an HTTP basic authentication header value from a valid
    username and password (both URL-quoted before base64 encoding).
    """
    if ':' in username:
        raise EncodeError
    credentials = quote(username) + ':' + quote(password)
    token = b64encode(credentials.encode()).decode()
    return 'Basic ' + token
def decode(encoded_str):
    """Decode an encrypted HTTP basic authentication string. Returns a tuple of
    the form (username, password), and raises a DecodeError exception if
    nothing could be decoded.
    """
    split = encoded_str.strip().split(' ')

    # If split is only one element, try to decode the username and password
    # directly.
    if len(split) == 1:
        try:
            username, password = b64decode(split[0]).decode().split(':', 1)
        except (TypeError, ValueError) as err:
            # binascii.Error (bad base64) and UnicodeDecodeError are both
            # ValueError subclasses; a missing ':' raises ValueError on the
            # unpack.  Narrowed from a bare except so unrelated errors
            # (KeyboardInterrupt etc.) are no longer swallowed.
            raise DecodeError from err

    # If there are only two elements, check the first and ensure it says
    # 'basic' so that we know we're about to decode the right thing. If not,
    # bail out.
    elif len(split) == 2:
        if split[0].strip().lower() == 'basic':
            try:
                username, password = b64decode(split[1]).decode().split(':', 1)
            except (TypeError, ValueError) as err:
                raise DecodeError from err
        else:
            raise DecodeError

    # If there are more than 2 elements, something crazy must be happening.
    # Bail.
    else:
        raise DecodeError

    return unquote(username), unquote(password)
| {
"repo_name": "rdegges/python-basicauth",
"path": "basicauth.py",
"copies": "1",
"size": "1689",
"license": "unlicense",
"hash": 7181980355720775000,
"line_mean": 27.6271186441,
"line_max": 79,
"alpha_frac": 0.6370633511,
"autogenerated": false,
"ratio": 4.308673469387755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019067796610169492,
"num_lines": 59
} |
"""An incredibly simple HTTP basic auth implementation.
"""
# stdlib imports
from base64 import b64decode
from base64 import b64encode
from urllib import quote
from urllib import unquote
class DecodeError(Exception):
pass
def encode(username, password):
    """Returns an HTTP basic authentication encrypted string given a valid
    username and password.
    """
    credentials = '%s:%s' % (quote(username), quote(password))
    return 'Basic ' + b64encode(credentials)
def decode(encoded_str):
    """Decode an encrypted HTTP basic authentication string. Returns a tuple of
    the form (username, password), and raises a DecodeError exception if
    nothing could be decoded.
    """
    split = encoded_str.strip().split(' ')

    # If split is only one element, try to decode the username and password
    # directly.
    if len(split) == 1:
        try:
            # BUG FIX: split on the *first* ':' only.  Previously a
            # password containing ':' made the 2-tuple unpack raise
            # ValueError, which the bare except turned into a spurious
            # DecodeError.
            username, password = b64decode(split[0]).split(':', 1)
        except Exception:
            # narrowed from a bare except: no longer traps SystemExit /
            # KeyboardInterrupt
            raise DecodeError

    # If there are only two elements, check the first and ensure it says
    # 'basic' so that we know we're about to decode the right thing. If not,
    # bail out.
    elif len(split) == 2:
        if split[0].strip().lower() == 'basic':
            try:
                username, password = b64decode(split[1]).split(':', 1)
            except Exception:
                raise DecodeError
        else:
            raise DecodeError

    # If there are more than 2 elements, something crazy must be happening.
    # Bail.
    else:
        raise DecodeError

    return unquote(username), unquote(password)
def check_auth(request, username, password):
    """Check if the given request contains correct auth headers.

    Returns True only when the request's Authorization header decodes to
    exactly (username, password); False when the header is absent.  May
    propagate DecodeError from decode() for a malformed header.
    """
    # idiomatic membership test (was: ``if not 'Authorization' in ...``)
    if 'Authorization' not in request.headers:
        return False
    return (username, password) == decode(request.headers['Authorization'])
| {
"repo_name": "paddycarey/beardo-control",
"path": "app/vendor/nacelle/contrib/lockdown/utils.py",
"copies": "2",
"size": "1812",
"license": "mit",
"hash": -4561468497249334000,
"line_mean": 28.7049180328,
"line_max": 79,
"alpha_frac": 0.6490066225,
"autogenerated": false,
"ratio": 4.398058252427185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002193058946634112,
"num_lines": 61
} |
# An indexer using the DIALS methods.
import copy
import logging
import math
import os
import string
import libtbx
from cctbx import crystal, sgtbx
# wrappers for programs that this needs: DIALS
from dials.array_family import flex
from xia2.Wrappers.Dials.GenerateMask import GenerateMask as _GenerateMask
from xia2.Wrappers.Dials.EstimateGain import EstimateGain as _EstimateGain
from xia2.Wrappers.Dials.Spotfinder import Spotfinder as _Spotfinder
from xia2.Wrappers.Dials.DetectBlanks import DetectBlanks as _DetectBlanks
from xia2.Wrappers.Dials.SearchBeamPosition import (
SearchBeamPosition as _SearchBeamPosition,
)
from xia2.Wrappers.Dials.Index import Index as _Index
from xia2.Wrappers.Dials.CheckIndexingSymmetry import (
CheckIndexingSymmetry as _CheckIndexingSymmetry,
)
from xia2.Wrappers.Dials.Reindex import Reindex as _Reindex
from xia2.Wrappers.Dials.Refine import Refine as _Refine
from xia2.Wrappers.Dials.RefineBravaisSettings import (
RefineBravaisSettings as _RefineBravaisSettings,
)
from xia2.Wrappers.Dials.Report import Report as _Report
# interfaces that this must implement to be an indexer
from xia2.Schema.Interfaces.Indexer import Indexer
# odds and sods that are needed
from xia2.lib.bits import auto_logfiler
from xia2.Handlers.Streams import banner
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Files import FileHandler
from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number
from xia2.Handlers.Citations import Citations
from dials.util.ascii_art import spot_counts_per_image_plot
from cctbx.sgtbx import bravais_types
from dxtbx.serialize import load
logger = logging.getLogger("xia2.Modules.Indexer.DialsIndexer")
class DialsIndexer(Indexer):
    def __init__(self):
        super().__init__()
        # optional background image range; None until assigned elsewhere
        self._background_images = None
        # place to store working data
        self._data_files = {}
        # candidate lattice solutions, presumably filled in during
        # indexing — NOTE(review): confirm key/value shape against _index
        self._solutions = {}
        # FIXME this is a stupid low resolution limit to use...
        self._indxr_low_resolution = 40.0
# admin functions
def get_indexed_filename(self):
return self.get_indexer_payload("indexed_filename")
# factory functions
def GenerateMask(self):
genmask = _GenerateMask()
genmask.set_working_directory(self.get_working_directory())
auto_logfiler(genmask)
return genmask
def EstimateGain(self):
estimater = _EstimateGain()
estimater.set_working_directory(self.get_working_directory())
auto_logfiler(estimater)
return estimater
def Spotfinder(self):
spotfinder = _Spotfinder()
spotfinder.set_working_directory(self.get_working_directory())
auto_logfiler(spotfinder)
spotfinder.set_hot_mask_prefix("%d_hot_mask" % spotfinder.get_xpid())
return spotfinder
def DetectBlanks(self):
params = PhilIndex.params.dials.detect_blanks
detectblanks = _DetectBlanks()
detectblanks.set_working_directory(self.get_working_directory())
detectblanks.set_phi_step(params.phi_step)
detectblanks.set_counts_fractional_loss(params.counts_fractional_loss)
detectblanks.set_misigma_fractional_loss(params.misigma_fractional_loss)
auto_logfiler(detectblanks)
return detectblanks
def SearchBeamPosition(self):
discovery = _SearchBeamPosition()
discovery.set_working_directory(self.get_working_directory())
# params = PhilIndex.params.dials.index
auto_logfiler(discovery)
return discovery
def Index(self):
index = _Index()
index.set_working_directory(self.get_working_directory())
params = PhilIndex.params.dials.index
index.set_reflections_per_degree(params.reflections_per_degree)
if params.fft3d.n_points is not None:
index.set_fft3d_n_points(params.fft3d.n_points)
auto_logfiler(index)
index.set_outlier_algorithm(PhilIndex.params.dials.outlier.algorithm)
index.set_histogram_binning(PhilIndex.params.dials.index.histogram_binning)
index.set_nearest_neighbor_percentile(
PhilIndex.params.dials.index.nearest_neighbor_percentile
)
return index
def CheckIndexingSymmetry(self):
checksym = _CheckIndexingSymmetry()
checksym.set_working_directory(self.get_working_directory())
auto_logfiler(checksym)
return checksym
def Reindex(self):
reindex = _Reindex()
reindex.set_working_directory(self.get_working_directory())
auto_logfiler(reindex)
return reindex
def Refine(self):
refine = _Refine()
params = PhilIndex.params.dials
refine.set_working_directory(self.get_working_directory())
refine.set_scan_varying(False)
refine.set_outlier_algorithm(params.outlier.algorithm)
refine.set_close_to_spindle_cutoff(params.close_to_spindle_cutoff)
if params.fix_geometry:
refine.set_detector_fix("all")
refine.set_beam_fix("all")
elif params.fix_distance:
refine.set_detector_fix("distance")
auto_logfiler(refine)
return refine
def RefineBravaisSettings(self):
rbs = _RefineBravaisSettings()
rbs.set_working_directory(self.get_working_directory())
rbs.set_close_to_spindle_cutoff(PhilIndex.params.dials.close_to_spindle_cutoff)
auto_logfiler(rbs)
return rbs
def Report(self):
report = _Report()
report.set_working_directory(self.get_working_directory())
auto_logfiler(report, "REPORT")
return report
##########################################
    def _index_select_images_i(self, imageset):
        # FIXME copied from XDSIndexer.py!
        """Select correct images based on image headers.

        Returns a list of (first, last) inclusive 1-based image-number
        wedges to use for indexing.
        """
        start, end = imageset.get_scan().get_array_range()
        # 1-based image numbers covering the whole scan
        images = tuple(range(start + 1, end + 1))
        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block?
        wedges = []
        if len(images) < 3:
            # work on the assumption that this is a reference pair
            if len(images) == 1:
                wedges.append((images[0], images[0]))
            else:
                wedges.append((images[0], images[1]))
        else:
            # take an opening block of up to 5 images...
            block_size = min(len(images), 5)
            logger.debug(
                "Adding images for indexer: %d -> %d", images[0], images[block_size - 1]
            )
            wedges.append((images[0], images[block_size - 1]))
            phi_width = imageset.get_scan().get_oscillation()[1]
            if int(90.0 / phi_width) + block_size in images:
                # assume we can add a wedge around 45 degrees as well...
                # (three wedges well separated in phi give the indexer a
                # good sampling of reciprocal space)
                logger.debug(
                    "Adding images for indexer: %d -> %d",
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                )
                logger.debug(
                    "Adding images for indexer: %d -> %d",
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                )
                wedges.append(
                    (
                        int(45.0 / phi_width) + images[0],
                        int(45.0 / phi_width) + images[0] + block_size - 1,
                    )
                )
                wedges.append(
                    (
                        int(90.0 / phi_width) + images[0],
                        int(90.0 / phi_width) + images[0] + block_size - 1,
                    )
                )
            else:
                # add some half-way anyway
                # (scan too narrow for 45/90 degree wedges: use the middle
                # and, if possible, the end of the scan instead)
                first = (len(images) // 2) - (block_size // 2) + images[0] - 1
                if first > wedges[0][1]:
                    last = first + block_size - 1
                    logger.debug("Adding images for indexer: %d -> %d", first, last)
                    wedges.append((first, last))
                if len(images) > block_size:
                    logger.debug(
                        "Adding images for indexer: %d -> %d",
                        images[-block_size],
                        images[-1],
                    )
                    wedges.append((images[-block_size], images[-1]))
        return wedges
    def _index_prepare(self):
        """For each sweep: import, mask, (optionally) estimate the gain,
        find spots, detect blank image ranges and (optionally) run a beam
        centre search; stash the spot lists and experiment files in the
        indexer payload."""
        Citations.cite("dials")
        # all_images = self.get_matching_images()
        # first = min(all_images)
        # last = max(all_images)
        spot_lists = []
        experiments_filenames = []
        for imageset, xsweep in zip(self._indxr_imagesets, self._indxr_sweeps):
            logger.notice(banner("Spotfinding %s" % xsweep.get_name()))
            first, last = imageset.get_scan().get_image_range()
            # at this stage, break out to run the DIALS code: this sets itself up
            # now cheat and pass in some information... save re-reading all of the
            # image headers
            # FIXME need to adjust this to allow (say) three chunks of images
            from dxtbx.model.experiment_list import ExperimentListFactory
            # write the imageset out as an experiment list for the wrappers
            sweep_filename = os.path.join(
                self.get_working_directory(), "%s_import.expt" % xsweep.get_name()
            )
            ExperimentListFactory.from_imageset_and_crystal(imageset, None).as_file(
                sweep_filename
            )
            # apply any user-configured masking; run() returns the masked
            # experiment file which replaces sweep_filename from here on
            genmask = self.GenerateMask()
            genmask.set_input_experiments(sweep_filename)
            genmask.set_output_experiments(
                os.path.join(
                    self.get_working_directory(),
                    f"{genmask.get_xpid()}_{xsweep.get_name()}_masked.expt",
                )
            )
            genmask.set_params(PhilIndex.params.dials.masking)
            sweep_filename, mask_pickle = genmask.run()
            logger.debug("Generated mask for %s: %s", xsweep.get_name(), mask_pickle)
            # estimate the detector gain once and cache it in the PHIL
            # parameters so later sweeps / stages reuse it
            gain = PhilIndex.params.xia2.settings.input.gain
            if gain is libtbx.Auto:
                gain_estimater = self.EstimateGain()
                gain_estimater.set_sweep_filename(sweep_filename)
                gain_estimater.run()
                gain = gain_estimater.get_gain()
                logger.info("Estimated gain: %.2f", gain)
                PhilIndex.params.xia2.settings.input.gain = gain
            # FIXME this should really use the assigned spot finding regions
            # offset = self.get_frame_offset()
            dfs_params = PhilIndex.params.dials.find_spots
            spotfinder = self.Spotfinder()
            if last - first > 10:
                spotfinder.set_write_hot_mask(True)
            spotfinder.set_input_sweep_filename(sweep_filename)
            spotfinder.set_output_sweep_filename(
                f"{spotfinder.get_xpid()}_{xsweep.get_name()}_strong.expt"
            )
            spotfinder.set_input_spot_filename(
                f"{spotfinder.get_xpid()}_{xsweep.get_name()}_strong.refl"
            )
            # fast mode: only find spots on a few selected wedges rather
            # than the whole scan
            if PhilIndex.params.dials.fast_mode:
                wedges = self._index_select_images_i(imageset)
                spotfinder.set_scan_ranges(wedges)
            else:
                spotfinder.set_scan_ranges([(first, last)])
            # plumb through the optional spot-finding PHIL parameters
            if dfs_params.phil_file is not None:
                spotfinder.set_phil_file(dfs_params.phil_file)
            if dfs_params.min_spot_size is not None:
                spotfinder.set_min_spot_size(dfs_params.min_spot_size)
            if dfs_params.min_local is not None:
                spotfinder.set_min_local(dfs_params.min_local)
            if dfs_params.sigma_strong:
                spotfinder.set_sigma_strong(dfs_params.sigma_strong)
            gain = PhilIndex.params.xia2.settings.input.gain
            if gain:
                spotfinder.set_gain(gain)
            if dfs_params.filter_ice_rings:
                spotfinder.set_filter_ice_rings(dfs_params.filter_ice_rings)
            if dfs_params.kernel_size:
                spotfinder.set_kernel_size(dfs_params.kernel_size)
            if dfs_params.global_threshold is not None:
                spotfinder.set_global_threshold(dfs_params.global_threshold)
            if dfs_params.threshold.algorithm is not None:
                spotfinder.set_threshold_algorithm(dfs_params.threshold.algorithm)
            spotfinder.run()
            spot_filename = spotfinder.get_spot_filename()
            if not os.path.exists(spot_filename):
                raise RuntimeError(
                    "Spotfinding failed: %s does not exist."
                    % os.path.basename(spot_filename)
                )
            spot_lists.append(spot_filename)
            experiments_filenames.append(spotfinder.get_output_sweep_filename())
            refl = flex.reflection_table.from_file(spot_filename)
            if not len(refl):
                raise RuntimeError("No spots found in sweep %s" % xsweep.get_name())
            logger.info(spot_counts_per_image_plot(refl))
            # blank-image detection (skipped in fast mode); may split the
            # sweep and restart preparation
            if not PhilIndex.params.dials.fast_mode:
                detectblanks = self.DetectBlanks()
                detectblanks.set_sweep_filename(experiments_filenames[-1])
                detectblanks.set_reflections_filename(spot_filename)
                detectblanks.run()
                json = detectblanks.get_results()
                blank_regions = json["strong"]["blank_regions"]
                if len(blank_regions):
                    blank_regions = [(int(s), int(e)) for s, e in blank_regions]
                    for blank_start, blank_end in blank_regions:
                        logger.info(
                            "WARNING: Potential blank images: %i -> %i",
                            blank_start + 1,
                            blank_end,
                        )
                    if PhilIndex.params.xia2.settings.remove_blanks:
                        # invert the blank regions into the list of
                        # non-blank (start, end) ranges
                        non_blanks = []
                        start, end = imageset.get_array_range()
                        last_blank_end = start
                        for blank_start, blank_end in blank_regions:
                            if blank_start > start:
                                non_blanks.append((last_blank_end, blank_start))
                            last_blank_end = blank_end
                        if last_blank_end + 1 < end:
                            non_blanks.append((last_blank_end, end))
                        xsweep = self.get_indexer_sweep()
                        xwav = xsweep.get_wavelength()
                        xsample = xsweep.sample
                        sweep_name = xsweep.get_name()
                        for i, (nb_start, nb_end) in enumerate(non_blanks):
                            # sweep suffixes come from ascii_lowercase
                            assert i < 26
                            if i == 0:
                                # first non-blank range: trim the current
                                # sweep in place and flag preparation to
                                # be redone on the trimmed imageset
                                sub_imageset = imageset[
                                    nb_start - start : nb_end - start
                                ]
                                xsweep._frames_to_process = (nb_start + 1, nb_end + 1)
                                self.set_indexer_prepare_done(done=False)
                                self._indxr_imagesets[
                                    self._indxr_imagesets.index(imageset)
                                ] = sub_imageset
                                xsweep._integrater._setup_from_imageset(sub_imageset)
                            else:
                                # further non-blank ranges become brand new
                                # sweeps (skipping any that are too short)
                                min_images = (
                                    PhilIndex.params.xia2.settings.input.min_images
                                )
                                if (nb_end - nb_start) < min_images:
                                    continue
                                new_name = "_".join(
                                    (sweep_name, string.ascii_lowercase[i])
                                )
                                new_sweep = xwav.add_sweep(
                                    new_name,
                                    xsample,
                                    directory=os.path.join(
                                        os.path.basename(xsweep.get_directory()),
                                        new_name,
                                    ),
                                    image=imageset.get_path(nb_start - start),
                                    frames_to_process=(nb_start + 1, nb_end),
                                    beam=xsweep.get_beam_centre(),
                                    reversephi=xsweep.get_reversephi(),
                                    distance=xsweep.get_distance(),
                                    gain=xsweep.get_gain(),
                                    dmin=xsweep.get_resolution_high(),
                                    dmax=xsweep.get_resolution_low(),
                                    polarization=xsweep.get_polarization(),
                                    user_lattice=xsweep.get_user_lattice(),
                                    user_cell=xsweep.get_user_cell(),
                                    epoch=xsweep._epoch,
                                    ice=xsweep._ice,
                                    excluded_regions=xsweep._excluded_regions,
                                )
                                logger.info(
                                    "Generating new sweep: %s (%s:%i:%i)",
                                    new_sweep.get_name(),
                                    new_sweep.get_image(),
                                    new_sweep.get_frames_to_process()[0],
                                    new_sweep.get_frames_to_process()[1],
                                )
                        # sweeps were re-cut: bail out now, preparation
                        # will be re-run on the new sweep definitions
                        return
            # best-effort beam-centre search; failure is logged, not fatal
            if not PhilIndex.params.xia2.settings.trust_beam_centre:
                discovery = self.SearchBeamPosition()
                discovery.set_sweep_filename(experiments_filenames[-1])
                discovery.set_spot_filename(spot_filename)
                # set scan_range to correspond to not more than 180 degrees
                # if we have > 20000 reflections
                width = imageset.get_scan().get_oscillation()[1]
                if (last - first) * width > 180.0 and len(refl) > 20000:
                    end = first + int(round(180.0 / width)) - 1
                    logger.debug("Using %d to %d for beam search", first, end)
                    discovery.set_image_range((first, end))
                try:
                    discovery.run()
                    result = discovery.get_optimized_experiments_filename()
                    # overwrite indexed.expt in experiments list
                    experiments_filenames[-1] = result
                except Exception as e:
                    logger.debug(
                        "DIALS beam centre search failed: %s", str(e), exc_info=True
                    )
        self.set_indexer_payload("spot_lists", spot_lists)
        self.set_indexer_payload("experiments", experiments_filenames)
def _index(self):
    """Run DIALS indexing and post-process the result.

    Chooses an indexing method (trying both fft3d and fft1d when none is
    forced), runs symmetry/offset checks and scan-static refinement, then
    either enumerates Bravais settings (no input lattice) or accepts the
    indexed solution as-is (input lattice given).
    """
    if PhilIndex.params.dials.index.method in (libtbx.Auto, None):
        if self._indxr_input_cell is not None:
            # A target cell is known: real space grid search is the method
            # designed for that case.
            indexer = self._do_indexing("real_space_grid_search")
        else:
            # Try both FFT-based methods and pick the better outcome.
            try:
                indexer_fft3d = self._do_indexing(method="fft3d")
                nref_3d, rmsd_3d = indexer_fft3d.get_nref_rmsds()
            except Exception as e:
                nref_3d = None
                rmsd_3d = None
                indexing_failure = e
            try:
                indexer_fft1d = self._do_indexing(method="fft1d")
                nref_1d, rmsd_1d = indexer_fft1d.get_nref_rmsds()
            except Exception as e:
                nref_1d = None
                rmsd_1d = None
                indexing_failure = e
            # Select the surviving run; if both succeeded, prefer fft1d only
            # when it indexed more reflections AND improved all three RMSD
            # components.  (The previous single boolean expression compared
            # e.g. ``nref_1d > nref_3d`` with None operands when exactly one
            # method failed, raising TypeError on Python 3 instead of
            # selecting the successful result.)
            if nref_3d is None and nref_1d is None:
                raise RuntimeError(indexing_failure)
            elif nref_3d is None:
                indexer = indexer_fft1d
            elif nref_1d is None:
                indexer = indexer_fft3d
            elif (
                nref_1d > nref_3d
                and rmsd_1d[0] < rmsd_3d[0]
                and rmsd_1d[1] < rmsd_3d[1]
                and rmsd_1d[2] < rmsd_3d[2]
            ):
                indexer = indexer_fft1d
            else:
                indexer = indexer_fft3d
    else:
        indexer = self._do_indexing(method=PhilIndex.params.dials.index.method)

    # not strictly the P1 cell, rather the cell that was used in indexing
    self._p1_cell = indexer._p1_cell
    self.set_indexer_payload("indexed_filename", indexer.get_indexed_filename())

    indexed_file = indexer.get_indexed_filename()
    indexed_experiments = indexer.get_experiments_filename()

    fast_mode = PhilIndex.params.dials.fast_mode
    trust_beam_centre = PhilIndex.params.xia2.settings.trust_beam_centre
    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
    check_indexing_symmetry = PhilIndex.params.dials.check_indexing_symmetry

    if check_indexing_symmetry and not (
        trust_beam_centre or fast_mode or multi_sweep_indexing
    ):
        # Look for a systematic hkl offset in the indexing solution and, if
        # one is found, reindex and re-refine.
        checksym = self.CheckIndexingSymmetry()
        checksym.set_experiments_filename(indexed_experiments)
        checksym.set_indexed_filename(indexed_file)
        checksym.set_grid_search_scope(1)
        checksym.run()
        hkl_offset = checksym.get_hkl_offset()
        logger.debug("hkl_offset: %s", str(hkl_offset))
        if hkl_offset is not None and hkl_offset != (0, 0, 0):
            reindex = self.Reindex()
            reindex.set_hkl_offset(hkl_offset)
            reindex.set_indexed_filename(indexed_file)
            reindex.run()
            indexed_file = reindex.get_reindexed_reflections_filename()

            # do some scan-static refinement - run twice, first without
            # outlier rejection as the model is too far from reality to do
            # a sensible job of outlier rejection
            refiner = self.Refine()
            refiner.set_experiments_filename(indexed_experiments)
            refiner.set_indexed_filename(
                reindex.get_reindexed_reflections_filename()
            )
            refiner.set_outlier_algorithm(None)
            refiner.run()
            indexed_experiments = refiner.get_refined_experiments_filename()

            # now again with outlier rejection (possibly)
            refiner = self.Refine()
            refiner.set_experiments_filename(indexed_experiments)
            refiner.set_indexed_filename(indexed_file)
            refiner.run()
            indexed_experiments = refiner.get_refined_experiments_filename()

    if self._indxr_input_lattice is None:
        # FIXME in here should respect the input unit cell and lattice if provided
        # FIXME from this (i) populate the helper table,
        # (ii) try to avoid re-running the indexing
        # step if we eliminate a solution as we have all of the refined results
        # already available.
        rbs = self.RefineBravaisSettings()
        rbs.set_experiments_filename(indexed_experiments)
        rbs.set_indexed_filename(indexed_file)
        if PhilIndex.params.dials.fix_geometry:
            rbs.set_detector_fix("all")
            rbs.set_beam_fix("all")
        elif PhilIndex.params.dials.fix_distance:
            rbs.set_detector_fix("distance")

        FileHandler.record_log_file(
            "%s LATTICE" % self.get_indexer_full_name(), rbs.get_log_file()
        )
        rbs.run()

        for k in sorted(rbs.get_bravais_summary()):
            summary = rbs.get_bravais_summary()[k]

            # FIXME need to do this better - for the moment only accept lattices
            # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

            if self._indxr_input_lattice is None:
                if not summary["recommended"]:
                    continue

            experiments = load.experiment_list(
                summary["experiments_file"], check_format=False
            )
            cryst = experiments.crystals()[0]
            cs = crystal.symmetry(
                unit_cell=cryst.get_unit_cell(), space_group=cryst.get_space_group()
            )
            lattice = str(bravais_types.bravais_lattice(group=cs.space_group()))
            cb_op = sgtbx.change_of_basis_op(str(summary["cb_op"]))

            self._solutions[k] = {
                "number": k,
                "mosaic": 0.0,
                "metric": summary["max_angular_difference"],
                "rmsd": summary["rmsd"],
                "nspots": summary["nspots"],
                "lattice": lattice,
                "cell": cs.unit_cell().parameters(),
                "experiments_file": summary["experiments_file"],
                "cb_op": str(cb_op),
            }

        self._solution = self.get_solution()
        self._indxr_lattice = self._solution["lattice"]

        # Record, for each lattice type, the solution with the smallest
        # metric penalty.
        for solution in self._solutions:
            lattice = self._solutions[solution]["lattice"]
            if (
                self._indxr_input_lattice is not None
                and self._indxr_input_lattice != lattice
            ):
                continue
            if lattice in self._indxr_other_lattice_cell:
                if (
                    self._indxr_other_lattice_cell[lattice]["metric"]
                    < self._solutions[solution]["metric"]
                ):
                    continue

            self._indxr_other_lattice_cell[lattice] = {
                "metric": self._solutions[solution]["metric"],
                "cell": self._solutions[solution]["cell"],
            }

        self._indxr_mosaic = self._solution["mosaic"]

        experiments_file = self._solution["experiments_file"]
        experiment_list = load.experiment_list(experiments_file)
        self.set_indexer_experiment_list(experiment_list)
        self.set_indexer_payload("experiments_filename", experiments_file)

        # reindex the output reflection list to this solution
        reindex = self.Reindex()
        reindex.set_indexed_filename(indexed_file)
        reindex.set_cb_op(self._solution["cb_op"])
        reindex.set_space_group(
            str(lattice_to_spacegroup_number(self._solution["lattice"]))
        )
        reindex.run()
        indexed_file = reindex.get_reindexed_reflections_filename()
        self.set_indexer_payload("indexed_filename", indexed_file)
    else:
        # An input lattice was imposed: accept the indexed solution as the
        # single available one without Bravais enumeration.
        experiment_list = load.experiment_list(indexed_experiments)
        self.set_indexer_experiment_list(experiment_list)
        self.set_indexer_payload("experiments_filename", indexed_experiments)

        cryst = experiment_list.crystals()[0]
        lattice = str(bravais_types.bravais_lattice(group=cryst.get_space_group()))
        self._indxr_lattice = lattice
        self._solutions = {}
        self._solutions[0] = {
            "number": 0,
            "mosaic": 0.0,
            "metric": -1,
            "rmsd": -1,
            "nspots": -1,
            "lattice": lattice,
            "cell": cryst.get_unit_cell().parameters(),
            "experiments_file": indexed_experiments,
            "cb_op": "a,b,c",
        }

        self._indxr_other_lattice_cell[lattice] = {
            "metric": self._solutions[0]["metric"],
            "cell": self._solutions[0]["cell"],
        }
def _do_indexing(self, method=None):
    """Run a single dials.index job over all accumulated spot lists and
    sweeps, record log/report files, and return the Index wrapper.

    method: indexing method name (e.g. "fft3d", "fft1d",
    "real_space_grid_search"); if None it is taken from the PHIL
    parameters, defaulting to "fft3d".
    Raises RuntimeError when indexing produces no output files.
    """
    indexer = self.Index()
    # Feed in every spot list and sweep/experiment collected by the
    # preceding spot-finding stage.
    for spot_list in self._indxr_payload["spot_lists"]:
        indexer.add_spot_filename(spot_list)
    for filename in self._indxr_payload["experiments"]:
        indexer.add_sweep_filename(filename)
    # Forward user-level PHIL overrides and cell-estimation limits.
    if PhilIndex.params.dials.index.phil_file is not None:
        indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
    indexer.set_max_cell(
        max_cell=PhilIndex.params.dials.index.max_cell,
        max_height_fraction=PhilIndex.params.dials.index.max_cell_estimation.max_height_fraction,
    )
    if PhilIndex.params.xia2.settings.small_molecule:
        indexer.set_min_cell(3)
    # Geometry fixing: "all" freezes detector and beam; "distance" only
    # freezes the detector distance.
    if PhilIndex.params.dials.fix_geometry:
        indexer.set_detector_fix("all")
        indexer.set_beam_fix("all")
    elif PhilIndex.params.dials.fix_distance:
        indexer.set_detector_fix("distance")
    indexer.set_close_to_spindle_cutoff(
        PhilIndex.params.dials.close_to_spindle_cutoff
    )

    # Pass through any user-supplied lattice / cell constraints.
    if self._indxr_input_lattice:
        indexer.set_indexer_input_lattice(self._indxr_input_lattice)
        logger.debug("Set lattice: %s", self._indxr_input_lattice)

    if self._indxr_input_cell:
        indexer.set_indexer_input_cell(self._indxr_input_cell)
        logger.debug("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)

    # Resolve the method: explicit argument wins, then the PHIL setting,
    # then the fft3d default.
    if method is None:
        if PhilIndex.params.dials.index.method is None:
            method = "fft3d"
            logger.debug("Choosing indexing method: %s", method)
        else:
            method = PhilIndex.params.dials.index.method

    FileHandler.record_log_file(
        "%s INDEX" % self.get_indexer_full_name(), indexer.get_log_file()
    )
    indexer.run(method)

    # Missing output files mean the job failed even if run() returned.
    if not os.path.exists(indexer.get_experiments_filename()):
        raise RuntimeError(
            "Indexing has failed: see %s for more details." % indexer.get_log_file()
        )
    elif not os.path.exists(indexer.get_indexed_filename()):
        raise RuntimeError(
            "Indexing has failed: %s does not exist."
            % indexer.get_indexed_filename()
        )

    # Generate an HTML report for the indexing run and register it.
    report = self.Report()
    report.set_experiments_filename(indexer.get_experiments_filename())
    report.set_reflections_filename(indexer.get_indexed_filename())
    html_filename = os.path.join(
        self.get_working_directory(),
        "%i_dials.index.report.html" % report.get_xpid(),
    )
    report.set_html_filename(html_filename)
    report.run()
    FileHandler.record_html_file(
        "%s INDEX" % self.get_indexer_full_name(), html_filename
    )
    return indexer
def _compare_cell(self, c_ref, c_test):
"""Compare two sets of unit cell constants: if they differ by
less than 5% / 5 degrees return True, else False."""
for j in range(3):
if math.fabs((c_test[j] - c_ref[j]) / c_ref[j]) > 0.05:
return False
for j in range(3, 6):
if math.fabs(c_test[j] - c_ref[j]) > 5:
return False
return True
def get_solution(self):
    # FIXME I really need to clean up the code in here...
    wanted = self._indxr_input_lattice
    if wanted is None:
        # No lattice constraint: take either the P1 solution (number 1)
        # or the highest-numbered (highest-symmetry) solution.
        if PhilIndex.params.xia2.settings.integrate_p1:
            return copy.deepcopy(self._solutions[1])
        return copy.deepcopy(self._solutions[max(self._solutions.keys())])

    # A lattice was requested: scan the solutions, pruning every one that
    # does not match (and, when a cell was given, every matching solution
    # whose cell disagrees with it).
    check_cell = bool(self._indxr_input_cell)
    for key in list(self._solutions):
        candidate = self._solutions[key]
        if candidate["lattice"] != wanted:
            del self._solutions[key]
            continue
        if not check_cell:
            return copy.deepcopy(candidate)
        if self._compare_cell(self._indxr_input_cell, candidate["cell"]):
            return copy.deepcopy(candidate)
        del self._solutions[key]

    if check_cell:
        raise RuntimeError(
            "no solution for lattice %s with given cell" % wanted
        )
    raise RuntimeError("no solution for lattice %s" % wanted)
def _index_finish(self):
    """Estimate the low resolution limit from the lowest-resolution
    indexed reflection and store it on the indexer."""
    from cctbx import crystal, miller, uctbx

    indexed_path = self._indxr_payload["indexed_filename"]
    refl_table = flex.reflection_table.from_file(indexed_path)
    # Drop unindexed reflections, which carry the (0, 0, 0) index.
    hkl = refl_table["miller_index"]
    hkl = hkl.select(hkl != (0, 0, 0))

    # it isn't necessarily the 'p1_cell', but it should be the cell that
    # corresponds to the miller indices in the indexed.refl
    xtal_symmetry = crystal.symmetry(unit_cell=uctbx.unit_cell(self._p1_cell))
    d_max, d_min = miller.set(xtal_symmetry, hkl).d_max_min()
    d_max *= 1.05  # include an upper margin to avoid rounding errors
    logger.debug("Low resolution limit assigned as: %.2f", d_max)
    self._indxr_low_resolution = d_max
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Modules/Indexer/DialsIndexer.py",
"copies": "1",
"size": "33770",
"license": "bsd-3-clause",
"hash": -5231063319208537000,
"line_mean": 40.8982630273,
"line_max": 101,
"alpha_frac": 0.536600533,
"autogenerated": false,
"ratio": 4.105275954291272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006201829191048438,
"num_lines": 806
} |
# An individual Particle
class Particle(object):
    """A single particle rendered as a textured quad, with gravity,
    a random velocity and a fading lifespan."""

    def __init__(self, sprite):
        # Constant downward force applied on every update().
        self.gravity = PVector(0, 0.1)
        self.partSize = random(10, 60)
        half = self.partSize / 2
        # Build the particle's geometry: a quad textured with the sprite.
        self.part = createShape()
        self.part.beginShape(QUAD)
        self.part.noStroke()
        self.part.texture(sprite)
        self.part.normal(0, 0, 1)
        self.part.vertex(-half, -half, 0, 0)
        self.part.vertex(half, -half, sprite.width, 0)
        self.part.vertex(half, half, sprite.width, sprite.height)
        self.part.vertex(-half, half, 0, sprite.height)
        self.part.endShape()
        # Track the particle centre separately from the shape's matrix.
        self.center = PVector()
        # Start life at the centre of the screen.
        self.rebirth(width / 2, height / 2)

    def getShape(self):
        """Expose the underlying PShape for batched drawing."""
        return self.part

    def rebirth(self, x, y):
        """Reset the particle at (x, y) with a fresh random velocity."""
        heading = random(TWO_PI)
        magnitude = random(0.5, 4)
        # A velocity with random angle and magnitude.
        self.velocity = PVector.fromAngle(heading)
        self.velocity.mult(magnitude)
        self.lifespan = 255
        # Position the shape via its transform matrix.
        self.part.resetMatrix()
        self.part.translate(x, y)
        # Keep the centre vector in sync with the shape.
        self.center.set(x, y, 0)

    def isDead(self):
        """True once the particle leaves the screen or its life runs out."""
        off_x = self.center.x > width or self.center.x < 0
        off_y = self.center.y > height or self.center.y < 0
        return off_x or off_y or self.lifespan < 0

    def update(self):
        """Advance one frame: age, accelerate, fade and move."""
        self.lifespan = self.lifespan - 1
        self.velocity.add(self.gravity)
        # Fade out as the lifespan decreases.
        self.part.setTint(color(255, self.lifespan))
        # Move the shape and keep the centre vector in step with it.
        self.part.translate(self.velocity.x, self.velocity.y)
        self.center.add(self.velocity)
| {
"repo_name": "jdf/processing.py",
"path": "mode/examples/Topics/Create Shapes/ParticleSystemPShape/particle.py",
"copies": "4",
"size": "2115",
"license": "apache-2.0",
"hash": -84366065868247300,
"line_mean": 33.6721311475,
"line_max": 72,
"alpha_frac": 0.5735224586,
"autogenerated": false,
"ratio": 3.5969387755102042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6170461234110205,
"avg_score": null,
"num_lines": null
} |
"""An individual treadmill spawn instance."""
import logging
import os
import yaml
from treadmill.spawn import utils as spawn_utils
_LOGGER = logging.getLogger(__name__)
class Instance(object):
    """Treadmill spawn instance.

    Wraps a two-document YAML manifest file: the first document holds
    runtime settings, the second the application manifest.
    """

    __slots__ = (
        'id',
        'proid',
        'name',
        'settings',
        'manifest',
        'manifest_path'
    )

    def __init__(self, manifest_path):
        self.manifest_path = manifest_path
        # Instance id is the manifest file name without its extension.
        self.id = os.path.splitext(os.path.basename(self.manifest_path))[0]
        self.proid = spawn_utils.get_user_safe(self.manifest_path)
        # Defaults; the manifest's first document may override any of them.
        self.settings = {
            'name': self.id,
            'stop': True,
            'reconnect': False,
            'reconnect_timeout': 0
        }
        self.manifest = None
        self._read_manifest_file()
        self.name = '{0}.{1}'.format(self.proid, self.settings['name'])

    def _read_manifest_file(self):
        """Reads the YAML (manifest) file contents."""
        docs = []
        try:
            # Use a context manager so the file handle is always closed;
            # the previous version leaked the open stream.
            with open(self.manifest_path, "r") as stream:
                manifest_contents = stream.read()
            # NOTE(review): yaml.load_all is unsafe on untrusted input;
            # consider yaml.safe_load_all if manifests need no custom tags.
            generator = yaml.load_all(manifest_contents)
            for doc in generator:
                docs.append(doc)
        except (IOError, yaml.YAMLError) as ex:
            # Best-effort: log and leave defaults in place.
            _LOGGER.error(ex)
            return
        if len(docs) < 2:
            _LOGGER.error("YAML file needs to contain 2 docs")
            return
        # First document: runtime settings; second: the app manifest.
        self.settings.update(docs[0])
        self.manifest = docs[1]
| {
"repo_name": "keithhendry/treadmill",
"path": "treadmill/spawn/instance.py",
"copies": "3",
"size": "1514",
"license": "apache-2.0",
"hash": -3171822152896527400,
"line_mean": 24.2333333333,
"line_max": 75,
"alpha_frac": 0.5462351387,
"autogenerated": false,
"ratio": 3.9324675324675327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5978702671167533,
"avg_score": null,
"num_lines": null
} |
"""An individual treadmill spawn instance.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import logging
import os
from treadmill.spawn import utils as spawn_utils
from treadmill import yamlwrapper as yaml
_LOGGER = logging.getLogger(__name__)
class Instance:
    """Treadmill spawn instance.

    Wraps a two-document YAML manifest file: the first document holds
    runtime settings, the second the application manifest.
    """

    __slots__ = (
        'id',
        'proid',
        'name',
        'settings',
        'manifest',
        'manifest_path'
    )

    def __init__(self, manifest_path):
        self.manifest_path = manifest_path
        # Instance id is the manifest file name without its extension.
        self.id = os.path.splitext(os.path.basename(self.manifest_path))[0]
        self.proid = spawn_utils.get_user_safe(self.manifest_path)
        # Defaults; the manifest's first document may override any of them.
        self.settings = {
            'name': self.id,
            'stop': True,
            'reconnect': False,
            'reconnect_timeout': 0
        }
        self.manifest = None
        self._read_manifest_file()
        self.name = '{0}.{1}'.format(self.proid, self.settings['name'])

    def _read_manifest_file(self):
        """Reads the YAML (manifest) file contents."""
        docs = []
        try:
            # Use a context manager so the file handle is always closed;
            # the previous version leaked the open stream.
            with io.open(self.manifest_path, 'r') as stream:
                manifest_contents = stream.read()
            generator = yaml.load_all(manifest_contents)
            for doc in generator:
                docs.append(doc)
        except (IOError, ValueError, yaml.YAMLError) as ex:
            # Best-effort: log and leave defaults in place.
            _LOGGER.error(ex)
            return
        if len(docs) < 2:
            _LOGGER.error('YAML file needs to contain 2 docs')
            return
        # First document: runtime settings; second: the app manifest.
        self.settings.update(docs[0])
        self.manifest = docs[1]
        # Normalise an empty second document to an empty manifest.
        if self.manifest is None:
            self.manifest = {}
| {
"repo_name": "Morgan-Stanley/treadmill",
"path": "lib/python/treadmill/spawn/instance.py",
"copies": "2",
"size": "1778",
"license": "apache-2.0",
"hash": -2939651276752070700,
"line_mean": 24.4,
"line_max": 75,
"alpha_frac": 0.5697412823,
"autogenerated": false,
"ratio": 3.9599109131403116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5529652195440312,
"avg_score": null,
"num_lines": null
} |
"""An inflow wind model
@moduleauthor:: Juan P. Murcia <jumu@dtu.dk>
"""
import numpy as np
def get_ABL_U(z, Ur, zr, type='log', **kwargs):
    """Undisturbed atmospheric-boundary-layer wind speed at height(s) z.

    Parameters
    ----------
    z: np.array, float
        Heights to evaluate the streamwise wind speed
    Ur: float
        Reference wind speed
    zr: float
        Reference height
    type: str, optional
        ABL model (matched case-insensitively):

        'log': Log law,  U = (u*/kappa) * ln(z / z0)
            kwargs: z0 -- characteristic roughness length of the terrain
        'pow': Power law,  U = Ur * (z / zr)**alpha
            kwargs: alpha -- shear exponent
        'MOB': Monin-Obukhov profile from [1]
            kwargs: z0 -- characteristic roughness length of the terrain
                    L  -- Obukhov length
                    zi -- boundary layer height (accepted for interface
                          compatibility; not used in the profile below)

    Returns
    -------
    U: np.array, float
        Axial wind speed [m/s]

    Raises
    ------
    ValueError
        If *type* matches none of 'log', 'pow', 'MOB'.

    References
    ----------
    [1] A. Pena, T. Mikkelsen, S.-E. Gryning, C.B. Hasager,
        A.N. Hahmann, M. Badger, et al., Offshore vertical wind shear,
        DTU Wind Energy-E-Report-0005(EN), Technical University of
        Denmark, 2012.

    Notes
    -----
    The previous default ``type='Log'`` matched none of the branches
    (they compare against lowercase 'log'), so calls relying on the
    default silently returned None.  The default is now 'log' and the
    comparison is case-insensitive.
    """
    kind = type.lower()
    kappa = 0.4  # Von Karman constant

    if kind == 'log':
        if 'z0' in kwargs:
            z0 = kwargs['z0']
        else:  # Default
            print("Using default LogLaw characteristic roughness ",
                  "length of the terrain: z0 = 0.0002 (Offshore)")
            z0 = 0.0002
        us = Ur * kappa / np.log(zr / z0)  # friction velocity
        return us / kappa * np.log(z / z0)

    if kind == 'pow':
        if 'alpha' in kwargs:
            alpha = kwargs['alpha']
        else:  # Default
            print("Using default ABL power law shear coefficient: ",
                  "alpha = 0.143 (Offshore)")
            alpha = 0.143
        return Ur * (z / zr)**alpha

    if kind == 'mob':
        # Roughness length
        if 'z0' in kwargs:
            z0 = kwargs['z0']
        else:
            print("Using default MOB characteristic roughness ",
                  "length of the terrain: z0 = 0.0002 (Offshore)")
            z0 = 0.0002
        # Stability
        if 'L' in kwargs:
            L = kwargs['L']
        else:
            print("Using default MOB Monin-Obukhov length: ",
                  "L = -1000 (Neutral from unstable asymptote)")
            L = -1000.
        # ABL height (kept for interface compatibility; unused below)
        if 'zi' in kwargs:
            zi = kwargs['zi']
        else:
            print("Using default MOB ABL height: ",
                  "zi = 400 [m]")
            zi = 400.

        if L > 0:  # Stable atmospheric conditions
            phi_m = -4.7 * z / L
            phi_m_r = -4.7 * zr / L
        else:  # L < 0: unstable atmospheric conditions
            x = (1. - 12. * z / L)**(1. / 3.)
            phi_m = (3. / 2.) * np.log((1. + x + x**2.) / 3.) - \
                np.sqrt(3.) * np.arctan((2. * x + 1) / np.sqrt(3.)) + \
                np.pi / np.sqrt(3.)
            x_r = (1. - 12. * zr / L)**(1. / 3.)
            phi_m_r = (3. / 2.) * np.log((1. + x_r + x_r**2.) / 3.) - \
                np.sqrt(3.) * np.arctan((2. * x_r + 1) / np.sqrt(3.)) + \
                np.pi / np.sqrt(3.)
        return Ur * np.log(z / z0 - phi_m) / np.log(zr / z0 - phi_m_r)

    raise ValueError("Unknown ABL model type: %r" % (type,))
def RotorAvg(f, H, R, dep='z', x=None, **kwargs):
    """Average a flow function over one or more rotor disks.

    .. math::
        Feq = \\int_0^{2\\pi} \\int_0^R f(r,\\theta)\\, \\frac{2 r}{R^2} dr\\, \\frac{d\\theta}{2\\pi}
            \\approx \\sum_i w_i f(r_i, \\theta_i)

    Gaussian quadrature using theta ~ Uniform(0, 2*pi) and
    r ~ Triangular(0, R) (density 2*r/R**2).

    Inputs
    ----------
    f: python function
        Function to be rotor averaged over multiple rotors.
    H: array
        Hub heights of the rotors.
    R: array
        Radii of the rotors.
    dep: str, optional
        Type of function dependency:
        'z'   -- f(z): height only
        'r'   -- f(r): axis-symmetric in the radius
        'yz'  -- f(y, z): horizontal position and height
        'xyz' -- f(x, y, z): also streamwise position; requires *x*
    x: array, optional
        Streamwise positions, one per rotor; required when dep='xyz'.
        (Previously the 'xyz' branch referenced an undefined name and
        always raised NameError.)

    Returns
    -------
    Feq: np.array, float
        Rotor averaged value of f for each rotor.

    Raises
    ------
    ValueError
        If dep='xyz' without *x*, or *dep* is unrecognized.
    """
    # 40-node quadrature rule: tensor product of a 4-point radial rule for
    # the triangular density with a 10-point azimuthal rule.  Columns are
    # (radial node in [0,1], azimuth node in [0,2*pi), weight); the
    # weights sum to 1.
    node_R, node_th, weight = np.array([[ 0.26349922998554242692 , 4.79436403870179805864 , 0.00579798753740115753 ],
        [ 0.26349922998554242692 , 5.13630491629471475079 , 0.01299684397858970851 ],
        [ 0.26349922998554242692 , 5.71955352542765460555 , 0.01905256317618122044 ],
        [ 0.26349922998554242692 , 0.20924454049880022999 , 0.02341643323656225281 ],
        [ 0.26349922998554242692 , 1.10309379714216659885 , 0.02569988335562909190 ],
        [ 0.26349922998554242692 , 2.03849885644762496284 , 0.02569988335562912660 ],
        [ 0.26349922998554242692 , 2.93234811309099407950 , 0.02341643323656214179 ],
        [ 0.26349922998554242692 , 3.70522443534172518653 , 0.01905256317618119616 ],
        [ 0.26349922998554242692 , 4.28847304447466459720 , 0.01299684397858971198 ],
        [ 0.26349922998554242692 , 4.63041392206758217753 , 0.00579798753740114539 ],
        [ 0.57446451431535072718 , 4.79436403870179805864 , 0.01086984853977092380 ],
        [ 0.57446451431535072718 , 5.13630491629471475079 , 0.02436599330905551281 ],
        [ 0.57446451431535072718 , 5.71955352542765460555 , 0.03571902745281423097 ],
        [ 0.57446451431535072718 , 0.20924454049880022999 , 0.04390024659093685194 ],
        [ 0.57446451431535072718 , 1.10309379714216659885 , 0.04818117282305908744 ],
        [ 0.57446451431535072718 , 2.03849885644762496284 , 0.04818117282305915683 ],
        [ 0.57446451431535072718 , 2.93234811309099407950 , 0.04390024659093664378 ],
        [ 0.57446451431535072718 , 3.70522443534172518653 , 0.03571902745281418240 ],
        [ 0.57446451431535072718 , 4.28847304447466459720 , 0.02436599330905552321 ],
        [ 0.57446451431535072718 , 4.63041392206758217753 , 0.01086984853977089951 ],
        [ 0.81852948743000586429 , 4.79436403870179805864 , 0.01086984853977090992 ],
        [ 0.81852948743000586429 , 5.13630491629471475079 , 0.02436599330905548505 ],
        [ 0.81852948743000586429 , 5.71955352542765460555 , 0.03571902745281418934 ],
        [ 0.81852948743000586429 , 0.20924454049880022999 , 0.04390024659093679643 ],
        [ 0.81852948743000586429 , 1.10309379714216659885 , 0.04818117282305903193 ],
        [ 0.81852948743000586429 , 2.03849885644762496284 , 0.04818117282305909438 ],
        [ 0.81852948743000586429 , 2.93234811309099407950 , 0.04390024659093658826 ],
        [ 0.81852948743000586429 , 3.70522443534172518653 , 0.03571902745281413383 ],
        [ 0.81852948743000586429 , 4.28847304447466459720 , 0.02436599330905549199 ],
        [ 0.81852948743000586429 , 4.63041392206758217753 , 0.01086984853977088737 ],
        [ 0.96465960618086743494 , 4.79436403870179805864 , 0.00579798753740116100 ],
        [ 0.96465960618086743494 , 5.13630491629471475079 , 0.01299684397858971545 ],
        [ 0.96465960618086743494 , 5.71955352542765460555 , 0.01905256317618123432 ],
        [ 0.96465960618086743494 , 0.20924454049880022999 , 0.02341643323656226669 ],
        [ 0.96465960618086743494 , 1.10309379714216659885 , 0.02569988335562910925 ],
        [ 0.96465960618086743494 , 2.03849885644762496284 , 0.02569988335562914394 ],
        [ 0.96465960618086743494 , 2.93234811309099407950 , 0.02341643323656215567 ],
        [ 0.96465960618086743494 , 3.70522443534172518653 , 0.01905256317618120657 ],
        [ 0.96465960618086743494 , 4.28847304447466459720 , 0.01299684397858972065 ],
        [ 0.96465960618086743494 , 4.63041392206758217753 , 0.00579798753740114886 ]]).T

    # Broadcast nodes/weights across the rotors: rows are quadrature
    # nodes, columns are rotors.
    H_msh, node_R_msh = np.meshgrid(H, node_R)
    R_msh, node_th_msh = np.meshgrid(R, node_th)
    _, weight_msh = np.meshgrid(H, weight)

    if dep == 'z':
        ze = H_msh + R_msh * node_R_msh * np.sin(node_th_msh)
        f_msh = f(ze, **kwargs)
    elif dep == 'r':
        ye = R_msh * node_R_msh * np.cos(node_th_msh)
        ze = H_msh + R_msh * node_R_msh * np.sin(node_th_msh)
        re = np.sqrt(ye**2. + ze**2.)
        f_msh = f(re, **kwargs)
    elif dep == 'yz':
        ye = R_msh * node_R_msh * np.cos(node_th_msh)
        ze = H_msh + R_msh * node_R_msh * np.sin(node_th_msh)
        f_msh = f(ye, ze, **kwargs)
    elif dep == 'xyz':
        if x is None:
            raise ValueError("dep='xyz' requires the streamwise positions 'x'")
        xe, weight_msh = np.meshgrid(x, weight)
        ye = R_msh * node_R_msh * np.cos(node_th_msh)
        ze = H_msh + R_msh * node_R_msh * np.sin(node_th_msh)
        f_msh = f(xe, ye, ze, **kwargs)
    else:
        raise ValueError("Unknown dependency type: %r" % (dep,))

    return np.sum(weight_msh * f_msh, axis=0)
| {
"repo_name": "DTUWindEnergy/FUSED-Wake",
"path": "fusedwake/Inflow.py",
"copies": "1",
"size": "9422",
"license": "mit",
"hash": -1789563178772687400,
"line_mean": 36.684,
"line_max": 119,
"alpha_frac": 0.5781764144,
"autogenerated": false,
"ratio": 2.700200630553167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37783770449531673,
"avg_score": null,
"num_lines": null
} |
## an initial version
## Transform the tfrecord to slim data provider format
import numpy
import tensorflow as tf
import os
slim = tf.contrib.slim
# Human-readable descriptions for each item produced by the decoder in
# get_datasets() below.
ITEMS_TO_DESCRIPTIONS = {
    'image': 'slim.tfexample_decoder.Image',
    'shape': 'shape',
    'height': 'height',
    'width': 'width',
    'object/bbox': 'box',
    'object/label': 'label'
}
# Number of examples per split; only a 'train' split is defined here.
SPLITS_TO_SIZES = {
    'train': 858750,
}
# Number of object classes (presumably background + text -- confirm
# against the label encoding used when the TFRecords were written).
NUM_CLASSES = 2
def get_datasets(data_dir, file_pattern='*.tfrecord'):
    """Build a tf-slim Dataset over the TFRecord files under *data_dir*.

    data_dir: directory containing the TFRecord shards.
    file_pattern: glob pattern joined onto data_dir to locate shards.
    Returns a slim.dataset.Dataset wired with a TFExampleDecoder that
    yields image, shape, height, width, object/bbox and object/label.
    """
    file_patterns = os.path.join(data_dir, file_pattern)
    # print() call form works under both Python 2 and 3; the original
    # used a Python-2-only print statement.
    print('file_path: {}'.format(file_patterns))
    reader = tf.TFRecordReader
    # Wire format of each serialized tf.Example.
    keys_to_features = {
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/channels': tf.FixedLenFeature([1], tf.int64),
        'image/shape': tf.FixedLenFeature([3], tf.int64),
        'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
        'image/format': tf.FixedLenFeature([], tf.string, default_value='jpeg'),
        'image/encoded': tf.FixedLenFeature([], tf.string, default_value=''),
        'image/name': tf.VarLenFeature(dtype=tf.string),
    }
    # Decoded tensors exposed to the data provider.
    items_to_handlers = {
        'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
        'shape': slim.tfexample_decoder.Tensor('image/shape'),
        'height': slim.tfexample_decoder.Tensor('image/height'),
        'width': slim.tfexample_decoder.Tensor('image/width'),
        'object/bbox': slim.tfexample_decoder.BoundingBox(
            ['xmin', 'ymin', 'xmax', 'ymax'], 'image/object/bbox/'),
        'object/label': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
    }
    decoder = slim.tfexample_decoder.TFExampleDecoder(
        keys_to_features, items_to_handlers)
    labels_to_names = None
    return slim.dataset.Dataset(
        data_sources=file_patterns,
        reader=reader,
        decoder=decoder,
        num_samples=SPLITS_TO_SIZES['train'],
        items_to_descriptions=ITEMS_TO_DESCRIPTIONS,
        num_classes=NUM_CLASSES,
        labels_to_names=labels_to_names)
"repo_name": "liuzz1983/open_vision",
"path": "openvision/datasets/sythtextprovider.py",
"copies": "1",
"size": "2593",
"license": "mit",
"hash": -5948938730038087000,
"line_mean": 35.0277777778,
"line_max": 85,
"alpha_frac": 0.6436559969,
"autogenerated": false,
"ratio": 3.4163372859025034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4559993282802503,
"avg_score": null,
"num_lines": null
} |
'''An in-memory backend'''
import collections
from ..exceptions import DownloadException, DeleteException
class Memory(object):
    '''An in-memory backend.

    Stores objects in nested dicts: bucket name -> {key: payload}.
    The retries/headers/extra arguments are accepted for interface
    compatibility with other backends and ignored.
    '''

    def __init__(self):
        # bucket name -> {key: payload read from the uploaded fobj}
        self.buckets = collections.defaultdict(dict)

    def download(self, bucket, key, fobj, retries, headers=None):
        '''Download the contents of bucket/key to fobj'''
        objects = self.buckets[bucket]
        # Test membership explicitly: the previous truthiness check
        # (`if not obj`) rejected legitimately-empty stored objects.
        if key not in objects:
            raise DownloadException('%s / %s not found' % (bucket, key))
        fobj.write(objects[key])

    def upload(self, bucket, key, fobj, retries, headers=None, extra=None):
        '''Upload the contents of fobj to bucket/key with headers'''
        self.buckets[bucket][key] = fobj.read()

    def list(self, bucket, prefix=None, delimiter=None, retries=None, headers=None):
        '''List the contents of a bucket.'''
        if prefix is None:
            prefix = ''
        keys = (key for key in self.buckets[bucket].keys() if key.startswith(prefix))
        if delimiter:
            # Collapse keys to their first delimiter-separated component,
            # emulating "directory" listings.
            return (prefix for prefix in set(key.split(delimiter, 1)[0] for key in keys))
        else:
            return keys

    def delete(self, bucket, key, retries, headers=None):
        '''Delete bucket/key with headers'''
        if key in self.buckets[bucket]:
            del self.buckets[bucket][key]
        else:
            raise DeleteException('Failed to delete %s/%s' % (bucket, key))
| {
"repo_name": "seomoz/s3po",
"path": "s3po/backends/memory.py",
"copies": "1",
"size": "1433",
"license": "mit",
"hash": 949973569496496000,
"line_mean": 33.9512195122,
"line_max": 89,
"alpha_frac": 0.6099092812,
"autogenerated": false,
"ratio": 4.106017191977077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008415542431758108,
"num_lines": 41
} |
# An in-place quicksort.
# http://en.wikipedia.org/wiki/Quicksort
import random
def quicksort(array):
    """Sort *array* in place with the classic in-place quicksort."""
    _quicksort(array, 0, len(array) - 1)


def _quicksort(array, left, right):
    # Ranges of length 0 or 1 are already sorted.
    if left >= right:
        return
    pivot_index = _partition(array, left, right)
    _quicksort(array, left, pivot_index - 1)
    _quicksort(array, pivot_index + 1, right)


def _partition(array, left, right):
    """Partition array[left..right] around a pivot; return its final index."""
    pivot_pos = _choose_pivot(array, left, right)
    pivot = array[pivot_pos]
    # Park the pivot at the right end while partitioning.
    array[pivot_pos], array[right] = array[right], array[pivot_pos]
    store = left
    for j in range(left, right):  # right itself holds the pivot
        if array[j] < pivot:
            array[j], array[store] = array[store], array[j]
            store += 1
    # Put the pivot into its final position.
    array[store], array[right] = array[right], array[store]
    return store


def _choose_pivot(array, left, right):
    # Middle element; randomized or median-of-three strategies would
    # also work here.
    return (left + right) // 2
if __name__ == '__main__':
    # Demo: sort 20 random integers in [-10, 10] and show before/after.
    seq = [random.randint(-10, 10) for n in range(20)]
    print("Unsorted", seq)
    quicksort(seq)
    print("Sorted", seq)
| {
"repo_name": "calebperkins/algorithms",
"path": "algorithms/quicksort.py",
"copies": "1",
"size": "1094",
"license": "mit",
"hash": -7380096473533658000,
"line_mean": 23.3111111111,
"line_max": 73,
"alpha_frac": 0.5868372943,
"autogenerated": false,
"ratio": 2.9972602739726026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9079982341935154,
"avg_score": 0.0008230452674897119,
"num_lines": 45
} |
"""An in-process kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
import logging
import sys
from IPython.core.interactiveshell import InteractiveShellABC
from ipykernel.jsonutil import json_clean
from traitlets import Any, Enum, Instance, List, Type, default
from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from .constants import INPROCESS_KEY
from .socket import DummySocket
from ..iostream import OutStream, BackgroundSocket, IOPubThread
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class InProcessKernel(IPythonKernel):
    """An IPython kernel that runs in the same process as its frontends.

    ZMQ sockets are replaced with in-memory dummies (``DummySocket``) and
    IOPub traffic is dispatched directly to the connected frontends, so no
    network transport is involved.
    """
    #-------------------------------------------------------------------------
    # InProcessKernel interface
    #-------------------------------------------------------------------------
    # The frontends connected to this kernel.
    frontends = List(
        Instance('ipykernel.inprocess.client.InProcessKernelClient',
                 allow_none=True)
    )
    # The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation for the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
               default_value='inline')
    # Holds the reply to an outstanding input request; None while waiting
    # (see _input_request below).
    raw_input_str = Any()
    # Streams swapped in for sys.stdout/sys.stderr during execution
    # (see _redirected_io).
    stdout = Any()
    stderr = Any()
    #-------------------------------------------------------------------------
    # Kernel interface
    #-------------------------------------------------------------------------
    shell_class = Type(allow_none=True)
    # No real ZMQ streams exist in-process; these traits stay empty/unused.
    shell_streams = List()
    control_stream = Any()
    _underlying_iopub_socket = Instance(DummySocket, ())
    iopub_thread = Instance(IOPubThread)
    @default('iopub_thread')
    def _default_iopub_thread(self):
        # Wrap the dummy socket in an IOPubThread so OutStream and friends
        # behave exactly as they do in a real kernel.
        thread = IOPubThread(self._underlying_iopub_socket)
        thread.start()
        return thread
    iopub_socket = Instance(BackgroundSocket)
    @default('iopub_socket')
    def _default_iopub_socket(self):
        return self.iopub_thread.background_socket
    stdin_socket = Instance(DummySocket, ())
    def __init__(self, **traits):
        super(InProcessKernel, self).__init__(**traits)
        # Forward every message published on the dummy IOPub socket straight
        # to the frontends (no ZMQ routing in-process).
        self._underlying_iopub_socket.observe(self._io_dispatch, names=['message_sent'])
        self.shell.kernel = self
    def execute_request(self, stream, ident, parent):
        """ Override for temporary IO redirection. """
        with self._redirected_io():
            super(InProcessKernel, self).execute_request(stream, ident, parent)
    def start(self):
        """ Override registration of dispatchers for streams. """
        # Nothing to register in-process; just make the shell runnable again.
        self.shell.exit_now = False
    def _abort_queues(self):
        """ The in-process kernel doesn't abort requests. """
        pass
    def _input_request(self, prompt, ident, parent, password=False):
        """Ask the frontend that sent *parent* for one line of input.

        Returns the user's reply, or '' when no matching frontend is
        connected.
        """
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()
        # Send the input request.
        content = json_clean(dict(prompt=prompt, password=password))
        msg = self.session.msg(u'input_request', content, parent)
        for frontend in self.frontends:
            if frontend.session.session == parent['header']['session']:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error('No frontend found for raw_input request')
            return str()
        # Await a response.
        # Busy-loop pumping the matched frontend's stdin channel until its
        # handler stores the reply in raw_input_str.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str
    #-------------------------------------------------------------------------
    # Protected interface
    #-------------------------------------------------------------------------
    @contextmanager
    def _redirected_io(self):
        """ Temporarily redirect IO to the kernel.
        """
        # NOTE(review): not exception-safe — an error in the body skips the
        # restore; a try/finally would be more robust. Left as-is here.
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = self.stdout, self.stderr
        yield
        sys.stdout, sys.stderr = sys_stdout, sys_stderr
    #------ Trait change handlers --------------------------------------------
    def _io_dispatch(self, change):
        """ Called when a message is sent to the IO socket.
        """
        # Pull the just-sent message back off the dummy socket and hand it
        # to every connected frontend's iopub channel.
        ident, msg = self.session.recv(self.iopub_socket, copy=False)
        for frontend in self.frontends:
            frontend.iopub_channel.call_handlers(msg)
    #------ Trait initializers -----------------------------------------------
    @default('log')
    def _default_log(self):
        return logging.getLogger(__name__)
    @default('session')
    def _default_session(self):
        # Imported lazily to avoid a hard dependency at module import time.
        from jupyter_client.session import Session
        return Session(parent=self, key=INPROCESS_KEY)
    @default('shell_class')
    def _default_shell_class(self):
        return InProcessInteractiveShell
    @default('stdout')
    def _default_stdout(self):
        return OutStream(self.session, self.iopub_thread, u'stdout')
    @default('stderr')
    def _default_stderr(self):
        return OutStream(self.session, self.iopub_thread, u'stderr')
#-----------------------------------------------------------------------------
# Interactive shell subclass
#-----------------------------------------------------------------------------
class InProcessInteractiveShell(ZMQInteractiveShell):
    """Interactive shell variant used by the in-process kernel.

    The GUI/matplotlib/pylab hooks fall back to the kernel's configured
    ``gui`` trait whenever the caller does not name a toolkit explicitly.
    """

    kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
                      allow_none=True)

    #-------------------------------------------------------------------------
    # InteractiveShell interface
    #-------------------------------------------------------------------------
    def enable_gui(self, gui=None):
        """Enable GUI integration for the kernel."""
        from ipykernel.eventloops import enable_gui
        gui = gui or self.kernel.gui
        enable_gui(gui, kernel=self.kernel)
        self.active_eventloop = gui

    def enable_matplotlib(self, gui=None):
        """Enable matplotlib integration for the kernel."""
        gui = gui or self.kernel.gui
        return super(InProcessInteractiveShell, self).enable_matplotlib(gui)

    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime."""
        gui = gui or self.kernel.gui
        return super(InProcessInteractiveShell, self).enable_pylab(
            gui, import_all, welcome_message)

InteractiveShellABC.register(InProcessInteractiveShell)
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/ipykernel/inprocess/ipkernel.py",
"copies": "1",
"size": "6861",
"license": "mit",
"hash": 4417990565750485000,
"line_mean": 34.734375,
"line_max": 88,
"alpha_frac": 0.5512315989,
"autogenerated": false,
"ratio": 4.63894523326572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5690176832165721,
"avg_score": null,
"num_lines": null
} |
"""An instance of an FMOD Studio Event."""
from ctypes import byref, c_bool, c_float, c_int, c_void_p
from ..channel_group import ChannelGroup
from ..utils import prepare_str
from .enums import PLAYBACK_STATE
from .studio_object import StudioObject
class EventInstance(StudioObject):
    """An instance of an FMOD Studio Event."""
    # Native calls resolve as FMOD_Studio_EventInstance_<name>.
    function_prefix = "FMOD_Studio_EventInstance"
    def start(self):
        """Starts playback.
        If the instance was already playing then calling this function will
        restart the event.
        """
        self._call("Start")
    def stop(self):
        """Stops playback."""
        self._call("Stop")
    @property
    def paused(self):
        """The pause state.
        True if the event instance is paused.
        """
        paused = c_bool()
        self._call("GetPaused", byref(paused))
        return paused.value
    @paused.setter
    def paused(self, val):
        """Set the pause state.
        :param bool val: The desired pause state. True = paused, False =
            unpaused.
        """
        self._call("SetPaused", c_bool(val))
    @property
    def playback_state(self):
        """The playback state.
        If the instance is invalid, then the state will be STOPPED.
        """
        state = c_int()
        self._call("GetPlaybackState", byref(state))
        return PLAYBACK_STATE(state.value)
    def get_parameter_by_name(self, name):
        """A parameter value.
        Returns a ``(value, actual)`` pair of floats for the parameter.
        :param str name: Parameter name (case-insensitive)."""
        val = c_float()
        actual = c_float()
        self._call("GetParameterByName", prepare_str(name), byref(val), byref(actual))
        return (val.value, actual.value)
    def set_parameter_by_name(self, name, value, ignoreseekspeed=False):
        """Set a parameter value by name.
        :param str name: Parameter name (case-insensitive).
        :param float value: Value for given name.
        :param bool ignoreseekspeed: Specifies whether to ignore the
            parameter's seek speed and set the value immediately.
        """
        # NOTE(review): ignoreseekspeed is passed as a plain Python bool
        # (ctypes marshals it as an int); other setters wrap with c_bool —
        # confirm this is intentional.
        self._call(
            "SetParameterByName", prepare_str(name), c_float(value), ignoreseekspeed
        )
    @property
    def channel_group(self):
        """The core channel group corresponding to the master track.
        Until the event instance has been fully created calling this property
        will raise an :py:exc:`~pyfmodex.exceptions.FmodError` with code
        :py:attr:`~pyfmodex.enums.RESULT.STUDIO_NOT_LOADED`.
        """
        ptr = c_void_p()
        self._call("GetChannelGroup", byref(ptr))
        return ChannelGroup(ptr)
    @property
    def reverb_level(self):
        """Not Implemented."""
        raise NotImplementedError
    @reverb_level.setter
    def reverb_level(self, level):
        # Setter is intentionally unimplemented as well.
        raise NotImplementedError
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/studio/event_instance.py",
"copies": "1",
"size": "2848",
"license": "mit",
"hash": 329387678341367040,
"line_mean": 28.3608247423,
"line_max": 86,
"alpha_frac": 0.6169241573,
"autogenerated": false,
"ratio": 4.097841726618705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5214765883918704,
"avg_score": null,
"num_lines": null
} |
""" An instance of data collection corresponding to a single step within a
Procedure.
:Authors: Sana dev team
:Version: 2.0
"""
import mimetypes, os
from django.db import models
from django.utils.translation import ugettext as _
from mds.api.utils import make_uuid, guess_fext
_app = "core"
class Observation(models.Model):
    """ A piece of data collected about a subject during an external_id"""
    class Meta:
        app_label = "core"
        unique_together = (('encounter', 'node'),)
        ordering = ["-created"]
    def __unicode__(self):
        return "%s %s" % (
            self.concept.name,
            unicode(self.value),
        )
    uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
    """ A universally unique identifier """
    encounter = models.ForeignKey('Encounter', to_field='uuid')
    """ The instance of a procedure which this observation is associated with. """
    node = models.CharField(max_length=255)
    '''Unique node id within the external_id as defined by the original procedure.'''
    concept = models.ForeignKey('Concept', to_field='uuid')
    """ A dictionary entry which defines the type of information stored."""
    value_text = models.CharField(max_length=255)
    """ A textual representation of the observation data. For observations
        which collect file data this will be the value of the absolute
        url to the file
    """
    value_complex = models.FileField(upload_to='{0}/observation'.format(_app), blank=True,)
    """ File object holder """
    # next two are necessary purely for packetizing
    _complex_size = models.IntegerField(default=0)
    """ Size of complex data in bytes """
    _complex_progress = models.IntegerField(default=0)
    """ Bytes recieved for value_complex when packetized """
    created = models.DateTimeField(auto_now_add=True)
    """ When the object was created """
    modified = models.DateTimeField(auto_now=True)
    """ updated on modification """
    voided = models.BooleanField(default=False)
    @property
    def subject(self):
        """ Convenience wrapper around Encounter.subject """
        if self.encounter:
            subj = self.encounter.subject
        else:
            subj = None
        return subj
    @property
    def is_complex(self):
        """ Convenience wrapper around Concept.is_complex """
        # Fix: the original fell through with a bare ``False`` expression and
        # implicitly returned None when no concept was set.
        if self.concept:
            return self.concept.is_complex
        return False
    @property
    def data_type(self):
        """ Convenience wrapper around Concept.data_type """
        if self.concept:
            return self.concept.datatype
        else:
            return None
    @property
    def device(self):
        """ Convenience wrapper around Encounter.device """
        if self.encounter and self.encounter.device:
            return self.encounter.device.name
        else:
            return None
    @property
    def question(self):
        """ Convenience property for matching the object to the procedure
            instruction-i.e. the question on a form.
        """
        return self.node
    def open(self, mode="w"):
        """ Opens the on-disk file backing a complex observation.

        Raises for non-complex observations; creates a stub file (and its
        directory tree) on first access.
        """
        if not self.is_complex:
            raise Exception("Attempt to open file for non complex observation")
        path, _ = os.path.split(self.value_complex.path)
        # make sure we have the directory structure
        if not os.path.exists(path):
            self.create_file()
        return open(self.value_complex.path, mode)
    def _generate_filename(self):
        """ Builds the canonical file name for this observation's data. """
        name = '%s-%s' % (self.encounter.uuid, self.node)
        ext = guess_fext(self.concept.mimetype)
        # Fix: the original computed fname but never returned it.
        return '%s.%s' % (name, ext)
    def create_file(self, append=None):
        """ Creates a zero length file stub on disk
            Parameters:
                append
                    Extra string to append to file name.
        """
        name = '%s-%s' % (self.encounter.uuid, self.node)
        if append:
            name += '-%s' % append
        ext = guess_fext(self.concept.mimetype)
        # NOTE(review): no '.' between name and extension here while
        # _generate_filename uses '%s.%s' — confirm which form guess_fext
        # expects (it may already include the dot).
        fname = '%s%s' % (name, ext)
        self.value_complex = self.value_complex.field.generate_filename(self, fname)
        path, _ = os.path.split(self.value_complex.path)
        # make sure we have the directory structure
        if not os.path.exists(path):
            os.makedirs(path)
        # create the stub and commit if no exceptions
        open(self.value_complex.path, "w").close()
        self.save()
    @property
    def complete(self):
        """ True when all bytes of a packetized upload have arrived. """
        # Fix: ``is 0`` compared identity, not value; it only worked on
        # CPython thanks to small-int caching.
        if self._complex_size == 0:
            return True
        else:
            return not self._complex_progress < self._complex_size
    @property
    def value(self):
        """ The file object for complex data, else the text value. """
        if self.is_complex:
            return self.value_complex
        else:
            return self.value_text
    @property
    def upload_progress(self):
        """ Human-readable 'received/total' for complex uploads. """
        if self.is_complex:
            return "%d/%d" % (self._complex_progress, self._complex_size)
        else:
            return u"NA"
    def encounter_uuid(self):
        """ Convenience accessor for the parent encounter's uuid. """
        return self.encounter.uuid
    def save(self,*args,**kwargs):
        # Complex values live on disk; keep a readable placeholder in the
        # text column.
        if self.is_complex:
            self.value_text = _('complex data')
        super(Observation,self).save(*args, **kwargs)
| {
"repo_name": "SanaMobile/sana.mds",
"path": "src/mds/core/models/observation.py",
"copies": "1",
"size": "5330",
"license": "bsd-3-clause",
"hash": -150896457585635070,
"line_mean": 30.9161676647,
"line_max": 91,
"alpha_frac": 0.5960600375,
"autogenerated": false,
"ratio": 4.213438735177865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025979398976693577,
"num_lines": 167
} |
""" An instance of data collection corresponding to a single step within a
Procedure.
:Authors: Sana dev team
:Version: 2.0
"""
import mimetypes, os
from django.db import models
from mds.api.utils import make_uuid, guess_fext
_app = "core"
class Observation(models.Model):
    """ A piece of data collected about a subject during an external_id"""
    class Meta:
        app_label = "core"
        unique_together = (('encounter', 'node'),)
        ordering = ["-created"]
    def __unicode__(self):
        return "%s %s %s %s" % (self.subject.full_name,
                                self.node,
                                self.concept.name,
                                unicode(self.value), )
    uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
    """ A universally unique identifier """
    encounter = models.ForeignKey('Encounter', to_field='uuid')
    """ The instance of a procedure which this observation is associated with. """
    node = models.CharField(max_length=255)
    '''Unique node id within the external_id as defined by the original procedure.'''
    concept = models.ForeignKey('Concept', to_field='uuid')
    """ A dictionary entry which defines the type of information stored."""
    value_text = models.CharField(max_length=255)
    """ A textual representation of the observation data. For observations
        which collect file data this will be the value of the absolute
        url to the file
    """
    value_complex = models.FileField(upload_to='{0}/observation'.format(_app), blank=True,)
    """ File object holder """
    # next two are necessary purely for packetizing
    _complex_size = models.IntegerField(default=0)
    """ Size of complex data in bytes """
    _complex_progress = models.IntegerField(default=0)
    """ Bytes recieved for value_complex when packetized """
    created = models.DateTimeField(auto_now_add=True)
    """ When the object was created """
    modified = models.DateTimeField(auto_now=True)
    """ updated on modification """
    voided = models.BooleanField(default=False)
    @property
    def subject(self):
        """ Convenience wrapper around Encounter.subject """
        if self.encounter:
            subj = self.encounter.subject
        else:
            subj = None
        return subj
    @property
    def is_complex(self):
        """ Convenience wrapper around Concept.is_complex """
        # Fix: the original fell through with a bare ``False`` expression and
        # implicitly returned None when no concept was set.
        if self.concept:
            return self.concept.is_complex
        return False
    @property
    def data_type(self):
        """ Convenience wrapper around Concept.data_type """
        if self.concept:
            return self.concept.datatype
        else:
            return None
    @property
    def device(self):
        """ Convenience wrapper around Encounter.device """
        if self.encounter and self.encounter.device:
            return self.encounter.device.name
        else:
            return None
    @property
    def question(self):
        """ Convenience property for matching the object to the procedure
            instruction-i.e. the question on a form.
        """
        return self.node
    def open(self, mode="w"):
        """ Opens the on-disk file backing a complex observation.

        Raises for non-complex observations; creates a stub file (and its
        directory tree) on first access.
        """
        if not self.is_complex:
            raise Exception("Attempt to open file for non complex observation")
        path, _ = os.path.split(self.value_complex.path)
        # make sure we have the directory structure
        if not os.path.exists(path):
            self.create_file()
        return open(self.value_complex.path, mode)
    def _generate_filename(self):
        """ Builds the canonical file name for this observation's data. """
        name = '%s-%s' % (self.encounter.uuid, self.node)
        ext = guess_fext(self.concept.mimetype)
        # Fix: the original computed fname but never returned it.
        return '%s.%s' % (name, ext)
    def create_file(self, append=None):
        """ Creates a zero length file stub on disk
            Parameters:
                append
                    Extra string to append to file name.
        """
        name = '%s-%s' % (self.encounter.uuid, self.node)
        if append:
            name += '-%s' % append
        ext = guess_fext(self.concept.mimetype)
        # NOTE(review): no '.' between name and extension here while
        # _generate_filename uses '%s.%s' — confirm which form guess_fext
        # expects (it may already include the dot).
        fname = '%s%s' % (name, ext)
        self.value_complex = self.value_complex.field.generate_filename(self, fname)
        path, _ = os.path.split(self.value_complex.path)
        # make sure we have the directory structure
        if not os.path.exists(path):
            os.makedirs(path)
        # create the stub and commit if no exceptions
        open(self.value_complex.path, "w").close()
        self.save()
    @property
    def complete(self):
        """ True when all bytes of a packetized upload have arrived. """
        # Fix: ``is 0`` compared identity, not value; it only worked on
        # CPython thanks to small-int caching.
        if self._complex_size == 0:
            return True
        else:
            return not self._complex_progress < self._complex_size
    @property
    def value(self):
        """ The file object for complex data, else the text value. """
        if self.is_complex:
            return self.value_complex
        else:
            return self.value_text
    @property
    def upload_progress(self):
        """ Human-readable 'received/total' for complex uploads. """
        if self.is_complex:
            return "%d/%d" % (self._complex_progress, self._complex_size)
        else:
            return u"NA"
    def encounter_uuid(self):
        """ Convenience accessor for the parent encounter's uuid. """
        return self.encounter.uuid
| {
"repo_name": "rryan/sana.mds",
"path": "src/mds/core/models/observation.py",
"copies": "1",
"size": "5256",
"license": "bsd-3-clause",
"hash": -5769661608895717000,
"line_mean": 31.85,
"line_max": 91,
"alpha_frac": 0.5840943683,
"autogenerated": false,
"ratio": 4.287112561174552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.024901355585282513,
"num_lines": 160
} |
""" An instance of data collection resulting from executing a Procedure
on a Subject.
:Authors: Sana dev team
:Version: 2.0
"""
from django.db import models
from mds.api.utils import make_uuid
class Encounter(models.Model):
    """ A completed procedure, where data has been collected
    """
    class Meta:
        app_label = "core"
    def __unicode__(self):
        return "%s" % (self.uuid)
    uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
    """ A universally unique identifier """
    created = models.DateTimeField(auto_now_add=True)
    """ When the object was created """
    modified = models.DateTimeField(auto_now=True)
    """ updated on modification """
    procedure = models.ForeignKey('Procedure', to_field='uuid')
    """ The procedure used to collect this encounter """
    observer = models.ForeignKey('Observer', to_field='uuid')
    """ The entity which collected the data """
    device = models.ForeignKey('Device', to_field='uuid')
    """ The client which collected the encounter """
    subject = models.ForeignKey('Subject', to_field='uuid')
    """ The subject about whom the data was collected """
    concept = models.ForeignKey('Concept', to_field='uuid')
    """ A contextual term for the encounter."""
    @property
    def slug(self):
        return self.uuid
    #_uploaded = models.BooleanField(default=False)
    #""" Whether the saved procedure was uploaded to a remote queueing server. """
    #TODO move these to a manager class
    def flush(self):
        """ Removes the responses text and files for this Encounter """
        self.save()
        for obs in self.observation_set.all():
            obs.flush()
    def complete(self):
        """ True when every child observation reports itself complete. """
        # NOTE(review): sibling Observation models in this project define
        # ``complete`` as a property; if that is the case for the installed
        # model, this should read ``obs.complete`` without the call — verify.
        complete = True
        for obs in self.observation_set.all():
            complete = complete and obs.complete()
            if not complete:
                break
        return complete
    @models.permalink
    def get_absolute_url(self):
        # Fix: the permalink kwargs must be keyed by the URL pattern's
        # parameter name; the original used the uuid *value* as the key
        # ({self.uuid: self.uuid}), which can never match a pattern kwarg.
        return ('core:encounter', {'uuid': self.uuid})
| {
"repo_name": "dekatzenel/team-k",
"path": "mds/core/models/encounter.py",
"copies": "1",
"size": "2116",
"license": "bsd-3-clause",
"hash": 2922018781041516000,
"line_mean": 30.1176470588,
"line_max": 90,
"alpha_frac": 0.6214555766,
"autogenerated": false,
"ratio": 4.300813008130081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0314172327049966,
"num_lines": 68
} |
"""An instance of echomesh, representing one node."""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from echomesh.Cechomesh import cechomesh
from echomesh.base import Settings
from echomesh.base import Quit
from echomesh.base import Yaml
from echomesh.element import ScoreMaster
from echomesh.expression import Expression
from echomesh.graphics import Display
from echomesh.util.hardware import GPIO
from echomesh.network import PeerSocket
from echomesh.network import Peers
from echomesh.output.Registry import pause_outputs
from echomesh.output import Visualizer
from echomesh.util import CLog
from echomesh.util import Log
from echomesh.util.thread.MasterRunnable import MasterRunnable
from echomesh.util.thread.RunAfter import run_after
LOGGER = Log.logger(__name__)
USE_KEYBOARD_THREAD = False
class Instance(MasterRunnable):
  """One running echomesh node: wires together scoring, peers, sockets,
  display, optional keyboard control and optional OSC support."""
  def __init__(self):
    super(Instance, self).__init__()
    # Registered with Quit so outputs/slaves are torn down at exit.
    def do_quit():
      pause_outputs()
      self.pause()
      self.unload()
    Quit.register_atexit(do_quit)
    gpio = Settings.get('hardware', 'gpio')
    if gpio['enable']:
      # A hardware shutdown pin triggers a clean quit.
      GPIO.on_gpio(Quit.request_quit,
                   gpio['shutdown_pin'],
                   gpio['shutdown_pin_pull_up'],
                   gpio['shutdown_pin_bounce_time'])
    CLog.initialize()
    self.score_master = ScoreMaster.ScoreMaster()
    self.peers = Peers.Peers(self)
    self.socket = PeerSocket.PeerSocket(self, self.peers)
    self.display = Display.display(self.callback)
    self.keyboard_runnable = self.osc = None
    if Settings.get('execution', 'control_program'):
      from echomesh.util.thread import Keyboard
      # NOTE(review): `args` and `keyboard` are assigned but never used —
      # confirm whether they are leftovers.
      args = {}
      keyboard, self.keyboard_runnable = Keyboard.keyboard(
          self, new_thread=USE_KEYBOARD_THREAD or self.display)
    osc_client = Settings.get('osc', 'client', 'enable')
    osc_server = Settings.get('osc', 'server', 'enable')
    if osc_client or osc_server:
      from echomesh.sound.Osc import Osc
      self.osc = Osc(osc_client, osc_server)
    self.add_mutual_pause_slave(
        self.socket, self.keyboard_runnable, self.osc)
    self.add_slave(self.score_master)
    self.add_slave(self.display)
    self.set_broadcasting(False)
    self.timeout = Settings.get('network', 'timeout')
  def keyboard_callback(self, s):
    # NOTE(review): self.keyboard_runnable_queue is never assigned anywhere
    # in this class — calling this would raise AttributeError; confirm
    # whether a base class provides it or the method is dead code.
    self.keyboard_runnable_queue.put(s)
  def broadcasting(self):
    # Current broadcast flag (set via set_broadcasting).
    return self._broadcasting
  def set_broadcasting(self, b):
    # Mirror the flag into the keyboard runnable's alert mode, if present.
    self._broadcasting = b
    if self.keyboard_runnable:
      self.keyboard_runnable.alert_mode = b
  def send(self, **data):
    # Broadcast a keyword-argument dict to peers through the peer socket.
    self.socket.send(data)
  def handle(self, event):
    # Delegate incoming events to the score master.
    return self.score_master.handle(event)
  def display_loop(self):
    # Run the display's blocking loop, then wait for the keyboard thread
    # (if one was started) to finish.
    self.display.loop()
    thread = getattr(self.keyboard_runnable, 'thread', None)
    thread and thread.join()
  def main(self):
    if cechomesh.LOADED:
      self.display_loop()
    else:
      self.after_server_starts()
      time.sleep(self.timeout)
      # Prevents crashes if you start and stop echomesh very fast.
  def callback(self, data):
    # Display callback: payload arrives as YAML-encoded event data.
    data = Yaml.decode_one(data)
    event = data['event']
    if event == 'start':
      self.after_server_starts()
    elif event == 'closeButtonPressed':
      if Settings.get('execution', 'close_button_quits'):
        Quit.request_quit()
      elif Settings.get('execution', 'close_button_closes_window'):
        Visualizer.set_visible(False)
    else:
      # print(data)
      pass
  def after_server_starts(self):
    # Start running, optionally delayed by the configured expression, then
    # block in whichever loop applies (display, keyboard, or plain sleep).
    if cechomesh.LOADED:
      run_after(self.run,
                Expression.convert(Settings.get('execution',
                                                'delay_before_run')))
    else:
      self.run()
    if self.display:
      self.display_loop()
    elif not USE_KEYBOARD_THREAD and self.keyboard_runnable:
      self.keyboard_runnable.loop()
    else:
      while self.is_running:
        time.sleep(self.timeout)
| {
"repo_name": "rec/echomesh",
"path": "code/python/echomesh/Instance.py",
"copies": "1",
"size": "4326",
"license": "mit",
"hash": 7433514359706902000,
"line_mean": 32.796875,
"line_max": 82,
"alpha_frac": 0.6100323625,
"autogenerated": false,
"ratio": 3.8972972972972975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5007329659797297,
"avg_score": null,
"num_lines": null
} |
# Dependencies: Immunity Debugger
# Author: Brian Yip
import immlib
def main(args):
    """Drive Immunity Debugger so a self-decrypting binary decrypts a buffer
    in its own address space, then dump the plaintext to disk.

    This is a template: fill in both file paths and the EIP/breakpoint
    addresses before use.
    """
    imm = immlib.Debugger()
    # Read the ciphertext; binary mode so Windows doesn't mangle bytes.
    with open('''Path to encrypted file''', "rb") as encrypted_file:
        in_file = encrypted_file.read()
    in_buffer_size = len(in_file)
    # Allocate memory in the debugged process and write the data
    # from the encrypted file into it
    remote_in_buffer = imm.remoteVirtualAlloc(in_buffer_size)
    imm.writeMemory(in_file, remote_in_buffer)
    # Point execution at the decryption routine.
    # Change this to suit your needs
    imm.setReg("EIP", 0xdeadbeef)
    imm.setBreakpoint(0x01234567)
    imm.run()
    # Modify registers once breakpoint is reached
    # Example: Set the second argument to the length of the encrypted file
    # as the second argument and remote_in_buffer as the first argument
    regs = imm.getRegs()
    imm.writeLong(regs["EBP"] + 0xC, in_buffer_size)
    imm.writeLong(regs["EBP"] + 0x8, remote_in_buffer)
    # Set a breakpoint to pause execution and fetch the decrypted contents
    imm.setBreakpoint(0x11111111)
    imm.run()
    results = imm.readMemory(remote_in_buffer, in_buffer_size)
    # Fix: write in binary mode ("wb") — readMemory returns raw bytes and the
    # original text-mode "w" would corrupt them on Windows. The `with` blocks
    # also guarantee both files are closed (the original leaked handles).
    with open('''Path to where to write decrypted file''', "wb") as decrypted_file:
        decrypted_file.write(results)
| {
"repo_name": "itsbriany/Security-Tools",
"path": "reverse-engineering/self_decrypt_template.py",
"copies": "2",
"size": "1384",
"license": "mit",
"hash": -3703236248349654500,
"line_mean": 28.4468085106,
"line_max": 77,
"alpha_frac": 0.7023121387,
"autogenerated": false,
"ratio": 3.5948051948051947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5297117333505195,
"avg_score": null,
"num_lines": null
} |
"""An interactive graph to plot the trajectory of points on and off the mandelbrot
set. Illustrates the use of sliders in matplotlib"""
import pylab
from matplotlib.widgets import Slider
def compute_trajectory(x0, y0, set_boundary = 2, n_iters = 100):
    """Iterate z -> z**2 + c for c = x0 + i*y0 and record the trajectory.

    Returns a pair ``(Z, in_set)`` where ``Z`` is a complex array of length
    ``n_iters`` holding the successive iterates (frozen at the escape value
    once |z| exceeds ``set_boundary``), and ``in_set`` is True when the point
    never escaped — i.e. it appears to belong to the Mandelbrot set.
    """
    # Fix: the original bound the result flag to the name ``set``, shadowing
    # the builtin.
    in_set = True
    C = complex(x0, y0)
    Z = pylab.ones(n_iters, 'complex') * C
    for n in range(n_iters - 1):
        if abs(Z[n]) > set_boundary:
            # Escaped: freeze the remaining entries at the escape value.
            Z[n+1:] = Z[n]
            in_set = False
            break
        Z[n+1] = Z[n] * Z[n] + C
    return Z, in_set
# Two sliders along the bottom of the figure select the starting point (x, y).
axcolor = 'lightgoldenrodyellow'
# NOTE(review): 'axisbg' was removed in newer matplotlib ('facecolor' is the
# modern keyword) — confirm the targeted matplotlib version.
ax_x = pylab.axes([0.1, 0.04, 0.8, 0.03], axisbg=axcolor)
ax_y = pylab.axes([0.1, 0.01, 0.8, 0.03], axisbg=axcolor)
sx = Slider(ax_x, 'x', -1.0, 1.0, valinit=0)
sy = Slider(ax_y, 'y', -1.0, 1.0, valinit=0)
# Main axes: trajectory line (l), starting point marker (st), and the
# accumulated in-set points (ms).
ax_plot = pylab.axes([0.12, 0.12, 0.85, 0.85])
Z,s = compute_trajectory(0,0)
l, = pylab.plot(Z.real, Z.imag,'.-') #Ain't that cool?
st, = pylab.plot(Z[0].real, Z[0].imag,'ok')
pylab.setp(ax_plot,'xlim',[-1,1], 'ylim', [-1,1])
#pylab.axis('scaled')
# Coordinates of points found (so far) to lie inside the set; seeded with
# the origin, which is known to be in the set.
m_set = [[0],[0]]
ms, = pylab.plot(m_set[0], m_set[1],'k.')
def update(val):
    """Slider callback: recompute the trajectory at (sx, sy) and redraw."""
    x = sx.val
    y = sy.val
    # NOTE(review): the name ``set`` shadows the builtin within this function.
    Z, set = compute_trajectory(x,y)
    l.set_xdata(Z.real)
    l.set_ydata(Z.imag)
    st.set_xdata(Z[0].real)
    st.set_ydata(Z[0].imag)
    if set:
        # The point never escaped: remember it and replot the in-set dots.
        m_set[0] += [x]
        m_set[1] += [y]
        ms.set_xdata(m_set[0])
        ms.set_ydata(m_set[1])
    pylab.draw()
# Re-render the trajectory whenever either slider moves.
sx.on_changed(update)
sy.on_changed(update)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/577642_Mandelbrot_trajectories/recipe-577642.py",
"copies": "1",
"size": "1571",
"license": "mit",
"hash": -7598073259739794000,
"line_mean": 27.0535714286,
"line_max": 82,
"alpha_frac": 0.6155315086,
"autogenerated": false,
"ratio": 2.416923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8261740749249434,
"avg_score": 0.05414276725472855,
"num_lines": 56
} |
"""An interactive interpreter inside of a diesel event loop.
It's useful for importing and interacting with code that expects to run
inside of a diesel event loop. It works especially well for interactive
sessions with diesel's various network protocol clients.
Supports both the standard Python interactive interpreter and IPython (if
installed).
"""
import code
import sys
sys.path.insert(0, '.')
import diesel
from diesel.util.streams import create_line_input_stream
try:
from IPython.Shell import IPShell
IPYTHON_AVAILABLE = True
except ImportError:
try:
# Support changes made in iPython 0.11
from IPython.frontend.terminal.ipapp import TerminalInteractiveShell as IPShell
IPYTHON_AVAILABLE = True
except ImportError:
IPYTHON_AVAILABLE = False
# Library Functions:
# ==================
def interact_python():
    """Runs an interactive interpreter; halts the diesel app when finished."""
    mod_globals = globals()
    # Expose a minimal namespace: the usual module dunders plus diesel itself.
    env = {name: mod_globals[name]
           for name in ('__builtins__', '__doc__', '__name__')}
    env['diesel'] = diesel
    line_stream = create_line_input_stream(sys.stdin)
    def diesel_input(prompt):
        # Blocking reads must go through diesel's line stream, not raw stdin.
        sys.stdout.write(prompt)
        sys.stdout.flush()
        return line_stream.get().rstrip('\n')
    code.interact(None, diesel_input, env)
    diesel.quickstop()
def interact_ipython():
    """Starts an IPython instance; halts the diesel app when finished."""
    # IPShell blocks until the user exits; expose diesel in the user namespace.
    IPShell(user_ns={'diesel':diesel}).mainloop()
    diesel.quickstop()
# Interpreter entry points:
# =========================
def python():
    """Entry point: run the standard interactive interpreter under diesel."""
    diesel.quickstart(interact_python)
def ipython():
    """Entry point: run IPython under diesel; exits with status 1 if absent."""
    if not IPYTHON_AVAILABLE:
        print >> sys.stderr, "IPython not found."
        raise SystemExit(1)
    diesel.quickstart(interact_ipython)
| {
"repo_name": "dieseldev/diesel",
"path": "diesel/interactive.py",
"copies": "1",
"size": "1836",
"license": "bsd-3-clause",
"hash": 3595309032179243500,
"line_mean": 26,
"line_max": 87,
"alpha_frac": 0.6623093682,
"autogenerated": false,
"ratio": 4.008733624454148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171042992654148,
"avg_score": null,
"num_lines": null
} |
"""An interactive mouse-based example
This script tracks whether mouse clicks hit a circle.
On the screen there's a circle with a fixed center and radius. Mouse clicks
inside and outside the circle to change its color.
On click (and drag), mouse coordinates are fed into the ``mousex`` and
``mousey`` input nodes. The ``distance`` node takes those coordinates as
inputs and outputs the distance to the center of the circle. The result is fed
into the ``is_close`` node, which outputs a ``True`` value for distances
smaller than the circle radius. The ``alert`` node returns a string whose
value depends on that boolean value. Finally, the circle changes its color
based on the string in the ``alert`` node.
You can also observe debug output on the console. Note how the distance
measurement is skipped if the coordinate inputs don't change.
If you have the graphviz tool installed, you'll also see a diagram of the graph
nodes and connections on the screen. The diagram is saved in ``mouse.gif``.
"""
from lusmu.core import Input, Node, update_inputs
from lusmu.visualization import visualize_graph
import math
import Tkinter
TARGET = {'x': 90, 'y': 110}
RADIUS = 30
def get_distance(x, y):
    """Euclidean distance from (x, y) to the circle's fixed center TARGET.

    The print shows when this node actually recomputes (lazy evaluation).
    """
    print ('Measuring distance from ({x}, {y}) to {t[x]}'
           .format(x=x, y=y, t=TARGET))
    dx = x - TARGET['x']
    dy = y - TARGET['y']
    return math.sqrt(dx ** 2 + dy ** 2)
def is_close_to_target(distance):
    """Tell whether *distance* falls within the circle's radius."""
    within_radius = distance < RADIUS
    return within_radius
def get_distance_description(is_close):
    """Translate the proximity flag into the label shown on the console."""
    if is_close:
        return "INSIDE"
    return "OUTSIDE"
# Graph wiring: mouse coordinates feed the distance node, whose output feeds
# the proximity test, whose output feeds the alert string.
mousex = Input(name='mouse x')
mousey = Input(name='mouse y')
distance = Node(
    name='distance',
    action=get_distance,
    inputs=Node.inputs(mousex, mousey))
is_close = Node(
    name='is close',
    action=is_close_to_target,
    inputs=Node.inputs(distance))
alert = Node(
    name='alert',
    action=get_distance_description,
    inputs=Node.inputs(is_close))
def onclick(event):
    """Mouse handler: push the click coordinates into the graph inputs.

    Reading the node ``.value`` attributes afterwards triggers evaluation;
    unchanged inputs are not recomputed (see module docstring).
    """
    update_inputs([(mousex, event.x),
                   (mousey, event.y)])
    print 'distance.value == {:.1f}'.format(distance.value)
    print 'is_close.value == {!r}'.format(is_close.value)
    print 'alert.value == {!r}'.format(alert.value)
    print
    # Recolor the circle according to where the click landed.
    colors = {'INSIDE': 'red', 'OUTSIDE': 'blue'}
    draw_circle(colors[alert.value])
def draw_circle(color):
    """Draw the fixed target circle on the canvas, filled with *color*."""
    tx = TARGET['x']
    ty = TARGET['y']
    canvas.create_oval(tx - RADIUS, ty - RADIUS, tx + RADIUS, ty + RADIUS,
                       fill=color)
# Build the Tk window: a canvas showing the circle, wired to the click
# handler for both single clicks and drags.
root = Tkinter.Tk()
frame = Tkinter.Frame(root)
frame.pack(fill=Tkinter.BOTH, expand=1)
canvas = Tkinter.Canvas(frame, background='white')
draw_circle('blue')
canvas.pack(fill=Tkinter.BOTH, expand=1)
canvas.pack()
canvas.bind("<Button-1>", onclick)
canvas.bind("<B1-Motion>", onclick)
# Optionally render the node graph below the circle; requires graphviz.
try:
    visualize_graph([alert], 'mouse.gif')
    print 'View mouse.gif to see a visualization of the traph.'
    diagram = Tkinter.PhotoImage(file='mouse.gif')
    canvas.create_image(0, 2 * (TARGET['y'] + RADIUS),
                        image=diagram, anchor='nw')
except OSError:
    print 'Please install graphviz to visualize the graph.'
root.mainloop()
| {
"repo_name": "akaihola/lusmu",
"path": "lusmu/examples/mouse.py",
"copies": "1",
"size": "3119",
"license": "bsd-3-clause",
"hash": -8787905215254300000,
"line_mean": 28.9903846154,
"line_max": 79,
"alpha_frac": 0.677140109,
"autogenerated": false,
"ratio": 3.4050218340611353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45821619430611354,
"avg_score": null,
"num_lines": null
} |
"""An interactuve GUI to explore the patches we generate.
I use this to confirm that the patches we generate are reasonable
"""
from pylab import plt
import os
import pandas as pd
import pickle
from matplotlib.widgets import Slider, RadioButtons
from easydict import EasyDict
from srp.config import C
import numpy as np
from srp.data.generate_patches import Patch
def show_patches():
    """Interactively browse generated training patches.

    Loads the positive/negative sample CSVs from ``C.TRAIN.SAMPLES.DIR``
    and shows three panels for the currently selected sample: the rgb
    image, the rgb image with a volumetric overlay, and the volumetric
    data alone.  A slider selects the sample index, radio buttons select
    the pos/neg label, and [j,k,p,n] keys mirror those widgets.
    """
    dirname = C.TRAIN.SAMPLES.DIR
    # labels.{pos,neg}.samples hold the CSV rows; .index is the current row;
    # labels.label names the currently displayed subset.
    labels = EasyDict(pos=EasyDict(), neg=EasyDict())
    labels.pos.samples = pd.read_csv(os.path.join(dirname, 'positives.csv'))
    labels.pos.index = 0
    labels.neg.samples = pd.read_csv(os.path.join(dirname, 'negatives.csv'))
    labels.neg.index = 0
    labels.label = 'pos'
    fig = plt.figure()
    ax1 = plt.subplot(131)
    ax2 = plt.subplot(132)
    ax3 = plt.subplot(133)
    plt.subplots_adjust(bottom=0.15)
    # Slider to choose the sample
    ax_slider = plt.axes([0.25, 0.05, 0.65, 0.05], facecolor='lightgoldenrodyellow')
    slider = Slider(
        ax_slider,
        'Index',
        0,
        len(labels[labels.label].samples),
        valinit=labels[labels.label].index,
        valstep=1,
        closedmax=False)
    # Radio buttons to choose the label
    ax_label = plt.axes([0.025, 0.05, 0.1, 0.1])
    radio_buttons = RadioButtons(ax_label, ['pos', 'neg'], active=0)
    ax_help = plt.axes([0.25, 0.10, 0.65, 0.05])
    ax_help.axis('off')
    ax_help.text(0, 0.5, 'Press [j,k] to change the index, [p,n] to set the label.')
    def update(dummy):
        # Redraw callback shared by the slider and the radio buttons.
        if str(radio_buttons.value_selected) != labels.label:
            # The label changed: re-range the slider for the other CSV.
            labels.label = radio_buttons.value_selected
            slider.valmax = len(labels[labels.label].samples)
            slider.val = labels[labels.label].index
            ax_slider.set_xlim(0, slider.valmax)
        labels[labels.label].index = int(slider.val)
        current = labels[labels.label].samples.iloc[labels[labels.label].index]
        with open(os.path.join(dirname, current['name']), 'rb') as f:
            patch = pickle.load(f)
            print(f.name)
        plt.suptitle(patch.name)
        # Take three volumetric channels (2:5) and squash their values into
        # (-1, 1) with arctan for display.
        vol = patch.volumetric[2:2 + 3].transpose(1, 2, 0)
        display_vol = 2 * np.arctan(vol) / np.pi
        radius = patch.rgb.shape[1] / 2
        extent = (-radius, radius, -radius, radius)
        ax1.clear()
        ax2.clear()
        ax3.clear()
        ax1.imshow(patch.rgb.transpose(1, 2, 0), extent=extent)
        ax1.set_title('rgb')
        ax2.imshow(patch.rgb.transpose(1, 2, 0), extent=extent)
        ax2.imshow(display_vol, extent=extent, alpha=0.5)
        if patch.obb is not None:
            # Plot the oriented box twice: dashed red on top of solid yellow.
            patch.obb.plot(ax2, lw=4, color='yellow')
            patch.obb.plot(ax2, lw=3, ls='--', color='red')
        ax2.set_title('both')
        ax3.imshow(display_vol, extent=extent)
        ax3.set_title('vol:max={:.1f}'.format(vol.max()))
        fig.canvas.draw_idle()
    # First plot
    update(0)
    radio_buttons.on_clicked(update)
    slider.on_changed(update)
    def keypress(event):
        # Keyboard shortcuts mirroring the slider and radio buttons.
        if event.key == 'j':
            slider.set_val(slider.val - 1)
        elif event.key == 'k':
            slider.set_val(slider.val + 1)
        elif event.key == 'p':
            radio_buttons.set_active(0)
        elif event.key == 'n':
            radio_buttons.set_active(1)
    fig.canvas.mpl_connect('key_press_event', keypress)
    plt.show()
# Allow running this module directly as a script.
if __name__ == '__main__':
    show_patches()
| {
"repo_name": "jfemiani/srp-boxes",
"path": "srp/visualize/show_patches.py",
"copies": "1",
"size": "3423",
"license": "mit",
"hash": -7932441709435813000,
"line_mean": 28.2564102564,
"line_max": 84,
"alpha_frac": 0.6038562664,
"autogenerated": false,
"ratio": 3.2445497630331754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9347400499015881,
"avg_score": 0.00020110608345902463,
"num_lines": 117
} |
"""An intercomponent communication protocol."""
# pylint: disable=invalid-name,bad-whitespace
from enum import Enum, IntEnum, auto, unique
import traceback
import multiprocessing
import os
import json
# Protocol version as a (major, minor, patch) tuple; also unpacked into
# RUNTIME_CONFIG below.
__version__ = (1, 5, 0)
class AutoIntEnum(IntEnum):
    """
    An enum with automatically incrementing integer values, starting from zero.

    References:
        * https://docs.python.org/3/library/enum.html#using-automatic-values
    """
    # pylint: disable=no-self-argument
    # Called by auto(); ``count`` is the number of members defined so far,
    # so values come out as 0, 1, 2, ... in declaration order.
    def _generate_next_value_(name, start, count, last_values):
        return count
class RUNTIME_CONFIG(Enum):
    """Assorted runtime constants."""
    STUDENT_CODE_TIMELIMIT = 1
    STUDENT_CODE_HZ = 20  # Number of times to execute studentcode.main per second
    DEBUG_DELIMITER_STRING = "\n****************** RUNTIME MESSAGE ******************"
    PIPE_READY = ["ready"]
    TEST_OUTPUT_DIR = "../test_outputs/"
    # Unpack the module-level __version__ tuple into three enum members.
    VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH = __version__
@unique
class BAD_EVENTS(Enum):
    """Assorted message types for ``BadEvent``s."""
    BAD_EVENT = "BAD THINGS HAPPENED"
    # Student-code failures.
    STUDENT_CODE_ERROR = "Student Code Crashed"
    STUDENT_CODE_VALUE_ERROR = "Student Code Value Error"
    STUDENT_CODE_TIMEOUT = "Student Code Timed Out"
    # State Manager failures.
    UNKNOWN_PROCESS = "Unknown State Manager process name"
    STATE_MANAGER_KEY_ERROR = "Error accessing key in State Manager"
    STATE_MANAGER_CRASH = "State Manager has Crashed"
    EMERGENCY_STOP = "Robot Emergency Stopped"
    END_EVENT = "Process terminated"
    # Networking process failures.
    UDP_SEND_ERROR = "UDPSend Process Crashed"
    UDP_RECV_ERROR = "UDPRecv Process Crashed"
    TCP_ERROR = "TCP Process Crashed"
    HIBIKE_START_ERROR = "Hibike Process failed to start"
    # Mode-change notifications from Dawn.
    ENTER_TELEOP = "Dawn says enter Teleop"
    ENTER_AUTO = "Dawn says enter Auto"
    ENTER_IDLE = "Dawn says enter Idle"
    NEW_IP = "Connected to new instance of Dawn"
    DAWN_DISCONNECTED = "Disconnected to Dawn"
    # Hibike device errors.
    HIBIKE_NONEXISTENT_DEVICE = "Tried to access a nonexistent device"
    HIBIKE_INSTRUCTION_ERROR = "Hibike received malformed instruction"
# Events for which BadThing generates a student-facing error summary
# (see BadThing.__init__).
restartEvents = [BAD_EVENTS.STUDENT_CODE_VALUE_ERROR, BAD_EVENTS.STUDENT_CODE_ERROR,
                 BAD_EVENTS.STUDENT_CODE_TIMEOUT, BAD_EVENTS.END_EVENT, BAD_EVENTS.EMERGENCY_STOP]
# Subset of events caused directly by errors in student code.
studentErrorEvents = [BAD_EVENTS.STUDENT_CODE_ERROR, BAD_EVENTS.STUDENT_CODE_TIMEOUT]
@unique
class PROCESS_NAMES(Enum):
    """Names of processes."""
    STUDENT_CODE = "studentProcess"
    STATE_MANAGER = "stateProcess"
    RUNTIME = "runtime"
    UDP_SEND_PROCESS = "udpSendProcess"
    UDP_RECEIVE_PROCESS = "udpReceiveProcess"
    HIBIKE = "hibike"
    TCP_PROCESS = "tcpProcess"
@unique
class HIBIKE_COMMANDS(Enum):
    """Hibike command types (messages sent down to the Hibike process)."""
    ENUMERATE = "enumerate_all"
    SUBSCRIBE = "subscribe_device"
    WRITE = "write_params"
    READ = "read_params"
    DISABLE = "disable_all"
    TIMESTAMP_DOWN = "timestamp_down"
@unique
class HIBIKE_RESPONSE(Enum):
    """Hibike response types (messages sent up from the Hibike process)."""
    DEVICE_SUBBED = "device_subscribed"
    DEVICE_VALUES = "device_values"
    DEVICE_DISCONNECT = "device_disconnected"
    TIMESTAMP_UP = "timestamp_up"
@unique
class ANSIBLE_COMMANDS(Enum):
    """Ansible command types."""
    STUDENT_UPLOAD = "student_upload"
    CONSOLE = "console"
    TIMESTAMP_UP = "Get timestamps going up the stack"
    TIMESTAMP_DOWN = "Get timestamps going down the stack"
@unique
class SM_COMMANDS(AutoIntEnum):
    """Commands understood by the State Manager process.

    Values are consecutive integers assigned in declaration order
    (see ``AutoIntEnum``).
    """
    RESET = auto()
    ADD = auto()
    STUDENT_MAIN_OK = auto()
    GET_VAL = auto()
    SET_VAL = auto()
    SEND_ANSIBLE = auto()
    RECV_ANSIBLE = auto()
    CREATE_KEY = auto()
    GET_TIME = auto()
    EMERGENCY_STOP = auto()
    EMERGENCY_RESTART = auto()
    SET_ADDR = auto()
    SEND_ADDR = auto()
    STUDENT_UPLOAD = auto()
    SEND_CONSOLE = auto()
    ENTER_IDLE = auto()
    ENTER_TELEOP = auto()
    ENTER_AUTO = auto()
    END_STUDENT_CODE = auto()
    SET_TEAM = auto()
class BadThing:
    """Message to runtime from one of its components.

    Wraps an exception (``exc_info`` triple) together with an event type
    and the name of the process that raised it, pre-rendering both a full
    stack trace and, for restart-triggering events, a student-facing
    error summary.
    """
    def __init__(self, exc_info, data, event=BAD_EVENTS.BAD_EVENT, printStackTrace=True):
        self.name = multiprocessing.current_process().name #pylint: disable=not-callable
        self.data = data
        self.event = event
        self.errorType, self.errorValue, tb = exc_info
        self.stackTrace = self.genStackTrace(tb)
        self.printStackTrace = printStackTrace
        # Bug fix: default to None so getStudentError() cannot raise
        # AttributeError for events outside restartEvents.
        self.studentError = None
        if event in restartEvents:
            self.studentError = self.genStudentError(tb)
    def genStackTrace(self, tb):
        """Get a formatted string for a traceback."""
        badThingDump = \
            ("Fatal Error in thread: %s\n"
             "Bad Event: %s\n"
             "Error Type: %s\n"
             "Error Value: %s\n"
             "Traceback: \n%s") % \
            (self.name, self.event, self.errorType,
             self.errorValue, "".join(traceback.format_tb(tb)))
        return badThingDump
    def genStudentError(self, tb):
        """Create a human readable error message for students from a traceback."""
        errorList = []
        # Keep only the frames inside the student's own file, trimmed to
        # start at the "line N" portion.
        for error in traceback.format_tb(tb):
            if "studentcode.py" in error:
                index = error.find("line")
                error = error[index:]
                errorList.append(error)
        studentErrorMessage = "Traceback: \n"
        studentErrorMessage += "".join(errorList)
        if self.errorType is not None and self.errorValue is not None:
            studentErrorMessage += str(self.errorType.__name__) + ": " + str(self.errorValue)
        return studentErrorMessage
    def getStudentError(self):
        """Return the student-facing summary, or None for non-restart events."""
        return self.studentError
    def __str__(self):
        if self.printStackTrace:
            return self.stackTrace
        return str(self.data)
class StudentAPIError(Exception):
    """Base class for miscellaneous student API errors."""
class StudentAPIKeyError(StudentAPIError):
    """Raised when a student accessed something that doesn't exist."""
class StudentAPIValueError(StudentAPIError):
    """Raised when a student stored the wrong value."""
class StudentAPITypeError(StudentAPIError):
    """Raised when a student stored the wrong type."""
# Sensor type names are CamelCase, with the first letter capitalized as well
# Parse the device-id -> name mapping once at import time.  The handle is
# closed as soon as the JSON is parsed instead of being left open for the
# lifetime of the process (resolves the old TODO).  CONFIG_FILE is kept as
# a module-level name for backward compatibility, but is now closed.
CONFIG_FILE = open(os.path.join(os.path.dirname(__file__), 'hibikeDevices.json'), 'r')
try:
    SENSOR_TYPE = {device_data["id"]: device_data["name"]
                   for device_data in json.load(CONFIG_FILE)}
finally:
    CONFIG_FILE.close()
# Code -1 is reserved for the runtime reporting its own version.
SENSOR_TYPE[-1] = "runtime_version"
| {
"repo_name": "pioneers/PieCentral",
"path": "runtime/runtime/util.py",
"copies": "1",
"size": "6961",
"license": "apache-2.0",
"hash": 5893885445357965000,
"line_mean": 33.805,
"line_max": 98,
"alpha_frac": 0.610831777,
"autogenerated": false,
"ratio": 3.7525606469002697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48633924239002696,
"avg_score": null,
"num_lines": null
} |
# An interesting turtle sample from CSDN: "What is Peppa Pig?" (啥是佩奇). Run with: python turtleDrawPeppa.py
from turtle import*
def nose(x,y):
    """Draw the snout and nostrils, starting from canvas position (x, y)."""
    penup()
    goto(x,y)
    pendown()
    setheading(-30)
    begin_fill()
    a=0.4
    # Trace the snout outline: the step length grows for 30 steps, then
    # shrinks for 30, producing a rounded oblong shape.
    for i in range(120):
        if 0<=i<30 or 60<=i<90:
            a=a+0.08
            left(3)
            forward(a)
        else:
            a=a-0.08
            left(3)
            forward(a)
    end_fill()
    # First nostril.
    penup()
    setheading(90)
    forward(25)
    setheading(0)
    forward(10)
    pendown()
    pencolor(255,155,192)  # pen color (pink)
    setheading(10)
    begin_fill()
    circle(5)
    color(160,82,45)  # sets both pencolor and fillcolor (brown)
    end_fill()
    # Second nostril.
    penup()
    setheading(0)
    forward(20)
    pendown()
    pencolor(255,155,192)
    setheading(10)
    begin_fill()
    circle(5)
    color(160,82,45)
    end_fill()
def head(x,y):
    """Draw the head outline as a filled shape, starting from (x, y)."""
    color((255,155,192),"pink")
    penup()
    goto(x,y)
    setheading(0)
    pendown()
    begin_fill()
    # The back and top of the head: a chain of arcs of varying radii.
    setheading(180)
    circle(300,-30)
    circle(100,-60)
    circle(80,-100)
    circle(150,-20)
    circle(60,-95)
    setheading(161)
    circle(-300,15)
    penup()
    goto(-100,100)
    pendown()
    setheading(-30)
    # Re-trace part of the snout curve (same grow/shrink step pattern as
    # in nose()) so the fill closes along the face.
    a=0.4
    for i in range(60):
        if 0<=i<30 or 60<=i<90:
            a=a+0.08
            lt(3)
            fd(a)
        else:
            a=a-0.08
            lt(3)
            fd(a)
    end_fill()
def cheek(x,y):
    """Draw the cheek: a filled pink circle of radius 30 at (x, y)."""
    color((255,155,192))
    penup()
    goto(x,y)
    pendown()
    setheading(0)
    begin_fill()
    circle(30)
    end_fill()
def ears(x,y):
    """Draw both ears, starting from (x, y)."""
    color((255,155,192),"pink")
    penup()
    goto(x,y)
    pendown()
    # First ear: three arcs forming a closed filled leaf shape.
    begin_fill()
    setheading(100)
    circle(-50,50)
    circle(-10,120)
    circle(-50,54)
    end_fill()
    # Shift over and draw the second ear the same way.
    penup()
    setheading(90)
    forward(-12)
    setheading(0)
    forward(30)
    pendown()
    begin_fill()
    setheading(100)
    circle(-50,50)
    circle(-10,120)
    circle(-50,56)
    end_fill()
def eyes(x,y):
    """Draw both eyes (a white circle with a black pupil each).

    Note: the x and y parameters are unused; positioning is relative to
    the pen location left by the previous drawing step.
    """
    # First eye: white.
    color((255,155,192),"white")
    penup()
    setheading(90)
    forward(-20)
    setheading(0)
    forward(-95)
    pendown()
    begin_fill()
    circle(15)
    end_fill()
    # First pupil.
    color("black")
    penup()
    setheading(90)
    forward(12)
    setheading(0)
    forward(-3)
    pendown()
    begin_fill()
    circle(3)
    end_fill()
    # Second eye (seth/fd are turtle aliases of setheading/forward).
    color((255,155,192),"white")
    penup()
    seth(90)
    forward(-25)
    seth(0)
    forward(40)
    pendown()
    begin_fill()
    circle(15)
    end_fill()
    # Second pupil.
    color("black")
    penup()
    setheading(90)
    forward(12)
    setheading(0)
    forward(-3)
    pendown()
    begin_fill()
    circle(3)
    end_fill()
def mouth(x,y):
    """Draw the mouth as two joined arcs, starting from (x, y)."""
    color(239,69,19)
    penup()
    goto(x,y)
    pendown()
    setheading(-80)
    circle(30,40)
    circle(40,80)
def setting():
    """Configure the canvas and pen: size, color mode, default colors."""
    pensize(4)
    hideturtle()
    colormode(255)  # allow 0-255 RGB tuples in color()/pencolor()
    color((255,155,192),"pink")
    setup(840,500)
    # speed() arguments outside 0-10 are treated as 0 (fastest, no
    # animation) per the turtle module documentation.
    speed(30)
def main():
    """Draw the whole figure feature by feature, then run the event loop."""
    setting()       # canvas and pen setup
    nose(-100,100)  # nose
    head(-69,167)   # head
    ears(0,160)     # ears
    eyes(0,140)     # eyes
    cheek(80,10)    # cheek
    mouth(-20,30)   # mouth
    done()          # enter the Tk mainloop so the window stays open
if __name__ == '__main__':
main() | {
"repo_name": "mphz/bkdev",
"path": "pycod/turtleDrawPeppa.py",
"copies": "1",
"size": "3137",
"license": "mit",
"hash": 534133986255046800,
"line_mean": 15.6378378378,
"line_max": 72,
"alpha_frac": 0.5183620409,
"autogenerated": false,
"ratio": 2.657167530224525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3675529571124525,
"avg_score": null,
"num_lines": null
} |
"""An interface between JPL ephemerides and Skyfield."""
import jplephem
from jplephem.spk import SPK
from jplephem.names import target_names as _names
from numpy import max, min
from .constants import AU_KM, C_AUDAY, DAY_S
from .ephemerislib import Body, Segment
from .functions import length_of
from .positionlib import Astrometric, Barycentric, Topos
from .timelib import takes_julian_date
# Reverse lookup: NAIF target name -> integer code.
_targets = dict((name, target) for (target, name) in _names.items())
class Kernel(dict):
    """A JPL SPK kernel opened from ``filename``.

    Wraps each raw SPK segment in a Skyfield ``Segment`` and records the
    set of body codes that this kernel can position.
    """
    def __init__(self, filename):
        self.filename = filename
        self.spk = SPK.open(filename)
        self.segments = [Segment(s.center, s.target, _build_compute(s))
                         for s in self.spk.segments]
        self.codes = set(s.center for s in self.segments).union(
            s.target for s in self.segments)
        # for code in codes:
        #     body = Body(code, segments)
        #     self[code] = body
        #     raw_name = target_names.get(code, None)
        #     if raw_name is None:
        #         continue
        #     name = raw_name.lower().replace(' ', '_')
        #     setattr(self, name, body)
    def __str__(self):
        return str(self.spk)
    def __call__(self, name):
        """Return a ``Body`` for the given target name or integer code."""
        code = self.decode(name)
        return Body(self, code)
    def decode(self, name):
        """Translate a target name into its integer code.

        Integer input is passed through unchanged.  Raises ``KeyError``
        for unknown names and for targets missing from this kernel.
        """
        if isinstance(name, int):
            return name
        name = name.upper()
        code = _targets.get(name)
        if code is None:
            raise KeyError('unknown SPICE target name {0!r}'.format(name))
        if code not in self.codes:
            names = ', '.join(_names[c] for c in self.codes)
            raise KeyError('this kernel is missing {0!r} - the targets it'
                           ' supports are {1}'.format(name, names))
        return code
def _build_compute(segment):
"""Build a Skyfield `compute` callback for the SPK `segment`."""
if segment.data_type == 2:
def compute(jd):
position, velocity = segment.compute_and_differentiate(jd.tdb)
return position / AU_KM, velocity / AU_KM
elif segment.data_type == 3:
def compute(jd):
six = segment.compute(jd.tdb)
return six[:3] / AU_KM, six[3:] * DAY_S / AU_KM
else:
raise ValueError('SPK data type {} not yet supported segment'
.format(segment.data_type))
return compute
class Planet(object):
    """A solar-system body backed by an older-style jplephem ephemeris."""
    def __init__(self, ephemeris, jplephemeris, jplname):
        self.ephemeris = ephemeris
        self.jplephemeris = jplephemeris
        self.jplname = jplname
    def __repr__(self):
        return '<Planet %s>' % (self.jplname,)
    @takes_julian_date
    def __call__(self, jd):
        """Return the x,y,z position of this planet at the given time."""
        position, velocity = self._position_and_velocity(jd.tdb)
        i = Barycentric(position, velocity, jd)
        i.ephemeris = self.ephemeris
        return i
    def _position(self, jd_tdb):
        """Return the position (scaled from km to au) at TDB time ``jd_tdb``."""
        e = self.jplephemeris
        c = e.position
        # The ephemeris stores an Earth-Moon barycenter vector plus a
        # separate Moon vector; recover Earth and Moon from those two.
        if self.jplname == 'earth':
            p = c('earthmoon', jd_tdb) - c('moon', jd_tdb) * e.earth_share
        elif self.jplname == 'moon':
            p = c('earthmoon', jd_tdb) + c('moon', jd_tdb) * e.moon_share
        else:
            p = c(self.jplname, jd_tdb)
        p /= AU_KM  # jplephem works in km; Skyfield uses au
        if getattr(jd_tdb, 'shape', ()) == ():
            # Skyfield, unlike jplephem, is willing to accept and return
            # plain scalars instead of only trafficking in NumPy arrays.
            p = p[:,0]
        return p
    def _position_and_velocity(self, jd_tdb):
        """Return (position, velocity) arrays scaled to au at ``jd_tdb``."""
        e = self.jplephemeris
        c = e.compute
        if self.jplname == 'earth':
            pv = c('earthmoon', jd_tdb) - c('moon', jd_tdb) * e.earth_share
        elif self.jplname == 'moon':
            pv = c('earthmoon', jd_tdb) + c('moon', jd_tdb) * e.moon_share
        else:
            pv = c(self.jplname, jd_tdb)
        pv /= AU_KM
        if getattr(jd_tdb, 'shape', ()) == ():
            # Skyfield, unlike jplephem, is willing to accept and return
            # plain scalars instead of only trafficking in NumPy arrays.
            pv = pv[:,0]
        return pv[:3], pv[3:]
    def _observe_from_bcrs(self, observer):
        """Return the ``Astrometric`` position of this body as seen by
        ``observer``, iterating to correct for light-travel time.
        """
        # TODO: should also accept another ICRS?
        jd_tdb = observer.jd.tdb
        lighttime0 = 0.0
        position, velocity = self._position_and_velocity(jd_tdb)
        vector = position - observer.position.au
        euclidian_distance = distance = length_of(vector)
        # Fixed-point iteration: re-evaluate where the target was one
        # light-travel time earlier until the estimate stabilizes.
        for i in range(10):
            lighttime = distance / C_AUDAY
            delta = lighttime - lighttime0
            if -1e-12 < min(delta) and max(delta) < 1e-12:
                break
            lighttime0 = lighttime
            position, velocity = self._position_and_velocity(jd_tdb - lighttime)
            vector = position - observer.position.au
            distance = length_of(vector)
        else:
            raise ValueError('observe_from() light-travel time'
                             ' failed to converge')
        g = Astrometric(vector, velocity - observer.velocity.au_per_d,
                        observer.jd)
        g.observer = observer
        g.distance = euclidian_distance
        g.lighttime = lighttime
        return g
class Earth(Planet):
    """The Earth: a Planet that can also build topocentric observers."""
    def topos(self, latitude=None, longitude=None, latitude_degrees=None,
              longitude_degrees=None, elevation_m=0.0):
        """Return a ``Topos`` object for a specific location on Earth."""
        t = Topos(latitude, longitude, latitude_degrees,
                  longitude_degrees, elevation_m)
        t.ephemeris = self.ephemeris
        return t
    def satellite(self, text):
        """Return an ``EarthSatellite`` built from ``text`` lines
        (presumably TLE element lines - confirm with sgp4lib)."""
        from .sgp4lib import EarthSatellite
        lines = text.splitlines()
        return EarthSatellite(lines, self)
class Ephemeris(object):
    """Wrap a jplephem ephemeris module, exposing one attribute per body."""
    def __init__(self, module):
        self.jplephemeris = jplephem.Ephemeris(module)
        self.sun = Planet(self, self.jplephemeris, 'sun')
        self.mercury = Planet(self, self.jplephemeris, 'mercury')
        self.venus = Planet(self, self.jplephemeris, 'venus')
        # Earth gets the subclass with the topos()/satellite() helpers.
        self.earth = Earth(self, self.jplephemeris, 'earth')
        self.moon = Planet(self, self.jplephemeris, 'moon')
        self.mars = Planet(self, self.jplephemeris, 'mars')
        self.jupiter = Planet(self, self.jplephemeris, 'jupiter')
        self.saturn = Planet(self, self.jplephemeris, 'saturn')
        self.uranus = Planet(self, self.jplephemeris, 'uranus')
        self.neptune = Planet(self, self.jplephemeris, 'neptune')
        self.pluto = Planet(self, self.jplephemeris, 'pluto')
    def _position(self, name, jd):
        # Dispatch by attribute name, e.g. _position('mars', jd).
        return getattr(self, name)._position(jd)
    def _position_and_velocity(self, name, jd):
        return getattr(self, name)._position_and_velocity(jd)
| {
"repo_name": "GuidoBR/python-skyfield",
"path": "skyfield/jpllib.py",
"copies": "1",
"size": "6858",
"license": "mit",
"hash": 7323382856470085000,
"line_mean": 35.2857142857,
"line_max": 80,
"alpha_frac": 0.5810731992,
"autogenerated": false,
"ratio": 3.3323615160349855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44134347152349857,
"avg_score": null,
"num_lines": null
} |
"""An interface between JPL ephemerides and Skyfield."""
import jplephem
from jplephem.spk import SPK
from jplephem.names import target_names
from numpy import max, min
from .chaining import Body, Segment
from .constants import AU_KM, C_AUDAY, DAY_S
from .functions import length_of
from .positionlib import Astrometric, Barycentric, Topos
from .timelib import takes_julian_date
class Kernel(dict):
    """A JPL SPK kernel mapping body codes to ``Body`` objects.

    Accepts a filename or an already-open binary file object.  Each body
    is stored both as a dict entry (keyed by integer code) and, when a
    NAIF name is known, as an attribute (lowercased, spaces replaced by
    underscores).
    """
    def __init__(self, file):
        if isinstance(file, str):
            file = open(file, 'rb')
        self.spk = SPK(file)
        segments = [Segment(s.center, s.target, _build_compute(s))
                    for s in self.spk.segments]
        codes = set(s.center for s in segments).union(
            s.target for s in segments)
        for code in codes:
            body = Body(code, segments)
            self[code] = body
            raw_name = target_names.get(code, None)
            if raw_name is None:
                continue
            name = raw_name.lower().replace(' ', '_')
            setattr(self, name, body)
    def __str__(self):
        return str(self.spk)
def _build_compute(segment):
"""Build a Skyfield `compute` callback for the SPK `segment`."""
if segment.data_type == 2:
def compute(jd):
position, velocity = segment.compute_and_differentiate(jd.tdb)
return position / AU_KM, velocity / AU_KM
elif segment.data_type == 3:
def compute(jd):
six = segment.compute(jd.tdb)
return six[:3] / AU_KM, six[3:] * DAY_S / AU_KM
else:
raise ValueError('SPK data type {} not yet supported segment'
.format(segment.data_type))
return compute
# The older ephemerides handled by the classes below use a different
# value for the AU than the SPK-based code above, so for now (until the
# tests are updated) they are kept as a separate code path:
class Planet(object):
    """A solar-system body backed by an older-style jplephem ephemeris."""
    def __init__(self, ephemeris, jplephemeris, jplname):
        self.ephemeris = ephemeris
        self.jplephemeris = jplephemeris
        self.jplname = jplname
    def __repr__(self):
        return '<Planet %s>' % (self.jplname,)
    @takes_julian_date
    def __call__(self, jd):
        """Return the x,y,z position of this planet at the given time."""
        position, velocity = self._position_and_velocity(jd.tdb)
        i = Barycentric(position, velocity, jd)
        i.ephemeris = self.ephemeris
        return i
    def _position(self, jd_tdb):
        """Return the position (scaled from km to au) at TDB time ``jd_tdb``."""
        e = self.jplephemeris
        c = e.position
        # The ephemeris stores an Earth-Moon barycenter vector plus a
        # separate Moon vector; recover Earth and Moon from those two.
        if self.jplname == 'earth':
            p = c('earthmoon', jd_tdb) - c('moon', jd_tdb) * e.earth_share
        elif self.jplname == 'moon':
            p = c('earthmoon', jd_tdb) + c('moon', jd_tdb) * e.moon_share
        else:
            p = c(self.jplname, jd_tdb)
        p /= AU_KM  # jplephem works in km; Skyfield uses au
        if getattr(jd_tdb, 'shape', ()) == ():
            # Skyfield, unlike jplephem, is willing to accept and return
            # plain scalars instead of only trafficking in NumPy arrays.
            p = p[:,0]
        return p
    def _position_and_velocity(self, jd_tdb):
        """Return (position, velocity) arrays scaled to au at ``jd_tdb``."""
        e = self.jplephemeris
        c = e.compute
        if self.jplname == 'earth':
            pv = c('earthmoon', jd_tdb) - c('moon', jd_tdb) * e.earth_share
        elif self.jplname == 'moon':
            pv = c('earthmoon', jd_tdb) + c('moon', jd_tdb) * e.moon_share
        else:
            pv = c(self.jplname, jd_tdb)
        pv /= AU_KM
        if getattr(jd_tdb, 'shape', ()) == ():
            # Skyfield, unlike jplephem, is willing to accept and return
            # plain scalars instead of only trafficking in NumPy arrays.
            pv = pv[:,0]
        return pv[:3], pv[3:]
    def _observe_from_bcrs(self, observer):
        """Return the ``Astrometric`` position of this body as seen by
        ``observer``, iterating to correct for light-travel time.
        """
        # TODO: should also accept another ICRS?
        jd_tdb = observer.jd.tdb
        lighttime0 = 0.0
        position, velocity = self._position_and_velocity(jd_tdb)
        vector = position - observer.position.au
        euclidian_distance = distance = length_of(vector)
        # Fixed-point iteration: re-evaluate where the target was one
        # light-travel time earlier until the estimate stabilizes.
        for i in range(10):
            lighttime = distance / C_AUDAY
            delta = lighttime - lighttime0
            if -1e-12 < min(delta) and max(delta) < 1e-12:
                break
            lighttime0 = lighttime
            position, velocity = self._position_and_velocity(jd_tdb - lighttime)
            vector = position - observer.position.au
            distance = length_of(vector)
        else:
            raise ValueError('observe_from() light-travel time'
                             ' failed to converge')
        g = Astrometric(vector, velocity - observer.velocity.au_per_d,
                        observer.jd)
        g.observer = observer
        g.distance = euclidian_distance
        g.lighttime = lighttime
        return g
class Earth(Planet):
    """The Earth: a Planet that can also build topocentric observers."""
    def topos(self, latitude=None, longitude=None, latitude_degrees=None,
              longitude_degrees=None, elevation_m=0.0):
        """Return a ``Topos`` object for a specific location on Earth."""
        t = Topos(latitude, longitude, latitude_degrees,
                  longitude_degrees, elevation_m)
        t.ephemeris = self.ephemeris
        return t
    def satellite(self, text):
        """Return an ``EarthSatellite`` built from ``text`` lines
        (presumably TLE element lines - confirm with sgp4lib)."""
        from .sgp4lib import EarthSatellite
        lines = text.splitlines()
        return EarthSatellite(lines, self)
class Ephemeris(object):
    """Wrap a jplephem ephemeris module, exposing one attribute per body."""
    def __init__(self, module):
        self.jplephemeris = jplephem.Ephemeris(module)
        self.sun = Planet(self, self.jplephemeris, 'sun')
        self.mercury = Planet(self, self.jplephemeris, 'mercury')
        self.venus = Planet(self, self.jplephemeris, 'venus')
        # Earth gets the subclass with the topos()/satellite() helpers.
        self.earth = Earth(self, self.jplephemeris, 'earth')
        self.moon = Planet(self, self.jplephemeris, 'moon')
        self.mars = Planet(self, self.jplephemeris, 'mars')
        self.jupiter = Planet(self, self.jplephemeris, 'jupiter')
        self.saturn = Planet(self, self.jplephemeris, 'saturn')
        self.uranus = Planet(self, self.jplephemeris, 'uranus')
        self.neptune = Planet(self, self.jplephemeris, 'neptune')
        self.pluto = Planet(self, self.jplephemeris, 'pluto')
    def _position(self, name, jd):
        # Dispatch by attribute name, e.g. _position('mars', jd).
        return getattr(self, name)._position(jd)
    def _position_and_velocity(self, name, jd):
        return getattr(self, name)._position_and_velocity(jd)
| {
"repo_name": "exoanalytic/python-skyfield",
"path": "skyfield/jpllib.py",
"copies": "1",
"size": "6276",
"license": "mit",
"hash": -1170863877936210200,
"line_mean": 35.0689655172,
"line_max": 80,
"alpha_frac": 0.5874760994,
"autogenerated": false,
"ratio": 3.2875851231011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9369789872285554,
"avg_score": 0.001054270043108957,
"num_lines": 174
} |
"""An interface between JPL ephemerides and Skyfield."""
import os
from collections import defaultdict, namedtuple
from numpy import max, min
from jplephem.spk import SPK
from jplephem.names import target_name_pairs, target_names as _names
from .constants import AU_KM, C_AUDAY, DAY_S
from .errors import DeprecationError, raise_error_for_deprecated_time_arguments
from .functions import length_of
from .positionlib import Astrometric, Barycentric, ICRF
from .timelib import calendar_date
# A single SPK segment: the pair of body codes it connects, plus a
# callback computing position/velocity along it.
Segment = namedtuple('Segment', 'center target compute')
# Reverse lookup: NAIF target name -> integer code.
_targets = dict((name, target) for (target, name) in target_name_pairs)
class SpiceKernel(object):
    """Ephemeris file in NASA .bsp format.

    A "Spacecraft and Planet Kernel" (SPK) file from NASA provides
    (x,y,z) coordinates for bodies in the Solar System like the Sun,
    planets, moons, and spacecraft.

    You can download a .bsp file yourself and use this class to open it,
    or use the Skyfield `load` function to automatically download a
    popular ephemeris.  Once loaded, you can print this object to the
    screen to see a report on the segments that it includes:

    >>> planets = load('de421.bsp')
    >>> print(planets)
    SPICE kernel file 'de421.bsp' has 15 segments
      JD 2414864.50 - JD 2471184.50  (1899-07-28 through 2053-10-08)
          0 -> 1    SOLAR SYSTEM BARYCENTER -> MERCURY BARYCENTER
          0 -> 2    SOLAR SYSTEM BARYCENTER -> VENUS BARYCENTER
          0 -> 3    SOLAR SYSTEM BARYCENTER -> EARTH BARYCENTER
          0 -> 4    SOLAR SYSTEM BARYCENTER -> MARS BARYCENTER
          0 -> 5    SOLAR SYSTEM BARYCENTER -> JUPITER BARYCENTER
          0 -> 6    SOLAR SYSTEM BARYCENTER -> SATURN BARYCENTER
          0 -> 7    SOLAR SYSTEM BARYCENTER -> URANUS BARYCENTER
          0 -> 8    SOLAR SYSTEM BARYCENTER -> NEPTUNE BARYCENTER
          0 -> 9    SOLAR SYSTEM BARYCENTER -> PLUTO BARYCENTER
          0 -> 10   SOLAR SYSTEM BARYCENTER -> SUN
          3 -> 301  EARTH BARYCENTER -> MOON
          3 -> 399  EARTH BARYCENTER -> EARTH
          1 -> 199  MERCURY BARYCENTER -> MERCURY
          2 -> 299  VENUS BARYCENTER -> VENUS
          4 -> 499  MARS BARYCENTER -> MARS

    To create a `Body` object for a target you are interested in, use
    square brackets and supply the target's name or integer code:

    >>> planets['earth']
    <Body 399 'EARTH' from kernel 'de421.bsp'>
    >>> planets[499]
    <Body 499 'MARS' from kernel 'de421.bsp'>
    """
    def __init__(self, path):
        self.path = path
        self.filename = os.path.basename(path)
        self.spk = SPK.open(path)
        # Wrap each raw SPK segment with a Skyfield compute callback, and
        # record every body code this kernel can position.
        self.segments = [Segment(s.center, s.target, _build_compute(s))
                         for s in self.spk.segments]
        self.codes = set(s.center for s in self.segments).union(
            s.target for s in self.segments)
    def __str__(self):
        segments = self.spk.segments
        lines = ['SPICE kernel file {0!r} has {1} segments'
                 .format(self.filename, len(segments))]
        format_date = '{0}-{1:02}-{2:02}'.format
        start = end = None
        for s in segments:
            # Print a date-range line only when the range changes between
            # consecutive segments.
            if start != s.start_jd or end != s.end_jd:
                start, end = s.start_jd, s.end_jd
                starts = format_date(*calendar_date(int(start)))
                ends = format_date(*calendar_date(int(end)))
                lines.append('  JD {0:.2f} - JD {1:.2f}  ({2} through {3})'
                             .format(start, end, starts, ends))
            lines.append(_segment_line(s))
        return '\n'.join(lines)
    def __getitem__(self, name):
        """Return a `Body` given a target name or integer."""
        code = self.decode(name)
        return Body(self, code)
    def comments(self):
        """Return the comments string of this kernel.

        The resulting string often contains embedded newlines, and is
        formatted for a human reader.

        >>> print(planets.comments())
        ; de421.bsp LOG FILE
        ;
        ; Created 2008-02-12/11:33:34.00.
        ...
        LEAPSECONDS_FILE    = naif0007.tls
        SPK_FILE            = de421.bsp
        ...
        """
        return self.spk.comments()
    def names(self):
        """Return all target names that are valid with this kernel.

        >>> pprint(planets.names())
        {0: ['SOLAR_SYSTEM_BARYCENTER', 'SSB', 'SOLAR SYSTEM BARYCENTER'],
         1: ['MERCURY_BARYCENTER', 'MERCURY BARYCENTER'],
         2: ['VENUS_BARYCENTER', 'VENUS BARYCENTER'],
         3: ['EARTH_BARYCENTER',
             'EMB',
        ...

        The result is a dictionary with target code keys and name lists
        as values.  The last name in each list is the one that Skyfield
        uses when printing information about a body.
        """
        d = defaultdict(list)
        for code, name in target_name_pairs:
            if code in self.codes:
                d[code].append(name)
        return dict(d)
    def decode(self, name):
        """Translate a target name into its integer code.

        >>> planets.decode('Venus')
        299

        Raises ``ValueError`` if you supply an unknown name, or
        ``KeyError`` if the target is missing from this kernel.  You can
        supply an integer code if you already have one and just want to
        check whether it is present in this kernel.
        """
        if isinstance(name, int):
            code = name
        else:
            name = name.upper()
            code = _targets.get(name)
            if code is None:
                raise ValueError('unknown SPICE target {0!r}'.format(name))
        if code not in self.codes:
            targets = ', '.join(_code_and_name(c) for c in self.codes)
            raise KeyError('kernel {0!r} is missing {1!r} -'
                           ' the targets it supports are: {2}'
                           .format(self.filename, name, targets))
        return code
def _build_compute(segment):
"""Build a Skyfield `compute` callback for the SPK `segment`."""
if segment.data_type == 2:
def compute(t):
position, velocity = segment.compute_and_differentiate(t.tdb)
return position / AU_KM, velocity / AU_KM
elif segment.data_type == 3:
def compute(t):
six = segment.compute(t.tdb)
return six[:3] / AU_KM, six[3:] * DAY_S / AU_KM
else:
raise ValueError('SPK data type {0} not yet supported segment'
.format(segment.data_type))
return compute
class Body(object):
    """A target body from a SPICE .bsp kernel file.

    Skyfield programmers usually ask a kernel object to look up and
    return a body object for them, instead of trying to instantiate this
    class directly:

    >>> planets = load('de421.bsp')
    >>> planets['ssb']
    <Body 0 'SOLAR SYSTEM BARYCENTER' from kernel 'de421.bsp'>
    >>> planets[299]
    <Body 299 'VENUS' from kernel 'de421.bsp'>
    """
    def __init__(self, ephemeris, code):
        self.ephemeris = ephemeris
        self.segments = ephemeris.segments
        self.code = code
    def __repr__(self):
        return '<Body {0} from kernel {1!r}>'.format(_code_and_name(self.code),
                                                     self.ephemeris.filename)
    @raise_error_for_deprecated_time_arguments
    def at(self, t):
        """Compute a `Barycentric` position for time `t`.

        The time `t` should be a `Time` object. The returned position
        will also offer a velocity, if the kernel supports it.
        """
        segments = self.segments
        segment_dict = dict((segment.target, segment) for segment in segments)
        # Walk segments from this body back to the root center, then sum
        # them root-first to get a barycentric position.
        chain = list(_center(self.code, segment_dict))[::-1]
        pos, vel = _tally((), chain, t)
        barycentric = Barycentric(pos, vel, t)
        barycentric.ephemeris = self.ephemeris
        return barycentric
    def geometry_of(self, body):
        """Return a `Geometry` path to another body.

        Given either a `Body` object, or else the name or integer code
        identifying a body in the same ephemeris as this one, compute
        the minimum number of segments necessary to determine their
        relative position and return a `Geometry` object.

        >>> g = earth.geometry_of(moon)
        >>> print(g)
        Geometry from center 399 to target 301 using:
            3 -> 399  EARTH BARYCENTER -> EARTH
            3 -> 301  EARTH BARYCENTER -> MOON
        """
        if not isinstance(body, Body):
            # Accept a name or integer code and resolve it in this kernel.
            code = self.ephemeris.decode(body)
            body = Body(self.ephemeris, code)
        center_chain, target_chain = _connect(self, body)
        return Geometry(self.code, body.code, center_chain, target_chain)
    def _observe_from_bcrs(self, observer):
        return observe(observer, self)
    def topos(self, latitude=None, longitude=None, latitude_degrees=None,
              longitude_degrees=None, elevation_m=0.0, x=0.0, y=0.0):
        """Return a `Topos` representing a place on Earth.

        See the `Topos` class for a description of the parameters.
        """
        # Only meaningful for the Earth body (NAIF code 399).
        assert self.code == 399
        from .toposlib import Topos
        t = Topos(latitude, longitude, latitude_degrees,
                  longitude_degrees, elevation_m, x, y)
        t.ephemeris = self.ephemeris
        t.segments += self.segments
        return t
    def satellite(self, text):
        """Return an Earth satellite parsed from ``text`` lines
        (presumably TLE elements - confirm with sgp4lib)."""
        assert self.code == 399
        from .sgp4lib import EarthSatellite
        lines = text.splitlines()
        return EarthSatellite(lines, self)
    def __call__(self, jd):
        """Deprecated alternative to the new at() method."""
        raise DeprecationError("""use method body.at(t), not the call body(t)

If you simply want your old Skyfield script to start working again,
downgrade to Skyfield version 0.4 using a command like:

        pip install skyfield==0.4

Otherwise, you can upgrade your script to modern Skyfield by finding
each place you called a body like a function to generate a position:

        position = body(t)

Instead, Skyfield now offers a method named at(t) to makes the
operation easier to read and more symmetrical with other method calls:

        position = body.at(t)

More documentation can be found at: http://rhodesmill.org/skyfield/""")
def observe(observer, target):
    """Return a light-time corrected astrometric position and velocity.
    Given an `observer` that is a `Barycentric` position somewhere in
    the solar system, compute where in the sky they will see the body
    `target`, by computing the light-time between them and figuring out
    where `target` was back when the light was leaving it that is now
    reaching the eyes or instruments of the `observer`.
    """
    # cposition, cvelocity = _tally([], self.center_chain, jd)
    # tposition, tvelocity = _tally([], self.target_chain, jd)
    t = observer.t
    ts = t.ts
    cposition = observer.position.au
    cvelocity = observer.velocity.au_per_d
    # First guess: where the target is "now", ignoring light travel.
    t_bary = target.at(t)
    tposition = t_bary.position.au
    distance = length_of(tposition - cposition)
    light_time0 = 0.0
    t_tdb = t.tdb
    # Fixed-point iteration on the light travel time, capped at 10 rounds.
    for i in range(10):
        light_time = distance / C_AUDAY
        delta = light_time - light_time0
        # NOTE(review): min()/max() require `delta` to be iterable, which
        # assumes vector times throughout — confirm a scalar Time can
        # never reach this code path.
        if -1e-12 < min(delta) and max(delta) < 1e-12:
            break
        # Re-evaluate the target at the earlier moment its light departed.
        t2 = ts.tdb(jd=t_tdb - light_time)
        t_bary = target.at(t2)
        tposition = t_bary.position.au
        distance = length_of(tposition - cposition)
        light_time0 = light_time
    else:
        # The for/else fires only if the loop never hit `break`.
        raise ValueError('observe_from() light-travel time'
                         ' failed to converge')
    tvelocity = t_bary.velocity.au_per_d
    pos = Astrometric(tposition - cposition, tvelocity - cvelocity, t)
    pos.light_time = light_time
    pos.observer = observer
    return pos
def _connect(body1, body2):
    """Return the two root-first segment chains linking body1 and body2."""
    # Index every available segment by the body it points at.
    by_target = {segment.target: segment
                 for segment in body1.segments + body2.segments}
    # Walk each body back toward its root, then flip so both chains
    # run root-first.
    chain1 = list(_center(body1.code, by_target))
    chain1.reverse()
    chain2 = list(_center(body2.code, by_target))
    chain2.reverse()
    if chain1[0].center != chain2[0].center:
        raise ValueError('cannot trace these bodies back to a common center')
    # Count the shared leading segments and drop them from both chains.
    shared = sum(1 for a, b in zip(chain1, chain2) if a.target == b.target)
    return chain1[shared:], chain2[shared:]
def _center(code, segment_dict):
    """Yield the chain of segments leading from `code` toward its root."""
    while True:
        try:
            segment = segment_dict[code]
        except KeyError:
            return  # reached a body that is not the target of any segment
        yield segment
        code = segment.center
class Geometry(object):
    """The kernel segments for predicting two bodies' relative position.
    Computing an instantaneous geometry can be faster than computing a
    normal astrometric observation because, instead of referencing both
    bodies back to the Solar System barycenter, the geometry only needs
    the segments that link them.
    For example, the more expensive ``earth.at(t).observe(moon)``
    operation will first compute an Earth position using two kernel
    segments::
    0 -> 3 SOLAR SYSTEM BARYCENTER -> EARTH BARYCENTER
    3 -> 399 EARTH BARYCENTER -> EARTH
    Then it will repeatedly compute the Moon's position as it
    narrows down the light travel time, using two segments::
    0 -> 3 SOLAR SYSTEM BARYCENTER -> EARTH BARYCENTER
    3 -> 301 EARTH BARYCENTER -> MOON
    But a geometry, because it can ignore the real physics required for
    light from one body to reach another, can take a shortcut and avoid
    the Solar System barycenter. It can also skip the iteration
    required to find the light travel time.
    >>> g = earth.geometry_of(moon)
    >>> print(g)
    Geometry from center 399 to target 301 using:
    3 -> 399 EARTH BARYCENTER -> EARTH
    3 -> 301 EARTH BARYCENTER -> MOON
    Instantaneous geometry positions can be appropriate when plotting a
    diagram of the solar system or of a particular planetary system from
    the point of view of a distant observer.
    """
    def __init__(self, center, target, center_chain, target_chain):
        # Integer codes of the two bodies, plus the segment chains that
        # lead from the common ancestor out to each of them.
        self.center = center
        self.target = target
        self.center_chain = center_chain
        self.target_chain = target_chain
    def __str__(self):
        """List the center/target codes and every segment in use."""
        segments = self.center_chain + self.target_chain
        lines = '\n'.join(_segment_line(s) for s in segments)
        return 'Geometry from center {0} to target {1} using:\n{2}'.format(
            self.center, self.target, lines)
    @raise_error_for_deprecated_time_arguments
    def at(self, t):
        """Compute instantaneous position and velocity between two bodies.
        The argument ``t`` should be a `Time`, and the return value will
        be the position of the target body relative to the
        center body. If the center is 0 "Solar System Barycenter" then
        the result will be `Barycentric`. Otherwise, it will be a plain
        `ICRF` position.
        """
        # _tally subtracts the center chain and adds the target chain,
        # yielding the target's offset as seen from the center.
        pos, vel = _tally(self.center_chain, self.target_chain, t)
        cls = Barycentric if self.center == 0 else ICRF
        return cls(pos, vel, t)
def _code_and_name(code):
    """Render `code` for display, appending its name when one is known."""
    name = _names.get(code)
    return str(code) if name is None else '{0} {1!r}'.format(code, name)
def _segment_line(segment):
    """Return a one-line human-readable description of `segment`."""
    center, target = segment.center, segment.target
    return ' {0:3} -> {1:<3} {2} -> {3}'.format(
        center, target,
        _names.get(center, 'unknown'), _names.get(target, 'unknown'))
def _tally(minus_chain, plus_chain, t):
    """Sum segment vectors at `t`: plus_chain added, minus_chain subtracted."""
    position = 0.0
    velocity = 0.0
    for sign, chain in ((-1, minus_chain), (+1, plus_chain)):
        for segment in chain:
            p, v = segment.compute(t)
            position += sign * p
            velocity += sign * v
    return position, velocity
| {
"repo_name": "ozialien/python-skyfield",
"path": "skyfield/jpllib.py",
"copies": "1",
"size": "15948",
"license": "mit",
"hash": 6297223191010244000,
"line_mean": 36.1748251748,
"line_max": 80,
"alpha_frac": 0.6192626035,
"autogenerated": false,
"ratio": 3.694232105628909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4813494709128909,
"avg_score": null,
"num_lines": null
} |
"""An interface between JPL ephemerides and Skyfield."""
import os
from collections import defaultdict
from jplephem.exceptions import OutOfRangeError
from jplephem.spk import SPK
from jplephem.names import target_name_pairs
from .constants import AU_KM, DAY_S
from .errors import EphemerisRangeError
from .timelib import compute_calendar_date
from .vectorlib import VectorFunction, VectorSum, _jpl_code_name_dict
# Reverse lookup table: SPICE body name -> integer target code.
_jpl_name_code_dict = {name: target for (target, name) in target_name_pairs}
class SpiceKernel(object):
    """Ephemeris file in NASA .bsp format.
    A "Spacecraft and Planet Kernel" (SPK) file from NASA provides
    |xyz| coordinates for bodies in the Solar System like the Sun,
    planets, moons, and spacecraft.
    You can download a .bsp file yourself and use this class to open it,
    or use the Skyfield ``load()`` function to automatically download a
    popular ephemeris. Once loaded, you can print this object to the
    screen to see a report on the segments that it includes:
    >>> planets = load('de421.bsp')
    >>> print(planets)
    SPICE kernel file 'de421.bsp' has 15 segments
    JD 2414864.50 - JD 2471184.50 (1899-07-28 through 2053-10-08)
    0 -> 1 SOLAR SYSTEM BARYCENTER -> MERCURY BARYCENTER
    0 -> 2 SOLAR SYSTEM BARYCENTER -> VENUS BARYCENTER
    0 -> 3 SOLAR SYSTEM BARYCENTER -> EARTH BARYCENTER
    0 -> 4 SOLAR SYSTEM BARYCENTER -> MARS BARYCENTER
    0 -> 5 SOLAR SYSTEM BARYCENTER -> JUPITER BARYCENTER
    0 -> 6 SOLAR SYSTEM BARYCENTER -> SATURN BARYCENTER
    0 -> 7 SOLAR SYSTEM BARYCENTER -> URANUS BARYCENTER
    0 -> 8 SOLAR SYSTEM BARYCENTER -> NEPTUNE BARYCENTER
    0 -> 9 SOLAR SYSTEM BARYCENTER -> PLUTO BARYCENTER
    0 -> 10 SOLAR SYSTEM BARYCENTER -> SUN
    3 -> 301 EARTH BARYCENTER -> MOON
    3 -> 399 EARTH BARYCENTER -> EARTH
    1 -> 199 MERCURY BARYCENTER -> MERCURY
    2 -> 299 VENUS BARYCENTER -> VENUS
    4 -> 499 MARS BARYCENTER -> MARS
    To retrieve the one or more vectors necessary to compute the
    position of a body relative to the Solar System barycenter, look up
    the body by its name or official SPICE identifying integer:
    >>> planets['earth']
    <VectorSum of 2 vectors:
    'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
    'de421.bsp' segment 3 EARTH BARYCENTER -> 399 EARTH>
    >>> planets[499]
    <VectorSum of 2 vectors:
    'de421.bsp' segment 0 SOLAR SYSTEM BARYCENTER -> 4 MARS BARYCENTER
    'de421.bsp' segment 4 MARS BARYCENTER -> 499 MARS>
    The result will be a :class:`~skyfield.vectorlib.VectorFunction`
    instance that you can ask for a position at a given input time.
    """
    def __init__(self, path):
        self.path = path
        self.filename = os.path.basename(path)
        self.spk = SPK.open(path)
        # Wrap each raw SPK segment in a Skyfield vector function.
        self.segments = [SPICESegment(self, s) for s in self.spk.segments]
        # Every body code this kernel mentions, whether as a segment
        # center or as a segment target.
        self.codes = set(s.center for s in self.segments).union(
            s.target for s in self.segments)
    def __repr__(self):
        return '<{0} {1!r}>'.format(type(self).__name__, self.path)
    def __str__(self):
        """Render the report of segments shown in the class docstring."""
        segments = self.spk.segments
        lines = ['SPICE kernel file {0!r} has {1} segments'
                 .format(self.filename, len(segments))]
        format_date = '{0}-{1:02}-{2:02}'.format
        start = end = None
        for s in segments:
            # Emit a date-range banner only when the range changes, so
            # consecutive segments sharing a range group beneath it.
            if start != s.start_jd or end != s.end_jd:
                start, end = s.start_jd, s.end_jd
                starts = format_date(*compute_calendar_date(int(start)))
                ends = format_date(*compute_calendar_date(int(end)))
                lines.append(' JD {0:.2f} - JD {1:.2f} ({2} through {3})'
                             .format(start, end, starts, ends))
            lines.append(_format_segment(s))
        return '\n'.join(lines)
    def close(self):
        """Close this ephemeris file."""
        self.spk.close()
    def comments(self):
        """Return the comments string of this kernel.
        The resulting string often contains embedded newlines, and is
        formatted for a human reader.
        >>> print(planets.comments())
        ; de421.bsp LOG FILE
        ;
        ; Created 2008-02-12/11:33:34.00.
        ...
        LEAPSECONDS_FILE = naif0007.tls
        SPK_FILE = de421.bsp
        ...
        """
        return self.spk.comments()
    def names(self):
        """Return all target names that are valid with this kernel.
        >>> pprint(planets.names())
        {0: ['SOLAR_SYSTEM_BARYCENTER', 'SSB', 'SOLAR SYSTEM BARYCENTER'],
        1: ['MERCURY_BARYCENTER', 'MERCURY BARYCENTER'],
        2: ['VENUS_BARYCENTER', 'VENUS BARYCENTER'],
        3: ['EARTH_BARYCENTER',
        'EMB',
        ...
        The result is a dictionary with target code keys and name lists
        as values. The last name in each list is the one that Skyfield
        uses when printing information about a body.
        """
        # Keep only the official names whose codes this kernel covers.
        d = defaultdict(list)
        for code, name in target_name_pairs:
            if code in self.codes:
                d[code].append(name)
        return dict(d)
    def decode(self, name):
        """Translate a target name into its integer code.
        >>> planets.decode('Venus')
        299
        Raises ``ValueError`` if you supply an unknown name, or
        ``KeyError`` if the target is missing from this kernel. You can
        supply an integer code if you already have one and just want to
        check whether it is present in this kernel.
        """
        if isinstance(name, int):
            code = name
        else:
            # Names are stored upper-case in the lookup table.
            name = name.upper()
            code = _jpl_name_code_dict.get(name)
            if code is None:
                raise ValueError('unknown SPICE target {0!r}'.format(name))
        if code not in self.codes:
            targets = ', '.join(_format_code_and_name(c) for c in self.codes)
            raise KeyError('kernel {0!r} is missing {1!r} -'
                           ' the targets it supports are: {2}'
                           .format(self.filename, name, targets))
        return code
    def __getitem__(self, target):
        """Return a vector function for computing the location of `target`."""
        target = self.decode(target)
        segments = self.segments
        segment_dict = dict((segment.target, segment) for segment in segments)
        # Follow center pointers from `target` back toward the root of
        # the segment tree, then reverse so the chain runs root-first.
        chain = tuple(_center(target, segment_dict))
        if len(chain) == 1:
            return chain[0]
        chain = chain[::-1]
        center = chain[0].center
        target = chain[-1].target
        return VectorSum(center, target, chain)
    def __contains__(self, name_or_code):
        # Membership accepts either an integer code or a body name.
        if isinstance(name_or_code, int):
            code = name_or_code
        else:
            code = _jpl_name_code_dict.get(name_or_code.upper())
        return code in self.codes
class SPICESegment(VectorFunction):
    """Vector function wrapping one segment of a SPICE kernel.

    Acts as a factory: instantiating SPICESegment actually returns one
    of the concrete subclasses below, chosen by the segment's SPK data
    type (2 = position-only Chebyshev, 3 = position and velocity).
    """
    def __new__(cls, ephemeris, spk_segment):
        if spk_segment.data_type == 2:
            return object.__new__(ChebyshevPosition)
        if spk_segment.data_type == 3:
            return object.__new__(ChebyshevPositionVelocity)
        raise ValueError('SPK data type {0} not yet supported'
                         .format(spk_segment.data_type))
    def __init__(self, ephemeris, spk_segment):
        # Runs for whichever subclass __new__ selected above.
        self.ephemeris = ephemeris
        self.center = spk_segment.center
        self.target = spk_segment.target
        self.spk_segment = spk_segment
    @property
    def vector_name(self):
        # Used when printing this vector (see SpiceKernel docstring).
        return '{0!r} segment'.format(self.ephemeris.path)
    def time_range(self, ts):
        """Return this segment's (start, end) coverage as TDB times."""
        s = self.spk_segment
        return ts.tdb_jd(s.start_jd), ts.tdb_jd(s.end_jd)
class ChebyshevPosition(SPICESegment):
    """Segment (SPK data type 2) whose polynomials give position only.

    Velocity is recovered by differentiating the position polynomials.
    """
    def _at(self, t):
        segment = self.spk_segment
        try:
            position, velocity = segment.compute_and_differentiate(
                t.whole, t.tdb_fraction)
        except OutOfRangeError as e:
            # Rewrap the low-level jplephem error in a Skyfield error that
            # carries the segment's coverage window and the offending mask.
            start_time = t.ts.tdb(jd=segment.start_jd)
            end_time = t.ts.tdb(jd=segment.end_jd)
            # TODO: switch to calendar dates in TDB to produce round numbers?
            text = ('ephemeris segment only covers dates %s through %s UT'
                    % (start_time.utc_iso(' '), end_time.utc_iso(' ')))
            mask = e.out_of_range_times
            segment = self.spk_segment
            e = EphemerisRangeError(text, start_time, end_time, mask, segment)
            e.__cause__ = None  # avoid exception chaining in Python 3
            raise e
        # jplephem works in km and km/day; Skyfield wants au and au/day.
        return position / AU_KM, velocity / AU_KM, None, None
class ChebyshevPositionVelocity(SPICESegment):
    """Segment (SPK data type 3) whose polynomials give position and velocity."""
    def _at(self, t):
        # The first three components are position, the last three velocity.
        state = self.spk_segment.compute(t.whole, t.tdb_fraction)
        position_au = state[:3] / AU_KM
        velocity_au_per_d = state[3:] * DAY_S / AU_KM
        return position_au, velocity_au_per_d, None, None
def _center(code, segment_dict):
    """Yield each segment linking `code` back toward the chain's root."""
    segment = segment_dict.get(code)
    while segment is not None:
        yield segment
        segment = segment_dict.get(segment.center)
def _format_code_and_name(code):
    """Return `code` as a string, with its official name appended if known."""
    name = _jpl_code_name_dict.get(code)
    return str(code) if name is None else '{0} {1}'.format(code, name)
def _format_segment(segment):
    """Return a one-line display of `segment` for the kernel report."""
    center, target = segment.center, segment.target
    return ' {0:3} -> {1:<3} {2} -> {3}'.format(
        center, target,
        _jpl_code_name_dict.get(center, 'unknown'),
        _jpl_code_name_dict.get(target, 'unknown'))
| {
"repo_name": "skyfielders/python-skyfield",
"path": "skyfield/jpllib.py",
"copies": "1",
"size": "9581",
"license": "mit",
"hash": 4450481301664066000,
"line_mean": 36.8695652174,
"line_max": 78,
"alpha_frac": 0.5980586578,
"autogenerated": false,
"ratio": 3.569672131147541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9664685488471411,
"avg_score": 0.0006090600952261031,
"num_lines": 253
} |
"""An interface between Skyfield and the Python ``sgp4`` library."""
from numpy import array, cross, einsum, zeros_like
from sgp4.earth_gravity import wgs72
from sgp4.io import twoline2rv
from sgp4.propagation import sgp4
from .constants import AU_KM, DAY_S, T0, tau
from .errors import raise_error_for_deprecated_time_arguments
from .functions import rot_x, rot_y, rot_z
from .positionlib import Apparent, Geocentric, ITRF_to_GCRS
# important ones:
# jdsatepoch
# bstar
# inclo - inclination
# nodeo - right ascension of ascending node
# ecco - eccentricity
# argpo - argument of perigee
# mo - mean anomaly
# no - mean motion
# Minutes in one day, for converting day offsets to SGP4's minute argument.
_minutes_per_day = 1440.
class EarthSatellite(object):
    """An Earth satellite loaded from a TLE file and propagated with SGP4.

    The element set is taken from the final two lines of ``lines`` and
    propagated with the WGS72 gravity constants, matching the SGP4
    reference implementation.
    """
    def __init__(self, lines, earth):
        sat = twoline2rv(*lines[-2:], whichconst=wgs72)
        self._sgp4_satellite = sat
        self._earth = earth
        # TODO: Drat. Where should this Timescale come from?
        # Should they have to pass it in?
        from skyfield import api
        self.epoch = api.load.timescale().utc(sat.epochyr, 1, sat.epochdays)
    def __repr__(self):
        sat = self._sgp4_satellite
        return '<EarthSatellite number={1!r} epoch={0}>'.format(
            self.epoch.utc_iso(), sat.satnum)
    def _position_and_velocity_TEME_km(self, t):
        """Return the raw true equator mean equinox (TEME) vectors from SGP4.
        Returns a tuple of NumPy arrays ``([x y z], [xdot ydot zdot])``
        expressed in kilometers and kilometers per second. Note that we
        assume the TLE epoch to be a UTC date, per AIAA 2006-6753.
        """
        sat = self._sgp4_satellite
        epoch = sat.jdsatepoch
        # Use the module constant rather than repeating the literal 1440.
        minutes_past_epoch = (t._utc_float() - epoch) * _minutes_per_day
        if getattr(minutes_past_epoch, 'shape', None):
            # Array of times: propagate each instant separately, keeping
            # a per-instant error message alongside the vectors.
            position = []
            velocity = []
            error = []
            for m in minutes_past_epoch:
                p, v = sgp4(sat, m)
                position.append(p)
                velocity.append(v)
                error.append(sat.error_message)
            return array(position).T, array(velocity).T, error
        else:
            position, velocity = sgp4(sat, minutes_past_epoch)
            return array(position), array(velocity), sat.error_message
    def _compute_GCRS(self, t):
        """Compute where satellite is in space on a given date."""
        rTEME, vTEME, error = self._position_and_velocity_TEME_km(t)
        # Convert km -> au and km/s -> au/day.
        rTEME /= AU_KM
        vTEME /= AU_KM
        vTEME *= DAY_S
        rITRF, vITRF = TEME_to_ITRF(t.ut1, rTEME, vTEME)
        rGCRS = ITRF_to_GCRS(t, rITRF)
        vGCRS = zeros_like(rGCRS)  # todo: someday also compute vGCRS?
        return rGCRS, vGCRS, error
    @raise_error_for_deprecated_time_arguments
    def gcrs(self, t):
        """Return a GCRS position for this Earth satellite.
        Uses standard SGP4 theory to predict the satellite location.
        """
        # (local renamed from the misspelled `velociy_au_per_d`)
        position_au, velocity_au_per_d, error = self._compute_GCRS(t)
        g = Geocentric(position_au, velocity_au_per_d, t)
        g.sgp4_error = error
        return g
    def _observe_from_bcrs(self, observer):
        """Return an `Apparent` position of this satellite for `observer`."""
        # TODO: what if someone on Mars tries to look at the ISS?
        t = observer.t
        rGCRS, vGCRS, error = self._compute_GCRS(t)
        # (two dead statements that computed the GCRS differences and
        # discarded the results were removed here)
        g = Apparent(rGCRS - observer.rGCRS, vGCRS - observer.vGCRS, t)
        g.sgp4_error = error
        g.observer = observer
        # g.distance = euclidian_distance
        return g
# Length of one second, measured in days.
_second = 1.0 / (24.0 * 60.0 * 60.0)
def theta_GMST1982(jd_ut1):
    """Return GMST 1982 and its rate of change for the UT1 Julian date.

    This angle relates the idiosyncratic True Equator Mean Equinox
    (TEME) frame of reference used by SGP4 to the more standard Pseudo
    Earth Fixed (PEF) frame; coefficients are from AIAA 2006-6753
    Appendix C.
    """
    # Julian centuries of UT1 elapsed since the reference epoch T0.
    centuries = (jd_ut1 - T0) / 36525.0
    # GMST in seconds of time, and its derivative with respect to centuries.
    gmst_secs = 67310.54841 + (8640184.812866 + (0.093104 + (-6.2e-6) * centuries) * centuries) * centuries
    dgmst = 8640184.812866 + (0.093104 * 2.0 + (-6.2e-6 * 3.0) * centuries) * centuries
    # Fold in the fraction of the current day and convert to radians.
    theta = (jd_ut1 % 1.0 + gmst_secs * _second % 1.0) * tau
    theta_dot = (1.0 + dgmst * _second / 36525.0) * tau
    return theta, theta_dot
def TEME_to_ITRF(jd_ut1, rTEME, vTEME, xp=0.0, yp=0.0):
    """Convert TEME position and velocity into standard ITRS coordinates.
    This converts a position and velocity vector in the idiosyncratic
    True Equator Mean Equinox (TEME) frame of reference used by the SGP4
    theory into vectors into the more standard ITRS frame of reference.
    The velocity should be provided in units per day, not per second.
    From AIAA 2006-6753 Appendix C.
    """
    theta, theta_dot = theta_GMST1982(jd_ut1)
    # `zero` inherits theta_dot's shape, so the array below broadcasts
    # for scalar and vector inputs alike.
    zero = theta_dot * 0.0
    angular_velocity = array([zero, zero, -theta_dot])
    # Rotate about z by -GMST to move from TEME into the PEF frame.
    R = rot_z(-theta)
    if len(rTEME.shape) == 1:
        # Single vector: ordinary matrix products.
        rPEF = (R).dot(rTEME)
        vPEF = (R).dot(vTEME) + cross(angular_velocity, rPEF)
    else:
        # Array of vectors: einsum applies the rotation across the
        # trailing dimensions, and cross() is told which axes hold the
        # 3-vectors.
        rPEF = einsum('ij...,j...->i...', R, rTEME)
        vPEF = einsum('ij...,j...->i...', R, vTEME) + cross(
            angular_velocity, rPEF, 0, 0).T
    if xp == 0.0 and yp == 0.0:
        # No polar-motion correction requested; PEF is the answer.
        rITRF = rPEF
        vITRF = vPEF
    else:
        # Apply the polar-motion rotation W built from xp and yp.
        W = (rot_x(yp)).dot(rot_y(xp))
        rITRF = (W).dot(rPEF)
        vITRF = (W).dot(vPEF)
    return rITRF, vITRF
| {
"repo_name": "ozialien/python-skyfield",
"path": "skyfield/sgp4lib.py",
"copies": "1",
"size": "5410",
"license": "mit",
"hash": 7817404253149138000,
"line_mean": 33.4585987261,
"line_max": 77,
"alpha_frac": 0.6112754159,
"autogenerated": false,
"ratio": 3.0055555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9114063303878971,
"avg_score": 0.0005535335153169548,
"num_lines": 157
} |
"""An interface for a directory watcher."""
import abc
import collections
import logging
import os
import sys
import enum
_LOGGER = logging.getLogger(__name__)
class DirWatcherEvent(enum.Enum):
    """DirWatcher's emitted event types.
    """
    # W0232: Class has no __init__ method
    # pylint: disable=W0232
    CREATED = 'created'
    DELETED = 'deleted'
    MODIFIED = 'modified'
    #: Fake event returned when more events were received than allowed to
    #: process in ``process_events``
    MORE_PENDING = 'more events pending'
class DirWatcher(object):
    """Directory watcher base, invoking callbacks on file create/delete events.

    Subclasses implement the platform primitives (`_add_dir`,
    `_remove_dir`, `_wait_for_events`, `_read_events`); this base class
    manages the watch bookkeeping, the event queue, and callback
    dispatch.
    """
    # NOTE(review): assigning `__metaclass__` selects a metaclass only on
    # Python 2; on Python 3 this attribute is inert and the abstract
    # methods below are not enforced — confirm the targeted Python version.
    __metaclass__ = abc.ABCMeta
    __slots__ = (
        'event_list',
        'on_created',
        'on_deleted',
        'on_modified',
        '_watches'
    )
    def __init__(self, watch_dir=None):
        # Queue of (DirWatcherEvent, path) tuples awaiting dispatch.
        self.event_list = collections.deque()
        # Callbacks default to no-ops until the caller assigns real ones.
        self.on_created = self._noop
        self.on_deleted = self._noop
        self.on_modified = self._noop
        # Maps platform watch id -> watched directory real path.
        self._watches = {}
        if watch_dir is not None:
            self.add_dir(watch_dir)
    @abc.abstractmethod
    def _add_dir(self, watch_dir):
        """Add `directory` to the list of watched directories.
        :param watch_dir: watch directory real path
        :returns: watch id
        """
        return
    def add_dir(self, directory):
        """Add `directory` to the list of watched directories.
        """
        watch_dir = os.path.realpath(directory)
        wid = self._add_dir(watch_dir)
        _LOGGER.info('Watching directory %r (id: %r)', watch_dir, wid)
        self._watches[wid] = watch_dir
    @abc.abstractmethod
    def _remove_dir(self, watch_id):
        """Remove `directory` from the list of watched directories.
        :param watch_id: watch id
        """
        return
    def remove_dir(self, directory):
        """Remove `directory` from the list of watched directories.
        """
        watch_dir = os.path.realpath(directory)
        # Reverse-lookup the watch id registered for this directory.
        for wid, w_dir in self._watches.items():
            if w_dir == watch_dir:
                break
        else:
            wid = None
            # `warning` replaces the deprecated Logger.warn alias.
            _LOGGER.warning('Directory %r not currently watched', watch_dir)
            return
        _LOGGER.info('Unwatching directory %r (id: %r)', watch_dir, wid)
        del self._watches[wid]
        self._remove_dir(wid)
    @staticmethod
    def _noop(event_src):
        """Default NOOP callback"""
        _LOGGER.debug('event on %r', event_src)
        return None
    @abc.abstractmethod
    def _wait_for_events(self, timeout):
        """Wait for directory change event for up to ``timeout`` seconds.
        :param timeout:
            Time in seconds to wait for an event (-1 means forever)
        :returns:
            ``True`` if events were received, ``False`` otherwise.
        """
        return
    def wait_for_events(self, timeout=-1):
        """Wait for directory change event for up to ``timeout`` seconds.
        :param timeout:
            Time in seconds to wait for an event (-1 means forever)
        :returns:
            ``True`` if events were received, ``False`` otherwise.
        """
        # We already have cached events
        if self.event_list:
            return True
        if timeout != -1:
            timeout *= 1000  # timeout is in milliseconds
        return self._wait_for_events(timeout)
    @abc.abstractmethod
    def _read_events(self):
        """Reads the events from the system and formats as ``DirWatcherEvent``.
        :returns: List of ``(DirWatcherEvent, <path>)``
        """
        return
    def process_events(self, max_events=0, resume=False):
        """Process events received.
        This function will parse all received events and invoke the registered
        callbacks accordingly.
        :param ``int`` max_events:
            Maximum number of events to process
        :param ``bool`` resume:
            Continue processing any event that was still in the queue (do not
            read new ones).
        :returns ``list``:
            List of ``(DirWatcherEvent, <path>, <callback_return>)``.
        """
        if max_events <= 0:
            max_events = sys.maxsize
        # If we are out of cached events, get more from inotify
        if not self.event_list and not resume:
            self.event_list.extend(self._read_events())
        results = []
        step = 0
        while self.event_list:
            if step >= max_events:
                # We reached the max number of events we could process,
                # so signal the caller that more remain in the queue.
                results.append(
                    (
                        DirWatcherEvent.MORE_PENDING,
                        None,
                        None,
                    )
                )
                break
            step += 1
            event, src_path = self.event_list.popleft()
            res = None
            # E1128: Assigning to function call which only returns None
            if event == DirWatcherEvent.MODIFIED:
                res = self.on_modified(src_path)  # pylint: disable=E1128
            elif event == DirWatcherEvent.DELETED:
                res = self.on_deleted(src_path)  # pylint: disable=E1128
            elif event == DirWatcherEvent.CREATED:
                res = self.on_created(src_path)  # pylint: disable=E1128
            else:
                # Unknown event type: drop it without recording a result.
                continue
            results.append(
                (
                    event,
                    src_path,
                    res
                )
            )
        return results
| {
"repo_name": "ThoughtWorksInc/treadmill",
"path": "treadmill/dirwatch/dirwatch_base.py",
"copies": "3",
"size": "5583",
"license": "apache-2.0",
"hash": -3772069707316996600,
"line_mean": 27.9274611399,
"line_max": 79,
"alpha_frac": 0.5479133083,
"autogenerated": false,
"ratio": 4.297921478060046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6345834786360046,
"avg_score": null,
"num_lines": null
} |
"""An interface for a directory watcher.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import collections
import logging
import os
import sys
import enum
import six
_LOGGER = logging.getLogger(__name__)
class DirWatcherEvent(enum.Enum):
    """DirWatcher's emitted event types.
    """
    # W0232: Class has no __init__ method
    # pylint: disable=W0232
    CREATED = 'created'
    DELETED = 'deleted'
    MODIFIED = 'modified'
    #: Fake event returned when more events were received than allowed to
    #: process in ``process_events``
    MORE_PENDING = 'more events pending'
# six.add_metaclass applies ABCMeta portably on both Python 2 and 3.
@six.add_metaclass(abc.ABCMeta)
class DirWatcher(object):
    """Directory watcher base, invoking callbacks on file create/delete events.
    """
    __slots__ = (
        'event_list',
        'on_created',
        'on_deleted',
        'on_modified',
        '_watches'
    )
    def __init__(self, watch_dir=None):
        # Queue of (DirWatcherEvent, path) tuples awaiting dispatch.
        self.event_list = collections.deque()
        # Callbacks default to no-ops until the caller assigns real ones.
        self.on_created = self._noop
        self.on_deleted = self._noop
        self.on_modified = self._noop
        # Maps platform watch id -> watched directory real path.
        self._watches = {}
        if watch_dir is not None:
            self.add_dir(watch_dir)
    @abc.abstractmethod
    def _add_dir(self, watch_dir):
        """Add `directory` to the list of watched directories.
        :param watch_dir: watch directory real path
        :returns: watch id
        """
        return
    def add_dir(self, directory):
        """Add `directory` to the list of watched directories.
        """
        watch_dir = os.path.realpath(directory)
        wid = self._add_dir(watch_dir)
        _LOGGER.info('Watching directory %r (id: %r)', watch_dir, wid)
        self._watches[wid] = watch_dir
    @abc.abstractmethod
    def _remove_dir(self, watch_id):
        """Remove `directory` from the list of watched directories.
        :param watch_id: watch id
        """
        return
    def remove_dir(self, directory):
        """Remove `directory` from the list of watched directories.
        """
        watch_dir = os.path.realpath(directory)
        # Reverse-lookup the watch id registered for this directory.
        for wid, w_dir in self._watches.items():
            if w_dir == watch_dir:
                break
        else:
            wid = None
            _LOGGER.warning('Directory %r not currently watched', watch_dir)
            return
        _LOGGER.info('Unwatching directory %r (id: %r)', watch_dir, wid)
        del self._watches[wid]
        self._remove_dir(wid)
    @staticmethod
    def _noop(event_src):
        """Default NOOP callback"""
        _LOGGER.debug('event on %r', event_src)
        return None
    @abc.abstractmethod
    def _wait_for_events(self, timeout):
        """Wait for directory change event for up to ``timeout`` seconds.
        :param timeout:
            Time in seconds to wait for an event (-1 means forever)
        :returns:
            ``True`` if events were received, ``False`` otherwise.
        """
        return
    def wait_for_events(self, timeout=-1):
        """Wait for directory change event for up to ``timeout`` seconds.
        :param timeout:
            Time in seconds to wait for an event (-1 means forever)
        :returns:
            ``True`` if events were received, ``False`` otherwise.
        """
        # We already have cached events
        if self.event_list:
            return True
        if timeout != -1:
            timeout *= 1000  # timeout is in milliseconds
        return self._wait_for_events(timeout)
    @abc.abstractmethod
    def _read_events(self):
        """Reads the events from the system and formats as ``DirWatcherEvent``.
        :returns: List of ``(DirWatcherEvent, <path>)``
        """
        return
    def process_events(self, max_events=0, resume=False):
        """Process events received.
        This function will parse all received events and invoke the registered
        callbacks accordingly.
        :param ``int`` max_events:
            Maximum number of events to process
        :param ``bool`` resume:
            Continue processing any event that was still in the queue (do not
            read new ones).
        :returns ``list``:
            List of ``(DirWatcherEvent, <path>, <callback_return>)``.
        """
        if max_events <= 0:
            max_events = sys.maxsize
        # If we are out of cached events, get more from inotify
        if not self.event_list and not resume:
            self.event_list.extend(self._read_events())
        results = []
        step = 0
        while self.event_list:
            if step >= max_events:
                # We reach the max number of events we could process;
                # signal the caller that more remain queued.
                results.append(
                    (
                        DirWatcherEvent.MORE_PENDING,
                        None,
                        None,
                    )
                )
                break
            step += 1
            event, src_path = self.event_list.popleft()
            res = None
            # E1128: Assigning to function call which only returns None
            if event == DirWatcherEvent.MODIFIED:
                res = self.on_modified(src_path)  # pylint: disable=E1128
            elif event == DirWatcherEvent.DELETED:
                res = self.on_deleted(src_path)  # pylint: disable=E1128
            elif event == DirWatcherEvent.CREATED:
                res = self.on_created(src_path)  # pylint: disable=E1128
            else:
                # Unknown event type: drop it without recording a result.
                continue
            results.append(
                (
                    event,
                    src_path,
                    res
                )
            )
        return results
| {
"repo_name": "captiosus/treadmill",
"path": "treadmill/dirwatch/dirwatch_base.py",
"copies": "2",
"size": "5749",
"license": "apache-2.0",
"hash": 7836205973088194000,
"line_mean": 27.6019900498,
"line_max": 79,
"alpha_frac": 0.5550530527,
"autogenerated": false,
"ratio": 4.277529761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 201
} |
# An interface for controlling LEDs via the Multimorphic PD-LED board,
# for use with P-ROC pinball hardware. This code is a module for pyprocgame,
# written by Adam Preble and Gerry Stellenberg
# More information is avaible at http://pyprocgame.pindev.org/
# and http://pinballcontrollers.com/
# This module was written by Brian Madden, brian@missionpinball.com
# Version 0.1 - April 20, 2014
# This code is released under the MIT License.
#The MIT License (MIT)
#Copyright (c) 2013 Brian Madden
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import logging
import yaml
import weakref
import time
import uuid
class LEDshow(object):
"""Represents a LEDshow which is a sequential list of LEDs, colors, and
timings that can be played back. Individual shows can be started, stopped,
reset, etc. Shows can be played at any speed, sped up, slowed down, etc.
Parameters:
'game': Parent game object.
'filename': File (and path) of the LEDshow yaml file
'actions': List of LEDshow actions which are passed directly instead of
read from a yaml file
If you pass *filename*, it will process the actions based on that file.
Otherwise it will look for the actions from the list passed via *actions*.
Either *filename* or *actions* is required.
"""
    def __init__(self, game, filename=None, actions=None):
        """Set up the show's default playback state, then load its actions.

        Exactly one of *filename* or *actions* should be provided; see
        the class docstring for details.
        """
        super(LEDshow, self).__init__()
        self.logger = logging.getLogger("LEDshow")
        self.game = game
        self.active_LEDs = {}  # current active LEDs (and fades) for this show
        # active_LEDs keys are LEDname
        # vals are color, prevcolor, fadestart, fadeend, dest_color
        self.tocks_per_sec = 32  # how many steps per second this show runs at
        # you can safely read this value to determine the current playback rate
        # But don't update it directly to change the speed of a running show.
        # Use the change_speed() method instead.
        self.secs_per_tock = 0  # calculated based on tocks_per_sec
        self.repeat = False  # whether this show repeats when finished
        self.num_repeats = 0  # if self.repeat=True, how many times it repeats
        # self.num_repeats = 0 means it repeats indefinitely until stopped
        self.current_repeat_step = 0  # tracks which repeat we're on, used with
        # num_repeats above
        self.hold = False  # hold the LED states when the show ends.
        self.priority = 0  # relative priority of this show
        self.ending = False  # show will end after the current tock ends
        self.running = False  # is this show running currently?
        self.blend = False  # when an LED is off in this show, should it allow
        # lower priority LEDs to show through?
        self.LEDshow_actions = None  # show commands from LEDshow yaml file
        self.current_location = 0  # index of which command block (tock) a
        # running show is in need to be serviced.
        self.last_action_time = 0.0  # when the last action happened
        self.total_locations = 0  # total number of action locations
        self.current_tock = 0  # index of which tock a running show is in
        self.next_action_time = 0  # time of when the next action happens
        self.callback = None  # if the show should call something when it ends
        # naturally. (Not invoked if show is manually stopped)
        if filename:
            self._load(filename)
        elif actions:
            self._process(actions)
        else:
            self.logger.warning("Couldn't set up LEDshow as we didn't receive "
                                "a LEDshow file or action list as input!")
def _load(self, filename):
# Loads a LEDshow yaml file from disk
self.logger.info("Loading LED show: %s", filename)
try:
LEDshow_actions = yaml.load(open(filename, 'r'))
except:
self.logger.error("Error loading LED show: %s", filename)
else:
self._process(LEDshow_actions)
def _process(self, LEDshow_actions):
# Process a new LEDshow's actions. This is a separate method from
# load so we can also use it to process new LEDshows that we load in
# ways other than from LEDshow files from disk. For example, for LED
# scripts.
self.logger.info("Processing the LEDshow")
# add this LEDshow to LEDcontoller's list of registered shows
# use a weakref so garbage collection will del it if we delete the show
self.game.LEDs.registered_shows.append(weakref.proxy(self))
self.LEDshow_actions = LEDshow_actions
# count how many total locations are in the show. We need this later
# so we can know when we're at the end of a show
self.total_locations = len(self.LEDshow_actions)
if not self.game.LEDs.initialized:
self.game.LEDs._initialize()
    def play(self, repeat=False, priority=0, blend=False, hold=False,
             tocks_per_sec=32, start_location=-1, callback=None,
             num_repeats=0):
        """Plays a LEDshow. There are many parameters you can use here which
        affect how the show is played. This includes things like the playback
        speed, priority, whether this show blends with others, etc. These are
        all set when the show plays. (For example, you could have a LEDshow
        file which lights a bunch of LEDs sequentially in a circle pattern,
        but you can have that circle "spin" as fast as you want depending on
        how you play the show.)

        :param boolean repeat: True/False, whether the show repeats when it's
            done.
        :param integer priority: The relative priority of this show. If
            there's ever a situation where multiple shows (or LED commands)
            want to control the same LED, the one with the higher priority
            will win. ("Higher" means a bigger number, so a show with
            priority 2 will override a priority 1.)
        :param boolean blend: Controls whether this show "blends" with lower
            priority shows and scripts. For example, if this show turns a LED
            off, but a lower priority show has that LED set to blue, then the
            LED will "show through" as blue while it's off here. If you don't
            want that behavior, set blend to be False. Then off here will be
            off for sure (unless there's a higher priority show or command
            that turns the LED on).
        :param boolean hold: If True, then when this LEDshow ends, all the
            LEDs that are on at that time will remain on. If False, it turns
            them all off when the show ends.
        :param integer tocks_per_sec: This is how fast your show runs.
            ("Playback speed," in other words.) Your LEDshow files specify
            action times in terms of 'tocks', like "make this LED red for 3
            tocks, then off for 4 tocks, then a different LED on for 6
            tocks." When you play a show, you specify how many tocks per
            second you want it to play. Default is 32, but you might even
            want tocks_per_sec of only 1 or 2 if your show doesn't need to
            move that fast. Note this does not affect fade rates. So you can
            have tocks_per_sec of 1 but still have LEDs fade on and off at
            whatever rate you want. Also the term "tocks" was chosen so as
            not to confuse it with "ticks" which is used by the game loop.
        :param integer start_location: Which position in the show file the
            show should start in. Usually this is 0 but it's nice to start
            part way through. Also used for restarting shows that you paused.
        :param object callback: A callback function that is invoked when the
            show ends naturally. (Not invoked if the show is manually
            stopped.)
        :param integer num_repeats: How many times you want this show to
            repeat before stopping. A value of 0 means that it repeats
            indefinitely. Note this only works if you also have repeat=True.

        Example usage from a game mode:

        Load the show (typically done once when the game is booting up)::

            self.show1 = LEDs.LEDshow(self.game, "LEDshows\\show1.yaml")

        Play the show::

            self.show1.play(repeat=True, tocks_per_sec=10, priority=3)

        Stop the show::

            self.show1.stop()

        Play the show again, but twice as fast as before::

            self.show1.play(repeat=True, tocks_per_sec=20, priority=3)

        Play the show so it only repeats twice and then stops itself::

            self.show1.play(repeat=True, tocks_per_sec=20, priority=3,
                            num_repeats=2)

        Play two shows at once::

            self.show1.play(repeat=True, tocks_per_sec=20, priority=3)
            self.show2.play(repeat=True, tocks_per_sec=5, priority=3)

        Play two shows at once, but have one be a higher priority meaning it
        will "win" if both shows want to control the same LED at the same
        time::

            self.show1.play(repeat=True, tocks_per_sec=20, priority=4)
            self.show2.play(repeat=True, tocks_per_sec=5, priority=3)

        etc.
        """
        self.repeat = repeat
        self.priority = int(priority)
        self.blend = blend
        self.hold = hold
        self.tocks_per_sec = tocks_per_sec  # also referred to as 'tps'
        self.secs_per_tock = 1/float(tocks_per_sec)
        self.callback = callback
        self.num_repeats = num_repeats
        if start_location >= 0:
            # if you don't specify a start location, it will start where it
            # left off (if you stopped it with reset=False). If the show has
            # never been run, it will start at 0 per the initialization
            self.current_location = start_location
        self.game.LEDs._run_show(self)
    def stop(self, reset=True, hold=False):
        """Stops a LEDshow.

        :param boolean reset: True means it resets the show to the beginning.
            False keeps it where it is so the show can pick up where it left
            off.
        :param boolean hold: Lets you specify that the LEDs will be held in
            their current state after the show ends. Note that if you have a
            show that's set to hold but you pass hold=False here, it won't
            work. In that case you'd have to set <show>.hold=False and then
            call this method to stop the show.
        """
        # hold can only be latched *on* here; passing hold=False never
        # clears a hold that was already set on the show (see note above).
        if hold:
            self.hold = True
        self.game.LEDs._end_show(self, reset)
    def change_speed(self, tocks_per_sec=1):
        """Changes the playback speed of a running LEDshow.

        :param integer tocks_per_sec: The new tocks-per-second play rate.

        If you want to change the playback speed by a percentage, you can
        read the current rate via the LEDshow's ``tocks_per_sec`` attribute.
        So if you want to double the playback speed of your show, you could
        do something like::

            self.your_show.change_speed(self.your_show.tocks_per_sec*2)

        Note that you can't just update the show's ``tocks_per_sec``
        directly because we also need to update ``self.secs_per_tock``.
        """
        self.tocks_per_sec = tocks_per_sec
        self.secs_per_tock = 1/float(tocks_per_sec)
    def _advance(self):
        # Advances through the LEDshow. This method schedules all the LEDs
        # that need to be serviced now, tracks the current show's location,
        # handles repeats, and updates the show's active_LEDs list with what
        # they all should be.

        # if this show is done and we're just waiting for the final tock
        if self.ending:
            self.game.LEDs._end_show(self)
            return
        action_loop_count = 0  # Tracks how many loops we've done here
        # Keep advancing until we've caught up to the controller's clock;
        # a slow game loop may mean several tocks are due at once.
        while self.next_action_time <= self.game.LEDs.current_time:
            action_loop_count += 1
            # Set the next action time & step to the next location
            self.next_action_time = ((self.LEDshow_actions[self.current_location]
                                      ['tocks'] * self.secs_per_tock) +
                                     self.last_action_time)
            self.last_action_time = self.next_action_time
            # create a dictionary of the current actions
            for LEDname, color in (self.LEDshow_actions[self.current_location]
                                   ['LEDs'].iteritems()):
                # convert colorwithfade (like 111111-f2) into dictionary of:
                # color:
                # fadestart:
                # fadeend:
                # zfill() pads plain values (e.g. ints parsed from yaml) out
                # to 6 characters so they look like hex color strings
                color = str(color).zfill(6)
                LED_dic = {'LEDname': LEDname, 'color': color,
                           'priority': self.priority, 'blend': self.blend}
                if "-f" in color:
                    color = self._convert_colorwithfades_to_time(
                        color, self.secs_per_tock)
                    LED_dic['dest_color'] = str(color['dest_color']).zfill(6)
                    LED_dic['fadestart'] = color['fadestart']
                    LED_dic['fadeend'] = color['fadeend']
                    # while fading, 'color' is None; the fade keys above
                    # describe the transition instead
                    LED_dic['color'] = None
                self.game.LEDs._add_to_update_list(LED_dic)
                # If this LED is off and not involved in a fade,
                # remove it from the active list
                if LEDname in self.active_LEDs:
                    if (LED_dic.get('dest_color', None) == '000000' or
                            LED_dic.get('dest_color', None) is None) \
                            and (LED_dic['color'] == '000000'):
                        self.active_LEDs.pop(LEDname)
                else:
                    # Update this show's active LEDs list with the latest
                    # settings.
                    # NOTE(review): on this branch the membership re-check
                    # just below is always False, and an LED that is already
                    # active but changes to a new (non-off) color takes
                    # neither the pop path above nor this update path --
                    # confirm this if/else nesting is intended.
                    active_LEDs_dic = {}
                    # loop through current actions
                    if LEDname in self.active_LEDs:
                        # if we have a current entry for this LEDname, copy its
                        # color to the prevcolor key. (We need this to restore
                        # fades since we need to know where the fade started.)
                        active_LEDs_dic['prevcolor'] = self.active_LEDs[
                            LEDname]['color']
                    active_LEDs_dic['color'] = LED_dic.get('color', None)
                    active_LEDs_dic['fadestart'] = LED_dic.get('fadestart',
                                                               None)
                    active_LEDs_dic['fadeend'] = LED_dic.get('fadeend', None)
                    active_LEDs_dic['dest_color'] = LED_dic.get('dest_color',
                                                                None)
                    new_dic = {LEDname: active_LEDs_dic}
                    self.active_LEDs.update(new_dic)
            # increment this show's current_location pointer and handle repeats
            # if we're at the end of the show
            if self.current_location == self.total_locations-1:
                # if we're repeating with an unlimited number of repeats
                if self.repeat and self.num_repeats == 0:
                    self.current_location = 0
                # if we're repeating, but only for a certain number of times
                elif self.repeat and self.num_repeats > 0:
                    # if we haven't hit the repeat limit yet
                    if self.current_repeat_step < self.num_repeats-1:
                        self.current_location = 0
                        self.current_repeat_step += 1
                    else:
                        self.ending = True
                else:
                    self.ending = True
                    return  # no need to continue if the show's over
            # else, we're in the middle of a show
            else:
                self.current_location += 1
            # If our LEDshow is running so fast that it has done a complete
            # loop, then let's just break out of the loop
            if action_loop_count == self.total_locations:
                return
def _convert_colorwithfades_to_time(self, color, secs_per_tock):
# Receives the combined color-fade combinations from LEDshow files
# and breaks them out into colors and real time fade targets.
# Receives inputs of hex colors with fades specified in tocks, like:
# ff00aa-f2
# (This is a hex color of #ff00aa with a fade of 2 tocks)
# Returns a tuple of:
# color: ff00aa
# fadestart: time start in real time since epoch, like 123456789012.123
# fadeend: time target in real time since epoch, like 123456789012.123
# Look through our dictionary for any "-f" characters indicating fade
# times that are still in tocks
colorwithfade = color.split('-f')
i = {}
i['dest_color'] = colorwithfade[0]
i['fadestart'] = self.game.LEDs.current_time
i['fadeend'] = ((int(colorwithfade[1]) * secs_per_tock) +
self.game.LEDs.current_time)
return i
    # TODO: validate that the actions we receive are actually a list before
    # processing them
class Playlist(object):
    """A list of :class:`LEDshow` objects which are then played sequentially.

    Playlists are useful for things like attract mode where you play one show
    for a few seconds, then another, etc.

    Parameters:

    'game': Parent game object

    Each step in a playlist can contain more than one :class:`LEDshow`. This is
    useful if you have a lot of little shows for different areas of the
    playfield that you want run at the same time. For example, you might have
    one show that only controls a group of rollover lane LEDs, and another
    which blinks the lights in the center of the playfield. You can run them
    at the same time by putting them in the same step in your playlist. (Note
    you don't need to use a playlist if you simply want to run two LEDshows
    at the same time. In that case you could just call :meth:`LEDshow.play`
    twice to play both shows.)

    For each "step" in the playlist, you can specify the number of seconds it
    runs those shows before moving on, or you can specify that one of the
    shows in that step plays a certain number of times and then the playlist
    moves to the next step from there.

    You create a show by creating an instance of :class:`Playlist`. Then you
    add LEDshows to it via :meth:`add_show`. Finally, you specify the
    settings for each step (like how it knows when to move on) via
    :meth:`step_settings`.

    When you start a playlist (via :meth:`start`), you can specify settings
    like what priority the show runs at, whether it repeats, etc.

    Example usage from a game mode:

    (This example assumes we have self.show1, self.show2, and self.show3
    already loaded.)

    Setup the playlist::

        self.my_playlist = LEDs.Playlist(self.game)
        self.my_playlist.add_show(step_num=1, show=self.show1, tocks_per_sec=10)
        self.my_playlist.add_show(step_num=2, show=self.show2, tocks_per_sec=5)
        self.my_playlist.add_show(step_num=3, show=self.show3, tocks_per_sec=32)
        self.my_playlist.step_settings(step=1, time=5)
        self.my_playlist.step_settings(step=2, time=5)
        self.my_playlist.step_settings(step=3, time=5)

    Run the playlist::

        self.my_playlist.start(priority=100, repeat=True)

    Stop the playlist:

    ``self.my_playlist.stop()``
    """
    def __init__(self, game):
        super(Playlist, self).__init__()
        self.logger = logging.getLogger("Playlist")
        self.game = game
        self.step_settings_dic = {}  # dictionary with step_num as the key. Values:
        # time - sec this entry runs
        # trigger_show
        self.step_actions = []  # The actions for the steps in the playlist
        # step_num
        # show
        # num_repeats
        # tocks_per_sec
        # blend
        self.steps = []  # list of values of steps, like [1,2,3,5,10]
        self.current_step_position = 0  # index into self.steps of the *next*
        # step of shows to play
        self.repeat = False  # whether this playlist repeats when finished
        self.repeat_count = 0  # how many times to repeat (0 = forever)
        self.current_repeat_loop = 0  # which repeat loop we're currently on
        self.running = False  # is this playlist currently running?
        self.priority = 0  # relative priority of this playlist's shows
        self.starting = False  # used to know if we're on our first step
        self.stopping = False  # used to tell the playlist it should stop on
        # the next advance

    def add_show(self, step_num, show, num_repeats=0, tocks_per_sec=32,
                 blend=False, repeat=True):
        """Adds a LEDshow to this playlist. You have to add at least one show
        before you start playing the playlist.

        :param integer step_num: Which step number you're adding this show
            to. You have to specify this since it's possible to add multiple
            shows to the same step (in cases where you want them both to play
            at the same time during that step). If you want the same show to
            play in multiple steps, then add it multiple times (once to each
            step). The show plays starting with the lowest number step and
            then moving on. Ideally they'd be 1, 2, 3... but it doesn't
            matter. If you have step numbers of 1, 2, 5... then the player
            will figure it out.
        :param object show: The LEDshow object that you're adding to this
            step.
        :param integer num_repeats: How many times you want this show to
            repeat within this step. Note this does not affect when the
            playlist advances to the next step. (That is controlled via
            :meth:`step_settings`.) Rather, this is just how many loops this
            show plays. A value of 0 means it repeats indefinitely. (Well,
            until the playlist advances to the next step.) Note that you also
            have to have repeat=True for it to repeat here.
        :param integer tocks_per_sec: How fast you want this show to play.
            See :meth:`LEDshow.play` for details.
        :param boolean blend: Whether you want this show to blend with lower
            priority shows below it. See :meth:`LEDshow.play` for details.
        :param boolean repeat: Causes the show to keep repeating until the
            playlist moves on to the next step.
        """
        # Make a temp copy of our steps since we might have to remove one while
        # iterating through it
        temp_steps = list(self.step_actions)
        # If the show we're adding is already in the step we're adding it to,
        # remove it.
        for step in temp_steps:
            if step['step_num'] == step_num and step['show'] == show:
                self.step_actions.remove(step)
        self.step_actions.append({'step_num': step_num,
                                  'show': show,
                                  'num_repeats': num_repeats,
                                  'tocks_per_sec': tocks_per_sec,
                                  'repeat': repeat,
                                  'blend': blend})
        # Add this number to our list of step numbers
        # We do all this here when we add a show to a playlist so we don't have
        # to deal with it later.
        self.steps.append(step_num)
        # Remove duplicates
        self.steps = list(set(self.steps))
        # Reorder the list from smallest to biggest
        self.steps.sort()

    def step_settings(self, step, time=0, trigger_show=None):
        """Used to configure the settings for a step in a :class:`Playlist`.
        This configuration is required for each step. The main thing you use
        this for is to specify how the playlist knows to move on to the next
        step.

        :param integer step: Which step number you're configuring
        :param float time: The time in seconds that you want this step to run
            before moving on to the next one.
        :param object trigger_show: If you want to move to the next step
            after one of the LEDshows in this step is done playing, specify
            that LEDshow here. This is required because if there are multiple
            LEDshows in this step of the playlist which all end at different
            times, we wouldn't know which one to watch in order to know when
            to move on.

        Note that you can have repeats with a trigger show, but in that case
        you also need to have the num_repeats specified. Otherwise if you
        have your trigger show repeating forever then the playlist will never
        move on. (In that case use the *time* parameter to move on based on
        time.)
        """
        settings = {'time': time,
                    'trigger_show': trigger_show}
        self.step_settings_dic.update({step: settings})

    def start(self, priority, repeat=True, repeat_count=0, reset=True):
        """Starts playing a playlist. You can only use this after you've
        added at least one show via :meth:`add_show` and configured the
        settings for each step via :meth:`step_settings`.

        :param integer priority: What priority you want the :class:`LEDshow`
            shows in this playlist to play at. These shows will play "on top"
            of lower priority stuff, but "under" higher priority things.
        :param boolean repeat: Controls whether this playlist repeats when
            it's finished.
        :param integer repeat_count: How many times you want this playlist to
            repeat before it stops itself. (Must be used with *repeat=True*
            above.) A value of 0 here means that this playlist repeats
            forever until you manually stop it. (This is ideal for attract
            mode.)
        :param boolean reset: Controls whether you want this playlist to
            start at the beginning (True) or you want it to pick up where it
            left off (False). You can also use *reset* to restart a playlist
            that's currently running.
        """
        if not self.running:
            if reset:
                self.current_step_position = 0
                self.current_repeat_loop = 0
            self.running = True
            self.starting = True
            self.stopping = False
            self.repeat = repeat
            self.repeat_count = repeat_count
            self.priority = int(priority)
            self._advance()
        else:
            # we got a command to start a playlist, but the playlist is already
            # running? If they also passed a reset parameter, let's restart
            # the playlist from the beginning
            # (the recursive call relies on reset defaulting to True)
            if reset:
                self.stop(reset=True)
                self.start(priority=priority, repeat=repeat,
                           repeat_count=repeat_count)

    def stop(self, reset=True):
        """Stops a playlist. Pretty simple.

        :param boolean reset: If *True*, it resets the playlist tracking
            counter back to the beginning. You can use *False* here if you
            want to stop and then restart a playlist to pick up where it left
            off.
        """
        for action in self.step_actions:
            if action['step_num'] == self.steps[self.current_step_position-1]:
                # we have to use the "-1" above because the playlist current
                # position represents the *next* step of shows to play. So when
                # we stop the current show, we have to come back one.
                action['show'].stop()
        self.running = False
        # Drop any pending queue entries that would have advanced this
        # playlist in the future.
        for item in self.game.LEDs.queue:
            if item['playlist'] == self:
                self.game.LEDs.queue.remove(item)
        if reset:
            self.current_step_position = 0
            self.current_repeat_loop = 0

    def _advance(self):
        # Runs the LEDshow(s) at the current step of the playlist and advances
        # the pointer to the next step

        # Creating a local variable for this just to keep the code easier to
        # read. We track this because it's possible the game programmer will
        # skip numbers in the steps in the playlist, like [1, 2, 5]
        current_step_value = self.steps[self.current_step_position]
        # On the very first advance this wraps to index -1 (the last step),
        # but the self.starting guard below means it's never used then.
        prev_step = self.steps[self.current_step_position-1]
        # Stop the previous step's shows
        # Don't do anything if this playlist hasn't started yet
        if not self.starting:
            for action in self.step_actions:
                if action['step_num'] == prev_step:
                    # We have to make sure the show is running before we try to
                    # stop it, because if this show was a trigger show then it
                    # stopped itself already
                    if action['show'].running:
                        action['show'].stop()
        self.starting = False
        # If this playlist is marked to stop, then stop here
        if self.stopping:
            return
        # Now do the actions in our current step
        # Pull in the stuff we need for this current step
        step_time = self.step_settings_dic[current_step_value]['time']
        step_trigger_show = self.step_settings_dic[current_step_value]['trigger_show']
        # Now step through all the actions for this step and schedule the
        # LEDshows to play
        for action in self.step_actions:
            if action['step_num'] == current_step_value:
                show = action['show']
                num_repeats = action['num_repeats']
                tocks_per_sec = action['tocks_per_sec']
                blend = action['blend']
                repeat = action['repeat']
                if show == step_trigger_show:
                    # This show finishing will be used to trigger the
                    # advancement to the next step.
                    callback = self._advance
                    if num_repeats == 0:  # Hmm.. we're using this show as the
                        # trigger, but it's set to repeat indefinitely?!?
                        # That won't work. Resetting repeat to 1 and raising
                        # a warning
                        num_repeats = 1
                        # todo warning
                else:
                    callback = None
                show.play(repeat=repeat, priority=self.priority, blend=blend,
                          tocks_per_sec=tocks_per_sec, num_repeats=num_repeats,
                          callback=callback)
        # if we don't have a trigger_show but we have a time value for this
        # step, set up the time to move on
        if step_time and not step_trigger_show:
            self.game.LEDs.queue.append({'playlist': self,
                'action_time': (self.game.LEDs.current_time + step_time)})
        # Advance our current_step_position counter
        if self.current_step_position == len(self.steps)-1:
            # We're at the end of our playlist. So now what?
            self.current_step_position = 0
            # Are we repeating?
            if self.repeat:
                # Are we repeating forever, or x number of times?
                if self.repeat_count:  # we're repeating x number of times
                    if self.current_repeat_loop < self.repeat_count-1:
                        self.current_repeat_loop += 1
                    else:
                        self.stopping = True
                        return
                else:  # we're repeating forever
                    pass
            else:  # there's no repeat
                self.stopping = True
                return
        else:
            self.current_step_position += 1
class LEDcontroller(object):
"""Manages all the LEDs in the pinball machine. Handles updates,
priorities, restores, running and stopping LEDshows, etc. There should be
only one per game.
Parameters:
'game': Parent game object.
Contains :meth:`update` which should be called once per game loop to do the
actual work.
**Using LEDcontroller**
todo
"""
    def __init__(self, game):
        """Initializes the LED controller.

        :param object game: Parent game object.
        """
        self.logger = logging.getLogger("LEDcontroller")
        self.game = game
        self.registered_shows = []  # weakref proxies of every LEDshow that
        # has been processed for this game
        self.update_list = []
        # self.update_list is a list of dicts:
        # LEDname (str)
        # color (str)
        # fade(ms) (int)
        # priority (int)
        # fadeend (float)
        # fadestart (float)
        # dest_color (int) - hex color or list
        self.running_shows = []  # LEDshows currently playing, kept sorted
        # by priority (see _run_show)
        self.LED_priorities = {}  # dictionary which tracks the priorities of
        # whatever last set each LED in the machine
        self.initialized = False  # We need to run some stuff once but we can't
        # do it here since this loads before our LED game items are created
        self.queue = []  # contains list of dics for things that need to be
        # serviced in the future, including: (not all are always used)
        # LEDname
        # priority
        # blend
        # fadeend
        # dest_color
        # color
        # playlist
        # action_time
        self.active_scripts = []  # list of active scripts that have been
        # converted to LEDshows. We need this to facilitate removing shows when
        # they're done, since programmers don't use a name for scripts like
        # they do with shows. active_scripts is a list of dictionaries, with
        # the following k/v pairs:
        # LEDname - the LED the script is applied to
        # priority - what priority the script was running at
        # show - the associated LEDshow object for that script
        self.manual_commands = []  # list that holds the last states of any
        # LEDs that were set manually. We keep track of this so we can restore
        # lower priority LEDs when shows end or need to be blended.
        # Each entry is a dictionary with the following k/v pairs:
        # LEDname
        # color - the current color *or* fade destination color
        # priority
        # fadeend - (optional) realtime of when the fade should end
        self.current_time = time.time()
        # we use a common system time for the entire LED system so that every
        # "current_time" of a single update cycle is the same everywhere. This
        # ensures that multiple shows, scripts, and commands start in-sync
        # regardless of any processing lag.
def _initialize(self):
# Internal method which populates our LED_priorities list with the
# priority of whatever last set that LED. We need to do this here
# instead of in __init__ because the LEDcontroller instance might
# be created before we read the LEDs from our machine yaml file.
for LED in self.game.leds:
self.LED_priorities[LED.name] = 0
self.initialized = True
def _run_show(self, show):
# Internal method which starts a LEDshow
show.running = True
show.ending = False
show.current_repeat_step = 0
show.last_action_time = self.current_time
# or in the advance loop?
self.running_shows.append(show) # should this be a set?
self.running_shows.sort(key=lambda x: x.priority)
def _end_show(self, show, reset=True):
# Internal method which ends a running LEDshow
running_shows_copy = list(self.running_shows)
if show in running_shows_copy:
self.running_shows.remove(show)
show.running = False
# Restore the LEDs not "holding" the final states
if not show.hold:
for LEDname in show.active_LEDs:
self.restore_LED_state(LEDname, show.priority)
if reset:
show.current_location = 0
# if this show that's ending was from a script, remove it from the
# active_scripts list
# Make a copy of the active scripts object since we're potentially
# deleting from it while we're also iterating through it. We have to
# use list() here since if we just write a=b then they would both
# point to the same place and that wouldn't solve the problem.
active_scripts_copy = list(self.active_scripts)
for entry in active_scripts_copy:
if entry['show'] == show:
self.active_scripts.remove(entry)
if show.callback:
show.callback()
def update(self):
"""Runs once per game loop and services any LED updates that are
needed. This checks several places:
1. Running LEDshows
2. The LEDcontroller queue for any future commands that should be
processed now.
Parameters:
none
"""
self.current_time = time.time()
# we calculate current_time one per loop because we want every action
# in this loop to write the same "last action time" so they all stay
# in sync. Also we truncate to 3 decimals for ease of comparisons later
# Check the running LEDshows
for show in self.running_shows:
# we use a while loop so we can catch multiple action blocks
# if the show tocked more than once since our last update
while show.next_action_time <= self.current_time:
# add the current location to the list to be serviced
# show.service_locations.append(show.current_location)
# advance the show to the current time
show._advance()
if not show.running:
# if we hit the end of the show, we can stop
break
# Check to see if we need to service any items from our queue. This can
# be single commands or playlists
# Make a copy of the queue since we might delete items as we iterate
# through it
queue_copy = list(self.queue)
for item in queue_copy:
if item['action_time'] <= self.current_time:
# If the queue is for a fade, we ignore the current color
if item.get('fadeend', None):
self._add_to_update_list({'LEDname': item['LEDname'],
'priority': item['priority'],
'blend': item.get('blend', None),
'fadeend': item.get('fadeend', None),
'dest_color': item.get('dest_color',
None)})
elif item.get('color', None):
self._add_to_update_list({'LEDname': item['LEDname'],
'priority': item['priority'],
'color': item.get('color', None)})
elif item.get('playlist', None):
item['playlist']._advance()
# We have to check again since one of these advances could have
# removed it already
if item in self.queue:
self.queue.remove(item)
if self.update_list:
self._do_update()
    def restore_LED_state(self, LEDname, priority=None, fadeend=None,
                          color=None):
        """Restores an LED to whatever state it should be in below the passed
        priority parameter. Similar to :meth:`get_LED_state` except it
        actually makes the change rather than only returning values.

        :param string LEDname: The name of the LED we want to restore
        :param integer priority: We will only restore the LED to a priority
            lower than this.

        Optional parameters which are used if our current LED is fading to
        off and we need to blend it with whatever is below it:

        :param string fadeend: Realtime of when the fade will end
        :param string color: The current hex color of the LED
        """
        # If this LED was last touched by something with a higher priority
        # than the priority we're restoring below, then we can end here
        if self.LED_priorities[LEDname] > priority:
            return
        # If there are any pending updates, these can jack up what we're trying
        # to restore here if they're for the same LED but with a higher
        # priority, so first flush that out.
        if self.update_list:
            self._do_update()
        restored_state = self.get_LED_state(LEDname, priority)
        # restored_state is a tuple whose length tells us what to restore:
        # len 3: (color, priority, blend) -- no fade, restore directly.
        # longer: additionally carries fadeend, dest_color, fadestart at
        # indexes 3, 4, 5 -- the LED we're restoring *to* is mid-fade, so we
        # restore the color now and service the fade on the next tick.
        # NOTE(review): an earlier comment here claimed lengths of 2 or 4,
        # which disagrees with the len == 3 checks and the [3]/[4]/[5]
        # indexing below -- confirm against get_LED_state's actual return.

        # Since we're restoring this LED from something with a lower priority,
        # we need to reset the priority of whatever last touched this LED.
        # Otherwise _do_update will ignore this restored setting.
        self.LED_priorities[LEDname] = 0
        # If there's an incoming blend, we need to calculate the colors and
        # write them into our update
        if fadeend:  # this catches if the LED we're restoring FROM is fading
            # with a blend to whatever's below it
            if len(restored_state) == 3:  # the LED we're restoring to is not
                # involved in a fade, so we can just apply the fade of the LED
                # we're fading away from to fade to the new LED's color
                self._add_to_update_list({'LEDname': LEDname,
                                          'dest_color': restored_state[0],
                                          'priority': restored_state[1],
                                          'blend': restored_state[2],
                                          'fadeend': fadeend})
            else:  # this means that our LED we're restoring *from* has a fade
                # and blend AND the LED we're restoring *to* is also in the
                # process of fading. So this is kind of complex because we have
                # to set TWO fades. First we have to fade out our current LED
                # to wherever the new LED is in its fade. Then we have to set
                # a second fade to pick up the continuation of the original
                # fade from the new LED. Phew!

                # Set the first fade from where our current LED is now to where
                # the new LED will be when our current LED is done fading to it

                # Figure out what color our target LED will be when our current
                # LED fade is done. To do that we have use our old LED's
                # fadeend time to calculate the midpoint
                target_color = self.get_midfade_color(fadestart=restored_state[5],
                                          fadeend=restored_state[3],
                                          midpoint_time=fadeend,
                                          orig_color=color,
                                          dest_color=restored_state[4])
                # Now let's schedule it.
                self._add_to_update_list({'LEDname': LEDname,
                                          'dest_color': target_color,
                                          'priority': restored_state[1],
                                          'blend': restored_state[2],
                                          'fadeend': fadeend})
                # todo schedule a script that is for this LEDname, with a
                # color of 000000, and an endtime of when the fadeend from
                # above is and with blend = True. This will have the effect
                # of restoring whatever lower level whatever is already
                # running
                """
                self.queue.append({'action_time': fadeend,
                                   'LEDname': LEDname,
                                   'color': "000000",
                                   'blend': True,
                                   'priority': priority})
                """
                # Schedule the second fade: once the first fade lands, pick
                # up the continuation of the restored LED's original fade.
                self.queue.append({'action_time': fadeend,
                                   'LEDname': LEDname,
                                   'blend': True,
                                   'priority': priority,
                                   'fadeend': restored_state[3],
                                   'dest_color': restored_state[4]})
        # otherwise our LED is not involved in a fade, so just restore
        # whatever we got immediately
        else:
            if len(restored_state) == 3:
                self._add_to_update_list({'LEDname': LEDname,
                                          'color': restored_state[0],
                                          'priority': restored_state[1],
                                          'blend': restored_state[2]})
            else:
                self._add_to_update_list({'LEDname': LEDname,
                                          'color': restored_state[0],
                                          'priority': restored_state[1],
                                          'blend': restored_state[2],
                                          'fadeend': restored_state[3],
                                          'dest_color': restored_state[4]})
def get_LED_state(self, LEDname, priority=0):
    """Looks at all the active shows and manual commands and returns the
    current details for a given LED.

    :param string LEDname: The LED we're looking for.
    :param integer priority: Only sources strictly *below* this priority
        are considered; the highest-priority match among them wins.

    Returns tuple of:
        'color': The restored color for the LED.
        'priority': The priority of that LED.
        'blend': If the LED we're restoring is blending below itself.
        'fadeend': (optional) If there's a fade, what fade time should be
            used.
        'dest_color': (optional) If there's a fade, what fade color
            should be used.
        'fadestart': (optional) If there's a fade, the realtime the fade
            began. NOTE(review): this sixth element is returned but was
            not listed in the original docs; callers index it as [5].
    """
    # Accumulators for the best (highest-priority) source found so far.
    new_color = None
    new_prevcolor = None
    new_priority = 0
    new_fadestart = None
    new_fadeend = None
    new_dest_color = None
    new_blend = False
    # Pass 1: scan every running show for the highest-priority show
    # (below the requested priority) that currently drives this LED.
    for show in self.running_shows:
        if LEDname in show.active_LEDs and show.priority < priority:
            if show.priority > new_priority:
                new_color = show.active_LEDs[LEDname]['color']
                new_priority = show.priority
                new_blend = show.blend
                # if we have a fade, grab those values now.
                # We won't process them now in case they're overwritten
                # by a higher priority. No sense doing that math now
                # since we might not need it.
                if show.active_LEDs[LEDname]['fadeend']:
                    # first check to make sure the fade is still happening
                    if (show.active_LEDs[LEDname]['fadeend'] >
                            self.current_time):
                        new_prevcolor = (show.active_LEDs[LEDname].
                                         get('prevcolor', "000000"))
                        new_fadestart = (show.active_LEDs[LEDname]
                                         ['fadestart'])
                        new_fadeend = (show.active_LEDs[LEDname]
                                       ['fadeend'])
                        new_dest_color = (show.active_LEDs[LEDname]
                                          ['dest_color'])
                    else:
                        # we had a fade, but it's no longer active, so
                        # the LED has already reached its destination
                        new_color = (show.active_LEDs[LEDname]
                                     ['dest_color'])
                else:
                    # reset these since the new LED we found doesn't
                    # use them
                    new_prevcolor = None
                    new_fadestart = None
                    new_fadeend = None
                    new_dest_color = None
    # Pass 2: check to see if there were any manual commands to enable
    # this LED. If there are and they're higher than the priority that we
    # found in all the active shows, and lower than the priority that
    # we're restoring to, then we're going to return this color instead.
    for entry in self.manual_commands:
        if entry['LEDname'] == LEDname and \
                entry['priority'] > new_priority and \
                entry['priority'] < priority:
            new_color = entry['color']
            new_priority = entry['priority']
            new_prevcolor = None
            new_fadestart = None
            new_fadeend = None
            new_dest_color = None
            # NOTE(review): blend is reset to None (not False) here, so a
            # non-fade manual command returns blend=None -- confirm that
            # callers only treat it as falsy.
            new_blend = None
            if entry.get('fadeend', None):
                # we have a command that involves a fade
                if entry['fadeend'] > self.current_time:
                    # the fade is still happening
                    new_fadeend = entry['fadeend']
                    new_dest_color = entry['dest_color']
                    new_fadestart = entry['fadestart']
                    new_prevcolor = entry['prevcolor']
                    new_blend = entry['blend']
                    new_color = "000000"
                else:
                    # we had a fade, but it's over now, so we just need to
                    # restore this like a static color
                    new_color = entry['dest_color']
    # now that we have the values, we can process them to return them
    if not new_color:
        # If we didn't find a color in any of the running shows or commands
        # then we'll just pass 000000 to turn off the LED
        new_color = "000000"
    if new_fadeend:
        # The LED we found is involved in a fade. Convert the stored fade
        # endpoints into the color the LED should be showing right now.
        new_color = self.get_midfade_color(fadestart=new_fadestart,
                                           fadeend=new_fadeend,
                                           midpoint_time=self.current_time,
                                           orig_color=new_prevcolor,
                                           dest_color=new_dest_color)
    # construct the return based on what we have
    if new_fadeend:
        return new_color, new_priority, new_blend, new_fadeend,\
            new_dest_color, new_fadestart
    else:
        return new_color, new_priority, new_blend
def get_midfade_color(self, fadestart, fadeend, midpoint_time, orig_color,
                      dest_color):
    """Figures out the color an in-progress fade should show at a moment.

    Parameters:
        'fadestart': The realtime time that this fade began.
        'fadeend': The realtime time that this fade ends.
        'midpoint_time': The time "mid point" we're evaluating the fade
            at (typically the current time).
        'orig_color': The original color, as a hex string or channel list.
        'dest_color': The final destination color, as a hex string or
            channel list.
    Returns:
        'color': The current color as a list of integer channel values.
    """
    # Figure out how far along the fade is, as a fraction 0.0 - 1.0.
    if fadeend == fadestart:
        # Zero-length fade: treat it as already complete so we don't
        # divide by zero.
        current_percent = 1.0
    else:
        # float() guards against integer truncation if the timestamps
        # happen to be ints (this module otherwise targets Python 2).
        current_percent = (float(midpoint_time - fadestart) /
                           (fadeend - fadestart))
    if type(orig_color) is not list:
        orig_color = self.convert_hex_to_list(orig_color)
    if type(dest_color) is not list:
        dest_color = self.convert_hex_to_list(dest_color)
    # Interpolate each channel. This is always orig + delta: when
    # dest < orig the delta is negative, which correctly moves the
    # channel *down* toward the destination. (The previous version
    # subtracted a negative delta, pushing the color away from the
    # destination.)
    color = []
    for i in range(len(dest_color)):
        delta = (dest_color[i] - orig_color[i]) * current_percent
        color.append(int(orig_color[i] + delta))
    return color
def _add_to_update_list(self, update):
# Adds an update to our update list, with intelligence that if the list
# already contains an update for this LEDname & priority combination,
# it deletes it first. This is done so if the game loop is running
# slower than our updates are coming in, we only keep the most recent
# entry.
for item in self.update_list:
if item['LEDname'] == update['LEDname'] and (item['priority'] ==
update['priority']):
self.update_list.remove(item)
self.update_list.append(update)
def _do_update(self):
    # Pushes everything in update_list out to the game's LEDs.
    # The update_list is a list of dictionaries w/the following k/v pairs:
    #   color: 111111
    #   priority: 9
    #   LEDname: laneP
    #   fadecolor: list of colors or hex color
    #   fadeend: realtime fade end
    #   blend: True/False
    # First sort the update_list so we only have one of each LEDname.
    # If there are multiple entries for one LEDname, only keep the one
    # with the highest priority: the ascending-priority sort means the
    # highest-priority entry is written into the dict last and wins.
    filtered={}
    for di in sorted(self.update_list, key=lambda d: d['priority']):
        filtered[di['LEDname']] = di
    # NOTE(review): .values() is a list under Python 2 (this module's
    # target); under Python 3 it would be a view invalidated by rebinds.
    self.update_list = filtered.values()
    # Take over the update_list and clear the original.
    # Why? In case any of these updates need to queue their own updates
    # (restore_LED_state can append to self.update_list), which are then
    # drained by the recursive call at the bottom.
    current_list = self.update_list
    self.update_list = []
    for item in current_list:
        # Only perform the update if the priority is at least whatever
        # touched that LED last.
        # NOTE(review): .get() returns None for a never-touched LED;
        # `int >= None` is True under Python 2 but raises under Python 3.
        if item['priority'] >= self.LED_priorities.get(item['LEDname']):
            # Now we're doing the actual update. First set our color:
            # If we have an entry for color and it is not None
            if ("color" in item) and item['color']:
                # normalize a hex string to 6 chars (e.g. "ff" -> "0000ff")
                if type(item['color']) is not list:
                    item['color'] = item['color'].zfill(6)
                # A blended update to black means "show whatever is
                # underneath" -- restore from lower-priority sources
                # instead of writing black to the hardware.
                if item.get('blend', False) and \
                        (item['color'] == '000000' or\
                        item['color'] == [0, 0, 0]):
                    self.restore_LED_state(item['LEDname'],
                                           item['priority'])
                else:
                    if type(item['color']) is not list:
                        item['color'] = self.convert_hex_to_list(item\
                            ['color'])
                    # Uncomment the comment block below if you want to log
                    # every LED action. Warning this will be a crazy amount
                    # of logging
                    """
                    self.game.logger.info("LED Command: %s %s",
                                          item['LEDname'], item['color'])
                    """
                    # now do the actual update
                    self.game.leds[item['LEDname']].color(item['color'])
                    # Update our list of LEDs so we know which priority
                    # last touched it
                    self.LED_priorities[item['LEDname']] = item['priority']
            # Next, if we have a fade:
            if "fadeend" in item and item.get('fadeend', None):
                if type(item['dest_color']) is not list:
                    item['dest_color'] = item['dest_color'].zfill(6)
                # Same blend-to-black rule as above, but for the fade
                # destination: restore instead of fading to black.
                if item['blend'] and \
                        (item['dest_color'] == '000000' or\
                        item['dest_color'] == [0, 0, 0]):
                    self.restore_LED_state(item['LEDname'],
                                           item['priority'],
                                           item['fadeend'])
                else:
                    if type(item['dest_color']) is not list:
                        item['dest_color'] = self.convert_hex_to_list(
                            item['dest_color'])
                    # Calculate the fade duration in milliseconds:
                    fadems = (item['fadeend'] - self.current_time) * 1000
                    # Uncomment the comment block below if you want to log
                    # every LED action. Warning this will be a crazy amount
                    # of logging
                    """
                    self.game.logger.info("LED Command: %s %s, "
                                          "Fade time: %s",
                                          item['LEDname'],
                                          item['dest_color'], fadems)
                    """
                    self.game.leds[item['LEDname']].color_with_fade(
                        item['dest_color'],fadems)
                    # Update our list of LED priorities so we know which
                    # priority last touched each LED. We use this to know
                    # if an update should overwrite the actual LED in the
                    # game.
                    self.LED_priorities[item['LEDname']] = item['priority']
    current_list=[]
    # If we got any updates while iterating this list, process them now
    # (single level of recursion per batch of newly-queued updates).
    if self.update_list:
        self._do_update()
def convert_hex_to_list(self, inputstring):
    """Takes a string input of hex numbers and returns a list of integers.

    Parameters:
        'inputstring': Incoming string with hex colors, like ffff00.
            A falsy value (None, empty string) is treated as "000000",
            and short values are left-padded with zeros to 6 characters.
    Returns:
        List of colors as integers, like [255, 255, 0].
    """
    if not inputstring:
        inputstring = "000000"
    # str() lets non-string input (e.g. an int) through; zfill pads short
    # values so we always have whole 2-character channel pairs.
    inputstring = str(inputstring).zfill(6)
    # Step through every 2 chars, converting each hex pair from base 16.
    # range() (instead of the Python-2-only xrange()) behaves identically
    # here while also working under Python 3.
    return [int(inputstring[i:i + 2], 16)
            for i in range(0, len(inputstring), 2)]
def run_script(self, LEDname, script, priority=0, repeat=True, blend=False,
               tps=1000, num_repeats=0, callback=None):
    """Runs a LED script. Scripts are similar to LEDshows, except they only
    apply to single LEDs and you can "attach" any script to any LED.

    Scripts are used anytime you want an LED to have more than one action.
    A simple example would be to flash an LED: you would make a script that
    turned it on (with your color), then off, repeating forever. Scripts
    could be more complex, like cycling through multiple colors, blinking
    out secret messages in Morse code, etc.

    Internally we actually just take a script and dynamically convert it
    into a LEDshow (that just happens to only be for a single LED), so we
    can have all the other LEDshow-like features, including playback speed,
    repeats, blends, callbacks, etc.

    Parameters:
        'LEDname': The name of the LED for this script to control.
        'script': A list of dictionaries of script commands. (See below.)
        'priority': The priority the LED in this script should operate at.
        'repeat': True/False. Whether the script repeats (loops).
        'blend': Whether the script should blend the LED colors with lower
            priority things. todo
        'tps': Tocks per second. todo
        'num_repeats': How many times this script should repeat before
            ending. A value of 0 indicates it will repeat forever. Also
            requires *repeat=True*.
        'callback': A callback function that is called when the script is
            stopped. todo update

    Returns:
        :class:`LEDshow` object. Since running a script just sets up and
        runs a regular LEDshow, run_script returns the LEDshow object.
        In most cases you won't need this, but it's nice if you want to
        know exactly which LEDshow was created by this script so you can
        stop it later.

    The script is a list of dictionaries, with each list item being a
    sequential instruction, and the dictionary defining what you want to
    do at that step. Dictionary items for each step are:
        'color': The hex color for the LED.
        'time': How long (in ms) you want the LED to be at that color.
        'fade': True/False. Whether you want that LED to fade to the color
            (using the *time* above), or whether you want it to switch to
            that color instantly.

    Example usage -- flash an RGB LED between red and off:

        self.flash_red = []
        self.flash_red.append({"color": "ff0000", "time": 100})
        self.flash_red.append({"color": "000000", "time": 100})
        self.game.LEDs.run_script("LED1", self.flash_red, "4", blend=True)

    Once defined, a script can be reused for any LED:

        self.game.LEDs.run_script("LED1", self.flash_red, "4", blend=True)
        self.game.LEDs.run_script("LED2", self.flash_red, "4", blend=True)

    A script which smoothly cycles an RGB LED through the rainbow:

        self.rainbow = []
        self.rainbow.append({'color': 'ff0000', 'time': 400, 'fade': True})
        self.rainbow.append({'color': 'ff7700', 'time': 400, 'fade': True})
        self.rainbow.append({'color': 'ffcc00', 'time': 400, 'fade': True})
        self.rainbow.append({'color': '00ff00', 'time': 400, 'fade': True})
        self.rainbow.append({'color': '0000ff', 'time': 400, 'fade': True})
        self.rainbow.append({'color': 'ff00ff', 'time': 400, 'fade': True})

    For single color LEDs, the *color* entries only contain a single hex
    value for the intensity of that LED, e.g. to flash on-and-off:

        self.flash = []
        self.flash.append({"color": "ff", "time": 100})
        self.flash.append({"color": "00", "time": 100})

    To keep a reference to the :class:`LEDshow` this creates:

        self.blah = self.game.LEDs.run_script("LED2", self.flash_red, "4")
    """
    # Convert the steps from the script list that was passed into the
    # format that's used in an LEDshow. A fading step is encoded as
    # "<color>-f<time>" in the LEDshow action's color string.
    LEDshow_actions = []
    for step in script:
        if step.get('fade'):
            color = str(step['color']) + "-f" + str(step['time'])
        else:
            color = str(step['color'])
        LEDshow_actions.append({'tocks': step['time'],
                                'LEDs': {LEDname: color}})
    # Build and start the single-LED show for this script.
    show = LEDshow(self.game, actions=LEDshow_actions)
    show_obj = show.play(repeat=repeat, tocks_per_sec=tps,
                         priority=priority, blend=blend,
                         num_repeats=num_repeats, callback=callback)
    # Remember the show so stop_script() can find it later.
    self.active_scripts.append({'LEDname': LEDname,
                                'priority': priority,
                                'show': show})
    return show_obj
def stop_script(self, LEDname=None, priority=0, show=None):
    """Stops and removes LED script(s).

    Parameters:
        'LEDname': The LED(s) with the script you want to stop.
        'priority': The priority of the script(s) you want to stop.
        'show': The show object associated with a script you want to stop.

    How the arguments combine:
        - *show* alone stops (and removes) that specific show.
        - *LEDname* and *priority* together stop all active LEDshows for
          that LEDname at that exact priority.
        - *LEDname* alone stops all script-based LEDshows for that LED,
          regardless of priority.
        - *priority* alone stops all script-based LEDshows running at
          that priority, for all LEDs.
        - Calling stop_script() with no arguments removes every LEDshow
          started from any script. Useful for end of ball or tilt where
          you just want to kill everything.
    """
    # Pick the match predicate based on which arguments were supplied.
    if show:
        matches = lambda entry: entry['show'] == show
    elif LEDname and priority:
        matches = lambda entry: (entry['LEDname'] == LEDname and
                                 entry['priority'] == priority)
    elif LEDname:
        matches = lambda entry: entry['LEDname'] == LEDname
    elif priority:
        matches = lambda entry: entry['priority'] == priority
    else:
        matches = lambda entry: True
    # Walk a snapshot of active_scripts, since ending a show may mutate
    # the live list while we iterate.
    for entry in list(self.active_scripts):
        if matches(entry):
            self._end_show(entry['show'])
    # todo callback?
def enable(self, LEDname, priority=0, color=None, dest_color=None,
           fade=0, blend=True):
    """This is a single one-time command to enable an LED.

    Parameters:
        'LEDname': The LED you're enabling.
        'priority': The priority this LED will be enabled at. This is
            used when determining what other enables, scripts, and
            LEDshows will play over or under it. If you enable an LED
            with the same priority that was previously used to enable
            it, it will overwrite the old color with the new one.
        'color': A hex string (like "ff00aa") for what color you want to
            enable this LED to be. Note if you have a single color (i.e.
            one element) LED, then just pass it "ff" to enable it on
            full brightness, or "80" for 50%, etc.
        'dest_color': If you want to fade the LED to your color instead
            of enabling it instantly, pass *dest_color* instead of
            *color*.
        'fade': If you want to fade the LED on, use *fade* to specify
            the fade on time (in ms). Note this also requires
            *dest_color* above instead of *color*.
        'blend': True/False. If *True* and if you're using a fade, it
            will fade from whatever color the LED currently is to your
            *dest_color*. If False it will turn the LED off first before
            fading.

    Note that since you enable LEDs with priorities, you can actually
    "enable" the LED multiple times at different priorities. Then if you
    disable the LED via a higher priority, the lower priority will still
    be there. This means that in your game, each mode can do whatever it
    wants with LEDs and you don't have to worry about a higher priority
    mode clearing out an LED and messing up the lower priority mode's
    status.

    The ability for this enable method to also keep track of the
    priority that an LED is enabled at is the reason you'd want to use
    this method versus calling :meth:`leds.color` directly. If you do
    use :meth:`leds.color` to enable an LED directly,
    :class:`LEDcontroller` won't know about it, and your LED could be
    overwritten the next time a LEDshow, script, or enable command is
    used.
    """
    # Add / update this latest info in our manual_commands dictionary
    params = {'LEDname': LEDname,
              'color': color,
              'priority': priority}
    if fade:
        fadestart = self.current_time
        fadeend = fadestart + (fade / 1000)
        # Normalize color/dest_color for a fade:
        if dest_color:
            # We're fading toward dest_color. If no starting color was
            # given, the fade starts from black.
            if not color:
                color = "000000"
        else:
            # Only *color* was given: treat it as the fade destination
            # and record that there's no static starting color.
            dest_color = color
            color = None
        prevcolor = color
        params.update({'fadeend': fadeend, 'blend': blend,
                       'fadestart': fadestart, 'dest_color': dest_color,
                       'color': color, 'prevcolor': prevcolor})
    # Replace any existing entry for this LEDname / priority pair.
    # Iterate over a snapshot (list(...)) since we remove from the live
    # list; removing while iterating skips the following element.
    for entry in list(self.manual_commands):
        if entry['LEDname'] == LEDname and entry['priority'] == priority:
            self.manual_commands.remove(entry)
    # now add our new command to the list
    self.manual_commands.append(params)
    # Add this command to our update_list so it gets serviced along with
    # all the other updates
    if fade:  # if we have a fade
        self._add_to_update_list({'LEDname': LEDname,
                                  'priority': priority,
                                  'dest_color': dest_color,
                                  'fadeend': fadeend,
                                  'blend': blend,
                                  'fadestart': fadestart,
                                  'prevcolor': prevcolor})
    else:  # no fade
        self._add_to_update_list({'LEDname': LEDname,
                                  'priority': priority,
                                  'color': color})
def disable(self, LEDname, priority=0, clear_all=True):
    """Command to disable an LED.

    Parameters:
        'LEDname': The name of the LED you're disabling.
        'priority': Which priority you're clearing (disabling) the LED
            at. (See *clear_all* below for details.) If you don't pass a
            priority, then it will disable the LED and remove *all*
            entries from the manual commands list, i.e. it
            disables/removes all the previously issued manual commands.
        'clear_all': If True, it will clear all the commands from the
            priority you passed and from any lower priority commands. If
            False then it only clears out the command from the priority
            that was passed.

    Once cleared, :class:`LEDcontroller` restores the LED's state from
    any commands or shows running at a lower priority.

    Note that this method does not affect running :class:`LEDshow`
    shows, so if you clear the :meth:`enable` commands but you have a
    running LEDshow which enables that LED, then the LED will be
    enabled. If you want to absolutely disable the LED regardless of
    whatever LEDshow is running, then use :meth:`enable` with
    *color=000000*, *blend=False*, and a *priority* that's higher than
    any running show.
    """
    priority_to_restore = priority
    # Remove matching entries from manual_commands. Each loop iterates
    # over a snapshot (list(...)) because removing from a list while
    # iterating it skips the element that follows each removal -- with
    # multiple matching entries some would survive.
    if clear_all:
        if priority:  # clear all, with priority specified
            for entry in list(self.manual_commands):
                if entry['LEDname'] == LEDname and \
                        entry['priority'] <= priority:
                    self.manual_commands.remove(entry)
                    priority_to_restore = priority
                    self.restore_LED_state(LEDname, priority_to_restore)
        else:  # clear all, no priority specified
            for entry in list(self.manual_commands):
                if entry['LEDname'] == LEDname:
                    self.manual_commands.remove(entry)
                    self.restore_LED_state(LEDname, priority_to_restore)
    else:  # just remove any commands of the priority passed
        for entry in list(self.manual_commands):
            if entry['LEDname'] == LEDname and \
                    entry['priority'] == priority:
                self.manual_commands.remove(entry)
                priority_to_restore = 0
                self.restore_LED_state(LEDname, priority_to_restore)
    # If we get to here without a match, we didn't find any commands to
    # restore, so we don't do anything.
| {
"repo_name": "mjocean/PyProcGameHD-SkeletonGame",
"path": "procgame/LEDs.py",
"copies": "1",
"size": "74833",
"license": "mit",
"hash": 6255868642551011000,
"line_mean": 45.1932098765,
"line_max": 98,
"alpha_frac": 0.569708551,
"autogenerated": false,
"ratio": 4.393154866737114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009605068205978819,
"num_lines": 1620
} |
"""An interface for creating and retrieving alerts in Matchlight."""
from __future__ import absolute_import
import calendar
import datetime
import json
import matchlight.error
__all__ = (
'Alert',
'AlertMethods',
)
class Alert(object):
    """Represents an alert.

    Attributes:
        id (:obj:`str`): A 128-bit UUID.
        number (:obj:`int`): The account specific alert number.
        type (:obj:`str`): The type of the associated Record.
        url (:obj:`str`): The url where the match was found.
        url_metadata (:obj:`dict`): Additional information about the url.
        ctime (:obj:`int`): A Unix timestamp of the alert creation
            timestamp.
        mtime (:obj:`int`): A Unix timestamp of the alert last
            modification date timestamp.
        seen (:obj:`bool`): User specific flag.
        archived (:obj:`bool`): User specific flag.
        upload_token (:obj:`str`): The upload_token of the associated
            Project.
        details (:obj:`dict`): Additional information about the Alert.
        project_name (:obj:`str`): The name of the associated Project.
        record_name (:obj:`str`): The name of the associated Record.
    """

    def __init__(self, id, number, type, url, url_metadata, ctime, mtime, seen,
                 archived, upload_token, details, project_name, record_name):
        """Initializes a new alert.

        Args:
            id (:obj:`str`): A 128-bit UUID.
            number (:obj:`int`): The account specific alert number.
            type (:obj:`str`): The type of the associated Record.
            url (:obj:`str`): The url where the match was found.
            url_metadata (:obj:`dict`): Additional information about the
                url.
            ctime (:obj:`int`): A Unix timestamp of the alert creation
                timestamp.
            mtime (:obj:`int`): A Unix timestamp of the alert last
                modification date timestamp.
            seen (:obj:`bool`): User specific flag.
            archived (:obj:`bool`): User specific flag.
            upload_token (:obj:`str`): The upload_token of the associated
                record.
            details (:obj:`dict`): Details about the Alert.
            project_name (:obj:`str`): The name of the associated Project.
            record_name (:obj:`str`): The name of the associated Record.
        """
        self.id = id
        self.number = number
        self.type = type
        self.url = url
        self.url_metadata = url_metadata
        self.ctime = ctime
        self.mtime = mtime
        self.seen = seen
        self.archived = archived
        self.upload_token = upload_token
        self.details = details
        self.project_name = project_name
        self.record_name = record_name

    @classmethod
    def from_mapping(cls, mapping):
        """Creates a new alert instance from the given mapping.

        The API encodes the ``seen``/``archived`` flags as the strings
        ``'true'``/``'false'``; they are converted to real booleans here.
        """
        return cls(
            id=mapping['id'],
            number=mapping['alert_number'],
            type=mapping['type'],
            url=mapping['url'],
            url_metadata=mapping['url_metadata'],
            ctime=mapping['ctime'],
            mtime=mapping['mtime'],
            # comparison already yields a bool -- no ternary needed
            seen=mapping['seen'] == 'true',
            archived=mapping['archived'] == 'true',
            upload_token=mapping['upload_token'],
            details=mapping['details'],
            project_name=mapping['project_name'],
            record_name=mapping['asset_name']
        )

    @property
    def last_modified(self):
        """:class:`datetime.datetime`: The last modified timestamp."""
        if self.mtime is None:
            return None
        return datetime.datetime.fromtimestamp(self.mtime)

    @property
    def date(self):
        """:class:`datetime.datetime`: The date created timestamp."""
        if self.ctime is None:
            return None
        return datetime.datetime.fromtimestamp(self.ctime)

    @property
    def score(self):
        """:obj:`int`: Represents how much of the record appeared on the page.

        Scores range from 1 to 800, with 800 representing that the entire
        record was found on the page. PII records will always have a score
        of 800. Returns ``None`` for unrecognized record types.
        """
        if self.type == 'pii':
            return 800
        if self.type == 'document':
            return self.details['document'].get('score', None)
        if self.type == 'source_code':
            return self.details['source_code'].get('score', None)
        return None

    @property
    def fields(self):
        """:obj:`list`: PII records will match on one or more 'fields'.

        Returns ``None`` for non-PII records.
        """
        if self.type == 'pii':
            return self.details['pii'].get('fields', [])
        return None

    def __repr__(self):  # pragma: no cover
        return '<Alert(number="{}", id="{}")>'.format(
            self.number,
            self.id
        )
class AlertMethods(object):
    """Provides methods for interfacing with the alerts API."""

    def __init__(self, ml_connection):  # noqa: D205,D400
        """Initializes an alerts interface with the given Matchlight
        connection.

        Args:
            ml_connection (:class:`~.Connection`): A Matchlight
                connection instance.
        """
        self.conn = ml_connection

    def filter(self, limit, seen=None, archived=None, project=None,
               record=None, last_modified=None, last_alert=None):
        """Returns a list of alerts.

        Providing a **limit** keyword argument will limit the number of
        alerts returned. The request may time out if this is set too
        high; a limit of 50 is recommended to avoid timeouts.

        The optional keyword arguments narrow the result set:
        **seen**/**archived** restrict to alerts with those flag values,
        **project** restricts to a specific project, **record** to a
        specific record, **last_modified** to alerts modified before the
        given datetime, and **last_alert** requests only results after
        that alert (useful for pagination).

        Examples:
            Request all unseen alerts::

                >>> ml.alerts.filter(seen=False, limit=50)
                [<Alert(number="1024",
                        id="625a732ad0f247beab18595z951c2088a3")>, ...]

            Request all alerts for a project::

                >>> ml.alerts.filter(project=my_project, limit=50)
                [<Alert(number="1024",
                        id="625a732ad0f247beab18595z951c2088a3")>, ...]

            Request sets of alerts using pagination::

                >>> ml.alerts.filter(limit=50, last_alert=50)
                [<Alert(number="977",
                        id="59d5a791g8d4436aaffe64e4b15474a5")>, ...]

        Args:
            limit (:obj:`int`): Don't return more than this number of
                alerts.
            seen (:obj:`bool`, optional):
            archived (:obj:`bool`, optional):
            project (:class:`~.Project`, optional): A project object.
                Defaults to all projects if not specified.
            record (:class:`~.Record`, optional): A record object.
                Defaults to all records if not specified.
            last_modified (:obj:`datetime`, optional):
            last_alert (:obj:`int`): Only return Alerts after this one.

        Returns:
            :obj:`list` of :class:`~.Alert`: List of matching alerts.
        """
        # Translate the optional keyword arguments to the wire format,
        # leaving None for "not specified" so the connection can drop
        # them. (`is True` matches the original strict flag handling.)
        seen_int = None if seen is None else (1 if seen is True else 0)
        archived_int = None if archived is None else (
            1 if archived is True else 0)
        upload_token = None if project is None else project.upload_token
        record_id = None if record is None else record.id
        mtime = (None if last_modified is None
                 else calendar.timegm(last_modified.timetuple()))
        response = self.conn.request(
            '/alerts',
            params={
                'limit': limit,
                'seen': seen_int,
                'archived': archived_int,
                'upload_token_filter': upload_token,
                'record_id_filter': record_id,
                'mtime': mtime,
                'last_alert': last_alert
            }
        )
        return [Alert.from_mapping(payload)
                for payload in response.json().get('alerts', [])]

    def edit(self, alert_id, seen=None, archived=None):
        """Edits an alert.

        Example:
            Archive an alert::

                >>> ml.alerts.edit(alert, archived=True)
                {
                    'seen': True,
                    'archived': True
                }

        Arguments:
            alert_id (:obj:`str`): An alert id (an :class:`Alert` is
                also accepted).
            seen (:obj:`bool`, optional):
            archived (:obj:`bool`, optional):

        Returns:
            :obj:`dict`: Updated alert metadata.
        """
        if isinstance(alert_id, Alert):
            alert_id = alert_id.id
        # Only the flags that were actually passed are sent.
        data = {}
        for key, value in (('seen', seen), ('archived', archived)):
            if value is not None:
                data[key] = value
        payload = self.conn.request(
            '/alert/{}/edit'.format(alert_id),
            data=json.dumps(data)
        ).json()
        return {
            'seen': payload['seen'],
            'archived': payload['archived']
        }

    def get_details(self, alert_id):
        """Returns details of an alert by the given alert ID.

        Args:
            alert_id (:obj:`str`): The alert identifier (an
                :class:`Alert` is also accepted).

        Returns:
            :obj:`dict`: Map of the alert details, or ``None`` if the
            alert does not exist (API 404).
        """
        if isinstance(alert_id, Alert):
            alert_id = alert_id.id
        try:
            response = self.conn.request('/alert/{}/details'.format(alert_id))
            return response.json()
        except matchlight.error.APIError as err:
            # A 404 means "no such alert" -- report it as None rather
            # than an exception; anything else propagates.
            if err.args[0] == 404:
                return None
            raise
| {
"repo_name": "TerbiumLabs/python-matchlightsdk",
"path": "src/matchlight/alert.py",
"copies": "1",
"size": "11268",
"license": "bsd-3-clause",
"hash": 7224883908109211000,
"line_mean": 32.7365269461,
"line_max": 79,
"alpha_frac": 0.5502307419,
"autogenerated": false,
"ratio": 4.434474616292798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5484705358192797,
"avg_score": null,
"num_lines": null
} |
"""An interface for creating and retrieving Matchlight projects."""
from __future__ import absolute_import
import datetime
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import json
import six
import matchlight.error
import matchlight.utils
class Project(object):
    """A Matchlight Fingerprint Monitoring Project.

    Attributes:
        name (:obj:`str`): The project name.
        project_type (:obj:`str`): The project type.
        upload_token (:obj:`str`): The project upload token.
        last_date_modified (:obj:`int`): The Unix timestamp of the last
            modification.
        number_of_records (:obj:`int`): The number of total records in
            the project.
        number_of_unseen_alerts (:obj:`int`): The number of unread
            alerts.
    """

    def __init__(self, name, project_type, upload_token,
                 last_date_modified, number_of_records,
                 number_of_unseen_alerts):
        """Initializes a new project.

        Args:
            name (:obj:`str`): The project name.
            project_type (:obj:`str`): The project type.
            upload_token (:obj:`str`): The project upload token.
            last_date_modified (:obj:`int`): The Unix timestamp of the
                last modification.
            number_of_records (:obj:`int`): The number of total records
                in the project.
            number_of_unseen_alerts (:obj:`int`): The number of unread
                alerts.
        """
        self.name = name
        self.project_type = project_type
        self.upload_token = upload_token
        self.last_date_modified = last_date_modified
        self.number_of_records = number_of_records
        self.number_of_unseen_alerts = number_of_unseen_alerts

    @classmethod
    def from_mapping(cls, mapping):
        """Creates a new project instance from the given mapping."""
        return cls(
            mapping['project_name'],
            mapping['project_type'],
            mapping['project_upload_token'],
            mapping['last_date_modified'],
            mapping['number_of_records'],
            mapping['number_of_unseen_alerts'],
        )

    @property
    def last_modified(self):
        """:class:`datetime.datetime`: The last modified timestamp."""
        return datetime.datetime.fromtimestamp(self.last_date_modified)

    @property
    def details(self):
        """:obj:`dict`: Returns the project details as a mapping."""
        attr_names = ('project_type', 'upload_token', 'last_date_modified',
                      'number_of_records', 'number_of_unseen_alerts')
        summary = {attr: getattr(self, attr) for attr in attr_names}
        summary['name'] = self.name
        return summary

    def __repr__(self):  # pragma: no cover
        return '<Project(name="{}", project_type="{}")>'.format(
            self.name, self.project_type)
class ProjectMethods(collections_abc.Iterable):
    """Interface to the Matchlight projects API.

    Supports creating (:meth:`add`), renaming (:meth:`edit`), deleting
    (:meth:`delete`), listing (:meth:`all`, :meth:`filter`) and fetching
    (:meth:`get`) fingerprint monitoring projects. Iterating over this
    object yields every project associated with the account.

    Examples:
        Get project from upload token::

            >>> ml.projects.get('3ef85448c-d244-431e-a207-cf8d37ae3bfe')
            <Project(name='Customer Database May 2016',
                     project_type='pii')>

        Filter on project types::

            >>> ml.projects.filter(project_type='pii')
            [<Project(name='...', project_type='pii'),
             <Project(name='...', project_type='pii')]

        Create, rename and delete a project::

            >>> project = ml.projects.add(
            ...     name='Super secret algorithm',
            ...     project_type='source_code')
            >>> project = ml.projects.edit(project, 'Updated name')
            >>> ml.projects.delete(project)
            >>> ml.projects.get(project.upload_token)
            None

    """

    def __init__(self, ml_connection):  # noqa: D205,D400
        """Initializes a project interface with the given Matchlight
        connection.

        Args:
            ml_connection (:class:`~.Connection`): A Matchlight
                connection instance.

        """
        self.conn = ml_connection

    def all(self):
        """Returns every project associated with the account."""
        return self.filter()

    def add(self, name, project_type):
        """Creates a new project or group.

        Arguments:
            name (:obj:`str`): The name of the project to be created.
            project_type (:obj:`str`): The type of project to be
                created (``source_code``, ``document`` or ``pii``).

        Returns:
            :class:`~.Project`: Created project with upload token.

        """
        payload = json.dumps({'name': name, 'type': project_type})
        response = self.conn.request('/project/add', data=payload)
        return Project.from_mapping(response.json()['data'])

    def delete(self, project):
        """Deletes a project and all associated records.

        Args:
            project (:class:`~.Project` or :obj:`str`): The project
                object or upload token to be deleted.

        """
        token = (project
                 if isinstance(project, six.string_types)
                 else project.upload_token)
        self.conn.request(
            '/project/{}/delete'.format(token), data='{}')

    def edit(self, project, updated_name):
        """Renames a project.

        Arguments:
            project (:class:`~.Project` or :obj:`str`): A project
                instance or upload token.
            updated_name (:obj:`str`): New project name.

        Returns:
            :class:`~.Project`: Updated project instance with new name.
            Note that this method mutates any project instance passed.

        """
        if not isinstance(project, Project):
            project = self.get(project)
        self.conn.request(
            '/project/{}/edit'.format(project.upload_token),
            data=json.dumps({'name': updated_name}))
        project.name = updated_name
        return project

    def filter(self, project_type=None):
        """Returns a list of projects associated with the account.

        Arguments:
            project_type (:obj:`str`, optional): Restrict results to one
                of ``source_code``, ``document`` or ``pii``. If omitted
                or ``None``, all projects are returned.

        Returns:
            list of :class:`~.Project`: List of filtered projects.

        """
        response = self.conn.request('/projects', params={
            'project_type': project_type})
        # The type is also re-checked client-side in case the server
        # ignores the query parameter.
        return [
            Project.from_mapping(payload)
            for payload in response.json().get('data', [])
            if not project_type or payload['project_type'] == project_type
        ]

    def get(self, upload_token):
        """Returns a project by the given upload token.

        Args:
            upload_token (:obj:`str`): The project upload token.

        Returns:
            :class:`~.Project` or :obj:`NoneType`: The matching project,
            or ``None`` when no project exists for the token.

        """
        try:
            response = self.conn.request('/project/{}'.format(upload_token))
        except matchlight.error.APIError as err:
            # Mirror records.get(): a missing project yields None
            # instead of raising.
            if err.args[0] == 404:
                return None
            raise
        return Project.from_mapping(response.json())

    def __iter__(self):
        """Iterates over every project on the account."""
        return iter(self.filter())
| {
"repo_name": "TerbiumLabs/python-matchlightsdk",
"path": "src/matchlight/project.py",
"copies": "1",
"size": "8788",
"license": "bsd-3-clause",
"hash": -8966841957121999000,
"line_mean": 33.328125,
"line_max": 76,
"alpha_frac": 0.5696404188,
"autogenerated": false,
"ratio": 4.407221664994985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5476862083794986,
"avg_score": null,
"num_lines": null
} |
"""An interface for creating and retrieving PII records in Matchlight."""
from __future__ import absolute_import
import io
import json
import six
import matchlight.error
import matchlight.utils
from pylibfp import (
fingerprint,
fingerprints_pii_address_variants,
fingerprints_pii_city_state_zip_variants,
fingerprints_pii_credit_card,
fingerprints_pii_email_address,
fingerprints_pii_iban,
fingerprints_pii_medicare_id,
fingerprints_pii_name_variants,
fingerprints_pii_passport,
fingerprints_pii_phone_number,
fingerprints_pii_ssn,
MODE_CODE,
OPTIONS_TILED,
)
# Public API of this module.
__all__ = (
    'Record',
    'RecordMethods',
)
# Upper bound applied to document and source-code uploads. Despite the
# name, it is compared against ``len(content)`` (i.e. characters), not a
# count of fingerprints.
MAX_DOCUMENT_FINGERPRINTS = 840
class Record(object):
    """A fingerprinted Matchlight record (PII, document or source code)."""

    def __init__(self, id, name, description, ctime=None, mtime=None,
                 metadata=None):
        """Initializes a new record.

        Args:
            id (:obj:`str`): A 128-bit UUID.
            name (:obj:`str`): The name of the record.
            description (:obj:`str`): The description of the record.
            ctime (:obj:`int`, optional): Unix timestamp at which the
                record was created.
            mtime (:obj:`int`, optional): Unix timestamp at which the
                record was last modified.
            metadata (:obj:`dict`, optional): Arbitrary record metadata.
                Defaults to an empty dict.

        """
        self.id = id
        self.name = name
        self.description = description
        self.ctime = ctime
        self.mtime = mtime
        # Build a fresh dict per instance -- never share a mutable
        # default between records.
        self.metadata = {} if metadata is None else metadata

    @classmethod
    def from_mapping(cls, mapping):
        """Builds a record instance from an API response payload."""
        field_names = ('id', 'name', 'description', 'ctime', 'mtime',
                       'metadata')
        return cls(**{field: mapping[field] for field in field_names})

    @property
    def user_provided_id(self):
        """The user provided record identifier, or ``None`` if unset."""
        return self.metadata.get('user_record_id')

    @property
    def details(self):
        """:obj:`dict`: The record's attributes as a plain mapping."""
        field_names = ('id', 'name', 'description', 'ctime', 'mtime',
                       'metadata')
        return {field: getattr(self, field) for field in field_names}

    def __repr__(self):  # pragma: no cover
        return '<Record(name="{name}", id="{ident}")>'.format(
            name=self.name, ident=self.id)
class RecordMethods(object):
    """Provides methods for interfacing with the records API.

    Examples:
        Get record by record id::

            >>> record = ml.records.get("0760570a2c4a4ea68d526f58bab46cbd")
            >>> record
            <Record(name="pce****@terbiumlabs.com",
                    id="0760570a2c4a4ea68d526f58bab46cbd")>

        Add PII records to a project::

            >>> pii_project = ml.projects.add(
            ...     name="Employee Database May 2016",
            ...     project_type="pii")
            >>> record_data = {
            ...     "first_name": "Bird",
            ...     "last_name": "Feather",
            ...     "email": "familybird@teribumlabs.com",
            ... }
            >>> new_record = ml.records.add_pii(
            ...     pii_project,
            ...     "uploaded on 20160519",
            ...     **record_data)

        Delete a record::

            >>> record
            <Record(name="fam****@terbiumlabs.com",
                    id="655a732ad0f243beab1801651c2088a3")>
            >>> ml.records.delete(record)

    """

    def __init__(self, ml_connection):  # noqa: D205,D400
        """Initializes a records interface with the given Matchlight
        connection.

        Args:
            ml_connection (:class:`~.Connection`): A Matchlight
                connection instance.

        """
        self.conn = ml_connection

    def all(self):
        """Returns all records associated with the account."""
        return self.filter()

    def add_document(self, project, name, description, content,
                     user_record_id='-', min_score=None, offline=False):
        """Creates a new document record in the given project.

        Args:
            project (:class:`~.Project`): Project object to associate
                with record.
            name (:obj:`str`): The name of the document (not
                fingerprinted).
            description (:obj:`str`): A description of the record (not
                fingerprinted).
            content (:obj:`str`): The text of the document to be
                fingerprinted. Must be 840 characters or less.
            user_record_id (:obj:`str`, optional): An optional, user
                provided custom record identifier. Defaults to ``'-'``.
            min_score (:obj:`int`, optional): Minimum match score to
                store in the upload metadata. Defaults to
                :obj:`NoneType`.
            offline (:obj:`bool`, optional): Run in "offline mode". No
                data is sent to the Matchlight server. Returns a
                dictionary of values instead of a :class:`~.Record`
                instance.

        Returns:
            :class:`~.Record`: Created record with metadata.

        Raises:
            matchlight.error.SDKError: If ``content`` exceeds the
                maximum supported length.

        """
        if len(content) > MAX_DOCUMENT_FINGERPRINTS:
            raise matchlight.error.SDKError(
                'Fingerprinter Failed: the maximum length of a Document record'
                ' is 840 characters.'
            )
        result_json = fingerprint(content, flags=OPTIONS_TILED)
        result = json.loads(result_json)
        fingerprints = result['data']['fingerprints']
        data = {
            'name': name,
            'desc': description,
            'user_record_id': user_record_id,
            'fingerprints': fingerprints,
            'metadata': {
                'fingerprinting_tool_name': 'Python SDK',
                'fingerprinting_tool_version': matchlight.__version__
            }
        }
        if min_score is not None:
            data['metadata']['min_score'] = str(min_score)
        if offline:
            return data
        return self.add_document_from_fingerprints(project, data)

    def add_document_from_fingerprints(self, project, fingerprint_data):
        """Add a document record from fingerprints.

        Add a document record from fingerprinted data generated by
        :meth:`add_document` in offline mode.

        Args:
            project (:class:`~.Project`): Project object to associate
                with record.
            fingerprint_data (:obj:`dict`): The output of
                :class:`~/.Record.add_document(offline=True)`

        Returns:
            :class:`~.Record`: The created record.

        """
        response = self.conn.request(
            '/records/upload/document/{upload_token}'.format(
                upload_token=project.upload_token
            ),
            data=json.dumps(fingerprint_data)
        )
        return Record.from_mapping(response.json())

    def add_pii(self, project, description, email, first_name=None,
                middle_name=None, last_name=None, ssn=None, address=None,
                city=None, state=None, zipcode=None, phone=None,
                credit_card=None, medicare_id=None, passport=None, iban=None,
                user_record_id='-', offline=False):
        """Creates a new PII record in the given project.

        Args:
            project (:class:`~.Project`): Project object to associate
                with record.
            description (:obj:`str`): A description of the record (not
                fingerprinted).
            email (:obj:`str`): An email address (required; also used as
                the blinded record name).
            first_name (:obj:`str`, optional): Defaults to
                :obj:`NoneType`. Requires ``last_name``.
            middle_name (:obj:`str`, optional): Defaults to
                :obj:`NoneType`.
            last_name (:obj:`str`, optional): Defaults to
                :obj:`NoneType`. Requires ``first_name``.
            ssn (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            address (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            city (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            state (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            zipcode (int, optional): Defaults to :obj:`NoneType`.
            phone (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            credit_card (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            medicare_id (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            passport (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            iban (:obj:`str`, optional): Defaults to :obj:`NoneType`.
            user_record_id (:obj:`str`, optional): An optional, user
                provided custom record identifier. Defaults to ``'-'``.
            offline (:obj:`bool`, optional): Run in "offline mode". No
                data is sent to the Matchlight server. Returns a
                dictionary of values instead of a :class:`~.Record`
                instance.

        Returns:
            :class:`~.Record`: Created record with metadata.

        Raises:
            matchlight.error.SDKError: If only one of ``first_name`` and
                ``last_name`` is given.

        """
        if first_name is not None and last_name is None:
            raise matchlight.error.SDKError(
                'Fingerprinter Failed: the last_name argument is required '
                'along with the first_name argument.'
            )
        if first_name is None and last_name is not None:
            raise matchlight.error.SDKError(
                'Fingerprinter Failed: the first_name argument is required '
                'along with the last_name argument.'
            )
        # NOTE(review): blind_name()/blind_email() receive None when the
        # caller omits these fields -- confirm matchlight.utils handles
        # None input.
        data = {
            'desc': description,
            'user_record_id': user_record_id,
            'blinded_first': matchlight.utils.blind_name(first_name),
            'blinded_last': matchlight.utils.blind_name(last_name),
            'blinded_email': matchlight.utils.blind_email(email),
            'metadata': {
                'fingerprinting_tool_name': 'Python SDK',
                'fingerprinting_tool_version': matchlight.__version__
            }
        }
        if any((first_name, middle_name, last_name)):
            name_fingerprints = fingerprints_pii_name_variants(
                first_name or '', middle_name or None, last_name or '')
            data['name_fingerprints'] = name_fingerprints
        if email:
            email_fingerprints = fingerprints_pii_email_address(email)
            data['email_fingerprints'] = email_fingerprints
            # 'blinded_email' is recomputed here (same value as above);
            # kept for parity with the original upload payload.
            data['blinded_email'] = matchlight.utils.blind_email(email)
            data['name'] = matchlight.utils.blind_email(email)
        if ssn:
            ssn_fingerprints = [fingerprints_pii_ssn(ssn)]
            data['ssn_fingerprints'] = ssn_fingerprints
        if address:
            address_fingerprints = fingerprints_pii_address_variants(
                address)
            data['street_address_fingerprints'] = address_fingerprints
        if any((city, state, zipcode)):
            # Coerce to text so non-string zip codes fingerprint cleanly.
            csz_fingerprints = fingerprints_pii_city_state_zip_variants(
                *[six.text_type(text) if text is not None else ''
                  for text in (city, state, zipcode)])
            data['city_state_zip_fingerprints'] = csz_fingerprints
        if phone:
            phone_fingerprints = fingerprints_pii_phone_number(phone)
            data['phone_fingerprints'] = [phone_fingerprints]
        if credit_card:
            cc_fingerprints = fingerprints_pii_credit_card(credit_card)
            data['credit_card_fingerprints'] = [cc_fingerprints]
        if medicare_id:
            medicare_id_fingerprints = fingerprints_pii_medicare_id(
                medicare_id
            )
            data['medicare_id_fingerprints'] = [medicare_id_fingerprints]
        if passport:
            passport_fingerprints = fingerprints_pii_passport(passport)
            data['passport_fingerprints'] = [passport_fingerprints]
        if iban:
            iban_fingerprints = fingerprints_pii_iban(iban)
            data['iban_fingerprints'] = iban_fingerprints
        if offline:
            return data
        return self.add_pii_from_fingerprints(project, data)

    def add_pii_from_fingerprints(self, project, fingerprint_data):
        """Add a PII record from fingerprints.

        Add a PII record from fingerprinted data generated by
        :meth:`add_pii` in offline mode.

        Args:
            project (:class:`~.Project`): Project object to associate
                with record.
            fingerprint_data (:obj:`dict`): The output of
                :class:`~/.Record.add_pii(offline=True)`

        Returns:
            :class:`~.Record`: The created record.

        """
        response = self.conn.request(
            '/records/upload/pii/{}'.format(
                project.upload_token
            ),
            data=json.dumps(fingerprint_data)
        )
        return Record.from_mapping(response.json())

    def add_source_code(self, project, name, description, code_path,
                        min_score=None, offline=False):
        """Creates a new source code record in the given project.

        Args:
            project (:class:`~.Project`): Project object to associate
                with record.
            name (:obj:`str`): The name of the file (not
                fingerprinted).
            description (:obj:`str`): A description of the code (not
                fingerprinted).
            code_path (:obj:`str`): The location of the source code.
                Code must be 840 characters or less.
            min_score (:obj:`int`, optional): Minimum match score to
                store in the upload metadata. Defaults to
                :obj:`NoneType`.
            offline (:obj:`bool`, optional): Run in "offline mode". No
                data is sent to the Matchlight server. Returns a
                dictionary of values instead of a :class:`~.Record`
                instance.

        Returns:
            :class:`~.Record`: Created record with metadata.

        Raises:
            matchlight.error.SDKError: If the file contents exceed the
                maximum supported length.

        """
        with io.open(code_path, 'r', encoding='utf-8') as document:
            content = document.read()
        if len(content) > MAX_DOCUMENT_FINGERPRINTS:
            raise matchlight.error.SDKError(
                'Fingerprinter Failed: the maximum length of a Source Code '
                'record is 840 characters.'
            )
        result_json = fingerprint(content, flags=OPTIONS_TILED, mode=MODE_CODE)
        result = json.loads(result_json)
        fingerprints = result['data']['fingerprints']
        data = {
            'name': name,
            'desc': description,
            'fingerprints': fingerprints,
            'metadata': {
                'fingerprinting_tool_name': 'Python SDK',
                'fingerprinting_tool_version': matchlight.__version__
            }
        }
        if min_score is not None:
            data['metadata']['min_score'] = str(min_score)
        if offline:
            return data
        return self.add_source_code_from_fingerprints(project, data)

    def add_source_code_from_fingerprints(self, project, fingerprint_data):
        """Add a source code record from fingerprints.

        Add a source code record from fingerprinted data generated by
        :meth:`add_source_code` in offline mode.

        Args:
            project (:class:`~.Project`): Project object to associate
                with record.
            fingerprint_data (:obj:`dict`): The output of
                :class:`~/.Record.add_source_code(offline=True)`

        Returns:
            :class:`~.Record`: The created record.

        """
        # Bug fix: the original passed ``upload_token`` as a *keyword*
        # argument to ``str.format`` while the template used a
        # positional ``{}`` placeholder, which raises ``IndexError`` at
        # runtime. Use a named placeholder, matching
        # ``add_document_from_fingerprints``.
        response = self.conn.request(
            '/records/upload/source_code/{upload_token}'.format(
                upload_token=project.upload_token
            ),
            data=json.dumps(fingerprint_data)
        )
        return Record.from_mapping(response.json())

    def delete(self, record_or_id):
        """Delete a fingerprinted record.

        Args:
            record_or_id (:class:`~.Record` or :obj:`str`): The record
                object or identifier to be deleted.

        Returns:
            :obj:`NoneType`

        """
        if isinstance(record_or_id, Record):
            record_upload_token = record_or_id.id
        else:
            record_upload_token = record_or_id
        self.conn.request('/record/{}/delete'.format(record_upload_token),
                          data=json.dumps({}))

    def filter(self, project=None):
        """Returns a list of records.

        Providing an optional **project** keyword argument will only
        return records that are associated with a specific project.

        Args:
            project (:class:`~.Project`, optional): a project object.
                Defaults to all projects if not specified.

        Returns:
            :obj:`list` of :class:`~.Record`: List of records that
            are associated with a project.

        """
        if project is not None:
            upload_token = project.upload_token
        else:
            upload_token = None
        response = self.conn.request('/records', params={
            'upload_token': upload_token})
        records = []
        for payload in response.json().get('data', []):
            # ``metadata`` is not returned by this endpoint, so records
            # are built with the (empty) default.
            records.append(Record(
                id=payload['id'],
                name=payload['name'],
                description=payload['description'],
                ctime=int(payload['ctime']),
                mtime=int(payload['mtime']),
            ))
        return records

    def get(self, record_id):
        """Returns a record by the given record ID.

        Implemented as a linear scan over :meth:`filter`, so every
        record on the account is fetched.

        Args:
            record_id (:obj:`str`): The record identifier.

        Returns:
            :class:`~.Record` or :obj:`NoneType`: The matching record,
            or ``None`` if it does not exist.

        """
        return next((record for record in self.filter()
                     if record.id == record_id), None)

    def __iter__(self):
        """Iterates over all records on the account."""
        return iter(self.filter())
| {
"repo_name": "TerbiumLabs/python-matchlightsdk",
"path": "src/matchlight/record.py",
"copies": "1",
"size": "18317",
"license": "bsd-3-clause",
"hash": 8489491299121575000,
"line_mean": 35.3432539683,
"line_max": 79,
"alpha_frac": 0.5559862423,
"autogenerated": false,
"ratio": 4.3799617407938785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5435947983093878,
"avg_score": null,
"num_lines": null
} |
"""An interface for downloading and filtering Matchlight feeds."""
from __future__ import absolute_import, print_function
import datetime
import io
import json
import time
import six
import matchlight.error
import matchlight.utils
if not six.PY3:
from backports import csv
else:
import csv
class Feed(object):
    """Represents a Matchlight Data Feed.

    Examples:
        >>> ml = matchlight.Matchlight()
        >>> feed = ml.feeds.filter()[0]
        >>> feed
        <Feed(name="CompanyEmailAddress", recent_alerts=2)>

    Attributes:
        name (:obj:`str`): Name of the feed.
        description (:obj:`str`): Description of the feed.
        recent_alerts_count (int): Number of recent alerts.
        start_timestamp: Unix timestamp at which feed collection began
            (converted to a datetime by :attr:`start`).
        stop_timestamp: Unix timestamp at which feed collection ended or
            will end, or ``None`` for feeds that never expire.

    """

    def __init__(self, name, description, recent_alerts_count,
                 start_timestamp, stop_timestamp=None):
        """Initializes a new Matchlight feed.

        Args:
            name (:obj:`str`): Name of the feed.
            description (:obj:`str`): Description of the feed.
            recent_alerts_count (int): Number of recent alerts.
            start_timestamp: Unix timestamp of when feed collection
                began.
            stop_timestamp (optional): Unix timestamp of when feed
                collection ended or will end. If not provided, the feed
                is assumed to never expire.

        """
        self.name = name
        self.description = description
        self.recent_alerts_count = recent_alerts_count
        self.start_timestamp = start_timestamp
        self.stop_timestamp = stop_timestamp

    @property
    def details(self):
        """:obj:`dict`: The feed's attributes as a plain mapping."""
        field_names = ('name', 'description', 'recent_alerts_count',
                       'start_timestamp', 'stop_timestamp')
        return {field: getattr(self, field) for field in field_names}

    @property
    def start(self):
        """:class:`datetime.datetime`: When feed data collection began."""
        return datetime.datetime.fromtimestamp(self.start_timestamp)

    @property
    def end(self):  # noqa: D205,D400
        """:obj:`NoneType` or :class:`datetime.datetime`: When feed data
        collection ended, or ``None`` while no ``stop_timestamp`` is
        set.
        """
        if not self.stop_timestamp:
            return None
        return datetime.datetime.fromtimestamp(self.stop_timestamp)

    def __repr__(self):  # pragma: no cover
        return '<Feed(name="{name}", recent_alerts={alerts})>'.format(
            alerts=self.recent_alerts_count, name=self.name)
class FeedMethods(object):
    """Provides methods for interfacing with the feeds API."""

    def __init__(self, ml_connection):  # noqa: D205,D400
        """Initializes a feed interface with the given Matchlight
        connection.

        Args:
            ml_connection (:class:`~.Connection`): A Matchlight
                connection instance.

        """
        self.conn = ml_connection

    @staticmethod
    def _feed_name(feed):
        """Returns the name of a :class:`~.Feed` instance or bare name."""
        if isinstance(feed, six.string_types):
            return feed
        return feed.name

    @staticmethod
    def _date_range_payload(start_date, end_date):
        """Serializes a date range into the JSON body the feeds API expects."""
        return json.dumps({
            'start_date': int(matchlight.utils.datetime_to_unix(start_date)),
            'end_date': int(matchlight.utils.datetime_to_unix(end_date)),
        })

    def all(self):
        """Returns a list of feeds associated with a Matchlight account.

        Returns:
            :obj:`list` of :class:`matchlight.Feed`: A list of feeds
                associated with an account.

        """
        response = self.conn.request('/feeds')
        return [Feed(**payload) for payload in response.json()['feeds']]

    def counts(self, feed, start_date, end_date):
        """Daily counts for a feed for a given date range.

        Args:
            feed (:class:`~.Feed`): A feed instance or feed name.
            start_date (:class:`datetime.datetime`): Start of date range.
            end_date (:class:`datetime.datetime`): End of date range.

        Returns:
            :obj:`dict`: Mapping of dates (``YYYY-MM-DD``) to alert counts.

        """
        response = self.conn.request(
            '/feeds/{feed_name}'.format(feed_name=self._feed_name(feed)),
            data=self._date_range_payload(start_date, end_date))
        return self._format_count(response.json())

    def download(self, feed, start_date, end_date, save_path=None):
        """Downloads feed data for the given date range.

        Args:
            feed (:class:`~.Feed`): A feed instance or feed name.
            start_date (:class:`datetime.datetime`): Start of date range.
            end_date (:class:`datetime.datetime`): End of date range.
            save_path (:obj:`str`, optional): Path to an output file; if
                given, the raw download is written there and nothing is
                returned.

        Returns:
            :obj:`list` of :obj:`dict`: All feed hits for the given
                range (only when no ``save_path`` is given).

        Raises:
            matchlight.error.SDKError: If feed generation fails.

        """
        feed_name = self._feed_name(feed)
        response = self.conn.request(
            '/feed/{feed_name}/prepare'.format(feed_name=feed_name),
            data=self._date_range_payload(start_date, end_date))
        if response.status_code != 200:
            raise matchlight.error.SDKError(
                'Feed failed to be generated. Please try again later.')
        link_payload = json.dumps(
            {'feed_response_id': response.json().get('feed_response_id')})
        # Poll once per second until the server reports a terminal
        # status.
        # TODO: backoff and timeout
        status = 'pending'
        while status == 'pending':
            response = self.conn.request(
                '/feed/{feed_name}/link'.format(feed_name=feed_name),
                data=link_payload)
            status = response.json().get('status', None)
            time.sleep(1)
        if status == 'failed':
            raise matchlight.error.SDKError(
                'Feed failed to be generated. Please try again later.')
        if status != 'ready':
            raise matchlight.error.SDKError('An unknown error occurred.')
        content = self.conn._request('GET', response.json().get('url'))
        if save_path:
            with io.open(save_path, 'wb') as handle:
                handle.write(content.content)
            return None
        decoded = content.content.decode('utf-8-sig')
        return [
            self._format_feed(row)
            for row in csv.DictReader(decoded.split('\n'))
        ]

    def _format_count(self, counts):
        """Converts epoch-keyed counts into ``YYYY-MM-DD``-keyed counts."""
        formatted = {}
        for stamp, count in counts.items():
            day = datetime.datetime.fromtimestamp(int(stamp))
            formatted[day.strftime('%Y-%m-%d')] = count
        return formatted

    def _format_feed(self, feed_row):
        """Converts a raw CSV row's ``ts`` field into a datetime."""
        feed_row['ts'] = matchlight.utils.terbium_timestamp_to_datetime(
            feed_row['ts'])
        return feed_row

    def __iter__(self):
        """Iterates over all feeds on the account."""
        return iter(self.all())
| {
"repo_name": "TerbiumLabs/python-matchlightsdk",
"path": "src/matchlight/feed.py",
"copies": "1",
"size": "7421",
"license": "bsd-3-clause",
"hash": 786974627101397600,
"line_mean": 32.8858447489,
"line_max": 77,
"alpha_frac": 0.5721600862,
"autogenerated": false,
"ratio": 4.245423340961098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5317583427161098,
"avg_score": null,
"num_lines": null
} |
"""An interface for extending pandas with custom arrays.
.. warning::
This is an experimental API and subject to breaking changes
without warning.
"""
import numpy as np
from pandas.errors import AbstractMethodError
from pandas.compat.numpy import function as nv
# Message template for optional interface methods (e.g. ``__setitem__``);
# formatted with the concrete type and the unimplemented method name.
_not_implemented_message = "{} does not implement {}."
class ExtensionArray(object):
"""Abstract base class for custom 1-D array types.
pandas will recognize instances of this class as proper arrays
with a custom type and will not attempt to coerce them to objects. They
may be stored directly inside a :class:`DataFrame` or :class:`Series`.
.. versionadded:: 0.23.0
Notes
-----
The interface includes the following abstract methods that must be
implemented by subclasses:
* _from_sequence
* _from_factorized
* __getitem__
* __len__
* dtype
* nbytes
* isna
* take
* copy
* _concat_same_type
An additional method is available to satisfy pandas' internal,
private block API.
* _formatting_values
Some methods require casting the ExtensionArray to an ndarray of Python
objects with ``self.astype(object)``, which may be expensive. When
performance is a concern, we highly recommend overriding the following
methods:
* fillna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
ExtensionArrays are limited to 1 dimension.
They may be backed by none, one, or many NumPy arrays. For example,
``pandas.Categorical`` is an extension array backed by two arrays,
one for codes and one for categories. An array of IPv6 address may
be backed by a NumPy structured array with two fields, one for the
lower 64 bits and one for the upper 64 bits. Or they may be backed
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
The ExtensionArray interface does not impose any rules on how this data
is stored. However, currently, the backing data cannot be stored in
attributes called ``.values`` or ``._values`` to ensure full compatibility
with pandas internals. But other names as ``.data``, ``._data``,
``._items``, ... can be freely used.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
_typ = 'extension'
# ------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------
    @classmethod
    def _from_sequence(cls, scalars):
        """Construct a new ExtensionArray from a sequence of scalars.

        Abstract: subclasses must override this; the base implementation
        raises ``AbstractMethodError``.

        Parameters
        ----------
        scalars : Sequence
            Each element will be an instance of the scalar type for this
            array, ``cls.dtype.type``.

        Returns
        -------
        ExtensionArray
        """
        raise AbstractMethodError(cls)
    @classmethod
    def _from_factorized(cls, values, original):
        """Reconstruct an ExtensionArray after factorization.

        Abstract: subclasses must override this; the base implementation
        raises ``AbstractMethodError``.

        Parameters
        ----------
        values : ndarray
            An integer ndarray with the factorized values.
        original : ExtensionArray
            The original ExtensionArray that factorize was called on.

        See Also
        --------
        pandas.factorize
        ExtensionArray.factorize
        """
        raise AbstractMethodError(cls)
# ------------------------------------------------------------------------
# Must be a Sequence
# ------------------------------------------------------------------------
    def __getitem__(self, item):
        # type: (Any) -> Any
        """Select a subset of self.

        Abstract: subclasses must override this; the base implementation
        raises ``AbstractMethodError``.

        Parameters
        ----------
        item : int, slice, or ndarray
            * int: The position in 'self' to get.
            * slice: A slice object, where 'start', 'stop', and 'step' are
              integers or None
            * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'

        Returns
        -------
        item : scalar or ExtensionArray

        Notes
        -----
        For scalar ``item``, return a scalar value suitable for the array's
        type. This should be an instance of ``self.dtype.type``.

        For slice ``key``, return an instance of ``ExtensionArray``, even
        if the slice is length 0 or 1.

        For a boolean mask, return an instance of ``ExtensionArray``, filtered
        to the values where ``item`` is True.
        """
        raise AbstractMethodError(self)
    def __setitem__(self, key, value):
        # type: (Union[int, np.ndarray], Any) -> None
        """Set one or more values inplace.

        Optional: the base implementation raises ``NotImplementedError``;
        this method is not required to satisfy the pandas extension
        array interface.

        Parameters
        ----------
        key : int, ndarray, or slice
            When called from, e.g. ``Series.__setitem__``, ``key`` will be
            one of

            * scalar int
            * ndarray of integers.
            * boolean ndarray
            * slice object

        value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
            value or values to be set of ``key``.

        Returns
        -------
        None
        """
        # Some notes to the ExtensionArray implementor who may have ended up
        # here. While this method is not required for the interface, if you
        # *do* choose to implement __setitem__, then some semantics should be
        # observed:
        #
        # * Setting multiple values : ExtensionArrays should support setting
        #   multiple values at once, 'key' will be a sequence of integers and
        #   'value' will be a same-length sequence.
        #
        # * Broadcasting : For a sequence 'key' and a scalar 'value',
        #   each position in 'key' should be set to 'value'.
        #
        # * Coercion : Most users will expect basic coercion to work. For
        #   example, a string like '2018-01-01' is coerced to a datetime
        #   when setting on a datetime64ns array. In general, if the
        #   __init__ method coerces that value, then so should __setitem__
        raise NotImplementedError(_not_implemented_message.format(
            type(self), '__setitem__')
        )
    def __len__(self):
        # type: () -> int
        """Length of this array.

        Abstract: subclasses must override this; the base implementation
        raises ``AbstractMethodError``.

        Returns
        -------
        length : int
        """
        raise AbstractMethodError(self)
def __iter__(self):
"""Iterate over elements of the array.
"""
# This needs to be implemented so that pandas recognizes extension
# arrays as list-like. The default implementation makes successive
# calls to ``__getitem__``, which may be slower than necessary.
for i in range(len(self)):
yield self[i]
# ------------------------------------------------------------------------
# Required attributes
# ------------------------------------------------------------------------
@property
def dtype(self):
    # type: () -> ExtensionDtype
    """An instance of 'ExtensionDtype'."""
    # Abstract: every concrete ExtensionArray must describe its dtype.
    raise AbstractMethodError(self)
@property
def shape(self):
    # type: () -> Tuple[int, ...]
    """Return a tuple of the array dimensions."""
    # Extension arrays are one-dimensional by contract (see ``ndim``),
    # so the shape is just the length.
    return tuple([len(self)])
@property
def ndim(self):
    # type: () -> int
    """Extension Arrays are only allowed to be 1-dimensional."""
    # Fixed constant: the interface does not support N-D extension arrays.
    return 1
@property
def nbytes(self):
    # type: () -> int
    """The number of bytes needed to store this object in memory.
    """
    # If this is expensive to compute, return an approximate lower bound
    # on the number of bytes needed.
    # Abstract: only the subclass knows its storage layout.
    raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Additional Methods
# ------------------------------------------------------------------------
def astype(self, dtype, copy=True):
    """Cast to a NumPy array with 'dtype'.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    copy : bool, default True
        Whether to copy the data, even if not necessary. If False,
        a copy is made only if the old dtype does not match the
        new dtype.

    Returns
    -------
    array : ndarray
        NumPy ndarray with 'dtype' for its dtype.
    """
    # Materialize through the ndarray constructor, which honors both the
    # requested dtype and the copy flag.
    result = np.array(self, dtype=dtype, copy=copy)
    return result
def isna(self):
    # type: () -> np.ndarray
    """Boolean NumPy array indicating if each value is missing.

    This should return a 1-D array the same length as 'self'.
    """
    # Abstract: NA semantics depend on the subclass's storage.
    raise AbstractMethodError(self)
def _values_for_argsort(self):
    # type: () -> ndarray
    """Return values for sorting.

    Returns
    -------
    ndarray
        The transformed values should maintain the ordering between values
        within the array.

    See Also
    --------
    ExtensionArray.argsort
    """
    # Note: this is used in `ExtensionArray.argsort`. Subclasses may
    # override it to supply a cheaper sortable representation.
    return np.array(self)
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
    """
    Return the indices that would sort this array.

    Parameters
    ----------
    ascending : bool, default True
        Whether the indices should result in an ascending
        or descending sort.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    *args, **kwargs:
        passed through to :func:`numpy.argsort`.

    Returns
    -------
    index_array : ndarray
        Array of indices that sort ``self``.

    See Also
    --------
    numpy.argsort : Sorting implementation used internally.
    """
    # Implementor note: You have two places to override the behavior of
    # argsort.
    # 1. _values_for_argsort : construct the values passed to np.argsort
    # 2. argsort : total control over sorting.
    # `nv` validates numpy-compat positional args (e.g. from Series.argsort).
    ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
    values = self._values_for_argsort()
    result = np.argsort(values, kind=kind, **kwargs)
    # Descending order is produced by reversing the ascending permutation.
    if not ascending:
        result = result[::-1]
    return result
def fillna(self, value=None, method=None, limit=None):
    """ Fill NA/NaN values using the specified method.

    Parameters
    ----------
    value : scalar, array-like
        If a scalar value is passed it is used to fill all missing values.
        Alternatively, an array-like 'value' can be given. It's expected
        that the array-like have the same length as 'self'.
    method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed Series
        pad / ffill: propagate last valid observation forward to next valid
        backfill / bfill: use NEXT valid observation to fill gap
    limit : int, default None
        If method is specified, this is the maximum number of consecutive
        NaN values to forward/backward fill. In other words, if there is
        a gap with more than this number of consecutive NaNs, it will only
        be partially filled. If method is not specified, this is the
        maximum number of entries along the entire axis where NaNs will be
        filled.

    Returns
    -------
    filled : ExtensionArray with NA/NaN filled
    """
    from pandas.api.types import is_array_like
    from pandas.util._validators import validate_fillna_kwargs
    from pandas.core.missing import pad_1d, backfill_1d

    value, method = validate_fillna_kwargs(value, method)

    mask = self.isna()

    if is_array_like(value):
        if len(value) != len(self):
            raise ValueError("Length of 'value' does not match. Got ({}) "
                             " expected {}".format(len(value), len(self)))
        # Only the entries at NA positions are consumed from an
        # array-like `value`.
        value = value[mask]

    if mask.any():
        if method is not None:
            # pad/backfill operate on an object-dtype copy of the data.
            func = pad_1d if method == 'pad' else backfill_1d
            new_values = func(self.astype(object), limit=limit,
                              mask=mask)
            # BUG FIX: pass dtype so the round-trip through object dtype
            # reconstructs an array of the original extension dtype
            # (previously the dtype was dropped on this path).
            new_values = self._from_sequence(new_values, dtype=self.dtype)
        else:
            # fill with value
            new_values = self.copy()
            new_values[mask] = value
    else:
        # Nothing missing: still return a copy so callers can mutate it.
        new_values = self.copy()
    return new_values
def unique(self):
    """Compute the ExtensionArray of unique values.

    Returns
    -------
    uniques : ExtensionArray
    """
    from pandas import unique

    # Deduplicate through an object-dtype view; order of first
    # appearance is preserved by pandas.unique.
    uniques = unique(self.astype(object))
    # BUG FIX: pass dtype so the object-dtype uniques are converted back
    # to the original extension dtype instead of whatever
    # _from_sequence infers by default.
    return self._from_sequence(uniques, dtype=self.dtype)
def _values_for_factorize(self):
    # type: () -> Tuple[ndarray, Any]
    """Return an array and missing value suitable for factorization.

    Returns
    -------
    values : ndarray
        An array suitable for factorization. This should maintain order
        and be a supported dtype (Float64, Int64, UInt64, String, Object).
        By default, the extension array is cast to object dtype.
    na_value : object
        The value in `values` to consider missing. This will be treated
        as NA in the factorization routines, so it will be coded as
        `na_sentinel` and not included in `uniques`. By default,
        ``np.nan`` is used.
    """
    # Default: object dtype with np.nan as the missing marker; subclasses
    # with a native numeric representation should override for speed.
    return self.astype(object), np.nan
def factorize(self, na_sentinel=-1):
    # type: (int) -> Tuple[ndarray, ExtensionArray]
    """Encode the extension array as an enumerated type.

    Parameters
    ----------
    na_sentinel : int, default -1
        Value to use in the `labels` array to indicate missing values.

    Returns
    -------
    labels : ndarray
        An integer NumPy array that's an indexer into the original
        ExtensionArray.
    uniques : ExtensionArray
        An ExtensionArray containing the unique values of `self`.

        .. note::
           uniques will *not* contain an entry for the NA value of
           the ExtensionArray if there are any missing values present
           in `self`.

    See Also
    --------
    pandas.factorize : Top-level factorize method that dispatches here.

    Notes
    -----
    :meth:`pandas.factorize` offers a `sort` keyword as well.
    """
    # Implementor note: There are two ways to override the behavior of
    # pandas.factorize
    # 1. _values_for_factorize and _from_factorize.
    #    Specify the values passed to pandas' internal factorization
    #    routines, and how to convert from those values back to the
    #    original ExtensionArray.
    # 2. ExtensionArray.factorize.
    #    Complete control over factorization.
    from pandas.core.algorithms import _factorize_array

    arr, na_value = self._values_for_factorize()
    labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
                                       na_value=na_value)
    # Rebuild an ExtensionArray of this type from the factorized uniques.
    uniques = self._from_factorized(uniques, self)
    return labels, uniques
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------
def take(self, indices, allow_fill=False, fill_value=None):
    # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray
    """Take elements from an array.

    Parameters
    ----------
    indices : sequence of integers
        Indices to be taken.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to
          :func:`numpy.take`.
        * True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any
          other negative values raise a ``ValueError``.
    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type, ``self.dtype.na_value``, is used.

        For many ExtensionArrays, there will be two representations of
        `fill_value`: a user-facing "boxed" scalar, and a low-level
        physical NA value. `fill_value` should be the user-facing version,
        and the implementation should handle translating that to the
        physical version for processing the take if necessary.

    Returns
    -------
    ExtensionArray

    Raises
    ------
    IndexError
        When the indices are out of bounds for the array.
    ValueError
        When `indices` contains negative values other than ``-1``
        and `allow_fill` is True.

    Notes
    -----
    ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
    ``iloc``, when `indices` is a sequence of values. Additionally,
    it's called by :meth:`Series.reindex`, or any other method
    that causes realignment, with a `fill_value`.

    See Also
    --------
    numpy.take
    pandas.api.extensions.take

    Examples
    --------
    Here's an example implementation, which relies on casting the
    extension array to object dtype. This uses the helper method
    :func:`pandas.api.extensions.take`.

    .. code-block:: python

       def take(self, indices, allow_fill=False, fill_value=None):
           from pandas.core.algorithms import take

           # If the ExtensionArray is backed by an ndarray, then
           # just pass that here instead of coercing to object.
           data = self.astype(object)

           if allow_fill and fill_value is None:
               fill_value = self.dtype.na_value

           # fill value should always be translated from the scalar
           # type for the array, to the physical storage type for
           # the data, before passing to take.

           result = take(data, indices, fill_value=fill_value,
                         allow_fill=allow_fill)
           return self._from_sequence(result)
    """
    # Implementer note: The `fill_value` parameter should be a user-facing
    # value, an instance of self.dtype.type. When passed `fill_value=None`,
    # the default of `self.dtype.na_value` should be used.
    # This may differ from the physical storage type your ExtensionArray
    # uses. In this case, your implementation is responsible for casting
    # the user-facing type to the storage type, before using
    # pandas.api.extensions.take
    raise AbstractMethodError(self)
def copy(self, deep=False):
    # type: (bool) -> ExtensionArray
    """Return a copy of the array.

    Parameters
    ----------
    deep : bool, default False
        Also copy the underlying data backing this array.

    Returns
    -------
    ExtensionArray
    """
    # Abstract: copying depends on the subclass's backing storage.
    raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Block-related methods
# ------------------------------------------------------------------------
def _formatting_values(self):
    # type: () -> np.ndarray
    # At the moment, this has to be an array since we use result.dtype
    """An array of values to be printed in, e.g. the Series repr"""
    # Default: materialize to an ndarray; subclasses may return
    # prettier "boxed" values.
    return np.array(self)
@classmethod
def _concat_same_type(cls, to_concat):
    # type: (Sequence[ExtensionArray]) -> ExtensionArray
    """Concatenate multiple arrays of this type.

    Parameters
    ----------
    to_concat : sequence of this type

    Returns
    -------
    ExtensionArray
    """
    # Abstract: concatenation depends on the subclass's backing storage.
    raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
# will use the ExtensionDtype.na_value as the NA value in operations
# such as take(), reindex(), shift(), etc. In addition, those results
# will then be of the ExtensionArray subclass rather than an array
# of objects
_can_hold_na = True  # type: bool
@property
def _ndarray_values(self):
    # type: () -> np.ndarray
    """Internal pandas method for lossy conversion to a NumPy ndarray.

    This method is not part of the pandas interface.

    The expectation is that this is cheap to compute, and is primarily
    used for interacting with our indexers.
    """
    # Default simply materializes the array; "lossy" because extension
    # semantics (e.g. NA handling) may not survive the conversion.
    return np.array(self)
| {
"repo_name": "ryfeus/lambda-packs",
"path": "Tensorflow_Pandas_Numpy/source3.6/pandas/core/arrays/base.py",
"copies": "1",
"size": "21492",
"license": "mit",
"hash": 5165872286571083000,
"line_mean": 34.1176470588,
"line_max": 79,
"alpha_frac": 0.5648613438,
"autogenerated": false,
"ratio": 4.869053013140009,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5933914356940009,
"avg_score": null,
"num_lines": null
} |
"""An interface for extending pandas with custom arrays.
.. warning::
This is an experimental API and subject to breaking changes
without warning.
"""
import numpy as np
import operator
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
from pandas.errors import AbstractMethodError
from pandas.compat.numpy import function as nv
from pandas.compat import set_function_name, PY3
from pandas.core import ops
from pandas.core.dtypes.common import is_list_like
_not_implemented_message = "{} does not implement {}."
class ExtensionArray(object):
"""Abstract base class for custom 1-D array types.
pandas will recognize instances of this class as proper arrays
with a custom type and will not attempt to coerce them to objects. They
may be stored directly inside a :class:`DataFrame` or :class:`Series`.
.. versionadded:: 0.23.0
Notes
-----
The interface includes the following abstract methods that must be
implemented by subclasses:
* _from_sequence
* _from_factorized
* __getitem__
* __len__
* dtype
* nbytes
* isna
* take
* copy
* _concat_same_type
An additional method is available to satisfy pandas' internal,
private block API.
* _formatting_values
Some methods require casting the ExtensionArray to an ndarray of Python
objects with ``self.astype(object)``, which may be expensive. When
performance is a concern, we highly recommend overriding the following
methods:
* fillna
* dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
The remaining methods implemented on this class should be performant,
as they only compose abstract methods. Still, a more efficient
implementation may be available, and these methods can be overridden.
One can implement methods to handle array reductions.
* _reduce
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
ExtensionArrays are limited to 1 dimension.
They may be backed by none, one, or many NumPy arrays. For example,
``pandas.Categorical`` is an extension array backed by two arrays,
one for codes and one for categories. An array of IPv6 address may
be backed by a NumPy structured array with two fields, one for the
lower 64 bits and one for the upper 64 bits. Or they may be backed
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
The ExtensionArray interface does not impose any rules on how this data
is stored. However, currently, the backing data cannot be stored in
attributes called ``.values`` or ``._values`` to ensure full compatibility
with pandas internals. But other names as ``.data``, ``._data``,
``._items``, ... can be freely used.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
_typ = 'extension'
# ------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : boolean, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
@classmethod
def _from_factorized(cls, values, original):
"""Reconstruct an ExtensionArray after factorization.
Parameters
----------
values : ndarray
An integer ndarray with the factorized values.
original : ExtensionArray
The original ExtensionArray that factorize was called on.
See Also
--------
pandas.factorize
ExtensionArray.factorize
"""
raise AbstractMethodError(cls)
# ------------------------------------------------------------------------
# Must be a Sequence
# ------------------------------------------------------------------------
def __getitem__(self, item):
# type (Any) -> Any
"""Select a subset of self.
Parameters
----------
item : int, slice, or ndarray
* int: The position in 'self' to get.
* slice: A slice object, where 'start', 'stop', and 'step' are
integers or None
* ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
Returns
-------
item : scalar or ExtensionArray
Notes
-----
For scalar ``item``, return a scalar value suitable for the array's
type. This should be an instance of ``self.dtype.type``.
For slice ``key``, return an instance of ``ExtensionArray``, even
if the slice is length 0 or 1.
For a boolean mask, return an instance of ``ExtensionArray``, filtered
to the values where ``item`` is True.
"""
raise AbstractMethodError(self)
def __setitem__(self, key, value):
# type: (Union[int, np.ndarray], Any) -> None
"""Set one or more values inplace.
This method is not required to satisfy the pandas extension array
interface.
Parameters
----------
key : int, ndarray, or slice
When called from, e.g. ``Series.__setitem__``, ``key`` will be
one of
* scalar int
* ndarray of integers.
* boolean ndarray
* slice object
value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
value or values to be set of ``key``.
Returns
-------
None
"""
# Some notes to the ExtensionArray implementor who may have ended up
# here. While this method is not required for the interface, if you
# *do* choose to implement __setitem__, then some semantics should be
# observed:
#
# * Setting multiple values : ExtensionArrays should support setting
# multiple values at once, 'key' will be a sequence of integers and
# 'value' will be a same-length sequence.
#
# * Broadcasting : For a sequence 'key' and a scalar 'value',
# each position in 'key' should be set to 'value'.
#
# * Coercion : Most users will expect basic coercion to work. For
# example, a string like '2018-01-01' is coerced to a datetime
# when setting on a datetime64ns array. In general, if the
# __init__ method coerces that value, then so should __setitem__
raise NotImplementedError(_not_implemented_message.format(
type(self), '__setitem__')
)
def __len__(self):
# type: () -> int
"""Length of this array
Returns
-------
length : int
"""
raise AbstractMethodError(self)
def __iter__(self):
"""Iterate over elements of the array.
"""
# This needs to be implemented so that pandas recognizes extension
# arrays as list-like. The default implementation makes successive
# calls to ``__getitem__``, which may be slower than necessary.
for i in range(len(self)):
yield self[i]
# ------------------------------------------------------------------------
# Required attributes
# ------------------------------------------------------------------------
@property
def dtype(self):
# type: () -> ExtensionDtype
"""An instance of 'ExtensionDtype'."""
raise AbstractMethodError(self)
@property
def shape(self):
# type: () -> Tuple[int, ...]
"""Return a tuple of the array dimensions."""
return (len(self),)
@property
def ndim(self):
# type: () -> int
"""Extension Arrays are only allowed to be 1-dimensional."""
return 1
@property
def nbytes(self):
# type: () -> int
"""The number of bytes needed to store this object in memory.
"""
# If this is expensive to compute, return an approximate lower bound
# on the number of bytes needed.
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Additional Methods
# ------------------------------------------------------------------------
def astype(self, dtype, copy=True):
"""Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
return np.array(self, dtype=dtype, copy=copy)
def isna(self):
# type: () -> Union[ExtensionArray, np.ndarray]
"""
A 1-D array indicating if each value is missing.
Returns
-------
na_values : Union[np.ndarray, ExtensionArray]
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
returned.
Notes
-----
If returning an ExtensionArray, then
* ``na_values._is_boolean`` should be True
* `na_values` should implement :func:`ExtensionArray._reduce`
* ``na_values.any`` and ``na_values.all`` should be implemented
"""
raise AbstractMethodError(self)
def _values_for_argsort(self):
# type: () -> ndarray
"""Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
index_array : ndarray
Array of indices that sort ``self``.
See Also
--------
numpy.argsort : Sorting implementation used internally.
"""
# Implementor note: You have two places to override the behavior of
# argsort.
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting.
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
values = self._values_for_argsort()
result = np.argsort(values, kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, array-like
If a scalar value is passed it is used to fill all missing values.
Alternatively, an array-like 'value' can be given. It's expected
that the array-like have the same length as 'self'.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : ExtensionArray with NA/NaN filled
"""
from pandas.api.types import is_array_like
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.missing import pad_1d, backfill_1d
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError("Length of 'value' does not match. Got ({}) "
" expected {}".format(len(value), len(self)))
value = value[mask]
if mask.any():
if method is not None:
func = pad_1d if method == 'pad' else backfill_1d
new_values = func(self.astype(object), limit=limit,
mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
def dropna(self):
    """ Return ExtensionArray without NA values

    Returns
    -------
    valid : ExtensionArray
    """
    # Boolean-mask indexing with the inverse of the NA mask keeps only
    # the valid entries, in their original order.
    keep = ~self.isna()
    return self[keep]
def shift(self, periods=1):
    # type: (int) -> ExtensionArray
    """
    Shift values by desired number.

    Newly introduced missing values are filled with
    ``self.dtype.na_value``.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    periods : int, default 1
        The number of periods to shift. Negative values are allowed
        for shifting backwards.

    Returns
    -------
    shifted : ExtensionArray
    """
    # Note: this implementation assumes that `self.dtype.na_value` can be
    # stored in an instance of your ExtensionArray with `self.dtype`.
    if periods == 0:
        return self.copy()
    # Build a block of |periods| NA values to splice in at one end.
    empty = self._from_sequence([self.dtype.na_value] * abs(periods),
                                dtype=self.dtype)
    if periods > 0:
        # Shift right: NAs lead, the last `periods` elements fall off.
        a = empty
        b = self[:-periods]
    else:
        # Shift left: the first |periods| elements fall off, NAs trail.
        a = self[abs(periods):]
        b = empty
    return self._concat_same_type([a, b])
def unique(self):
"""Compute the ExtensionArray of unique values.
Returns
-------
uniques : ExtensionArray
"""
from pandas import unique
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
def _values_for_factorize(self):
# type: () -> Tuple[ndarray, Any]
"""Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`na_sentinal` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
def factorize(self, na_sentinel=-1):
# type: (int) -> Tuple[ndarray, ExtensionArray]
"""Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `labels` array to indicate missing values.
Returns
-------
labels : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
pandas.factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
"""
# Impelmentor note: There are two ways to override the behavior of
# pandas.factorize
# 1. _values_for_factorize and _from_factorize.
# Specify the values passed to pandas' internal factorization
# routines, and how to convert from those values back to the
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
from pandas.core.algorithms import _factorize_array
arr, na_value = self._values_for_factorize()
labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
na_value=na_value)
uniques = self._from_factorized(uniques, self)
return labels, uniques
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------
def take(self, indices, allow_fill=False, fill_value=None):
# type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray
"""Take elements from an array.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
other negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
Returns
-------
ExtensionArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
``iloc``, when `indices` is a sequence of values. Additionally,
it's called by :meth:`Series.reindex`, or any other method
that causes realignment, with a `fill_value`.
See Also
--------
numpy.take
pandas.api.extensions.take
Examples
--------
Here's an example implementation, which relies on casting the
extension array to object dtype. This uses the helper method
:func:`pandas.api.extensions.take`.
.. code-block:: python
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
# If the ExtensionArray is backed by an ndarray, then
# just pass that here instead of coercing to object.
data = self.astype(object)
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
# fill value should always be translated from the scalar
# type for the array, to the physical storage type for
# the data, before passing to take.
result = take(data, indices, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
"""
# Implementer note: The `fill_value` parameter should be a user-facing
# value, an instance of self.dtype.type. When passed `fill_value=None`,
# the default of `self.dtype.na_value` should be used.
# This may differ from the physical storage type your ExtensionArray
# uses. In this case, your implementation is responsible for casting
# the user-facing type to the storage type, before using
# pandas.api.extensions.take
raise AbstractMethodError(self)
def copy(self, deep=False):
# type: (bool) -> ExtensionArray
"""Return a copy of the array.
Parameters
----------
deep : bool, default False
Also copy the underlying data backing this array.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Block-related methods
# ------------------------------------------------------------------------
def _formatting_values(self):
# type: () -> np.ndarray
# At the moment, this has to be an array since we use result.dtype
"""An array of values to be printed in, e.g. the Series repr"""
return np.array(self)
@classmethod
def _concat_same_type(cls, to_concat):
# type: (Sequence[ExtensionArray]) -> ExtensionArray
"""Concatenate multiple array
Parameters
----------
to_concat : sequence of this type
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
# will use the ExtensionDtype.na_value as the NA value in operations
# such as take(), reindex(), shift(), etc. In addition, those results
# will then be of the ExtensionArray subclass rather than an array
# of objects
_can_hold_na = True
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""Internal pandas method for lossy conversion to a NumPy ndarray.
This method is not part of the pandas interface.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
"""
return np.array(self)
def _reduce(self, name, skipna=True, **kwargs):
    """
    Return a scalar result of performing the reduction operation.

    Parameters
    ----------
    name : str
        Name of the function, supported values are:
        { any, all, min, max, sum, mean, median, prod,
        std, var, sem, kurt, skew }.
    skipna : bool, default True
        If True, skip NaN values.
    **kwargs
        Additional keyword arguments passed to the reduction function.
        Currently, `ddof` is the only supported kwarg.

    Returns
    -------
    scalar

    Raises
    ------
    TypeError : subclass does not define reductions
    """
    # The base class defines no reductions; subclasses opt in by
    # overriding this method.
    raise TypeError("cannot perform {name} with type {dtype}".format(
        name=name, dtype=self.dtype))
class ExtensionOpsMixin(object):
    """
    A base class for linking the operators to their dunder names.

    .. note::

       You may want to set ``__array_priority__`` if you want your
       implementation to be called when involved in binary operations
       with NumPy arrays.
    """

    @classmethod
    def _add_arithmetic_ops(cls):
        # Wire every arithmetic dunder to a method built by the
        # subclass-provided factory ``cls._create_arithmetic_method``.
        cls.__add__ = cls._create_arithmetic_method(operator.add)
        cls.__radd__ = cls._create_arithmetic_method(ops.radd)
        cls.__sub__ = cls._create_arithmetic_method(operator.sub)
        cls.__rsub__ = cls._create_arithmetic_method(ops.rsub)
        cls.__mul__ = cls._create_arithmetic_method(operator.mul)
        cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)
        cls.__pow__ = cls._create_arithmetic_method(operator.pow)
        cls.__rpow__ = cls._create_arithmetic_method(ops.rpow)
        cls.__mod__ = cls._create_arithmetic_method(operator.mod)
        cls.__rmod__ = cls._create_arithmetic_method(ops.rmod)
        cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv)
        cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv)
        cls.__truediv__ = cls._create_arithmetic_method(operator.truediv)
        cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv)
        if not PY3:
            # Python 2 only: classic division operators.
            cls.__div__ = cls._create_arithmetic_method(operator.div)
            cls.__rdiv__ = cls._create_arithmetic_method(ops.rdiv)

        cls.__divmod__ = cls._create_arithmetic_method(divmod)
        cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)

    @classmethod
    def _add_comparison_ops(cls):
        # Wire the comparison dunders via ``cls._create_comparison_method``.
        cls.__eq__ = cls._create_comparison_method(operator.eq)
        cls.__ne__ = cls._create_comparison_method(operator.ne)
        cls.__lt__ = cls._create_comparison_method(operator.lt)
        cls.__gt__ = cls._create_comparison_method(operator.gt)
        cls.__le__ = cls._create_comparison_method(operator.le)
        cls.__ge__ = cls._create_comparison_method(operator.ge)
class ExtensionScalarOpsMixin(ExtensionOpsMixin):
    """
    A mixin for defining ops on an ExtensionArray.

    It is assumed that the underlying scalar objects have the operators
    already defined.

    Notes
    -----
    If you have defined a subclass MyExtensionArray(ExtensionArray), then
    use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
    get the arithmetic operators. After the definition of MyExtensionArray,
    insert the lines

        MyExtensionArray._add_arithmetic_ops()
        MyExtensionArray._add_comparison_ops()

    to link the operators to your class.

    .. note::

       You may want to set ``__array_priority__`` if you want your
       implementation to be called when involved in binary operations
       with NumPy arrays.
    """

    @classmethod
    def _create_method(cls, op, coerce_to_dtype=True):
        """
        Build an operator method for this class by dispatching ``op``
        element-wise over the array.

        Parameters
        ----------
        op : function
            A binary operator, called as ``op(a, b)``.
        coerce_to_dtype : bool, default True
            Try to wrap the element-wise results back into this
            ExtensionArray dtype; on failure (or when False) return a
            plain ndarray instead.

        Returns
        -------
        Callable[[Any, Any], Union[ndarray, ExtensionArray]]
            A method suitable for binding to a class; returns an
            ExtensionArray or an ndarray (NumPy dtype inference applies).
        """

        def _binop(self, other):
            if isinstance(other, (ABCSeries, ABCIndexClass)):
                # Defer: pandas will unbox the Series/Index and
                # re-dispatch to this array.
                return NotImplemented

            # Broadcast a scalar RHS to the length of self.
            if isinstance(other, ExtensionArray) or is_list_like(other):
                rhs = other
            else:
                rhs = [other] * len(self)

            # Element-wise dispatch. If the operator is not defined for
            # the underlying objects, a TypeError should be raised.
            results = [op(a, b) for a, b in zip(self, rhs)]

            def _wrap(values):
                if not coerce_to_dtype:
                    return np.asarray(values)
                # https://github.com/pandas-dev/pandas/issues/22850
                # We catch all regular exceptions here, and fall back
                # to an ndarray.
                try:
                    return self._from_sequence(values)
                except Exception:
                    return np.asarray(values)

            if op.__name__ in {'divmod', 'rdivmod'}:
                # divmod yields pairs; wrap each component separately.
                quotients, remainders = zip(*results)
                return _wrap(quotients), _wrap(remainders)
            return _wrap(results)

        return set_function_name(_binop, ops._get_op_name(op, True), cls)

    @classmethod
    def _create_arithmetic_method(cls, op):
        # Arithmetic results are coerced back to the EA dtype.
        return cls._create_method(op)

    @classmethod
    def _create_comparison_method(cls, op):
        # Comparisons stay as (boolean) ndarrays.
        return cls._create_method(op, coerce_to_dtype=False)
| {
"repo_name": "amolkahat/pandas",
"path": "pandas/core/arrays/base.py",
"copies": "3",
"size": "31271",
"license": "bsd-3-clause",
"hash": 1093223014130310400,
"line_mean": 34.3344632768,
"line_max": 79,
"alpha_frac": 0.574366026,
"autogenerated": false,
"ratio": 4.74666059502125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6821026621021251,
"avg_score": null,
"num_lines": null
} |
"""An interface for extending pandas with custom arrays.
.. warning::
This is an experimental API and subject to breaking changes
without warning.
"""
import operator
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas._typing import ArrayLike
from pandas.core import ops
from pandas.core.sorting import nargsort
# Template for NotImplementedError messages raised by optional methods.
_not_implemented_message = "{} does not implement {}."

# Shared docstring fragments, keyed by method name (populated below and
# injected into methods via @Appender).
_extension_array_shared_docs = dict()  # type: Dict[str, str]
class ExtensionArray:
"""
Abstract base class for custom 1-D array types.
pandas will recognize instances of this class as proper arrays
with a custom type and will not attempt to coerce them to objects. They
may be stored directly inside a :class:`DataFrame` or :class:`Series`.
.. versionadded:: 0.23.0
Attributes
----------
dtype
nbytes
ndim
shape
Methods
-------
argsort
astype
copy
dropna
factorize
fillna
isna
ravel
repeat
searchsorted
shift
take
unique
_concat_same_type
_formatter
_formatting_values
_from_factorized
_from_sequence
_from_sequence_of_strings
_ndarray_values
_reduce
_values_for_argsort
_values_for_factorize
Notes
-----
The interface includes the following abstract methods that must be
implemented by subclasses:
* _from_sequence
* _from_factorized
* __getitem__
* __len__
* dtype
* nbytes
* isna
* take
* copy
* _concat_same_type
A default repr displaying the type, (truncated) data, length,
and dtype is provided. It can be customized or replaced by
overriding:
* __repr__ : A default repr for the ExtensionArray.
* _formatter : Print scalars inside a Series or DataFrame.
Some methods require casting the ExtensionArray to an ndarray of Python
objects with ``self.astype(object)``, which may be expensive. When
performance is a concern, we highly recommend overriding the following
methods:
* fillna
* dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
* searchsorted
The remaining methods implemented on this class should be performant,
as they only compose abstract methods. Still, a more efficient
implementation may be available, and these methods can be overridden.
One can implement methods to handle array reductions.
* _reduce
One can implement methods to handle parsing from strings that will be used
in methods such as ``pandas.io.parsers.read_csv``.
* _from_sequence_of_strings
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
ExtensionArrays are limited to 1 dimension.
They may be backed by none, one, or many NumPy arrays. For example,
``pandas.Categorical`` is an extension array backed by two arrays,
one for codes and one for categories. An array of IPv6 addresses may
be backed by a NumPy structured array with two fields, one for the
lower 64 bits and one for the upper 64 bits. Or they may be backed
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
The ExtensionArray interface does not impose any rules on how this data
is stored. However, currently, the backing data cannot be stored in
attributes called ``.values`` or ``._values`` to ensure full compatibility
with pandas internals. But other names as ``.data``, ``._data``,
``._items``, ... can be freely used.
If implementing NumPy's ``__array_ufunc__`` interface, pandas expects
that
1. You defer by raising ``NotImplemented`` when any Series are present
in `inputs`. Pandas will extract the arrays and call the ufunc again.
2. You define a ``_HANDLED_TYPES`` tuple as an attribute on the class.
Pandas inspects this to determine whether the ufunc is valid for the
types present.
See :ref:`extending.extension.ufunc` for more.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
_typ = "extension"
# ------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------

@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
    """
    Construct a new ExtensionArray from a sequence of scalars.

    Parameters
    ----------
    scalars : Sequence
        Each element will be an instance of the scalar type for this
        array, ``cls.dtype.type``.
    dtype : dtype, optional
        Construct for this particular dtype. This should be a Dtype
        compatible with the ExtensionArray.
    copy : boolean, default False
        If True, copy the underlying data.

    Returns
    -------
    ExtensionArray
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(cls)

@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
    """
    Construct a new ExtensionArray from a sequence of strings.

    Used e.g. by the parsers in ``pandas.io.parsers.read_csv``.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    strings : Sequence
        Each element will be an instance of the scalar type for this
        array, ``cls.dtype.type``.
    dtype : dtype, optional
        Construct for this particular dtype. This should be a Dtype
        compatible with the ExtensionArray.
    copy : boolean, default False
        If True, copy the underlying data.

    Returns
    -------
    ExtensionArray
    """
    # Optional: only needed for string-parsing entry points.
    raise AbstractMethodError(cls)

@classmethod
def _from_factorized(cls, values, original):
    """
    Reconstruct an ExtensionArray after factorization.

    Parameters
    ----------
    values : ndarray
        An integer ndarray with the factorized values.
    original : ExtensionArray
        The original ExtensionArray that factorize was called on.

    See Also
    --------
    factorize
    ExtensionArray.factorize
    """
    # Abstract: inverse of _values_for_factorize, required by the interface.
    raise AbstractMethodError(cls)
# ------------------------------------------------------------------------
# Must be a Sequence
# ------------------------------------------------------------------------

def __getitem__(self, item):
    # type: (Any) -> Any
    """
    Select a subset of self.

    Parameters
    ----------
    item : int, slice, or ndarray

        * int: The position in 'self' to get.
        * slice: A slice object, where 'start', 'stop', and 'step' are
          integers or None
        * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'

    Returns
    -------
    item : scalar or ExtensionArray

    Notes
    -----
    For scalar ``item``, return a scalar value suitable for the array's
    type. This should be an instance of ``self.dtype.type``.

    For slice ``key``, return an instance of ``ExtensionArray``, even
    if the slice is length 0 or 1.

    For a boolean mask, return an instance of ``ExtensionArray``, filtered
    to the values where ``item`` is True.
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(self)

def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
    """
    Set one or more values inplace.

    This method is not required to satisfy the pandas extension array
    interface.

    Parameters
    ----------
    key : int, ndarray, or slice
        When called from, e.g. ``Series.__setitem__``, ``key`` will be
        one of

        * scalar int
        * ndarray of integers.
        * boolean ndarray
        * slice object

    value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
        value or values to be set of ``key``.

    Returns
    -------
    None
    """
    # Some notes to the ExtensionArray implementor who may have ended up
    # here. While this method is not required for the interface, if you
    # *do* choose to implement __setitem__, then some semantics should be
    # observed:
    #
    # * Setting multiple values : ExtensionArrays should support setting
    #   multiple values at once, 'key' will be a sequence of integers and
    #   'value' will be a same-length sequence.
    #
    # * Broadcasting : For a sequence 'key' and a scalar 'value',
    #   each position in 'key' should be set to 'value'.
    #
    # * Coercion : Most users will expect basic coercion to work. For
    #   example, a string like '2018-01-01' is coerced to a datetime
    #   when setting on a datetime64ns array. In general, if the
    #   __init__ method coerces that value, then so should __setitem__
    # Note, also, that Series/DataFrame.where internally use __setitem__
    # on a copy of the data.
    raise NotImplementedError(
        _not_implemented_message.format(type(self), "__setitem__")
    )

def __len__(self) -> int:
    """
    Length of this array

    Returns
    -------
    length : int
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(self)

def __iter__(self):
    """
    Iterate over elements of the array.
    """
    # This needs to be implemented so that pandas recognizes extension
    # arrays as list-like. The default implementation makes successive
    # calls to ``__getitem__``, which may be slower than necessary.
    for i in range(len(self)):
        yield self[i]
# ------------------------------------------------------------------------
# Required attributes
# ------------------------------------------------------------------------

@property
def dtype(self) -> ExtensionDtype:
    """
    An instance of 'ExtensionDtype'.
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(self)

@property
def shape(self) -> Tuple[int, ...]:
    """
    Return a tuple of the array dimensions.
    """
    # ExtensionArrays are 1-D, so the shape is simply (length,).
    return (len(self),)

@property
def ndim(self) -> int:
    """
    Extension Arrays are only allowed to be 1-dimensional.
    """
    return 1

@property
def nbytes(self) -> int:
    """
    The number of bytes needed to store this object in memory.
    """
    # If this is expensive to compute, return an approximate lower bound
    # on the number of bytes needed.
    raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Additional Methods
# ------------------------------------------------------------------------

def astype(self, dtype, copy=True):
    """
    Cast to a NumPy array with 'dtype'.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    copy : bool, default True
        Whether to copy the data, even if not necessary. If False,
        a copy is made only if the old dtype does not match the
        new dtype.

    Returns
    -------
    array : ndarray
        NumPy ndarray with 'dtype' for its dtype.
    """
    # Default implementation goes through object dtype; subclasses are
    # encouraged to override for performance.
    return np.array(self, dtype=dtype, copy=copy)

def isna(self) -> ArrayLike:
    """
    A 1-D array indicating if each value is missing.

    Returns
    -------
    na_values : Union[np.ndarray, ExtensionArray]
        In most cases, this should return a NumPy ndarray. For
        exceptional cases like ``SparseArray``, where returning
        an ndarray would be expensive, an ExtensionArray may be
        returned.

    Notes
    -----
    If returning an ExtensionArray, then

    * ``na_values._is_boolean`` should be True
    * `na_values` should implement :func:`ExtensionArray._reduce`
    * ``na_values.any`` and ``na_values.all`` should be implemented
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(self)

def _values_for_argsort(self) -> np.ndarray:
    """
    Return values for sorting.

    Returns
    -------
    ndarray
        The transformed values should maintain the ordering between values
        within the array.

    See Also
    --------
    ExtensionArray.argsort
    """
    # Note: this is used in `ExtensionArray.argsort`.
    return np.array(self)
def argsort(self, ascending=True, kind="quicksort", *args, **kwargs):
    """
    Return the indices that would sort this array.

    Parameters
    ----------
    ascending : bool, default True
        Whether the indices should result in an ascending
        or descending sort.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    *args, **kwargs:
        passed through to :func:`numpy.argsort`.

    Returns
    -------
    index_array : ndarray
        Array of indices that sort ``self``. If NaN values are contained,
        NaN values are placed at the end.

    See Also
    --------
    numpy.argsort : Sorting implementation used internally.
    """
    # Implementor note: You have two places to override the behavior of
    # argsort.
    # 1. _values_for_argsort : construct the values passed to np.argsort
    # 2. argsort : total control over sorting.
    ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
    result = nargsort(self, kind=kind, ascending=ascending, na_position="last")
    return result
def fillna(self, value=None, method=None, limit=None):
    """
    Fill NA/NaN values using the specified method.

    Parameters
    ----------
    value : scalar, array-like
        If a scalar value is passed it is used to fill all missing values.
        Alternatively, an array-like 'value' can be given. It's expected
        that the array-like have the same length as 'self'.
    method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed Series:
        pad / ffill: propagate last valid observation forward to next valid.
        backfill / bfill: use NEXT valid observation to fill gap.
    limit : int, default None
        If method is specified, this is the maximum number of consecutive
        NaN values to forward/backward fill. In other words, if there is
        a gap with more than this number of consecutive NaNs, it will only
        be partially filled. If method is not specified, this is the
        maximum number of entries along the entire axis where NaNs will be
        filled.

    Returns
    -------
    filled : ExtensionArray with NA/NaN filled
    """
    # Imported locally to avoid import cycles at module load time.
    from pandas.api.types import is_array_like
    from pandas.util._validators import validate_fillna_kwargs
    from pandas.core.missing import pad_1d, backfill_1d

    value, method = validate_fillna_kwargs(value, method)
    mask = self.isna()
    if is_array_like(value):
        if len(value) != len(self):
            raise ValueError(
                "Length of 'value' does not match. Got ({}) "
                " expected {}".format(len(value), len(self))
            )
        # Keep only the fill values at the positions being filled.
        value = value[mask]
    if mask.any():
        if method is not None:
            # ffill/bfill through an object ndarray, then rebuild the EA.
            func = pad_1d if method == "pad" else backfill_1d
            new_values = func(self.astype(object), limit=limit, mask=mask)
            new_values = self._from_sequence(new_values, dtype=self.dtype)
        else:
            # fill with value
            new_values = self.copy()
            new_values[mask] = value
    else:
        # Nothing missing: still return a copy for consistency.
        new_values = self.copy()
    return new_values
def dropna(self):
    """
    Return ExtensionArray without NA values.

    Returns
    -------
    valid : ExtensionArray
    """
    keep = ~self.isna()
    return self[keep]
def shift(self, periods: int = 1, fill_value: object = None) -> ABCExtensionArray:
    """
    Shift values by desired number.

    Newly introduced missing values are filled with
    ``self.dtype.na_value``.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    periods : int, default 1
        The number of periods to shift. Negative values are allowed
        for shifting backwards.
    fill_value : object, optional
        The scalar value to use for newly introduced missing values.
        The default is ``self.dtype.na_value``.

        .. versionadded:: 0.24.0

    Returns
    -------
    shifted : ExtensionArray

    Notes
    -----
    If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
    returned.

    If ``periods > len(self)``, then an array of size
    len(self) is returned, with all values filled with
    ``self.dtype.na_value``.
    """
    # Note: this implementation assumes that `self.dtype.na_value` can be
    # stored in an instance of your ExtensionArray with `self.dtype`.
    if not len(self) or periods == 0:
        return self.copy()
    if isna(fill_value):
        fill_value = self.dtype.na_value
    # Block of fill values; never longer than the array itself.
    empty = self._from_sequence(
        [fill_value] * min(abs(periods), len(self)), dtype=self.dtype
    )
    if periods > 0:
        a = empty
        b = self[:-periods]
    else:
        a = self[abs(periods) :]
        b = empty
    return self._concat_same_type([a, b])
def unique(self):
    """
    Compute the ExtensionArray of unique values.

    Returns
    -------
    uniques : ExtensionArray
    """
    # Imported locally to avoid an import cycle.
    from pandas import unique

    distinct = unique(self.astype(object))
    return self._from_sequence(distinct, dtype=self.dtype)
def searchsorted(self, value, side="left", sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    .. versionadded:: 0.24.0

    Find the indices into a sorted array `self` (a) such that, if the
    corresponding elements in `value` were inserted before the indices,
    the order of `self` would be preserved.

    Assuming that `self` is sorted:

    ======  ================================
    `side`  returned index `i` satisfies
    ======  ================================
    left    ``self[i-1] < value <= self[i]``
    right   ``self[i-1] <= value < self[i]``
    ======  ================================

    Parameters
    ----------
    value : array_like
        Values to insert into `self`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `self`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort array a into ascending
        order. They are typically the result of argsort.

    Returns
    -------
    array of ints
        Array of insertion points with the same shape as `value`.

    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    # Note: the base tests provided by pandas only test the basics.
    # We do not test
    # 1. Values outside the range of the `data_for_sorting` fixture
    # 2. Values between the values in the `data_for_sorting` fixture
    # 3. Missing values.
    arr = self.astype(object)
    return arr.searchsorted(value, side=side, sorter=sorter)
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
    """
    Return an array and missing value suitable for factorization.

    Returns
    -------
    values : ndarray
        An array suitable for factorization. This should maintain order
        and be a supported dtype (Float64, Int64, UInt64, String, Object).
        By default, the extension array is cast to object dtype.
    na_value : object
        The value in `values` to consider missing. This will be treated
        as NA in the factorization routines, so it will be coded as
        `na_sentinel` and not included in `uniques`. By default,
        ``np.nan`` is used.

    Notes
    -----
    The values returned by this method are also used in
    :func:`pandas.util.hash_pandas_object`.
    """
    return self.astype(object), np.nan
def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ABCExtensionArray]:
    """
    Encode the extension array as an enumerated type.

    Parameters
    ----------
    na_sentinel : int, default -1
        Value to use in the `labels` array to indicate missing values.

    Returns
    -------
    labels : ndarray
        An integer NumPy array that's an indexer into the original
        ExtensionArray.
    uniques : ExtensionArray
        An ExtensionArray containing the unique values of `self`.

        .. note::

           uniques will *not* contain an entry for the NA value of
           the ExtensionArray if there are any missing values present
           in `self`.

    See Also
    --------
    factorize : Top-level factorize method that dispatches here.

    Notes
    -----
    :meth:`pandas.factorize` offers a `sort` keyword as well.
    """
    # Implementer note: There are two ways to override the behavior of
    # pandas.factorize
    # 1. _values_for_factorize and _from_factorize.
    #    Specify the values passed to pandas' internal factorization
    #    routines, and how to convert from those values back to the
    #    original ExtensionArray.
    # 2. ExtensionArray.factorize.
    #    Complete control over factorization.
    from pandas.core.algorithms import _factorize_array

    arr, na_value = self._values_for_factorize()
    labels, uniques = _factorize_array(
        arr, na_sentinel=na_sentinel, na_value=na_value
    )
    uniques = self._from_factorized(uniques, self)
    return labels, uniques
_extension_array_shared_docs[
"repeat"
] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
repeated_array : %(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.repeat(2)
[a, a, b, b, c, c]
Categories (3, object): [a, b, c]
>>> cat.repeat([1, 2, 3])
[a, b, b, c, c, c]
Categories (3, object): [a, b, c]
"""
@Substitution(klass="ExtensionArray")
@Appender(_extension_array_shared_docs["repeat"])
def repeat(self, repeats, axis=None):
    # `axis` is accepted for numpy compatibility only and must be None.
    nv.validate_repeat(tuple(), dict(axis=axis))
    indices = np.arange(len(self)).repeat(repeats)
    return self.take(indices)
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------

def take(
    self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
) -> ABCExtensionArray:
    """
    Take elements from an array.

    Parameters
    ----------
    indices : sequence of integers
        Indices to be taken.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to
          :func:`numpy.take`.
        * True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.

    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type, ``self.dtype.na_value``, is used.

        For many ExtensionArrays, there will be two representations of
        `fill_value`: a user-facing "boxed" scalar, and a low-level
        physical NA value. `fill_value` should be the user-facing version,
        and the implementation should handle translating that to the
        physical version for processing the take if necessary.

    Returns
    -------
    ExtensionArray

    Raises
    ------
    IndexError
        When the indices are out of bounds for the array.
    ValueError
        When `indices` contains negative values other than ``-1``
        and `allow_fill` is True.

    See Also
    --------
    numpy.take
    api.extensions.take

    Notes
    -----
    ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
    ``iloc``, when `indices` is a sequence of values. Additionally,
    it's called by :meth:`Series.reindex`, or any other method
    that causes realignment, with a `fill_value`.

    Examples
    --------
    Here's an example implementation, which relies on casting the
    extension array to object dtype. This uses the helper method
    :func:`pandas.api.extensions.take`.

    .. code-block:: python

       def take(self, indices, allow_fill=False, fill_value=None):
           from pandas.core.algorithms import take

           # If the ExtensionArray is backed by an ndarray, then
           # just pass that here instead of coercing to object.
           data = self.astype(object)

           if allow_fill and fill_value is None:
               fill_value = self.dtype.na_value

           # fill value should always be translated from the scalar
           # type for the array, to the physical storage type for
           # the data, before passing to take.
           result = take(data, indices, fill_value=fill_value,
                         allow_fill=allow_fill)
           return self._from_sequence(result, dtype=self.dtype)
    """
    # Implementer note: The `fill_value` parameter should be a user-facing
    # value, an instance of self.dtype.type. When passed `fill_value=None`,
    # the default of `self.dtype.na_value` should be used.
    # This may differ from the physical storage type your ExtensionArray
    # uses. In this case, your implementation is responsible for casting
    # the user-facing type to the storage type, before using
    # pandas.api.extensions.take
    raise AbstractMethodError(self)
def copy(self) -> ABCExtensionArray:
    """
    Return a copy of the array.

    Returns
    -------
    ExtensionArray
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Printing
# ------------------------------------------------------------------------

def __repr__(self):
    """Default repr: class name, truncated data, length and dtype."""
    from pandas.io.formats.printing import format_object_summary

    template = "{class_name}" "{data}\n" "Length: {length}, dtype: {dtype}"
    # the short repr has no trailing newline, while the truncated
    # repr does. So we include a newline in our template, and strip
    # any trailing newlines from format_object_summary
    data = format_object_summary(
        self, self._formatter(), indent_for_name=False
    ).rstrip(", \n")
    class_name = "<{}>\n".format(self.__class__.__name__)
    return template.format(
        class_name=class_name, data=data, length=len(self), dtype=self.dtype
    )
def _formatter(self, boxed: bool = False) -> Callable[[Any], Optional[str]]:
    """
    Formatting function for scalar values.

    This is used in the default '__repr__'. The returned formatting
    function receives instances of your scalar type.

    Parameters
    ----------
    boxed : bool, default False
        An indicator of whether or not your array is being printed
        within a Series, DataFrame, or Index (True), or just by
        itself (False). This may be useful if you want scalar values
        to appear differently within a Series versus on its own (e.g.
        quoted or not).

    Returns
    -------
    Callable[[Any], str]
        A callable that gets instances of the scalar type and
        returns a string. By default, :func:`repr` is used
        when ``boxed=False`` and :func:`str` is used when
        ``boxed=True``.
    """
    if boxed:
        return str
    return repr
def _formatting_values(self) -> np.ndarray:
    # At the moment, this has to be an array since we use result.dtype
    """
    An array of values to be printed in, e.g. the Series repr.

    .. deprecated:: 0.24.0

       Use :meth:`ExtensionArray._formatter` instead.

    Returns
    -------
    array : ndarray
    """
    return np.array(self)
# ------------------------------------------------------------------------
# Reshaping
# ------------------------------------------------------------------------

def ravel(self, order="C") -> ABCExtensionArray:
    """
    Return a flattened view on this array.

    Parameters
    ----------
    order : {None, 'C', 'F', 'A', 'K'}, default 'C'

    Returns
    -------
    ExtensionArray

    Notes
    -----
    - Because ExtensionArrays are 1D-only, this is a no-op.
    - The "order" argument is ignored, is for compatibility with NumPy.
    """
    return self
@classmethod
def _concat_same_type(
    cls, to_concat: Sequence[ABCExtensionArray]
) -> ABCExtensionArray:
    """
    Concatenate multiple arrays of this dtype into a single array.

    Parameters
    ----------
    to_concat : sequence of this type

    Returns
    -------
    ExtensionArray
    """
    # Abstract: required by the interface.
    raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
# will use the ExtensionDtype.na_value as the NA value in operations
# such as take(), reindex(), shift(), etc. In addition, those results
# will then be of the ExtensionArray subclass rather than an array
# of objects.
_can_hold_na = True
@property
def _ndarray_values(self) -> np.ndarray:
    """
    Internal pandas method for lossy conversion to a NumPy ndarray.

    This method is not part of the pandas interface.

    The expectation is that this is cheap to compute, and is primarily
    used for interacting with our indexers.

    Returns
    -------
    array : ndarray
    """
    return np.array(self)
def _reduce(self, name, skipna=True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
raise TypeError(
"cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype
)
)
class ExtensionOpsMixin:
    """
    A base class for linking the operators to their dunder names.

    .. note::
       You may want to set ``__array_priority__`` if you want your
       implementation to be called when involved in binary operations
       with NumPy arrays.
    """

    @classmethod
    def _add_arithmetic_ops(cls):
        # Forward variants come from the stdlib ``operator`` module,
        # reflected variants from ``pandas.core.ops``.
        arith = (
            ("__add__", operator.add),
            ("__radd__", ops.radd),
            ("__sub__", operator.sub),
            ("__rsub__", ops.rsub),
            ("__mul__", operator.mul),
            ("__rmul__", ops.rmul),
            ("__pow__", operator.pow),
            ("__rpow__", ops.rpow),
            ("__mod__", operator.mod),
            ("__rmod__", ops.rmod),
            ("__floordiv__", operator.floordiv),
            ("__rfloordiv__", ops.rfloordiv),
            ("__truediv__", operator.truediv),
            ("__rtruediv__", ops.rtruediv),
            ("__divmod__", divmod),
            ("__rdivmod__", ops.rdivmod),
        )
        for dunder, underlying in arith:
            setattr(cls, dunder, cls._create_arithmetic_method(underlying))

    @classmethod
    def _add_comparison_ops(cls):
        comparisons = (
            ("__eq__", operator.eq),
            ("__ne__", operator.ne),
            ("__lt__", operator.lt),
            ("__gt__", operator.gt),
            ("__le__", operator.le),
            ("__ge__", operator.ge),
        )
        for dunder, underlying in comparisons:
            setattr(cls, dunder, cls._create_comparison_method(underlying))
class ExtensionScalarOpsMixin(ExtensionOpsMixin):
    """
    A mixin for defining ops on an ExtensionArray.

    It is assumed that the underlying scalar objects have the operators
    already defined.

    Notes
    -----
    If you have defined a subclass MyExtensionArray(ExtensionArray), then
    use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
    get the arithmetic operators. After the definition of MyExtensionArray,
    insert the lines

    MyExtensionArray._add_arithmetic_ops()
    MyExtensionArray._add_comparison_ops()

    to link the operators to your class.

    .. note::
       You may want to set ``__array_priority__`` if you want your
       implementation to be called when involved in binary operations
       with NumPy arrays.
    """

    @classmethod
    def _create_method(cls, op, coerce_to_dtype=True):
        """
        A class method that returns a method that will correspond to an
        operator for an ExtensionArray subclass, by dispatching to the
        relevant operator defined on the individual elements of the
        ExtensionArray.

        Parameters
        ----------
        op : function
            An operator that takes arguments op(a, b)
        coerce_to_dtype : bool, default True
            boolean indicating whether to attempt to convert
            the result to the underlying ExtensionArray dtype.
            If it's not possible to create a new ExtensionArray with the
            values, an ndarray is returned instead.

        Returns
        -------
        Callable[[Any, Any], Union[ndarray, ExtensionArray]]
            A method that can be bound to a class. When used, the method
            receives the two arguments, one of which is the instance of
            this class, and should return an ExtensionArray or an ndarray.
            Returning an ndarray may be necessary when the result of the
            `op` cannot be stored in the ExtensionArray. The dtype of the
            ndarray uses NumPy's normal inference rules.

        Examples
        --------
        Given an ExtensionArray subclass called MyExtensionArray, use
        >>> __add__ = cls._create_method(operator.add)
        in the class definition of MyExtensionArray to create the operator
        for addition, that will be based on the operator implementation
        of the underlying elements of the ExtensionArray
        """
        def _binop(self, other):
            def convert_values(param):
                # Wrap a scalar `param` so it zips elementwise with
                # `self`; list-likes pass through unchanged.
                if isinstance(param, ExtensionArray) or is_list_like(param):
                    ovalues = param
                else:  # Assume it's a scalar object
                    ovalues = [param] * len(self)
                return ovalues
            if isinstance(other, (ABCSeries, ABCIndexClass)):
                # rely on pandas to unbox and dispatch to us
                return NotImplemented
            lvalues = self
            rvalues = convert_values(other)
            # If the operator is not defined for the underlying objects,
            # a TypeError should be raised
            res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
            def _maybe_convert(arr):
                if coerce_to_dtype:
                    # https://github.com/pandas-dev/pandas/issues/22850
                    # We catch all regular exceptions here, and fall back
                    # to an ndarray.
                    try:
                        res = self._from_sequence(arr)
                    except Exception:
                        res = np.asarray(arr)
                else:
                    res = np.asarray(arr)
                return res
            if op.__name__ in {"divmod", "rdivmod"}:
                # divmod produces (quotient, remainder) pairs; split them
                # into two arrays and convert each separately.
                a, b = zip(*res)
                res = _maybe_convert(a), _maybe_convert(b)
            else:
                res = _maybe_convert(res)
            return res
        op_name = ops._get_op_name(op, True)
        return set_function_name(_binop, op_name, cls)

    @classmethod
    def _create_arithmetic_method(cls, op):
        # Arithmetic results are coerced back to the EA dtype when possible.
        return cls._create_method(op)

    @classmethod
    def _create_comparison_method(cls, op):
        # Comparisons yield booleans, so never coerce back to the EA dtype.
        return cls._create_method(op, coerce_to_dtype=False)
| {
"repo_name": "toobaz/pandas",
"path": "pandas/core/arrays/base.py",
"copies": "2",
"size": "40003",
"license": "bsd-3-clause",
"hash": 6232905153799439000,
"line_mean": 32.9296013571,
"line_max": 87,
"alpha_frac": 0.5712071595,
"autogenerated": false,
"ratio": 4.699600563909774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6270807723409775,
"avg_score": null,
"num_lines": null
} |
"""An interface for extending pandas with custom arrays.
.. warning::
This is an experimental API and subject to breaking changes
without warning.
"""
import operator
import numpy as np
from pandas.compat import PY3, set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import ops
_not_implemented_message = "{} does not implement {}."
_extension_array_shared_docs = dict()
class ExtensionArray(object):
    """
    Abstract base class for custom 1-D array types.

    pandas will recognize instances of this class as proper arrays
    with a custom type and will not attempt to coerce them to objects. They
    may be stored directly inside a :class:`DataFrame` or :class:`Series`.

    .. versionadded:: 0.23.0

    Notes
    -----
    The interface includes the following abstract methods that must be
    implemented by subclasses:

    * _from_sequence
    * _from_factorized
    * __getitem__
    * __len__
    * dtype
    * nbytes
    * isna
    * take
    * copy
    * _concat_same_type

    A default repr displaying the type, (truncated) data, length,
    and dtype is provided. It can be customized or replaced by
    overriding:

    * __repr__ : A default repr for the ExtensionArray.
    * _formatter : Print scalars inside a Series or DataFrame.

    Some methods require casting the ExtensionArray to an ndarray of Python
    objects with ``self.astype(object)``, which may be expensive. When
    performance is a concern, we highly recommend overriding the following
    methods:

    * fillna
    * dropna
    * unique
    * factorize / _values_for_factorize
    * argsort / _values_for_argsort
    * searchsorted

    The remaining methods implemented on this class should be performant,
    as they only compose abstract methods. Still, a more efficient
    implementation may be available, and these methods can be overridden.

    One can implement methods to handle array reductions.

    * _reduce

    One can implement methods to handle parsing from strings that will be used
    in methods such as ``pandas.io.parsers.read_csv``.

    * _from_sequence_of_strings

    This class does not inherit from 'abc.ABCMeta' for performance reasons.
    Methods and properties required by the interface raise
    ``pandas.errors.AbstractMethodError`` and no ``register`` method is
    provided for registering virtual subclasses.

    ExtensionArrays are limited to 1 dimension.

    They may be backed by none, one, or many NumPy arrays. For example,
    ``pandas.Categorical`` is an extension array backed by two arrays,
    one for codes and one for categories. An array of IPv6 addresses may
    be backed by a NumPy structured array with two fields, one for the
    lower 64 bits and one for the upper 64 bits. Or they may be backed
    by some other storage type, like Python lists. Pandas makes no
    assumptions on how the data are stored, just that it can be converted
    to a NumPy array.

    The ExtensionArray interface does not impose any rules on how this data
    is stored. However, currently, the backing data cannot be stored in
    attributes called ``.values`` or ``._values`` to ensure full compatibility
    with pandas internals. But other names as ``.data``, ``._data``,
    ``._items``, ... can be freely used.
    """
    # '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
    # Don't override this.
    _typ = 'extension'
# ------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
    """
    Construct a new ExtensionArray from a sequence of scalars.

    Parameters
    ----------
    scalars : Sequence
        Each element will be an instance of the scalar type for this
        array, ``cls.dtype.type``.
    dtype : dtype, optional
        Construct for this particular dtype. This should be a Dtype
        compatible with the ExtensionArray.
    copy : boolean, default False
        If True, copy the underlying data.

    Returns
    -------
    ExtensionArray
    """
    # Abstract: the primary constructor every subclass must provide.
    raise AbstractMethodError(cls)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
    """Construct a new ExtensionArray from a sequence of strings.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    strings : Sequence
        Each element will be an instance of the scalar type for this
        array, ``cls.dtype.type``.
    dtype : dtype, optional
        Construct for this particular dtype. This should be a Dtype
        compatible with the ExtensionArray.
    copy : boolean, default False
        If True, copy the underlying data.

    Returns
    -------
    ExtensionArray
    """
    # Abstract, optional: enables string parsing, e.g. in read_csv.
    raise AbstractMethodError(cls)
@classmethod
def _from_factorized(cls, values, original):
    """
    Reconstruct an ExtensionArray after factorization.

    Parameters
    ----------
    values : ndarray
        An integer ndarray with the factorized values.
    original : ExtensionArray
        The original ExtensionArray that factorize was called on.

    See Also
    --------
    pandas.factorize
    ExtensionArray.factorize
    """
    # Abstract: the inverse of _values_for_factorize.
    raise AbstractMethodError(cls)
# ------------------------------------------------------------------------
# Must be a Sequence
# ------------------------------------------------------------------------
def __getitem__(self, item):
    # type: (Any) -> Any
    """
    Select a subset of self.

    Parameters
    ----------
    item : int, slice, or ndarray
        * int: The position in 'self' to get.
        * slice: A slice object, where 'start', 'stop', and 'step' are
          integers or None
        * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'

    Returns
    -------
    item : scalar or ExtensionArray

    Notes
    -----
    For scalar ``item``, return a scalar value suitable for the array's
    type. This should be an instance of ``self.dtype.type``.
    For slice ``key``, return an instance of ``ExtensionArray``, even
    if the slice is length 0 or 1.
    For a boolean mask, return an instance of ``ExtensionArray``, filtered
    to the values where ``item`` is True.
    """
    # Abstract: indexing semantics depend entirely on the backing store.
    raise AbstractMethodError(self)
def __setitem__(self, key, value):
    # type: (Union[int, np.ndarray], Any) -> None
    """
    Set one or more values inplace.

    This method is not required to satisfy the pandas extension array
    interface.

    Parameters
    ----------
    key : int, ndarray, or slice
        When called from, e.g. ``Series.__setitem__``, ``key`` will be
        one of

        * scalar int
        * ndarray of integers.
        * boolean ndarray
        * slice object

    value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
        value or values to be set at ``key``.

    Returns
    -------
    None
    """
    # Some notes to the ExtensionArray implementor who may have ended up
    # here. While this method is not required for the interface, if you
    # *do* choose to implement __setitem__, then some semantics should be
    # observed:
    #
    # * Setting multiple values : ExtensionArrays should support setting
    #   multiple values at once, 'key' will be a sequence of integers and
    #   'value' will be a same-length sequence.
    #
    # * Broadcasting : For a sequence 'key' and a scalar 'value',
    #   each position in 'key' should be set to 'value'.
    #
    # * Coercion : Most users will expect basic coercion to work. For
    #   example, a string like '2018-01-01' is coerced to a datetime
    #   when setting on a datetime64ns array. In general, if the
    #   __init__ method coerces that value, then so should __setitem__
    #
    # Note, also, that Series/DataFrame.where internally use __setitem__
    # on a copy of the data.
    raise NotImplementedError(_not_implemented_message.format(
        type(self), '__setitem__')
    )
def __len__(self):
    # type: () -> int
    """
    Length of this array

    Returns
    -------
    length : int
    """
    # Abstract: required so the array is sized (and hence list-like).
    raise AbstractMethodError(self)
def __iter__(self):
"""
Iterate over elements of the array.
"""
# This needs to be implemented so that pandas recognizes extension
# arrays as list-like. The default implementation makes successive
# calls to ``__getitem__``, which may be slower than necessary.
for i in range(len(self)):
yield self[i]
# ------------------------------------------------------------------------
# Required attributes
# ------------------------------------------------------------------------
@property
def dtype(self):
    # type: () -> ExtensionDtype
    """
    An instance of 'ExtensionDtype'.
    """
    # Abstract: every concrete extension array must expose its dtype.
    raise AbstractMethodError(self)
@property
def shape(self):
    # type: () -> Tuple[int, ...]
    """
    Return a tuple of the array dimensions.

    Always a 1-tuple, since extension arrays are strictly 1-D.
    """
    length = len(self)
    return (length,)
@property
def ndim(self):
    # type: () -> int
    """
    Extension Arrays are only allowed to be 1-dimensional.
    """
    # Hard-coded by the interface contract; do not override.
    return 1
@property
def nbytes(self):
    # type: () -> int
    """
    The number of bytes needed to store this object in memory.
    """
    # Abstract. If this is expensive to compute, return an approximate
    # lower bound on the number of bytes needed.
    raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Additional Methods
# ------------------------------------------------------------------------
def astype(self, dtype, copy=True):
    """
    Cast to a NumPy array with 'dtype'.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    copy : bool, default True
        Whether to copy the data, even if not necessary. If False,
        a copy is made only if the old dtype does not match the
        new dtype.

    Returns
    -------
    array : ndarray
        NumPy ndarray with 'dtype' for its dtype.
    """
    # Default implementation: let np.array iterate the extension array
    # and coerce. Subclasses usually override with something cheaper.
    result = np.array(self, dtype=dtype, copy=copy)
    return result
def isna(self):
    # type: () -> Union[ExtensionArray, np.ndarray]
    """
    A 1-D array indicating if each value is missing.

    Returns
    -------
    na_values : Union[np.ndarray, ExtensionArray]
        In most cases, this should return a NumPy ndarray. For
        exceptional cases like ``SparseArray``, where returning
        an ndarray would be expensive, an ExtensionArray may be
        returned.

    Notes
    -----
    If returning an ExtensionArray, then

    * ``na_values._is_boolean`` should be True
    * `na_values` should implement :func:`ExtensionArray._reduce`
    * ``na_values.any`` and ``na_values.all`` should be implemented
    """
    # Abstract: missingness is dtype-specific (NaN, NaT, sentinel, mask).
    raise AbstractMethodError(self)
def _values_for_argsort(self):
# type: () -> ndarray
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
    """
    Return the indices that would sort this array.

    Parameters
    ----------
    ascending : bool, default True
        Whether the indices should result in an ascending
        or descending sort.
    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
        Sorting algorithm.
    *args, **kwargs:
        passed through to :func:`numpy.argsort`.

    Returns
    -------
    index_array : ndarray
        Array of indices that sort ``self``.

    See Also
    --------
    numpy.argsort : Sorting implementation used internally.
    """
    # Two override points for implementors:
    # 1. _values_for_argsort : construct the values passed to np.argsort
    # 2. argsort : total control over sorting.
    ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
    indexer = np.argsort(self._values_for_argsort(), kind=kind, **kwargs)
    if ascending:
        return indexer
    return indexer[::-1]
def fillna(self, value=None, method=None, limit=None):
    """
    Fill NA/NaN values using the specified method.

    Parameters
    ----------
    value : scalar, array-like
        If a scalar value is passed it is used to fill all missing values.
        Alternatively, an array-like 'value' can be given. It's expected
        that the array-like have the same length as 'self'.
    method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
        Method to use for filling holes in reindexed Series
        pad / ffill: propagate last valid observation forward to next valid
        backfill / bfill: use NEXT valid observation to fill gap
    limit : int, default None
        If method is specified, this is the maximum number of consecutive
        NaN values to forward/backward fill. In other words, if there is
        a gap with more than this number of consecutive NaNs, it will only
        be partially filled. If method is not specified, this is the
        maximum number of entries along the entire axis where NaNs will be
        filled.

    Returns
    -------
    filled : ExtensionArray with NA/NaN filled
    """
    from pandas.api.types import is_array_like
    from pandas.util._validators import validate_fillna_kwargs
    from pandas.core.missing import pad_1d, backfill_1d

    value, method = validate_fillna_kwargs(value, method)

    mask = self.isna()
    if is_array_like(value):
        if len(value) != len(self):
            # BUG FIX: the previous implicit string concatenation
            # ("... Got ({}) " " expected {}") produced a double space
            # in the rendered message.
            raise ValueError("Length of 'value' does not match. Got ({}) "
                             "expected {}".format(len(value), len(self)))
        # Keep only the fill values that line up with missing positions.
        value = value[mask]

    if mask.any():
        if method is not None:
            # Directional fill via the shared 1-D padding helpers, then
            # rebuild an array of this type from the object result.
            func = pad_1d if method == 'pad' else backfill_1d
            new_values = func(self.astype(object), limit=limit,
                              mask=mask)
            new_values = self._from_sequence(new_values, dtype=self.dtype)
        else:
            # fill with value
            new_values = self.copy()
            new_values[mask] = value
    else:
        # Nothing missing: still return a copy, never self.
        new_values = self.copy()
    return new_values
def dropna(self):
    """
    Return a new ExtensionArray with the NA values removed.

    Returns
    -------
    valid : ExtensionArray
    """
    keep = ~self.isna()
    return self[keep]
def shift(self, periods=1, fill_value=None):
    # type: (int, object) -> ExtensionArray
    """
    Shift values by desired number.

    Newly introduced missing values are filled with
    ``self.dtype.na_value``.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    periods : int, default 1
        The number of periods to shift. Negative values are allowed
        for shifting backwards.
    fill_value : object, optional
        The scalar value to use for newly introduced missing values.
        The default is ``self.dtype.na_value``

        .. versionadded:: 0.24.0

    Returns
    -------
    shifted : ExtensionArray

    Notes
    -----
    If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
    returned.
    If ``periods > len(self)``, then an array of size
    len(self) is returned, with all values filled with
    ``self.dtype.na_value``.
    """
    # Note: this implementation assumes that `self.dtype.na_value` can be
    # stored in an instance of your ExtensionArray with `self.dtype`.
    if periods == 0 or not len(self):
        return self.copy()

    if isna(fill_value):
        fill_value = self.dtype.na_value

    # Pad with at most len(self) fill values; the rest of the data is
    # the surviving slice of self on the appropriate side.
    pad_length = min(abs(periods), len(self))
    pad = self._from_sequence([fill_value] * pad_length, dtype=self.dtype)

    if periods > 0:
        pieces = [pad, self[:-periods]]
    else:
        pieces = [self[abs(periods):], pad]
    return self._concat_same_type(pieces)
def unique(self):
    """
    Compute the ExtensionArray of unique values.

    Returns
    -------
    uniques : ExtensionArray
    """
    from pandas import unique

    # Deduplicate on the object-cast values, then rebuild this type.
    distinct = unique(self.astype(object))
    return self._from_sequence(distinct, dtype=self.dtype)
def searchsorted(self, value, side="left", sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    .. versionadded:: 0.24.0

    Find the indices into a sorted array ``self`` such that, if the
    corresponding elements in ``value`` were inserted before the
    indices, the order of ``self`` would be preserved.

    Assuming that ``self`` is sorted:

    ====== ================================
    `side` returned index `i` satisfies
    ====== ================================
    left   ``self[i-1] < value <= self[i]``
    right  ``self[i-1] <= value < self[i]``
    ====== ================================

    Parameters
    ----------
    value : array_like
        Values to insert into `self`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index. If there is no suitable
        index, return either 0 or N (where N is the length of `self`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort array a into ascending
        order. They are typically the result of argsort.

    Returns
    -------
    indices : array of ints
        Array of insertion points with the same shape as `value`.

    See Also
    --------
    numpy.searchsorted : Similar method from NumPy.
    """
    # The pandas base tests only exercise the basics; values outside or
    # between the sorting fixture, and missing values, are untested.
    # Default path: binary-search on the object-cast ndarray.
    return self.astype(object).searchsorted(value, side=side, sorter=sorter)
def _values_for_factorize(self):
# type: () -> Tuple[ndarray, Any]
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`na_sentinal` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
def factorize(self, na_sentinel=-1):
    # type: (int) -> Tuple[ndarray, ExtensionArray]
    """
    Encode the extension array as an enumerated type.

    Parameters
    ----------
    na_sentinel : int, default -1
        Value to use in the `labels` array to indicate missing values.

    Returns
    -------
    labels : ndarray
        An integer NumPy array that's an indexer into the original
        ExtensionArray.
    uniques : ExtensionArray
        An ExtensionArray containing the unique values of `self`.

        .. note::
           uniques will *not* contain an entry for the NA value of
           the ExtensionArray if there are any missing values present
           in `self`.

    See Also
    --------
    pandas.factorize : Top-level factorize method that dispatches here.

    Notes
    -----
    :meth:`pandas.factorize` offers a `sort` keyword as well.
    """
    # Implementor note: There are two ways to override the behavior of
    # pandas.factorize
    # 1. _values_for_factorize and _from_factorize.
    #    Specify the values passed to pandas' internal factorization
    #    routines, and how to convert from those values back to the
    #    original ExtensionArray.
    # 2. ExtensionArray.factorize.
    #    Complete control over factorization.
    from pandas.core.algorithms import _factorize_array
    arr, na_value = self._values_for_factorize()
    labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
                                       na_value=na_value)
    # Map the factorized integer codes back into an array of this type.
    uniques = self._from_factorized(uniques, self)
    return labels, uniques
_extension_array_shared_docs['repeat'] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
repeated_array : %(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.repeat(2)
[a, a, b, b, c, c]
Categories (3, object): [a, b, c]
>>> cat.repeat([1, 2, 3])
[a, b, b, c, c, c]
Categories (3, object): [a, b, c]
"""
@Substitution(klass='ExtensionArray')
@Appender(_extension_array_shared_docs['repeat'])
def repeat(self, repeats, axis=None):
    # `axis` must be None; validated only for NumPy signature compatibility.
    nv.validate_repeat(tuple(), dict(axis=axis))
    # Expand positions (supports scalar or per-element repeat counts),
    # then gather via take.
    ind = np.arange(len(self)).repeat(repeats)
    return self.take(ind)
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------
def take(self, indices, allow_fill=False, fill_value=None):
    # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray
    """
    Take elements from an array.

    Parameters
    ----------
    indices : sequence of integers
        Indices to be taken.
    allow_fill : bool, default False
        How to handle negative values in `indices`.

        * False: negative values in `indices` indicate positional indices
          from the right (the default). This is similar to
          :func:`numpy.take`.
        * True: negative values in `indices` indicate
          missing values. These values are set to `fill_value`. Any other
          negative values raise a ``ValueError``.

    fill_value : any, optional
        Fill value to use for NA-indices when `allow_fill` is True.
        This may be ``None``, in which case the default NA value for
        the type, ``self.dtype.na_value``, is used.
        For many ExtensionArrays, there will be two representations of
        `fill_value`: a user-facing "boxed" scalar, and a low-level
        physical NA value. `fill_value` should be the user-facing version,
        and the implementation should handle translating that to the
        physical version for processing the take if necessary.

    Returns
    -------
    ExtensionArray

    Raises
    ------
    IndexError
        When the indices are out of bounds for the array.
    ValueError
        When `indices` contains negative values other than ``-1``
        and `allow_fill` is True.

    Notes
    -----
    ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
    ``iloc``, when `indices` is a sequence of values. Additionally,
    it's called by :meth:`Series.reindex`, or any other method
    that causes realignment, with a `fill_value`.

    See Also
    --------
    numpy.take
    pandas.api.extensions.take

    Examples
    --------
    Here's an example implementation, which relies on casting the
    extension array to object dtype. This uses the helper method
    :func:`pandas.api.extensions.take`.

    .. code-block:: python

       def take(self, indices, allow_fill=False, fill_value=None):
           from pandas.core.algorithms import take
           # If the ExtensionArray is backed by an ndarray, then
           # just pass that here instead of coercing to object.
           data = self.astype(object)
           if allow_fill and fill_value is None:
               fill_value = self.dtype.na_value
           # fill value should always be translated from the scalar
           # type for the array, to the physical storage type for
           # the data, before passing to take.
           result = take(data, indices, fill_value=fill_value,
                         allow_fill=allow_fill)
           return self._from_sequence(result, dtype=self.dtype)
    """
    # Implementer note: The `fill_value` parameter should be a user-facing
    # value, an instance of self.dtype.type. When passed `fill_value=None`,
    # the default of `self.dtype.na_value` should be used.
    # This may differ from the physical storage type your ExtensionArray
    # uses. In this case, your implementation is responsible for casting
    # the user-facing type to the storage type, before using
    # pandas.api.extensions.take
    raise AbstractMethodError(self)
def copy(self, deep=False):
    # type: (bool) -> ExtensionArray
    """
    Return a copy of the array.

    Parameters
    ----------
    deep : bool, default False
        Also copy the underlying data backing this array.

    Returns
    -------
    ExtensionArray
    """
    # Abstract: subclasses decide how to copy their backing store.
    raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Printing
# ------------------------------------------------------------------------
def __repr__(self):
    """Default repr: type name, (truncated) data, length and dtype."""
    from pandas.io.formats.printing import format_object_summary

    # The short repr has no trailing newline, while the truncated repr
    # does. So we include a newline in the template and strip any
    # trailing newline/comma from format_object_summary's output.
    data = format_object_summary(
        self, self._formatter(), indent_for_name=False
    ).rstrip(', \n')
    name = u'<{}>\n'.format(self.__class__.__name__)
    template = u'{class_name}{data}\nLength: {length}, dtype: {dtype}'
    return template.format(class_name=name, data=data,
                           length=len(self), dtype=self.dtype)
def _formatter(self, boxed=False):
# type: (bool) -> Callable[[Any], Optional[str]]
"""Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed: bool, default False
An indicated for whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
"""
if boxed:
return str
return repr
def _formatting_values(self):
# type: () -> np.ndarray
# At the moment, this has to be an array since we use result.dtype
"""
An array of values to be printed in, e.g. the Series repr
.. deprecated:: 0.24.0
Use :meth:`ExtensionArray._formatter` instead.
"""
return np.array(self)
# ------------------------------------------------------------------------
# Reshaping
# ------------------------------------------------------------------------
@classmethod
def _concat_same_type(cls, to_concat):
    # type: (Sequence[ExtensionArray]) -> ExtensionArray
    """
    Concatenate multiple arrays of this type into a single array.

    Parameters
    ----------
    to_concat : sequence of this type

    Returns
    -------
    ExtensionArray
    """
    # Abstract: used by pandas concat/append machinery.
    raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
# will use the ExtensionDtype.na_value as the NA value in operations
# such as take(), reindex(), shift(), etc. In addition, those results
# will then be of the ExtensionArray subclass rather than an array
# of objects.
_can_hold_na = True
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
Internal pandas method for lossy conversion to a NumPy ndarray.
This method is not part of the pandas interface.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
"""
return np.array(self)
def _reduce(self, name, skipna=True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
class ExtensionOpsMixin(object):
    """
    A base class for linking the operators to their dunder names.

    .. note::
       You may want to set ``__array_priority__`` if you want your
       implementation to be called when involved in binary operations
       with NumPy arrays.
    """

    @classmethod
    def _add_arithmetic_ops(cls):
        # Forward variants come from the stdlib ``operator`` module,
        # reflected variants from ``pandas.core.ops``.
        pairs = [
            ("__add__", operator.add),
            ("__radd__", ops.radd),
            ("__sub__", operator.sub),
            ("__rsub__", ops.rsub),
            ("__mul__", operator.mul),
            ("__rmul__", ops.rmul),
            ("__pow__", operator.pow),
            ("__rpow__", ops.rpow),
            ("__mod__", operator.mod),
            ("__rmod__", ops.rmod),
            ("__floordiv__", operator.floordiv),
            ("__rfloordiv__", ops.rfloordiv),
            ("__truediv__", operator.truediv),
            ("__rtruediv__", ops.rtruediv),
        ]
        if not PY3:
            # Classic division only exists on Python 2.
            pairs.append(("__div__", operator.div))
            pairs.append(("__rdiv__", ops.rdiv))
        pairs.append(("__divmod__", divmod))
        pairs.append(("__rdivmod__", ops.rdivmod))
        for dunder, underlying in pairs:
            setattr(cls, dunder, cls._create_arithmetic_method(underlying))

    @classmethod
    def _add_comparison_ops(cls):
        for dunder, underlying in [
            ("__eq__", operator.eq),
            ("__ne__", operator.ne),
            ("__lt__", operator.lt),
            ("__gt__", operator.gt),
            ("__le__", operator.le),
            ("__ge__", operator.ge),
        ]:
            setattr(cls, dunder, cls._create_comparison_method(underlying))
class ExtensionScalarOpsMixin(ExtensionOpsMixin):
    """
    A mixin for defining ops on an ExtensionArray.

    It is assumed that the underlying scalar objects have the operators
    already defined.

    Notes
    -----
    If you have defined a subclass MyExtensionArray(ExtensionArray), then
    use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
    get the arithmetic operators. After the definition of MyExtensionArray,
    insert the lines

    MyExtensionArray._add_arithmetic_ops()
    MyExtensionArray._add_comparison_ops()

    to link the operators to your class.

    .. note::
       You may want to set ``__array_priority__`` if you want your
       implementation to be called when involved in binary operations
       with NumPy arrays.
    """

    @classmethod
    def _create_method(cls, op, coerce_to_dtype=True):
        """
        A class method that returns a method that will correspond to an
        operator for an ExtensionArray subclass, by dispatching to the
        relevant operator defined on the individual elements of the
        ExtensionArray.

        Parameters
        ----------
        op : function
            An operator that takes arguments op(a, b)
        coerce_to_dtype : bool, default True
            boolean indicating whether to attempt to convert
            the result to the underlying ExtensionArray dtype.
            If it's not possible to create a new ExtensionArray with the
            values, an ndarray is returned instead.

        Returns
        -------
        Callable[[Any, Any], Union[ndarray, ExtensionArray]]
            A method that can be bound to a class. When used, the method
            receives the two arguments, one of which is the instance of
            this class, and should return an ExtensionArray or an ndarray.

            Returning an ndarray may be necessary when the result of the
            `op` cannot be stored in the ExtensionArray. The dtype of the
            ndarray uses NumPy's normal inference rules.

        Examples
        --------
        Given an ExtensionArray subclass called MyExtensionArray, use

        >>> __add__ = cls._create_method(operator.add)

        in the class definition of MyExtensionArray to create the operator
        for addition, that will be based on the operator implementation
        of the underlying elements of the ExtensionArray
        """

        def _binop(self, other):
            def convert_values(param):
                # Broadcast a scalar RHS to the length of ``self`` so the
                # element-wise zip below lines up.
                if isinstance(param, ExtensionArray) or is_list_like(param):
                    ovalues = param
                else:  # Assume it's an object
                    ovalues = [param] * len(self)
                return ovalues

            if isinstance(other, (ABCSeries, ABCIndexClass)):
                # rely on pandas to unbox and dispatch to us
                return NotImplemented

            lvalues = self
            rvalues = convert_values(other)

            # If the operator is not defined for the underlying objects,
            # a TypeError should be raised
            res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]

            def _maybe_convert(arr):
                # Coerce element-wise results back to the ExtensionArray
                # dtype when requested; otherwise (or on failure) fall back
                # to a plain ndarray with NumPy's inferred dtype.
                if coerce_to_dtype:
                    # https://github.com/pandas-dev/pandas/issues/22850
                    # We catch all regular exceptions here, and fall back
                    # to an ndarray.
                    try:
                        res = self._from_sequence(arr)
                    except Exception:
                        res = np.asarray(arr)
                else:
                    res = np.asarray(arr)
                return res

            if op.__name__ in {'divmod', 'rdivmod'}:
                # divmod produces (quotient, remainder) pairs; convert
                # each component separately.
                a, b = zip(*res)
                res = _maybe_convert(a), _maybe_convert(b)
            else:
                res = _maybe_convert(res)
            return res

        op_name = ops._get_op_name(op, True)
        return set_function_name(_binop, op_name, cls)

    @classmethod
    def _create_arithmetic_method(cls, op):
        # Arithmetic results are coerced back to the ExtensionArray dtype
        # where possible (coerce_to_dtype defaults to True).
        return cls._create_method(op)

    @classmethod
    def _create_comparison_method(cls, op):
        # Comparison results are boolean masks - never coerce back to the
        # ExtensionArray dtype.
        return cls._create_method(op, coerce_to_dtype=False)
| {
"repo_name": "GuessWhoSamFoo/pandas",
"path": "pandas/core/arrays/base.py",
"copies": "1",
"size": "38965",
"license": "bsd-3-clause",
"hash": -3184456759167220700,
"line_mean": 33.7901785714,
"line_max": 79,
"alpha_frac": 0.5662004363,
"autogenerated": false,
"ratio": 4.712178014270165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5778378450570165,
"avg_score": null,
"num_lines": null
} |
# An interface for programs which do integration - this will handle
# all of the input and output, delegating the actual processing to an
# implementation of this interface.
#
# The following are considered critical:
#
# Input:
# An implementation of the indexer class.
#
# Output:
# [processed reflections?]
#
# This is a complex problem to solve...
#
# Assumptions & Assertions:
#
# (1) Integration includes any cell and orientation refinement.
# This should be handled under the prepare phase.
# (2) If there is no indexer implementation provided as input,
# it's ok to go make one, or raise an exception (maybe.)
#
# This means...
#
# (1) That this needs to have the possibility of specifying images for
# use in both cell refinement (as a list of wedges, similar to
# the indexer interface) and as a SINGLE WEDGE for use in integration.
# (2) This may default to a local implementation using the same program,
# e.g. XDS or Mosflm - will not necessarily select the best one.
# This is left to the implementation to sort out.
import inspect
import json
import logging
import math
import os
import xia2.Schema.Interfaces.Indexer
import xia2.Schema.Interfaces.Refiner
# symmetry operator management functionality
from xia2.Experts.SymmetryExpert import compose_symops, symop_to_mat
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import banner
from xia2.Schema.Exceptions.BadLatticeError import BadLatticeError
# interfaces that this inherits from ...
from xia2.Schema.Interfaces.FrameProcessor import FrameProcessor
logger = logging.getLogger("xia2.Schema.Interfaces.Integrater")
class Integrater(FrameProcessor):
    """An interface to present integration functionality in a similar
    way to the indexer interface."""

    def __init__(self):
        super().__init__()

        # admin junk
        self._intgr_working_directory = os.getcwd()

        # a pointer to an implementation of the indexer class from which
        # to get orientation (maybe) and unit cell, lattice (definitely)
        self._intgr_indexer = None
        self._intgr_refiner = None

        # optional parameters - added user for # 3183
        self._intgr_reso_high = 0.0
        self._intgr_reso_low = 0.0
        self._intgr_reso_user = False

        # presence of ice rings - 0 indicates "no" anything else
        # indicates "yes". FIXME this should be able to identify
        # different resolution rings.
        self._intgr_ice = 0
        self._intgr_excluded_regions = []

        # required parameters
        self._intgr_wedge = None

        # implementation dependent parameters - these should be keyed by
        # say 'mosflm':{'yscale':0.9999} etc.
        self._intgr_program_parameters = {}

        # the same, but for export to other instances of this interface
        # via the .xinfo hierarchy
        self._intgr_export_program_parameters = {}

        # batches to integrate, batches which were integrated - this is
        # to allow programs like rebatch to work c/f self._intgr_wedge
        # note well that this may have to be implemented via mtzdump?
        # or just record the images which were integrated...
        self._intgr_batches_out = [0, 0]

        # flags which control how the execution is performed
        # in the main integrate() method below.
        self._intgr_done = False
        self._intgr_prepare_done = False
        self._intgr_finish_done = False

        # the output reflections
        self._intgr_hklout_raw = None
        self._intgr_hklout = None

        # 'hkl' or 'pickle', if pickle then self._intgr_hklout returns a refl table.
        self._output_format = "hkl"

        # a place to store the project, crystal, wavelength, sweep information
        # to interface with the scaling...
        self._intgr_epoch = 0
        self._intgr_pname = None
        self._intgr_xname = None
        self._intgr_dname = None
        self._intgr_sweep = None
        self._intgr_sweep_name = None

        # results - refined cell and number of reflections
        self._intgr_cell = None
        self._intgr_n_ref = None

        # reindexing operations etc. these will come from feedback
        # from the scaling to ensure that the setting is uniform
        self._intgr_spacegroup_number = 0
        self._intgr_reindex_operator = None
        self._intgr_reindex_matrix = None

        # extra information which could be helpful for integration
        self._intgr_anomalous = False

        # mosaic spread information
        self._intgr_mosaic_min = None
        self._intgr_mosaic_mean = None
        self._intgr_mosaic_max = None

        self._intgr_per_image_statistics = None

    # serialization functions

    def to_dict(self):
        """Serialise this integrater (and its indexer/refiner, if set) to a
        plain dict suitable for JSON output."""
        obj = {}
        obj["__id__"] = "Integrater"
        obj["__module__"] = self.__class__.__module__
        obj["__name__"] = self.__class__.__name__
        attributes = inspect.getmembers(self, lambda m: not inspect.isroutine(m))
        for a in attributes:
            if a[0] in ("_intgr_indexer", "_intgr_refiner") and a[1] is not None:
                obj[a[0]] = a[1].to_dict()
            elif a[0] == "_fp_imageset":
                from dxtbx.serialize.imageset import imageset_to_dict

                obj[a[0]] = imageset_to_dict(a[1])
            elif a[0] == "_intgr_sweep":
                # XXX I guess we probably want this?
                continue
            elif a[0].startswith("_intgr_") or a[0].startswith("_fp_"):
                obj[a[0]] = a[1]
        return obj

    @classmethod
    def from_dict(cls, obj):
        """Reconstruct an Integrater (of the dynamically named subclass) from
        a dict produced by to_dict()."""
        assert obj["__id__"] == "Integrater"
        return_obj = cls()
        for k, v in obj.items():
            if k in ("_intgr_indexer", "_intgr_refiner") and v is not None:
                from libtbx.utils import import_python_object

                cls = import_python_object(
                    import_path=".".join((v["__module__"], v["__name__"])),
                    error_prefix="",
                    target_must_be="",
                    where_str="",
                ).object
                v = cls.from_dict(v)
            if isinstance(v, dict):
                if v.get("__id__") == "ExperimentList":
                    from dxtbx.model.experiment_list import ExperimentListFactory

                    v = ExperimentListFactory.from_dict(v)
                elif v.get("__id__") == "imageset":
                    from dxtbx.serialize.imageset import imageset_from_dict

                    v = imageset_from_dict(v, check_format=False)
            setattr(return_obj, k, v)
        return return_obj

    def as_json(self, filename=None, compact=False):
        """Serialise to JSON text; write to filename if given, otherwise
        return the JSON string."""
        obj = self.to_dict()
        if compact:
            text = json.dumps(
                obj, skipkeys=True, separators=(",", ":"), ensure_ascii=True
            )
        else:
            text = json.dumps(obj, skipkeys=True, indent=2, ensure_ascii=True)
        # If a filename is set then dump to file otherwise return string
        if filename is not None:
            with open(filename, "w") as outfile:
                outfile.write(text)
        else:
            return text

    @classmethod
    def from_json(cls, filename=None, string=None):
        """Load an Integrater from a JSON file or a JSON string (exactly one
        of the two must be supplied)."""
        assert [filename, string].count(None) == 1
        if filename is not None:
            with open(filename, "rb") as f:
                string = f.read()
        obj = json.loads(string)
        return cls.from_dict(obj)

    # ------------------------------------------------------------------
    # These methods need to be overloaded by the actual implementation -
    # they are all called from within the main integrate() method. The
    # roles of each of these could be as follows -
    #
    # prepare - prerefine the unit cell
    # integrate - measure the intensities of all reflections
    # finish - reindex these to the correct setting
    #
    # though this is just one interpretation...
    # ------------------------------------------------------------------

    def _integrate_prepare(self):
        raise NotImplementedError("overload me")

    def _integrate(self):
        raise NotImplementedError("overload me")

    def _integrate_finish(self):
        raise NotImplementedError("overload me")

    # ------------------------------------
    # end methods which MUST be overloaded
    # ------------------------------------

    def _integrater_reset(self):
        """Reset the integrater, e.g. if the autoindexing solution
        has changed."""
        # reset the status flags
        self.set_integrater_prepare_done(False)
        self.set_integrater_done(False)
        self.set_integrater_finish_done(False)
        # reset the "knowledge" from the data
        # note well - if we have set a resolution limit
        # externally then this will have to be respected...
        # e.g. - added user for # 3183
        if not self._intgr_reso_user:
            self._intgr_reso_high = 0.0
            self._intgr_reso_low = 0.0
        self._intgr_hklout_raw = None
        self._intgr_hklout = None
        self._intgr_program_parameters = {}
        self._integrater_reset_callback()

    def set_integrater_sweep(self, sweep, reset=True):
        """Attach the sweep this integrater works on; optionally reset state."""
        self._intgr_sweep = sweep
        if reset:
            self._integrater_reset()

    def get_integrater_sweep(self):
        return self._intgr_sweep

    # setters and getters for the "done"-ness of different operations
    # note that this cascades

    def set_integrater_prepare_done(self, done=True):
        self._intgr_prepare_done = done
        if not done:
            self.set_integrater_done(False)

    def set_integrater_done(self, done=True):
        self._intgr_done = done
        # FIXME should I remove / reset the reindexing operation here?
        # probably...!
        if not done:
            self._intgr_reindex_operator = None
        if not done:
            self.set_integrater_finish_done(False)

    def set_integrater_finish_done(self, done=True):
        self._intgr_finish_done = done

    # getters of the status - note how these cascade the get to ensure
    # that everything is up-to-date...

    def get_integrater_prepare_done(self):
        if not self.get_integrater_refiner():
            return self._intgr_prepare_done
        refiner = self.get_integrater_refiner()
        # if the refiner has been updated behind our back, every sweep it
        # covers must be re-integrated
        if not refiner.get_refiner_done() and self._intgr_prepare_done:
            for sweep in refiner._refinr_sweeps:
                logger.debug(
                    "Resetting integrater for sweep {} as refiner updated.".format(
                        sweep._name
                    )
                )
                sweep._integrater._integrater_reset()
        return self._intgr_prepare_done

    def get_integrater_done(self):
        if not self.get_integrater_prepare_done():
            logger.debug("Resetting integrater done as prepare not done")
            self.set_integrater_done(False)
        return self._intgr_done

    def get_integrater_finish_done(self):
        if not self.get_integrater_done():
            logger.debug("Resetting integrater finish done as integrate not done")
            self.set_integrater_finish_done(False)
        return self._intgr_finish_done

    # end job control stuff - next getters for results

    def get_integrater_cell(self):
        """Get the (post) refined unit cell."""
        self.integrate()
        return self._intgr_cell

    def get_integrater_n_ref(self):
        """Get the number of reflections in the data set."""
        self.integrate()
        return self._intgr_n_ref

    # getters and setters of administrative information

    def set_working_directory(self, working_directory):
        self._intgr_working_directory = working_directory

    def get_working_directory(self):
        return self._intgr_working_directory

    def set_integrater_sweep_name(self, sweep_name):
        self._intgr_sweep_name = sweep_name

    def get_integrater_sweep_name(self):
        return self._intgr_sweep_name

    def set_integrater_project_info(self, project_name, crystal_name, dataset_name):
        """Set the metadata information, to allow passing on of information
        both into the reflection files (if possible) or to the scaling stages
        for dataset administration."""
        self._intgr_pname = project_name
        self._intgr_xname = crystal_name
        self._intgr_dname = dataset_name

    def get_integrater_project_info(self):
        return self._intgr_pname, self._intgr_xname, self._intgr_dname

    def get_integrater_epoch(self):
        return self._intgr_epoch

    def set_integrater_epoch(self, epoch):
        self._intgr_epoch = epoch

    def set_integrater_wedge(self, start, end):
        """Set the wedge of images to process."""
        start = start - self.get_frame_offset()
        end = end - self.get_frame_offset()
        self._intgr_wedge = (start, end)
        # get the epoch for the sweep if not already defined
        epoch = self.get_scan().get_epochs()[0]
        if epoch > 0 and self._intgr_epoch == 0:
            self._intgr_epoch = epoch
        logger.debug("Sweep epoch: %d" % self._intgr_epoch)
        self.set_integrater_done(False)

    def get_integrater_wedge(self):
        """Get the wedge of images assigned to this integrater."""
        return self._intgr_wedge

    def get_integrater_resolution(self):
        """Get both resolution limits, high then low."""
        return self._intgr_reso_high, self._intgr_reso_low

    def get_integrater_high_resolution(self):
        return self._intgr_reso_high

    def get_integrater_low_resolution(self):
        return self._intgr_reso_low

    def get_integrater_user_resolution(self):
        """Return a boolean: were the resolution limits set by
        the user? See bug # 3183"""
        return self._intgr_reso_user

    def set_integrater_resolution(self, dmin, dmax, user=False):
        """Set both resolution limits."""
        if self._intgr_reso_user and not user:
            raise RuntimeError("cannot override user set limits")
        if user:
            self._intgr_reso_user = True
        self._intgr_reso_high = min(dmin, dmax)
        self._intgr_reso_low = max(dmin, dmax)
        self.set_integrater_done(False)

    def set_integrater_high_resolution(self, dmin, user=False):
        """Set high resolution limit."""
        if self._intgr_reso_user and not user:
            raise RuntimeError("cannot override user set limits")
        if user:
            self._intgr_reso_user = True
        self._intgr_reso_high = dmin
        self.set_integrater_done(False)

    def set_integrater_low_resolution(self, dmax):
        """Set low resolution limit."""
        self._intgr_reso_low = dmax
        self.set_integrater_done(False)

    def set_integrater_mosaic_min_mean_max(self, m_min, m_mean, m_max):
        self._intgr_mosaic_min = m_min
        self._intgr_mosaic_mean = m_mean
        self._intgr_mosaic_max = m_max

    def get_integrater_mosaic_min_mean_max(self):
        return self._intgr_mosaic_min, self._intgr_mosaic_mean, self._intgr_mosaic_max

    # getters and setters for program specific parameters
    # => values kept in dictionary

    def set_integrater_parameter(self, program, parameter, value):
        """Set an arbitrary parameter for the program specified to
        use in integration, e.g. the YSCALE or GAIN values in Mosflm."""
        if program not in self._intgr_program_parameters:
            self._intgr_program_parameters[program] = {}
        self._intgr_program_parameters[program][parameter] = value

    def set_integrater_parameters(self, parameters):
        """Set all parameters and values."""
        self._intgr_program_parameters = parameters
        self.set_integrater_done(False)

    def get_integrater_export_parameter(self, program, parameter):
        """Get a parameter value."""
        try:
            return self._intgr_export_program_parameters[program][parameter]
        except Exception:
            return None

    def get_integrater_export_parameters(self):
        """Get all parameters and values."""
        try:
            return self._intgr_export_program_parameters
        except Exception:
            return {}

    def set_integrater_indexer(self, indexer):
        """Set the indexer implementation to use for this integration."""
        assert issubclass(indexer.__class__, xia2.Schema.Interfaces.Indexer.Indexer), (
            "%s is not an Indexer implementation" % indexer
        )
        self._intgr_indexer = indexer
        self.set_integrater_prepare_done(False)

    def set_integrater_refiner(self, refiner):
        """Set the refiner implementation to use for this integration."""
        assert issubclass(refiner.__class__, xia2.Schema.Interfaces.Refiner.Refiner), (
            "%s is not a Refiner implementation" % refiner
        )
        self._intgr_refiner = refiner
        self.set_integrater_prepare_done(False)

    def integrate(self):
        """Actually perform integration until we think we are done..."""
        while not self.get_integrater_finish_done():
            while not self.get_integrater_done():
                while not self.get_integrater_prepare_done():
                    logger.debug("Preparing to do some integration...")
                    self.set_integrater_prepare_done(True)
                    # if this raises an exception, perhaps the autoindexing
                    # solution has too high symmetry. if this the case, then
                    # perform a self._intgr_indexer.eliminate() - this should
                    # reset the indexing system
                    try:
                        self._integrate_prepare()
                    except BadLatticeError as e:
                        logger.info("Rejecting bad lattice %s", str(e))
                        self._intgr_refiner.eliminate()
                        self._integrater_reset()
                # FIXME x1698 - may be the case that _integrate() returns the
                # raw intensities, _integrate_finish() returns intensities
                # which may have been adjusted or corrected. See #1698 below.
                logger.debug("Doing some integration...")
                self.set_integrater_done(True)
                template = self.get_integrater_sweep().get_template()
                if self._intgr_sweep_name:
                    if PhilIndex.params.xia2.settings.show_template:
                        logger.notice(
                            banner(
                                "Integrating %s (%s)"
                                % (self._intgr_sweep_name, template)
                            )
                        )
                    else:
                        logger.notice(banner("Integrating %s" % self._intgr_sweep_name))
                try:
                    # 1698
                    self._intgr_hklout_raw = self._integrate()
                except BadLatticeError as e:
                    logger.info("Rejecting bad lattice %s", str(e))
                    self._intgr_refiner.eliminate()
                    self._integrater_reset()
            self.set_integrater_finish_done(True)
            try:
                # allow for the fact that postrefinement may be used
                # to reject the lattice...
                self._intgr_hklout = self._integrate_finish()
            except BadLatticeError as e:
                logger.info("Bad Lattice Error: %s", str(e))
                self._intgr_refiner.eliminate()
                self._integrater_reset()
        return self._intgr_hklout

    def set_output_format(self, output_format="hkl"):
        """Choose 'hkl' (file) or 'pickle' (reflection table) output."""
        logger.debug("setting integrator output format to %s" % output_format)
        assert output_format in ["hkl", "pickle"]
        self._output_format = output_format

    def get_integrater_refiner(self):
        return self._intgr_refiner

    def get_integrater_intensities(self):
        """Integrate (if needed) and return the output reflections."""
        self.integrate()
        return self._intgr_hklout

    def get_integrater_batches(self):
        """Integrate (if needed) and return the (first, last) batch numbers."""
        self.integrate()
        return self._intgr_batches_out

    # Should anomalous pairs be treated separately? Implementations
    # of Integrater are free to ignore this.

    def set_integrater_anomalous(self, anomalous):
        self._intgr_anomalous = anomalous

    def get_integrater_anomalous(self):
        return self._intgr_anomalous

    # ice rings

    def set_integrater_ice(self, ice):
        self._intgr_ice = ice

    def get_integrater_ice(self):
        return self._intgr_ice

    # excluded_regions is a list of tuples representing
    # upper and lower resolution ranges to exclude

    def set_integrater_excluded_regions(self, excluded_regions):
        self._intgr_excluded_regions = excluded_regions

    def get_integrater_excluded_regions(self):
        return self._intgr_excluded_regions

    def set_integrater_spacegroup_number(self, spacegroup_number):
        # FIXME check that this is appropriate with what the
        # indexer thinks is currently correct. Also - should this
        # really just refer to a point group??
        logger.debug("Set spacegroup as %d" % spacegroup_number)
        # certainly should wipe the reindexing operation! erp! only
        # if the spacegroup number is DIFFERENT
        if spacegroup_number == self._intgr_spacegroup_number:
            return
        self._intgr_reindex_operator = None
        self._intgr_reindex_matrix = None
        self._intgr_spacegroup_number = spacegroup_number
        self.set_integrater_finish_done(False)

    def get_integrater_spacegroup_number(self):
        return self._intgr_spacegroup_number

    def integrater_reset_reindex_operator(self):
        """Reset the reindex operator."""
        return self.set_integrater_reindex_operator("h,k,l", compose=False)

    def set_integrater_reindex_operator(
        self, reindex_operator, compose=True, reason=None
    ):
        """Assign a symmetry operator to the reflections - note
        that this is cumulative..."""
        reindex_operator = reindex_operator.lower().strip()
        # see if we really need to do anything
        if reindex_operator == "h,k,l" and self._intgr_reindex_operator == "h,k,l":
            return
        # ok we need to do something - either just record the new
        # operation or compose it with the existing operation
        self.set_integrater_finish_done(False)
        if reason:
            logger.debug(
                "Reindexing to %s (compose=%s) because %s"
                % (reindex_operator, compose, reason)
            )
        if self._intgr_reindex_operator is None or not compose:
            self._intgr_reindex_operator = reindex_operator
        else:
            old_operator = self._intgr_reindex_operator
            self._intgr_reindex_operator = compose_symops(
                reindex_operator, old_operator
            )
            logger.debug(
                "Composing %s and %s -> %s"
                % (old_operator, reindex_operator, self._intgr_reindex_operator)
            )
        # convert this to a 3x3 matrix form for e.g. XDS CORRECT
        self._intgr_reindex_matrix = symop_to_mat(self._intgr_reindex_operator)
        self._set_integrater_reindex_operator_callback()

    def get_integrater_reindex_operator(self):
        return self._intgr_reindex_operator

    def get_integrater_reindex_matrix(self):
        return self._intgr_reindex_matrix

    # ------------------------------------------------
    # callback methods - overloading these is optional
    # ------------------------------------------------

    def _integrater_reset_callback(self):
        """Overload this if you have other things which need to be reset."""
        pass

    def _set_integrater_reindex_operator_callback(self):
        pass

    def show_per_image_statistics(self):
        """Render the stored per-image statistics as a human-readable report.

        Raises RuntimeError if the expected per-image keys are missing
        (i.e. refinement was not performed).
        """
        lines = []
        assert self._intgr_per_image_statistics is not None
        stats = self._intgr_per_image_statistics
        # analyse stats here, perhaps raising an exception if we
        # are unhappy with something, so that the indexing solution
        # can be eliminated in the integrater.
        images = sorted(stats)
        # these may not be present if only a couple of the
        # images were integrated...
        try:
            stddev_pixel = [stats[i]["rmsd_pixel"] for i in images]
            # fix to bug # 2501 - remove the extreme values from this
            # list...
            stddev_pixel = sorted(set(stddev_pixel))
            # only remove the extremes if there are enough values
            # that this is meaningful... very good data may only have
            # two values!
            if len(stddev_pixel) > 4:
                stddev_pixel = stddev_pixel[1:-1]
            low, high = min(stddev_pixel), max(stddev_pixel)
            lines.append("Processed batches %d to %d" % (min(images), max(images)))
            lines.append(f"Standard Deviation in pixel range: {low:.2f} {high:.2f}")
            overloads = None
            fraction_weak = None
            isigi = None
            # print a one-spot-per-image rendition of this...
            stddev_pixel = [stats[i]["rmsd_pixel"] for i in images]
            if "overloads" in list(stats.values())[0]:
                overloads = [stats[i]["overloads"] for i in images]
                strong = [stats[i]["strong"] for i in images]
            if "fraction_weak" in list(stats.values())[0]:
                fraction_weak = [stats[i]["fraction_weak"] for i in images]
            if "isigi" in list(stats.values())[0]:
                isigi = [stats[i]["isigi"] for i in images]
            # FIXME need to allow for blank images in here etc.
            status_record = ""
            for i, stddev in enumerate(stddev_pixel):
                if fraction_weak is not None and fraction_weak[i] > 0.99:
                    status_record += "."
                elif isigi is not None and isigi[i] < 1.0:
                    status_record += "."
                elif stddev > 2.5:
                    status_record += "!"
                elif stddev > 1.0:
                    status_record += "%"
                elif overloads is not None and overloads[i] > 0.01 * strong[i]:
                    status_record += "O"
                else:
                    status_record += "o"
            if len(status_record) > 60:
                lines.append("Integration status per image (60/record):")
            else:
                lines.append("Integration status per image:")
            for chunk in (
                status_record[i : i + 60] for i in range(0, len(status_record), 60)
            ):
                lines.append(chunk)
            lines.append('"o" => good "%" => ok "!" => bad rmsd')
            lines.append('"O" => overloaded "#" => many bad "." => weak')
            lines.append('"@" => abandoned')
            # next look for variations in the unit cell parameters
            if "unit_cell" in list(stats.values())[0]:
                unit_cells = [stats[i]["unit_cell"] for i in images]
                # compute average
                uc_mean = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                for uc in unit_cells:
                    for j in range(6):
                        uc_mean[j] += uc[j]
                for j in range(6):
                    uc_mean[j] /= len(unit_cells)
                max_rel_dev = 0.0
                for uc in unit_cells:
                    for j in range(6):
                        if (math.fabs(uc[j] - uc_mean[j]) / uc_mean[j]) > max_rel_dev:
                            max_rel_dev = math.fabs(uc[j] - uc_mean[j]) / uc_mean[j]
                lines.append("Maximum relative deviation in cell: %.3f" % max_rel_dev)
        except KeyError:
            raise RuntimeError("Refinement not performed...")
        return "\n".join(lines)
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Schema/Interfaces/Integrater.py",
"copies": "1",
"size": "28028",
"license": "bsd-3-clause",
"hash": -6803015810135161000,
"line_mean": 33.8606965174,
"line_max": 88,
"alpha_frac": 0.585093478,
"autogenerated": false,
"ratio": 4.04678024833959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.513187372633959,
"avg_score": null,
"num_lines": null
} |
# An interface for programs which perform indexing - this will handle
# all of the aspects of the interface which are common between indexing
# programs, and which must be presented in order to satisfy the contract
# for the indexer interface.
#
# The following are considered to be critical for this class:
#
# Images to index - optional this could be decided by the implementation
# Refined beam position
# Refined distance
# Mosaic spread
#
# Input: ?Selected lattice?
# Input: ?Cell?
# Output: Selected lattice
# Output: Unit cell
# Output: Aux information - may include matrix files &c. This is going to
# be in the "payload" and will be program specific.
#
# Methods:
#
# index() -> delegated to implementation._index()
#
# Notes:
#
# All properties of this class are prefixed with either indxr for protected
# things or Indexer for public things.
#
# Error Conditions:
#
# A couple of conditions will give indexing errors -
# (1) if no solution matching the input was found
# (2) if the images were blank
# (3) if the indexing just failed (bad beam, etc.)
#
# These need to be handled properly with helpful error messages.
#
import inspect
import json
import logging
import os
from functools import reduce
from cctbx.sgtbx import bravais_types
from xia2.Experts.LatticeExpert import SortLattices
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.Streams import banner
logger = logging.getLogger("xia2.Schema.Interfaces.Indexer")
class _IndexerHelper:
"""
Manage autoindexing results in a useful way.
Ensure that the indexing solutions are properly managed, including in the case
of pseudo-symmetry.
"""
def __init__(self, lattice_cell_dict):
"""Initialise myself from a dictionary keyed by crystal lattice
classes (e.g. tP) containing unit cells for these lattices."""
self._sorted_list = SortLattices(lattice_cell_dict.items())
def get(self):
"""Get the highest currently allowed lattice."""
return self._sorted_list[0]
def get_all(self):
"""Return a list of all allowed lattices, as [(lattice, cell)]."""
return self._sorted_list
def repr(self):
"""Return a string representation."""
return [
"%s %s" % (l[0], "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f" % l[1])
for l in self._sorted_list
]
def insert(self, lattice, cell):
"""Insert a new solution, e.g. from some postprocessing from
the indexer. N.B. this will be re-sorted."""
lattices = [(lattice, cell)]
for l in self._sorted_list:
lattices.append(l)
self._sorted_list = SortLattices(lattices)
def eliminate(self, indxr_print=True):
"""Eliminate the highest currently allowed lattice."""
if len(self._sorted_list) <= 1:
raise RuntimeError("cannot eliminate only solution")
if indxr_print:
logger.info("Eliminating indexing solution:")
logger.info(self.repr()[0])
self._sorted_list = self._sorted_list[1:]
def beam_centre(detector, beam):
    """Locate the beam centre on the detector.

    Returns (panel_id, (x, y)) where x, y is the beam/panel intersection
    in mm.  Panels the ray misses entirely (RuntimeError) are skipped;
    the scan stops at the first panel reporting a valid coordinate, else
    the last computed intersection (or (None, None)) is returned with
    the final panel id.

    NOTE(review): an empty detector raises NameError (panel_id unbound)
    here - assumed never to happen in practice; confirm with callers.
    """
    s0 = beam.get_s0()
    hit_x, hit_y = None, None
    for panel_id, panel in enumerate(detector):
        try:
            hit_x, hit_y = panel.get_bidirectional_ray_intersection(s0)
        except RuntimeError:
            # the ray does not intersect this panel at all - try the next
            continue
        if panel.is_coord_valid_mm((hit_x, hit_y)):
            break
    return panel_id, (hit_x, hit_y)
def beam_centre_raw_image(detector, beam):
    """Beam centre expressed in the raw-image coordinate frame (mm).

    Converts the mm intersection from beam_centre() to pixels, applies
    the panel's raw-image offset, then maps back to millimetres.
    """
    panel_id, mm_xy = beam_centre(detector, beam)
    panel = detector[panel_id]
    px, py = panel.millimeter_to_pixel(mm_xy)
    offset = panel.get_raw_image_offset()
    return panel.pixel_to_millimeter((px + offset[0], py + offset[1]))
class Indexer:
    """A class interface to present autoindexing functionality in a standard
    way for all indexing programs. Note that this interface defines the
    contract - what the implementation actually does is a matter for the
    implementation."""

    # Status constants returned by set_indexer_asserted_lattice().
    LATTICE_POSSIBLE = "LATTICE_POSSIBLE"
    LATTICE_IMPOSSIBLE = "LATTICE_IMPOSSIBLE"
    LATTICE_CORRECT = "LATTICE_CORRECT"

    def __init__(self):
        self._indxr_working_directory = os.getcwd()
        # (optional) input parameters
        self._indxr_input_lattice = None
        self._indxr_input_cell = None
        self._indxr_user_input_lattice = False
        # job management parameters
        self._indxr_done = False
        self._indxr_prepare_done = False
        self._indxr_finish_done = False
        self._indxr_sweep_name = None
        self._indxr_pname = None
        self._indxr_xname = None
        self._indxr_dname = None
        # links to where my data is coming from
        self._indxr_sweeps = []
        self._indxr_imagesets = []
        # the helper to manage the solutions table
        self._indxr_helper = None
        # output items - best solution
        self._indxr_lattice = None
        self._indxr_cell = None
        # a place to store other plausible solutions - used
        # for populating the helper in the main index() method
        self._indxr_other_lattice_cell = {}
        # refined experimental parameters
        self._indxr_mosaic = None
        self._indxr_refined_beam_centre = None
        self._indxr_refined_distance = None
        self._indxr_resolution_estimate = 0.0
        self._indxr_low_resolution = 0.0
        # refined dxtbx experimental objects
        # XXX here we would be better storing a dials experiment object
        self._indxr_refined_beam = None
        self._indxr_refined_detector = None
        self._indxr_refined_goniometer = None
        self._indxr_refined_scan = None
        self._indxr_experiment_list = None
        # spot list in an as yet to be defined standard reference frame
        self._indxr_spot_list = None
        # error information
        self._indxr_error = None
        # extra indexing guff - a dictionary which the implementation
        # can store things in
        self._indxr_payload = {}
        self._indxr_print = True
        # mirror the class-level status constants on the instance for
        # convenient access through self
        self.LATTICE_CORRECT = Indexer.LATTICE_CORRECT
        self.LATTICE_POSSIBLE = Indexer.LATTICE_POSSIBLE
        self.LATTICE_IMPOSSIBLE = Indexer.LATTICE_IMPOSSIBLE

    # admin functions

    def set_working_directory(self, working_directory):
        """Set the directory this indexer should work in."""
        self._indxr_working_directory = working_directory

    def get_working_directory(self):
        """Return the directory this indexer works in."""
        return self._indxr_working_directory

    # serialization functions

    def to_dict(self):
        """Serialize the indexer state into a plain (JSON-compatible) dict.

        All non-callable attributes prefixed _indxr_ or _fp_ are captured;
        the helper, the experiment list and the imagesets are converted to
        serializable forms first.
        """
        obj = {}
        obj["__id__"] = "Indexer"
        obj["__module__"] = self.__class__.__module__
        obj["__name__"] = self.__class__.__name__
        # collect every non-callable attribute of the instance
        attributes = inspect.getmembers(self, lambda m: not inspect.isroutine(m))
        for a in attributes:
            if a[0] == "_indxr_helper" and a[1] is not None:
                # flatten the helper to {lattice: cell}
                lattice_cell_dict = {}
                lattice_list = a[1].get_all()
                for l, c in lattice_list:
                    lattice_cell_dict[l] = c
                obj[a[0]] = lattice_cell_dict
            elif a[0] == "_indxr_experiment_list" and a[1] is not None:
                obj[a[0]] = a[1].to_dict()
            elif a[0] == "_indxr_imagesets":
                from dxtbx.serialize.imageset import imageset_to_dict
                obj[a[0]] = [imageset_to_dict(imgset) for imgset in a[1]]
            elif a[0] == "_indxr_sweeps":
                # XXX I guess we probably want this?
                continue
            elif a[0].startswith("_indxr_") or a[0].startswith("_fp_"):
                obj[a[0]] = a[1]
        return obj

    @classmethod
    def from_dict(cls, obj):
        """Reconstruct an indexer of this class from a to_dict() dict."""
        assert obj["__id__"] == "Indexer"
        assert obj["__name__"] == cls.__name__
        return_obj = cls()
        for k, v in obj.items():
            # rebuild the solution-table helper from its flattened form
            if k == "_indxr_helper" and v is not None:
                v = _IndexerHelper(v)
            if k == "_indxr_imagesets" and len(v):
                assert v[0].get("__id__") == "imageset"
                from dxtbx.serialize.imageset import imageset_from_dict
                v = [imageset_from_dict(v_, check_format=False) for v_ in v]
            if isinstance(v, dict):
                if v.get("__id__") == "ExperimentList":
                    from dxtbx.model.experiment_list import ExperimentListFactory
                    v = ExperimentListFactory.from_dict(v, check_format=False)
            setattr(return_obj, k, v)
        return return_obj

    def as_json(self, filename=None, compact=False):
        """Serialize to JSON; write to filename if given, else return str."""
        obj = self.to_dict()
        if compact:
            text = json.dumps(
                obj, skipkeys=True, separators=(",", ":"), ensure_ascii=True
            )
        else:
            text = json.dumps(obj, skipkeys=True, indent=2, ensure_ascii=True)
        # If a filename is set then dump to file otherwise return string
        if filename is not None:
            with open(filename, "w") as outfile:
                outfile.write(text)
        else:
            return text

    @classmethod
    def from_json(cls, filename=None, string=None):
        """Deserialize from a JSON file OR string (exactly one of the two)."""
        assert [filename, string].count(None) == 1
        if filename is not None:
            with open(filename, "rb") as f:
                string = f.read()
        obj = json.loads(string)
        return cls.from_dict(obj)

    # ----------------------------------------------------------------
    # These are functions which will want to be overloaded for the
    # actual implementation - preparation may do things like gathering
    # spots on the images, index to perform the actual autoindexing
    # and then finish to do any finishing up you want... see the
    # method index() below for how these are used
    # ----------------------------------------------------------------

    def _index_prepare(self):
        """Prepare to index, e.g. finding spots on the images."""
        raise NotImplementedError("overload me")

    def _index(self):
        """Actually perform the autoindexing calculations."""
        raise NotImplementedError("overload me")

    def _index_finish(self):
        """This may be a no-op if you have no use for it..."""
        pass

    # setters and getters of the status of the tasks - note that
    # these will cascade, so setting an early task not done will
    # set later tasks not done.

    def set_indexer_prepare_done(self, done=True):
        self._indxr_prepare_done = done
        # cascade: un-doing prepare invalidates the later stages too
        if not done:
            self.set_indexer_done(False)

    def set_indexer_done(self, done=True):
        self._indxr_done = done
        if not done:
            self.set_indexer_finish_done(False)

    def set_indexer_finish_done(self, done=True):
        self._indxr_finish_done = done

    def set_indexer_sweep(self, sweep):
        # kept for API compatibility; simply appends like add_indexer_sweep
        self.add_indexer_sweep(sweep)

    def get_indexer_sweep(self):
        """Return the first registered sweep, or None if there are none."""
        if self._indxr_sweeps:
            return self._indxr_sweeps[0]

    def add_indexer_sweep(self, sweep):
        self._indxr_sweeps.append(sweep)

    def get_indexer_sweeps(self):
        return self._indxr_sweeps

    def set_indexer_sweep_name(self, sweep_name):
        self._indxr_sweep_name = sweep_name

    def get_indexer_sweep_name(self):
        return self._indxr_sweep_name

    def set_indexer_project_info(self, project_name, crystal_name, dataset_name):
        """Record the project / crystal / dataset naming for this job."""
        self._indxr_pname = project_name
        self._indxr_xname = crystal_name
        self._indxr_dname = dataset_name

    def get_indexer_project_info(self):
        return self._indxr_pname, self._indxr_xname, self._indxr_dname

    def get_indexer_full_name(self):
        """Return "pname xname dname sweep_name" as a single string."""
        return "%s %s %s %s" % tuple(
            list(self.get_indexer_project_info()) + [self._indxr_sweep_name]
        )

    # getters of the status - note well that these need to cascade
    # the status... note that for the prepare get there is no previous
    # step we could cascade to...

    def get_indexer_prepare_done(self):
        return self._indxr_prepare_done

    def get_indexer_done(self):
        if not self.get_indexer_prepare_done():
            logger.debug("Resetting indexer done as prepare not done")
            self.set_indexer_done(False)
        return self._indxr_done

    def get_indexer_finish_done(self):
        if not self.get_indexer_done():
            # record the caller (file/line) for debugging the cascade
            f = inspect.currentframe().f_back
            m = f.f_code.co_filename
            l = f.f_lineno
            logger.debug(
                "Resetting indexer finish done as index not done, from %s/%d", m, l
            )
            self.set_indexer_finish_done(False)
        return self._indxr_finish_done

    # ----------------------------------------------------------
    # "real" methods which actually do something interesting -
    # eliminate() will remove a solution from the indexing table
    # and reset the done, such that the next get() will return
    # the next solution down.
    # ----------------------------------------------------------

    def eliminate(self, indxr_print=True):
        """Eliminate the current solution for autoindexing."""
        if not self._indxr_helper:
            raise RuntimeError("no indexing done yet")
        # not allowed to eliminate a solution provided by the
        # user via set_indexer_lattice... - this is determined by
        # the fact that the set lattice has user = true as
        # an argument
        if self._indxr_user_input_lattice:
            raise RuntimeError("eliminating user supplied lattice")
        self._indxr_helper.eliminate(indxr_print=indxr_print)
        self.set_indexer_done(False)

    def _indxr_replace(self, lattice, cell, indxr_print=True):
        """Replace the highest symmetry in the solution table with this...
        Only use this method if you REALLY know what you are doing!"""
        self._indxr_helper.eliminate(indxr_print=indxr_print)
        self._indxr_helper.insert(lattice, cell)

    def index(self):
        """Drive the full prepare -> index -> finish state machine.

        Each stage loops until its done flag sticks, so an implementation
        may reset an earlier flag to force a re-run (see eliminate()).
        """
        if not self.get_indexer_finish_done():
            f = inspect.currentframe().f_back.f_back
            m = f.f_code.co_filename
            l = f.f_lineno
            logger.debug(
                "Index in %s called from %s %d" % (self.__class__.__name__, m, l)
            )
        while not self.get_indexer_finish_done():
            while not self.get_indexer_done():
                while not self.get_indexer_prepare_done():
                    # --------------
                    # call prepare()
                    # --------------
                    self.set_indexer_prepare_done(True)
                    self._index_prepare()
                # --------------------------------------------
                # then do the proper indexing - using the best
                # solution already stored if available (c/f
                # eliminate above)
                # --------------------------------------------
                self.set_indexer_done(True)
                if self.get_indexer_sweeps():
                    xsweeps = [s.get_name() for s in self.get_indexer_sweeps()]
                    if len(xsweeps) > 1:
                        # find "SWEEPn, SWEEP(n+1), (..), SWEEPm" and aggregate to "SWEEPS n-m"
                        xsweeps = [
                            (int(x[5:]), int(x[5:])) if x.startswith("SWEEP") else x
                            for x in xsweeps
                        ]
                        # seed the reduce accumulator with a one-element list
                        xsweeps[0] = [xsweeps[0]]

                        def compress(seen, nxt):
                            # merge (a, b) and (b+1, c) runs into (a, c)
                            if (
                                isinstance(seen[-1], tuple)
                                and isinstance(nxt, tuple)
                                and (seen[-1][1] + 1 == nxt[0])
                            ):
                                seen[-1] = (seen[-1][0], nxt[1])
                            else:
                                seen.append(nxt)
                            return seen

                        xsweeps = reduce(compress, xsweeps)
                        xsweeps = [
                            (
                                "SWEEP%d" % x[0]
                                if x[0] == x[1]
                                else "SWEEPS %d to %d" % (x[0], x[1])
                            )
                            if isinstance(x, tuple)
                            else x
                            for x in xsweeps
                        ]
                    if len(xsweeps) > 1:
                        sweep_names = ", ".join(xsweeps[:-1])
                        sweep_names += " & " + xsweeps[-1]
                    else:
                        sweep_names = xsweeps[0]
                    if PhilIndex.params.xia2.settings.show_template:
                        template = self.get_indexer_sweep().get_template()
                        logger.notice(
                            banner("Autoindexing %s (%s)", sweep_names, template)
                        )
                    else:
                        logger.notice(banner("Autoindexing %s" % sweep_names))
                if not self._indxr_helper:
                    # first pass - no solution table exists yet
                    self._index()
                    if not self._indxr_done:
                        logger.debug("Looks like indexing failed - try again!")
                        continue
                    solutions = {
                        k: c["cell"] for k, c in self._indxr_other_lattice_cell.items()
                    }
                    # create a helper for the indexer to manage solutions
                    self._indxr_helper = _IndexerHelper(solutions)
                    solution = self._indxr_helper.get()
                    # compare these against the final solution, if different
                    # reject solution and return - correct solution will
                    # be used next cycle
                    if (
                        self._indxr_lattice != solution[0]
                        and not self._indxr_input_cell
                        and not PhilIndex.params.xia2.settings.integrate_p1
                    ):
                        logger.info(
                            "Rerunning indexing lattice %s to %s",
                            self._indxr_lattice,
                            solution[0],
                        )
                        self.set_indexer_done(False)
                else:
                    # rerun autoindexing with the best known current solution
                    solution = self._indxr_helper.get()
                    self._indxr_input_lattice = solution[0]
                    self._indxr_input_cell = solution[1]
                    self._index()
            # next finish up...
            self.set_indexer_finish_done(True)
            self._index_finish()
        if self._indxr_print:
            logger.info(self.show_indexer_solutions())

    def show_indexer_solutions(self):
        """Return a multi-line summary of all solutions plus the chosen one."""
        lines = ["All possible indexing solutions:"]
        for l in self._indxr_helper.repr():
            lines.append(l)
        crystal_model = self._indxr_experiment_list[0].crystal
        lattice = str(
            bravais_types.bravais_lattice(group=crystal_model.get_space_group())
        )
        lines.append("Indexing solution:")
        lines.append(
            "%s %s"
            % (
                lattice,
                "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f"
                % crystal_model.get_unit_cell().parameters(),
            )
        )
        return "\n".join(lines)

    # setter methods for the input - most of these will reset the
    # indexer in one way or another

    def add_indexer_imageset(self, imageset):
        self._indxr_imagesets.append(imageset)

    # these relate to propogation of the fact that this is user assigned ->
    # so if we try to eliminate raise an exception... must be coordinated
    # with lattice setting below

    def set_indexer_user_input_lattice(self, user):
        self._indxr_user_input_lattice = user

    def get_indexer_user_input_lattice(self):
        return self._indxr_user_input_lattice

    def set_indexer_input_lattice(self, lattice):
        """Set the input lattice for this indexing job. Exactly how this
        is handled depends on the implementation. FIXED decide on the
        format for the lattice. This will be say tP."""
        self._indxr_input_lattice = lattice
        self.set_indexer_done(False)

    def set_indexer_input_cell(self, cell):
        """Set the input unit cell (optional.)"""
        if not (isinstance(cell, type(())) or isinstance(cell, type([]))):
            raise RuntimeError("cell must be a 6-tuple of floats, is %s" % str(cell))
        if len(cell) != 6:
            raise RuntimeError("cell must be a 6-tuple of floats")
        self._indxr_input_cell = tuple(map(float, cell))
        self.set_indexer_done(False)

    # getter methods for the output - all of these will call index()
    # which will guarantee that the results are up to date (recall
    # while structure above)

    def get_indexer_cell(self):
        """Get the selected unit cell."""
        self.index()
        return self._indxr_experiment_list[0].crystal.get_unit_cell().parameters()

    def get_indexer_lattice(self):
        """Get the selected lattice as tP form."""
        self.index()
        crystal_model = self._indxr_experiment_list[0].crystal
        return str(bravais_types.bravais_lattice(group=crystal_model.get_space_group()))

    def get_indexer_mosaic(self):
        """Get the estimated mosaic spread in degrees."""
        self.index()
        return self._indxr_mosaic

    def get_indexer_distance(self):
        """Get the refined distance."""
        self.index()
        experiment = self.get_indexer_experiment_list()[0]
        return experiment.detector[0].get_directed_distance()

    def get_indexer_beam_centre(self):
        """Get the refined beam."""
        self.index()
        experiment = self.get_indexer_experiment_list()[0]
        # FIXME need to consider interaction of xia2 with multi-panel detectors
        # NOTE: reversed() swaps the (x, y) returned by beam_centre()
        return tuple(reversed(beam_centre(experiment.detector, experiment.beam)[1]))

    def get_indexer_beam_centre_raw_image(self):
        """Get the refined beam in raw image coordinates."""
        self.index()
        experiment = self.get_indexer_experiment_list()[0]
        return tuple(
            reversed(beam_centre_raw_image(experiment.detector, experiment.beam))
        )

    def get_indexer_payload(self, this):
        """Attempt to get something from the indexer payload."""
        self.index()
        return self._indxr_payload.get(this)

    def get_indexer_low_resolution(self):
        """Get an estimate of the low resolution limit of the data."""
        self.index()
        return self._indxr_low_resolution

    def set_indexer_payload(self, this, value):
        """Set something in the payload."""
        self._indxr_payload[this] = value

    # new method to handle interaction with the pointgroup determination
    # much later on in the process - this allows a dialogue to be established.

    def set_indexer_asserted_lattice(self, asserted_lattice):
        """Assert that this lattice is correct - if this is allowed (i.e.
        is in the helpers list of kosher lattices) then it will be enabled.
        If this is different to the current favourite then processing
        may ensue, otherwise nothing will happen."""
        assert self._indxr_helper
        all_lattices = self._indxr_helper.get_all()
        if asserted_lattice not in [l[0] for l in all_lattices]:
            return self.LATTICE_IMPOSSIBLE
        # check if this is the top one - if so we don't need to
        # do anything
        if asserted_lattice == all_lattices[0][0]:
            if (
                PhilIndex.params.xia2.settings.integrate_p1
                and asserted_lattice != self.get_indexer_lattice()
                and asserted_lattice != "aP"
            ):
                if PhilIndex.params.xia2.settings.reintegrate_correct_lattice:
                    self.set_indexer_done(False)
                    return self.LATTICE_POSSIBLE
                return self.LATTICE_CORRECT
            return self.LATTICE_CORRECT
        # ok this means that we need to do something - work through
        # eliminating lattices until the "correct" one is found...
        while self._indxr_helper.get()[0] != asserted_lattice:
            self._indxr_helper.eliminate()
        if (
            not PhilIndex.params.xia2.settings.integrate_p1
            or PhilIndex.params.xia2.settings.reintegrate_correct_lattice
        ):
            self.set_indexer_done(False)
        return self.LATTICE_POSSIBLE

    def set_indexer_experiment_list(self, experiments_list):
        self._indxr_experiment_list = experiments_list

    def get_indexer_experiment_list(self):
        self.index()
        return self._indxr_experiment_list
# class for legacy Indexers that only support indexing from a single sweep
class IndexerSingleSweep(Indexer):
    """Indexer flavour for legacy implementations that can only index
    from a single sweep of images."""

    def __init__(self):
        super().__init__()
        # image wedges nominated for autoindexing, as (start, end) tuples
        self._indxr_images = []

    def get_imageset(self):
        return self._indxr_imagesets[0]

    def get_scan(self):
        return self.get_imageset().get_scan()

    def get_detector(self):
        return self.get_imageset().get_detector()

    def set_detector(self, detector):
        self.get_imageset().set_detector(detector)

    def get_goniometer(self):
        return self.get_imageset().get_goniometer()

    def set_goniometer(self, goniometer):
        return self.get_imageset().set_goniometer(goniometer)

    def get_beam(self):
        return self.get_imageset().get_beam()

    def set_beam(self, beam):
        return self.get_imageset().set_beam(beam)

    def get_wavelength(self):
        return self.get_beam().get_wavelength()

    def get_distance(self):
        return self.get_detector()[0].get_directed_distance()

    def get_phi_width(self):
        return self.get_scan().get_oscillation()[1]

    def get_matching_images(self):
        """Return the 1-offset image numbers covered by the scan."""
        first, last = self.get_scan().get_array_range()
        return tuple(range(first + 1, last + 1))

    def get_image_name(self, number):
        """Map an image number onto a path within the imageset."""
        offset = self.get_scan().get_image_range()[0]
        return self.get_imageset().get_path(number - offset)

    def get_template(self):
        return self.get_imageset().get_template()

    def get_directory(self):
        return os.path.dirname(self.get_template())

    def add_indexer_image_wedge(self, image, reset=True):
        """Add some images for autoindexing (optional) input is a 2-tuple
        or an integer."""
        if isinstance(image, tuple):
            self._indxr_images.append(image)
        elif isinstance(image, int):
            self._indxr_images.append((image, image))
        if reset:
            self.set_indexer_prepare_done(False)

    def get_indexer_images(self):
        return self._indxr_images
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Schema/Interfaces/Indexer.py",
"copies": "1",
"size": "26930",
"license": "bsd-3-clause",
"hash": -5571892499541710000,
"line_mean": 33.3933588761,
"line_max": 95,
"alpha_frac": 0.5666171556,
"autogenerated": false,
"ratio": 4.0278193239605145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094436479560515,
"avg_score": null,
"num_lines": null
} |
# An interface for programs which process X-Ray diffraction images.
# This adds the code for handling the templates, directories etc.
# but not the use of them e.g. the keyworded input.
#
# This is a virtual class - and should be inherited from only for the
# purposes of using the methods.
#
# The following are considered critical to this class:
#
# Template, directory. Template in the form ### not ???
# Distance (mm), wavelength (ang), beam centre (mm, mm),
# image header information
import logging
import math
import os
from dxtbx.model.detector_helpers import set_mosflm_beam_centre
from scitbx import matrix
from xia2.Experts.FindImages import (
digest_template,
find_matching_images,
image2image,
image2template_directory,
template_directory_number2image,
)
from xia2.Schema import load_imagesets
logger = logging.getLogger("xia2.Schema.Interfaces.FrameProcessor")
class FrameProcessor:
    """A class to handle the information needed to process X-Ray
    diffraction frames."""

    def __init__(self, image=None):
        super().__init__()
        # template/directory describing the image files on disk
        self._fp_template = None
        self._fp_directory = None
        self._fp_matching_images = []
        self._fp_offset = 0
        self._fp_two_theta = 0.0
        # where the beam centre came from: None, "header" or "user"
        self._fp_beam_prov = None
        self._fp_gain = 0.0
        self._fp_polarization = 0.0
        self._fp_header = {}
        # see FIXME for 06/SEP/06
        self._fp_xsweep = None
        # also need to keep track of allowed images in here
        self._fp_wedge = None
        self._fp_imageset = None
        # if image has been specified, construct much of this information
        # from the image
        if image:
            self._setup_from_image(image)

    def set_frame_wedge(self, start, end, apply_offset=True):
        """Set the allowed range of images for processing."""
        # XXX RJG Better to pass slice of imageset here?
        if apply_offset:
            start = start - self._fp_offset
            end = end - self._fp_offset
        self._fp_wedge = start, end
        if self._fp_matching_images:
            # trim the matching image list to the new wedge
            images = []
            for j in self._fp_matching_images:
                if j < start or j > end:
                    continue
                images.append(j)
            self._fp_matching_images = images
            ## reload the header information as well - this will be
            ## for the old wedge...# read the image header
            ## XXX this shouldn't be needed
            from dxtbx.imageset import ImageSetFactory
            imageset = ImageSetFactory.new(self.get_image_name(start))[0]
            # print this to the debug channel
            logger.debug("Latest header information for image %d:" % start)
            logger.debug(imageset.get_detector())
            logger.debug(imageset.get_scan())
            logger.debug(imageset.get_beam())
            logger.debug(imageset.get_goniometer())
            # populate wavelength, beam etc from this
            if self._fp_beam_prov is None or self._fp_beam_prov == "header":
                self._fp_beam_prov = "header"

    def get_frame_wedge(self):
        return self._fp_wedge

    def get_template(self):
        return self._fp_template

    def get_frame_offset(self):
        return self._fp_offset

    def get_directory(self):
        return self._fp_directory

    def get_matching_images(self):
        return self._fp_matching_images

    def set_wavelength(self, wavelength):
        self.get_beam_obj().set_wavelength(wavelength)

    def get_wavelength(self):
        return self.get_beam_obj().get_wavelength()

    def set_distance(self, distance):
        # NOTE(review): deliberately a no-op here - presumably retained
        # for interface compatibility; confirm subclasses override it
        pass

    def get_distance(self):
        return self.get_detector()[0].get_directed_distance()

    def set_gain(self, gain):
        self._fp_gain = gain

    def get_gain(self):
        return self._fp_gain

    def set_polarization(self, polarization):
        self._fp_polarization = polarization

    def get_polarization(self):
        return self._fp_polarization

    def set_beam_centre(self, beam_centre):
        """Push a user-supplied beam centre onto the detector model."""
        try:
            set_mosflm_beam_centre(
                self.get_detector(), self.get_beam_obj(), beam_centre
            )
            self._fp_beam_prov = "user"
        except AssertionError as e:
            # best-effort: keep the existing centre if the update fails
            logger.debug("Error setting mosflm beam centre: %s" % e)

    def get_beam_centre(self):
        # delegates to the module-level get_beam_centre() function
        detector = self.get_detector()
        beam = self.get_beam_obj()
        return get_beam_centre(detector, beam)

    def get_two_theta(self):
        return self._fp_two_theta

    def get_phi_width(self):
        return self.get_scan().get_oscillation()[1]

    def get_header(self):
        return self._fp_header

    # utility functions

    def get_image_name(self, number):
        """Convert an image number into a name."""
        return template_directory_number2image(
            self.get_template(), self.get_directory(), number
        )

    def get_image_number(self, image):
        """Convert an image name to a number."""
        if isinstance(image, type(1)):
            return image
        return image2image(image)

    # getters/setters for dxtbx objects

    def get_imageset(self):
        return self._fp_imageset

    def get_scan(self):
        return self._fp_imageset.get_scan()

    def get_detector(self):
        return self._fp_imageset.get_detector()

    def set_detector(self, detector):
        self._fp_imageset.set_detector(detector)

    def get_goniometer(self):
        return self._fp_imageset.get_goniometer()

    def set_goniometer(self, goniometer):
        self._fp_imageset.set_goniometer(goniometer)

    def get_beam_obj(self):
        return self._fp_imageset.get_beam()

    def set_beam_obj(self, beam):
        self._fp_imageset.set_beam(beam)

    def setup_from_image(self, image):
        """Public entry point: configure from an image path, exactly once."""
        if self._fp_template and self._fp_directory:
            raise RuntimeError("FrameProcessor implementation already set up")
        self._setup_from_image(image)

    def setup_from_imageset(self, imageset):
        """Public entry point: configure from an imageset, exactly once."""
        if self._fp_imageset:
            raise RuntimeError("FrameProcessor implementation already set up")
        self._setup_from_imageset(imageset)

    # private methods

    def _setup_from_image(self, image):
        """Configure myself from an image name."""
        template, directory = image2template_directory(image)
        self._fp_matching_images = find_matching_images(template, directory)
        # trim this down to only allowed images...
        if self._fp_wedge:
            start, end = self._fp_wedge
            images = []
            for j in self._fp_matching_images:
                if j < start or j > end:
                    continue
                images.append(j)
            self._fp_matching_images = images
        imagesets = load_imagesets(
            template,
            directory,
            image_range=(self._fp_matching_images[0], self._fp_matching_images[-1]),
        )
        assert len(imagesets) == 1, "multiple imagesets match %s" % template
        imageset = imagesets[0]
        self._setup_from_imageset(imageset)

    def _setup_from_imageset(self, imageset):
        """Configure myself from an image name."""
        image_range = imageset.get_scan().get_image_range()
        self._fp_imageset = imageset
        try:
            self._fp_directory, self._fp_template = os.path.split(
                imageset.get_template()
            )
        except AttributeError:
            # imageset has no template; fall back to the first image's path
            try:
                self._fp_directory = os.path.dirname(imageset.get_path(image_range[0]))
            except Exception:
                pass
        except Exception:
            pass
        self._fp_matching_images = tuple(range(image_range[0], image_range[1] + 1))
        if self._fp_beam_prov is None:
            beam = imageset.get_beam()
            detector = imageset.get_detector()
            y, x = get_beam_centre(detector, beam)
            self._fp_beam = y, x
            self._fp_beam_prov = "header"
        if self._fp_template is not None:
            self.digest_template()

    def digest_template(self):
        """Strip out common characters from the image list and move them
        to the template."""
        if self._fp_template.endswith(".h5"):
            # do not mess with the templates if container file
            return
        # calls the module-level digest_template() helper (imported above);
        # the method name does not shadow it inside this body
        template, images, offset = digest_template(
            self._fp_template, self._fp_matching_images
        )
        self._fp_template = template
        self._fp_matching_images = images
        self._fp_offset = offset
def get_beam_centre(detector, beam):
    """Return the beam centre (y, x) in mm on the panel hit by the beam.

    For a panel (close to) normal to the beam the centre comes straight
    from the panel D-matrix projection; otherwise the panel is notionally
    rotated back to the normal-incidence position before the centre is
    computed.
    """
    # pick the panel the beam actually intersects on multi-panel detectors
    if len(detector) > 1:
        panel_id = detector.get_panel_intersection(beam.get_s0())
    else:
        panel_id = 0
    panel = detector[panel_id]
    s0 = matrix.col(beam.get_s0())
    f = matrix.col(panel.get_fast_axis())
    s = matrix.col(panel.get_slow_axis())
    n = matrix.col(panel.get_normal())
    o = matrix.col(panel.get_origin())
    # axis and angle of the two-theta shift.
    # NOTE(review): the original branched on
    # abs(f.dot(s0)) > abs(s.dot(s0)) but BOTH branches computed exactly
    # the same r and a, so the conditional was dead code and has been
    # collapsed.  The fast/slow distinction may once have been intended
    # to matter - confirm against the detector geometry if revisiting.
    r = n.cross(s0)
    a = n.angle(s0)
    # if two theta small use old version of code - remembering modulo pi
    if abs(a % math.pi) < 5.0 * math.pi / 180.0:
        D = matrix.sqr(panel.get_D_matrix())
        v = D * beam.get_s0()
        x, y = v[0] / v[2], v[1] / v[2]
        return y, x
    # rotate the panel origin back to the two-theta = 0 position and
    # project the beam onto the rotated fast/slow axes
    R = r.axis_and_angle_as_r3_rotation_matrix(a)
    Ro = R * o
    b = -Ro + Ro.dot(s0) * s0
    beam_x = b.dot(R * f)
    beam_y = b.dot(R * s)
    return beam_y, beam_x
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Schema/Interfaces/FrameProcessor.py",
"copies": "1",
"size": "9676",
"license": "bsd-3-clause",
"hash": 7502535004706504000,
"line_mean": 28.3212121212,
"line_max": 87,
"alpha_frac": 0.5937370814,
"autogenerated": false,
"ratio": 3.759129759129759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9851512061563497,
"avg_score": 0.0002709557932525394,
"num_lines": 330
} |
"""An interface for publishing rich data to frontends.
There are two components of the display system:
* Display formatters, which take a Python object and compute the
representation of the object in various formats (text, HTML, SVG, etc.).
* The display publisher that is used to send the representation data to the
various frontends.
This module defines the logic display publishing. The display publisher uses
the ``display_data`` message type that is defined in the IPython messaging
spec.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
from IPython.config.configurable import Configurable
from IPython.utils import io
#-----------------------------------------------------------------------------
# Main payload class
#-----------------------------------------------------------------------------
class DisplayPublisher(Configurable):
    """A traited class that publishes display data to frontends.

    Instances of this class are created by the main IPython object and should
    be accessed there.
    """

    def _validate_data(self, source, data, metadata=None):
        """Validate the display data.

        Parameters
        ----------
        source : str
            The fully dotted name of the callable that created the data, like
            :func:`foo.bar.my_formatter`.
        data : dict
            The format data dictionary.
        metadata : dict
            Any metadata for the data.

        Raises
        ------
        TypeError
            If any argument is of the wrong type.
        """
        if not isinstance(source, basestring):
            raise TypeError('source must be a str, got: %r' % source)
        if not isinstance(data, dict):
            raise TypeError('data must be a dict, got: %r' % data)
        if metadata is not None:
            if not isinstance(metadata, dict):
                # BUG FIX: this message previously interpolated ``data``
                # (already validated) instead of the offending ``metadata``.
                raise TypeError('metadata must be a dict, got: %r' % metadata)

    def publish(self, source, data, metadata=None):
        """Publish data and metadata to all frontends.

        See the ``display_data`` message in the messaging documentation for
        more details about this message type.

        The following MIME types are currently implemented:

        * text/plain
        * text/html
        * text/latex
        * application/json
        * application/javascript
        * image/png
        * image/jpeg
        * image/svg+xml

        Parameters
        ----------
        source : str
            A string that give the function or method that created the data,
            such as 'IPython.core.page'.
        data : dict
            A dictionary having keys that are valid MIME types (like
            'text/plain' or 'image/svg+xml') and values that are the data for
            that MIME type. The data itself must be a JSON'able data
            structure. Minimally all data should have the 'text/plain' data,
            which can be displayed by all frontends. If more than the plain
            text is given, it is up to the frontend to decide which
            representation to use.
        metadata : dict
            A dictionary for metadata related to the data. This can contain
            arbitrary key, value pairs that frontends can use to interpret
            the data. Metadata specific to each mime-type can be specified
            in the metadata dict with the same mime-type keys as
            the data itself.
        """
        # The default is to simply write the plain text data using io.stdout.
        if 'text/plain' in data:
            print(data['text/plain'], file=io.stdout)

    def clear_output(self, stdout=True, stderr=True, other=True):
        """Clear the output of the cell receiving output."""
        # '\033[2K' erases the current terminal line; '\r' returns the
        # cursor to the start of the line.
        if stdout:
            print('\033[2K\r', file=io.stdout, end='')
            io.stdout.flush()
        if stderr:
            print('\033[2K\r', file=io.stderr, end='')
            io.stderr.flush()
def publish_display_data(source, data, metadata=None):
    """Publish data and metadata to all frontends via the active shell.

    Delegates to the ``display_pub`` of the current
    :class:`~IPython.core.interactiveshell.InteractiveShell` instance; see
    the ``display_data`` message in the messaging documentation for more
    details about this message type.

    The currently implemented MIME types are text/plain, text/html,
    text/latex, application/json, application/javascript, image/png,
    image/jpeg and image/svg+xml.

    Parameters
    ----------
    source : str
        A string that give the function or method that created the data,
        such as 'IPython.core.page'.
    data : dict
        A dictionary having keys that are valid MIME types (like
        'text/plain' or 'image/svg+xml') and values that are the data for
        that MIME type. The data itself must be a JSON'able data
        structure. Minimally all data should have the 'text/plain' data,
        which can be displayed by all frontends. If more than the plain
        text is given, it is up to the frontend to decide which
        representation to use.
    metadata : dict
        A dictionary for metadata related to the data. This can contain
        arbitrary key, value pairs that frontends can use to interpret
        the data. mime-type keys matching those in data can be used
        to specify metadata about particular representations.
    """
    # imported lazily here (presumably to avoid an import cycle at module
    # load time - confirm before hoisting)
    from IPython.core.interactiveshell import InteractiveShell
    InteractiveShell.instance().display_pub.publish(source, data, metadata)
| {
"repo_name": "marcoantoniooliveira/labweb",
"path": "oscar/lib/python2.7/site-packages/IPython/core/displaypub.py",
"copies": "2",
"size": "5918",
"license": "bsd-3-clause",
"hash": -2574726621297347600,
"line_mean": 35.0853658537,
"line_max": 78,
"alpha_frac": 0.5904021629,
"autogenerated": false,
"ratio": 4.842880523731588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6433282686631588,
"avg_score": null,
"num_lines": null
} |
"""An interface for searching the Matchlight fingerprints database."""
from __future__ import absolute_import
import datetime
import json
import six
import matchlight.error
import pylibfp
class SearchMethods(object):
    """Provides methods for interfacing with the search API."""

    def __init__(self, ml_connection):  # noqa: D205,D400
        """Initializes a search interface with the given Matchlight
        connection.

        Args:
            ml_connection (:class:`~.Connection`): A Matchlight
                connection instance.

        """
        self.conn = ml_connection

    def search(self, query=None, email=None, ssn=None, phone=None,
               fingerprints=None):
        """Performs a Matchlight search.

        Provides a retrospective search capability. User can only
        perform one search type at time. Search type is specified using
        keyword arguments.

        Example:
            Search for text::

                >>> ml.search(query="magic madness heaven sin")

            Search for an email address::

                >>> ml.search(email="familybird@terbiumlabs.com")

            Search for a social security number::

                >>> ml.search(ssn="000-00-0000")

            Search for a phone number::

                >>> ml.search(phone="804-222-1111")

        Args:
            query (:obj:`str`, optional): A text query.
            email (:obj:`str`, optional): A valid email address.
            ssn (:obj:`str`, optional): A social security number.
            phone (:obj:`str`, optional): A phone number.
            fingerprints (:obj:`list` of :obj:`str`, optional): A sequence of
                Matchlight fingerprints, these will be searched as if one query.

        Returns:
            :obj:`list` of :obj:`dict`: Each search result returns a
                score, url, ts.

        """
        # Only search for one thing at a time.
        if sum(1 for k in (query, fingerprints, email, ssn, phone)
               if k is not None) != 1:
            raise matchlight.error.SDKError(
                'Input Error: Must specify exactly one search type per call.')
        if email:
            fingerprints = pylibfp.fingerprints_pii_email_address(str(email))
        elif phone:
            fingerprints = pylibfp.fingerprints_pii_phone_number(str(phone))
        elif ssn:
            fingerprints = pylibfp.fingerprints_pii_ssn(str(ssn))
        elif query:
            result_json = pylibfp.fingerprint(
                query, flags=pylibfp.OPTIONS_TILED)
            result = json.loads(result_json)
            fingerprints = result['data']['fingerprints']

        # The fingerprint helpers may return nested lists; the API expects
        # a flat list of fingerprint strings.
        def flatten_iter(x):
            if not isinstance(x, list):
                yield x
            else:
                for item in x:
                    # Same as ``yield from flatten_iter(item)`` but keeps
                    # Python 2 compatibility (this module imports six).
                    for leaf in flatten_iter(item):
                        yield leaf

        data = {'fingerprints': list(flatten_iter(fingerprints))}
        response = self.conn.request(
            '/detailed_search',
            data=json.dumps(data),
            endpoint=self.conn.search_endpoint,
        )
        try:
            results = response.json()['results']
        except KeyError:
            raise matchlight.error.SDKError('Failed to get search results')
        for result in results:
            for url in result['urls']:
                yield {
                    # PII Alerts are always 800; search results on PII
                    # fields should be as well.
                    'score': 800 if any(
                        [email, ssn, phone]
                    ) else result['score'],
                    'ts': datetime.datetime.fromtimestamp(float(url[0])),
                    'url': url[1]
                }

    def pii_search(self, email=None, limit=50):
        """Performs a Matchlight search specifically for PII.

        Provides a retrospective search capability designed specifically for
        finding compromised PII data.

        Search results are sorted & show which fields matched on each hit.
        Only exact matches are returned.

        Example:
                >>> ml.pii_search(email="familybird@terbiumlabs.com")

        Args:
            email (:obj:`str`, required): A valid email address.
            limit (:obj:`int`, optional): The number of Alerts to return,
                defaults to 50.

        Returns:
            :obj:`list` of :obj:`dict`: Each search result returns a
                source, ts, fields

        Raises:
            SDKError: If no search field was supplied.

        """
        # NOTE: the previous ``if not any(email)`` raised TypeError for
        # email=None (``any`` cannot iterate None); a plain truthiness
        # check covers both None and the empty string.
        if not email:
            raise matchlight.error.SDKError(
                'Input Error: At least one field is required.'
            )
        request_data = {}
        if email:
            request_data['email_fingerprints'] = (
                pylibfp.fingerprints_pii_email_address(str(email))
            )
        if limit:
            request_data['limit'] = limit
        response = self.conn.request(
            '/pii_search',
            data=json.dumps(request_data),
            endpoint=self.conn.search_endpoint,
        )
        try:
            results = response.json()['results']
        except KeyError:
            raise matchlight.error.SDKError('Failed to get search results')
        for result in results:
            # This result can seemingly be in different formats: the 'ts'
            # field may be an int, a numeric string, or an ISO timestamp.
            try:
                result['ts'] = int(result['ts'])
            except ValueError:
                pass
            if isinstance(result['ts'], six.text_type):
                result['ts'] = datetime.datetime.strptime(
                    result['ts'],
                    '%Y-%m-%dT%H:%M:%S'
                )
            elif isinstance(result['ts'], int):
                result['ts'] = datetime.datetime.fromtimestamp(
                    float(result['ts'])
                )
            yield result
| {
"repo_name": "TerbiumLabs/python-matchlightsdk",
"path": "src/matchlight/search.py",
"copies": "1",
"size": "5952",
"license": "bsd-3-clause",
"hash": -3089249613834879500,
"line_mean": 32.251396648,
"line_max": 78,
"alpha_frac": 0.5398185484,
"autogenerated": false,
"ratio": 4.613953488372093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5653772036772093,
"avg_score": null,
"num_lines": null
} |
"""An interface for the Slack web API
https://github.com/Shir0kamii/slack-client
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
project_root = path.abspath(path.dirname(__file__))

# Pull the long description straight out of the project's README.
with open(path.join(project_root, 'README.md'), encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='slack_client',

    # PEP440-compliant version; for single-sourcing the version across
    # setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.3.0',

    description='An interface for the Slack web API',
    long_description=long_description,

    # Project homepage and author details.
    url='https://github.com/Shir0kamii/slack-client',
    author='shir0kamii',
    author_email='Shir0kamii@prologin.org',
    license='MIT',

    # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # Maturity: 3 - Alpha, 4 - Beta, 5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Intended audience and topics.
        'Intended Audience :: Developers',
        'Topic :: Communications',
        'Topic :: Software Development :: User Interfaces',

        # Should match the "license" argument above.
        'License :: OSI Approved :: MIT License',

        # Supported Python versions (Python 3 only).
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],

    keywords='slack api interface',

    # Packages shipped with this distribution.
    packages=['slack_client'],

    # Run-time dependencies, installed by pip alongside the project. See
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['requests'],
)
| {
"repo_name": "Shir0kamii/slack-client",
"path": "setup.py",
"copies": "1",
"size": "2472",
"license": "mit",
"hash": 585912390156804600,
"line_mean": 31.96,
"line_max": 79,
"alpha_frac": 0.6662621359,
"autogenerated": false,
"ratio": 4.085950413223141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5252212549123141,
"avg_score": null,
"num_lines": null
} |
"""An interface that manages Digital Signal Processor (DSP) Connections."""
from ctypes import *
from .enums import DSPCONNECTION_TYPE
from .fmodobject import *
from .globalvars import get_class
class DSPConnection(FmodObject):
    """An interface that manages Digital Signal Processor (DSP) Connections."""

    @property
    def input(self):
        """The connection's input DSP unit.

        If the input was just added, the connection might not be ready because
        the DSP system is still queued to be connected, and may need to wait
        several milliseconds for the next mix to occur. If so the function will
        return :py:attr:`~pyfmodex.enums.RESULT.NOTREADY` and `input` will be
        None.

        :type: DSP
        """
        dsp_ptr = c_void_p()
        self._call_fmod("FMOD_DSPConnection_GetInput", byref(dsp_ptr))
        return get_class("DSP")(dsp_ptr)

    @property
    def mix(self):
        """The connection's volume scale.

        Volume scale applied to the input before being passed to the output.

        - 0: silent
        - 1: full
        - Negative level: inverts the signal
        - Values larger than 1: amplify the signal

        :type: float
        :default: 1
        :range: -inf, inf
        """
        m_val = c_float()
        self._call_fmod("FMOD_DSPConnection_GetMix", byref(m_val))
        return m_val.value

    @mix.setter
    def mix(self, mix):
        self._call_fmod("FMOD_DSPConnection_SetMix", c_float(mix))

    def get_mix_matrix(self, hop=0):
        """Retrieve a 2 dimensional pan matrix that maps the signal from input
        channels (columns) to output speakers (rows).

        :param int hop: Width (total number of columns) in destination matrix.
            Can be larger than in_channels to represent a smaller valid region
            inside a larger matrix. When 0, the full matrix is retrieved.
        :returns: Volume levels in row-major order. Each row represents an
            output speaker, each column represents an input channel. A
            matrix element is referenced as out_channel *
            (hop or in_channels) + in_channel.
        :rtype: list of floats
        """
        in_channels = c_int()
        out_channels = c_int()
        # First call only queries the channel counts (matrix=None).
        self._call_fmod(
            "FMOD_DSPConnection_GetMixMatrix",
            None,
            byref(out_channels),
            byref(in_channels),
            hop,
        )
        # The buffer holds one row per output channel; each row is ``hop``
        # entries wide when a custom hop is given, otherwise ``in_channels``
        # wide. (Allocating only ``hop`` floats, as before, was too small
        # and let FMOD write past the end of the buffer.)
        row_width = hop or in_channels.value
        matrix = (c_float * (row_width * out_channels.value))()
        self._call_fmod(
            "FMOD_DSPConnection_GetMixMatrix",
            matrix,
            byref(out_channels),
            byref(in_channels),
            hop,
        )
        return list(matrix)

    def set_mix_matrix(self, matrix, out_channels, in_channels):
        """Set a 2 dimensional pan matrix that maps the signal from input
        channels (columns) to output speakers (rows).

        Matrix element values can be below 0 to invert a signal and above 1 to
        amplify the signal. Note that increasing the signal level too far may
        cause audible distortion.

        :param list matrix: List of volume levels (float) in row-major order.
            Each row represents an output speaker, each column represents an
            input channel.
        :param int out_channels: Number of output channels (rows) in matrix.
            Always assumed 0 if `matrix` is empty.
        :param int in_channels: Number of input channels (columns) in matrix.
            Always assumed 0 if `matrix` is empty.
        """
        if not matrix:
            in_channels = 0
            out_channels = 0
        raw_matrix = (c_float * (in_channels * out_channels))(*matrix)
        self._call_fmod("FMOD_DSPConnection_SetMixMatrix", raw_matrix, out_channels, in_channels, 0)

    @property
    def output(self):
        """The connection's output DSP unit.

        If the output was just added, the connection might not be ready because
        the DSP system is still queued to be connected, and may need to wait
        several milliseconds for the next mix to occur. If so the function will
        return :py:attr:`~pyfmodex.enums.RESULT.NOTREADY` and `output` will be
        None.

        :type: DSP
        """
        o_ptr = c_void_p()
        self._call_fmod("FMOD_DSPConnection_GetOutput", byref(o_ptr))
        return get_class("DSP")(o_ptr)

    @property
    def type(self):
        """The type of the connection between two DSP units.

        :type: DSPCONNECTION_TYPE
        """
        typ = c_int()
        self._call_fmod("FMOD_DSPConnection_GetType", byref(typ))
        return DSPCONNECTION_TYPE(typ.value)
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/dsp_connection.py",
"copies": "1",
"size": "4690",
"license": "mit",
"hash": -6347538798286331000,
"line_mean": 35.3565891473,
"line_max": 100,
"alpha_frac": 0.6147121535,
"autogenerated": false,
"ratio": 4.078260869565217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00017613568154668775,
"num_lines": 129
} |
"""An interface that manages Sound Groups."""
from ctypes import byref, c_float, c_int, c_void_p, create_string_buffer
from .enums import SOUNDGROUP_BEHAVIOR
from .fmodobject import FmodObject
from .globalvars import get_class
class SoundGroup(FmodObject):
    """An interface that manages Sound Groups."""

    @property
    def max_audible(self):
        """Maximum number of playbacks that may be audible at once in this
        sound group.

        :type: int
        """
        count = c_int()
        self._call_fmod("FMOD_SoundGroup_GetMaxAudible", byref(count))
        return count.value

    @max_audible.setter
    def max_audible(self, val):
        self._call_fmod("FMOD_SoundGroup_SetMaxAudible", val)

    @property
    def max_audible_behavior(self):
        """Behavior used when more sounds play than :py:attr:`max_audible`
        allows.

        :type: SOUNDGROUP_BEHAVIOR
        """
        raw_behavior = c_int()
        self._call_fmod("FMOD_SoundGroup_GetMaxAudibleBehavior", byref(raw_behavior))
        return SOUNDGROUP_BEHAVIOR(raw_behavior.value)

    @max_audible_behavior.setter
    def max_audible_behavior(self, behavior):
        self._call_fmod("FMOD_SoundGroup_SetMaxAudibleBehavior", behavior.value)

    @property
    def mute_fade_speed(self):
        """The current mute fade time; 0 means no fading.

        The fade speed is ignored for any mode other than
        :py:attr:`~pyfmodex.enums.SOUNDGROUP_BEHAVIOR.MUTE`.

        When more sounds are playing in a SoundGroup than are specified with
        :py:attr:`max_audible`, the least important
        :py:class:`~pyfmodex.sound.Sound` (i.e. lowest priority / lowest
        audible volume due to 3D position, volume etc.) will fade to silence
        if :py:attr:`~pyfmodex.enums.SOUNDGROUP_BEHAVIOR.MUTE` is used, and
        any previous sounds that were silent because of this rule will fade
        in if they are more important.

        :type: float
        """
        fade_time = c_float()
        self._call_fmod("FMOD_SoundGroup_GetMuteFadeSpeed", byref(fade_time))
        return fade_time.value

    @mute_fade_speed.setter
    def mute_fade_speed(self, speed):
        self._call_fmod("FMOD_SoundGroup_SetMuteFadeSpeed", c_float(speed))

    @property
    def name(self):
        """The name of the sound group.

        :type: str
        """
        name_buffer = create_string_buffer(512)
        self._call_fmod("FMOD_SoundGroup_GetName", name_buffer, 512)
        return name_buffer.value

    @property
    def num_playing(self):
        """Number of channels currently playing in this sound group.

        Counts playing :py:class:`Channels <pyfmodex.channel.Channel>`: a
        :py:class:`SoundGroup` holding a single
        :py:class:`~pyfmodex.sound.Sound` that is playing twice reports two.

        :type: int
        """
        playing_count = c_int()
        self._call_fmod("FMOD_SoundGroup_GetNumPlaying", byref(playing_count))
        return playing_count.value

    @property
    def num_sounds(self):
        """Current number of sounds in this sound group.

        :type: int
        """
        sound_count = c_int()
        self._call_fmod("FMOD_SoundGroup_GetNumSounds", byref(sound_count))
        return sound_count.value

    def get_sound(self, idx):
        """Return the sound in this group at the given index.

        :param int idx: Index of the sound.
        :returns: Sound object
        :rtype: Sound
        """
        sound_ptr = c_void_p()
        self._call_fmod("FMOD_SoundGroup_GetSound", idx, byref(sound_ptr))
        return get_class("Sound")(sound_ptr)

    @property
    def system_object(self):
        """The parent System object.

        :type: System
        """
        system_ptr = c_void_p()
        self._call_fmod("FMOD_SoundGroup_GetSystemObject", byref(system_ptr))
        return get_class("System")(system_ptr)

    @property
    def volume(self):
        """The volume of the sound group.

        :type: float
        """
        level = c_float()
        self._call_fmod("FMOD_SoundGroup_GetVolume", byref(level))
        return level.value

    @volume.setter
    def volume(self, vol):
        self._call_fmod("FMOD_SoundGroup_SetVolume", c_float(vol))

    def release(self):
        """Release the soundgroup object and return all sounds back to the
        master sound group.

        You cannot release the master :py:class:`SoundGroup`.
        """
        self._call_fmod("FMOD_SoundGroup_Release")

    def stop(self):
        """Stop all sounds within this soundgroup."""
        self._call_fmod("FMOD_SoundGroup_Stop")
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/sound_group.py",
"copies": "1",
"size": "4543",
"license": "mit",
"hash": 5453038591049585000,
"line_mean": 29.4899328859,
"line_max": 81,
"alpha_frac": 0.6178736518,
"autogenerated": false,
"ratio": 3.6141607000795544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47320343518795543,
"avg_score": null,
"num_lines": null
} |
"""An interface that manages virtual 3D reverb spheres."""
from ctypes import *
from .fmodobject import FmodObject
from .structures import REVERB_PROPERTIES, VECTOR
from .utils import check_type
class Reverb3D(FmodObject):
    """An interface that manages virtual 3D reverb spheres."""

    @property
    def _threed_attrs(self):
        """The 3D attributes of a reverb sphere.

        :type: list with

            - Position in 3D space representing the center of the reverb as
              a list of three coordinate floats
            - Distance from the centerpoint within which the reverb will
              have full effect
            - Distance from the centerpoint beyond which the reverb will
              have no effect
        """
        center = VECTOR()
        near = c_float()
        far = c_float()
        self._call_fmod(
            "FMOD_Reverb3D_Get3DAttributes", byref(center), byref(near), byref(far)
        )
        return [center.to_list(), near.value, far.value]

    @_threed_attrs.setter
    def _threed_attrs(self, attrs):
        center = VECTOR.from_list(attrs[0])
        self._call_fmod(
            "FMOD_Reverb3D_Set3DAttributes",
            byref(center),
            c_float(attrs[1]),
            c_float(attrs[2]),
        )

    @property
    def position(self):
        """Position in 3D space representing the center of the reverb.

        :type: list of three coordinate floats
        """
        return self._threed_attrs[0]

    @position.setter
    def position(self, pos):
        current = self._threed_attrs
        current[0] = pos
        self._threed_attrs = current

    @property
    def min_distance(self):
        """Distance from the centerpoint within which the reverb will have
        full effect.

        :type: float
        """
        return self._threed_attrs[1]

    @min_distance.setter
    def min_distance(self, mindist):
        current = self._threed_attrs
        current[1] = mindist
        self._threed_attrs = current

    @property
    def max_distance(self):
        """Distance from the centerpoint beyond which the reverb will have
        no effect.

        :type: float
        """
        return self._threed_attrs[2]

    @max_distance.setter
    def max_distance(self, maxdist):
        current = self._threed_attrs
        current[2] = maxdist
        self._threed_attrs = current

    @property
    def active(self):
        """The active state of the reverb sphere.

        :type: bool
        """
        state = c_bool()
        self._call_fmod("FMOD_Reverb3D_GetActive", byref(state))
        return state.value

    @active.setter
    def active(self, active):
        self._call_fmod("FMOD_Reverb3D_SetActive", active)

    @property
    def properties(self):
        """The environmental properties of a reverb sphere.

        :type: REVERB_PROPERTIES
        """
        prop_struct = REVERB_PROPERTIES()
        self._call_fmod("FMOD_Reverb3D_GetProperties", byref(prop_struct))
        return prop_struct

    @properties.setter
    def properties(self, props):
        check_type(props, REVERB_PROPERTIES)
        self._call_fmod("FMOD_Reverb3D_SetProperties", byref(props))

    def release(self):
        """Release the memory for a reverb object and make it inactive.

        If you release all Reverb3D objects and have not added a new
        Reverb3D object,
        :py:meth:`~pyfmodex.system.System.set_reverb_properties` should be
        called to reset the reverb properties.
        """
        self._call_fmod("FMOD_Reverb3D_Release")
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/reverb.py",
"copies": "1",
"size": "3530",
"license": "mit",
"hash": -7798097461079376000,
"line_mean": 27.6991869919,
"line_max": 87,
"alpha_frac": 0.6042492918,
"autogenerated": false,
"ratio": 3.8453159041394334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4949565195939434,
"avg_score": null,
"num_lines": null
} |
"""An interface that represents the shared APIs between
:py:class:`pyfmodex.channel.Channel` and
:py:class:`pyfmodex.channel_group.ChannelGroup`.
"""
# pylint: disable=too-many-public-methods
# That's not our fault... :-)
from ctypes import *
from .callback_prototypes import CHANNELCONTROL_CALLBACK
from .cone_settings import ConeSettings
from .flags import MODE
from .fmodobject import FmodObject
from .globalvars import get_class
from .structobject import Structobject as so
from .structures import VECTOR
from .utils import check_type
class ChannelControl(FmodObject):
"""An interface that represents the shared APIs between
:py:class:`pyfmodex.channel.Channel` and
:py:class:`pyfmodex.channel_group.ChannelGroup`.
"""
    def __init__(self, ptr):
        """Wrap the given native FMOD handle.

        :param ptr: Native handle, forwarded to
            :py:class:`~pyfmodex.fmodobject.FmodObject`.
        """
        super().__init__(ptr)
        self._custom_rolloff_curve = None  # To keep the custom rolloff curve alive
def _call_specific(self, specific_function_suffix, *args):
return self._call_fmod(
"FMOD_%s_%s" % (self.__class__.__name__, specific_function_suffix), *args
)
def add_dsp(self, index, dsp):
"""Add a DSP unit to the specified index in the DSP chain.
:param int index: Offset into the DSP chain. Has to be between 0 and
the number of DSPs.
:param DSP dsp: DSP unit to be added.
"""
check_type(dsp, get_class("DSP"))
c_ptr = c_void_p()
if hasattr(index, "value"):
index = index.value
self._call_specific("AddDSP", index, dsp._ptr, byref(c_ptr))
return get_class("DSP_Connection")(c_ptr)
def add_fade_point(self, dsp_clock, volume):
"""Add a sample accurate fade point at a time relative to the parent
ChannelGroup DSP clock.
Fade points are scaled against other volume settings and in-between
each fade point the volume will be linearly ramped.
To perform sample accurate fading use :py:attr:`dsp_clock` to
query the parent clock value. If a parent ChannelGroup changes its
pitch, the fade points will still be correct as the parent clock rate
is adjusted by that pitch.
:param int dsp_clock: DSP clock of the parent
:py:class:`pyfmodex.channel_group.ChannelGroup` to set the fade
point volume.
:param float volume: Volume level at the given dsp_clock. Values above
1.0 amplify the signal.
"""
self._call_specific("AddFadePoint", c_ulonglong(dsp_clock), c_float(volume))
@property
def _threed_attrs(self):
"""The 3D position and velocity used to apply panning, attenuation and
doppler.
:type: list[VECTOR]
"""
pos = VECTOR()
vel = VECTOR()
self._call_specific("Get3DAttributes", byref(pos), byref(vel))
return [pos.to_list(), vel.to_list()]
@_threed_attrs.setter
def _threed_attrs(self, attrs):
pos = VECTOR.from_list(attrs[0])
vel = VECTOR.from_list(attrs[1])
self._call_specific("Set3DAttributes", byref(pos), byref(vel))
@property
def position(self):
"""The position in 3D space used to apply panning and attenuation.
:type: VECTOR
"""
return self._threed_attrs[0]
@position.setter
def position(self, pos):
self._threed_attrs = (pos, self._threed_attrs[1])
@property
def velocity(self):
"""The velocity in 3D space used for doppler.
:type: VECTOR
"""
return self._threed_attrs[1]
@velocity.setter
def velocity(self, vel):
self._threed_attrs = (self._threed_attrs[0], vel)
@property
def cone_orientation(self):
"""The orientation of a 3D cone shape, used for simulated occlusion.
Normalized orientation vector, which represents the direction of the
sound cone.
:type: list with x, y, and z float values
"""
ori = VECTOR()
self._call_specific("Get3DConeOrientation", byref(ori))
return ori.to_list()
@cone_orientation.setter
def cone_orientation(self, ori):
vec = VECTOR.from_list(ori)
self._call_specific("Set3DConeOrientation", byref(vec))
@property
def cone_settings(self):
"""The angles and attenuation levels of a 3D cone shape, for simulated
occlusion which is based on direction.
:rtype: ~pyfmodex.cone_settings.ConeSettings
"""
return ConeSettings(self._ptr, self.__class__.__name__)
@property
def custom_rolloff(self):
"""The current custom rolloff shape for 3D distance attenuation.
To set a curve, provide a list of objects that can be treated as a list
of [x, y, z] values with x = distance, y = volume (0 to 1) and z set to
0.
:type: list of [x, y, z]-lists where x is distance, y is volume (0 to
1) and z is undefined.
"""
num = c_int()
self._call_specific("Get3DCustomRolloff", None, byref(num))
curve = (VECTOR * num.value)()
curve = POINTER(VECTOR)()
self._call_specific("Get3DCustomRolloff", byref(curve), None)
return [curve[i].to_list() for i in range(num.value)]
@custom_rolloff.setter
def custom_rolloff(self, curve):
self._custom_rolloff_curve = (VECTOR * len(curve))(*[VECTOR.from_list(lst) for lst in curve])
self._call_specific("Set3DCustomRolloff", self._custom_rolloff_curve, len(self._custom_rolloff_curve))
@property
def threed_distance_filter(self):
"""The override values for the 3D distance filter.
If distance filtering is enabled, by default the 3D engine will
automatically attenuate frequencies using a lowpass and a highpass
filter, based on 3D distance. This function allows the distance filter
effect to be set manually, or to be set back to 'automatic' mode.
:type: Structobject with the following members:
- custom: Boolean indicating wheter to override automatic distance
filtering and use custom_level instead
- custom_level: Float between 0 and 1 representing the attenuation
factor where 1 represents no attenuation and 0 represents
complete attenuation.
- center_frequency: Integer between 10 and 22050 showing the center
frequency of the band-pass filter used to simulate distance
attenuation, 0 for default, or
:py:class:`~pyfmodex.structures.ADVANCEDSETTINGS`
"""
custom = c_bool()
custom_level = c_float()
center_frequency = c_float()
self._call_specific(
"Get3DDistanceFilter", byref(custom), byref(custom_level), byref(custom)
)
return so(
custom=custom.value,
custom_level=custom_level.value,
center_frequency=center_frequency.value,
)
@threed_distance_filter.setter
def threed_distance_filter(self, cfg):
self._call_specific(
"Set3DDistanceFilter",
cfg.custom,
c_float(cfg.custom_level),
c_float(cfg.center_frequency),
)
@property
def doppler_level(self):
"""The amount by which doppler is scaled.
:type: Doppler scale (float) where 0 represents no doppler, 1
represents natural doppler and 5 represents exaggerated doppler.
"""
level = c_float()
self._call_specific("Get3DDopplerLevel", byref(level))
return level.value
@doppler_level.setter
def doppler_level(self, level):
self._call_specific("Set3DDopplerLevel", c_float(level))
@property
def level(self):
"""The blend between 3D panning and 2D panning.
The :py:class:`~pyfmodex.flags.MODE` flag THREED must be set on this
object otherwise :py:const:`~pyfmodex.enums.RESULT.NEEDS3D` is
returned.
:type: 3D pan level (float) where 0 represents panning/attenuating
solely with 2D panning functions and 1 represents solely 3D.
"""
level = c_float()
self._call_specific("Get3DLevel", byref(level))
return level.value
@level.setter
def level(self, level):
self._call_specific("Set3DLevel", c_float(level))
@property
def _min_max_distance(self):
"""The minimum and maximum distances used to calculate the 3D rolloff
attenuation.
:type: two-tuple with
- mindistance: Distance (float) from the source where attenuation
begins.
- maxdistance: Distance (float) from the source where attenuation
ends.
"""
mindistance = c_float()
maxdistance = c_float()
self._call_specific(
"Get3DMinMaxDistance", byref(mindistance), byref(maxdistance)
)
return (mindistance.value, maxdistance.value)
@_min_max_distance.setter
def _min_max_distance(self, dists):
self._call_specific("Set3DMinMaxDistance", c_float(dists[0]), c_float(dists[1]))
@property
def min_distance(self):
"""The minimum distance used to calculate the 3D rolloff attenuation.
The distance from the source where attenuation begins.
:type: float
"""
return self._min_max_distance[0]
@min_distance.setter
def min_distance(self, dist):
self._min_max_distance = (dist, self._min_max_distance[1])
@property
def max_distance(self):
"""The maximum distance used to calculate the 3D rolloff attenuation.
The distance from the source where attenuation ends.
:type: float
"""
return self._min_max_distance[1]
@max_distance.setter
def max_distance(self, dist):
self._min_max_distance = (self._min_max_distance[0], dist)
@property
def _occlusion(self):
"""The 3D attenuation factors for the direct and reverb paths.
There is a reverb path/send when `set_reverb_wet` has been used,
reverb_occlusion controls its attenuation.
If the System has been initialized with The
:py:class:`INIT <pyfmodex.flags.INIT_FLAGS>` flags
CHANNEL_DISTANCEFILTER or CHANNEL_LOWPASS the direct_occlusion is
applied as frequency filtering rather than volume attenuation.
:type: two-tuple with
- direct_occlusion: Occlusion factor (float) for the direct path
where 0 represents no occlusion and 1 represents full occlusion.
- reverb_occlusion: Occlusion factor (float) for the reverb path
where 0 represents no occlusion and 1 represents full occlusion.
"""
direct = c_float()
reverb = c_float()
self._call_specific("Get3DOcclusion", byref(direct), byref(reverb))
return (direct.value, reverb.value)
@_occlusion.setter
def _occlusion(self, occs):
self._call_specific("Set3DOcclusion", c_float(occs[0]), c_float(occs[1]))
@property
def direct_occlusion(self):
"""Occlusion factor for the direct path where 0 represents no occlusion
and 1 represents full occlusion.
If the System has been initialized with The
:py:class:`INIT <pyfmodex.flags.INIT_FLAGS>` flags
CHANNEL_DISTANCEFILTER or CHANNEL_LOWPASS the direct_occlusion is
applied as frequency filtering rather than volume attenuation.
:type: float
"""
return self._occlusion[0]
@direct_occlusion.setter
def direct_occlusion(self, occ):
self._occlusion = (occ, self._occlusion[1])
@property
def reverb_occlusion(self):
"""Occlusion factor for the reverb path where 0 represents no occlusion
and 1 represents full occlusion.
There is a reverb path/send when
:py:meth:`~pyfmodex.ChannelControl.set_reverb_wet` has been used,
reverb_occlusion controls its attenuation.
:type: float
"""
return self._occlusion[1]
@reverb_occlusion.setter
def reverb_occlusion(self, occ):
self._occlusion = (self._occlusion[0], occ)
@property
def threed_spread(self):
"""The spread of a 3D sound in speaker space.
Angle over which the sound is spread. Between 0 and 360.
:type: float
"""
spread = c_float()
self._call_specific("Get3DSpread", byref(spread))
return spread.value
@threed_spread.setter
def threed_spread(self, spread):
"""The spread of a 3D sound in speaker space.
:param float angle: Angle over which the sound is spread. Between 0 and
360.
"""
self._call_specific("Set3DSpread", c_float(spread))
@property
def audibility(self):
"""An estimation of the output volume.
Estimated volume is calculated based on 3D spatialization, occlusion,
API volume levels and DSPs used.
While this does not represent the actual waveform, Channels playing FSB
files will take into consideration the overall peak level of the file
(if available).
This value is used to determine which Channels should be audible and
which Channels to virtualize when resources are limited.
:returns: Estimated audibility.
:rtype: float
"""
aud = c_float()
self._call_specific("GetAudibility", byref(aud))
return aud.value
def get_dsp(self, index):
"""The DSP unit at the specified index in the DSP chain.
:param int index: Offset into the DSP chain, see
:py:class:`~pyfmodex.enums.CHANNELCONTROL_DSP_INDEX` for special
named offsets for 'head' and 'tail' and 'fader' units.
"""
dsp = c_void_p()
self._call_specific("GetDSP", index, byref(dsp))
return get_class("DSP")(dsp)
@property
def dsp_clock(self):
"""The DSP clock values at this point in time.
:returns: Structobject with the following members:
- dsp_clock: DSP clock value for the tail DSP node (int).
- parent_clock: DSP clock value for the tail DSP node of the parent
ChannelGroup (int).
:rtype: Structobject
"""
clock = c_ulonglong()
parent = c_ulonglong()
self._call_specific("GetDSPClock", byref(clock), byref(parent))
return so(dsp_clock=clock.value, parent_clock=parent.value)
def get_dsp_index(self, dsp):
"""The index of a DSP inside the Channel or ChannelGroup's DSP chain.
:param DSP dsp: DSP unit that exists in the DSP chain.
:returns: Offset into the DSP chain.
:rtype: int
"""
index = c_int()
self._call_specific("GetDSPIndex", dsp._ptr, byref(index))
return index.value
def set_dsp_index(self, dsp, index):
"""The index in the DSP chain of the specified DSP.
:param DSP dsp: DSP unit that exists in the DSP chain.
:param int index: Offset into the DSP chain, see
:py:class:`~pyfmodex.enums.CHANNELCONTROL_DSP_INDEX` for special
named offsets for 'head' and 'tail' and 'fader' units.
"""
self._call_specific("SetDSPIndex", dsp._ptr, index)
@property
def delay(self):
"""A sample accurate start (and/or stop) time relative to the parent
ChannelGroup DSP clock.
:returns: Structobject with the following members:
- dspclock_start: DSP clock (int) of the parent ChannelGroup to
audibly start playing sound at.
- dspclock_end: DSP clock (int) of the parent ChannelGroup to
audibly stop playing sound at.
- stop_channels:
- True: When dspclock_end is reached, behaves like
:py:meth:`stop` has been called.
- False: When dspclock_end is reached, behaves like
:py:attr:`paused` is True, a subsequent dspclock_start allows
it to resume.
:rtype: Structobject
"""
dspclock_start = c_ulonglong()
dspclock_end = c_ulonglong()
stop_channels = c_bool()
self._call_specific(
"GetDelay", byref(dspclock_start), byref(dspclock_end), byref(stop_channels)
)
return so(
dsp_start=dspclock_start.value,
dsp_end=dspclock_end.value,
stop_channels=stop_channels.value,
)
@delay.setter
def delay(self, delay):
    """A sample accurate start (and/or stop) time relative to the parent
    ChannelGroup DSP clock.

    :param delay: any object with the following attributes:

        - dsp_start: DSP clock (int) of the parent ChannelGroup to
          audibly start playing sound at.
        - dsp_end: DSP clock (int) of the parent ChannelGroup to
          audibly stop playing sound at.
        - stop_channels:

            - True: When dsp_end is reached, behaves like
              :py:meth:`stop` has been called.
            - False: When dsp_end is reached, behaves like
              :py:attr:`paused` is True, a subsequent dsp_start allows
              it to resume.
    """
    self._call_specific(
        "SetDelay",
        c_ulonglong(delay.dsp_start),
        c_ulonglong(delay.dsp_end),
        delay.stop_channels,
    )
@property
def fade_points(self):
    """Information about stored fade points.

    :returns:
        - point_dspclock: list of DSP clock values that represent the
          fade point times.
        - point_volume: list of volume levels that represent the fade
          point values. Volume levels cannot be negative.
    :rtype: two-tuple of lists
    """
    count = c_uint()
    # First call queries only how many fade points are stored.
    self._call_specific("GetFadePoints", byref(count), None, None)
    # Second call fills the appropriately sized clock/volume arrays.
    point_dspclock = (c_ulonglong * count.value)()
    point_volume = (c_float * count.value)()
    self._call_specific("GetFadePoints", byref(count), point_dspclock, point_volume)
    return list(point_dspclock), list(point_volume)
@property
def low_pass_gain(self):
    """The gain of the dry signal when built in lowpass / distance
    filtering is applied.

    Gain level where 0 represents silent (full filtering) and 1
    represents full volume (no filtering).

    Requires the built in lowpass to be created with
    :py:class:`INIT <pyfmodex.flags.INIT_FLAGS>` flags CHANNEL_LOWPASS or
    CHANNEL_DISTANCEFILTER.

    :type: float
    """
    value = c_float()
    self._call_specific("GetLowPassGain", byref(value))
    return value.value

@low_pass_gain.setter
def low_pass_gain(self, gain):
    self._call_specific("SetLowPassGain", c_float(gain))
def get_mix_matrix(self, hop=0):
    """Retrieve a 2 dimensional pan matrix that maps the signal from input
    channels (columns) to output speakers (rows).

    Matrix element values can be below 0 to invert a signal and above 1 to
    amplify the signal.

    :param int hop: presumably the matrix row stride in elements, with 0
        meaning tightly packed — TODO confirm against the FMOD docs.
    :returns: Two dimensional list of volume levels in row-major order.
        Each row represents an output speaker, each column represents an
        input channel.
    :rtype: list of floats
    """
    in_channels = c_int()
    out_channels = c_int()
    # First call only queries the channel counts (matrix pointer is None).
    # NOTE(review): this call goes through _call_fmod with the
    # Channel-specific "FMOD_Channel_GetMixMatrix" even though this class
    # appears to be shared with ChannelGroup (other methods use
    # _call_specific) — verify this is intended for ChannelGroup objects.
    self._call_fmod(
        "FMOD_Channel_GetMixMatrix",
        None,
        byref(out_channels),
        byref(in_channels),
        hop,
    )
    # Second call fills a buffer sized from the reported counts.
    matrix = (c_float * (hop or in_channels.value * out_channels.value))()
    self._call_specific(
        "GetMixMatrix", matrix, byref(out_channels), byref(in_channels), hop
    )
    return list(matrix)
def set_mix_matrix(self, matrix, out_channels, in_channels):
    """Set a 2 dimensional pan matrix that maps the signal from input
    channels (columns) to output speakers (rows).

    Matrix element values can be below 0 to invert a signal and above 1 to
    amplify the signal. Note that increasing the signal level too far may
    cause audible distortion.

    :param list matrix: List of volume levels (float) in row-major order.
        Each row represents an output speaker, each column represents an
        input channel.
    :param int out_channels: Number of output channels (rows) in matrix.
        Always assumed 0 if `matrix` is empty.
    :param int in_channels: Number of input channels (columns) in matrix.
        Always assumed 0 if `matrix` is empty.
    """
    if not matrix:
        # An empty matrix clears the mapping regardless of the counts given.
        out_channels = 0
        in_channels = 0
    native_matrix = (c_float * (in_channels * out_channels))(*matrix)
    self._call_specific("SetMixMatrix", native_matrix, out_channels, in_channels, 0)
@property
def mode(self):
    """The playback mode bits that control how this object behaves.

    When changing the loop mode of sounds created with
    :py:meth:`~pyfmodex.system.System.create_stream` or the
    :py:class:`~pyfmodex.flags.MODE` flag CREATESTREAM, the stream may
    already have pre-buffered and executed its loop logic before this
    call, depending on the size of the sound versus the stream decode
    buffer (see
    :py:class:`~pyfmodex.structure_declarations.CREATESOUNDEXINFO`). If
    that happens, reflush the stream buffer by calling
    :py:meth:`~pyfmodex.channel.Channel.set_position`. This usually only
    occurs with sounds or loop points smaller than the decode buffer.

    When changing the loop mode of sounds created with
    :py:meth:`~pyfmodex.system.System.create_sound` or the
    :py:class:`~pyfmodex.flags.MODE` flag CREATESAMPLE: if the sound was
    set up with LOOP_OFF and is switched to LOOP_NORMAL here, it may
    click when playing the end of the sound, because preparing the PCM
    data past the sample end for the interpolators only happens when
    setting :py:attr:`~pyfmodex.sound.Sound.mode` (it cannot be done here
    since different Channels may use different loop modes for the same
    sound). To toggle looping at runtime without clicks, load the sound
    as looping first (or use its
    :py:attr:`~pyfmodex.sound.Sound.mode`) so the data is prepared.

    If :py:class:`~pyfmodex.flags.MODE` flags IGNOREGEOMETRY or
    VIRTUAL_PLAYFROMSTART are not specified, a previously specified flag
    will be cleared.

    :type: Playback mode bitfield. Test against a specific
        :py:class:`~pyfmodex.flags.MODE` with the AND operator or set
        more than one mode at once by combining them with the OR
        operator.
    """
    raw_mode = c_int()
    self._call_specific("GetMode", byref(raw_mode))
    return MODE(raw_mode.value)

@mode.setter
def mode(self, mode):
    self._call_specific("SetMode", mode.value)
@property
def mute(self):
    """The mute state.

    - True: silent
    - False: audible

    Mute is an additional control for volume; its effect is equivalent to
    setting the volume to zero.

    An individual mute state is kept for each object. Muting a parent
    :py:class:`~pyfmodex.channel_group.ChannelGroup` effectively mutes
    this object too, but a query returns the individual mute state. Use
    :py:attr:`~pyfmodex.channel_control.ChannelControl.audibility` to
    calculate overall audibility for a
    :py:class:`~pyfmodex.channel.Channel` or
    :py:class:`~pyfmodex.channel_group.ChannelGroup`.

    :type: bool
    """
    state = c_bool()
    self._call_specific("GetMute", byref(state))
    return state.value

@mute.setter
def mute(self, m):
    self._call_specific("SetMute", m)
@property
def num_dsps(self):
    """The number of DSP units in the DSP chain.

    :type: int
    """
    count = c_int()
    self._call_specific("GetNumDSPs", byref(count))
    return count.value
@property
def paused(self):
    """The paused state.

    - True: playback halted
    - False: playback active

    An individual pause state is kept for each object; pausing a parent
    :py:class:`~pyfmodex.channel_group.ChannelGroup` effectively pauses
    this object, but a query returns the individual pause state.

    :type: bool
    """
    state = c_bool()
    self._call_specific("GetPaused", byref(state))
    return state.value

@paused.setter
def paused(self, pausedstate):
    self._call_specific("SetPaused", pausedstate)
@property
def pitch(self):
    """The relative pitch / playback rate.

    0.5 represents half pitch (one octave down), 1 represents unmodified
    pitch and 2 represents double pitch (one octave up).

    An individual pitch value is kept for each object; a parent
    :py:class:`~pyfmodex.channel_group.ChannelGroup` pitch effectively
    scales the pitch of this object, but a query returns the individual
    pitch value.

    :type: float
    """
    pitch_value = c_float()
    self._call_specific("GetPitch", byref(pitch_value))
    return pitch_value.value

@pitch.setter
def pitch(self, val):
    self._call_specific("SetPitch", c_float(val))
def get_reverb_wet(self, instance):
    """Get the wet / send level for a particular reverb instance.

    :param int instance: Reverb instance index.
    :rtype: float
    """
    level = c_float()
    self._call_specific("GetReverbProperties", instance, byref(level))
    return level.value
def set_reverb_wet(self, instance, wet):
    """Set the wet / send level for a particular reverb instance.

    Channels are automatically connected to all existing reverb instances
    due to the default wet level of 1.
    :py:class:`ChannelGroups <pyfmodex.channel_group.ChannelGroup>`
    however send to no reverb by default, requiring an explicit call to
    this function.

    :py:class:`~pyfmodex.channel_group.ChannelGroup` reverb is optimal
    when you want to send one mixed signal to the reverb rather than many
    individual :py:class:`~pyfmodex.channel.Channel` sends — advisable to
    reduce CPU with many :py:class:`Channels <pyfmodex.channel.Channel>`
    inside a :py:class:`~pyfmodex.channel_group.ChannelGroup`.

    When setting a wet level for a
    :py:class:`~pyfmodex.channel_group.ChannelGroup`, any
    :py:class:`Channels <pyfmodex.channel.Channel>` under it keep their
    existing sends to the reverb; to avoid doubling up, explicitly set
    the Channel wet levels to 0.

    :param int instance: Reverb instance index.
    :param float wet: Send level for the signal to the reverb. 0 = none,
        1 = full. Negative level inverts the signal.
    """
    self._call_specific("SetReverbProperties", instance, c_float(wet))
@property
def system_object(self):
    """The System that created this object.

    :type: System
    """
    system_ptr = c_void_p()
    self._call_specific("GetSystemObject", byref(system_ptr))
    return get_class("System")(system_ptr)
@property
def volume(self):
    """The volume level.

    - 0: silent
    - 1: full
    - Negative level: inverts the signal
    - Value larger than 1: amplifies the signal

    Setting volume at a level higher than 1 can lead to
    distortion/clipping.

    :type: float
    """
    level = c_float()
    self._call_specific("GetVolume", byref(level))
    return level.value

@volume.setter
def volume(self, vol):
    self._call_specific("SetVolume", c_float(vol))
@property
def volume_ramp(self):
    """Ramp state: whether volume changes are ramped or instantaneous.

    - True: volume change is ramped
    - False: volume change is instantaneous

    When not paused, volume changes are normally ramped to the target
    value to avoid a pop sound; this setting allows that behavior to be
    overridden so volume changes apply immediately.

    :type: bool
    """
    ramp_state = c_bool()
    self._call_specific("GetVolumeRamp", byref(ramp_state))
    return ramp_state.value

@volume_ramp.setter
def volume_ramp(self, ramp):
    self._call_specific("SetVolumeRamp", ramp)
@property
def is_playing(self):
    """The playing state.

    A :py:class:`~pyfmodex.channel.Channel` is considered
    playing after :py:meth:`~pyfmodex.system.System.play_sound` or
    :py:meth:`~pyfmodex.system.System.play_dsp`, even if it is paused.

    A :py:class:`~pyfmodex.channel_group.ChannelGroup` is considered
    playing if it has any playing :py:class:`Channels
    <pyfmodex.channel.Channel>`.

    :type: bool
    """
    play_state = c_bool()
    # Fix: this FMOD call was previously issued twice in a row; the
    # duplicate was redundant and has been removed.
    self._call_specific("IsPlaying", byref(play_state))
    return play_state.value
def remove_dsp(self, dsp):
    """Remove the specified DSP unit from the DSP chain.

    :param DSP dsp: DSP unit to be removed.
    """
    self._call_specific("RemoveDSP", dsp._ptr)
def remove_fade_points(self, dsp_clock_start, dsp_clock_end):
    """Remove all fade points between the two specified clock values
    (inclusive).

    :param int dsp_clock_start: :py:class:`~pyfmodex.dsp.DSP` clock of
        the parent :py:class:`~pyfmodex.channel_group.ChannelGroup` at
        which to begin removing fade points.
    :param int dsp_clock_end: :py:class:`~pyfmodex.dsp.DSP` clock of the
        parent :py:class:`~pyfmodex.channel_group.ChannelGroup` at which
        to stop removing fade points.
    """
    self._call_specific(
        "RemoveFadePoints",
        c_ulonglong(dsp_clock_start),
        c_ulonglong(dsp_clock_end),
    )
def set_callback(self, callback):
    """Set the callback for ChannelControl level notifications.

    :param CHANNELCONTROL_CALLBACK callback: Callback to invoke.
    """
    cbi = CHANNELCONTROL_CALLBACK(callback)
    self._cb = callback
    # Fix: also keep a reference to the ctypes function wrapper itself.
    # Previously only the plain Python callback was stored, so the
    # CFUNCTYPE wrapper could be garbage collected while FMOD still held
    # its raw function pointer, crashing on the next notification.
    self._cbi = cbi
    self._call_specific("SetCallback", cbi)
def set_fade_point_ramp(self, dsp_clock, volume):
    """Add a volume ramp at the specified time in the future using fade
    points.

    Convenience method that creates a scheduled 64 sample fade point ramp
    from the current volume level to `volume`, arriving at `dsp_clock`
    time. Can be used in conjunction with :py:attr:`delay`. All fade
    points after `dsp_clock` will be removed.

    :param int dsp_clock: Time at which the ramp will end, as measured by
        the :py:class:`~pyfmodex.dsp.DSP` clock of the parent
        :py:class:`~pyfmodex.channel_group.ChannelGroup`.
    :param float volume: Volume level at the given dsp_clock.

        - 0: silent
        - 1: full
    """
    self._call_specific(
        "SetFadePointRamp", c_ulonglong(dsp_clock), c_float(volume)
    )
def set_mix_levels_input(self, *levels):
    """Set the incoming volume level for each channel of a multi-channel
    signal.

    Convenience method to avoid passing a matrix; it overwrites values
    set via :py:meth:`set_pan`, :py:meth:`set_mix_levels_output` and
    :py:meth:`set_mix_matrix`.

    :param list levels: volume levels for each incoming channel.

        - 0: silent
        - 1: full
        - Negative level: inverts the signal
        - Value larger than 1: amplifies the signal
    """
    native_levels = (c_float * len(levels))(*levels)
    self._call_specific("SetMixLevelsInput", native_levels, len(native_levels))
def set_mix_levels_output(
    self,
    frontleft,
    frontright,
    center,
    lfe,
    surroundleft,
    surroundright,
    backleft,
    backright,
):
    """Set the outgoing volume levels for each speaker.

    Specify the level for a given output speaker. If the channel count of
    the input and output do not match, channels are up/down mixed as
    appropriate to approximate the given speaker values (e.g. stereo
    input with 5.1 output uses the `center` parameter to distribute
    signal to the center speaker from the front left and right channels).

    Convenience method to avoid passing a matrix; it overwrites values
    set via :py:meth:`set_pan`, :py:meth:`set_mix_levels_input` and
    :py:meth:`set_mix_matrix`.

    The output channel count always matches the System speaker mode set
    via :py:attr:`~pyfmodex.system.System.software_format`. If the System
    is initialized with :py:class:`~pyfmodex.enums.SPEAKERMODE` RAW,
    calling this function will produce silence.

    All parameters are volume levels (float):

        - 0: silent
        - 1: full
        - Negative level: inverts the signal
        - Value larger than 1: amplifies the signal

    :param float frontleft: Volume level.
    :param float frontright: Volume level.
    :param float center: Volume level.
    :param float lfe: Volume level.
    :param float surroundleft: Volume level.
    :param float surroundright: Volume level.
    :param float backleft: Volume level.
    :param float backright: Volume level.
    """
    speaker_levels = [
        c_float(level)
        for level in (
            frontleft,
            frontright,
            center,
            lfe,
            surroundleft,
            surroundright,
            backleft,
            backright,
        )
    ]
    self._call_specific("SetMixLevelsOutput", *speaker_levels)
def set_pan(self, pan):
    """Set the left/right pan level.

    Convenience method to avoid passing a matrix; it overwrites values
    set via :py:meth:`set_mix_levels_input`,
    :py:meth:`set_mix_levels_output` and :py:meth:`set_mix_matrix`.

    Mono inputs are panned from left to right using constant power
    panning (non linear fade). Stereo and greater inputs isolate the
    front left and right input channels and fade them up and down based
    on the pan value, silencing other channels. The output channel count
    always matches the System speaker mode set via
    :py:attr:`pyfmodex.system.System.software_format`.

    If the System is initialized with
    :py:class:`~pyfmodex.enums.SPEAKERMODE` RAW, calling this function
    will produce silence.

    :param float pan: Pan level where -1 represents full left, 0
        represents center and 1 represents full right.
    """
    self._call_specific("SetPan", c_float(pan))
def stop(self):
    """Stop the Channel (or all Channels in nested ChannelGroups) from
    playing.

    Frees up internal resources for reuse by the virtual
    :py:class:`~pyfmodex.channel.Channel` system.
    """
    self._call_specific("Stop")
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/channel_control.py",
"copies": "1",
"size": "36232",
"license": "mit",
"hash": -7120706403960294000,
"line_mean": 35.5241935484,
"line_max": 110,
"alpha_frac": 0.6252759991,
"autogenerated": false,
"ratio": 3.985918591859186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111194590959186,
"avg_score": null,
"num_lines": null
} |
"""An interface to GraphViz."""
from __future__ import division
from __future__ import print_function
import copy
import io
import errno
import os
import re
import subprocess
import sys
import tempfile
import warnings
# dot_parser is an optional dependency: without it pydot can still build
# and render graphs, but cannot parse existing DOT files.
try:
    import dot_parser
except Exception as e:
    warnings.warn(
        "Couldn't import dot_parser, "
        "loading of dot files will not be possible.")


__author__ = 'Ero Carrera'
__version__ = '1.4.2.dev0'
__license__ = 'MIT'

# Python 2/3 compatibility: the base string type differs between the two.
PY3 = sys.version_info >= (3, 0, 0)
if PY3:
    str_type = str
else:
    str_type = basestring
# Attribute names recognized by Graphviz for each kind of object. These
# sets drive Common.create_attribute_methods, which generates the
# set_<attr>/get_<attr> accessors on graphs, nodes and edges.
GRAPH_ATTRIBUTES = { 'Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
    'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
    'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
    'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
    'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
    'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
    'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
    'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
    'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
    'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
    'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
    'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
    'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
    'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
    # for subgraphs
    'rank' }


# Attributes valid on edges.
EDGE_ATTRIBUTES = { 'URL', 'arrowhead', 'arrowsize', 'arrowtail',
    'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
    'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
    'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
    'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
    'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
    'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
    'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
    'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
    'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
    'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
    'rank' }


# Attributes valid on nodes.
NODE_ATTRIBUTES = { 'URL', 'color', 'colorscheme', 'comment',
    'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
    'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
    'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
    'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
    'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
    'target', 'tooltip', 'vertices', 'width', 'z',
    # The following are attributes dot2tex
    'texlbl', 'texmode' }


# Attributes valid on cluster subgraphs.
CLUSTER_ATTRIBUTES = { 'K', 'URL', 'bgcolor', 'color', 'colorscheme',
    'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
    'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
    'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip' }


# Graphviz layout engines shipped with a standard install; names in this
# set get the platform-specific executable extension appended before
# being spawned by call_graphviz.
DEFAULT_PROGRAMS = {
    'dot',
    'twopi',
    'neato',
    'circo',
    'fdp',
    'sfdp',
}
def is_windows():
    # type: () -> bool
    """Return True when running on a Windows OS."""
    return os.name == 'nt'
def is_anacoda():
    # type: () -> bool
    """Return True when running inside an Anaconda/conda environment."""
    conda_meta = os.path.join(sys.prefix, 'conda-meta')
    return os.path.exists(conda_meta)
def get_executable_extension():
    # type: () -> str
    """File extension of Graphviz executables on this platform.

    Anaconda ships Graphviz programs as ``.bat`` wrappers on Windows;
    elsewhere on Windows they are ``.exe``; on POSIX there is none.
    """
    if not is_windows():
        return ''
    return '.bat' if is_anacoda() else '.exe'
def call_graphviz(program, arguments, working_dir, **kwargs):
    """Run a Graphviz program and return ``(stdout, stderr, process)``.

    The child process explicitly inherits ``$PATH`` (on Windows too) and
    ``$LD_LIBRARY_PATH``, and is started with ``shell=False``.
    """
    # Known layout engines get the platform executable extension appended.
    if program in DEFAULT_PROGRAMS:
        program += get_executable_extension()

    if arguments is None:
        arguments = []

    child_env = {
        'PATH': os.environ.get('PATH', ''),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH', ''),
    }

    process = subprocess.Popen(
        [program] + arguments,
        env=child_env,
        cwd=working_dir,
        shell=False,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        **kwargs
    )
    stdout_data, stderr_data = process.communicate()

    return stdout_data, stderr_data, process
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# https://code.activestate.com/recipes/414283/
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
    """An immutable dict, hashable once its hash is first computed.

    Nested dicts (including dicts inside lists) are recursively frozen
    too; lists become tuples.
    """

    def _blocked_attribute(obj):
        # Every mutating dict operation is routed here.
        raise AttributeError('A frozendict cannot be modified.')
    _blocked_attribute = property(_blocked_attribute)

    # Replace all mutators with the raising property above.
    __delitem__ = __setitem__ = clear = _blocked_attribute
    pop = popitem = setdefault = update = _blocked_attribute

    def __new__(cls, *args, **kw):
        new = dict.__new__(cls)

        args_ = []
        for arg in args:
            if isinstance(arg, dict):
                # Work on a shallow copy so the caller's dict is not
                # mutated while we freeze its values in place.
                arg = copy.copy(arg)
                for k in arg:
                    v = arg[k]
                    if isinstance(v, frozendict):
                        arg[k] = v
                    elif isinstance(v, dict):
                        arg[k] = frozendict(v)
                    elif isinstance(v, list):
                        # Freeze any dicts inside the list, then make the
                        # list itself immutable by converting to a tuple.
                        v_ = list()
                        for elm in v:
                            if isinstance(elm, dict):
                                v_.append( frozendict(elm) )
                            else:
                                v_.append( elm )
                        arg[k] = tuple(v_)
                args_.append( arg )
            else:
                args_.append( arg )

        # Populate here (not in __init__) so the finished object never
        # needs a mutating call.
        dict.__init__(new, *args_, **kw)
        return new

    def __init__(self, *args, **kw):
        # Intentionally empty: all initialization happens in __new__.
        pass

    def __hash__(self):
        try:
            return self._cached_hash
        except AttributeError:
            # Hash of the sorted item tuples; safe to cache because the
            # dict is immutable.
            h = self._cached_hash = hash(tuple(sorted(self.items())))
            return h

    def __repr__(self):
        return "frozendict(%s)" % dict.__repr__(self)
# Reserved words of the DOT language; they are never auto-quoted (see
# needs_quotes below).
dot_keywords = ['graph', 'subgraph', 'digraph', 'node', 'edge', 'strict']

# An ID made of letters, digits, underscores and commas.
id_re_alpha_nums = re.compile('^[_a-zA-Z][a-zA-Z0-9_,]*$', re.UNICODE)
# Same, but additionally allowing ':' (port separator) and double quotes.
id_re_alpha_nums_with_ports = re.compile(
    '^[_a-zA-Z][a-zA-Z0-9_,:\"]*[a-zA-Z0-9_,\"]+$', re.UNICODE)
# A purely numeric ID.
id_re_num = re.compile('^[0-9,]+$', re.UNICODE)
# Splits a 'node:port' reference into its two halves.
id_re_with_port = re.compile('^([^:]*):([^:]*)$', re.UNICODE)
# An ID already wrapped in double quotes (may span lines).
id_re_dbl_quoted = re.compile('^\".*\"$', re.S|re.UNICODE)
# An HTML-like label: <...> (may span lines).
id_re_html = re.compile('^<.*>$', re.S|re.UNICODE)
def needs_quotes( s ):
    """Checks whether a string is a dot language ID.

    It will check whether the string is solely composed
    by the characters allowed in an ID or not.
    If the string is one of the reserved keywords it will
    need quotes too but the user will need to add them
    manually.
    """

    # If the name is a reserved keyword it will need quotes but pydot
    # can't tell when it's being used as a keyword or when it's simply
    # a name. Hence the user needs to supply the quotes when an element
    # would use a reserved keyword as name. This function will return
    # false indicating that a keyword string, if provided as-is, won't
    # need quotes.
    if s in dot_keywords:
        return False

    # Any non-ASCII (> 0x7f) or NUL character forces quoting, unless the
    # string is already double-quoted or is an HTML-like label.
    chars = [ord(c) for c in s if ord(c)>0x7f or ord(c)==0]
    if chars and not id_re_dbl_quoted.match(s) and not id_re_html.match(s):
        return True

    # A match against any of the "well-formed ID" patterns means no
    # quoting is needed.
    for test_re in [id_re_alpha_nums, id_re_num,
                    id_re_dbl_quoted, id_re_html,
                    id_re_alpha_nums_with_ports]:
        if test_re.match(s):
            return False

    # 'node:port' needs quotes only if either half does.
    m = id_re_with_port.match(s)
    if m:
        return needs_quotes(m.group(1)) or needs_quotes(m.group(2))

    return True
def quote_if_necessary(s):
    """Enclose attribute value in quotes, if needed.

    Booleans become the strings 'True'/'False'; non-string values and
    empty strings pass through untouched; strings that are not valid DOT
    IDs get their quotes/newlines escaped and are wrapped in quotes.
    """
    if isinstance(s, bool):
        return 'True' if s else 'False'

    if not isinstance(s, str_type):
        return s

    if not s:
        return s

    if not needs_quotes(s):
        return s

    # Escape embedded quotes and line breaks before wrapping.
    for original, escaped in (('"', r'\"'), ("\n", r'\n'), ("\r", r'\r')):
        s = s.replace(original, escaped)

    return '"' + s + '"'
def graph_from_dot_data(s):
    """Load graphs from DOT description in string `s`.

    @param s: string in [DOT language](
        https://en.wikipedia.org/wiki/DOT_(graph_description_language))

    @return: Graphs that result from parsing.
    @rtype: `list` of `pydot.Dot`
    """
    # Relies on the optional dot_parser import at the top of the module;
    # if that import failed, this raises NameError.
    return dot_parser.parse_dot_data(s)
def graph_from_dot_file(path, encoding=None):
    """Load graphs from DOT file at `path`.

    @param path: to DOT file
    @param encoding: as passed to `io.open`.
        For example, `'utf-8'`.

    @return: Graphs that result from parsing.
    @rtype: `list` of `pydot.Dot`
    """
    with io.open(path, 'rt', encoding=encoding) as f:
        content = f.read()
    if not PY3:
        # Python 2: force unicode before handing off to the parser.
        content = unicode(content)
    return graph_from_dot_data(content)
def graph_from_edges(edge_list, node_prefix='', directed=False):
    """Creates a basic graph out of an edge list.

    The edge list has to be a list of tuples representing
    the nodes connected by the edge.
    The values can be anything: bool, int, float, str.

    If the graph is undirected by default, it is only
    calculated from one of the symmetric halves of the matrix.
    """
    graph_type = 'digraph' if directed else 'graph'
    graph = Dot(graph_type=graph_type)

    for edge in edge_list:
        # Non-string endpoints are stringified before prefixing.
        src = edge[0] if isinstance(edge[0], str) else str(edge[0])
        dst = edge[1] if isinstance(edge[1], str) else str(edge[1])
        graph.add_edge(Edge(node_prefix + src, node_prefix + dst))

    return graph
def graph_from_adjacency_matrix(matrix, node_prefix= u'', directed=False):
    """Creates a basic graph out of an adjacency matrix.

    The matrix has to be a list of rows of values
    representing an adjacency matrix.
    The values can be anything: bool, int, float, as long
    as they can evaluate to True or False.

    Nodes are numbered from 1; each node name is `node_prefix` followed
    by the node number.
    """
    if directed:
        graph = Dot(graph_type='digraph')
    else:
        graph = Dot(graph_type='graph')

    node_orig = 1

    for row_idx, row in enumerate(matrix):
        if not directed:
            # Undirected: only walk the upper triangle so each symmetric
            # edge is added once.
            # Fix: use enumerate() instead of matrix.index(row), which
            # returned the FIRST matching row and mis-skipped when the
            # matrix contained duplicate rows.
            skip = row_idx
            r = row[skip:]
        else:
            skip = 0
            r = row
        node_dest = skip+1

        for e in r:
            if e:
                # Fix: node numbers are ints and must be converted to str
                # before concatenation with the (string) node_prefix;
                # previously this raised TypeError.
                graph.add_edge(
                    Edge( node_prefix + str(node_orig),
                         node_prefix + str(node_dest)) )
            node_dest += 1
        node_orig += 1

    return graph
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
    """Creates a basic graph out of an incidence matrix.

    The matrix has to be a list of rows of values
    representing an incidence matrix.
    The values can be anything: bool, int, float, as long
    as they can evaluate to True or False.

    Nodes are numbered from 1 (columns of the matrix); each row describes
    one edge. In a directed graph the source carries a negative value and
    the destination a positive one.
    """
    node_orig = 1

    if directed:
        graph = Dot(graph_type='digraph')
    else:
        graph = Dot(graph_type='graph')

    for row in matrix:
        nodes = []
        c = 1

        # Collect the signed 1-based indices of the touched nodes.
        for node in row:
            if node:
                nodes.append(c*node)
            c += 1
        nodes.sort()

        if len(nodes) == 2:
            # Fix: node numbers are ints and must be converted to str
            # before concatenation with the (string) node_prefix;
            # previously this raised TypeError.
            graph.add_edge(
                Edge( node_prefix + str(abs(nodes[0])),
                     node_prefix + str(nodes[1]) ))

    if not directed:
        graph.set_simplify(True)

    return graph
class Common(object):
    """Common information to several classes.

    Should not be directly used, several classes are derived from
    this one.
    """

    def __getstate__(self):
        # Pickle support: serialize a shallow copy of the state dict.
        dict = copy.copy(self.obj_dict)
        return dict

    def __setstate__(self, state):
        self.obj_dict = state

    def __get_attribute__(self, attr):
        """Look for default attributes for this node"""

        attr_val = self.obj_dict['attributes'].get(attr, None)

        if attr_val is None:
            # get the defaults for nodes/edges

            default_node_name = self.obj_dict['type']

            # The defaults for graphs are set on a node named 'graph'
            if default_node_name in ('subgraph', 'digraph', 'cluster'):
                default_node_name = 'graph'

            g = self.get_parent_graph()
            if g is not None:
                defaults = g.get_node( default_node_name )
            else:
                return None

            # Multiple defaults could be set by having repeated 'graph [...]'
            # 'node [...]', 'edge [...]' statements. In such case, if the
            # same attribute is set in different statements, only the first
            # will be returned. In order to get all, one would call the
            # get_*_defaults() methods and handle those. Or go node by node
            # (of the ones specifying defaults) and modify the attributes
            # individually.
            #
            if not isinstance(defaults, (list, tuple)):
                defaults = [defaults]

            for default in defaults:
                attr_val = default.obj_dict['attributes'].get(attr, None)
                if attr_val:
                    return attr_val
        else:
            return attr_val

        return None

    def set_parent_graph(self, parent_graph):
        self.obj_dict['parent_graph'] = parent_graph

    def get_parent_graph(self):
        return self.obj_dict.get('parent_graph', None)

    def set(self, name, value):
        """Set an attribute value by name.

        Given an attribute 'name' it will set its value to 'value'.
        There's always the possibility of using the methods:

            set_'name'(value)

        which are defined for all the existing attributes.
        """
        self.obj_dict['attributes'][name] = value

    def get(self, name):
        """Get an attribute value by name.

        Given an attribute 'name' it will get its value.
        There's always the possibility of using the methods:

            get_'name'()

        which are defined for all the existing attributes.
        """
        return self.obj_dict['attributes'].get(name, None)

    def get_attributes(self):
        """Return this element's attribute dictionary."""
        return self.obj_dict['attributes']

    def set_sequence(self, seq):
        # 'sequence' records this element's insertion order in its parent.
        self.obj_dict['sequence'] = seq

    def get_sequence(self):
        return self.obj_dict['sequence']

    def create_attribute_methods(self, obj_attributes):
        """Dynamically attach set_<attr>/get_<attr> accessors for every
        attribute name in `obj_attributes` onto this instance."""
        #for attr in self.obj_dict['attributes']:
        for attr in obj_attributes:
            # Generate all the Setter methods.
            #
            # NOTE: 'a=attr' binds the current attr at definition time,
            # avoiding the classic late-binding closure pitfall.
            self.__setattr__(
                'set_'+attr,
                lambda x, a=attr :
                    self.obj_dict['attributes'].__setitem__(a, x) )

            # Generate all the Getter methods.
            #
            self.__setattr__(
                'get_'+attr, lambda a=attr : self.__get_attribute__(a))
class Error(Exception):
    """General error handling class."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value
class InvocationException(Exception):
    """Indicate a problem while running any GraphViz executable."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value
class Node(Common):
    """A graph node.

    This class represents a graph's node with all its attributes.

    node(name, attribute=value, ...)

    name: node's name

    All the attributes defined in the Graphviz dot language should
    be supported.
    """

    def __init__(self, name = '', obj_dict = None, **attrs):
        #
        # Nodes will take attributes of
        # all other types because the defaults
        # for any GraphViz object are dealt with
        # as if they were Node definitions
        #
        if obj_dict is not None:
            # Rehydrate from an existing state dict (e.g. from the parser
            # or from pickling) instead of building a fresh one.
            self.obj_dict = obj_dict
        else:
            self.obj_dict = dict()

            # Copy the attributes
            #
            self.obj_dict[ 'attributes' ] = dict( attrs )
            self.obj_dict[ 'type' ] = 'node'
            self.obj_dict[ 'parent_graph' ] = None
            self.obj_dict[ 'parent_node_list' ] = None
            self.obj_dict[ 'sequence' ] = None

            # Remove the compass point
            #
            # An unquoted 'name:port' is split so only 'name' is stored as
            # the node name; the ':port' suffix is kept separately.
            port = None
            if isinstance(name, str_type) and not name.startswith('"'):
                idx = name.find(':')
                if idx > 0 and idx+1 < len(name):
                    name, port = name[:idx], name[idx:]

            if isinstance(name, int):
                name = str(name)

            self.obj_dict['name'] = quote_if_necessary(name)
            self.obj_dict['port'] = port

        self.create_attribute_methods(NODE_ATTRIBUTES)

    def __str__(self):
        return self.to_string()

    def set_name(self, node_name):
        """Set the node's name."""
        self.obj_dict['name'] = node_name

    def get_name(self):
        """Get the node's name."""
        return self.obj_dict['name']

    def get_port(self):
        """Get the node's port."""
        return self.obj_dict['port']

    def add_style(self, style):
        # Append `style` to the node's comma-separated 'style' attribute.
        styles = self.obj_dict['attributes'].get('style', None)
        if not styles and style:
            styles = [ style ]
        else:
            # NOTE(review): if both the stored styles and `style` are
            # empty/falsy this branch calls None.split(',') and raises —
            # presumably callers always pass a non-empty style; confirm.
            styles = styles.split(',')
            styles.append( style )

        self.obj_dict['attributes']['style'] = ','.join( styles )

    def to_string(self):
        """Return string representation of node in DOT language."""

        # RMF: special case defaults for node, edge and graph properties.
        #
        node = quote_if_necessary(self.obj_dict['name'])

        node_attr = list()

        for attr in sorted(self.obj_dict['attributes']):
            value = self.obj_dict['attributes'][attr]
            # Empty string values must still be emitted as "" in DOT.
            if value == '':
                value = '""'
            if value is not None:
                node_attr.append(
                    '%s=%s' % (attr, quote_if_necessary(value) ) )
            else:
                # A None value means a bare attribute name with no '='.
                node_attr.append( attr )

        # No point in having nodes setting any defaults if the don't set
        # any attributes...
        #
        if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
            return ''

        node_attr = ', '.join(node_attr)

        if node_attr:
            node += ' [' + node_attr + ']'

        return node + ';'
class Edge(Common):
    """A graph edge.

    This class represents a graph's edge with all its attributes.

    edge(src, dst, attribute=value, ...)

    src: source node
    dst: destination node

    `src` and `dst` can be specified as a `Node` object,
    or as the node's name string.

    All the attributes defined in the Graphviz dot language should
    be supported.

    Attributes can be set through the dynamically generated methods:

        set_[attribute name], i.e. set_label, set_fontname

    or directly by using the instance's special dictionary:

        Edge.obj_dict['attributes'][attribute name], i.e.

        edge_instance.obj_dict['attributes']['label']
        edge_instance.obj_dict['attributes']['fontname']
    """

    def __init__(self, src='', dst='', obj_dict=None, **attrs):
        self.obj_dict = dict()
        # Endpoints may be Node objects; only their names are stored.
        if isinstance(src, Node):
            src = src.get_name()
        if isinstance(dst, Node):
            dst = dst.get_name()
        points = (quote_if_necessary(src),
                  quote_if_necessary(dst))
        self.obj_dict['points'] = points
        if obj_dict is None:
            # Copy the attributes
            self.obj_dict[ 'attributes' ] = dict( attrs )
            self.obj_dict[ 'type' ] = 'edge'
            self.obj_dict[ 'parent_graph' ] = None
            self.obj_dict[ 'parent_edge_list' ] = None
            self.obj_dict[ 'sequence' ] = None
        else:
            # Wrap an existing state dict (used when a parent graph
            # rebuilds Edge instances from its internal storage).
            self.obj_dict = obj_dict
        self.create_attribute_methods(EDGE_ATTRIBUTES)

    def __str__(self):
        # str(edge) yields the edge's DOT-language representation.
        return self.to_string()

    def get_source(self):
        """Get the edge's source node name."""
        return self.obj_dict['points'][0]

    def get_destination(self):
        """Get the edge's destination node name."""
        return self.obj_dict['points'][1]

    def __hash__(self):
        # Symmetric in (source, destination) so that A->B and B->A
        # hash alike; this keeps hashing consistent with __eq__ for
        # undirected graphs.
        return hash( hash(self.get_source()) +
                     hash(self.get_destination()) )

    def __eq__(self, edge):
        """Compare two edges.

        If the parent graph is directed, two edges are equal only
        when their sources and destinations both match (A->B != B->A).

        If the parent graph is undirected, any edge
        connecting two nodes is equal to any other
        edge connecting the same nodes, A->B == B->A.
        """
        if not isinstance(edge, Edge):
            raise Error('Can not compare and '
                        'edge to a non-edge object.')
        if self.get_parent_graph().get_top_graph_type() == 'graph':
            # If the graph is undirected, the edge has neither
            # source nor destination.
            #
            if ( ( self.get_source() == edge.get_source() and
                   self.get_destination() == edge.get_destination() ) or
                 ( edge.get_source() == self.get_destination() and
                   edge.get_destination() == self.get_source() ) ):
                return True
        else:
            if (self.get_source()==edge.get_source() and
                    self.get_destination()==edge.get_destination()):
                return True
        return False

    def parse_node_ref(self, node_str):
        # Normalize one endpoint reference.  Non-strings (e.g. a
        # subgraph's frozendict state) pass through untouched.
        if not isinstance(node_str, str):
            return node_str
        # Fully quoted names are already safe.
        if node_str.startswith('"') and node_str.endswith('"'):
            return node_str
        node_port_idx = node_str.rfind(':')
        # A quoted name followed by a port ('"name":port') is left as-is.
        if (node_port_idx>0 and node_str[0]=='"' and
                node_str[node_port_idx-1]=='"'):
            return node_str
        if node_port_idx>0:
            # Quote name and port independently, keeping the ':'.
            a = node_str[:node_port_idx]
            b = node_str[node_port_idx+1:]
            node = quote_if_necessary(a)
            node += ':'+quote_if_necessary(b)
            return node
        return node_str

    def to_string(self):
        """Return string representation of edge in DOT language."""
        src = self.parse_node_ref( self.get_source() )
        dst = self.parse_node_ref( self.get_destination() )
        # An endpoint stored as a frozendict is a subgraph's state;
        # an int endpoint is a numeric node id.
        if isinstance(src, frozendict):
            edge = [ Subgraph(obj_dict=src).to_string() ]
        elif isinstance(src, int):
            edge = [ str(src) ]
        else:
            edge = [ src ]
        # '->' for digraphs, '--' for undirected graphs.
        if (self.get_parent_graph() and
                self.get_parent_graph().get_top_graph_type() and
                self.get_parent_graph().get_top_graph_type() == 'digraph' ):
            edge.append( '->' )
        else:
            edge.append( '--' )
        if isinstance(dst, frozendict):
            edge.append( Subgraph(obj_dict=dst).to_string() )
        elif isinstance(dst, int):
            edge.append( str(dst) )
        else:
            edge.append( dst )
        edge_attr = list()
        for attr in sorted(self.obj_dict['attributes']):
            value = self.obj_dict['attributes'][attr]
            if value == '':
                value = '""'
            if value is not None:
                edge_attr.append(
                    '%s=%s' % (attr, quote_if_necessary(value) ) )
            else:
                # None means "attribute name only, no value".
                edge_attr.append( attr )
        edge_attr = ', '.join(edge_attr)
        if edge_attr:
            edge.append( ' [' + edge_attr + ']' )
        return ' '.join(edge) + ';'
class Graph(Common):
    """Class representing a graph in Graphviz's dot language.

    This class implements the methods to work on a representation
    of a graph in Graphviz's dot language.

    graph( graph_name='G', graph_type='digraph',
        strict=False, suppress_disconnected=False, attribute=value, ...)

    graph_name:
        the graph's name
    graph_type:
        can be 'graph' or 'digraph'
    suppress_disconnected:
        defaults to False, which will remove from the
        graph any disconnected nodes.
    simplify:
        if True it will avoid displaying equal edges, i.e.
        only one edge between two nodes. removing the
        duplicated ones.

    All the attributes defined in the Graphviz dot language should
    be supported.

    Attributes can be set through the dynamically generated methods:

        set_[attribute name], i.e. set_size, set_fontname

    or using the instance's attributes:

        Graph.obj_dict['attributes'][attribute name], i.e.

        graph_instance.obj_dict['attributes']['label']
        graph_instance.obj_dict['attributes']['fontname']
    """

    def __init__(self, graph_name='G', obj_dict=None,
                 graph_type='digraph', strict=False,
                 suppress_disconnected=False, simplify=False, **attrs):
        if obj_dict is not None:
            # Wrap an existing state dictionary; all other arguments
            # are ignored in that case.
            self.obj_dict = obj_dict
        else:
            self.obj_dict = dict()
            self.obj_dict['attributes'] = dict(attrs)
            if graph_type not in ['graph', 'digraph']:
                raise Error((
                    'Invalid type "{t}". '
                    'Accepted graph types are: '
                    'graph, digraph').format(t=graph_type))
            self.obj_dict['name'] = quote_if_necessary(graph_name)
            self.obj_dict['type'] = graph_type
            self.obj_dict['strict'] = strict
            self.obj_dict['suppress_disconnected'] = suppress_disconnected
            self.obj_dict['simplify'] = simplify
            # Monotonic counter assigned to children so that nodes,
            # edges and subgraphs render in insertion order.
            self.obj_dict['current_child_sequence'] = 1
            self.obj_dict['nodes'] = dict()
            self.obj_dict['edges'] = dict()
            self.obj_dict['subgraphs'] = dict()
            self.set_parent_graph(self)
        self.create_attribute_methods(GRAPH_ATTRIBUTES)

    def __str__(self):
        # str(graph) yields the graph's DOT-language representation.
        return self.to_string()

    def get_graph_type(self):
        """Get this graph's type ('graph' or 'digraph')."""
        return self.obj_dict['type']

    def get_top_graph_type(self):
        """Get the type of the root graph in the parent chain.

        Walks parent links until a graph that is its own parent is
        found (the root).
        """
        parent = self
        while True:
            parent_ = parent.get_parent_graph()
            if parent_ == parent:
                break
            parent = parent_
        return parent.obj_dict['type']

    def set_graph_defaults(self, **attrs):
        """Set default attributes for graphs (via a 'graph' pseudo-node)."""
        self.add_node( Node('graph', **attrs) )

    def get_graph_defaults(self, **attrs):
        """Get default graph attributes.

        NOTE(review): `attrs` is unused; kept for signature
        compatibility.
        """
        graph_nodes = self.get_node('graph')
        if isinstance( graph_nodes, (list, tuple)):
            return [ node.get_attributes() for node in graph_nodes ]
        return graph_nodes.get_attributes()

    def set_node_defaults(self, **attrs):
        """Set default attributes for nodes (via a 'node' pseudo-node)."""
        self.add_node( Node('node', **attrs) )

    def get_node_defaults(self, **attrs):
        """Get default node attributes.

        NOTE(review): `attrs` is unused; kept for signature
        compatibility.
        """
        graph_nodes = self.get_node('node')
        if isinstance( graph_nodes, (list, tuple)):
            return [ node.get_attributes() for node in graph_nodes ]
        return graph_nodes.get_attributes()

    def set_edge_defaults(self, **attrs):
        """Set default attributes for edges (via an 'edge' pseudo-node)."""
        self.add_node( Node('edge', **attrs) )

    def get_edge_defaults(self, **attrs):
        """Get default edge attributes.

        NOTE(review): `attrs` is unused; kept for signature
        compatibility.
        """
        graph_nodes = self.get_node('edge')
        if isinstance( graph_nodes, (list, tuple)):
            return [ node.get_attributes() for node in graph_nodes ]
        return graph_nodes.get_attributes()

    def set_simplify(self, simplify):
        """Set whether to simplify or not.

        If True it will avoid displaying equal edges, i.e.
        only one edge between two nodes. removing the
        duplicated ones.
        """
        self.obj_dict['simplify'] = simplify

    def get_simplify(self):
        """Get whether to simplify or not.

        Refer to set_simplify for more information.
        """
        return self.obj_dict['simplify']

    def set_type(self, graph_type):
        """Set the graph's type, 'graph' or 'digraph'."""
        self.obj_dict['type'] = graph_type

    def get_type(self):
        """Get the graph's type, 'graph' or 'digraph'."""
        return self.obj_dict['type']

    def set_name(self, graph_name):
        """Set the graph's name."""
        self.obj_dict['name'] = graph_name

    def get_name(self):
        """Get the graph's name."""
        return self.obj_dict['name']

    def set_strict(self, val):
        """Set graph to 'strict' mode.

        This option is only valid for top level graphs.
        """
        self.obj_dict['strict'] = val

    def get_strict(self, val):
        """Get graph's 'strict' mode (True, False).

        This option is only valid for top level graphs.
        NOTE(review): `val` is unused; kept for signature
        compatibility.
        """
        return self.obj_dict['strict']

    def set_suppress_disconnected(self, val):
        """Suppress disconnected nodes in the output graph.

        This option will skip nodes in
        the graph with no incoming or outgoing
        edges. This option works also
        for subgraphs and has effect only in the
        current graph/subgraph.
        """
        self.obj_dict['suppress_disconnected'] = val

    def get_suppress_disconnected(self, val):
        """Get if suppress disconnected is set.

        Refer to set_suppress_disconnected for more information.
        NOTE(review): `val` is unused; kept for signature
        compatibility.
        """
        return self.obj_dict['suppress_disconnected']

    def get_next_sequence_number(self):
        """Return the next child sequence number and advance the counter."""
        seq = self.obj_dict['current_child_sequence']
        self.obj_dict['current_child_sequence'] += 1
        return seq

    def add_node(self, graph_node):
        """Adds a node object to the graph.

        It takes a node object as its only argument and returns
        None.
        """
        if not isinstance(graph_node, Node):
            raise TypeError(
                'add_node() received ' +
                'a non node class object: ' + str(graph_node))
        node = self.get_node(graph_node.get_name())
        if not node:
            # First node with this name: start a new list.
            self.obj_dict['nodes'][graph_node.get_name()] = [
                graph_node.obj_dict ]
            #self.node_dict[graph_node.get_name()] = graph_node.attributes
            graph_node.set_parent_graph(self.get_parent_graph())
        else:
            # Additional node with an existing name is appended.
            self.obj_dict['nodes'][graph_node.get_name()].append(
                graph_node.obj_dict )
        graph_node.set_sequence(self.get_next_sequence_number())

    def del_node(self, name, index=None):
        """Delete a node from the graph.

        Given a node's name all node(s) with that same name
        will be deleted if 'index' is not specified or set
        to None.
        If there are several nodes with that same name and
        'index' is given, only the node in that position
        will be deleted.

        'index' should be an integer specifying the position
        of the node to delete. If index is larger than the
        number of nodes with that name, no action is taken.

        If nodes are deleted it returns True. If no action
        is taken it returns False.
        """
        if isinstance(name, Node):
            name = name.get_name()
        if name in self.obj_dict['nodes']:
            if (index is not None and
                    index < len(self.obj_dict['nodes'][name])):
                del self.obj_dict['nodes'][name][index]
                return True
            else:
                del self.obj_dict['nodes'][name]
                return True
        return False

    def get_node(self, name):
        """Retrieve a node from the graph.

        Given a node's name the corresponding Node
        instance will be returned.

        If one or more nodes exist with that name a list of
        Node instances is returned.
        An empty list is returned otherwise.
        """
        match = list()
        if name in self.obj_dict['nodes']:
            match.extend(
                [Node(obj_dict=obj_dict)
                 for obj_dict in self.obj_dict['nodes'][name]])
        return match

    def get_nodes(self):
        """Get the list of Node instances."""
        return self.get_node_list()

    def get_node_list(self):
        """Get the list of Node instances.

        This method returns the list of Node instances
        composing the graph.
        """
        node_objs = list()
        for node in self.obj_dict['nodes']:
            obj_dict_list = self.obj_dict['nodes'][node]
            node_objs.extend( [ Node( obj_dict = obj_d )
                                for obj_d in obj_dict_list ] )
        return node_objs

    def add_edge(self, graph_edge):
        """Adds an edge object to the graph.

        It takes a edge object as its only argument and returns
        None.
        """
        if not isinstance(graph_edge, Edge):
            raise TypeError(
                'add_edge() received a non edge class object: ' +
                str(graph_edge))
        # Edges are stored keyed by their (source, destination) pair;
        # parallel edges share the same key.
        edge_points = ( graph_edge.get_source(),
                        graph_edge.get_destination() )
        if edge_points in self.obj_dict['edges']:
            edge_list = self.obj_dict['edges'][edge_points]
            edge_list.append(graph_edge.obj_dict)
        else:
            self.obj_dict['edges'][edge_points] = [ graph_edge.obj_dict ]
        graph_edge.set_sequence( self.get_next_sequence_number() )
        graph_edge.set_parent_graph( self.get_parent_graph() )

    def del_edge(self, src_or_list, dst=None, index=None):
        """Delete an edge from the graph.

        Given an edge's (source, destination) node names all
        matching edges(s) will be deleted if 'index' is not
        specified or set to None.
        If there are several matching edges and 'index' is
        given, only the edge in that position will be deleted.

        'index' should be an integer specifying the position
        of the edge to delete. If index is larger than the
        number of matching edges, no action is taken.

        If edges are deleted it returns True. If no action
        is taken it returns False.
        """
        # Accept either del_edge((src, dst), index) or
        # del_edge(src, dst, index).
        if isinstance( src_or_list, (list, tuple)):
            if dst is not None and isinstance(dst, int):
                index = dst
            src, dst = src_or_list
        else:
            src, dst = src_or_list, dst
        if isinstance(src, Node):
            src = src.get_name()
        if isinstance(dst, Node):
            dst = dst.get_name()
        if (src, dst) in self.obj_dict['edges']:
            if (index is not None and
                    index < len(self.obj_dict['edges'][(src, dst)])):
                del self.obj_dict['edges'][(src, dst)][index]
                return True
            else:
                del self.obj_dict['edges'][(src, dst)]
                return True
        return False

    def get_edge(self, src_or_list, dst=None):
        """Retrieved an edge from the graph.

        Given an edge's source and destination the corresponding
        Edge instance(s) will be returned.

        If one or more edges exist with that source and destination
        a list of Edge instances is returned.
        An empty list is returned otherwise.
        """
        if isinstance( src_or_list, (list, tuple)) and dst is None:
            edge_points = tuple(src_or_list)
            edge_points_reverse = (edge_points[1], edge_points[0])
        else:
            edge_points = (src_or_list, dst)
            edge_points_reverse = (dst, src_or_list)
        match = list()
        # For undirected graphs the reversed pair also matches.
        if edge_points in self.obj_dict['edges'] or (
                self.get_top_graph_type() == 'graph' and
                edge_points_reverse in self.obj_dict['edges']):
            edges_obj_dict = self.obj_dict['edges'].get(
                edge_points,
                self.obj_dict['edges'].get( edge_points_reverse, None ))
            for edge_obj_dict in edges_obj_dict:
                match.append(
                    Edge(edge_points[0],
                         edge_points[1],
                         obj_dict=edge_obj_dict))
        return match

    def get_edges(self):
        """Get the list of Edge instances."""
        return self.get_edge_list()

    def get_edge_list(self):
        """Get the list of Edge instances.

        This method returns the list of Edge instances
        composing the graph.
        """
        edge_objs = list()
        for edge in self.obj_dict['edges']:
            obj_dict_list = self.obj_dict['edges'][edge]
            edge_objs.extend(
                [Edge(obj_dict=obj_d)
                 for obj_d in obj_dict_list])
        return edge_objs

    def add_subgraph(self, sgraph):
        """Adds an subgraph object to the graph.

        It takes a subgraph object as its only argument and returns
        None.
        """
        if (not isinstance(sgraph, Subgraph) and
                not isinstance(sgraph, Cluster)):
            raise TypeError(
                'add_subgraph() received a non subgraph class object:' +
                str(sgraph))
        if sgraph.get_name() in self.obj_dict['subgraphs']:
            sgraph_list = self.obj_dict['subgraphs'][ sgraph.get_name() ]
            sgraph_list.append( sgraph.obj_dict )
        else:
            self.obj_dict['subgraphs'][sgraph.get_name()] = [
                sgraph.obj_dict]
        sgraph.set_sequence( self.get_next_sequence_number() )
        sgraph.set_parent_graph( self.get_parent_graph() )

    def get_subgraph(self, name):
        """Retrieved a subgraph from the graph.

        Given a subgraph's name the corresponding
        Subgraph instance will be returned.

        If one or more subgraphs exist with the same name, a list of
        Subgraph instances is returned.
        An empty list is returned otherwise.
        """
        match = list()
        if name in self.obj_dict['subgraphs']:
            sgraphs_obj_dict = self.obj_dict['subgraphs'].get( name )
            for obj_dict_list in sgraphs_obj_dict:
                #match.extend( Subgraph( obj_dict = obj_d )
                #             for obj_d in obj_dict_list )
                match.append( Subgraph( obj_dict = obj_dict_list ) )
        return match

    def get_subgraphs(self):
        """Get the list of Subgraph instances."""
        return self.get_subgraph_list()

    def get_subgraph_list(self):
        """Get the list of Subgraph instances.

        This method returns the list of Subgraph instances
        in the graph.
        """
        sgraph_objs = list()
        for sgraph in self.obj_dict['subgraphs']:
            obj_dict_list = self.obj_dict['subgraphs'][sgraph]
            sgraph_objs.extend(
                [Subgraph(obj_dict=obj_d)
                 for obj_d in obj_dict_list])
        return sgraph_objs

    def set_parent_graph(self, parent_graph):
        """Record *parent_graph* on this graph and all its children."""
        self.obj_dict['parent_graph'] = parent_graph
        for k in self.obj_dict['nodes']:
            obj_list = self.obj_dict['nodes'][k]
            for obj in obj_list:
                obj['parent_graph'] = parent_graph
        for k in self.obj_dict['edges']:
            obj_list = self.obj_dict['edges'][k]
            for obj in obj_list:
                obj['parent_graph'] = parent_graph
        for k in self.obj_dict['subgraphs']:
            obj_list = self.obj_dict['subgraphs'][k]
            for obj in obj_list:
                # Recurse so nested subgraph children are updated too.
                Graph(obj_dict=obj).set_parent_graph(parent_graph)

    def to_string(self):
        """Return string representation of graph in DOT language.

        @return: graph and subelements
        @rtype: `str`
        """
        graph = list()
        if self.obj_dict.get('strict', None) is not None:
            # 'strict' is only emitted for the top-level graph.
            if (self == self.get_parent_graph() and
                    self.obj_dict['strict']):
                graph.append('strict ')
        graph_type = self.obj_dict['type']
        if (graph_type == 'subgraph' and
                not self.obj_dict.get('show_keyword', True)):
            graph_type = ''
        s = '{type} {name} {{\n'.format(
            type=graph_type,
            name=self.obj_dict['name'])
        graph.append(s)
        # Graph-level attributes come first, one per line.
        for attr in sorted(self.obj_dict['attributes']):
            if self.obj_dict['attributes'].get(attr, None) is not None:
                val = self.obj_dict['attributes'].get(attr)
                if val == '':
                    val = '""'
                if val is not None:
                    graph.append('%s=%s' %
                                 (attr, quote_if_necessary(val)))
                else:
                    graph.append( attr )
                graph.append( ';\n' )
        edges_done = set()
        edge_obj_dicts = list()
        for k in self.obj_dict['edges']:
            edge_obj_dicts.extend(self.obj_dict['edges'][k])
        # Collect edge endpoints so disconnected nodes can be detected.
        if edge_obj_dicts:
            edge_src_set, edge_dst_set = list(zip(
                *[obj['points'] for obj in edge_obj_dicts]))
            edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
        else:
            edge_src_set, edge_dst_set = set(), set()
        node_obj_dicts = list()
        for k in self.obj_dict['nodes']:
            node_obj_dicts.extend(self.obj_dict['nodes'][k])
        sgraph_obj_dicts = list()
        for k in self.obj_dict['subgraphs']:
            sgraph_obj_dicts.extend(self.obj_dict['subgraphs'][k])
        # Interleave nodes, edges and subgraphs in insertion order.
        obj_list = [(obj['sequence'], obj)
                    for obj in (edge_obj_dicts +
                                node_obj_dicts + sgraph_obj_dicts) ]
        obj_list.sort(key=lambda x: x[0])
        for idx, obj in obj_list:
            if obj['type'] == 'node':
                node = Node(obj_dict=obj)
                if self.obj_dict.get('suppress_disconnected', False):
                    if (node.get_name() not in edge_src_set and
                            node.get_name() not in edge_dst_set):
                        continue
                graph.append( node.to_string()+'\n' )
            elif obj['type'] == 'edge':
                edge = Edge(obj_dict=obj)
                # With 'simplify' on, duplicate edges are emitted once.
                if (self.obj_dict.get('simplify', False) and
                        edge in edges_done):
                    continue
                graph.append( edge.to_string() + '\n' )
                edges_done.add(edge)
            else:
                sgraph = Subgraph(obj_dict=obj)
                graph.append( sgraph.to_string()+'\n' )
        graph.append( '}\n' )
        return ''.join(graph)
class Subgraph(Graph):
    """A subgraph in Graphviz's dot language.

    subgraph(graph_name='subG',
             suppress_disconnected=False,
             attribute=value, ...)

    graph_name:
        the subgraph's name
    suppress_disconnected:
        defaults to false, which will remove from the
        subgraph any disconnected nodes.

    All Graphviz dot-language attributes should be supported, either
    through the dynamically generated methods

        set_[attribute name], i.e. set_size, set_fontname

    or through the instance's state dictionary:

        subgraph_instance.obj_dict['attributes'][attribute name]
    """

    # RMF: subgraph should have all the
    # attributes of graph so it can be passed
    # as a graph to all methods
    #
    def __init__(self, graph_name='',
                 obj_dict=None, suppress_disconnected=False,
                 simplify=False, **attrs):
        super().__init__(graph_name=graph_name, obj_dict=obj_dict,
                         suppress_disconnected=suppress_disconnected,
                         simplify=simplify, **attrs)
        # A freshly built subgraph gets tagged; a wrapped obj_dict
        # already carries its own type.
        if obj_dict is None:
            self.obj_dict['type'] = 'subgraph'
class Cluster(Graph):
    """A cluster in Graphviz's dot language.

    cluster(graph_name='subG',
            suppress_disconnected=False,
            attribute=value, ...)

    graph_name:
        the cluster's name
        (the string 'cluster' will be always prepended)
    suppress_disconnected:
        defaults to false, which will remove from the
        cluster any disconnected nodes.

    All Graphviz dot-language attributes should be supported, either
    through the dynamically generated methods

        set_[attribute name], i.e. set_color, set_fontname

    or through the instance's state dictionary:

        cluster_instance.obj_dict['attributes'][attribute name]
    """

    def __init__(self, graph_name='subG',
                 obj_dict=None, suppress_disconnected=False,
                 simplify=False, **attrs):
        super().__init__(graph_name=graph_name, obj_dict=obj_dict,
                         suppress_disconnected=suppress_disconnected,
                         simplify=simplify, **attrs)
        if obj_dict is None:
            self.obj_dict['type'] = 'subgraph'
            # Graphviz recognizes clusters by the 'cluster' name prefix.
            self.obj_dict['name'] = quote_if_necessary(
                'cluster_' + graph_name)
            self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
    """A container for handling a dot language file.

    This class implements methods to write and process
    a dot language file. It is a derived class of
    the base class 'Graph'.
    """

    def __init__(self, *argsl, **argsd):
        Graph.__init__(self, *argsl, **argsd)
        # Paths of image files to copy next to the temporary DOT
        # file before rendering (see set_shape_files()).
        self.shape_files = list()
        # Output formats understood by the GraphViz tools.
        self.formats = [
            'canon', 'cmap', 'cmapx',
            'cmapx_np', 'dia', 'dot',
            'fig', 'gd', 'gd2', 'gif',
            'hpgl', 'imap', 'imap_np', 'ismap',
            'jpe', 'jpeg', 'jpg', 'mif',
            'mp', 'pcl', 'pdf', 'pic', 'plain',
            'plain-ext', 'png', 'ps', 'ps2',
            'svg', 'svgz', 'vml', 'vmlz',
            'vrml', 'vtx', 'wbmp', 'xdot', 'xlib']
        self.prog = 'dot'
        # Automatically creates all
        # the methods enabling the creation
        # of output in any of the supported formats.
        for frmt in self.formats:
            # `f=frmt` binds the loop value as a default argument,
            # avoiding the late-binding closure pitfall.
            def new_method(
                    f=frmt, prog=self.prog,
                    encoding=None):
                """Refer to docstring of method `create`."""
                return self.create(
                    format=f, prog=prog, encoding=encoding)
            name = 'create_{fmt}'.format(fmt=frmt)
            self.__setattr__(name, new_method)
        for frmt in self.formats+['raw']:
            def new_method(
                    path, f=frmt, prog=self.prog,
                    encoding=None):
                """Refer to docstring of method `write.`"""
                self.write(
                    path, format=f, prog=prog,
                    encoding=encoding)
            name = 'write_{fmt}'.format(fmt=frmt)
            self.__setattr__(name, new_method)

    def __getstate__(self):
        # Pickle only the state dictionary (the dynamically created
        # create_*/write_* bound methods are not picklable).
        # NOTE(review): the local name `dict` shadows the builtin;
        # kept as-is.
        dict = copy.copy(self.obj_dict)
        return dict

    def __setstate__(self, state):
        self.obj_dict = state

    def set_shape_files(self, file_paths):
        """Add the paths of the required image files.

        If the graph needs graphic objects to
        be used as shapes or otherwise
        those need to be in the same folder as
        the graph is going to be rendered
        from. Alternatively the absolute path to
        the files can be specified when
        including the graphics in the graph.

        The files in the location pointed to by
        the path(s) specified as arguments
        to this method will be copied to
        the same temporary location where the
        graph is going to be rendered.
        """
        if isinstance( file_paths, str_type):
            self.shape_files.append( file_paths )
        if isinstance( file_paths, (list, tuple) ):
            self.shape_files.extend( file_paths )

    def set_prog(self, prog):
        """Sets the default program.

        Sets the default program in charge of processing
        the dot file into a graph.
        """
        self.prog = prog

    def write(self, path, prog=None, format='raw', encoding=None):
        """Writes a graph to a file.

        Given a filename 'path' it will open/create and truncate
        such file and write on it a representation of the graph
        defined by the dot object in the format specified by
        'format' and using the encoding specified by `encoding` for text.

        The format 'raw' is used to dump the string representation
        of the Dot object, without further processing.
        The output can be processed by any of graphviz tools, defined
        in 'prog', which defaults to 'dot'

        Returns True or False according to the success of the write
        operation.

        There's also the preferred possibility of using:

            write_'format'(path, prog='program')

        which are automatically defined for all the supported formats.
        [write_ps(), write_gif(), write_dia(), ...]

        The encoding is passed to `open` [1].

        [1] https://docs.python.org/3/library/functions.html#open
        """
        if prog is None:
            prog = self.prog
        if format == 'raw':
            # 'raw' dumps the DOT source as text; all other formats
            # are rendered by GraphViz and written as binary.
            s = self.to_string()
            if not PY3:
                s = unicode(s)
            with io.open(path, mode='wt', encoding=encoding) as f:
                f.write(s)
        else:
            s = self.create(prog, format, encoding=encoding)
            with io.open(path, mode='wb') as f:
                f.write(s)
        return True

    def create(self, prog=None, format='ps', encoding=None):
        """Creates and returns a binary image for the graph.

        create will write the graph to a temporary dot file in the
        encoding specified by `encoding` and process it with the
        program given by 'prog' (which defaults to 'dot'), reading
        the binary image output and return it as:

        - `str` of bytes in Python 2
        - `bytes` in Python 3

        There's also the preferred possibility of using:

            create_'format'(prog='program')

        which are automatically defined for all the supported formats,
        for example:

        - `create_ps()`
        - `create_gif()`
        - `create_dia()`

        If 'prog' is a list, instead of a string,
        then the fist item is expected to be the program name,
        followed by any optional command-line arguments for it:

            [ 'twopi', '-Tdot', '-s10' ]

        @param prog: either:

        - name of GraphViz executable that
          can be found in the `$PATH`, or

        - absolute path to GraphViz executable.

        If you have added GraphViz to the `$PATH` and
        use its executables as installed
        (without renaming any of them)
        then their names are:

        - `'dot'`
        - `'twopi'`
        - `'neato'`
        - `'circo'`
        - `'fdp'`
        - `'sfdp'`

        On Windows, these have the notorious ".exe" extension that,
        only for the above strings, will be added automatically.

        The `$PATH` is inherited from `os.env['PATH']` and
        passed to `subprocess.Popen` using the `env` argument.

        If you haven't added GraphViz to your `$PATH` on Windows,
        then you may want to give the absolute path to the
        executable (for example, to `dot.exe`) in `prog`.
        """
        if prog is None:
            prog = self.prog
        assert prog is not None
        if isinstance(prog, (list, tuple)):
            # First element is the program, the rest are its options.
            prog, args = prog[0], prog[1:]
        else:
            args = []
        # temp file
        tmp_fd, tmp_name = tempfile.mkstemp()
        os.close(tmp_fd)
        self.write(tmp_name, encoding=encoding)
        tmp_dir = os.path.dirname(tmp_name)
        # For each of the image files...
        for img in self.shape_files:
            # Get its data
            f = open(img, 'rb')
            f_data = f.read()
            f.close()
            # And copy it under a file with the same name in
            # the temporary directory
            f = open(os.path.join(tmp_dir, os.path.basename(img)), 'wb')
            f.write(f_data)
            f.close()
        arguments = ['-T{}'.format(format), ] + args + [tmp_name]
        try:
            stdout_data, stderr_data, process = call_graphviz(
                program=prog,
                arguments=arguments,
                working_dir=tmp_dir,
            )
        except OSError as e:
            if e.errno == errno.ENOENT:
                # Re-raise with a clearer message when the executable
                # is missing from the PATH.
                args = list(e.args)
                args[1] = '"{prog}" not found in path.'.format(
                    prog=prog)
                raise OSError(*args)
            else:
                raise
        # clean file litter
        for img in self.shape_files:
            os.unlink(os.path.join(tmp_dir, os.path.basename(img)))
        os.unlink(tmp_name)
        if process.returncode != 0:
            message = (
                '"{prog}" with args {arguments} returned code: {code}\n\n'
                'stdout, stderr:\n {out}\n{err}\n'
            ).format(
                prog=prog,
                arguments=arguments,
                code=process.returncode,
                out=stdout_data,
                err=stderr_data,
            )
            print(message)
        assert process.returncode == 0, process.returncode
        return stdout_data
| {
"repo_name": "erocarrera/pydot",
"path": "pydot.py",
"copies": "1",
"size": "54068",
"license": "mit",
"hash": -3781711013687590000,
"line_mean": 26.769902414,
"line_max": 78,
"alpha_frac": 0.5488828882,
"autogenerated": false,
"ratio": 4.013956941351151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5062839829551151,
"avg_score": null,
"num_lines": null
} |
"""An interface to GraphViz."""
import copy
import io
import errno
import os
import re
import subprocess
import sys
import tempfile
import warnings
try:
import dot_parser
except Exception as e:
warnings.warn(
"`pydot` could not import `dot_parser`, "
"so `pydot` will be unable to parse DOT files. "
"The error was: {e}".format(e=e)
)
__author__ = "Ero Carrera"
__version__ = "2.0.0.dev0"
__license__ = "MIT"
# fmt: off
# Valid attribute names per object kind.  create_attribute_methods()
# generates set_X/get_X accessors from these sets.
GRAPH_ATTRIBUTES = {
    "Damping", "K", "URL", "aspect", "bb", "bgcolor",
    "center", "charset", "clusterrank", "colorscheme", "comment", "compound",
    "concentrate", "defaultdist", "dim", "dimen", "diredgeconstraints",
    "dpi", "epsilon", "esep", "fontcolor", "fontname", "fontnames",
    "fontpath", "fontsize", "id", "label", "labeljust", "labelloc",
    "landscape", "layers", "layersep", "layout", "levels", "levelsgap",
    "lheight", "lp", "lwidth", "margin", "maxiter", "mclimit", "mindist",
    "mode", "model", "mosek", "nodesep", "nojustify", "normalize", "nslimit",
    "nslimit1", "ordering", "orientation", "outputorder", "overlap",
    "overlap_scaling", "pack", "packmode", "pad", "page", "pagedir",
    "quadtree", "quantum", "rankdir", "ranksep", "ratio", "remincross",
    "repulsiveforce", "resolution", "root", "rotate", "searchsize", "sep",
    "showboxes", "size", "smoothing", "sortv", "splines", "start",
    "stylesheet", "target", "truecolor", "viewport", "voro_margin",
    # for subgraphs
    "rank"
}
EDGE_ATTRIBUTES = {
    "URL", "arrowhead", "arrowsize", "arrowtail",
    "color", "colorscheme", "comment", "constraint", "decorate", "dir",
    "edgeURL", "edgehref", "edgetarget", "edgetooltip", "fontcolor",
    "fontname", "fontsize", "headURL", "headclip", "headhref", "headlabel",
    "headport", "headtarget", "headtooltip", "href", "id", "label",
    "labelURL", "labelangle", "labeldistance", "labelfloat", "labelfontcolor",
    "labelfontname", "labelfontsize", "labelhref", "labeltarget",
    "labeltooltip", "layer", "len", "lhead", "lp", "ltail", "minlen",
    "nojustify", "penwidth", "pos", "samehead", "sametail", "showboxes",
    "style", "tailURL", "tailclip", "tailhref", "taillabel", "tailport",
    "tailtarget", "tailtooltip", "target", "tooltip", "weight",
    "rank"
}
NODE_ATTRIBUTES = {
    "URL", "color", "colorscheme", "comment",
    "distortion", "fillcolor", "fixedsize", "fontcolor", "fontname",
    "fontsize", "group", "height", "id", "image", "imagescale", "label",
    "labelloc", "layer", "margin", "nojustify", "orientation", "penwidth",
    "peripheries", "pin", "pos", "rects", "regular", "root", "samplepoints",
    "shape", "shapefile", "showboxes", "sides", "skew", "sortv", "style",
    "target", "tooltip", "vertices", "width", "z",
    # The following are attributes dot2tex
    "texlbl", "texmode"
}
CLUSTER_ATTRIBUTES = {
    "K", "URL", "bgcolor", "color", "colorscheme",
    "fillcolor", "fontcolor", "fontname", "fontsize", "label", "labeljust",
    "labelloc", "lheight", "lp", "lwidth", "nojustify", "pencolor",
    "penwidth", "peripheries", "sortv", "style", "target", "tooltip"
}
# fmt: on
# GraphViz executables whose names may get a platform-specific
# extension appended (see get_executable_extension()).
DEFAULT_PROGRAMS = {
    "dot",
    "twopi",
    "neato",
    "circo",
    "fdp",
    "sfdp",
}
def is_windows():
    # type: () -> bool
    """Return True when running on a Windows OS."""
    return os.name == "nt"
def is_anaconda():
    # type: () -> bool
    """Return True when running under an Anaconda Python with GraphViz."""
    import glob
    # Anaconda's GraphViz package leaves a metadata file behind.
    pattern = os.path.join(sys.prefix, "conda-meta\\graphviz*.json")
    return glob.glob(pattern) != []
def get_executable_extension():
    # type: () -> str
    """Return the extension GraphViz executables carry on this platform."""
    if not is_windows():
        return ""
    # Anaconda wraps the GraphViz tools in .bat scripts on Windows.
    return ".bat" if is_anaconda() else ".exe"
def call_graphviz(program, arguments, working_dir, **kwargs):
    """Run a GraphViz executable and capture its output.

    Returns a ``(stdout_data, stderr_data, process)`` triple.
    """
    # explicitly inherit `$PATH`, on Windows too,
    # with `shell=False`
    if program in DEFAULT_PROGRAMS:
        program += get_executable_extension()
    if arguments is None:
        arguments = []
    # Pass a minimal, explicit environment to the child process.
    env = {
        name: os.environ.get(name, "")
        for name in ("PATH", "LD_LIBRARY_PATH", "SYSTEMROOT")
    }
    process = subprocess.Popen(
        [program] + arguments,
        env=env,
        cwd=working_dir,
        shell=False,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        **kwargs,
    )
    stdout_data, stderr_data = process.communicate()
    return stdout_data, stderr_data, process
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# https://code.activestate.com/recipes/414283/
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
    """An immutable, hashable dictionary.

    Nested dicts are frozen recursively and list values are converted
    to tuples, so instances can be hashed and used as dict keys.
    """

    def _blocked_attribute(obj):
        # Every mutation attempt funnels through this property.
        raise AttributeError("A frozendict cannot be modified.")
    _blocked_attribute = property(_blocked_attribute)

    # Redirect all mutating dict methods to the blocking property.
    __delitem__ = __setitem__ = clear = _blocked_attribute
    pop = popitem = setdefault = update = _blocked_attribute

    def __new__(cls, *args, **kw):
        new = dict.__new__(cls)
        args_ = []
        for arg in args:
            if isinstance(arg, dict):
                # Work on a shallow copy so the caller's dict is not
                # mutated while freezing its values.
                arg = copy.copy(arg)
                for k in arg:
                    v = arg[k]
                    if isinstance(v, frozendict):
                        arg[k] = v
                    elif isinstance(v, dict):
                        # Freeze nested plain dicts recursively.
                        arg[k] = frozendict(v)
                    elif isinstance(v, list):
                        # Lists become tuples; contained dicts are frozen.
                        v_ = list()
                        for elm in v:
                            if isinstance(elm, dict):
                                v_.append(frozendict(elm))
                            else:
                                v_.append(elm)
                        arg[k] = tuple(v_)
                args_.append(arg)
            else:
                args_.append(arg)
        dict.__init__(new, *args_, **kw)
        return new

    def __init__(self, *args, **kw):
        # All initialization happens in __new__; running dict.__init__
        # here would mutate the already-frozen instance.
        pass

    def __hash__(self):
        try:
            return self._cached_hash
        except AttributeError:
            # Compute once and memoize on the instance.
            h = self._cached_hash = hash(tuple(sorted(self.items())))
            return h

    def __repr__(self):
        return "frozendict(%s)" % dict.__repr__(self)
# Reserved words of the DOT language; they are valid IDs, so
# needs_quotes() deliberately returns False for them.
dot_keywords = ["graph", "subgraph", "digraph", "node", "edge", "strict"]
# Patterns describing strings that are already valid DOT IDs and
# therefore do not need quoting.
id_re_alpha_nums = re.compile("^[_a-zA-Z][a-zA-Z0-9_,]*$", re.UNICODE)
id_re_alpha_nums_with_ports = re.compile(
    '^[_a-zA-Z][a-zA-Z0-9_,:"]*[a-zA-Z0-9_,"]+$', re.UNICODE
)
id_re_num = re.compile("^[0-9,]+$", re.UNICODE)  # numeric IDs
id_re_with_port = re.compile("^([^:]*):([^:]*)$", re.UNICODE)  # name:port
id_re_dbl_quoted = re.compile('^".*"$', re.S | re.UNICODE)  # already quoted
id_re_html = re.compile("^<.*>$", re.S | re.UNICODE)  # HTML-like label
def needs_quotes(s):
    """Report whether string *s* must be quoted to serve as a dot ID.

    A string is a valid ID when it consists solely of characters the
    dot language allows.  Reserved keywords are reported as NOT needing
    quotes: pydot cannot tell a keyword use from a plain name, so the
    caller must add quotes manually when a keyword is used as a name.
    """
    if s in dot_keywords:
        return False

    # Non-ASCII (or NUL) content requires quoting unless the string is
    # already a quoted or HTML-like ID.
    has_special = any(ord(ch) > 0x7F or ord(ch) == 0 for ch in s)
    if has_special and not (id_re_dbl_quoted.match(s) or id_re_html.match(s)):
        return True

    id_patterns = (
        id_re_alpha_nums,
        id_re_num,
        id_re_dbl_quoted,
        id_re_html,
        id_re_alpha_nums_with_ports,
    )
    if any(pattern.match(s) for pattern in id_patterns):
        return False

    # A "name:port" reference needs quotes iff either half does.
    port_match = id_re_with_port.match(s)
    if port_match is not None:
        name_part, port_part = port_match.group(1), port_match.group(2)
        return needs_quotes(name_part) or needs_quotes(port_part)

    return True
def quote_if_necessary(s):
    """Return *s* double-quoted (with ", \\n, \\r escaped) when the dot
    language requires it; booleans become the strings "True"/"False";
    non-strings and empty strings are returned unchanged."""
    if isinstance(s, bool):
        return "True" if s else "False"

    if not isinstance(s, str):
        return s

    if not s:
        return s

    if not needs_quotes(s):
        return s

    # Escape in the same order as before: quotes first, then newlines
    # and carriage returns.
    escaped = s.replace('"', r"\"").replace("\n", r"\n").replace("\r", r"\r")
    return '"' + escaped + '"'
def graph_from_dot_data(s):
    """Load graphs from DOT description in string `s`.
    @param s: string in [DOT language](
    https://en.wikipedia.org/wiki/DOT_(graph_description_language))
    @return: Graphs that result from parsing.
    @rtype: `list` of `pydot.Dot`
    """
    # Thin wrapper: all lexing/parsing is delegated to the dot_parser
    # companion module.
    return dot_parser.parse_dot_data(s)
def graph_from_dot_file(path, encoding=None):
    """Load graphs from the DOT file at `path`.
    @param path: to DOT file
    @param encoding: as passed to `io.open`.
    For example, `'utf-8'`.
    @return: Graphs that result from parsing.
    @rtype: `list` of `pydot.Dot`
    """
    # Read the whole file as text, then hand the content to the
    # string-based loader.
    with io.open(path, "rt", encoding=encoding) as stream:
        content = stream.read()
    return graph_from_dot_data(content)
def graph_from_edges(edge_list, node_prefix="", directed=False):
    """Build a basic graph from an edge list.

    Each entry of *edge_list* is a pair naming the two endpoints of an
    edge; endpoint values may be of any type (bool, int, float, str)
    and are stringified.  When *directed* is False the result is an
    undirected graph; for symmetric inputs only one of the two halves
    needs to be supplied.
    """
    graph = Dot(graph_type="digraph" if directed else "graph")

    for edge in edge_list:
        # str(x) is a no-op for values that already are strings, so a
        # single conversion path covers every endpoint type.
        src = node_prefix + (edge[0] if isinstance(edge[0], str) else str(edge[0]))
        dst = node_prefix + (edge[1] if isinstance(edge[1], str) else str(edge[1]))
        graph.add_edge(Edge(src, dst))

    return graph
def graph_from_adjacency_matrix(matrix, node_prefix="", directed=False):
    """Creates a basic graph out of an adjacency matrix.

    The matrix has to be a list of rows of values representing an
    adjacency matrix.  The values can be anything: bool, int, float,
    as long as they can evaluate to True or False.  Node names are
    1-based row/column indices, optionally prefixed by *node_prefix*.
    For undirected graphs only the upper triangle (including the
    diagonal) of the matrix is read.
    """
    if directed:
        graph = Dot(graph_type="digraph")
    else:
        graph = Dot(graph_type="graph")

    # BUG FIX: the previous code computed the row index with
    # matrix.index(row), which returns the index of the FIRST row equal
    # to `row`.  Adjacency matrices routinely contain duplicate rows
    # (e.g. symmetric graphs), which made `skip` wrong and silently
    # duplicated/dropped edges.  enumerate() gives the true index.
    for row_idx, row in enumerate(matrix):
        node_orig = row_idx + 1
        if not directed:
            # Undirected: only read from the diagonal rightwards.
            skip = row_idx
            r = row[skip:]
        else:
            skip = 0
            r = row
        node_dest = skip + 1
        for e in r:
            if e:
                graph.add_edge(
                    Edge(
                        "%s%s" % (node_prefix, node_orig),
                        "%s%s" % (node_prefix, node_dest),
                    )
                )
            node_dest += 1

    return graph
def graph_from_incidence_matrix(matrix, node_prefix="", directed=False):
    """Build a basic graph from an incidence matrix.

    *matrix* is a list of rows, one row per edge; each row holds one
    value per node.  Any truthy value marks an endpoint (signed values
    distinguish direction: the negative entry is the source).  Node
    names are 1-based column indices prefixed by *node_prefix*.
    """
    graph = Dot(graph_type="digraph" if directed else "graph")

    for row in matrix:
        # Collect signed 1-based column indices of the row's nonzero
        # entries; after sorting, a negative entry (source) comes first.
        endpoints = [
            col * value
            for col, value in enumerate(row, start=1)
            if value
        ]
        endpoints.sort()

        # Rows that do not name exactly two endpoints are ignored.
        if len(endpoints) == 2:
            graph.add_edge(
                Edge(
                    "%s%s" % (node_prefix, abs(endpoints[0])),
                    "%s%s" % (node_prefix, endpoints[1]),
                )
            )

    if not directed:
        graph.set_simplify(True)

    return graph
class Common(object):
    """Common information to several classes.
    Should not be directly used, several classes are derived from
    this one.

    Derived classes store all their state in ``self.obj_dict`` (a plain
    dict), which is what makes pickling and the parent-graph plumbing
    below work uniformly for nodes, edges and (sub)graphs.
    """

    def __getstate__(self):
        # Pickle a shallow copy of the state dict so the pickle does not
        # alias the live object's state.
        # NOTE(review): the local name shadows the builtin `dict`.
        dict = copy.copy(self.obj_dict)
        return dict

    def __setstate__(self, state):
        self.obj_dict = state

    def __get_attribute__(self, attr):
        """Look for default attributes for this node"""
        attr_val = self.obj_dict["attributes"].get(attr, None)
        if attr_val is None:
            # get the defaults for nodes/edges
            default_node_name = self.obj_dict["type"]
            # The defaults for graphs are set on a node named 'graph'
            if default_node_name in ("subgraph", "digraph", "cluster"):
                default_node_name = "graph"
            g = self.get_parent_graph()
            if g is not None:
                defaults = g.get_node(default_node_name)
            else:
                # No parent graph: no defaults to consult.
                return None
            # Multiple defaults could be set by having repeated 'graph [...]'
            # 'node [...]', 'edge [...]' statements. In such case, if the
            # same attribute is set in different statements, only the first
            # will be returned. In order to get all, one would call the
            # get_*_defaults() methods and handle those. Or go node by node
            # (of the ones specifying defaults) and modify the attributes
            # individually.
            #
            if not isinstance(defaults, (list, tuple)):
                defaults = [defaults]
            for default in defaults:
                attr_val = default.obj_dict["attributes"].get(attr, None)
                if attr_val:
                    return attr_val
        else:
            return attr_val
        return None

    def set_parent_graph(self, parent_graph):
        """Record the top-level graph this element belongs to."""
        self.obj_dict["parent_graph"] = parent_graph

    def get_parent_graph(self):
        """Return the top-level graph this element belongs to (or None)."""
        return self.obj_dict.get("parent_graph", None)

    def set(self, name, value):
        """Set an attribute value by name.
        Given an attribute 'name' it will set its value to 'value'.
        There's always the possibility of using the methods:
        set_'name'(value)
        which are defined for all the existing attributes.
        """
        self.obj_dict["attributes"][name] = value

    def get(self, name):
        """Get an attribute value by name.
        Given an attribute 'name' it will get its value.
        There's always the possibility of using the methods:
        get_'name'()
        which are defined for all the existing attributes.
        """
        return self.obj_dict["attributes"].get(name, None)

    def get_attributes(self):
        """Get attributes of the object"""
        return self.obj_dict["attributes"]

    def set_sequence(self, seq):
        """Set sequence"""
        self.obj_dict["sequence"] = seq

    def get_sequence(self):
        """Get sequence"""
        return self.obj_dict["sequence"]

    def create_attribute_methods(self, obj_attributes):
        """Attach dynamic set_<attr>/get_<attr> methods for every name
        in *obj_attributes*."""
        for attr in obj_attributes:
            # Generate all the Setter methods.
            #
            # `a=attr` binds the current attribute name as a default so
            # each lambda keeps its own name (avoids the late-binding
            # closure pitfall).
            self.__setattr__(
                "set_" + attr,
                lambda x, a=attr: self.obj_dict["attributes"].__setitem__(
                    a, x
                ),
            )
            # Generate all the Getter methods.
            #
            self.__setattr__(
                "get_" + attr, lambda a=attr: self.__get_attribute__(a)
            )
class Error(Exception):
    """General error handling class."""

    def __init__(self, value):
        # Deliberately does not call Exception.__init__, so e.args stays
        # empty; the message lives only in `value`.
        self.value = value

    def __str__(self):
        # NOTE(review): assumes `value` is a str — a non-str would make
        # str(e) raise TypeError.
        return self.value
class InvocationException(Exception):
    """Indicate problem while running any GraphViz executable."""

    def __init__(self, value):
        # Same convention as Error: message stored on `value`, args empty.
        self.value = value

    def __str__(self):
        return self.value
class Node(Common):
    """A graph node.

    This class represents a graph's node with all its attributes.
    node(name, attribute=value, ...)
    name: node's name
    All the attributes defined in the Graphviz dot language should
    be supported.
    """

    def __init__(self, name="", obj_dict=None, **attrs):
        #
        # Nodes will take attributes of
        # all other types because the defaults
        # for any GraphViz object are dealt with
        # as if they were Node definitions
        #
        if obj_dict is not None:
            # Re-wrap an existing state dict (e.g. from Graph.get_node).
            self.obj_dict = obj_dict
        else:
            self.obj_dict = dict()
            # Copy the attributes
            #
            self.obj_dict["attributes"] = dict(attrs)
            self.obj_dict["type"] = "node"
            self.obj_dict["parent_graph"] = None
            self.obj_dict["parent_node_list"] = None
            self.obj_dict["sequence"] = None
            # Remove the compass point
            #
            port = None
            if isinstance(name, str) and not name.startswith('"'):
                idx = name.find(":")
                if idx > 0 and idx + 1 < len(name):
                    name, port = name[:idx], name[idx:]
            if isinstance(name, int):
                name = str(name)
            self.obj_dict["name"] = quote_if_necessary(name)
            self.obj_dict["port"] = port
        self.create_attribute_methods(NODE_ATTRIBUTES)

    def __str__(self):
        return self.to_string()

    def set_name(self, node_name):
        """Set the node's name."""
        self.obj_dict["name"] = node_name

    def get_name(self):
        """Get the node's name."""
        return self.obj_dict["name"]

    def get_port(self):
        """Get the node's port."""
        return self.obj_dict["port"]

    def add_style(self, style):
        """Append *style* to the node's comma-separated 'style' attribute."""
        styles = self.obj_dict["attributes"].get("style", None)
        # BUG FIX: the previous code only handled `not styles and style`,
        # so when BOTH were empty it fell through to `None.split(",")`
        # and raised AttributeError.  Branch on the existing value first.
        if not styles:
            styles = [style] if style else []
        else:
            styles = styles.split(",")
            styles.append(style)
        self.obj_dict["attributes"]["style"] = ",".join(styles)

    def to_string(self):
        """Return string representation of node in DOT language."""
        # RMF: special case defaults for node, edge and graph properties.
        #
        node = quote_if_necessary(self.obj_dict["name"])
        node_attr = list()
        for attr in sorted(self.obj_dict["attributes"]):
            value = self.obj_dict["attributes"][attr]
            if value == "":
                value = '""'
            if value is not None:
                node_attr.append("%s=%s" % (attr, quote_if_necessary(value)))
            else:
                # A None value means a bare attribute name.
                node_attr.append(attr)
        # No point in having nodes setting any defaults if they don't set
        # any attributes...
        #
        if node in ("graph", "node", "edge") and len(node_attr) == 0:
            return ""
        node_attr = ", ".join(node_attr)
        if node_attr:
            node += " [" + node_attr + "]"
        return node + ";"
class Edge(Common):
    """A graph edge.
    This class represents a graph's edge with all its attributes.
    edge(src, dst, attribute=value, ...)
    src: source node, subgraph or cluster
    dst: destination node, subgraph or cluster
    `src` and `dst` can be specified as a `Node`, `Subgraph` or
    `Cluster` object, or as the name string of such a component.
    All the attributes defined in the Graphviz dot language should
    be supported.
    Attributes can be set through the dynamically generated methods:
    set_[attribute name], i.e. set_label, set_fontname
    or directly by using the instance's special dictionary:
    Edge.obj_dict['attributes'][attribute name], i.e.
    edge_instance.obj_dict['attributes']['label']
    edge_instance.obj_dict['attributes']['fontname']
    """

    def __init__(self, src="", dst="", obj_dict=None, **attrs):
        self.obj_dict = dict()
        # Accept graph elements as endpoints and reduce them to names.
        if isinstance(src, (Node, Subgraph, Cluster)):
            src = src.get_name()
        if isinstance(dst, (Node, Subgraph, Cluster)):
            dst = dst.get_name()
        points = (quote_if_necessary(src), quote_if_necessary(dst))
        self.obj_dict["points"] = points
        if obj_dict is None:
            # Copy the attributes
            self.obj_dict["attributes"] = dict(attrs)
            self.obj_dict["type"] = "edge"
            self.obj_dict["parent_graph"] = None
            self.obj_dict["parent_edge_list"] = None
            self.obj_dict["sequence"] = None
        else:
            # Re-wrap an existing state dict; note this discards the
            # 'points' computed above in favor of obj_dict's own.
            self.obj_dict = obj_dict
        self.create_attribute_methods(EDGE_ATTRIBUTES)

    def __str__(self):
        return self.to_string()

    def get_source(self):
        """Get the edges source node name."""
        return self.obj_dict["points"][0]

    def get_destination(self):
        """Get the edge's destination node name."""
        return self.obj_dict["points"][1]

    def __hash__(self):
        # Symmetric in (source, destination), matching the undirected
        # notion of equality in __eq__; for directed graphs unequal
        # edges may hash alike, which is allowed.
        return hash(hash(self.get_source()) + hash(self.get_destination()))

    def __eq__(self, edge):
        """Compare two edges.
        If the parent graph is directed, arcs linking
        node A to B are considered equal and A->B != B->A
        If the parent graph is undirected, any edge
        connecting two nodes is equal to any other
        edge connecting the same nodes, A->B == B->A
        """
        if not isinstance(edge, Edge):
            raise Error("Can not compare an edge to a non-edge object.")
        # NOTE(review): requires a parent graph to be set — comparing a
        # detached edge raises AttributeError on None.
        if self.get_parent_graph().get_top_graph_type() == "graph":
            # If the graph is undirected, the edge has neither
            # source nor destination.
            #
            if (
                self.get_source() == edge.get_source()
                and self.get_destination() == edge.get_destination()
            ) or (
                edge.get_source() == self.get_destination()
                and edge.get_destination() == self.get_source()
            ):
                return True
        else:
            if (
                self.get_source() == edge.get_source()
                and self.get_destination() == edge.get_destination()
            ):
                return True
        return False

    def parse_node_ref(self, node_str):
        """Quote the name and port halves of a "name:port" endpoint
        reference as needed; non-strings and already-quoted strings are
        returned unchanged."""
        if not isinstance(node_str, str):
            return node_str
        if node_str.startswith('"') and node_str.endswith('"'):
            return node_str
        node_port_idx = node_str.rfind(":")
        # A quoted name directly followed by :port is left alone.
        if (
            node_port_idx > 0
            and node_str[0] == '"'
            and node_str[node_port_idx - 1] == '"'
        ):
            return node_str
        if node_port_idx > 0:
            a = node_str[:node_port_idx]
            b = node_str[node_port_idx + 1 :]
            node = quote_if_necessary(a)
            node += ":" + quote_if_necessary(b)
            return node
        return node_str

    def to_string(self):
        """Return string representation of edge in DOT language."""
        src = self.parse_node_ref(self.get_source())
        dst = self.parse_node_ref(self.get_destination())
        # Endpoints may be subgraph state dicts (frozendict) when an
        # edge connects subgraphs.
        if isinstance(src, frozendict):
            edge = [Subgraph(obj_dict=src).to_string()]
        elif isinstance(src, int):
            edge = [str(src)]
        else:
            edge = [src]
        # '->' only when the top-level graph is a digraph.
        if (
            self.get_parent_graph()
            and self.get_parent_graph().get_top_graph_type()
            and self.get_parent_graph().get_top_graph_type() == "digraph"
        ):
            edge.append("->")
        else:
            edge.append("--")
        if isinstance(dst, frozendict):
            edge.append(Subgraph(obj_dict=dst).to_string())
        elif isinstance(dst, int):
            edge.append(str(dst))
        else:
            edge.append(dst)
        edge_attr = list()
        for attr in sorted(self.obj_dict["attributes"]):
            value = self.obj_dict["attributes"][attr]
            if value == "":
                value = '""'
            if value is not None:
                edge_attr.append("%s=%s" % (attr, quote_if_necessary(value)))
            else:
                # A None value means a bare attribute name.
                edge_attr.append(attr)
        edge_attr = ", ".join(edge_attr)
        if edge_attr:
            edge.append(" [" + edge_attr + "]")
        return " ".join(edge) + ";"
class Graph(Common):
    """Class representing a graph in Graphviz's dot language.
    This class implements the methods to work on a representation
    of a graph in Graphviz's dot language.
    graph( graph_name='G', graph_type='digraph',
    strict=False, suppress_disconnected=False, attribute=value, ...)
    graph_name:
    the graph's name
    graph_type:
    can be 'graph' or 'digraph'
    suppress_disconnected:
    defaults to False, which will remove from the
    graph any disconnected nodes.
    simplify:
    if True it will avoid displaying equal edges, i.e.
    only one edge between two nodes. removing the
    duplicated ones.
    All the attributes defined in the Graphviz dot language should
    be supported.
    Attributes can be set through the dynamically generated methods:
    set_[attribute name], i.e. set_size, set_fontname
    or using the instance's attributes:
    Graph.obj_dict['attributes'][attribute name], i.e.
    graph_instance.obj_dict['attributes']['label']
    graph_instance.obj_dict['attributes']['fontname']
    """

    def __init__(
        self,
        graph_name="G",
        obj_dict=None,
        graph_type="digraph",
        strict=False,
        suppress_disconnected=False,
        simplify=False,
        **attrs
    ):
        if obj_dict is not None:
            # Re-wrap an existing state dict; all other arguments are
            # ignored in that case.
            self.obj_dict = obj_dict
        else:
            self.obj_dict = dict()
            self.obj_dict["attributes"] = dict(attrs)
            if graph_type not in ["graph", "digraph"]:
                raise Error(
                    (
                        'Invalid type "{t}". '
                        "Accepted graph types are: "
                        "graph, digraph"
                    ).format(t=graph_type)
                )
            self.obj_dict["name"] = quote_if_necessary(graph_name)
            self.obj_dict["type"] = graph_type
            self.obj_dict["strict"] = strict
            self.obj_dict["suppress_disconnected"] = suppress_disconnected
            self.obj_dict["simplify"] = simplify
            # Monotonic counter used to keep output in insertion order.
            self.obj_dict["current_child_sequence"] = 1
            self.obj_dict["nodes"] = dict()
            self.obj_dict["edges"] = dict()
            self.obj_dict["subgraphs"] = dict()
            # A top-level graph is its own parent.
            self.set_parent_graph(self)
        self.create_attribute_methods(GRAPH_ATTRIBUTES)

    def __str__(self):
        return self.to_string()

    def get_graph_type(self):
        """Get this graph's type ('graph' or 'digraph')."""
        return self.obj_dict["type"]

    def get_top_graph_type(self):
        """Get the type of the root graph (walks up parent links)."""
        parent = self
        while True:
            parent_ = parent.get_parent_graph()
            # The root graph is its own parent (see __init__).
            if parent_ == parent:
                break
            parent = parent_
        return parent.obj_dict["type"]

    def set_graph_defaults(self, **attrs):
        """Define default graph attributes (stored on a node named
        'graph', per dot convention)."""
        self.add_node(Node("graph", **attrs))

    def get_graph_defaults(self, **attrs):
        """Get default graph attributes; a list when several 'graph'
        default statements exist."""
        # NOTE(review): **attrs is accepted but unused — kept for
        # backward compatibility.
        graph_nodes = self.get_node("graph")
        if isinstance(graph_nodes, (list, tuple)):
            return [node.get_attributes() for node in graph_nodes]
        return graph_nodes.get_attributes()

    def set_node_defaults(self, **attrs):
        """Define default node attributes.
        These attributes only apply to nodes added to the graph after
        calling this method.
        """
        self.add_node(Node("node", **attrs))

    def get_node_defaults(self, **attrs):
        """Get default node attributes; a list when several 'node'
        default statements exist."""
        graph_nodes = self.get_node("node")
        if isinstance(graph_nodes, (list, tuple)):
            return [node.get_attributes() for node in graph_nodes]
        return graph_nodes.get_attributes()

    def set_edge_defaults(self, **attrs):
        """Define default edge attributes (stored on a node named
        'edge', per dot convention)."""
        self.add_node(Node("edge", **attrs))

    def get_edge_defaults(self, **attrs):
        """Get default edge attributes; a list when several 'edge'
        default statements exist."""
        graph_nodes = self.get_node("edge")
        if isinstance(graph_nodes, (list, tuple)):
            return [node.get_attributes() for node in graph_nodes]
        return graph_nodes.get_attributes()

    def set_simplify(self, simplify):
        """Set whether to simplify or not.
        If True it will avoid displaying equal edges, i.e.
        only one edge between two nodes. removing the
        duplicated ones.
        """
        self.obj_dict["simplify"] = simplify

    def get_simplify(self):
        """Get whether to simplify or not.
        Refer to set_simplify for more information.
        """
        return self.obj_dict["simplify"]

    def set_type(self, graph_type):
        """Set the graph's type, 'graph' or 'digraph'."""
        self.obj_dict["type"] = graph_type

    def get_type(self):
        """Get the graph's type, 'graph' or 'digraph'."""
        return self.obj_dict["type"]

    def set_name(self, graph_name):
        """Set the graph's name."""
        self.obj_dict["name"] = graph_name

    def get_name(self):
        """Get the graph's name."""
        return self.obj_dict["name"]

    def set_strict(self, val):
        """Set graph to 'strict' mode.
        This option is only valid for top level graphs.
        """
        self.obj_dict["strict"] = val

    def get_strict(self, val):
        """Get graph's 'strict' mode (True, False).
        This option is only valid for top level graphs.
        """
        # NOTE(review): `val` is accepted but unused — kept for
        # backward compatibility.
        return self.obj_dict["strict"]

    def set_suppress_disconnected(self, val):
        """Suppress disconnected nodes in the output graph.
        This option will skip nodes in
        the graph with no incoming or outgoing
        edges. This option works also
        for subgraphs and has effect only in the
        current graph/subgraph.
        """
        self.obj_dict["suppress_disconnected"] = val

    def get_suppress_disconnected(self, val):
        """Get if suppress disconnected is set.
        Refer to set_suppress_disconnected for more information.
        """
        # NOTE(review): `val` is accepted but unused — kept for
        # backward compatibility.
        return self.obj_dict["suppress_disconnected"]

    def get_next_sequence_number(self):
        """Return the next child sequence number and advance the counter."""
        seq = self.obj_dict["current_child_sequence"]
        self.obj_dict["current_child_sequence"] += 1
        return seq

    def add_node(self, graph_node):
        """Adds a node object to the graph.
        It takes a node object as its only argument and returns
        None.
        """
        if not isinstance(graph_node, Node):
            raise TypeError(
                "add_node() received "
                + "a non node class object: "
                + str(graph_node)
            )
        node = self.get_node(graph_node.get_name())
        if not node:
            self.obj_dict["nodes"][graph_node.get_name()] = [
                graph_node.obj_dict
            ]
            graph_node.set_parent_graph(self.get_parent_graph())
        else:
            # Same name already present: keep both state dicts under
            # one key.
            self.obj_dict["nodes"][graph_node.get_name()].append(
                graph_node.obj_dict
            )
        graph_node.set_sequence(self.get_next_sequence_number())

    def del_node(self, name, index=None):
        """Delete a node from the graph.
        Given a node's name all node(s) with that same name
        will be deleted if 'index' is not specified or set
        to None.
        If there are several nodes with that same name and
        'index' is given, only the node in that position
        will be deleted.
        'index' should be an integer specifying the position
        of the node to delete. If index is larger than the
        number of nodes with that name, no action is taken.
        If nodes are deleted it returns True. If no action
        is taken it returns False.
        """
        if isinstance(name, Node):
            name = name.get_name()
        if name in self.obj_dict["nodes"]:
            if index is not None and index < len(self.obj_dict["nodes"][name]):
                del self.obj_dict["nodes"][name][index]
                return True
            else:
                del self.obj_dict["nodes"][name]
                return True
        return False

    def get_node(self, name):
        """Retrieve a node from the graph.
        Given a node's name the corresponding Node
        instance will be returned.
        If one or more nodes exist with that name a list of
        Node instances is returned.
        An empty list is returned otherwise.
        """
        match = list()
        if name in self.obj_dict["nodes"]:
            match.extend(
                [
                    Node(obj_dict=obj_dict)
                    for obj_dict in self.obj_dict["nodes"][name]
                ]
            )
        return match

    def get_nodes(self):
        """Get the list of Node instances."""
        return self.get_node_list()

    def get_node_list(self):
        """Get the list of Node instances.
        This method returns the list of Node instances
        composing the graph.
        """
        node_objs = list()
        for node in self.obj_dict["nodes"]:
            obj_dict_list = self.obj_dict["nodes"][node]
            node_objs.extend([Node(obj_dict=obj_d) for obj_d in obj_dict_list])
        return node_objs

    def add_edge(self, graph_edge):
        """Adds an edge object to the graph.
        It takes a edge object as its only argument and returns
        None.
        """
        if not isinstance(graph_edge, Edge):
            raise TypeError(
                "add_edge() received a non edge class object: "
                + str(graph_edge)
            )
        # Edges are keyed by their (source, destination) pair; parallel
        # edges share one key.
        edge_points = (graph_edge.get_source(), graph_edge.get_destination())
        if edge_points in self.obj_dict["edges"]:
            edge_list = self.obj_dict["edges"][edge_points]
            edge_list.append(graph_edge.obj_dict)
        else:
            self.obj_dict["edges"][edge_points] = [graph_edge.obj_dict]
        graph_edge.set_sequence(self.get_next_sequence_number())
        graph_edge.set_parent_graph(self.get_parent_graph())

    def del_edge(self, src_or_list, dst=None, index=None):
        """Delete an edge from the graph.
        Given an edge's (source, destination) node names all
        matching edges(s) will be deleted if 'index' is not
        specified or set to None.
        If there are several matching edges and 'index' is
        given, only the edge in that position will be deleted.
        'index' should be an integer specifying the position
        of the edge to delete. If index is larger than the
        number of matching edges, no action is taken.
        If edges are deleted it returns True. If no action
        is taken it returns False.
        """
        if isinstance(src_or_list, (list, tuple)):
            # A (src, dst) pair may be passed as the first argument; the
            # second positional slot then (optionally) carries the index.
            if dst is not None and isinstance(dst, int):
                index = dst
            src, dst = src_or_list
        else:
            src, dst = src_or_list, dst
        if isinstance(src, Node):
            src = src.get_name()
        if isinstance(dst, Node):
            dst = dst.get_name()
        if (src, dst) in self.obj_dict["edges"]:
            if index is not None and index < len(
                self.obj_dict["edges"][(src, dst)]
            ):
                del self.obj_dict["edges"][(src, dst)][index]
                return True
            else:
                del self.obj_dict["edges"][(src, dst)]
                return True
        return False

    def get_edge(self, src_or_list, dst=None):
        """Retrieved an edge from the graph.
        Given an edge's source and destination the corresponding
        Edge instance(s) will be returned.
        If one or more edges exist with that source and destination
        a list of Edge instances is returned.
        An empty list is returned otherwise.
        """
        if isinstance(src_or_list, (list, tuple)) and dst is None:
            edge_points = tuple(src_or_list)
            edge_points_reverse = (edge_points[1], edge_points[0])
        else:
            edge_points = (src_or_list, dst)
            edge_points_reverse = (dst, src_or_list)
        match = list()
        # For undirected graphs the reversed pair matches too.
        if edge_points in self.obj_dict["edges"] or (
            self.get_top_graph_type() == "graph"
            and edge_points_reverse in self.obj_dict["edges"]
        ):
            edges_obj_dict = self.obj_dict["edges"].get(
                edge_points,
                self.obj_dict["edges"].get(edge_points_reverse, None),
            )
            for edge_obj_dict in edges_obj_dict:
                match.append(
                    Edge(
                        edge_points[0], edge_points[1], obj_dict=edge_obj_dict
                    )
                )
        return match

    def get_edges(self):
        """Get the list of Edge instances."""
        return self.get_edge_list()

    def get_edge_list(self):
        """Get the list of Edge instances.
        This method returns the list of Edge instances
        composing the graph.
        """
        edge_objs = list()
        for edge in self.obj_dict["edges"]:
            obj_dict_list = self.obj_dict["edges"][edge]
            edge_objs.extend([Edge(obj_dict=obj_d) for obj_d in obj_dict_list])
        return edge_objs

    def add_subgraph(self, sgraph):
        """Adds an subgraph object to the graph.
        It takes a subgraph object as its only argument and returns
        None.
        """
        if not isinstance(sgraph, Subgraph) and not isinstance(
            sgraph, Cluster
        ):
            raise TypeError(
                "add_subgraph() received a non subgraph class object:"
                + str(sgraph)
            )
        if sgraph.get_name() in self.obj_dict["subgraphs"]:
            sgraph_list = self.obj_dict["subgraphs"][sgraph.get_name()]
            sgraph_list.append(sgraph.obj_dict)
        else:
            self.obj_dict["subgraphs"][sgraph.get_name()] = [sgraph.obj_dict]
        sgraph.set_sequence(self.get_next_sequence_number())
        sgraph.set_parent_graph(self.get_parent_graph())

    def get_subgraph(self, name):
        """Retrieved a subgraph from the graph.
        Given a subgraph's name the corresponding
        Subgraph instance will be returned.
        If one or more subgraphs exist with the same name, a list of
        Subgraph instances is returned.
        An empty list is returned otherwise.
        """
        match = list()
        if name in self.obj_dict["subgraphs"]:
            sgraphs_obj_dict = self.obj_dict["subgraphs"].get(name)
            for obj_dict_list in sgraphs_obj_dict:
                match.append(Subgraph(obj_dict=obj_dict_list))
        return match

    def get_subgraphs(self):
        """Get the list of Subgraph instances."""
        return self.get_subgraph_list()

    def get_subgraph_list(self):
        """Get the list of Subgraph instances.
        This method returns the list of Subgraph instances
        in the graph.
        """
        sgraph_objs = list()
        for sgraph in self.obj_dict["subgraphs"]:
            obj_dict_list = self.obj_dict["subgraphs"][sgraph]
            sgraph_objs.extend(
                [Subgraph(obj_dict=obj_d) for obj_d in obj_dict_list]
            )
        return sgraph_objs

    def set_parent_graph(self, parent_graph):
        """Propagate the parent graph reference to every contained
        node, edge and subgraph (recursively)."""
        self.obj_dict["parent_graph"] = parent_graph
        for k in self.obj_dict["nodes"]:
            obj_list = self.obj_dict["nodes"][k]
            for obj in obj_list:
                obj["parent_graph"] = parent_graph
        for k in self.obj_dict["edges"]:
            obj_list = self.obj_dict["edges"][k]
            for obj in obj_list:
                obj["parent_graph"] = parent_graph
        for k in self.obj_dict["subgraphs"]:
            obj_list = self.obj_dict["subgraphs"][k]
            for obj in obj_list:
                Graph(obj_dict=obj).set_parent_graph(parent_graph)

    def to_string(self):
        """Return string representation of graph in DOT language.
        @return: graph and subelements
        @rtype: `str`
        """
        graph = list()
        # 'strict' is only emitted on the root graph.
        if self.obj_dict.get("strict", None) is not None:
            if self == self.get_parent_graph() and self.obj_dict["strict"]:
                graph.append("strict ")
        graph_type = self.obj_dict["type"]
        # Subgraphs may omit the 'subgraph' keyword (anonymous form).
        if graph_type == "subgraph" and not self.obj_dict.get(
            "show_keyword", True
        ):
            graph_type = ""
        s = "{type} {name} {{\n".format(
            type=graph_type, name=self.obj_dict["name"]
        )
        graph.append(s)
        for attr in sorted(self.obj_dict["attributes"]):
            if self.obj_dict["attributes"].get(attr, None) is not None:
                val = self.obj_dict["attributes"].get(attr)
                if val == "":
                    val = '""'
                if val is not None:
                    graph.append("%s=%s" % (attr, quote_if_necessary(val)))
                else:
                    graph.append(attr)
                graph.append(";\n")
        edges_done = set()
        edge_obj_dicts = list()
        for k in self.obj_dict["edges"]:
            edge_obj_dicts.extend(self.obj_dict["edges"][k])
        if edge_obj_dicts:
            # Endpoint name sets, used below to detect disconnected nodes.
            edge_src_set, edge_dst_set = list(
                zip(*[obj["points"] for obj in edge_obj_dicts])
            )
            edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
        else:
            edge_src_set, edge_dst_set = set(), set()
        node_obj_dicts = list()
        for k in self.obj_dict["nodes"]:
            node_obj_dicts.extend(self.obj_dict["nodes"][k])
        sgraph_obj_dicts = list()
        for k in self.obj_dict["subgraphs"]:
            sgraph_obj_dicts.extend(self.obj_dict["subgraphs"][k])
        # Emit children in the order they were added (sequence numbers).
        obj_list = [
            (obj["sequence"], obj)
            for obj in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts)
        ]
        obj_list.sort(key=lambda x: x[0])
        for idx, obj in obj_list:
            if obj["type"] == "node":
                node = Node(obj_dict=obj)
                if self.obj_dict.get("suppress_disconnected", False):
                    if (
                        node.get_name() not in edge_src_set
                        and node.get_name() not in edge_dst_set
                    ):
                        continue
                graph.append(node.to_string() + "\n")
            elif obj["type"] == "edge":
                edge = Edge(obj_dict=obj)
                # With 'simplify' enabled, duplicate edges are emitted
                # only once.
                if self.obj_dict.get("simplify", False) and edge in edges_done:
                    continue
                graph.append(edge.to_string() + "\n")
                edges_done.add(edge)
            else:
                sgraph = Subgraph(obj_dict=obj)
                graph.append(sgraph.to_string() + "\n")
        graph.append("}\n")
        return "".join(graph)
class Subgraph(Graph):
    """A subgraph in Graphviz's dot language.

    Behaves exactly like `Graph` (it exposes the full `Graph` API so it
    can be passed anywhere a graph is expected) except that its type is
    recorded as 'subgraph'.

    subgraph(graph_name='subG', suppress_disconnected=False,
             attribute=value, ...)

    graph_name: the subgraph's name.
    suppress_disconnected: when True, disconnected nodes are removed
        from the subgraph (defaults to False).

    All attributes defined in the Graphviz dot language should be
    supported, either via the dynamically generated accessors
    (set_<attr>/get_<attr>, e.g. set_size, set_fontname) or directly
    through obj_dict['attributes'].
    """

    def __init__(
        self,
        graph_name="",
        obj_dict=None,
        suppress_disconnected=False,
        simplify=False,
        **attrs
    ):
        super().__init__(
            graph_name=graph_name,
            obj_dict=obj_dict,
            suppress_disconnected=suppress_disconnected,
            simplify=simplify,
            **attrs,
        )
        # Only override the type for freshly built state; a supplied
        # obj_dict already carries its own.
        if obj_dict is None:
            self.obj_dict["type"] = "subgraph"
class Cluster(Graph):
    """A cluster subgraph in Graphviz's dot language.

    cluster(graph_name='subG', suppress_disconnected=False,
            attribute=value, ...)

    graph_name: the cluster's name; the string 'cluster_' is always
        prepended, which is how Graphviz recognizes clusters.
    suppress_disconnected: when True, disconnected nodes are removed
        from the cluster (defaults to False).

    All attributes defined in the Graphviz dot language should be
    supported, either via the dynamically generated accessors
    (set_<attr>/get_<attr>, e.g. set_color, set_fontname) or directly
    through obj_dict['attributes'].
    """

    def __init__(
        self,
        graph_name="subG",
        obj_dict=None,
        suppress_disconnected=False,
        simplify=False,
        **attrs
    ):
        super().__init__(
            graph_name=graph_name,
            obj_dict=obj_dict,
            suppress_disconnected=suppress_disconnected,
            simplify=simplify,
            **attrs,
        )
        # Only override type/name for freshly built state; a supplied
        # obj_dict already carries its own.
        if obj_dict is None:
            self.obj_dict["type"] = "subgraph"
            self.obj_dict["name"] = quote_if_necessary("cluster_" + graph_name)
        self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
    def __init__(self, *argsl, **argsd):
        """Initialize as a Graph and attach one create_<fmt>/write_<fmt>
        helper method per supported output format."""
        Graph.__init__(self, *argsl, **argsd)
        self.shape_files = list()
        # Output formats understood by the Graphviz renderers.
        self.formats = [
            "canon",
            "cmap",
            "cmapx",
            "cmapx_np",
            "dia",
            "dot",
            "fig",
            "gd",
            "gd2",
            "gif",
            "hpgl",
            "imap",
            "imap_np",
            "ismap",
            "jpe",
            "jpeg",
            "jpg",
            "mif",
            "mp",
            "pcl",
            "pdf",
            "pic",
            "plain",
            "plain-ext",
            "png",
            "ps",
            "ps2",
            "svg",
            "svgz",
            "vml",
            "vmlz",
            "vrml",
            "vtx",
            "wbmp",
            "xdot",
            "xlib",
        ]
        self.prog = "dot"
        # Automatically creates all
        # the methods enabling the creation
        # of output in any of the supported formats.
        for frmt in self.formats:

            def new_method(f=frmt, prog=self.prog, encoding=None):
                """Refer to docstring of method `create`."""
                # NOTE(review): `prog` is bound to self.prog at method
                # creation time ("dot"); a later set_prog() call does
                # not change these helpers' default — confirm intended.
                return self.create(format=f, prog=prog, encoding=encoding)

            name = "create_{fmt}".format(fmt=frmt)
            self.__setattr__(name, new_method)
        for frmt in self.formats + ["raw"]:

            def new_method(path, f=frmt, prog=self.prog, encoding=None):
                """Refer to docstring of method `write.`"""
                self.write(path, format=f, prog=prog, encoding=encoding)

            name = "write_{fmt}".format(fmt=frmt)
            self.__setattr__(name, new_method)
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
    def __setstate__(self, state):
        """Restore pickled state (the obj_dict produced by __getstate__)."""
        self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
If the graph needs graphic objects to
be used as shapes or otherwise
those need to be in the same folder as
the graph is going to be rendered
from. Alternatively the absolute path to
the files can be specified when
including the graphics in the graph.
The files in the location pointed to by
the path(s) specified as arguments
to this method will be copied to
the same temporary location where the
graph is going to be rendered.
"""
if isinstance(file_paths, str):
self.shape_files.append(file_paths)
if isinstance(file_paths, (list, tuple)):
self.shape_files.extend(file_paths)
def set_prog(self, prog):
    """Sets the default program.

    Sets the default program in charge of processing
    the dot file into a graph.

    Parameters
    ----------
    prog : str
        Name or path of the Graphviz executable (``create`` also
        accepts a list of program name plus command-line arguments).
    """
    self.prog = prog
def write(self, path, prog=None, format="raw", encoding=None):
    """Writes a graph to a file.

    Given a filename 'path' it will open/create and truncate
    such file and write on it a representation of the graph
    defined by the dot object in the format specified by
    'format' and using the encoding specified by `encoding` for text.

    The format 'raw' is used to dump the string representation
    of the Dot object, without further processing.
    The output can be processed by any of graphviz tools, defined
    in 'prog', which defaults to 'dot'.

    Returns True on success.  (Failures surface as exceptions raised
    by `create` or by opening/writing `path`; False is never
    returned.)

    There's also the preferred possibility of using:

        write_'format'(path, prog='program')

    which are automatically defined for all the supported formats.
    [write_ps(), write_gif(), write_dia(), ...]

    The encoding is passed to `open` [1].

    [1] https://docs.python.org/3/library/functions.html#open
    """
    if prog is None:
        prog = self.prog
    if format == "raw":
        # Dump the graph source itself as text.
        s = self.to_string()
        with io.open(path, mode="wt", encoding=encoding) as f:
            f.write(s)
    else:
        # Render through Graphviz and dump the binary result.
        s = self.create(prog, format, encoding=encoding)
        with io.open(path, mode="wb") as f:
            f.write(s)
    return True
def create(self, prog=None, format="ps", encoding=None):
    """Creates and returns a binary image for the graph.

    create will write the graph to a temporary dot file in the
    encoding specified by `encoding` and process it with the
    program given by 'prog' (which defaults to `self.prog`,
    normally 'dot'), reading the binary image output and return
    it as `bytes`.

    There's also the preferred possibility of using:

        create_'format'(prog='program')

    which are automatically defined for all the supported formats,
    for example:

    - `create_ps()`
    - `create_gif()`
    - `create_dia()`

    If 'prog' is a list, instead of a string,
    then the first item is expected to be the program name,
    followed by any optional command-line arguments for it:

        [ 'twopi', '-Tdot', '-s10' ]

    @param prog: either:

    - name of GraphViz executable that
      can be found in the `$PATH`, or

    - absolute path to GraphViz executable.

    If you have added GraphViz to the `$PATH` and
    use its executables as installed
    (without renaming any of them)
    then their names are:

    - `'dot'`
    - `'twopi'`
    - `'neato'`
    - `'circo'`
    - `'fdp'`
    - `'sfdp'`

    On Windows, these have the notorious ".exe" extension that,
    only for the above strings, will be added automatically.

    The `$PATH` is inherited from `os.env['PATH']` and
    passed to `subprocess.Popen` using the `env` argument.

    If you haven't added GraphViz to your `$PATH` on Windows,
    then you may want to give the absolute path to the
    executable (for example, to `dot.exe`) in `prog`.
    """
    if prog is None:
        prog = self.prog
    assert prog is not None

    if isinstance(prog, (list, tuple)):
        prog, args = prog[0], prog[1:]
    else:
        args = []

    # Write the graph to a closed temp file; Graphviz reads it by name.
    tmp_fd, tmp_name = tempfile.mkstemp()
    os.close(tmp_fd)
    self.write(tmp_name, encoding=encoding)
    tmp_dir = os.path.dirname(tmp_name)

    # Copy each required image file under the same basename into the
    # temporary directory.  Context managers guarantee the handles are
    # closed even if an I/O error occurs (the original leaked them on
    # exceptions).
    for img in self.shape_files:
        with open(img, "rb") as src:
            f_data = src.read()
        with open(os.path.join(tmp_dir, os.path.basename(img)), "wb") as dst:
            dst.write(f_data)

    arguments = ["-T{}".format(format)] + args + [tmp_name]

    try:
        stdout_data, stderr_data, process = call_graphviz(
            program=prog,
            arguments=arguments,
            working_dir=tmp_dir,
        )
    except OSError as e:
        if e.errno == errno.ENOENT:
            # Re-raise with a friendlier message naming the program.
            args = list(e.args)
            args[1] = '"{prog}" not found in path.'.format(prog=prog)
            raise OSError(*args)
        else:
            raise

    # clean file litter
    for img in self.shape_files:
        os.unlink(os.path.join(tmp_dir, os.path.basename(img)))

    os.unlink(tmp_name)

    if process.returncode != 0:
        # Print the full stdout/stderr before the assertion below fires.
        message = (
            '"{prog}" with args {arguments} returned code: {code}\n\n'
            "stdout, stderr:\n {out}\n{err}\n"
        ).format(
            prog=prog,
            arguments=arguments,
            code=process.returncode,
            out=stdout_data,
            err=stderr_data,
        )
        print(message)

    assert (
        process.returncode == 0
    ), '"{prog}" with args {arguments} returned code: {code}'.format(
        prog=prog,
        arguments=arguments,
        code=process.returncode,
    )

    return stdout_data
| {
"repo_name": "pydot/pydot",
"path": "pydot.py",
"copies": "1",
"size": "54493",
"license": "mit",
"hash": -6503652617289410000,
"line_mean": 28.7126499455,
"line_max": 79,
"alpha_frac": 0.546547263,
"autogenerated": false,
"ratio": 4.0299511906522705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000011853397183632829,
"num_lines": 1834
} |
"""An interface to interactact with the common data-store shared
between various bots and the fetcher bot
Currently, the data-store is simply in memory and acts more or less
as a stub, but we plan to have the information in a database that
can be accessed through this module.
May change into class if necessary for maintaining the database
connections.
"""
import tempfile
import models

# TODO All of the following are supposed to be stored into a database later

# Currently just a temp dir serving as the root for all repo checkouts.
BASE_DIR = tempfile.mkdtemp()  # TODO set this from a config file

# Currently an in-memory GitHub repo id -> local repo path mapping.
repo_id2dir = {}  # TODO change to database table

# Currently the population is triggered by the runtime with command-line
# args.
bots = []  # TODO load these based on database entries

# Bots will add their proposed pulls to this pile.
proposed_pulls = []

# Repos to analyze.
repos = []
def load_data(bot, repo):
    """Currently a stub to load necessary information.

    This should not be needed in the future, as the information will
    be stored in a database.
    """
    bots.append(bot)
    repos.append(repo)
def get_repos_signed_up_for_bot(bot):
    """Return a list of data_wrappers.Repo that have signed up for the
    given bot.
    """
    # TODO hook to the sign up database
    # This is currently a stub for the spelling bot: every known repo
    # is returned regardless of ``bot``.
    return repos
def get_repo_dir(repo_id):
    """Return the directory path of the project associated with repo_id."""
    if repo_id not in repo_id2dir:
        # TODO change this to be using a more permanent data storage.
        # First request for this repo: allocate a scratch directory
        # under BASE_DIR and remember the GitHub id -> path mapping.
        dir_path = tempfile.mkdtemp(dir=BASE_DIR)
        repo_id2dir[repo_id] = dir_path
        print(dir_path)
    return repo_id2dir[repo_id]
def get_bots():
    """Return a list of available bots."""
    return bots
def get_proposed_pull_requests():
    """Return a list of proposed pull requests that have not been
    processed.
    """
    return proposed_pulls
def propose_pull(bot, repo, branch_name, title, body):
    """Propose a pull request and enter it into the data-store,
    to be reviewed by the runtime.
    """
    proposed_pulls.append(models.PullRequest(bot,
                                             repo, branch_name, title, body)) | {
"repo_name": "AnalysisBots/runtime",
"path": "scripts/datastore.py",
"copies": "1",
"size": "2364",
"license": "mit",
"hash": -5040557377388072000,
"line_mean": 28.5625,
"line_max": 75,
"alpha_frac": 0.6861252115,
"autogenerated": false,
"ratio": 4.01358234295416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199707554454159,
"avg_score": null,
"num_lines": null
} |
"""An interface to manage files contained within a pod.
The File class should not be accessed directly. Instead, file objects should be
retrieved using the methods "create_file" or "get_file" from the Pod class.
pod = pods.Pod('/home/growler/my-site/')
You can create a new file.
pod.create_file('/README.md')
You can retrieve an existing file.
pod.get_file('/README.md')
You can delete a file.
file = pod.get_file('/README.md')
file.delete()
When using the File class, you must always use the file's "pod path" -- that is, the
file's absolute path within the pod, excluding the pod's root directory.
"""
from grow.common import utils
from grow.pods import messages
import mimetypes
import os

# Register text/plain for source formats the mimetypes db doesn't know.
mimetypes.add_type('text/plain', '.md')
mimetypes.add_type('text/plain', '.yaml')
mimetypes.add_type('text/plain', '.yml')

# blobstore is only importable when running on Google App Engine; fall
# back to None so callers can feature-detect it (see get_http_headers).
try:
    from google.appengine.ext import blobstore
except ImportError:
    blobstore = None
class Error(Exception):
    """Base class for errors raised by this module."""
class FileExistsError(Error):
    """Error for operations on a file that already exists.

    NOTE(review): no raise site is visible in this module; semantics
    inferred from the name -- confirm against callers.
    """
class FileDoesNotExistError(Error):
    """Raised when a pod file cannot be read (see File.get_content)."""
class File(object):
    """A single file inside a pod, addressed by its pod path.

    Instances should be obtained through ``Pod.create_file`` /
    ``Pod.get_file`` rather than constructed directly (see the module
    docstring).
    """

    def __init__(self, pod_path, pod):
        # Rejects invalid names before any storage access.
        utils.validate_name(pod_path)
        self.pod_path = pod_path  # absolute path within the pod
        self.pod = pod

    @classmethod
    def create(cls, pod_path, content, pod):
        """Create a file with the given content and return its File."""
        file_obj = cls(pod_path, pod)
        file_obj.update_content(content)
        return file_obj

    @classmethod
    def get(cls, pod_path, pod):
        """Return the File at pod_path.

        The content is read (and discarded) purely to verify the file
        exists; FileDoesNotExistError propagates if it cannot be read.
        """
        file_obj = cls(pod_path, pod)
        file_obj.get_content()
        return file_obj

    @property
    def mimetype(self):
        # Guessed from the file extension; None when unknown.
        return mimetypes.guess_type(self.pod_path)[0]

    @classmethod
    def list(cls, pod, prefix='/'):
        """Return File objects for every path under prefix, sorted."""
        paths = sorted(pod.list_dir(prefix))
        return [File(path, pod) for path in paths]

    def delete(self):
        """Delete the file from the pod's storage."""
        return self.pod.delete_file(self.pod_path)

    def move_to(self, dest_pod_path):
        """Move the file to another pod path."""
        self.pod.move_file_to(self.pod_path, dest_pod_path)

    def get_content(self):
        """Return the file's content.

        Raises FileDoesNotExistError when the underlying read fails
        with an IOError.
        """
        try:
            return self.pod.read_file(self.pod_path)
        except IOError as e:
            raise FileDoesNotExistError(e)

    def update_content(self, content):
        """Write content to the file; None is written as empty."""
        # NOTE: Python 2 code -- unicode content is encoded to UTF-8
        # bytes before being handed to the storage layer.
        if isinstance(content, unicode):
            content = content.encode('utf-8')
        if content is None:
            content = ''
        self.pod.write_file(self.pod_path, content)

    def get_http_headers(self):
        """Returns the HTTP headers used to serve this file."""
        headers = {}
        if self.mimetype:
            headers['Content-Type'] = self.mimetype
        # On App Engine backed by cloud storage, hand serving off to the
        # blobstore via the X-AppEngine-BlobKey header.
        if blobstore and self.pod.storage.is_cloud_storage:
            pod_path = self.pod_path.lstrip('/')
            root = '/' + self.pod.root.lstrip('/')
            path = '/gs' + os.path.join(root, pod_path)
            headers['X-AppEngine-BlobKey'] = blobstore.create_gs_key(path)
        return headers

    def to_message(self):
        """Return a messages.FileMessage describing this file."""
        message = messages.FileMessage()
        message.pod_path = self.pod_path
        message.mimetype = self.mimetype
        return message
| {
"repo_name": "vitorio/pygrow",
"path": "grow/pods/files.py",
"copies": "2",
"size": "2811",
"license": "mit",
"hash": 3199761876507939000,
"line_mean": 23.4434782609,
"line_max": 84,
"alpha_frac": 0.679473497,
"autogenerated": false,
"ratio": 3.4072727272727272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012203307924968177,
"num_lines": 115
} |
"""An interface to the NORB and Small NORB datasets.
Unlike ./norb_small.py, this reads the original NORB file format, not the
LISA lab's .npy version.
Download the datasets from:
Small NORB: http://www.cs.nyu.edu/~ylclab/data/norb-v1.0-small/
(big) NORB: http://www.cs.nyu.edu/~ylclab/data/norb-v1.0/
NORB and Small NORB datasets by Fu Jie Huang and Yann LeCun.
"""
from __future__ import print_function
__authors__ = "Guillaume Desjardins and Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __authors__.split(" and ")
__license__ = "3-clause BSD"
__maintainer__ = "Matthew Koichi Grimes"
__email__ = "mkg alum mit edu (@..)"
import os
import copy
import gzip
import bz2
import warnings
import functools
import numpy
import theano
from pylearn2.utils import safe_zip, string_utils
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.space import VectorSpace, Conv2DSpace, CompositeSpace
from pylearn2.datasets.filetensor import read_header
class NORB(DenseDesignMatrix):
    """
    A DenseDesignMatrix loaded with SmallNORB or NORB data.

    Keeps the data on memmap files on disk, to avoid taking up memory. This
    also speeds up instantiation time.

    Parameters
    ----------
    X : ndarray
        Design matrix. Each row contains the pixels of a grayscale stereo image
        pair.
    y : ndarray
        Design matrix of int32s. Each row contains the labels for the
        corresponding row in X.
    label_index_to_name : tuple
        Maps column indices of y to the name of that label (e.g. 'category',
        'instance', etc).
    label_name_to_index : dict
        Maps label names (e.g. 'category') to the corresponding column index in
        label.y.
    label_to_value_funcs : tuple
        A tuple of functions that map label values to the physical values they
        represent (for example, elevation angle in degrees).
    X_memmap_info, y_memmap_info : dict
        Constructor arguments for the memmaps self.X and self.y, used
        during pickling/unpickling.
    """

    def __init__(self, which_norb, which_set, image_dtype='uint8'):
        """
        Reads the specified NORB dataset from a memmap cache.
        Creates this cache first, if necessary.

        Parameters
        ----------
        which_norb : str
            Valid values: 'big' or 'small'.
            Chooses between the (big) 'NORB dataset', and the 'Small NORB
            dataset'.
        which_set : str
            Valid values: 'test', 'train', or 'both'.
            Chooses between the testing set or the training set. If 'both',
            the two datasets will be stacked together (testing data in the
            first N rows, then training data).
        image_dtype : str, or numpy.dtype
            The dtype to store image data as in the memmap cache.
            Default is uint8, which is what the original NORB files use.
        """
        if which_norb not in ('big', 'small'):
            raise ValueError("Expected which_norb argument to be either 'big' "
                             "or 'small', not '%s'" % str(which_norb))

        if which_set not in ('test', 'train', 'both'):
            raise ValueError("Expected which_set argument to be either 'test' "
                             "or 'train', not '%s'." % str(which_set))

        # This will check that dtype is a legitimate dtype string.
        image_dtype = numpy.dtype(image_dtype)

        # Maps column indices of self.y to the label type it contains.
        # Names taken from http://www.cs.nyu.edu/~ylclab/data/norb-v1.0/
        self.label_index_to_name = ('category',
                                    'instance',
                                    'elevation',
                                    'azimuth',
                                    'lighting condition')

        # Big NORB has additional label types
        if which_norb == 'big':
            self.label_index_to_name = (self.label_index_to_name +
                                        ('horizontal shift',  # in pixels
                                         'vertical shift',  # in pixels
                                         'lumination change',
                                         'contrast',
                                         'object scale',
                                         'rotation'))

        # Maps label type names to the corresponding column indices of self.y
        self.label_name_to_index = {}
        for index, name in enumerate(self.label_index_to_name):
            self.label_name_to_index[name] = index

        self.label_to_value_funcs = (get_category_value,
                                     get_instance_value,
                                     get_elevation_value,
                                     get_azimuth_value,
                                     get_lighting_value)
        if which_norb == 'big':
            self.label_to_value_funcs = (self.label_to_value_funcs +
                                         (get_horizontal_shift_value,
                                          get_vertical_shift_value,
                                          get_lumination_change_value,
                                          get_contrast_change_value,
                                          get_scale_change_value,
                                          get_rotation_change_value))

        # The size of one side of the image
        image_length = 96 if which_norb == 'small' else 108

        def read_norb_files(norb_files, output):
            """
            Reads the contents of a list of norb files into a matrix.
            Data is assumed to be in row-major order.
            """

            def read_norb_file(norb_file_path, debug=False):
                """
                Returns the numbers in a single NORB file as a 1-D ndarray.

                Parameters
                ----------
                norb_file_path : str
                    A NORB file from which to read.
                    Can be uncompressed (*.mat) or compressed (*.mat.gz).
                debug : bool
                    Set to True if you want debug printfs.
                """
                if not (norb_file_path.endswith(".mat") or
                        norb_file_path.endswith(".mat.gz")):
                    raise ValueError("Expected norb_file_path to end in "
                                     "either '.mat' or '.mat.gz'. Instead "
                                     "got '%s'" % norb_file_path)

                if not os.path.isfile(norb_file_path):
                    # BUGFIX: reversed() returns an iterator, which the %
                    # operator treats as a single argument and so raised
                    # "not enough arguments for format string"; materialize
                    # it as a tuple so both %s placeholders are filled.
                    raise IOError("Could not find NORB file '%s' in expected "
                                  "directory '%s'." %
                                  tuple(reversed(os.path.split(norb_file_path))))

                file_handle = (gzip.open(norb_file_path)
                               if norb_file_path.endswith('.mat.gz')
                               else open(norb_file_path))

                def readNums(file_handle, num_type, count):
                    """
                    Reads some numbers from a file and returns them as a
                    numpy.ndarray.

                    Parameters
                    ----------
                    file_handle : file handle
                        The file handle from which to read the numbers.
                    num_type : str, numpy.dtype
                        The dtype of the numbers.
                    count : int
                        Reads off this many numbers.
                    """
                    num_bytes = count * numpy.dtype(num_type).itemsize
                    string = file_handle.read(num_bytes)
                    return numpy.fromstring(string, dtype=num_type)

                (elem_type,
                 elem_size,
                 _num_dims,
                 shape,
                 num_elems) = read_header(file_handle, debug)
                del _num_dims

                beginning = file_handle.tell()

                result = None
                # gzip/bz2 handles don't support numpy.fromfile; read the
                # bytes manually instead.
                if isinstance(file_handle, (gzip.GzipFile, bz2.BZ2File)):
                    result = readNums(file_handle,
                                      elem_type,
                                      num_elems * elem_size).reshape(shape)
                else:
                    result = numpy.fromfile(file_handle,
                                            dtype=elem_type,
                                            count=num_elems).reshape(shape)

                return result  # end of read_norb_file()

            row_index = 0
            for norb_file in norb_files:
                print("copying NORB file %s" % os.path.split(norb_file)[1])
                norb_data = read_norb_file(norb_file)
                norb_data = norb_data.reshape(-1, output.shape[1])
                end_row = row_index + norb_data.shape[0]
                output[row_index:end_row, :] = norb_data
                row_index = end_row

            assert end_row == output.shape[0]  # end of read_norb_files

        if which_norb == 'small':
            training_set_size = 24300
            testing_set_size = 24300
        else:
            assert which_norb == 'big'
            num_rows_per_file = 29160
            training_set_size = num_rows_per_file * 10
            testing_set_size = num_rows_per_file * 2

        def load_images(which_norb, which_set, dtype):
            """
            Reads image data from memmap disk cache, if available. If not, then
            first builds the memmap file from the NORB files.

            Parameters
            ----------
            which_norb : str
                'big' or 'small'.
            which_set : str
                'test', 'train', or 'both'.
            dtype : numpy.dtype
                The dtype of the image memmap cache file. If a
                cache of this dtype doesn't exist, it will be created.
            """
            assert type(dtype) == numpy.dtype

            memmap_path = get_memmap_path(which_norb, 'images_%s' % str(dtype))
            row_size = 2 * (image_length ** 2)
            shape = (training_set_size + testing_set_size, row_size)

            def make_memmap():
                dat_files = get_norb_file_paths(which_norb, 'both', 'dat')

                memmap_dir = os.path.split(memmap_path)[0]
                if not os.path.isdir(memmap_dir):
                    os.mkdir(memmap_dir)

                print("Allocating memmap file %s" % memmap_path)
                writeable_memmap = numpy.memmap(filename=memmap_path,
                                                dtype=dtype,
                                                mode='w+',
                                                shape=shape)

                read_norb_files(dat_files, writeable_memmap)

            if not os.path.isfile(memmap_path):
                print("Caching images to memmap file. This "
                      "will only be done once.")
                make_memmap()

            images = numpy.memmap(filename=memmap_path,
                                  dtype=dtype,
                                  mode='r',
                                  shape=shape)
            # 'both' keeps the full stacked matrix.
            if which_set == 'train':
                images = images[:training_set_size, :]
            elif which_set == 'test':
                images = images[training_set_size:, :]

            return images

        def load_labels(which_norb, which_set):
            """
            Reads label data (both category and info data) from memmap disk
            cache, if available. If not, then first builds the memmap file from
            the NORB files.
            """
            memmap_path = get_memmap_path(which_norb, 'labels')
            dtype = numpy.dtype('int32')
            row_size = 5 if which_norb == 'small' else 11
            shape = (training_set_size + testing_set_size, row_size)

            def make_memmap():
                cat_files, info_files = [get_norb_file_paths(which_norb,
                                                             'both',
                                                             x)
                                         for x in ('cat', 'info')]

                memmap_dir = os.path.split(memmap_path)[0]
                if not os.path.isdir(memmap_dir):
                    os.mkdir(memmap_dir)

                print("allocating labels' memmap...")
                writeable_memmap = numpy.memmap(filename=memmap_path,
                                                dtype=dtype,
                                                mode='w+',
                                                shape=shape)
                print("... done.")

                cat_memmap = writeable_memmap[:, :1]   # 1st column
                info_memmap = writeable_memmap[:, 1:]  # remaining columns

                for norb_files, memmap in safe_zip((cat_files, info_files),
                                                   (cat_memmap, info_memmap)):
                    read_norb_files(norb_files, memmap)

            if not os.path.isfile(memmap_path):
                # BUGFIX: the original message said "Caching images" here,
                # but this path caches the labels.
                print("Caching labels to memmap file %s.\n"
                      "This will only be done once." % memmap_path)
                make_memmap()

            labels = numpy.memmap(filename=memmap_path,
                                  dtype=dtype,
                                  mode='r',
                                  shape=shape)

            if which_set == 'train':
                labels = labels[:training_set_size, :]
            elif which_set == 'test':
                labels = labels[training_set_size:, :]

            return labels

        def get_norb_dir(which_norb):
            # Resolve the dataset root from the standard pylearn2 env var.
            datasets_dir = os.getenv('PYLEARN2_DATA_PATH')
            if datasets_dir is None:
                raise RuntimeError("Please set the 'PYLEARN2_DATA_PATH' "
                                   "environment variable to tell pylearn2 "
                                   "where the datasets are.")

            if not os.path.isdir(datasets_dir):
                raise IOError("The PYLEARN2_DATA_PATH directory (%s) "
                              "doesn't exist." % datasets_dir)

            return os.path.join(datasets_dir,
                                'norb' if which_norb == 'big'
                                else 'norb_small')

        norb_dir = get_norb_dir(which_norb)

        def get_memmap_path(which_norb, file_basename):
            assert which_norb in ('big', 'small')
            assert (file_basename == 'labels' or
                    file_basename.startswith('images')), file_basename

            memmap_dir = os.path.join(norb_dir, 'memmaps_of_original')
            return os.path.join(memmap_dir, "%s.npy" % file_basename)

        def get_norb_file_paths(which_norb, which_set, norb_file_type):
            """
            Returns a list of paths for a given norb file type.

            For example,

                get_norb_file_paths('big', 'test', 'cat')

            Will return the category label files ('cat') for the big NORB
            dataset's test set.
            """
            assert which_set in ('train', 'test', 'both')

            if which_set == 'both':
                return (get_norb_file_paths(which_norb,
                                            'train',
                                            norb_file_type) +
                        get_norb_file_paths(which_norb,
                                            'test',
                                            norb_file_type))

            norb_file_types = ('cat', 'dat', 'info')
            if norb_file_type not in norb_file_types:
                raise ValueError("Expected norb_file_type to be one of %s, "
                                 "but it was '%s'" % (str(norb_file_types),
                                                      norb_file_type))

            # Object instances 0,1,2,3,5 form the test set; 4,6,7,8,9 train.
            instance_list = '01235' if which_set == 'test' else '46789'

            if which_norb == 'small':
                templates = ['smallnorb-5x%sx9x18x6x2x96x96-%sing-%%s.mat' %
                             (instance_list, which_set)]
            else:
                numbers = range(1, 3 if which_set == 'test' else 11)
                templates = ['norb-5x%sx9x18x6x2x108x108-%sing-%02d-%%s.mat' %
                             (instance_list, which_set, n) for n in numbers]

            original_files_dir = os.path.join(norb_dir, 'original')
            return [os.path.join(original_files_dir, t % norb_file_type)
                    for t in templates]

        def make_view_converter(which_norb, which_set):
            image_length = 96 if which_norb == 'small' else 108
            datum_shape = (2,  # number of images per stereo pair
                           image_length,  # image height
                           image_length,  # image width
                           1)  # number of channels
            axes = ('b', 's', 0, 1, 'c')
            return StereoViewConverter(datum_shape, axes)

        images = load_images(which_norb, which_set, image_dtype)
        labels = load_labels(which_norb, which_set)
        view_converter = make_view_converter(which_norb, which_set)

        super(NORB, self).__init__(X=images,
                                   y=labels,
                                   y_labels=numpy.max(labels) + 1,
                                   view_converter=view_converter)

        # Needed for pickling / unpickling.
        # These are set during pickling, by __getstate__()
        self.X_memmap_info = None
        self.y_memmap_info = None

    @functools.wraps(DenseDesignMatrix.get_topological_view)
    def get_topological_view(self, mat=None, single_tensor=False):
        """
        Return a topological view.

        Parameters
        ----------
        mat : ndarray
            A design matrix of images, one per row.
        single_tensor : bool
            If True, returns a single tensor. If False, returns separate
            tensors for the left and right stereo images.

        returns : ndarray, tuple
            If single_tensor is True, returns ndarray.
            Else, returns the tuple (left_images, right_images).
        """
        # Get topo view from view converter.
        result = super(NORB, self).get_topological_view(mat)

        # If single_tensor is True, merge the left and right image tensors
        # into a single stereo tensor.
        if single_tensor:
            # Check that the view_converter has a stereo axis, and that it
            # returned a tuple (left_images, right_images)
            if 's' not in self.view_converter.axes:
                raise ValueError('self.view_converter.axes must contain "s" '
                                 '(stereo image index) in order to split the '
                                 'images into left and right images. Instead, '
                                 'the axes were %s.'
                                 % str(self.view_converter.axes))
            assert isinstance(result, tuple)
            assert len(result) == 2

            axes = list(self.view_converter.axes)
            s_index = axes.index('s')
            assert axes.index('b') == 0
            num_image_pairs = result[0].shape[0]
            shape = (num_image_pairs, ) + self.view_converter.shape

            # inserts a singleton dimension where the 's' dimension will be
            mono_shape = shape[:s_index] + (1, ) + shape[(s_index + 1):]

            result = tuple(t.reshape(mono_shape) for t in result)
            result = numpy.concatenate(result, axis=s_index)

        return result

    def __getstate__(self):
        """
        Support method for pickling. Returns the complete state of this object
        as a dictionary, which is then pickled.

        This state does not include the memmaps' contents. Rather, it includes
        enough info to find the memmap and re-load it from disk in the same
        state.

        Note that pickling a NORB will set its memmaps (self.X and self.y) to
        be read-only. This is to prevent the memmaps from accidentally being
        edited after the save. To make them writeable again, the user must
        explicitly call setflags(write=True) on the memmaps.
        """
        _check_pickling_support()

        result = copy.copy(self.__dict__)

        assert isinstance(self.X, numpy.memmap), ("Expected X to be a memmap, "
                                                  "but it was a %s." %
                                                  str(type(self.X)))
        assert isinstance(self.y, numpy.memmap), ("Expected y to be a memmap, "
                                                  "but it was a %s." %
                                                  str(type(self.y)))

        # We don't want to pickle the memmaps; they're already on disk.
        del result['X']
        del result['y']

        # Replace memmaps with their constructor arguments
        def get_memmap_info(memmap):
            assert isinstance(memmap, numpy.memmap)

            if not isinstance(memmap.filename, str):
                raise ValueError("Expected memmap.filename to be a str; "
                                 "instead got a %s, %s" %
                                 (type(memmap.filename), str(memmap.filename)))

            # (Removed a dead local "result = {}" that was immediately
            # shadowed by the returned dict literal.)

            def get_relative_path(full_path):
                """
                Returns the relative path to the PYLEARN2_DATA_PATH.
                """
                data_dir = string_utils.preprocess('${PYLEARN2_DATA_PATH}')
                if not memmap.filename.startswith(data_dir):
                    raise ValueError("Expected memmap.filename to start with "
                                     "the PYLEARN2_DATA_PATH (%s). Instead it "
                                     "was %s." % (data_dir, memmap.filename))

                return os.path.relpath(full_path, data_dir)

            return {'filename': get_relative_path(memmap.filename),
                    'dtype': memmap.dtype,
                    'shape': memmap.shape,
                    'offset': memmap.offset,
                    # We never want to set mode to w+, even if memmap.mode
                    # is w+. Otherwise we'll overwrite the memmap's contents
                    # when we open it.
                    'mode': 'r+' if memmap.mode in ('r+', 'w+') else 'r'}

        result['X_info'] = get_memmap_info(self.X)
        result['y_info'] = get_memmap_info(self.y)

        # This prevents self.X and self.y from being accidentally written to
        # after the save, thus unexpectedly changing the saved file. If the
        # user really wants to, they can make the memmaps writeable again
        # by calling setflags(write=True) on the memmaps.
        for memmap in (self.X, self.y):
            memmap.flush()
            memmap.setflags(write=False)

        return result

    def __setstate__(self, state):
        """
        Support method for unpickling. Takes a 'state' dictionary and
        interprets it in order to set this object's fields.
        """
        _check_pickling_support()

        X_info = state['X_info']
        y_info = state['y_info']
        del state['X_info']
        del state['y_info']

        self.__dict__.update(state)

        def load_memmap_from_info(info):
            # Converts filename from relative to absolute path.
            data_dir = string_utils.preprocess('${PYLEARN2_DATA_PATH}')
            info['filename'] = os.path.join(data_dir, info['filename'])

            shape = info['shape']
            offset = info['offset']

            if offset == 0:
                del info['offset']
                return numpy.memmap(**info)
            else:
                # numpy.memmap with a nonzero offset can't also take the
                # full shape; map flat, then reshape.
                del info['shape']
                result = numpy.memmap(**info)
                return result.reshape(shape)

        self.X = load_memmap_from_info(X_info)
        self.y = load_memmap_from_info(y_info)
class StereoViewConverter(object):
    """
    Converts stereo image data between two formats:

    A) A dense design matrix, one stereo pair per row (VectorSpace)
    B) An image pair (CompositeSpace of two Conv2DSpaces)

    Parameters
    ----------
    shape : tuple
        See doc for __init__'s <shape> parameter.
    """

    def __init__(self, shape, axes=None):
        """
        The arguments describe how the data is laid out in the design matrix.

        Parameters
        ----------
        shape : tuple
            A tuple of 4 ints, describing the shape of each datum.
            This is the size of each axis in <axes>, excluding the 'b' axis.
        axes : tuple
            A tuple of the following elements in any order:
            'b'  batch axis
            's'  stereo axis
            0    image axis 0 (row)
            1    image axis 1 (column)
            'c'  channel axis
        """
        shape = tuple(shape)

        if not all(isinstance(s, int) for s in shape):
            raise TypeError("Shape must be a tuple/list of ints")

        if len(shape) != 4:
            raise ValueError("Shape array needs to be of length 4, got %s." %
                             shape)

        # Sanity-check that the stereo axis holds exactly a left/right pair.
        datum_axes = list(axes)
        datum_axes.remove('b')
        if shape[datum_axes.index('s')] != 2:
            raise ValueError("Expected 's' axis to have size 2, got %d.\n"
                             " axes: %s\n"
                             " shape: %s" %
                             (shape[datum_axes.index('s')],
                              axes,
                              shape))
        self.shape = shape
        self.set_axes(axes)

        def make_conv2d_space(shape, axes):
            # Builds the single-eye image space: drop 'b' to index shape,
            # drop 's' from the axes handed to Conv2DSpace.
            shape_axes = list(axes)
            shape_axes.remove('b')
            image_shape = tuple(shape[shape_axes.index(axis)]
                                for axis in (0, 1))
            conv2d_axes = list(axes)
            conv2d_axes.remove('s')
            return Conv2DSpace(shape=image_shape,
                               num_channels=shape[shape_axes.index('c')],
                               axes=conv2d_axes,
                               dtype=None)

        conv2d_space = make_conv2d_space(shape, axes)
        # Topological format: a (left, right) pair of identical conv spaces.
        self.topo_space = CompositeSpace((conv2d_space, conv2d_space))
        # Flat design-matrix format: one stereo pair per row.
        self.storage_space = VectorSpace(dim=numpy.prod(shape))

    def get_formatted_batch(self, batch, space):
        """
        Returns a batch formatted to a space.

        Parameters
        ----------
        batch : ndarray
            The batch to format
        space : a pylearn2.space.Space
            The target space to format to.
        """
        return self.storage_space.np_format_as(batch, space)

    def design_mat_to_topo_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_formatted_view(), get_batch_topo()

        Parameters
        ----------
        design_mat : ndarray
        """
        return self.storage_space.np_format_as(design_mat, self.topo_space)

    def design_mat_to_weights_view(self, design_mat):
        """
        Called by DenseDesignMatrix.get_weights_view()

        Parameters
        ----------
        design_mat : ndarray
        """
        return self.design_mat_to_topo_view(design_mat)

    def topo_view_to_design_mat(self, topo_batch):
        """
        Used by DenseDesignMatrix.set_topological_view(), .get_design_mat()

        Parameters
        ----------
        topo_batch : ndarray
        """
        return self.topo_space.np_format_as(topo_batch, self.storage_space)

    def view_shape(self):
        """
        Returns the 4-element datum shape: the sizes of the non-batch
        axes, ordered according to the current axis ordering.
        """
        return self.shape

    def weights_view_shape(self):
        """
        Returns the same shape as ``view_shape()``; weights are viewed
        with the same topology as the data.
        """
        return self.view_shape()

    def set_axes(self, axes):
        """
        Change the order of the axes.

        Parameters
        ----------
        axes : tuple
            Must have length 5, must contain 'b', 's', 0, 1, 'c'.
        """
        axes = tuple(axes)

        if len(axes) != 5:
            raise ValueError("Axes must have 5 elements; got %s" % str(axes))

        for required_axis in ('b', 's', 0, 1, 'c'):
            if required_axis not in axes:
                raise ValueError("Axes must contain 'b', 's', 0, 1, and 'c'. "
                                 "Got %s." % str(axes))

        if axes.index('b') != 0:
            raise ValueError("The 'b' axis must come first (axes = %s)." %
                             str(axes))

        def remove_b_axis(axes):
            axes = list(axes)
            axes.remove('b')
            return tuple(axes)

        if hasattr(self, 'axes'):
            # Reorders the shape vector to match the new axis ordering.
            assert hasattr(self, 'shape')

            old_axes = remove_b_axis(self.axes)  # pylint: disable-msg=E0203
            new_axes = remove_b_axis(axes)
            new_shape = tuple(self.shape[old_axes.index(a)] for a in new_axes)
            self.shape = new_shape

        self.axes = axes
def _check_is_integral(name, label):
    """Raise TypeError unless *label* has an integral dtype."""
    if numpy.issubdtype(type(label), numpy.integer):
        return
    raise TypeError("Expected %s label to be an integral dtype, not %s" %
                    (name, type(label)))
def _check_range(name, label, min_label, max_label):
    """Raise ValueError unless min_label <= label <= max_label.

    Parameters
    ----------
    name : str
        Label-type name used in the error message.
    label, min_label, max_label : int
        The value to check and the inclusive bounds.
    """
    if label < min_label or label > max_label:
        # Fixed a doubled comma in the original message ("inclusive, ,").
        raise ValueError("Expected %s label to be between %d "
                         "and %d inclusive, but got %s" %
                         (name, min_label, max_label, str(label)))
def _get_array_element(name, label, array):
    # Validates that *label* is an integral index within *array*'s bounds,
    # then returns the element it selects.
    _check_is_integral(name, label)
    _check_range(name, label, 0, len(array) - 1)
    return array[label]
def get_category_value(label):
    """
    Returns the category name represented by a category label int.

    Parameters
    ----------
    label: int
        Category label.
    """
    category_names = ('animal',
                      'human',
                      'airplane',
                      'truck',
                      'car',
                      'blank')
    return _get_array_element('category', label, category_names)
def _check_range_and_return(name,
                            label,
                            min_label,
                            max_label,
                            none_label=None):
    """Validate an integral label within [min_label, max_label].

    Returns the label itself, except that a value equal to
    *none_label* is mapped to None.
    """
    _check_is_integral(name, label)
    _check_range(name, label, min_label, max_label)
    if label == none_label:
        return None
    return label
def get_instance_value(label):
    """
    Returns the instance value corresponding to an instance label int.

    The value is the int itself. This just sanity-checks the label for range
    errors (-1 through 9; -1 maps to None).

    Parameters
    ----------
    label: int
        Instance label.
    """
    return _check_range_and_return('instance', label, -1, 9, -1)
def get_elevation_value(label):
    """
    Returns the angle in degrees represented by an elevation label int.

    Parameters
    ----------
    label: int
        Elevation label.
    """
    _check_is_integral('elevation', label)
    _check_range('elevation', label, -1, 8)
    # -1 is the sentinel for "no elevation"; labels 0..8 map linearly
    # to 30..70 degrees in 5-degree steps.
    return None if label == -1 else label * 5 + 30
def get_azimuth_value(label):
    """
    Returns the angle in degrees represented by an azimuth label int.

    Parameters
    ----------
    label: int
        Azimuth label.
    """
    _check_is_integral('azimuth', label)
    # -1 is the sentinel for "no azimuth".
    if label == -1:
        return None
    # Valid labels are the even numbers 0..34; each step is 10 degrees.
    if label < 0 or label > 34 or (label % 2) != 0:
        raise ValueError("Expected azimuth to be an even "
                         "number between 0 and 34 inclusive, "
                         "or -1, but got %s instead." %
                         str(label))
    return label * 10
def get_lighting_value(label):
    """
    Returns the value corresponding to a lighting label int.

    The value is the int itself (-1 maps to None); this just sanity-checks
    the label for range errors.

    Parameters
    ----------
    label: int
        Lighting label, in [-1, 5].
    """
    return _check_range_and_return('lighting', label, -1, 5, none_label=-1)
def get_horizontal_shift_value(label):
    """
    Returns the value corresponding to a horizontal shift label int.

    The value is the int itself; this just sanity-checks the label for
    range errors.

    Parameters
    ----------
    label: int
        Horizontal shift label, in [-5, 5].
    """
    low, high = -5, 5
    return _check_range_and_return('horizontal shift', label, low, high)
def get_vertical_shift_value(label):
    """
    Returns the value corresponding to a vertical shift label int.

    The value is the int itself; this just sanity-checks the label for
    range errors.

    Parameters
    ----------
    label: int
        Vertical shift label, in [-5, 5].
    """
    low, high = -5, 5
    return _check_range_and_return('vertical shift', label, low, high)
def get_lumination_change_value(label):
    """
    Returns the value corresponding to a lumination change label int.

    The value is the int itself; this just sanity-checks the label for
    range errors.

    Parameters
    ----------
    label: int
        Lumination change label, in [-19, 19].
    """
    low, high = -19, 19
    return _check_range_and_return('lumination_change', label, low, high)
def get_contrast_change_value(label):
    """
    Returns the float value represented by a contrast change label int.

    Parameters
    ----------
    label: int
        Contrast change label (0 or 1).
    """
    values = (0.8, 1.3)
    return _get_array_element('contrast change', label, values)
def get_scale_change_value(label):
    """
    Returns the float value represented by a scale change label int.

    Parameters
    ----------
    label: int
        Scale change label (0 or 1).
    """
    values = (0.78, 1.0)
    return _get_array_element('scale change', label, values)
def get_rotation_change_value(label):
    """
    Returns the value corresponding to a rotation change label int.

    The value is the int itself; this just sanity-checks the label for
    range errors.

    Parameters
    ----------
    label: int
        Rotation change label, in [-4, 4].
    """
    low, high = -4, 4
    return _check_range_and_return('rotation change', label, low, high)
def _check_pickling_support():
# Reads the first two components of the version number as a floating point
# number.
version = float('.'.join(numpy.version.version.split('.')[:2]))
if version < 1.7:
msg = ("Pickling NORB is disabled for numpy versions less "
"than 1.7, due to a bug in 1.6.x that causes memmaps "
"to interact poorly with pickling.")
raise NotImplementedError(msg)
| {
"repo_name": "nouiz/pylearn2",
"path": "pylearn2/datasets/new_norb.py",
"copies": "44",
"size": "34651",
"license": "bsd-3-clause",
"hash": 8412076440644270000,
"line_mean": 34.3221202854,
"line_max": 79,
"alpha_frac": 0.5097111194,
"autogenerated": false,
"ratio": 4.302868496212591,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""An internal(!) helpers module"""
class StreamlikeException(Exception):
    """Base class for all Streamlike API errors.

    Attributes:
        code: HTTP status code handled by the subclass (None on the base).
        result: optional payload attached to the error.
        message: human-readable error text (set only when non-empty).
    """
    code = None

    def __init__(self, message, result=None):
        super(StreamlikeException, self).__init__(message)
        # Only store a .message attribute when there is actual text.
        if message:
            self.message = message
        self.result = result
# One StreamlikeException subclass per HTTP status code; _find_exceptions
# (below) registers them by their `code` attribute for lookup on failure.
class BadRequest(StreamlikeException):
    """Signifies HTTP code 400 (406 is handled by NotAcceptable)."""
    code = 400
class AuthenticationError(StreamlikeException):
    """Signifies HTTP code 401"""
    code = 401
class ResourceNotFound(StreamlikeException):
    """Signifies HTTP code 404"""
    code = 404
class NotAcceptable(StreamlikeException):
    """Signifies HTTP code 406"""
    code = 406
class RequestTooLarge(StreamlikeException):
    """Signifies HTTP code 413"""
    code = 413
class FileTypeUnsupported(StreamlikeException):
    """Signifies HTTP code 415"""
    code = 415
class TooManyRequests(StreamlikeException):
    """Signifies HTTP code 429"""
    code = 429
# The 5xx classes also provide a default class-level `message`.
class ServerError(StreamlikeException):
    """Signifies HTTP code 500"""
    code = 500
    message = "Internal Server Error"
class BadGatewayError(StreamlikeException):
    """Signifies HTTP code 502"""
    code = 502
    message = "Bad Gateway"
class ServiceUnavailableError(StreamlikeException):
    """Signifies HTTP code 503"""
    code = 503
    message = "Service is unavailable"
def get_error(response):
    """Extract a human-readable error string from a JSON response body.

    Looks for a "message" key first, then an "error" key.  Returns '' when
    the body is not valid JSON or carries neither key.
    """
    try:
        body = response.json()
    except ValueError:
        return ''
    for key in ("message", "error"):
        if key in body:
            return body[key]
    return ''
def raise_errors_on_failure(response):
    """Raise the mapped StreamlikeException for 4xx/5xx responses.

    Returns the response unchanged when its status code is below 400;
    otherwise delegates to search_for_exception, which always raises.
    """
    if response.status_code < 400:
        return response
    search_for_exception(response.status_code, get_error(response))
    return response  # unreachable: search_for_exception always raises
# The code that follows is stolen from werkzeug:
# https://github.com/mitsuhiko/werkzeug/blob/d4e8b3f46c51e7374388791282e66323f64b3068/werkzeug/exceptions.py
# Maps HTTP status code -> registered StreamlikeException subclass.
_exceptions = {}
__all__ = ['StreamlikeException',
           'raise_errors_on_failure']
def _find_exceptions():
    # Scan module globals for StreamlikeException subclasses that declare
    # a status code, register them in _exceptions, and export their names
    # via __all__.
    for name, obj in globals().items():
        try:
            is_http_exception = issubclass(obj, StreamlikeException)
        except TypeError:
            # Non-class globals (modules, functions, ...) are skipped.
            is_http_exception = False
        if not is_http_exception or obj.code is None:
            continue
        __all__.append(obj.__name__)
        old_obj = _exceptions.get(obj.code, None)
        if old_obj is not None and issubclass(obj, old_obj):
            # A subclass of an already-registered class shares its code;
            # keep the registered (more general) class.
            continue
        _exceptions[obj.code] = obj
_find_exceptions()
del _find_exceptions  # one-shot helper; keep the module namespace clean
def search_for_exception(code, msg):
    """Raise the StreamlikeException subclass registered for *code*.

    Raises LookupError when no exception class is mapped to the code;
    otherwise always raises the mapped exception carrying *msg*.
    """
    if code in _exceptions:
        raise _exceptions[code](msg)
    raise LookupError('no exception for %r' % code)
"repo_name": "brightforme/python-streamlike",
"path": "streamlike/helpers.py",
"copies": "1",
"size": "2745",
"license": "mit",
"hash": 4722507884618274000,
"line_mean": 25.9215686275,
"line_max": 108,
"alpha_frac": 0.6480874317,
"autogenerated": false,
"ratio": 3.9439655172413794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005174236988569532,
"num_lines": 102
} |
"""An internal module for wrapping the use of mappers."""
def make_mapper(mapper_class):
    """Wrap a mapper.

    This makes a mapper wrapped with a few convenient tools for managing
    mappers with scalar bars in a consistent way since not all mapper classes
    have scalar ranges and lookup tables.

    Parameters
    ----------
    mapper_class : type
        Concrete mapper class that the helper dynamically inherits from.

    Returns
    -------
    MapperHelper
        A freshly constructed instance of the dynamically created subclass.
    """
    class MapperHelper(mapper_class):
        """A helper that dynamically inherits the mapper's class."""
        def __init__(self, *args, **kwargs):
            # NOTE(review): *args/**kwargs are accepted but never forwarded
            # to mapper_class.__init__ -- confirm the wrapped classes do not
            # require constructor arguments.
            self._scalar_range = None
            self._lut = None
        @property
        def scalar_range(self):
            # Prefer the live value when the base class exposes a getter;
            # fall back to the cached value otherwise.
            if hasattr(self, 'GetScalarRange'):
                self._scalar_range = self.GetScalarRange()
            return self._scalar_range
        @scalar_range.setter
        def scalar_range(self, clim):
            if hasattr(self, 'SetScalarRange'):
                self.SetScalarRange(*clim)
            if self.lookup_table is not None:
                # Keep the lookup table's range in sync with the mapper's.
                self.lookup_table.SetRange(*clim)
            self._scalar_range = clim
        @property
        def lookup_table(self):
            if hasattr(self, 'GetLookupTable'):
                self._lut = self.GetLookupTable()
            return self._lut
        @lookup_table.setter
        def lookup_table(self, lut):
            if hasattr(self, 'SetLookupTable'):
                self.SetLookupTable(lut)
            self._lut = lut
    return MapperHelper()
| {
"repo_name": "akaszynski/vtkInterface",
"path": "pyvista/plotting/mapper.py",
"copies": "1",
"size": "1417",
"license": "mit",
"hash": 2846052007923669000,
"line_mean": 28.5208333333,
"line_max": 77,
"alpha_frac": 0.5800988003,
"autogenerated": false,
"ratio": 4.428125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.55082238003,
"avg_score": null,
"num_lines": null
} |
# An Internet checksum algorithm using Python.
# This program is licensed under the GPL; see LICENSE for details.
# This procedure can be used to calculate the Internet checksum of
# some data. It is adapted from RFC 1071:
#
# ftp://ftp.isi.edu/in-notes/rfc1071.txt
#
# See also:
#
# http://www.netfor2.com/ipsum.htm
# http://www.netfor2.com/checksum.html
def ichecksum(data, sum=0):
    """ Compute the Internet Checksum (RFC 1071) of the supplied data.

    The checksum is initialized to zero.  Place the return value in the
    checksum field of a packet.  On receipt, pass the packet's checksum
    field as *sum* together with the data; a result of zero means the
    checksum detected no error.
    """
    # The parameter keeps the name "sum" (shadowing the builtin) so the
    # public keyword interface is unchanged.
    total = sum
    length = len(data)
    for i in range(0, length, 2):
        if i + 1 < length:
            # Fold two adjacent 8-bit characters into one 16-bit word.
            total += ((ord(data[i]) << 8) & 0xFF00) + (ord(data[i + 1]) & 0xFF)
        else:
            # Odd trailing byte is added on its own.
            total += ord(data[i]) & 0xFF
    # Fold the carries above 16 bits back into the low 16 bits.
    while (total >> 16) > 0:
        total = (total & 0xFFFF) + (total >> 16)
    # One's complement, truncated to 16 bits.
    return ~total & 0xFFFF
# Demo (Python 2 print syntax): checksumming a payload together with its
# own checksum verifies to 0.
check = ichecksum('list\n')
print check
print ichecksum('list\n',check)
| {
"repo_name": "mdelatorre/checksum",
"path": "ichecksum.py",
"copies": "1",
"size": "1348",
"license": "cc0-1.0",
"hash": 727103883265623400,
"line_mean": 31.0952380952,
"line_max": 77,
"alpha_frac": 0.6424332344,
"autogenerated": false,
"ratio": 3.4564102564102566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9584068830946311,
"avg_score": 0.002954931972789116,
"num_lines": 42
} |
# An internet operator plans to connect a business park to the optical fiber
# network. The area to be covered is large and the operator is asking you to
# write a program that will calculate the minimum length of optical fiber cable
# required to connect all buildings.
# For the implementation of the works, the operator has technical constraints
# whereby it is forced to proceed in the following manner:
# A main cable will cross through the park from the West to the East (from the
# position x of the most westerly building to the position x of the most
# easterly building).
# For each building, a dedicated cable will connect from the building to the
# main cable by a minimal path (North or South). The minimum length will
# therefore depend on the position of the main cable.
from statistics import median
# Read the number of buildings, then one "x y" coordinate pair per line.
num_buildings = int(input())
coords = [tuple(map(int, input().split())) for _ in range(num_buildings)]
xs, ys = zip(*coords)
# The main cable spans the full x extent; each building's branch runs
# vertically to the cable.  Placing the cable at the median y minimizes
# the total vertical distance.
median_y = int(median(ys))
total_length = max(xs) - min(xs) + sum(abs(median_y - y) for y in ys)
print(total_length)
"repo_name": "Pouf/CodingCompetition",
"path": "CG/medium_network-cabling.py",
"copies": "1",
"size": "1034",
"license": "mit",
"hash": -5213598350318899000,
"line_mean": 39.44,
"line_max": 79,
"alpha_frac": 0.7292069632,
"autogenerated": false,
"ratio": 3.577854671280277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48070616344802775,
"avg_score": null,
"num_lines": null
} |
# An Introduction to Interactive Programming in Python (Part 1)
# Challenges regarding triangle perimiter and area
# including chellenge 11. from functions excercises
import math
from string import split
class My_Point(object):
    """A 2-D point supporting repr and Euclidean distance."""
    # Class-level defaults, kept from the original implementation.
    __x = 0.0
    __y = 0.0

    def __init__(self, x, y):
        self.__x = x
        self.__y = y

    def __repr__(self):
        return "(%g, %g)" % (self.__x, self.__y)

    def distance(self, other_point):
        """Return the Euclidean distance to *other_point*."""
        dx = self.__x - other_point.__x
        dy = self.__y - other_point.__y
        return math.sqrt(dx ** 2 + dy ** 2)
class My_Triangle(object):
    # Triangle defined by three My_Point nodes.  Side lengths, perimeter
    # and area (Heron's formula) are all computed once, at construction.
    __node1 = My_Point(0,0)
    __node2 = My_Point(0,0)
    __node3 = My_Point(0,0)
    __a = 0.0 # side __a: __node1 : __node2
    __b = 0.0 # side __b: __node2 : __node3
    __c = 0.0 # side __c: __node1 : __node3
    __s = 0.0
    __perimeter = 0.0
    __area = 0.0
    # calculate length of each side
    def __calc_sides(self):
        self.__a = self.__node1.distance(self.__node2)
        self.__b = self.__node2.distance(self.__node3)
        self.__c = self.__node1.distance(self.__node3)
    # http://en.wikipedia.org/wiki/Heron's_formula
    # semiperimeter
    def __calc_s(self):
        self.__s = 0.5*sum([self.__a, self.__b, self.__c])
    def __calc_perimeter(self):
        # Perimeter is twice the semiperimeter.
        self.__perimeter= 2*self.__s
    # http://en.wikipedia.org/wiki/Heron's_formula
    # area of triangle
    def __calc_area(self):
        x = self.__s*(self.__s-self.__a)*(self.__s-self.__b)*(self.__s-self.__c)
        self.__area = math.sqrt(x)
    def __init__(self, node1, node2, node3):
        self.__node1 = node1
        self.__node2 = node2
        self.__node3 = node3
        # Computation order matters: sides -> semiperimeter -> the rest.
        self.__calc_sides()
        self.__calc_s()
        self.__calc_perimeter()
        self.__calc_area()
    # The print_* methods use Python 2 print-statement syntax.
    def print_perimeter(self):
        print "Triangle %s, %s, %s has a perimeter of %.1f." \
        %(self.__node1, self.__node2, self.__node3, self.__perimeter)
    def print_area(self):
        print "Triangle %s, %s, %s has a area of %.1f." \
        %(self.__node1, self.__node2, self.__node3, self.__area)
# User input
def get_point(point_name):
    # Prompt (Python 2 raw_input) for comma-separated "x,y" coordinates,
    # parse both as floats and return a My_Point.  Any parse failure
    # prints a message and aborts the program via quit().
    dlm = ','
    inp = raw_input("Enter coordinates for %s, spearated by '%s'." %(point_name, dlm))
    inp_splited = inp.split(dlm)
    try:
        x0 = float(inp_splited[0])
        x1 = float(inp_splited[1])
        return My_Point(x0, x1)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        print "Invalid point coordinates"
        quit()
# Interactive demo: read three points from the user and report the area.
x = get_point("x")
y = get_point("y")
z = get_point("z")
print x, y, z
t = My_Triangle(x,y,z)
t.print_area()
# NOTE(review): quit() here makes the three hard-coded triangle examples
# below unreachable.
quit()
# Triangle 1
n1 = My_Point(0,0)
n2 = My_Point(3,4)
n3 = My_Point(1,1)
t1 = My_Triangle(n1,n2,n3)
t1.print_perimeter()
t1.print_area()
# Triangle 2
n1 = My_Point(-2,4)
n2 = My_Point(1,6)
n3 = My_Point(2,1)
t2 = My_Triangle(n1,n2,n3)
t2.print_perimeter()
t2.print_area()
# Triangle 3
n1 = My_Point(10,0)
n2 = My_Point(0,0)
n3 = My_Point(0,10)
t3 = My_Triangle(n1,n2,n3)
t3.print_perimeter()
t3.print_area()
"repo_name": "polde-live/interprog1",
"path": "exercises/triangles.py",
"copies": "1",
"size": "3021",
"license": "unlicense",
"hash": -3505459968177569000,
"line_mean": 24.8290598291,
"line_max": 89,
"alpha_frac": 0.5475008275,
"autogenerated": false,
"ratio": 2.776654411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3824155239264706,
"avg_score": null,
"num_lines": null
} |
# # An Introduction to Interactive Programming in Python (Part 1)
# Practice exercise for interactive applications
def hl():
    # Print a horizontal-rule separator (Python 2 print statement).
    print "*" * 20
# 1. pring_goodbye
def print_goodbye():
    # Prints "Goodbye" from a LOCAL variable; the module-level `message`
    # is deliberately left untouched (contrast with set_goodbye below).
    message = "Goodbye"
    print message
# Tests
message = "Hello"
print message
print_goodbye()
print message
message = "Ciao"
print message
print_goodbye()
print message
hl()
# 2. set_goodbye
def set_goodbye():
    # Rebinds the MODULE-LEVEL `message` (note the `global` declaration),
    # so the change is visible to the callers below.
    global message
    message = "Goodbye"
    print message
# Tests
message = "Hello"
print message
set_goodbye()
print message
message = "Ciao"
print message
set_goodbye()
print message
hl()
# 4. "My first frame"
# Requires the third-party simpleguitk package (a desktop stand-in for
# CodeSkulptor's simplegui).  Python 2 print syntax throughout.
import simpleguitk as simplegui
# message = "My first frame!"
#
# # Handler for mouse click
# def click():
#     print message
#
# # Create a frame and assign callbacks to event handlers
# frame = simplegui.create_frame("My first frame", 100, 200)
# frame.add_button("Click me", click)
# frame.start()
# 5. "My second frame"
message = "My second frame!"
# Handler for mouse click
def click():
    print message
frame = simplegui.create_frame("My second frame", 200, 100)
# Assign callbacks to event handlers
frame.add_button("Click me", click)
# Start the frame animation
frame.start()
| {
"repo_name": "polde-live/interprog1",
"path": "exercises/intprog.py",
"copies": "1",
"size": "1235",
"license": "unlicense",
"hash": -7722888274040530000,
"line_mean": 14.8333333333,
"line_max": 65,
"alpha_frac": 0.6898785425,
"autogenerated": false,
"ratio": 3.2845744680851063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44744530105851066,
"avg_score": null,
"num_lines": null
} |
# An Introduction to Interactive Programming in Python (Part 1)
# Practice exercises for expressions
# Python 2 practice exercises; each block computes one value and prints it.
# 1. Number of feet in 13 miles:
feet_in_13_miles = 13 * 5280 #There are 5280 feet in a mile.
print "There are %d in 13 miles" %feet_in_13_miles
# 2. Number of seconds in 7 hours, 21 minutes and 37 seconds:
total_seconds = 7*3600+21*60+37
print "Total numer of seconds: %d" %total_seconds
# 3. The perimeter of a rectangle with sides
# of length 4 and 7 inches:
rec_w = 4
rec_h = 7
rec_perimeter = 2*rec_w + 2*rec_h
print "Rectangle perimeter is %d" %rec_perimeter
# 4. The area of a rectangle with sides of length 4 and 7 inches:
rec_area = rec_w * rec_h
print "Rectangle area is %d" %rec_area
# 5. Circumference in inches of a circle whose radius is 8 inches:
r = 8
pi = 3.14
circumference = 2*pi*r
print "Circle circumference of radius %d is %g" %(r, circumference)
# 6. Area of circle whose radious is 8 inches:
# NOTE(review): the variable below holds the circle's AREA (pi*r**2),
# not a radius -- the name is misleading.
radius = pi * r ** 2
print "Circle area of radius %d is %g" %(r, radius)
# 7. Value of 1k USD compounded at 7% interest for 10 years:
i = 7 # percent
v = 1000
t = 10
fv = v * (1+0.01*i)**t
print "Future Value of %g USD over %d years at %d percent is %g USD" \
%(v, t, i, fv)
# 8. String concatanation:
s1 = "My name is"
s2 = "Joe"
s3 = "Warren"
text = " ".join([s1,s2,s3]) # join list of strings
print text
# 9. Print "Joe Warren is 52 years old."
# from the string "Joe Warren" and the number 52:
name = "Joe Warren"
age = 52
print "%s is %d years old" %(name, age)
# 10. Calculate distance between point (2,2) and (5,6)
import math
class Point(object):
    """A 2-D point with public x/y attributes and Euclidean distance."""
    x = 0.0
    y = 0.0

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return "(%g, %g)" % (self.x, self.y)

    def distance(self, other_point):
        """Return the Euclidean distance to *other_point*."""
        delta_x = self.x - other_point.x
        delta_y = self.y - other_point.y
        return math.sqrt(delta_x ** 2 + delta_y ** 2)
# Demo (Python 2 print): distance between (2,2) and (5,6) is 5.
p1 = Point(2,2)
p2 = Point(5,6)
print "The distance between %s and %s is %g" \
%(p1, p2, p1.distance(p2))
| {
"repo_name": "polde-live/interprog1",
"path": "exercises/expressions.py",
"copies": "1",
"size": "2001",
"license": "unlicense",
"hash": 7736628790815662000,
"line_mean": 22.8214285714,
"line_max": 81,
"alpha_frac": 0.6381809095,
"autogenerated": false,
"ratio": 2.6294349540078845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37676158635078844,
"avg_score": null,
"num_lines": null
} |
# An Introduction to Interactive Programming in Python (Part 1)
# Practice exercises for variables and assignments
# 1. Compute the number of feet corresponding to __a number of miles:
def miles_to_feet(miles):
    """Convert *miles* to feet (5280 feet per mile), returned as a float."""
    feet_per_mile = 5280
    return float(feet_per_mile * miles)
def print_miles_to_feet(miles):
    # Pretty-print the conversion (Python 2 print statement).
    print "%.1f miles equals %.1f feet" %(miles, miles_to_feet(miles))
print_miles_to_feet(13)
print_miles_to_feet(57)
print_miles_to_feet(82.67)
# 2. Calculate total seconds:
class My_Time(object):
    # A clock time of h hours, m minutes and (name-mangled) __s seconds.
    h = 0
    m = 0
    __s = 0
    def __init__(self, h, m, s):
        self.h = h
        self.m = m
        self.__s = s
    def __repr__(self):
        return "%d::%d::%d" %(self.h, self.m, self.__s)
    def total_seconds(self):
        # 3600*h + 60*m + s, factored as nested multiplications.
        return 60*(60*self.h + self.m) + self.__s
    def print_total_seconds(self):
        # Python 2 print statement.
        print "%d hours, %d minutes, and %d seconds totals to %d seconds" \
        %(self.h,self.m, self.__s, self.total_seconds())
time1 = My_Time(7,21,37)
time2 = My_Time(10,1,7)
time3 = My_Time(1,0,1)
time1.print_total_seconds()
time2.print_total_seconds()
time3.print_total_seconds()
# 3. & 4. Rectangle perimeter and area:
class My_Rectangle(object):
    """A w-by-h rectangle whose perimeter and area are computed once,
    at construction time, and exposed as public attributes."""
    w = 0
    h = 0
    perimeter = 0
    area = 0
    def __init__(self, w, h):
        self.w = w
        self.h = h
        self.perimeter = 2*w + 2*h
        self.area = w*h
    def __repr__(self):
        # Fixed the garbled "h__as" (a mangled "has") in the original
        # user-facing message.
        return ("A rectangle %d inches wide and %d inches high has a "
                "perimeter of %d inches and area of %d square inches."
                % (self.w, self.h, self.perimeter, self.area))
# Demo (Python 2 print): three rectangles, printed via __repr__.
r1 = My_Rectangle(4,7)
r2 = My_Rectangle(7,4)
r3 = My_Rectangle(10,10)
print r1
print r2
print r3
# 5. , 6., 7., 8., 9., 10. done in previous program
# 11. Calculate the area of triangle:
# Point class from __a previous excercise.
import math
class My_Point(object):
    """2-D point (private, name-mangled coordinates) with distance()."""
    __x = 0.0
    __y = 0.0

    def __init__(self, x, y):
        self.__x = x
        self.__y = y

    def __repr__(self):
        return "(%g, %g)" % (self.__x, self.__y)

    def distance(self, other_point):
        """Euclidean distance between this point and *other_point*."""
        run = self.__x - other_point.__x
        rise = self.__y - other_point.__y
        return math.sqrt(run ** 2 + rise ** 2)
class My_Triangle(object):
    # Triangle defined by three My_Point nodes.
    # NOTE(review): unlike the triangles.py version of this class,
    # __calc_perimeter here applies Heron's formula
    # sqrt(s*(s-a)*(s-b)*(s-c)), which is the triangle's AREA, so
    # print_perimeter actually reports the area.  Confirm intent: the
    # surrounding exercise text says "Calculate the area of triangle".
    __node1 = My_Point(0,0)
    __node2 = My_Point(0,0)
    __node3 = My_Point(0,0)
    __a = 0.0 # side __a: __node1 : __node2
    __b = 0.0 # side __b: __node2 : __node3
    __c = 0.0 # side __c: __node1 : __node3
    __s = 0.0
    __perimeter = 0.0
    def __calc_sides(self):
        self.__a = self.__node1.distance(self.__node2)
        self.__b = self.__node2.distance(self.__node3)
        self.__c = self.__node1.distance(self.__node3)
    def __calc_s(self):
        # Semiperimeter, used by Heron's formula below.
        self.__s = 0.5*sum([self.__a, self.__b, self.__c])
    def __calc_perimeter(self):
        x = self.__s*(self.__s-self.__a)*(self.__s-self.__b)*(self.__s-self.__c)
        self.__perimeter = math.sqrt(x)
    def __init__(self, node1, node2, node3):
        self.__node1 = node1
        self.__node2 = node2
        self.__node3 = node3
        self.__calc_sides()
        self.__calc_s()
        self.__calc_perimeter()
    def print_perimeter(self):
        # Python 2 print statement.
        print "Triangle %s, %s, %s has a perimeter of %.1f." \
        %(self.__node1, self.__node2, self.__node3, self.__perimeter)
# Three hard-coded example triangles (Python 2 print output).
# Triangle 1
n1 = My_Point(0,0)
n2 = My_Point(3,4)
n3 = My_Point(1,1)
t1 = My_Triangle(n1,n2,n3)
t1.print_perimeter()
# Triangle 2
n1 = My_Point(-2,4)
n2 = My_Point(1,6)
n3 = My_Point(2,1)
t2 = My_Triangle(n1,n2,n3)
t2.print_perimeter()
# Triangle 3
n1 = My_Point(10,0)
n2 = My_Point(0,0)
n3 = My_Point(0,10)
t3 = My_Triangle(n1,n2,n3)
t3.print_perimeter()
| {
"repo_name": "polde-live/interprog1",
"path": "exercises/variables.py",
"copies": "1",
"size": "3682",
"license": "unlicense",
"hash": 4487087774683992000,
"line_mean": 24.0476190476,
"line_max": 89,
"alpha_frac": 0.5507876154,
"autogenerated": false,
"ratio": 2.7314540059347183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3782241621334718,
"avg_score": null,
"num_lines": null
} |
"""An intuitive record/replay mock object.
First, use the mock as you would the real object:
>>> mopen = Mock()
>>> fd = mopen('/etc/passwd')
One exception to this is return values, which are specified by defining the
"returns" attributes:
>>> fd.read().returns = 'This is a test'
>>> fd.close()
Once all the functions that your test will use have been called, call replay()
to prime the mock for testing:
>>> mopen.replay()
Then patch our mock over the real function:
>>> real_open = open
>>> open = mopen
And run your test with the mocked object:
>>> fd = open('/etc/passwd')
>>> assert fd.read() == 'This is a test'
>>> fd.close()
If an unexpected call is made to any part of the Mock an exception is thrown:
>>> fd = open('foo')
Traceback (most recent call last):
...
InvalidMockReplay: Unexpected Mock access.
Finally, we unpatch our mock:
>>> open = real_open
"""
# Sentinel distinguishing "no return value recorded" from a recorded None.
UNDEFINED = object()
class Mock(object):
    # NOTE(review): this class appears unfinished/broken -- see the notes
    # on __setattr__ and __getattr__ below.  It does not implement the
    # record/replay behaviour described in the module docstring (e.g. no
    # __call__, and no InvalidMockReplay is ever raised).
    def __init__(self, name=None):
        # NOTE(review): because __setattr__ below never stores anything,
        # none of these assignments actually takes effect.
        self._history = []
        self._recording = True
        self._name = name
        self._returns = UNDEFINED
    def replay(self):
        # Intended to switch from record mode to replay mode.
        self._recording = False
    def __getattr__(self, key):
        # NOTE(review): _history is created as a list in __init__ but used
        # here with dict-style setdefault -- one of the two must be wrong.
        mock = Mock()
        self._history.setdefault(key, [])
        self._history[key].append(mock)
        return mock
    def __setattr__(self, key, value):
        # NOTE(review): this override only defines a nested function and
        # never assigns anything, so every attribute assignment on a Mock
        # is silently discarded.  Presumably a record/replay guard was
        # intended here -- confirm against project history.
        def _set_returns(self, value):
            self._returns = value
    def _get_returns(self):
        try:
            return self._returns
        except AttributeError:
            raise AttributeError('returns')
    # NOTE(review): the setter lambda calls s._set_returns, which is only
    # defined as a local inside __setattr__ above, so __getattr__ would
    # intercept it at runtime -- verify intended design.
    returns = property(lambda s: s._get_returns(),
                       lambda s, v: s._set_returns(v))
| {
"repo_name": "alecthomas/flam",
"path": "random/mock.py",
"copies": "3",
"size": "1658",
"license": "bsd-3-clause",
"hash": 1344326297475170000,
"line_mean": 21.1066666667,
"line_max": 78,
"alpha_frac": 0.6182147165,
"autogenerated": false,
"ratio": 3.776765375854214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007619047619047619,
"num_lines": 75
} |
# An investigation into simple python access to the critterbot/disco
# devices through the an interface similar to Thomas' java interface
import socket
import struct
import array
import time
#---------------- Drop Interface
class Dconst:
    # Wire-format constants shared by all Drop field types.
    int_size=4       # bytes in a serialized int
    char_size=1      # bytes per character
    byte_size=1      # bytes per raw byte
    time_size=8      # bytes in a serialized timestamp slot
    num_leds=16      # LEDs on the robot
    max_voltage=25   # motor command magnitude limit
def struct_grab(fmt, buffer, offset):
    """Unpack one value of struct format *fmt* from *buffer* at *offset*."""
    width = struct.calcsize(fmt)
    return struct.unpack(fmt, buffer[offset:offset + width])[0]
class DInt:
    """A Drop-serializable 32-bit integer field.

    Fields constructed without a value are treated as read-only.
    """
    def __init__(self, name, value=None):
        self.name = name
        self.value = value
        self.read_only = value is None
    def size(self):
        """Serialized width in bytes."""
        return Dconst.int_size
    def pack(self, buffer, offset, value):
        """Write *value* as a native int into *buffer* at *offset*."""
        struct.pack_into("i", buffer, offset, value)
    def unpack(self, buffer, offset):
        """Read a native int from *buffer* at *offset*."""
        return struct_grab("i", buffer, offset)
class DArray:
    """A Drop-serializable fixed-length array of homogeneous fields.

    *vals* is either an element count (elements are auto-named "0", "1",
    ...) or a list of element names.  Elements default to DInt.
    """
    def __init__(self, name, vals, prototype=None):
        self.name = name
        self.read_only = False
        if prototype is None:
            prototype = DInt("", 0)
        self.prototype = prototype
        if type(vals) == type(1):  # a bare count
            self.names = ["%d" % index for index in range(vals)]
            self.n = vals
        else:  # a list of element names
            self.names = vals
            self.n = len(vals)
        self.value = self.n * [self.prototype.value]
    def size(self):
        """Total serialized width: n elements of the prototype's width."""
        return self.n * self.prototype.size()
    def pack(self, buffer, offset, value):
        """Serialize each element of *value* consecutively."""
        step = self.prototype.size()
        for index in range(self.n):
            self.prototype.pack(buffer, offset, value[index])
            offset += step
    def unpack(self, buffer, offset):
        """Deserialize n consecutive elements and return them as a tuple."""
        step = self.prototype.size()
        items = []
        for _ in range(self.n):
            items.append(self.prototype.unpack(buffer, offset))
            offset += step
        return tuple(items)
class DTime:
    """A Drop timestamp in milliseconds, stored in a native long."""
    def __init__(self):
        # What is a default value for time?  (question kept from original)
        self.name = "Unixtime_usec"
        self.read_only = False
    def size(self):
        return Dconst.time_size
    def pack(self, buffer, offset, value):
        struct.pack_into("L", buffer, offset, value)
    def unpack(self, buffer, offset):
        # Only the first long of the slot is sent; the remainder is unused.
        return struct_grab("L", buffer, offset)
class DString:
    """A Drop-serializable length-prefixed string field."""
    def __init__(self, name, value="", read_only=False):
        self.name = name
        self.value = value
        self.read_only = read_only
    def size(self):
        """Width of the int length prefix plus the character payload."""
        return Dconst.int_size + len(self.value) * Dconst.char_size
    def pack(self, buffer, offset, value):
        """Write an int length followed by the string bytes."""
        struct.pack_into("i%ds" % len(value), buffer, offset,
                         len(value), value)
    def unpack(self, buffer, offset):
        """Read the int length, then that many characters."""
        length = struct_grab("i", buffer, offset)
        return struct_grab("%ds" % length, buffer, offset + Dconst.int_size)
class DColor:
    """A Drop-serializable RGB color: three single-byte channels."""
    def __init__(self, name, value=(255, 255, 0), read_only=False):
        self.name = name
        self.value = value
        self.read_only = read_only
    def size(self):
        return 3 * Dconst.byte_size
    def pack(self, buffer, offset, value):
        red, green, blue = value
        # chr() produces one-character strings for the "ccc" format
        # (Python 2 semantics, matching the rest of this module).
        struct.pack_into("ccc", buffer, offset,
                         chr(red), chr(green), chr(blue))
    def unpack(self, buffer, offset):
        red, green, blue = struct.unpack("ccc", buffer[offset:offset + 3])
        return ord(red), ord(green), ord(blue)
class Drop:
    # A named bundle of typed fields with a fixed wire layout:
    # header = length-prefixed name + payload byte count, followed by the
    # fields serialized in declaration order.
    # NOTE(review): array.array('c', ...) and .tostring() are Python 2
    # only, consistent with the rest of this module.
    def __init__(self,name,data_description):
        self.known={}   # field name -> last packed/unpacked value
        self.buffer=[]
        self.name=DString("Name",name)
        self.data_description=data_description
        self.data_size=sum(x.size() for x in data_description)
        self.header_size=self.name.size()+Dconst.int_size #int/name/rest
        self.packet_size=self.header_size+self.data_size
        # Pre-render the constant header bytes once, for fast matching
        # when resynchronizing on receive.
        buffer=array.array('c','\0'*self.header_size)
        self.name.pack(buffer,0,self.name.value)
        offset=self.name.size()
        struct.pack_into("i",buffer,offset,self.data_size)
        self.header=buffer.tostring()
    def pack(self):
        # Serialize header plus every field, in declaration order.
        # Read-only fields always use their declared value; otherwise a
        # value stored in self.known wins over the field default.
        offset=0
        buffer=array.array('c','\0'*self.packet_size)
        self.name.pack(buffer,offset,self.name.value)
        offset+=self.name.size()
        struct.pack_into("i",buffer,offset,self.data_size)
        offset+=Dconst.int_size
        for x in self.data_description:
            if x.read_only:
                value=x.value
            elif x.name in self.known:
                value=self.known[x.name]
            else:
                value=x.value
            #print value
            x.pack(buffer,offset,value)
            offset+=x.size()
        return buffer
    def unpack(self,buffer):
        # Deserialize every field (skipping the header) into self.known,
        # and return that dict.
        offset=self.header_size
        for x in self.data_description:
            vals=x.unpack(buffer,offset)
            self.known[x.name]=vals
            offset+=x.size()
        return self.known
#---------------- Critterbot interface
def int_or_list_mean(vals):
    """Average a list of numbers, or element-wise average a list of tuples.

    When the first element is an int, returns the scalar mean; otherwise
    returns a tuple of per-position means.
    """
    count = float(len(vals))
    if type(vals[0]) == type(0):
        return sum(vals) / count
    width = len(vals[0])
    return tuple(sum(item[i] for item in vals) / count
                 for i in range(width))
import string
#ok on simulator, but there is extra junk on the socket with the real robot, namely control drops coming in.
PACKET_DEBUG=False  # set True to trace socket resynchronization
class critter(object):
    # TCP client for the critterbot robot / simulator Drop protocol:
    # sends CritterControlDrop commands and reads CritterStateDrop
    # observations over a single stream socket.  Python 2 code (print
    # statements, string.find, str-based socket data).
    #AVR Motor Modes
    MotorExit=0x68
    AVREnableChargingMode=0x69
    AVRDisableChargingMode=0x6A
    AmpEnableMode=0x6D
    AmpDisableMode=0x6E
    # These can be enabled, but are not part of normal operation
    # self.AVREnableVref=0x6B
    # self.AVRDisableVref=0x6C
    #LED Mode
    LEDNone=0
    LEDClear=1
    LEDBattery=2
    LEDCustom=7
    LEDExit=0x67
    #MonitorState Bit Flags
    MonitorOverheatingMotors=1
    MonitorDriveEnabled=64
    MonitorChargingEnabled=128
    def __init__(self,hostname,port):
        # Open port
        self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.sock.connect((hostname,port))
        motor_names=("Command","Speed","Current","Temperature")
        axis_names=("X","Y","Z")
        # Field layout must match the robot's CritterStateDrop exactly.
        self.observation_description=[
            DInt("Unixtime_sec"),
            DTime(),
            DInt("PowerSource"),
            DInt("ChargeState"),
            DInt("BusVoltage"),
            DArray("Bat",3),
            DArray("Motor0", motor_names),
            DArray("Motor1", motor_names),
            DArray("Motor2", motor_names),
            DArray("Accel", axis_names),
            DArray("Mag", axis_names),
            DInt("RotationVel"),
            DArray("IR",10),
            DArray("IRLight",8),
            DArray("Light",4),
            DArray("Thermal",8),
            DArray("Bump",32),
            DInt("ErrorFlags"),
            DInt("CycleTime"),
            DInt("MonitorState")
            ]
        self.action_description=[
            #Use motor mode 1 for (vx,vy,vth), 0 for wheel velocities
            DInt("MotorMode", 1),
            DArray("Command", ("M100", "M220", "M340")),
            DInt("LedMode", 0),
            DArray("Led", Dconst.num_leds,DColor("")) #This would be a problem
            ]
        self.observation_drop=Drop("CritterStateDrop", self.observation_description)
        self.control_drop=Drop("CritterControlDrop", self.action_description)
        self.motor_range=3*[(-Dconst.max_voltage,Dconst.max_voltage)]
        self.last_time=time.time()
    def __del__(self):
        #Close port
        self.sock.close()
    def act(self,command_dict):
        #Send command: build a fresh control drop, merge in the caller's
        #field values, serialize and transmit it.
        self.control_drop=Drop("CritterControlDrop", self.action_description)
        for k,v in command_dict.items():
            self.control_drop.known[k]=v
        buffer=self.control_drop.pack()
        #print "Command Buffer",buffer
        self.sock.send(buffer)
    def new_observation(self):
        #We can ensure that the data is not too stale by ensuring that we have waited for at least10ms before responding.
        start_time=time.time()
        while True:
            out=self.observe()
            end_time=time.time()
            if end_time-start_time > .01:
                return out
    def fresh_observation(self,period=.01):
        #We can ensure that the data is not too stale by ensuring that we have waited for at least10ms before responding.
        # Like new_observation, but paced against self.last_time so that
        # successive calls are at least `period` seconds apart.
        while True:
            out=self.observe()
            end_time=time.time()
            if end_time-self.last_time >= period:
                self.last_time=end_time
                return out
    def avg_fresh_observation(self,period=.01):
        #We can ensure that the data is not too stale by ensuring that we have waited for at least10ms before responding.
        # Averages every observation received during the period,
        # field by field, via int_or_list_mean.
        store=[]
        while True:
            out=self.observe()
            store.append(out.copy())
            end_time=time.time()
            if end_time-self.last_time >= period:
                new_out={}
                keys=store[0].keys()
                for k in keys:
                    vals=[s[k] for s in store]
                    new_out[k]=int_or_list_mean(vals)
                # NOTE(review): "Time" and "Datasource" are not field names
                # in observation_description above -- confirm these keys
                # exist in observations, else this raises KeyError.
                new_out["Time"]=store[-1]["Time"]
                new_out["Datasource"]=store[-1]["Datasource"]
                self.last_time=end_time
                return new_out
    def observe(self):
        #Read observation: accumulate bytes until a full packet whose
        #header matches the expected Drop header is buffered, discarding
        #any junk (e.g. echoed control drops) in between.
        data=""
        while True:
            delta=self.observation_drop.packet_size-len(data)
            got=self.sock.recv(delta)
            data+=got
            if got=="": #broken connection
                # NOTE(review): returns "" here but a dict below --
                # callers must handle both types.
                return ""
            #Now check that the header is right, otherwise trim data appropriately
            if PACKET_DEBUG:
                print "delta,len,data:",delta,len(data),data
            head=self.observation_drop.header
            offset=string.find(data,head)
            if (offset!=0):
                if PACKET_DEBUG:
                    print "Junk in the socket to ",offset,", Moving along."
                    print "     looking for: ",head
                    print "     seeing:",data[:len(head)]
                if offset<0:
                    # Header not found: keep only the tail that might be
                    # the start of a header split across reads.
                    data=data[-len(head):]
                else:
                    # Drop everything before the header.
                    data=data[offset:]
            if len(data)== self.observation_drop.packet_size:
                break
            if PACKET_DEBUG:
                print "Grabbing more data from socket..."
        observation=self.observation_drop.unpack(data)
        observation.pop("Bump") #Bumps are completely made up for now. Discard
        return observation
def crittersim():
    # Connect to the critterbot simulator running on the local machine.
    return critter(hostname="localhost",port=2324)
def critterbot():
    # Connect to the physical robot through the Disco relay host.
    return critter(hostname="10.0.1.20",port=2330) #From Disco
| {
"repo_name": "jmodayil/rlai-critterbot",
"path": "support/scripts/python_client/critter.py",
"copies": "1",
"size": "10678",
"license": "apache-2.0",
"hash": -8717749214610564000,
"line_mean": 32.8984126984,
"line_max": 129,
"alpha_frac": 0.5780108635,
"autogenerated": false,
"ratio": 3.6618655692729765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575464602636784,
"avg_score": 0.032882366027238354,
"num_lines": 315
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.