input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# utils for working with 3d-protein structures
import os
import numpy as np
import torch
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# build vocabulary
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
def exists(val):
    """Return True when `val` is anything other than None."""
    return not (val is None)
# constants: same as in alphafold2.py
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
def get_bucketed_distance_matrix(coords, mask, num_buckets = constants.DISTOGRAM_BUCKETS, ignore_index = -100):
    """Discretize pairwise residue distances into distogram buckets.

    Distances are bucketized against `num_buckets` evenly spaced edges in
    [2, 20] Angstroms; pairs where either position is masked out receive
    `ignore_index` (suitable as a cross-entropy ignore label).
    """
    pairwise = torch.cdist(coords, coords, p=2)
    bin_edges = torch.linspace(2, 20, steps=num_buckets, device=coords.device)
    # drop the last edge so values past 20 land in the final bucket
    buckets = torch.bucketize(pairwise, bin_edges[:-1])
    pair_mask = mask[..., None] & mask[..., None, :]
    buckets.masked_fill_(~pair_mask, ignore_index)
    return buckets
# decorators
def set_backend_kwarg(fn):
    """Decorator: resolve backend='auto' to 'torch' or 'numpy' from the
    type of the first positional argument, then forward it as a kwarg."""
    @wraps(fn)
    def wrapper(*args, backend = 'auto', **kwargs):
        if backend == 'auto':
            if isinstance(args[0], torch.Tensor):
                backend = 'torch'
            else:
                backend = 'numpy'
        kwargs['backend'] = backend
        return fn(*args, **kwargs)
    return wrapper
def expand_dims_to(t, length = 3):
    """Prepend `length` singleton dimensions to a torch tensor or numpy
    array; returns the input unchanged when length == 0."""
    if length == 0:
        return t
    new_shape = (1,) * length + tuple(t.shape)
    return t.reshape(new_shape)  # reshape exists on both torch and numpy
def expand_arg_dims(dim_len = 3):
    """Decorator factory: pad the leading dimensions of both array
    arguments so each has `dim_len` dims (e.g. turn (D, N) into
    (B, D, N)) before calling the wrapped function."""
    def outer(fn):
        @wraps(fn)
        def wrapped(x, y, **kwargs):
            assert len(x.shape) == len(y.shape), "Shapes of A and B must match."
            missing = dim_len - len(x.shape)
            expanded_x = expand_dims_to(x, length = missing)
            expanded_y = expand_dims_to(y, length = missing)
            return fn(expanded_x, expanded_y, **kwargs)
        return wrapped
    return outer
def invoke_torch_or_numpy(torch_fn, numpy_fn):
    """Decorator factory: the wrapped function prepares positional args
    (optionally ending with a kwargs dict); the popped 'backend' kwarg
    selects whether torch_fn or numpy_fn consumes them."""
    def outer(fn):
        @wraps(fn)
        def dispatcher(*args, **kwargs):
            backend = kwargs.pop('backend')
            prepared = list(fn(*args, **kwargs))
            # a trailing dict is treated as keyword arguments for the backend fn
            if isinstance(prepared[-1], dict):
                extra_kwargs = prepared.pop()
            else:
                extra_kwargs = {}
            chosen = torch_fn if backend == 'torch' else numpy_fn
            return chosen(*prepared, **extra_kwargs)
        return dispatcher
    return outer
# preprocess data
def get_atom_ids_dict():
    """Build a dict mapping every sidechainnet atom name (plus the
    backbone atoms and the empty padding name) to an integer token,
    assigned in sorted-name order."""
    names = {"", "N", "CA", "C", "O"}
    for residue_info in SC_BUILD_INFO.values():
        names.update(residue_info["atom-names"])
    return {name: token for token, name in enumerate(sorted(names))}
def make_cloud_mask(aa):
    """Occupancy mask over the 14 sidechainnet atom slots for residue
    `aa` (one-letter code): 1 for real atoms, 0 for padding slots.
    The padding token "_" yields an all-zero mask."""
    mask = np.zeros(14)
    if aa != "_":
        # 4 backbone atoms plus this residue's sidechain atoms
        n_atoms = 4 + len(SC_BUILD_INFO[ONE_TO_THREE_LETTER_MAP[aa]]["atom-names"])
        mask[:n_atoms] = 1
    return mask
def make_atom_id_embedds(aa, atom_ids):
    """ Return the tokens for each atom in the aa.
    Inputs:
    * aa: str. one-letter amino acid code ("_" for padding)
    * atom_ids: dict mapping atom name -> integer token
    Output: (14,) numpy array of atom tokens, zero-padded.
    """
    mask = np.zeros(14)
    # early stop if padding token
    if aa == "_":
        return mask
    # get atom id
    atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
    for i, atom in enumerate(atom_list):
        # bug fix: use the `atom_ids` argument; previously it was ignored
        # and the module-level ATOM_IDS global was read instead
        mask[i] = atom_ids[atom]
    return mask
# global atom-name -> integer token vocabulary, derived from sidechainnet
ATOM_IDS = get_atom_ids_dict()
# precomputed per-residue features for the 20 amino acids plus padding "_":
# the 14-slot occupancy mask and the 14-slot atom-token embedding input
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
                   "atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
                  } for k in "ARNDCQEGHILKMFPSTWYV_"}
# common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
    """ Downloads a PDB entry from the RCSB PDB.
    Inputs:
    * name: str. the PDB entry id. 4 characters, capitalized.
    * route: str. route of the destin file. usually ".pdb" extension
    Output: route of destin file
    Raises: subprocess.CalledProcessError if curl exits non-zero.
    """
    import subprocess
    # security fix: pass an argument list with shell=False instead of
    # interpolating `name`/`route` into an os.system() shell string, so a
    # malicious value cannot inject shell commands; -o replaces the
    # previous `>` redirect
    subprocess.run(
        ["curl", f"https://files.rcsb.org/download/{name}.pdb", "-o", route],
        check=True,
    )
    return route
def clean_pdb(name, route=None, chain_num=None):
    """ Cleans the structure to only leave the important part.
    Keeps only the selected chain(s) of the loaded PDB and rewrites the file.
    Inputs:
    * name: str. route of the input .pdb file
    * route: str. route of the output. will overwrite input if not provided
    * chain_num: int. index of chain to select (1-indexed as pdb files)
    Output: route of destin file.
    """
    import mdtraj
    destin = route if route is not None else name
    # read input
    raw_prot = mdtraj.load_pdb(name)
    # iterate over prot and select the specified chains
    idxs = []
    for chain in raw_prot.topology.chains:
        # if arg passed, only select that chain
        if chain_num is not None:
            # NOTE(review): mdtraj's chain.index is 0-based while the
            # docstring says chain_num is 1-indexed; also custom2pdb passes
            # chain_num as a str, which never equals an int index, so every
            # chain would be skipped — confirm the intended type/offset.
            if chain_num != chain.index:
                continue
        # select indexes of chain
        chain_idxs = raw_prot.topology.select(f"chainid == {str(chain.index)}")
        idxs.extend( chain_idxs.tolist() )
    # sort: topology and xyz selection are ordered
    idxs = sorted(idxs)
    # get new trajectory from the selected subset of indexes and save
    prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
                             topology=raw_prot.topology.subset(idxs))
    prot.save(destin)
    return destin
def custom2pdb(coords, proteinnet_id, route):
    """ Takes a custom representation and turns into a .pdb file.
    Inputs:
    * coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
      same order as in the proteinnet is assumed (same as raw pdb file)
    * proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
      see: https://github.com/aqlaboratory/proteinnet/
    * route: str. destin route.
    Output: tuple of routes: (original, generated) for the structures.
    """
    import mdtraj
    # convert to numpy
    if isinstance(coords, torch.Tensor):
        coords = coords.detach().cpu().numpy()
    # ensure (1, N, 3) for mdtraj: transpose a (3, N) input to (N, 3),
    # then add the frame axis.
    # bug fixes: the old check `coords.shape[1] == 3` transposed (N, 3)
    # inputs the wrong way, and `np.newaxis(...)` is not callable —
    # np.expand_dims is the correct call.
    if coords.shape[0] == 3:
        coords = coords.T
    coords = np.expand_dims(coords, axis=0)
    # get pdb id and chain num
    pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
    pdb_destin = "/".join(route.split("/")[:-1]) + "/" + pdb_name + ".pdb"
    # download pdb file and select appropriate chain
    download_pdb(pdb_name, pdb_destin)
    clean_pdb(pdb_destin, chain_num=chain_num)
    # load trajectory scaffold and replace coordinates - assumes same order
    # NOTE(review): mdtraj stores xyz in nanometers while coords are stated
    # to be in Angstroms — confirm whether a /10 conversion is needed here.
    scaffold = mdtraj.load_pdb(pdb_destin)
    scaffold.xyz = coords
    scaffold.save(route)
    return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
    """ Turns coordinates into PDB files ready to be visualized.
    Inputs:
    * seq: (L,) tensor of ints (sidechainnet aa-key pairs)
    * coords: (3, N) coords of atoms
    * cloud_mask: (L, C) boolean mask of occupied spaces in scn format
    * prefix: str. directory to save files.
    * name: str. name of destin file (ex: pred1.pdb)
    """
    # bug fix: `scn` was never imported; use the StructureBuilder class
    # from sidechainnet directly
    from sidechainnet.structure.StructureBuilder import StructureBuilder
    # bug fix: torch.zeros takes sizes, not (shape_tuple, 3) — unpack the
    # mask shape so the scaffold is (L, C, 3)
    scaffold = torch.zeros(*cloud_mask.shape, 3)
    # NOTE(review): this assignment needs coords shaped (num_masked, 3);
    # the docstring says (3, N) — confirm the caller's layout (a .T may
    # be required before calling).
    scaffold[cloud_mask] = coords.cpu().float()
    # build structures and save
    pred = StructureBuilder(seq, crd=scaffold)
    pred.to_pdb(prefix + name)
# adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
    """ Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
    # lowercase letters, '.' and '*' mark insertions/gaps in a3m-style
    # alignments; delete them in a single translate pass
    drop_table = str.maketrans("", "", string.ascii_lowercase + ".*")
    return sequence.translate(drop_table)
def read_msa(filename: str, nseq: int):
    """ Reads the first nseq sequences from an MSA file, automatically removes insertions."""
    records = itertools.islice(SeqIO.parse(filename, "fasta"), nseq)
    parsed = []
    for record in records:
        parsed.append((record.description, remove_insertions(str(record.seq))))
    return parsed
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
    """ Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
    Inputs:
    * x: any deeply nested list of integers that correspond with amino acid id
    """
    assert isinstance(x, list), 'input must be a list'
    id2aa = VOCAB._int2char
    out = []
    for item in x:
        if isinstance(item, list):
            out.append(ids_to_embed_input(item))
            continue
        if isinstance(item, int):
            out.append(id2aa[item])
            continue
        raise TypeError('type must be either list or character')
    # a fully-flat level collapses to the (label, sequence) pair the
    # batch converters expect
    if all(isinstance(entry, str) for entry in out):
        return (None, ''.join(out))
    return out
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
    """ Returns the MSA_tr embeddings for a protein.
    Inputs:
    * msa: ( (b,) n_seqs, L) tensor of ints (in sidechainnet int-char convention)
    * embedd_model: MSA_tr model (see train_end2end.py for an example)
    * batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
    * device: optional device for the model inputs; defaults to the model's device
    Outputs: tensor of (batch, n_seqs, L, embedd_dim)
    * n_seqs: number of sequences in the MSA
    * embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
    """
    # use MSA transformer
    REPR_LAYER_NUM = 12
    # bug fix: honor a caller-supplied device; previously the parameter was
    # unconditionally overwritten with the model's device
    if device is None:
        device = embedd_model.device
    embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
    # labels and raw strings from the converter are not needed here
    _, _, msa_batch_tokens = batch_converter(embedd_inputs)
    with torch.no_grad():
        results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is the start token, so keep positions from 1 onward
    token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :]
    return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
    """ Returns the ESM embeddings for a protein.
    Inputs:
    * seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
    * embedd_model: ESM model (see train_end2end.py for an example)
    * batch_converter: ESM batch converter (see train_end2end.py for an example)
    * msa_data: unused; kept for signature compatibility with get_msa_embedd
    Outputs: tensor of (batch, n_seqs, L, embedd_dim)
    * n_seqs: number of sequences in the MSA. 1 for ESM-1b
    * embedd_dim: number of embedding dimensions. 1280 for ESM-1b
    """
    # use ESM transformer
    device = embedd_model.device
    REPR_LAYER_NUM = 33
    embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
    # labels and raw strings from the converter are not needed here
    _, _, batch_tokens = batch_converter(embedd_inputs)
    with torch.no_grad():
        results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is the start token, so keep positions from 1 onward;
    # unsqueeze adds the singleton n_seqs dimension
    token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :].unsqueeze(dim=1)
    return token_reps
def get_all_protein_ids(dataloader, verbose=False):
""" Given a sidechainnet dataloader for a CASP version,
Returns all the ids belonging to proteins.
Inputs:
* dataloader: a sidechainnet dataloader for a CASP version
Outputs: a set containing the | |
0:
raise exception
return newvalue
def whole(value, exception = ValueError):
    """Casts to whole (integer) if possible"""
    # int() itself raises if the cast is impossible; negatives are rejected
    # explicitly since whole numbers are >= 0
    result = int(value)
    if result < 0:
        raise exception
    return result
# argparse-friendly variants of the validators above: they raise
# argparse.ArgumentTypeError instead of ValueError so argparse can report
# a clean usage message for bad command-line values
def posrealargparse(value): return posreal(value, argparse.ArgumentTypeError)
def naturalargparse(value): return natural(value, argparse.ArgumentTypeError)
def wholeargparse(value): return whole(value, argparse.ArgumentTypeError)
def getinterceptparams(initialinterceptparams, interceptparams):
    """Gets parameters for an upcoming interception attempt from the user.

    Interactive (Python 2) prompt loop: reads commands of the form
    "[command] [value]" from raw_input until "done". Mutates and returns
    `interceptparams`, plus a two-element boolean `recommendations` list
    ([stable changed, unstable changed]) used by the caller to decide
    whether to recompute manifold initial conditions.
    """
    #initialinterceptparams are default values of the parameters without the prefix
    #and for the corresponding manifolds
    #We return a new version of interceptparams with any requested changes made
    #The structure of interceptparams lists is
    #[stablowerdisplacement, stabupperdisplacement, stabnumdisplacements,
    #unstablowerdisplacement, unstabupperdisplacement, unstabnumdisplacements, depth]
    #We also return a list, recommendations, with two Boolean values.
    #Each value recommends whether we should recompute the stable or unstable, respectively, manifolds based on what values changed
    print "\n-------PARAMETER EDITING PROMPT-------"
    print "\nCURRENT VALUES:"
    print "\n\tFOR DISPLACEMENTS OF STABLE MANIFOLD INITIAL CONDITIONS:"
    print "\t\tslb - the lower bound: " + str(interceptparams[0])
    print "\t\tsub - the upper bound: " + str(interceptparams[1])
    print "\t\tsn - the number of displacements: " + str(interceptparams[2])
    print "\n\tFOR DISPLACEMENTS OF UNSTABLE MANIFOLD INITIAL CONDITIONS:"
    print "\t\tulb - the lower bound: " + str(interceptparams[3])
    print "\t\tuub - the upper bound: " + str(interceptparams[4])
    print "\t\tun - the number of displacements: " + str(interceptparams[5])
    print "\n\tFOR MANIFOLD INTERCEPTION DEPTH:"
    print "\t\tdepth - alter the desired depth of the manifold interception: " + str(interceptparams[6])
    print "\nPARAMETER EDITING PROMPT INSTRUCTIONS:"
    print "\tAll parameter change commands are of the form [command] [value]."
    print "\tYou can also type default to get a list of default parameters you can use."
    print "\tWhen you are done, type done"
    print "\nFOR DISPLACEMENTS OF STABLE MANIFOLD INITIAL CONDITIONS:"
    print "\tslb - alter the lower bound (positive real number required for value argument)"
    print "\tsub - alter the upper bound (positive real number required for value argument)"
    print "\tsn - alter the number of displacements (natural number required for value argument)"
    print "\nFOR DISPLACEMENTS OF UNSTABLE MANIFOLD INITIAL CONDITIONS:"
    print "\tulb - alter the lower bound (positive real number required for value argument)"
    print "\tuub - alter the upper bound (positive real number required for value argument)"
    print "\tun - alter the number of displacements (natural number required for value argument)"
    print "\nFOR MANIFOLD INTERCEPTION DEPTH:"
    print "\tdepth - alter the desired depth of the manifold interception (natural number required for value argument)"
    # each command maps to [index into interceptparams, validator/caster, affiliation]
    commands = {"slb":[0,posreal,1], #this dict maps between commands for parameters, corresponding parameter array placements, functions
                "sub":[1,posreal,1], #that try to handle the value parameters inside commands (to cast them to acceptable types
                "sn":[2,natural,1], #or to reject them), and affiliation (see below). "R+" means positive real, "J" means natural number, and "W" means whole number
                "ulb":[3,posreal,-1],
                "uub":[4,posreal,-1], #affiliation is what manifold a value is associated with. It is used in making recomputation recommendations.
                "un":[5,natural,-1], #0 means no affiliation, 1 means stable manifold affiliation, and -1 means unstable manifold affiliation
                "depth":[6,natural,0]}
    #and yes, I know that at times like this a dedicated commandproperties object might make sense
    recommendations = [False, False] #no recommendations to recompute are made until changes to corresponding variables are made
    while True:
        resp = raw_input(">>> ").lower()
        if resp == "done":
            break
        if resp == "default":
            # print the defaults without changing anything, then re-prompt
            print "\nFOR DISPLACEMENTS OF STABLE MANIFOLD INITIAL CONDITIONS:"
            print "\tslb - the lower bound: " + str(initialinterceptparams[0])
            print "\tsub - the upper bound: " + str(initialinterceptparams[1])
            print "\tsn - the number of displacements: " + str(initialinterceptparams[2])
            print "\nFOR DISPLACEMENTS OF UNSTABLE MANIFOLD INITIAL CONDITIONS:"
            print "\tulb - the lower bound: " + str(initialinterceptparams[3])
            print "\tuub - the upper bound: " + str(initialinterceptparams[4])
            print "\tun - the number of displacements: " + str(initialinterceptparams[5])
            print "\nFOR MANIFOLD INTERCEPTION DEPTH:"
            print "\tdepth - alter the desired depth of the manifold interception: " + str(initialinterceptparams[6])
            continue
        resp = resp.split(" ")
        if len(resp) < 2:
            print "Please provide a valid response."
            continue
        if resp[0] in commands:
            try:
                #We get the value provided as the command argument and process it
                value = commands[resp[0]][1](resp[1])
            except ValueError:
                print "Argument type invalid."
                continue
            interceptparams[commands[resp[0]][0]] = value #We then store a key-value pair with the index from the command dict and the value
            if commands[resp[0]][2] == 1: #We also update the recommendation list
                recommendations[0] = True
                print "Note: the value of a parameter for the stable manifold has been modified. The initial condition region will be recomputed"
            if commands[resp[0]][2] == -1:
                recommendations[1] = True
                print ("Note: the value of a parameter for the unstable manifold has been modified. If the unstable manifold is still being integrated, " +
                       "the initial condition region will be recomputed")
        else:
            print "Please provide a valid response."
    return interceptparams, recommendations
def finditinerary(energy, mu, itinerary, timestep, mtimestep, ttimestep, timeout, tolerance, seedorbit1, seedorbit2,
                  initialinterceptparams):
    """Finds an initial condition corresponding to an itinerary.

    Iteratively extends the found portion of the itinerary one symbol at a
    time: integrates the current trajectory region forward and the stable
    manifold backward to a Poincare section, intersects them, and repeats
    until the whole itinerary is covered. Returns a sample initial
    condition, or the string "invalid itinerary" on bad input.

    NOTE(review): if `itinerary` equals its first two symbols, the outer
    loop never runs and `rep` is unbound at the final print — confirm
    callers always pass itineraries longer than 2.
    """
    if not itineraryisvalid(itinerary):
        return "invalid itinerary"
    #founditin is the part of the itinerary for which we have a trajectory.
    founditin = itinerary[:2]
    findinginitial = True #findinginitial signifies that the initial unstable manifold can be found
    #forwardregion is the part of the trajectory to be integrated forward.
    forwardregion = []
    #The structure of the interceptparams list is as follows:
    #[unstablowerdisplacement, unstabupperdisplacement, unstabnumdisplacements,
    #stablowerdisplacement, stabupperdisplacement, stabnumdisplacements, depth].
    interceptparams = initialinterceptparams
    while founditin != itinerary:
        #We get the initial conditions for the stable manifold
        stableitin = itinerary[len(founditin)-1:len(founditin)+1] #stableitin is the itinerary for the stable manifold
        #We get the Poincare section we need
        section = findsection(founditin, stableitin)
        print section.__name__
        backwardregion = []
        # NOTE(review): unstabcomputemic/stabcomputemic are assigned below
        # but never read inside this function — possibly vestigial.
        unstabcomputemic = True #These variables determine whether the manifold initial conditions will be computed
        stabcomputemic = True
        while True: #This loop allows us to edit the properties of the upcoming integration
            forwardintegration = []
            backwardintegration = []
            interceptparams, recommendations = getinterceptparams(initialinterceptparams, interceptparams)
            print interceptparams
            if recommendations[0] or not backwardregion:
                print "-------FINDING STABLE MANIFOLD INITIAL CONDITIONS-------"
                backwardregion = manifoldinitial(energy, mu, stableitin, timestep, mtimestep, tolerance,
                                                 seedorbit1, seedorbit2, True, interceptparams[3], interceptparams[4],
                                                 interceptparams[5])
                stabcomputemic = False
            if (recommendations[1] and findinginitial) or not forwardregion: #If new manifold initial conditions are requested, we provide them
                print "-------FINDING UNSTABLE MANIFOLD INITIAL CONDITIONS-------"
                forwardregion = manifoldinitial(energy, mu, itinerary[:2], timestep, mtimestep, tolerance,
                                                seedorbit1, seedorbit2, False, interceptparams[0], interceptparams[1],
                                                interceptparams[2])
                unstabcomputemic = False
            #We integrate the existing trajectory piece forward and the stable manifold backwards to the section
            print "-------INTEGRATING EXISTING TRAJECTORY FORWARDS-------"
            newforward = createpolygon(integrateregion(energy, mu, ttimestep, timeout, forwardregion, interceptparams[6], tolerance, section), section)
            forwardintegration += newforward #We get the new integration and add it to what we have
            print "-------INTEGRATING STABLE MANIFOLD BACKWARDS-------"
            newbackward = createpolygon(integrateregion(energy, mu, ttimestep * -1, timeout, backwardregion, interceptparams[6], tolerance, section), section)
            backwardintegration += newbackward
            #We find the new forward region as the overlap between the two regions
            forwardregiontemp, rep, stay = getoverlap(energy, mu, forwardintegration, backwardintegration, section)
            if not stay:
                forwardregion = forwardregiontemp
                break
        print forwardregion
        founditin = founditin + itinerary[len(founditin)]
        print "-------INTERCEPT REGION FOUND FOR ITINERARY SUBSET " + founditin + " AT " + str(interceptparams[6]) + "-------"
        findinginitial = False
    print "SAMPLE INITIAL CONDITION:", rep
    return rep
def trajectoryviewer(mu, timestep, initial):
"""A command-line generator for trajectories corresponding to initial conditions initial."""
print "-------TRAJECTORY VISUALIZER-------"
print "Please input visualizer options in the form '[starttime] [endtime]'. To quit, type 'done'. "
#print "Note that the visualization will be saved as [filename].html."
while True:
options = raw_input(">>> ")
options = options.strip()
if options == "done":
break
else:
options = options.split(" ")
try:
start = float(options[0]) #We try to get the values
end = float(options[1])
#filename = str(options[2])
except:
print "Please provide a valid response."
continue
print "Visualizing..."
positions, sim = integratecore(mu, timestep, initial[0], initial[1], initial[2], initial[3],
visualizertest, [end], findmatrix = False, starttime = start)
keys = sorted(positions.keys())
fig = plt.figure() #Set up plot
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1.5, 1.5), ylim=(-1.5,1.5))
ax.grid()
ax.set_aspect("equal")
viewpos = [] #The positions of particles in a form processable by the viewer
| |
'post_id':postIdFragment}))
self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
post_likes = json.loads(response.content)["data"]
self.assertEqual(len(post_likes), 1, f"expected list of length 1. got: {len(post_likes)}")
self.assertEqual(postId, post_likes[0]["object"], "returned item referenced wrong object!")
self.assertEqual(post_likes[0]["author"]["id"], str(author2.id), "returned item referenced wrong author!")
comment_data = {
"type":"comment",
"author":{
"type":"author",
"id":f"{author.id}"
},
"comment":"A Comment with words and markdown",
"contentType":f"{Comment.ContentTypeEnum.MARKDOWN}"
}
response = self.client.post(reverse('post_api:comments', kwargs={'author_id':author.id, 'post_id':postIdFragment}), comment_data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
commentId = json.loads(response.content)["data"]["id"]
commentIdFragment = commentId.split("comments/")[1].rstrip("/")
data = {
"object": f"{commentId}",
"author":{
"type":"author",
"id":f"{author2.id}"
},
}
response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), data, format="json")
self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
dict_resp_data = json.loads(response.content)["data"]
self.assertEqual(commentId, dict_resp_data["object"], "returned item referenced wrong object!")
self.assertEqual(dict_resp_data["author"]["id"], str(author2.id), "returned item referenced wrong author!")
response = self.client.get(reverse('likes_api:comment_likes', kwargs={'author_id':author.id, 'post_id':postIdFragment, 'comment_id':commentIdFragment}))
self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
comment_likes = json.loads(response.content)["data"]
self.assertEqual(len(comment_likes), 1, f"expected list of length 1. got: {len(post_likes)}")
self.assertEqual(commentId, comment_likes[0]["object"], "returned item referenced wrong object!")
self.assertEqual(comment_likes[0]["author"]["id"], str(author2.id), "returned item referenced wrong author!")
def test_get_like_access_levels(self):
    """
    should return 200 for all users
    """
    password = "password"
    user = User.objects.create_user("username1", password=password)
    author: Author = Author.objects.get(userId=user)
    user2 = User.objects.create_user("username2", password=password)
    author2: Author = Author.objects.get(userId=user2)

    def relogin(as_user):
        # swap the client session over to `as_user`
        self.client.logout()
        self.assertTrue(self.client.login(username=as_user.username, password=password))

    def send_like(target):
        # author2 delivers a like for `target` to author's inbox
        payload = {
            "object": f"{target}",
            "author":{
                "type":"author",
                "id":f"{author2.id}"
            },
        }
        response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), payload, format="json")
        self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")

    def check_like_lists():
        # both like-listing endpoints must answer 200 for the current session
        response = self.client.get(reverse('likes_api:post_likes', kwargs={'author_id':author.id, 'post_id':postIdFragment}))
        self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
        response = self.client.get(reverse('likes_api:comment_likes', kwargs={'author_id':author.id, 'post_id':postIdFragment, 'comment_id':commentIdFragment}))
        self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")

    self.client.logout()
    self.client.login(username=user.username, password=password)
    # author creates a post
    post_data = {
        "type":"post",
        "title":"A post title about a post about web dev",
        "description":"This post discusses stuff -- brief",
        "contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
        "author":{
            "type":"author",
            "id":f"{author.id}"
        },
        "visibility":f"{Post.VisibilityEnum.PUBLIC}",
        "unlisted":"false"
    }
    response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), post_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    postId = json.loads(response.content)["data"]["id"]
    postIdFragment = postId.split("posts/")[1].rstrip("/")
    # author2 likes author's post
    relogin(user2)
    send_like(postId)
    # author comments on their own post
    relogin(user)
    comment_data = {
        "type":"comment",
        "author":{
            "type":"author",
            "id":f"{author.id}"
        },
        "comment":"A Comment with words and markdown",
        "contentType":f"{Comment.ContentTypeEnum.MARKDOWN}"
    }
    response = self.client.post(reverse('post_api:comments', kwargs={'author_id':author.id, 'post_id':postIdFragment}), comment_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    commentId = json.loads(response.content)["data"]["id"]
    commentIdFragment = commentId.split("comments/")[1].rstrip("/")
    # author2 likes author's comment
    relogin(user2)
    send_like(commentId)
    self.client.logout()
    # test anonymous user
    check_like_lists()
    # test non participant user
    nonParticipant = User.objects.create_user("nonParticipant", password=password)
    self.assertTrue(self.client.login(username=nonParticipant.username, password=password))
    check_like_lists()
    # test likee
    relogin(user)
    check_like_lists()
    # test owner
    relogin(user2)
    check_like_lists()
    # test admin
    self.client.logout()
    self.auth_helper.authorize_client(self.client)
    check_like_lists()
def test_get_like_nonexist(self):
    """
    should return 404
    """
    author = self.auth_helper.get_author()
    # likes of a post that was never created -> 404
    missing_post = uuid4()
    response = self.client.get(reverse('likes_api:post_likes', kwargs={'author_id':author.id, 'post_id':missing_post}))
    self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
    # actually make a post now so we can soley test getting a comment that doesn't exist
    user = User.objects.create_user("username1")
    author2: Author = Author.objects.get(userId=user)
    post_data = {
        "type":"post",
        "title":"A post title about a post about web dev",
        "description":"This post discusses stuff -- brief",
        "contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
        "author":{
            "type":"author",
            "id":f"{author.id}"
        },
        "visibility":f"{Post.VisibilityEnum.PUBLIC}",
        "unlisted":"false"
    }
    response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), post_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    postId = json.loads(response.content)["data"]["id"]
    postIdFragment = postId.split("posts/")[1].rstrip("/")
    like_payload = {
        "object": f"{postId}",
        "author":{
            "type":"author",
            "id":f"{author2.id}"
        },
    }
    response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), like_payload, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    returned = json.loads(response.content)["data"]
    self.assertEqual(postId, returned["object"], "returned item referenced wrong object!")
    self.assertEqual(returned["author"]["id"], str(author2.id), "returned item referenced wrong author!")
    # likes of a comment that was never created -> 404
    missing_comment = uuid4()
    response = self.client.get(reverse('likes_api:comment_likes', kwargs={'author_id':author.id, 'post_id':postIdFragment, 'comment_id':missing_comment}))
    self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
def test_get_like_bad_uuid(self):
    """
    should return 404
    """
    author = self.auth_helper.get_author()
    # a malformed post id should 404 rather than error
    bad_post_id = "notARealUUID"
    response = self.client.get(reverse('likes_api:post_likes', kwargs={'author_id':author.id, 'post_id':bad_post_id}))
    self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
    # create a real post so only the comment id is malformed below
    user = User.objects.create_user("username1")
    author2: Author = Author.objects.get(userId=user)
    post_data = {
        "type":"post",
        "title":"A post title about a post about web dev",
        "description":"This post discusses stuff -- brief",
        "contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
        "author":{
            "type":"author",
            "id":f"{author.id}"
        },
        "visibility":f"{Post.VisibilityEnum.PUBLIC}",
        "unlisted":"false"
    }
    response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), post_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    postId = json.loads(response.content)["data"]["id"]
    postIdFragment = postId.split("posts/")[1].rstrip("/")
    like_payload = {
        "object": f"{postId}",
        "author":{
            "type":"author",
            "id":f"{author2.id}"
        },
    }
    response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), like_payload, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    returned = json.loads(response.content)["data"]
    self.assertEqual(postId, returned["object"], "returned item referenced wrong object!")
    self.assertEqual(returned["author"]["id"], str(author2.id), "returned item referenced wrong author!")
    # a malformed comment id should also 404
    bad_comment_id = "notARealUUID"
    response = self.client.get(reverse('likes_api:comment_likes', kwargs={'author_id':author.id, 'post_id':postIdFragment, 'comment_id':bad_comment_id}))
    self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
def test_get_like_recipient_nonexist(self):
    """
    should return 404
    """
    author = self.auth_helper.get_author()
    liker_user = User.objects.create_user("username1")
    liker: Author = Author.objects.get(userId=liker_user)

    # Build a real post owned by `author`, then like it from `liker`,
    # so the only invalid piece in the final request is the author id.
    post_data = {
        "type": "post",
        "title": "A post title about a post about web dev",
        "description": "This post discusses stuff -- brief",
        "contentType": f"{Post.ContentTypeEnum.MARKDOWN}",
        "author": {
            "type": "author",
            "id": f"{author.id}"
        },
        "visibility": f"{Post.VisibilityEnum.PUBLIC}",
        "unlisted": "false"
    }
    response = self.client.post(reverse('post_api:posts', kwargs={'author_id': author.id}), post_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    post_id = json.loads(response.content)["data"]["id"]
    post_fragment = post_id.split("posts/")[1].rstrip("/")

    like_data = {
        "object": f"{post_id}",
        "author": {
            "type": "author",
            "id": f"{liker.id}"
        },
    }
    response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id': author.id}), like_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    like_json = json.loads(response.content)["data"]
    self.assertEqual(post_id, like_json["object"], "returned item referenced wrong object!")
    self.assertEqual(like_json["author"]["id"], str(liker.id), "returned item referenced wrong author!")

    # A syntactically valid but unknown author UUID must 404
    missing_author_id = uuid4()
    response = self.client.get(reverse('likes_api:post_likes', kwargs={'author_id': missing_author_id, 'post_id': post_fragment}))
    self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
def test_get_like_recipient_bad_uuid(self):
    """
    Requesting the likes of a post whose recipient author id is not a
    valid UUID should return 404.
    """
    author = self.auth_helper.get_author()
    user = User.objects.create_user("username1")
    author2: Author = Author.objects.get(userId=user)
    # Create a real post and like it, so the only invalid piece in the
    # final request is the author id in the URL.
    post_data = {
        "type":"post",
        "title":"A post title about a post about web dev",
        "description":"This post discusses stuff -- brief",
        "contentType":f"{Post.ContentTypeEnum.MARKDOWN}",
        "author":{
            "type":"author",
            "id":f"{author.id}"
        },
        "visibility":f"{Post.VisibilityEnum.PUBLIC}",
        "unlisted":"false"
    }
    response = self.client.post(reverse('post_api:posts', kwargs={'author_id':author.id}), post_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    postId = json.loads(response.content)["data"]["id"]
    postIdFragment = postId.split("posts/")[1].rstrip("/")
    data = {
        "object": f"{postId}",
        "author":{
            "type":"author",
            "id":f"{author2.id}"
        },
    }
    response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id':author.id}), data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    dict_resp_data = json.loads(response.content)["data"]
    self.assertEqual(postId, dict_resp_data["object"], "returned item referenced wrong object!")
    self.assertEqual(dict_resp_data["author"]["id"], str(author2.id), "returned item referenced wrong author!")
    # Use a plainly invalid UUID string (the previous value was a
    # corrupted anonymization placeholder); matches test_get_like_bad_uuid.
    authorId = "notARealUUID"
    response = self.client.get(reverse('likes_api:post_likes', kwargs={'author_id':authorId, 'post_id':postIdFragment}))
    self.assertEqual(response.status_code, 404, f"expected 404. got: {response.status_code}")
def test_get_liked_things(self):
    """
    should return inbox items with ids matching the created items
    """
    author = self.auth_helper.get_author()
    liker_user = User.objects.create_user("username1")
    liker: Author = Author.objects.get(userId=liker_user)

    # Create a post owned by `author`
    post_data = {
        "type": "post",
        "title": "A post title about a post about web dev",
        "description": "This post discusses stuff -- brief",
        "contentType": f"{Post.ContentTypeEnum.MARKDOWN}",
        "author": {
            "type": "author",
            "id": f"{author.id}"
        },
        "visibility": f"{Post.VisibilityEnum.PUBLIC}",
        "unlisted": "false"
    }
    response = self.client.post(reverse('post_api:posts', kwargs={'author_id': author.id}), post_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    post_id = json.loads(response.content)["data"]["id"]
    post_fragment = post_id.split("posts/")[1].rstrip("/")

    # `liker` likes the post
    post_like = {
        "object": f"{post_id}",
        "author": {
            "type": "author",
            "id": f"{liker.id}"
        },
    }
    response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id': author.id}), post_like, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    like_json = json.loads(response.content)["data"]
    self.assertEqual(post_id, like_json["object"], "returned item referenced wrong object!")
    self.assertEqual(like_json["author"]["id"], str(liker.id), "returned item referenced wrong author!")

    # `author` comments on the post, then `liker` likes the comment too
    comment_data = {
        "type": "comment",
        "author": {
            "type": "author",
            "id": f"{author.id}"
        },
        "comment": "A Comment with words and markdown",
        "contentType": f"{Comment.ContentTypeEnum.MARKDOWN}"
    }
    response = self.client.post(reverse('post_api:comments', kwargs={'author_id': author.id, 'post_id': post_fragment}), comment_data, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    comment_id = json.loads(response.content)["data"]["id"]
    comment_like = {
        "object": f"{comment_id}",
        "author": {
            "type": "author",
            "id": f"{liker.id}"
        },
    }
    response = self.client.post(reverse('likes_api:inbox_like', kwargs={'author_id': author.id}), comment_like, format="json")
    self.assertEqual(response.status_code, 201, f"expected 201. got: {response.status_code}")
    like_json = json.loads(response.content)["data"]
    self.assertEqual(comment_id, like_json["object"], "returned item referenced wrong object!")
    self.assertEqual(like_json["author"]["id"], str(liker.id), "returned item referenced wrong author!")

    # The liker's "liked" list should contain both likes (order unspecified)
    response = self.client.get(reverse('likes_api:liked', kwargs={'author_id': liker.id}))
    self.assertEqual(response.status_code, 200, f"expected 200. got: {response.status_code}")
    likes = json.loads(response.content)["data"]
    self.assertEqual(len(likes), 2, f"expected list of length 2. got: {len(likes)}")

    first_like, second_like = likes
    if first_like["object"] == comment_id:
        first_like, second_like = second_like, first_like
    self.assertEqual(post_id, first_like["object"], "returned item referenced wrong object!")
    self.assertEqual(first_like["author"]["id"], str(liker.id), "returned item referenced wrong author!")
    self.assertEqual(comment_id, second_like["object"], "returned item referenced wrong object!")
    self.assertEqual(second_like["author"]["id"], str(liker.id), "returned item referenced wrong author!")
def test_get_liked_things_empty(self):
"""
should return an empty list
"""
author = self.auth_helper.get_author()
response = | |
"""
sip.py
Computes connectivity (KS-test or percentile scores) between a test similarity
gct and a background similarity gct. The default output is signed connectivity,
which means that the connectivity score is artifically made negative if the
median of the test distribution is less than the median of the background
distribution.
Required inputs are paths to the test and background gct files. Output is a
connectivity gct.
Metadata for the connectivity gct comes from the test gct. The dimensions will
also be the same, except the connectivity gct will not include rows
that are not also in the background gct. Therefore, it is important that the
rows of the background gct include the rows (i.e. targets) of the test gct;
any target that is not in the background gct will not have a background
distribution, and therefore connectivity cannot be computed for that target.
Any metadata present in the test gct will be retained. Since aggregation of
replicates occurs, unique metadata entries will be collapsed.
"""
import logging
import argparse
import sys
import pandas as pd
import numpy as np
from scipy import stats
import broadinstitute_psp.utils.setup_logger as setup_logger
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.write_gct as wg
__author__ = "<NAME>"
__email__ = "<EMAIL>"

# Set up logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)

# Names of the metadata fields this module creates/consumes:
CONNECTIVITY_METRIC_FIELD = "connectivity_metric"  # records which metric produced the output scores
QUERY_FIELD_NAME = "query_field"  # aggregated replicate-id field added to test gct columns (queries)
TARGET_FIELD_NAME = "target_field"  # aggregated replicate-id field added to row metadata (targets)
def build_parser():
    """ Build and return the command-line argument parser. """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Required args
    parser.add_argument("--test_gct_path", "-t", required=True,
                        help="path to input gct file")
    parser.add_argument("--bg_gct_path", "-b", required=True,
                        help="path to background gct file")

    # Optional args
    parser.add_argument("--out_name", "-o", default="sip_output.gct",
                        help="what to name the output connectivity file")
    parser.add_argument("--connectivity_metric", "-c", default="ks_test",
                        choices=["ks_test", "percentile_score"],
                        help="metric to use for computing connectivity")

    # The three replicate-aggregation options share the same default fields
    replicate_id_fields = ["pert_id", "cell_id", "pert_time"]
    agg_options = [
        ("--fields_to_aggregate_in_test_gct_queries", "-tfq",
         "metadata fields in the test gct columns identifying replicates"),
        ("--fields_to_aggregate_in_test_gct_targets", "-tft",
         "metadata fields in the test gct rows identifying replicates"),
        ("--fields_to_aggregate_in_bg_gct", "-bf",
         "metadata fields in the background gct rows AND columns identifying replicates"),
    ]
    for long_flag, short_flag, help_text in agg_options:
        parser.add_argument(long_flag, short_flag, nargs="*",
                            default=list(replicate_id_fields), help=help_text)

    parser.add_argument("--separator", "-s", type=str, default="|",
                        help="string separator for aggregating fields together")
    parser.add_argument("--verbose", "-v", action="store_true", default=False,
                        help="whether to increase the # of messages reported")

    return parser
def main(args):
    """ The main method: read GCTs, compute connectivity, write signed result. """
    test_gct = parse.parse(args.test_gct_path)
    bg_gct = parse.parse(args.bg_gct_path)

    # Only the test matrix's symmetry is used downstream; check_symmetry
    # also asserts that the background matrix is square.
    is_test_df_sym, _ = check_symmetry(test_gct.data_df, bg_gct.data_df)

    # Tag replicates in both GCTs with aggregated id fields
    test_gct, bg_gct = create_aggregated_fields_in_GCTs(
        test_gct, bg_gct,
        args.fields_to_aggregate_in_test_gct_queries,
        args.fields_to_aggregate_in_test_gct_targets,
        args.fields_to_aggregate_in_bg_gct,
        QUERY_FIELD_NAME, TARGET_FIELD_NAME, args.separator)

    # Compute connectivity (signed version is what gets written out)
    conn_gct, signed_conn_gct = compute_connectivities(
        test_gct, bg_gct, QUERY_FIELD_NAME, TARGET_FIELD_NAME, TARGET_FIELD_NAME,
        args.connectivity_metric, is_test_df_sym, args.separator)

    # Record which connectivity metric was used in both metadata dfs
    for meta_df in (signed_conn_gct.col_metadata_df, signed_conn_gct.row_metadata_df):
        add_connectivity_metric_to_metadata(meta_df, args.connectivity_metric, CONNECTIVITY_METRIC_FIELD)

    wg.write(signed_conn_gct, args.out_name, data_null="NaN", filler_null="NaN", metadata_null="NaN")
def create_aggregated_fields_in_GCTs(test_gct, bg_gct, fields_to_aggregate_in_test_gct_queries,
                                     fields_to_aggregate_in_test_gct_targets,
                                     fields_to_aggregate_in_bg_gct, query_field_name,
                                     target_field_name, sep):
    """
    Create new metadata fields in the GCTs indicating how replicates
    should be collapsed.

    - fields_to_aggregate_in_test_gct_queries must be in the columns of the test_gct
    - fields_to_aggregate_in_test_gct_targets must be in the rows of the test_gct
    - fields_to_aggregate_in_bg_gct must be in the rows AND columns of the bg_gct
      (currently only symmetric background GCTs are supported)

    If a metadata df is empty, or an aggregation-field list is empty, the
    ids (index values) are used as perturbation identifiers instead.

    Args:
        test_gct (GCToo)
        bg_gct (GCToo)
        fields_to_aggregate_in_test_gct_queries (list of strings)
        fields_to_aggregate_in_test_gct_targets (list of strings)
        fields_to_aggregate_in_bg_gct (list of strings)
        query_field_name (string): name of the new aggregated column field
        target_field_name (string): name of the new aggregated row field
        sep (string): separator used when joining fields

    Returns:
        test_gct (GCToo): with one row and one column metadata field appended
        bg_gct (GCToo): with one row and one column metadata field appended
    """
    # The same fallback logic applies to all four metadata dfs, so it lives
    # in one helper instead of four copy-pasted stanzas.
    test_gct.col_metadata_df = _append_agg_field(
        test_gct.col_metadata_df, fields_to_aggregate_in_test_gct_queries,
        sep, query_field_name, "test GCT columns")
    test_gct.row_metadata_df = _append_agg_field(
        test_gct.row_metadata_df, fields_to_aggregate_in_test_gct_targets,
        sep, target_field_name, "test GCT rows")
    bg_gct.col_metadata_df = _append_agg_field(
        bg_gct.col_metadata_df, fields_to_aggregate_in_bg_gct,
        sep, target_field_name, "background GCT columns")
    bg_gct.row_metadata_df = _append_agg_field(
        bg_gct.row_metadata_df, fields_to_aggregate_in_bg_gct,
        sep, target_field_name, "background GCT rows")

    return test_gct, bg_gct

def _append_agg_field(meta_df, fields_to_aggregate, sep, agg_field_name, desc):
    """ Append the aggregated replicate-id field agg_field_name to one metadata df.

    Falls back to the df index when there is no metadata at all, or when no
    aggregation fields were requested. `desc` is used only for log messages.

    Args:
        meta_df (pandas df): row or column metadata of a GCT
        fields_to_aggregate (list of strings)
        sep (string)
        agg_field_name (string)
        desc (string): human-readable description of which df this is

    Returns:
        meta_df (pandas df): with agg_field_name appended
    """
    if meta_df.shape[1] == 0:
        logger.info("No metadata provided for {}. ".format(desc) +
                    "Using ids as perturbation identifiers.")
        meta_df[agg_field_name] = meta_df.index
    elif len(fields_to_aggregate) == 0:
        logger.info("No aggregation fields provided for {}. ".format(desc) +
                    "Using ids as perturbation identifiers.")
        meta_df[agg_field_name] = meta_df.index
    else:
        meta_df = aggregate_fields(meta_df, fields_to_aggregate, sep, agg_field_name)
    return meta_df
def aggregate_fields(df, list_of_fields, separator, agg_field_name):
    """ Join multiple columns of a dataframe into a single column.

    Args:
        df (pandas df)
        list_of_fields (list of strings): columns from df to aggregate
        separator (string): string separator to use in the aggregation
        agg_field_name (string): what to name the new, aggregated column

    Returns:
        df (pandas df): same as input, but one new column appended
    """
    # Check that each field to aggregate is present
    for f in list_of_fields:
        assert f in df.columns, (
            "{} is not present as a metadata field.".format(f))

    if len(list_of_fields) == 0:
        # Joining zero fields yields an empty string for every row
        # (matches what the old per-row str.cat produced)
        df[agg_field_name] = ""
    else:
        # Vectorized row-wise join; replaces a slow iterrows() loop
        df[agg_field_name] = df[list_of_fields].astype(str).apply(
            separator.join, axis=1)

    return df
def check_symmetry(test_df, bg_df):
    """ Check whether test and background dfs are symmetric. Assumes a matrix
    is symmetric if it is square; this is a soft check! This matters because
    it affects data extraction later (to avoid double-counting).

    Currently, the background matrix MUST be square.

    Args:
        test_df (pandas df)
        bg_df (pandas df)

    Returns:
        is_test_df_sym (bool)
        is_bg_df_sym (bool)
    """
    # Squareness is our (soft) proxy for symmetry
    is_test_df_sym = test_df.shape[0] == test_df.shape[1]
    if is_test_df_sym:
        logger.info("test_df is square, so it will be considered symmetric.")

    is_bg_df_sym = bg_df.shape[0] == bg_df.shape[1]
    if is_bg_df_sym:
        logger.info("bg_df is square, so it will be considered symmetric.")

    # TODO(lev): should be able to support a non-symmetric background matrix
    # (when we do this, we should also separate bg_gct_field into
    # bg_gct_query_field and bg_gct_target_field)
    assert is_bg_df_sym, "bg_df must be symmetric!"

    return is_test_df_sym, is_bg_df_sym
def compute_connectivities(test_gct, bg_gct, test_gct_query_field,
test_gct_target_field, bg_gct_field,
connectivity_metric, is_test_df_sym, sep):
""" Compute all connectivities for a single test_gct and a single bg_gct.
Args:
test_gct (GCToo): m rows x n cols, where n is the # of queries, m is the # of targets
bg_gct (GCToo): M rows x M rows, where M is a superset of m
test_gct_query_field (string)
test_gct_target_field (string)
bg_gct_field (string)
connectivity_metric (string)
is_test_df_sym (bool)
sep (string): separator to use in creating aggregated strings
Returns:
conn_gct (GCToo): m rows x n cols, where n is the # of queries, m is the # of targets
signed_conn_gct (GCToo): m rows x n cols, where n is the # of queries, m is the # of targets
"""
logger.info("Computing connectivities...")
# Extract queries from test_df columns and targets from test_df index
queries = test_gct.col_metadata_df[test_gct_query_field].unique()
# Extract targets from both test_df and bg_df
test_targets = test_gct.row_metadata_df[test_gct_target_field].unique()
bg_targets = bg_gct.row_metadata_df[bg_gct_field].unique()
# Get intersection of targets
targets = np.intersect1d(test_targets, bg_targets)
assert targets.size > 0, (
"There are no targets in common between the test and bg dfs.\n" +
"test_targets: {}, bg_targets: {}".format(test_targets, bg_targets))
logger.info("{} queries, {} targets".format(len(queries), len(targets)))
# | |
#@<OUT> mysqlx.Type
NAME
Type - Data type constants.
SYNTAX
mysqlx.Type
DESCRIPTION
The data type constants assigned to a Column object retrieved through
RowResult.get_columns().
PROPERTIES
BIGINT
A large integer.
BIT
A bit-value type.
BYTES
A binary string.
DATE
A date.
DATETIME
A date and time combination.
DECIMAL
A packed "exact" fixed-point number.
ENUM
An enumeration.
FLOAT
A floating-point number.
GEOMETRY
A geometry type.
INT
A normal-size integer.
JSON
A JSON-format string.
MEDIUMINT
A medium-sized integer.
SET
A set.
SMALLINT
A small integer.
STRING
A character string.
TIME
A time.
TINYINT
A very small integer.
FUNCTIONS
help([member])
Provides help about this class and it's members
#@<OUT> mysqlx.date_value
NAME
date_value - Creates a Date object which represents a date time.
SYNTAX
      mysqlx.date_value(year, month, day[, hour, minute, second[, milliseconds]])
WHERE
year: The year to be used in the new Date object.
month: The month to be used in the new Date object.
      day: The day to be used in the new Date object.
hour: Hour to be used in the new Date object.
minutes: Minutes to be used in the new Date object.
seconds: Seconds to be used in the new Date object.
milliseconds: Milliseconds to be used in the new Date object.
DESCRIPTION
This function creates a Date object containing:
- A date value.
- A date and time value.
- A date and time value with milliseconds.
#@<OUT> mysqlx.expr
NAME
expr - Creates an Expression object based on a string.
SYNTAX
mysqlx.expr(expressionStr)
WHERE
expressionStr: The expression to be represented by the object
DESCRIPTION
An expression object is required in many operations on the X DevAPI.
Some applications of the expression objects include:
- Creation of documents based on a JSON string
- Defining calculated fields when inserting data on the database
- Defining calculated fields when pulling data from the database
#@<OUT> mysqlx.get_session
NAME
get_session - Creates a Session instance using the provided connection
data.
SYNTAX
mysqlx.get_session(connectionData[, password])
WHERE
connectionData: The connection data for the session
      password: The password to be used in the session.
RETURNS
A Session
DESCRIPTION
A Session object uses the X Protocol to allow executing operations on the
connected MySQL Server.
The connection data may be specified in the following formats:
- A URI string
- A dictionary with the connection options
A basic URI string has the following format:
[scheme://][user[:password]@]<host[:port]|socket>[/schema][?option=value&option=value...]
Connection Options
The following options are valid for use either in a URI or in a
dictionary:
- ssl-mode: The SSL mode to be used in the connection.
- ssl-ca: The path to the X509 certificate authority file in PEM format.
- ssl-capath: The path to the directory that contains the X509
certificate authority files in PEM format.
- ssl-cert: The path to the SSL public key certificate file in PEM
format.
- ssl-key: The path to the SSL private key file in PEM format.
- ssl-crl: The path to file that contains certificate revocation lists.
- ssl-crlpath: The path of directory that contains certificate revocation
list files.
- ssl-cipher: The list of permissible encryption ciphers for connections
that use TLS protocols up through TLSv1.2.
- tls-version: List of protocols permitted for secure connections.
- tls-ciphers: List of TLS v1.3 ciphers to use.
- auth-method: Authentication method.
- get-server-public-key: Request public key from the server required for
RSA key pair-based password exchange. Use when connecting to MySQL 8.0
servers with classic MySQL sessions with SSL mode DISABLED.
- server-public-key-path: The path name to a file containing a
client-side copy of the public key required by the server for RSA key
pair-based password exchange. Use when connecting to MySQL 8.0 servers
with classic MySQL sessions with SSL mode DISABLED.
- connect-timeout: The connection timeout in milliseconds. If not
provided a default timeout of 10 seconds will be used. Specifying a
value of 0 disables the connection timeout.
- compression: Enable compression in client/server protocol.
- compression-algorithms: Use compression algorithm in server/client
protocol.
- compression-level: Use this compression level in the client/server
protocol.
- connection-attributes: List of connection attributes to be registered
at the PERFORMANCE_SCHEMA connection attributes tables.
- local-infile: Enable/disable LOAD DATA LOCAL INFILE.
- net-buffer-length: The buffer size for TCP/IP and socket communication.
When these options are defined in a URI, their values must be URL
encoded.
The following options are also valid when a dictionary is used:
Base Connection Options
- scheme: the protocol to be used on the connection.
- user: the MySQL user name to be used on the connection.
- dbUser: alias for user.
- password: the password to be used on the connection.
- dbPassword: same as password.
- host: the hostname or IP address to be used on the connection.
- port: the port to be used in a TCP connection.
- socket: the socket file name to be used on a connection through unix
sockets.
- schema: the schema to be selected once the connection is done.
      ATTENTION: The dbUser and dbPassword options are deprecated and will be removed in a
future release.
The connection options are case insensitive and can only be defined once.
If an option is defined more than once, an error will be generated.
For additional information on connection data use \? connection.
#@<OUT> mysqlx.help
NAME
help - Provides help about this module and it's members
SYNTAX
mysqlx.help([member])
WHERE
member: If specified, provides detailed information on the given member.
#@<OUT> mysqlx help
NAME
mysqlx - Encloses the functions and classes available to interact with an
X Protocol enabled MySQL Product.
DESCRIPTION
The objects contained on this module provide a full API to interact with
the different MySQL Products implementing the X Protocol.
In the case of a MySQL Server the API will enable doing operations on the
different database objects such as schema management operations and both
table and collection management and CRUD operations. (CRUD: Create, Read,
Update, Delete).
Intention of the module is to provide a full API for development through
scripting languages such as JavaScript and Python, this would be normally
achieved through a normal session.
To use the properties and functions available on this module you first
need to import it.
When running the shell in interactive mode, this module is automatically
imported.
CONSTANTS
- LockContention Row locking mode constants.
- Type Data type constants.
FUNCTIONS
      date_value(year, month, day[, hour, minute, second[, milliseconds]])
Creates a Date object which represents a date time.
expr(expressionStr)
Creates an Expression object based on a string.
get_session(connectionData[, password])
Creates a Session instance using the provided connection data.
help([member])
Provides help about this module and it's members
CLASSES
- BaseResult Base class for the different types of results returned by
the server.
- Collection A Collection is a container that may be used to store
Documents in a MySQL database.
- CollectionAdd Operation to insert documents into a Collection.
- CollectionFind Operation to retrieve documents from a Collection.
- CollectionModify Operation to update documents on a Collection.
- CollectionRemove Operation to delete documents on a Collection.
- DatabaseObject Provides base functionality for database objects.
- DocResult Allows traversing the DbDoc objects returned by a
Collection.find operation.
- Result Allows retrieving information about non query operations
performed on the database.
- RowResult Allows traversing the Row objects returned by a
Table.select operation.
- Schema Represents a Schema as retrieved from a session created
using the X Protocol.
- Session Enables interaction with a MySQL Server using the X
Protocol.
- SqlExecute Handler for execution SQL statements, supports parameter
binding.
- SqlResult Allows browsing through the result information after
performing an operation on the database done through
Session.sql
- Table Represents a Table on an Schema, retrieved with a session
created using mysqlx module.
- TableDelete Operation to delete data from a table.
- TableInsert Operation to insert data into a table.
- TableSelect Operation to retrieve rows from a table.
- TableUpdate Operation to add update records in a Table.
#@<OUT> Help on LockContention
NAME
LockContention - Row locking mode constants.
SYNTAX
mysqlx.LockContention
DESCRIPTION
These constants are used to indicate the locking mode to | |
# <gh_stars>0
## Advent of Code 2019: Intcode Computer v5.1
## https://adventofcode.com/2019
## <NAME> | github.com/vblank182
# **Compatible with Day 11**
# Changelog (v5):
# - Added support for relative parameter mode
# - Added ARB (Adjust Relative Base) opcode
# - Modified tape handling to support reading and writing to addresses beyond initial tape length
# Changelog (v5.1):
# - Fixed accessing of off-tape addresses (read 0 if address doesn't yet exist)
# - Fixed initial tape loading to handle dict tapes from feedback mode
# - Added 'relbase' to program state object returned in feedback mode to properly represent the full state
# - Added 'cycle' to program state object returned in feedback mode to keep track of correct program cycle
# - Removed deprecated debug print using old list format of work tape
from collections import deque, namedtuple
#~# Opcodes #~#
ADD, MUL, IN, OUT, JIT, JIF, LT, EQ, ARB = 1, 2, 3, 4, 5, 6, 7, 8, 9
END = 99
#~# Parameter Modes #~#
POS = 0
IMM = 1
REL = 2
# Formatted tuple for holding the state of a suspended program
ProgramState = namedtuple('ProgramState', ['tape', 'ptr', 'output', 'relbase', 'cycle'])
# Numbers of expected parameters for each opcode
num_params = {1:3, 2:3, 3:1, 4:1, 5:2, 6:2, 7:3, 8:3, 9:1, 99:0}
def loadProgram(inputFile):
    ''' Loads a program file in "0,1,2,3,..." format and returns a list of integers. '''
    with open(inputFile) as f:
        contents = f.read()
    # strip() handles both the presence and absence of a trailing newline;
    # the old `[:-1]` slice unconditionally dropped the final character,
    # corrupting the last value whenever the file lacked a newline.
    return [int(tok) for tok in contents.strip().split(',')]
def runProgram(initialTape, input, debugLevel=0, feedbackMode=False, feedbackPtr=0, feedbackRelbase=0, feedbackCycle=0):
if type(initialTape) == list:
# If the initial tape is a list, make a copy, then convert it to a dict
# Make a copy of the initial tape.
workTapeList = initialTape.copy()
# Convert tape from list to dict to index positions without needing a list large enough to hold all addresses (pythonic! :D)
workTape = {}
for i in range(len(workTapeList)):
workTape[i] = workTapeList[i]
else:
# If the initial tape is a dict (i.e. if we recieved it from a run in feedback mode), just copy it as-is
workTape = initialTape.copy()
try: input = deque(input) # convert input list to a deque to act as a queue
except TypeError: input = deque([input]) # if a single int is input, make it into a list first
output = []
running = True
if feedbackMode:
ptr = feedbackPtr
relbase = feedbackRelbase
cycle = feedbackCycle
else:
ptr = 0
relbase = 0
cycle = 0
while running:
# Determine the current opcode and parameter modes
opcode = int( str(workTape[ptr])[-2:] ) # get the opcode from the last 2 digits of the current position
param_modes = [0]*num_params[opcode]
for i in range(num_params[opcode]):
try:
# Set param mode to digit found (scanning right-to-left from opcode)
param_modes[i] = int( str(workTape[ptr])[-3-i] )
except IndexError:
# Default to param mode 0 if no digit is found
param_modes[i] = 0
#:: [1] ADD - Addition ::#
if opcode == ADD:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left addend)
try:
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
elif param_modes[0] == REL:
param[0] = workTape[relbase + workTape[ptr+1]] # relative mode
except KeyError: param[0] = 0 # off tape
# Param 2 (right addend)
try:
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
elif param_modes[1] == REL:
param[1] = workTape[relbase + workTape[ptr+2]] # relative mode
except KeyError: param[1] = 0 # off tape
# Param 3 (sum)
if param_modes[2] == POS:
workTape[workTape[ptr+3]] = param[0] + param[1] # set output (position mode)
elif param_modes[2] == IMM:
raise InvalidParameterMode(opcode, 3, param_modes[2], "Immediate mode not supported for output.")
break
elif param_modes[2] == REL:
workTape[relbase + workTape[ptr+3]] = param[0] + param[1] # set output (relative mode)
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [2] MUL - Multiplication ::#
elif opcode == MUL:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left multiplicand)
try:
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
elif param_modes[0] == REL:
param[0] = workTape[relbase + workTape[ptr+1]] # position mode
except KeyError: param[0] = 0 # off tape
# Param 2 (right multiplicand)
try:
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
elif param_modes[1] == REL:
param[1] = workTape[relbase + workTape[ptr+2]] # position mode
except KeyError: param[1] = 0 # off tape
# Param 3 (product)
if param_modes[2] == POS:
workTape[workTape[ptr+3]] = param[0] * param[1] # set output (position mode)
elif param_modes[2] == IMM:
raise InvalidParameterMode(opcode, 3, param_modes[2], "Immediate mode not supported for output.")
break
elif param_modes[2] == REL:
workTape[relbase + workTape[ptr+3]] = param[0] * param[1] # set output (relative mode)
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [3] IN - Input ::#
elif opcode == IN:
# Param 1 (position)
if param_modes[0] == POS:
workTape[workTape[ptr+1]] = input.popleft() # store next input at position in parameter (position mode)
elif param_modes[0] == IMM:
raise InvalidParameterMode(opcode, 1, param_modes[0], "Immediate mode not supported for this instruction.")
break
elif param_modes[0] == REL:
workTape[relbase + workTape[ptr+1]] = input.popleft() # store next input at position in parameter (relative mode)
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [4] OUT - Output ::#
elif opcode == OUT:
# Param 1 (position)
try:
if param_modes[0] == POS:
output.append(workTape[workTape[ptr+1]]) # write output (position mode)
elif param_modes[0] == IMM:
output.append(workTape[ptr+1]) # write output (immediate mode)
elif param_modes[0] == REL:
output.append(workTape[relbase + workTape[ptr+1]]) # write output (relative mode)
except KeyError: param[0] = 0 # off tape
ptr += num_params[opcode] + 1 # advance instruction pointer
if feedbackMode: return ProgramState(workTape, ptr, output, relbase, cycle)
#:: [5] JIT - Jump-If-True ::#
elif opcode == JIT:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (condition)
try:
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
elif param_modes[0] == REL:
param[0] = workTape[relbase + workTape[ptr+1]] # relative mode
except KeyError: param[0] = 0 # off tape
# Param 2 (destination)
try:
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
elif param_modes[1] == REL:
param[1] = workTape[relbase + workTape[ptr+2]] # relative mode
except KeyError: param[1] = 0 # off tape
if param[0] != 0: # if nonzero (true),
ptr = param[1] # jump
else:
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [6] JIF - Jump-If-False ::#
elif opcode == JIF:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (condition)
try:
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
elif param_modes[0] == REL:
param[0] = workTape[relbase + workTape[ptr+1]] # relative mode
except KeyError: param[0] = 0 # off tape
# Param 2 (destination)
try:
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
elif param_modes[1] == REL:
param[1] = workTape[relbase + workTape[ptr+2]] # relative mode
except KeyError: param[1] = 0 # off tape
if param[0] == 0: # if zero (false),
ptr = param[1] # jump
else:
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [7] LT - Less Than ::#
elif opcode == LT:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left comparison)
try:
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
elif param_modes[0] == REL:
param[0] = workTape[relbase + workTape[ptr+1]] # relative mode
except KeyError: param[0] = 0 # off tape
# Param 2 (right comparison)
try:
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
| |
calculated at:")
for q in q_points:
print(" %s" % q)
else:
print_error_message("Q-points are not properly specified.")
if log_level:
print_error()
sys.exit(1)
phonon.run_qpoints(
q_points,
with_eigenvectors=settings.is_eigenvectors,
with_group_velocities=settings.is_group_velocity,
with_dynamical_matrices=settings.write_dynamical_matrices,
nac_q_direction=settings.nac_q_direction,
)
if settings.is_hdf5 or settings.qpoints_format == "hdf5":
phonon.write_hdf5_qpoints_phonon()
else:
phonon.write_yaml_qpoints_phonon()
#
# Band structure
#
if run_mode == "band" or run_mode == "band_mesh":
if settings.band_points is None:
npoints = 51
else:
npoints = settings.band_points
band_paths = settings.band_paths
if is_band_auto(settings):
print("SeeK-path is used to generate band paths.")
print(
" About SeeK-path https://seekpath.readthedocs.io/ "
"(citation there-in)"
)
is_legacy_plot = False
bands, labels, path_connections = get_band_qpoints_by_seekpath(
phonon.primitive,
npoints,
is_const_interval=settings.is_band_const_interval,
)
else:
is_legacy_plot = settings.is_legacy_plot
if settings.is_band_const_interval:
reclat = np.linalg.inv(phonon.primitive.cell)
bands = get_band_qpoints(
band_paths, npoints=npoints, rec_lattice=reclat
)
else:
bands = get_band_qpoints(band_paths, npoints=npoints)
path_connections = []
for paths in band_paths:
path_connections += [
True,
] * (len(paths) - 2)
path_connections.append(False)
labels = settings.band_labels
if log_level:
print("Reciprocal space paths in reduced coordinates:")
for band in bands:
print(
"[%6.3f %6.3f %6.3f] --> [%6.3f %6.3f %6.3f]"
% (tuple(band[0]) + tuple(band[-1]))
)
phonon.run_band_structure(
bands,
with_eigenvectors=settings.is_eigenvectors,
with_group_velocities=settings.is_group_velocity,
is_band_connection=settings.is_band_connection,
path_connections=path_connections,
labels=labels,
is_legacy_plot=is_legacy_plot,
)
if interface_mode is None:
comment = None
else:
comment = {
"calculator": interface_mode,
"length_unit": physical_units["length_unit"],
}
if settings.is_hdf5 or settings.band_format == "hdf5":
phonon.write_hdf5_band_structure(comment=comment)
else:
phonon.write_yaml_band_structure(comment=comment)
if plot_conf["plot_graph"] and run_mode != "band_mesh":
plot = phonon.plot_band_structure()
if plot_conf["save_graph"]:
plot.savefig("band.pdf")
else:
plot.show()
#
# mesh sampling
#
if run_mode == "mesh" or run_mode == "band_mesh":
mesh_numbers = settings.mesh_numbers
if mesh_numbers is None:
mesh_numbers = 50.0
mesh_shift = settings.mesh_shift
t_symmetry = settings.is_time_reversal_symmetry
q_symmetry = settings.is_mesh_symmetry
is_gamma_center = settings.is_gamma_center
if (
settings.is_thermal_displacements
or settings.is_thermal_displacement_matrices
): # noqa E129
if settings.cutoff_frequency is not None:
if log_level:
print_error_message(
"Use FMIN (--fmin) instead of CUTOFF_FREQUENCY "
"(--cutoff-freq)."
)
print_error()
sys.exit(1)
phonon.init_mesh(
mesh=mesh_numbers,
shift=mesh_shift,
is_time_reversal=t_symmetry,
is_mesh_symmetry=q_symmetry,
with_eigenvectors=settings.is_eigenvectors,
is_gamma_center=is_gamma_center,
use_iter_mesh=True,
)
if log_level:
print("Mesh numbers: %s" % phonon.mesh_numbers)
else:
phonon.init_mesh(
mesh=mesh_numbers,
shift=mesh_shift,
is_time_reversal=t_symmetry,
is_mesh_symmetry=q_symmetry,
with_eigenvectors=settings.is_eigenvectors,
with_group_velocities=settings.is_group_velocity,
is_gamma_center=is_gamma_center,
)
if log_level:
print("Mesh numbers: %s" % phonon.mesh_numbers)
weights = phonon.mesh.weights
if q_symmetry:
print(
"Number of irreducible q-points on sampling mesh: "
"%d/%d" % (weights.shape[0], np.prod(phonon.mesh_numbers))
)
else:
print("Number of q-points on sampling mesh: %d" % weights.shape[0])
print("Calculating phonons on sampling mesh...")
phonon.mesh.run()
if settings.write_mesh:
if settings.is_hdf5 or settings.mesh_format == "hdf5":
phonon.write_hdf5_mesh()
else:
phonon.write_yaml_mesh()
#
# Thermal property
#
if settings.is_thermal_properties:
if log_level:
if settings.is_projected_thermal_properties:
print("Calculating projected thermal properties...")
else:
print("Calculating thermal properties...")
t_step = settings.temperature_step
t_max = settings.max_temperature
t_min = settings.min_temperature
phonon.run_thermal_properties(
t_min=t_min,
t_max=t_max,
t_step=t_step,
cutoff_frequency=settings.cutoff_frequency,
pretend_real=settings.pretend_real,
band_indices=settings.band_indices,
is_projection=settings.is_projected_thermal_properties,
)
phonon.write_yaml_thermal_properties()
if log_level:
cutoff_freq = phonon.thermal_properties.cutoff_frequency
cutoff_freq /= THzToEv
print("Cutoff frequency: %.5f" % cutoff_freq)
num_ignored_modes = (
phonon.thermal_properties.number_of_modes
- phonon.thermal_properties.number_of_integrated_modes
)
print(
"Number of phonon frequencies less than cutoff "
"frequency: %d/%d"
% (num_ignored_modes, phonon.thermal_properties.number_of_modes)
)
print(
"#%11s %15s%15s%15s%15s"
% (
"T [K]",
"F [kJ/mol]",
"S [J/K/mol]",
"C_v [J/K/mol]",
"E [kJ/mol]",
)
)
tp = phonon.get_thermal_properties_dict()
temps = tp["temperatures"]
fe = tp["free_energy"]
entropy = tp["entropy"]
heat_capacity = tp["heat_capacity"]
for T, F, S, CV in zip(temps, fe, entropy, heat_capacity):
print(("%12.3f " + "%15.7f" * 4) % (T, F, S, CV, F + T * S / 1000))
if plot_conf["plot_graph"]:
plot = phonon.plot_thermal_properties()
if plot_conf["save_graph"]:
plot.savefig("thermal_properties.pdf")
else:
plot.show()
#
# Thermal displacements
#
elif settings.is_thermal_displacements and run_mode in ("mesh", "band_mesh"):
p_direction = settings.projection_direction
if log_level and p_direction is not None:
c_direction = np.dot(p_direction, phonon.primitive.cell)
c_direction /= np.linalg.norm(c_direction)
print(
"Projection direction: [%6.3f %6.3f %6.3f] "
"(fractional)" % tuple(p_direction)
)
print(
" [%6.3f %6.3f %6.3f] "
"(Cartesian)" % tuple(c_direction)
)
if log_level:
print("Calculating thermal displacements...")
t_step = settings.temperature_step
t_max = settings.max_temperature
t_min = settings.min_temperature
phonon.run_thermal_displacements(
t_min=t_min,
t_max=t_max,
t_step=t_step,
direction=p_direction,
freq_min=settings.min_frequency,
freq_max=settings.max_frequency,
)
phonon.write_yaml_thermal_displacements()
if plot_conf["plot_graph"]:
plot = phonon.plot_thermal_displacements(plot_conf["with_legend"])
if plot_conf["save_graph"]:
plot.savefig("thermal_displacement.pdf")
else:
plot.show()
#
# Thermal displacement matrices
#
elif settings.is_thermal_displacement_matrices and run_mode in (
"mesh",
"band_mesh",
):
if log_level:
print("Calculating thermal displacement matrices...")
t_step = settings.temperature_step
t_max = settings.max_temperature
t_min = settings.min_temperature
t_cif = settings.thermal_displacement_matrix_temperatue
if t_cif is None:
temperatures = None
else:
temperatures = [
t_cif,
]
phonon.run_thermal_displacement_matrices(
t_step=t_step,
t_max=t_max,
t_min=t_min,
temperatures=temperatures,
freq_min=settings.min_frequency,
freq_max=settings.max_frequency,
)
phonon.write_yaml_thermal_displacement_matrices()
if t_cif is not None:
phonon.write_thermal_displacement_matrix_to_cif(0)
#
# Projected DOS
#
elif settings.pdos_indices is not None and run_mode in ("mesh", "band_mesh"):
p_direction = settings.projection_direction
if (
log_level and p_direction is not None and not settings.xyz_projection
): # noqa E129
c_direction = np.dot(p_direction, phonon.primitive.cell)
c_direction /= np.linalg.norm(c_direction)
print(
"Projection direction: [%6.3f %6.3f %6.3f] "
"(fractional)" % tuple(p_direction)
)
print(
" [%6.3f %6.3f %6.3f] "
"(Cartesian)" % tuple(c_direction)
)
if log_level:
print("Calculating projected DOS...")
phonon.run_projected_dos(
sigma=settings.sigma,
freq_min=settings.min_frequency,
freq_max=settings.max_frequency,
freq_pitch=settings.frequency_pitch,
use_tetrahedron_method=settings.is_tetrahedron_method,
direction=p_direction,
xyz_projection=settings.xyz_projection,
)
phonon.write_projected_dos()
if plot_conf["plot_graph"]:
pdos_indices = settings.pdos_indices
if is_pdos_auto(settings):
pdos_indices = get_pdos_indices(phonon.primitive_symmetry)
legend = [phonon.primitive.symbols[x[0]] for x in pdos_indices]
else:
legend = [np.array(x) + 1 for x in pdos_indices]
if run_mode != "band_mesh":
plot = phonon.plot_projected_dos(
pdos_indices=pdos_indices, legend=legend
)
if plot_conf["save_graph"]:
plot.savefig("partial_dos.pdf")
else:
plot.show()
#
# Total DOS
#
elif (
(plot_conf["plot_graph"] or settings.is_dos_mode)
and not is_pdos_auto(settings)
and run_mode in ("mesh", "band_mesh")
):
phonon.run_total_dos(
sigma=settings.sigma,
freq_min=settings.min_frequency,
freq_max=settings.max_frequency,
freq_pitch=settings.frequency_pitch,
use_tetrahedron_method=settings.is_tetrahedron_method,
)
if log_level:
print("Calculating DOS...")
if settings.fits_Debye_model:
phonon.set_Debye_frequency()
if log_level:
debye_freq = phonon.get_Debye_frequency()
print("Debye frequency: %10.5f" % debye_freq)
phonon.write_total_dos()
if plot_conf["plot_graph"] and run_mode != "band_mesh":
plot = phonon.plot_total_dos()
if plot_conf["save_graph"]:
plot.savefig("total_dos.pdf")
else:
plot.show()
#
# Momemt
#
elif settings.is_moment and run_mode in ("mesh", "band_mesh"):
freq_min = settings.min_frequency
freq_max = settings.max_frequency
if log_level:
text = "Calculating moment of phonon states distribution"
if freq_min is None and freq_max is None:
text += "..."
elif freq_min is None and freq_max is not None:
text += "\nbelow frequency %.3f..." % freq_max
elif freq_min is not None and freq_max is None:
text += "\nabove frequency %.3f..." % freq_min
elif freq_min is not None and freq_max is not None:
text += "\nbetween frequencies %.3f and %.3f..." % (
freq_min,
freq_max,
)
print(text)
print("")
print("Order| Total | Projected to atoms")
if settings.moment_order is not None:
phonon.run_moment(
order=settings.moment_order,
freq_min=freq_min,
freq_max=freq_max,
is_projection=False,
)
total_moment = phonon.get_moment()
phonon.run_moment(
order=settings.moment_order,
freq_min=freq_min,
freq_max=freq_max,
is_projection=True,
)
text = " %3d |%10.5f | " % (settings.moment_order, total_moment)
for m in phonon.get_moment():
text += "%10.5f " % m
print(text)
else:
for i in range(3):
phonon.run_moment(
order=i,
freq_min=freq_min,
freq_max=freq_max,
is_projection=False,
)
total_moment = phonon.get_moment()
phonon.run_moment(
order=i,
freq_min=freq_min,
freq_max=freq_max,
is_projection=True,
)
text = " %3d |%10.5f | " % (i, total_moment)
for m in phonon.get_moment():
text += "%10.5f " % m
print(text)
#
# Band structure and DOS are plotted simultaneously.
#
if (
run_mode == "band_mesh"
and plot_conf["plot_graph"]
and not settings.is_thermal_properties
and not settings.is_thermal_displacements
and not settings.is_thermal_displacement_matrices
and not settings.is_thermal_distances
): # noqa E129
if settings.pdos_indices is not None:
plot = phonon.plot_band_structure_and_dos(pdos_indices=pdos_indices)
else:
plot = phonon.plot_band_structure_and_dos()
if plot_conf["save_graph"]:
plot.savefig("band_dos.pdf")
else:
plot.show()
#
# Animation
#
elif run_mode == "anime":
anime_type = settings.anime_type
if anime_type == "v_sim":
q_point = settings.anime_qpoint
amplitude = settings.anime_amplitude
fname_out = phonon.write_animation(
q_point=q_point, anime_type="v_sim", amplitude=amplitude
)
if log_level:
print("Animation type: v_sim")
print("q-point: [%6.3f %6.3f %6.3f]" % tuple(q_point))
else:
amplitude = settings.anime_amplitude
band_index = settings.anime_band_index
division = settings.anime_division
shift = settings.anime_shift
fname_out = phonon.write_animation(
anime_type=anime_type,
band_index=band_index,
amplitude=amplitude,
num_div=division,
shift=shift,
)
if log_level:
print("Animation type: %s" % anime_type)
print("amplitude: %f" % amplitude)
if anime_type != "jmol":
print("band index: %d" % band_index)
print("Number of images: %d" % division)
if log_level:
print('Animation was written in "%s". ' % fname_out)
#
# Modulation
#
elif run_mode == "modulation":
mod_setting = settings.modulation
phonon_modes = mod_setting["modulations"]
dimension = mod_setting["dimension"]
if "delta_q" in mod_setting:
delta_q = mod_setting["delta_q"]
else:
delta_q = None
derivative_order = mod_setting["order"]
num_band = len(phonon.primitive) * 3
if log_level:
if len(phonon_modes) == 1:
print(
"Modulated structure with %s multiplicity was created." % dimension
)
else:
print(
"Modulated structures with %s multiplicity were created."
% dimension
)
error_indices = []
for i, ph_mode in enumerate(phonon_modes):
if ph_mode[1] < 0 or ph_mode[1] >= num_band:
error_indices.append(i)
if log_level:
text = "%d: q=%s, band index=%d, amplitude=%f" % (
i + 1,
ph_mode[0],
ph_mode[1] + 1,
ph_mode[2],
)
if len(ph_mode) > 3:
text += ", phase=%f" % ph_mode[3]
print(text)
if error_indices:
if log_level:
lines = [
"Band index of modulation %d is out of range." % (i + 1)
for i in error_indices
]
print_error_message("\n".join(lines))
| |
"""
areas = cstudy.areas
if areas:
name = ""
if "W" in areas:
name += "Water"
if "E" in areas:
name += "Energy"
if "F" in areas:
name += "Food"
return "/static/images/" + name + "Nexus.png"
else:
return "/static/images/NoNexusAreas.png" # TODO create this image
# Recover InteractiveSession
isess = deserialize_isession_and_prepare_db_session()
if isess and isinstance(isess, Response):
return isess
user = isess.get_identity_id()
if not user:
user = "_anonymous"
# Recover case studies READABLE by current user (or "anonymous")
if not isess:
session = DBSession()
else:
session = isess.open_db_session()
# TODO Obtain case studies FILTERED by current user permissions. Show case studies with READ access enabled
# Access Control
# CS in acl and user in acl.detail and acl.detail is READ, WRITE,
# CS in acl and group acl.detail and user in group
base = app.config["APPLICATION_ROOT"]
lst = session.query(CaseStudy).all()
lst2 = []
for cs in lst:
uuid2 = str(cs.uuid)
d = {"resource": nis_api_base + "/case_studies/"+uuid2,
"uuid": uuid2,
"name": cs.name if cs.name else "<empty>",
"oid": cs.oid if cs.oid else "<empty>", # TODO
"internal_code": cs.internal_code if cs.internal_code else "", # TODO
"description": cs.description if cs.description else "", # TODO
"stats": {
"n_versions": str(len(cs.versions)),
"n_commands": str(len([])), # TODO
"n_hierarchies": str(len([])), # TODO
},
"versions": nis_api_base + "/case_studies/" + uuid2 + "/versions/",
"thumbnail": nis_api_base + "/case_studies/" + uuid2 + "/default_view.png",
"thumbnail_png": nis_api_base + "/case_studies/" + uuid2 + "/default_view.png",
"thumbnail_svg": nis_api_base + "/case_studies/" + uuid2 + "/default_view.svg",
"avatar": nis_api_base + get_avatar_path(cs), # Icon representing the type of Nexus study
"case_study_permissions":
{
"read": True,
"annotate": True,
"contribute": True,
"share": False,
"delete": False
}
}
lst2.append(d)
# print(json.dumps(lst2, default=json_serial, sort_keys=True, indent=JSON_INDENT, ensure_ascii=ENSURE_ASCII, separators=(',', ': '))
# )
r = build_json_response(lst2) # TODO Improve it, it must return the number of versions. See document !!!
if isess:
isess.close_db_session()
else:
DBSession.remove()
return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>", methods=["GET"])
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/", methods=["GET"])
def case_study(cs_uuid):  # Information about case study
    """
    Return a JSON description of one case study and all of its versions/sessions.

    Response shape (see also get_version_dict below):
    {"case_study": "<uuid>",
     "name": "Food in the EU",
     "oid": "zenodo.org/2098235",
     "internal_code": "CS1_F_E",
     "resource": "/case_studies/<case study uuid>",
     "description": "...",
     "versions":
      [
       {"uuid": "<uuid>",
        "resource": "/case_studies/<case study uuid>/<version uuid>",
        "tag": "v0.1",
        "sessions":
         [
          {"uuid": "<uuid>",
           "open_date": "2017-09-20T10:00:00Z",
           "close_date": "2017-09-20T10:00:10Z",
           "client": "spreadsheet",
           "restart": True,
           "author": "<uuid>",
          },
          ...
         ]
        "detail": "/case_studies/<case study uuid>/<version uuid>/long.json"
        "generator": "/case_studies/<case study uuid>/<version uuid>/generator.xlsx",
        "state": "/case_studies/<case study uuid>/<version uuid>/state.xlsx",
        "issues": [{"type": "error", "description": "syntax error in command ..."}, ...],
       },
       ...
      ]
    }

    :param cs_uuid: UUID of the case study to describe
    :return: JSON response (404 if the case study does not exist)
    """
    def get_version_dict(vs):
        # Serialize one case-study version to a dict. When mode == "tree" the
        # result is wrapped as {"data": ..., "children": [...]} (UI tree-widget
        # format); otherwise sessions go under a plain "sessions" key.
        # NOTE(review): this closure reads "mode" and "uuid2" from the enclosing
        # function; both are assigned further down in case_study() BEFORE this
        # helper is called (late binding) — do not invoke it earlier.
        def get_session_dict(ss):
            # Serialize one (reproducible) session belonging to the version.
            uuid4 = str(ss.uuid)
            v_session = {"uuid": uuid4,
                         "open_date": str(ss.open_instant),
                         "close_date": str(ss.close_instant),
                         "client": "spreadsheet",  # TODO Spreadsheet, R script, Python script, <Direct?>
                         "restart": ss.restarts,
                         "author": ss.who.name
                         }
            if mode == "tree":
                v_session = {"data": v_session}
            else:
                pass
            return v_session
        uuid3 = str(vs.uuid)
        version = {"uuid": uuid3,
                   "resource": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "tag": "v0.1",  # NOTE(review): tag is hard-coded — confirm versioning scheme
                   "detail": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3,
                   "state": nis_api_base + "/case_studies/" + uuid2 + "/versions/"+uuid3+"/state.xlsx",
                   "issues": None,  # [{"type": "error", "description": "syntax error in command ..."}],
                   "generator": nis_api_base + "/case_studies/"+uuid2+"/versions/"+uuid3+"/generator.xlsx",
                   }
        if mode == "tree":
            version = {"data": version, "children": [get_session_dict(s) for s in vs.sessions]}
        else:
            version["sessions"] = [get_session_dict(s) for s in vs.sessions]
        return version
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()  # NOTE(review): "user" is resolved but not used below (ACL TODO pending)
    if not user:
        user = "_anonymous"
    mode = "tree"  # serialization mode consumed by get_version_dict/get_session_dict
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study, filtered by current user permissions
    # Access Control
    #  CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #  CS in acl and group acl.detail and user in group
    cs = session.query(CaseStudy).filter(CaseStudy.uuid == cs_uuid).first()
    if cs:
        uuid2 = str(cs.uuid)  # read by the get_version_dict closure above
        d = {"uuid": uuid2,
             "name": cs.name if cs.name else "<empty>",
             "oid": cs.oid if cs.oid else "<empty>",
             "internal_code": cs.internal_code if cs.internal_code else "",  # TODO
             "description": cs.description if cs.description else "",  # TODO
             "resource": nis_api_base + "/case_studies/"+uuid2,
             "versions": [get_version_dict(v) for v in cs.versions],
             # NOTE(review): permissions are hard-coded pending ACL integration
             "case_study_permissions":
                 {
                     "read": True,
                     "annotate": True,
                     "contribute": True,
                     "share": False,
                     "delete": False
                 },
             }
        # print(json.dumps(d, default=json_serial, sort_keys=True, indent=JSON_INDENT, ensure_ascii=ENSURE_ASCII, separators=(',', ': ')))
        r = build_json_response(d)
    else:
        r = build_json_response({"error": "The case study '"+cs_uuid+"' does not exist."}, 404)
    isess.close_db_session()
    return r
@app.route(nis_api_base + "/case_studies/<cs_uuid>", methods=["POST"])
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/", methods=["POST"])
def new_case_study_version_from_file(cs_uuid):
    """
    Create a NEW VERSION of an existing case study from a submitted file.

    Steps: ensure an interactive session exists, (TODO) check credentials and
    write permission, receive the submitted file, open a reproducible session
    for a new version of the case study, register and/or execute the command
    generator, then close the reproducible session.

    :param cs_uuid: UUID of the case study
    :return: 204 JSON response on success; 401 if the reproducible session
             cannot be opened
    """
    # Check Interactive Session is Open. If not, open it
    isess = deserialize_isession_and_prepare_db_session(False)
    if not isess:
        isess = InteractiveSession(DBSession)
    # TODO Check User Credentials (from Token)
    testing = is_testing_enabled()
    if testing:
        # BUGFIX: the password value was a bare <PASSWORD> redaction token,
        # which is a syntax error and prevented the module from parsing.
        # Quoted as a placeholder string so the file is valid Python.
        # TODO(review): restore the real test credential (e.g. from config/env).
        isess.identify({"user": "test_user", "password": "<PASSWORD>"}, testing=True)
    # TODO Check User has Write Case Study permissions
    # Receive file
    generator_type, content_type, buffer, execute, register = receive_file_submission(request)
    # Open Reproducible Session, NEW case study version
    try:
        isess.open_reproducible_session(case_study_version_uuid=cs_uuid,
                                        recover_previous_state=False,
                                        cr_new=CreateNew.VERSION,
                                        allow_saving=register
                                        )
    except Exception as e:
        s = "Exception trying to open reproducible session: "+str(e)
        logger.error(s)
        return build_json_response({"error": s}, 401)
    # Submit file to the Interactive Session (which has the open reproducible session)
    issues, output = isess.register_andor_execute_command_generator(generator_type, content_type, buffer, register, execute)
    # Close Reproducible Session
    isess.close_reproducible_session(issues=issues, output=output, save=register, from_web_service=False)
    # TODO Return the issues if there were any. Return outputs (could be a list of binary files)
    r = build_json_response({}, 204)
    serialize_isession_and_close_db_session(isess)
    return r
# @app.route(nis_api_base + "/case_studies/<cs_uuid>", methods=["DELETE"])
# def case_study_delete(cs_uuid): # DELETE a case study
# # Recover InteractiveSession
# isess = deserialize_isession_and_prepare_db_session()
# if isess and isinstance(isess, Response):
# return isess
#
# # TODO Check permissions
# # TODO If possible, deleet ALL the case study
@app.route(nis_api_base + "/case_studies/<cs_uuid>/default_view.png", methods=["GET"])
def case_study_default_view_png(cs_uuid):  # Return a view of the case study in PNG format, for preview purposes
    """Serve a PNG preview of a case study (currently a static placeholder image)."""
    # Recover the interactive session; an error Response is passed through as-is
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Resolve the caller's identity, defaulting to anonymous
    identity = isess.get_identity_id() or "_anonymous"
    # Placeholder image until a real thumbnail is rendered from case-study contents
    return send_static_file("images/case_study_preview_placeholder.png")
    # TODO Real implementation sketch (kept from original):
    #  - open a DB session and fetch the CaseStudy, filtered by user permissions
    #  - scan variables (grammar, data; maybe cut processors) and honor any
    #    hints for elaborating the thumbnail
    #  - elaborate a View in PNG format, close the DB session, return the image
@app.route(nis_api_base + "/case_studies/<cs_uuid>/default_view.svg", methods=["GET"])
def case_study_default_view_svg(cs_uuid):  # Return a view of the case study in SVG format, for preview purposes
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    user = isess.get_identity_id()  # NOTE(review): "user" is resolved but never used below
    if not user:
        user = "_anonymous"
    # Recover case studies READABLE by current user (or "anonymous")
    session = isess.open_db_session()
    # TODO Obtain case study, filtered by current user permissions
    # Access Control
    #  CS in acl and user in acl.detail and acl.detail is READ, WRITE,
    #  CS in acl and group acl.detail and user in group
    cs = session.query(CaseStudy).filter(CaseStudy.uuid == cs_uuid).first()
    # TODO Scan variables. Look for the ones most interesting: grammar, data. Maybe cut processors.
    # TODO Scan also for hints to the elaboration of this thumbnail
    # TODO Elaborate View in SVG format
    isess.close_db_session()
    # TODO Return SVG image
    # NOTE(review): the function currently falls through and returns None; Flask
    # will raise "view function did not return a valid response" for this route.
    # Consider returning a placeholder (as the PNG endpoint does) until implemented.
@app.route(nis_api_base + "/case_studies/<cs_uuid>/versions/<v_uuid>", methods=["GET"])
def case_study_version(cs_uuid, v_uuid): # Information about a case study version
"""
{"case_study": "<uuid>",
"version": "<uuid>",
"resource": "/case_studies/<case study uuid>/<version uuid>",
"tag": "v0.1",
"sessions":
[
{"uuid": "<uuid>",
"open_date": "2017-09-20T10:00:00Z",
"close_date": "2017-09-20T10:00:10Z",
"client": "spreadsheet",
"restart": True,
"author": "<uuid>",
"generator": "/case_studies/<case study uuid>/<version uuid>/<session uuid>/generator.xlsx",
"state": "/case_studies/<case study uuid>/<version uuid>/<session uuid>/state.xlsx",
"issues": [{"type": "error", "description": "syntax error in command ..."}, ...],
},
...
]
"command_executors":
| |
<filename>bin-by-sam_v7.py
#! /usr/bin/env python
import os, sys, math, time
from optparse import OptionParser
from collections import defaultdict
import gzip
#Comai Lab, Ucdavis Genome Center
#<NAME>, <NAME>, 2019
# This work is the property of UC Davis Genome Center - Comai Lab
# Use at your own risk.
# We cannot provide support.
# All information obtained/inferred with this script is without any
# implied warranty of fitness for any purpose or use whatsoever.
#------------------------------------------------------------------------------
#This script outputs a read coverage by bin across a reference sequence, using a directory of samtools aligned .sam files as input.
#It can also output a measure of relative coverage compared to a control dataset. There can be two types of control data: either a
#control file is indicated or the mean of all files in the directory is calculated and used as the control set. In both cases, the
#values for relative percentage per bin were calculated by dividing the percentage of reads mapping to that bin for the sample at
#hand by the mean percentage of reads mapping to that bin for the control set. Finally, all values are multiplied by the ploidy
#parameter (default 2) such that values for bins present in X copies would oscillate around X.
#
#This script also outputs a second small file containing the number of read processed from each sam file.
#
#Usage: [...] denotes optional parameters, if not indicated, default parameters are used.
#bin-by-sam.py -o output-bin-file.txt -s size-of-bins [-c control .sam file] [-u] [-m number of max snps, default is 5] [-b] [-r] [-p ploidy for relative percent calculation] [-C]
#
#For help
#bin-by-sam.py -h
#
#Input:
#Run in a directory with the input .sam files. If you want to use one of the files as control for the relative coverage, specify the file with the -c option.
#
#Parameters
#
#Required:
#-o, output file name
#-s, bin size (bps)
#
#Optional, see below
#
#Output:
#One file with a line per bin of each reference sequence and a column for each input .sam library, as well as the relative coverage per input .sam library.
# ---- Command-line interface -------------------------------------------------
usage = "\n\n%prog"
parser = OptionParser(usage=usage)
# -c: use one specific library as the normalisation control (default is the
#     mean of all libraries in the run).
parser.add_option("-c", "--controlfile", dest="f", default="NA", help="Input zipped sam file if wanting a specific library to be a control for normalization.")
parser.add_option("-o", "--out", dest="o", default = "NA", help="Output bin file.")
parser.add_option("-q", "--minqual", dest="minqual", type = "int", default=0, help="Min mapping quality")
parser.add_option("-s", "--binsize", dest="binsize", type = "int", default=1000000, help="Bin size")
parser.add_option("-b", "--breaks", dest="breaks", action="store_true", default = False, help="Insert breaks")
parser.add_option("-r", "--removefile", dest="r", default=False, help="A sam header file of reference sequences to ignore")
parser.add_option("-p", "--ploidy", dest="ploidy", type = "int", default=2, help="Ploidy multiplier, default is 2 for diploid")
parser.add_option("-C", "--covmode", dest="covmode", action="store_true", default = False, help="Only output coverage columns, not relative percent")
parser.add_option("-m", "--mode", dest="mode", type = "str", default = "-", help="Modes (S or TP are most common): \
\t\t\t\t\tS - use this mode if reads are mapped single ended. \
\t\t\t\t\tPS - reads are mapped paired, but count as if single ended. \
\t\t\t\t\t\t\tTP - eads are mapped paired, this uses the default \"Correct\" PE mapping flags (For paired end use this unless understand the other options) \
\t\t\t\t\t\t\tTPI - same as TP, but allow odd calculated inserts up to 2kb \
\t\t\t\t\t\t\t\tTPM - same as TP, but allow same mapping direction for pairs -,- and +,+. \
\t\t\t\t\t\t\t\tTPA - same as TP, but allow both odd inserts up to 2kb and same mapping direction for pairs -,- and +,+.")
parser.add_option("-l", "--listfile", dest="filelist", default="NA", help="A list of sam or sam.gz files to run on, this would be instead of runnign all sam files in current directory")
parser.add_option("-B", "--bamfile", dest="bams", action="store_true", default = False, help="Use unsorted .bam files instead of .sam or .sam.gz files. DO NOT USE SORTED BAMS!!!!")
(opt, args) = parser.parse_args()
outmode = False  # NOTE(review): set but never read in the visible code
# Validate required / mutually-compatible options up front.
if opt.mode not in ["S", "PS", "TP", "TPI", "TPM", "TPA"]:
    parser.error("Please specify a run mode with the -m paramter. S, PS, TP, TPI, TPM, or TPA. Use the -h option for paramter description")
if opt.o == "NA":
    parser.error("Please specify an output file using the -o parameter. Use the -h option for paramter description")
#parser.add_option("--unique", "-u", dest="unique", action="store_true", default = False, help="U/all only U (Off)")
# -c and -C are mutually exclusive: -C suppresses exactly the relative-percent
# columns that a control library would feed.
if opt.f != "NA" and opt.covmode == True:
    parser.error("Cannot specify a contol then supress contol relative coverage percent columns")
#takes in a sam header file of chroms/genes to ignore, must be specified by command line
# Only @SQ lines are honoured; the SN:/LN: tags give reference name and length.
# BUG FIX: removed a stray `x[3333]` subscript expression (debug leftover) that
# raised IndexError on any header line shorter than 3334 characters.
remlist = []    # reference names to exclude from binning
remcount = {}   # reads discarded per excluded reference
remsize = {}    # length (bp) of each excluded reference
if opt.r != False:
    rem = open(opt.r)
    while 1:
        x = rem.readline()
        if x == '':
            break
        if x[0] != '@':
            # past the header block; stop scanning
            break
        if x[:3] == "@SQ":
            temp = (x[:-1].replace('SN:','').replace('LN:','').split('\t')[1:])
            key2 = temp[0]
            remlist.append(key2)
            remcount[key2] = 0
            remsize[key2] = int(temp[1])
    rem.close()
# Build the sorted worklist of alignment files to process.
if opt.filelist != "NA":
    # Explicit list file: one file name per line, files must live in the
    # current directory.
    with open(opt.filelist) as listfile:
        todo = [line.replace('\n', '') for line in listfile]
else:
    # No list given: pick up every matching file in the current directory.
    entries = os.listdir(os.getcwd())
    if opt.bams == False:
        todo = [name for name in entries if ".sam" in name or ".sam.gz" in name]
    else:
        todo = [name for name in entries if ".bam" in name]
todo.sort()
#read sam header of chrom/genes to use
# Nested counter filled later: data[ref][ref][bin_index][library] -> read count.
data = defaultdict(lambda : defaultdict(lambda: defaultdict(lambda: defaultdict(int))))
all = []     # ordered reference names to bin (NOTE: shadows the builtin `all`)
sizes = []   # reference lengths (bp), parallel to `all`
lookup = {}  # reference name -> length (bp)
if opt.bams == False:
    # SAM (.sam / .sam.gz): parse the @SQ header lines of the first file only;
    # all libraries are assumed to share the same reference.
    if todo[0].endswith('.gz'):
        f = gzip.open(todo[0], 'rb')
    else:
        f = open(todo[0])
    while 1:
        x = f.readline()
        if x[0] != '@':
            # end of the header block
            break
        if "\tLN:0\n" in x:
            # skip zero-length references
            continue
        if x[:3] == "@SQ":
            temp = (x[:-1].replace('SN:','').replace('LN:','').split('\t')[1:])
            key2 = temp[0]
            if key2 in ["ChrUn", "ChrSy"]:
                # hard-coded references that are always excluded
                continue
            if key2 not in remlist:
                all.append(key2)
                sizes.append(int(temp[1]))
                lookup[key2] = int(temp[1])
else:
    # BAM: read the same @SQ information from the pysam header dict.
    import pysam, gc
    f = pysam.AlignmentFile(todo[0], "rb")
    head = f.header
    head = head.to_dict()
    all2 = head['SQ']
    for item in all2:
        if type(item) == str:
            # unexpected entry shape; stop scanning
            break
        tsize = int(item['LN'])
        ref = item['SN']
        if ref in ["ChrUn", "ChrSy"]:
            continue
        if ref not in remlist:
            all.append(ref)
            sizes.append(tsize)
            lookup[ref] = tsize
    f.close()
    del(all2)
    del(head)
    gc.collect()
#for data export purpose; add blank lines based on size of largest reference
# NOTE(review): integer division under Python 2, float under Python 3 - confirm
# which is intended before porting.
numblanks = max(sizes)/opt.binsize/10
fseen = []
# NOTE(review): in the BAM branch `f` was already closed above - double close.
f.close()
globalcount = {}  # libname -> total reads binned for that library
count = 0
liblist = []      # library names in processing order
if opt.mode == "S":
    # Single-end mode: one count per aligned read, binned by the midpoint of
    # the alignment.  Only FLAG values 0 (forward) and 16 (reverse) count.
    for file in todo:
        if file.endswith(".bam") == False:
            if file.endswith(".gz"):
                f = gzip.open(file, 'rb')
            else:
                f = open(file)
        else:
            f = pysam.AlignmentFile(file, "rb")
        print(file)
        libname = file.split('.')[0].replace('_aln','')
        liblist.append(libname)
        globalcount[libname] = 0
        count = 0
        #print time.asctime()
        for x in f:
            if opt.bams == True:
                x = x.tostring()
            count += 1
            # Progress marker every million lines.
            # BUG FIX: was `count % 1000000 == 8`, a leftover debug value that
            # only fired on lines 8, 1000008, ...
            if count % 1000000 == 0:
                print(count)
            if x[0] == '@':
                continue
            l = x.replace('\n','').split('\t')
            if l[2] == '*':
                # unmapped read
                continue
            if int(l[1]) > 80 and int(l[1]) < 2000:
                # paired-end FLAG bits seen in single-end mode: wrong -m setting
                parser.error("Please specify a correct run mode - S, PS, TP, TPI, TPM, or TPA. Use the -h option for paramter description")
                break
            if l[1] not in ['0', '16']:
                # secondary/supplementary/other alignments are skipped
                continue
            if int(l[4]) < opt.minqual:
                continue
            # key1/key2 are both the reference name (kept for symmetry with the
            # nested `data` dict structure).
            key1 = l[2]
            key2 = l[2]
            if key2 in remlist:
                remcount[key2] += 1
                continue
            s1 = int(l[3])
            e1 = s1 + len(l[9])
            # Midpoint of the alignment -> bin index.  `//` keeps the original
            # Python 2 integer-division behaviour and also works under Python 3.
            mid = (e1 - s1 + 1) // 2 + s1
            key3 = int(mid) // opt.binsize
            data[key1][key2][key3][libname] += 1
            globalcount[libname] += 1
        f.close()
else:
#per sam file, count reads
for file in todo:
if file.endswith(".bam") == False:
if file.endswith(".gz"):
f = gzip.open(file, 'rb')
else:
f = open(file)
else:
f = pysam.AlignmentFile(file, "rb")
print file
libname = file.split('.')[0].replace('_aln','')
liblist.append(libname)
globalcount[libname] = 0
count = 0
#print time.asctime()
while 1:
if opt.bams == False:
x1 = f.readline()
else:
try:
xt = f.next()
except StopIteration:
break
x1 = xt.tostring()
count +=1
if count % 1000000 == 8:
print count
if x1 == '':
break
if x1[0] == '@':
continue
l1 = x1.replace('\n','').split('\t')
while int(l1[1]) > 200:
if opt.bams == False:
x1 = f.readline()
else:
xt = f.next()
x1 = xt.tostring()
l1 = x1.replace('\n','').split('\t')
if opt.bams == False:
x2 = f.readline()
else:
xt2 = f.next()
x2 = xt2.tostring()
l2 = x2.replace('\n','').split('\t')
while int(l2[1]) > 200:
if opt.bams == False:
x2 = f.readline()
else:
xt2 = f.next()
x2 = xt2.tostring()
l2 = x2.replace('\n','').split('\t')
#look for slipped pair
if int(l1[1]) > 200 or | |
import numpy as np
import cupy as cp
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from sparana.parameter_selection import get_k_biggest
from sparana.parameter_selection import get_k_smallest
from sparana.model import model
def get_MAV_module(lobo, data):
    ''' Run every datapoint in `data` through the model and accumulate the mean
    activated values into `lobo._weight_stats` (one CPU-side COO matrix per
    layer).  Stats end up as abs(sum of activated weight values) / len(data).

    BUG FIXES vs. the previous version:
    * stats were accumulated from activate_weights(output-of-this-layer), i.e.
      the *next* layer's inputs; now accumulated on the current layer's inputs,
      consistent with get_MAAV_module.
    * `lobo._layer_type` did not exist - the flag lives on the model.
    * the final mean assigned a whole matrix into `.data`; now divides the
      stored values themselves.
    '''
    for i in data:
        # Sparse layers consume column vectors, so transpose the datapoint.
        if lobo._model._layer_type == 'Sparse':
            this_layer_inputs = i.transpose()
        if lobo._model._layer_type == 'Full':
            this_layer_inputs = i
        if lobo._model._comp_type == 'GPU':
            this_layer_inputs = cp.array(this_layer_inputs)
        output = None
        layer_count = 0
        for layer in lobo._model.layers:
            # Accumulate stats on the *current* layer's inputs (matches
            # get_MAAV_module), then propagate forward.
            if lobo._model._layer_type == 'Sparse':
                lobo._weight_stats[layer_count] += layer.activate_weights(this_layer_inputs)
            # Convert the activated full layers to sparse matrices.
            if lobo._model._layer_type == 'Full':
                lobo._weight_stats[layer_count] += csr_matrix(layer.activate_weights(this_layer_inputs))
            output = layer.activate(this_layer_inputs)
            this_layer_inputs = output
            layer_count += 1
        if lobo._model._layer_type == 'Sparse':
            output = output.transpose()
    # Normalise to a mean and store as COO on the CPU.
    lobo._weight_stats = [coo_matrix(i) for i in lobo._weight_stats]
    for i in lobo._weight_stats:
        i.data = abs(i.data) / len(data)
    return
def get_MAAV_module(lobo, data):
    ''' MAAV is mean absolute activated values: run `data` through the model and
    accumulate abs(activated weight values) per layer into `lobo._weight_stats`,
    then (for the lobotomizer) convert to COO and divide by len(data).

    Dropout is disabled on every layer while measuring and restored afterwards.

    BUG FIX: the final averaging loop did `i = i/len(data)`, which only rebound
    the loop variable (a no-op) - the stored values were never divided.
    '''
    for layer in lobo._model.layers:
        layer._dropout = None
    for i in data:
        if lobo._model._layer_type == 'Sparse':
            this_layer_inputs = i.transpose()
        if lobo._model._layer_type == 'Full':
            this_layer_inputs = i
        if lobo._model._comp_type == 'GPU':
            this_layer_inputs = cp.array(this_layer_inputs)
        output = None
        layer_count = 0
        for layer in lobo._model.layers:
            if lobo._model._layer_type == 'Sparse':
                lobo._weight_stats[layer_count] += abs(layer.activate_weights(this_layer_inputs))
            # Convert the activated full layers to sparse matrices.
            if lobo._model._layer_type == 'Full':
                if lobo._lobo_type == 'lobotomizer':
                    lobo._weight_stats[layer_count] += abs(coo_matrix(cp.asnumpy(layer.activate_weights(this_layer_inputs))))
                if lobo._lobo_type == 'parameter_selector':
                    lobo._weight_stats[layer_count] += abs(cp.asnumpy(layer.activate_weights(this_layer_inputs)))
            output = layer.activate(this_layer_inputs)
            this_layer_inputs = output
            layer_count += 1
        if lobo._model._layer_type == 'Sparse':
            output = output.transpose()
    # Convert to COO and take the mean (lobotomizer only; the parameter
    # selector keeps dense arrays).
    if lobo._lobo_type == 'lobotomizer':
        lobo._weight_stats = [coo_matrix(i) for i in lobo._weight_stats]
        for stats in lobo._weight_stats:
            stats.data = stats.data / len(data)
    # Restore the model's configured dropout.
    for layer in lobo._model.layers:
        layer._dropout = lobo._model._dropout
    return
def get_absolute_values_module(lobo):
    '''Overwrite `lobo._weight_stats` with CPU-side COO matrices holding the
    absolute value of every weight in each layer of the model.'''
    comp = lobo._model._comp_type
    if comp == 'GPU':
        # Device arrays: copy to host with .get() before converting.
        lobo._weight_stats = [coo_matrix(abs(layer.weights.get()))
                              for layer in lobo._model.layers]
    if comp == 'CPU':
        lobo._weight_stats = [coo_matrix(abs(layer.weights))
                              for layer in lobo._model.layers]
    return
class lobotomizer:
    ''' Weight-pruning helper.  All stats arrays, sparse or not, are stored in
    CPU RAM, otherwise this would simply double the GPU memory requirements.
    These operations would be sped up on a GPU, but are run much less often
    than training.'''

    def __init__(self, model):
        # Model whose layers are measured and pruned.
        self._model = model
        self._lobo_type = 'lobotomizer'
        # One empty COO matrix per layer, same shape as that layer's weights.
        self._weight_stats = [coo_matrix(i._weights.shape) for i in self._model.layers]
        self._AV_datapoints = 0

    def get_MAV(self, data):
        ''' Run `data` through the model and store the mean activated values in
        the per-layer stats matrices.'''
        get_MAV_module(self, data)
        return

    def get_MAAV(self, data):
        ''' MAAV is mean absolute activated values.'''
        get_MAAV_module(self, data)
        return

    def get_absolute_values(self):
        ''' Replace the stats with the absolute values of the current weights.'''
        get_absolute_values_module(self)
        return

    def get_sparse_masks(self):
        """ Set the sparse training masks (weight != 0) for selected training."""
        for i in self._model._layers:
            i._sparse_training_mask = i._weights != 0
        return

    def get_random(self):
        """ Gets randomized stats matrices, so prune_smallest prunes random
        weights.  (Not implemented yet.)"""
        return

    def get_negative_values(self):
        ''' Stats become |negative weights| only; positive entries are dropped,
        so subsequent pruning targets negative weights.'''
        if self._model._comp_type == 'GPU':
            self._weight_stats = [coo_matrix(i.weights.get()) for i in self._model.layers]
        if self._model._comp_type == 'CPU':
            self._weight_stats = [coo_matrix(i.weights) for i in self._model.layers]
        for i in range(len(self._model.layers)):
            self._weight_stats[i].data[self._weight_stats[i].data > 0] = 0
            self._weight_stats[i].eliminate_zeros()
            self._weight_stats[i].data = abs(self._weight_stats[i].data)
        return

    def get_positive_values(self):
        ''' Mirror of get_negative_values: stats keep positive weights only.'''
        if self._model._comp_type == 'GPU':
            self._weight_stats = [coo_matrix(i.weights.get()) for i in self._model.layers]
        if self._model._comp_type == 'CPU':
            self._weight_stats = [coo_matrix(i.weights) for i in self._model.layers]
        for i in range(len(self._model.layers)):
            self._weight_stats[i].data[self._weight_stats[i].data < 0] = 0
            self._weight_stats[i].eliminate_zeros()
            self._weight_stats[i].data = abs(self._weight_stats[i].data)
        return

    def get_activation_ranks(self, data = None):
        """ Rank the weights within each row (activation) so pruning can drop
        the smallest x% of weights per activation rather than per whole
        weight matrix.  With `data`, MAAV statistics (dense) are ranked;
        without, the absolute weight values are ranked."""
        if data is not None:
            # Temporarily act as a parameter selector so MAAV keeps dense arrays.
            self._lobo_type = 'parameter_selector'
            self._weight_stats = [np.zeros(i._weights.shape) for i in self._model.layers]
            get_MAAV_module(self, data)
            self._lobo_type = 'lobotomizer'
        else:
            if self._model._comp_type == 'GPU':
                self._weight_stats = [abs(i.weights.get()) for i in self._model.layers]
            if self._model._comp_type == 'CPU':
                self._weight_stats = [abs(i.weights) for i in self._model.layers]
        for i in range(len(self._weight_stats)):
            temp = []
            for row in self._weight_stats[i]:
                # Rank of each entry within its row (0 = smallest).  Vectorised
                # form of `for k: ranks[argsort[k]] = k`.
                ranks = np.zeros(len(row))
                ranks[np.argsort(row)] = np.arange(len(row))
                temp.append(ranks)
            self._weight_stats[i] = coo_matrix(np.array(temp))
        return

    def prune_smallest(self, prune_ratio = None, print_stats = False, layers = None):
        ''' Prunes the weights in the model class, using the smallest values
        from the weight stats.  Sparse matrices are reconstructed and
        reassigned to the layer classes.  `layers` is a list of per-layer
        prune ratios (None entries are skipped); when omitted, `prune_ratio`
        is applied to every layer except the last.  There are no checks or
        errors here, so pay attention to the number of layers and the number
        of ratios input.

        BUG FIXES: `self._layer_stats` (nonexistent attribute) is now
        `self._weight_stats`, and the sparse-GPU branches previously rebuilt
        the weight matrix from the indices of the *smallest* entries - i.e.
        they kept exactly the parameters meant to be removed.  They now keep
        everything except the smallest `remove` entries.'''
        # Sparse GPU weights don't support index-based assignment, so they are
        # rebuilt on the CPU and copied back; full GPU, sparse CPU and full CPU
        # weights are assigned in place.
        if layers:
            for i in range(len(layers)):
                if self._model._layer_type == 'Sparse' and self._model._comp_type == 'GPU':
                    # Copy weight matrix to CPU RAM as a COO matrix.
                    cpu_coo_matrix = self._model.layers[i]._weights.get().tocoo()
                    # Number of parameters to be removed.
                    remove = int(layers[i]*cpu_coo_matrix.nnz)
                    if print_stats:
                        print('Pruning ', remove, ' parameters from ', len(cpu_coo_matrix.data), ' parameters in layer ', i)
                    # Indices of the parameters to KEEP (everything but the
                    # `remove` smallest stats values).
                    keep = np.argsort(self._weight_stats[i].data)[remove:]
                    cpu_coo_matrix = coo_matrix((cpu_coo_matrix.data[keep], (cpu_coo_matrix.row[keep], cpu_coo_matrix.col[keep])), shape = cpu_coo_matrix.shape)
                    # Copy back to the GPU as the original CSR format.
                    self._model.layers[i]._weights = cp.sparse.csr_matrix(cpu_coo_matrix)
                else:
                    if layers[i] != None:
                        # Target count, corrected for parameters already pruned
                        # (stats nnz < total weight count).
                        remove = np.size(self._model.layers[i]._weights) * (layers[i] - (1 - self._weight_stats[i].getnnz()/np.size(self._model.layers[i]._weights)))
                        remove = int(remove)
                        if print_stats:
                            print('Pruning ', remove, ' parameters from ', self._weight_stats[i].nnz, ' parameters in layer ', i)
                        # Indices of the parameters to be removed.
                        sortlist = np.argsort(self._weight_stats[i].data)[:remove]
                        # Zero the selected weights and their stats entries.
                        for j in sortlist:
                            self._model.layers[i]._weights[self._weight_stats[i].row[j], self._weight_stats[i].col[j]] = 0
                            self._weight_stats[i].data[j] = 0
                        self._weight_stats[i].eliminate_zeros()
        if not layers:
            # Not pruning the last layer - the model begins to fail quickly
            # when that layer is pruned.
            for i in range(len(self._model.layers)-1):
                if self._model._layer_type == 'Sparse' and self._model._comp_type == 'GPU':
                    cpu_coo_matrix = self._model.layers[i]._weights.get().tocoo()
                    remove = int(prune_ratio*cpu_coo_matrix.nnz)
                    if print_stats:
                        print('Pruning ', remove, ' parameters from ', cpu_coo_matrix.nnz, ' parameters in layer ', i)
                    # Keep everything except the `remove` smallest entries.
                    keep = np.argsort(self._weight_stats[i].data)[remove:]
                    cpu_coo_matrix = coo_matrix((cpu_coo_matrix.data[keep], (cpu_coo_matrix.row[keep], cpu_coo_matrix.col[keep])), shape = cpu_coo_matrix.shape)
                    self._model.layers[i]._weights = cp.sparse.csr_matrix(cpu_coo_matrix)
                else:
                    remove = int(prune_ratio*self._weight_stats[i].getnnz())
                    if print_stats:
                        # BUG FIX: the original message omitted the source count.
                        print('Pruning ', remove, ' parameters from ', self._weight_stats[i].nnz, ' parameters in layer ', i)
                    sortlist = np.argsort(self._weight_stats[i].data)[:remove]
                    # Zero the selected weights in place.  (NOTE: unlike the
                    # `layers` branch above, the stats matrix is left untouched
                    # here, matching the original behaviour.)
                    for j in sortlist:
                        self._model.layers[i]._weights[self._weight_stats[i].row[j], self._weight_stats[i].col[j]] = 0
        return

    def prune_all_negative(self, layers = None, prune_ratio = None):
        """ Just prunes the weights of a matrix that are negative.  `layers`
        is an optional list of booleans selecting which layers to prune;
        without it every layer is pruned.  (`prune_ratio` is accepted for
        interface symmetry but currently unused.)"""
        if layers:
            for i in range(len(layers)):
                if layers[i] == True:
                    self._model._layers[i]._weights[self._model._layers[i]._weights < 0] = 0
        else:
            for layer in self._model._layers:
                layer._weights[layer._weights < 0] = 0
        return
class vulcanizer:
''' This is for splitting a smaller model off the main model, which can then be trained in a memory/compute restricted system, and the parameters can be reinserted into the main model.'''
def | |
will be loaded into the state and can be sampled directly
start_steps = 0
epsilon = 0.1
else:
start_steps = 5000
if TESTING:
start_steps = 10
epsilon = 1
chkpt_pth = os.path.join(CHCKPT_DIR, base_name)
agent_params = all_agent_params[agent_type].copy()
agent_params['seed'] = seed
agent_params['min_sat_action'] = min_action
agent_params['max_sat_action'] = max_action
agent_params['min_therm_action'] = 10 if not vav else 0
agent_params['max_therm_action'] = 40 if not vav else 100
agent_params['min_action'] = min_action
agent_params['max_action'] = max_action
agent_params['start_steps'] = start_steps
agent_params['alpha'] = alpha
agent_params['automatic_entropy_tuning'] = automatic_entropy_tuning
agent_params['epsilon'] = epsilon
state_length = sum([1 if isinstance(s, str) else len(list(s.values())[0]) for s in state_name])
if 'SAC' in agent_type:
network = sac_network_map[network_type]
if TESTING:
# Make the agent smaller so that the tests run faster
agent_params["hidden_size"] = 2
agent_params["replay_size"] = 200
agent_params["batch_size"] = 10
elif 'DuelingDQN' in agent_type:
network = branching_dueling_dqn_network_map[network_type]
elif 'PPO' in agent_type:
network = ppo_network_map[network_type]
else:
raise ValueError(f'{agent_type} is not an acceptable agent_type')
# Set the number of actions depending on use case
if control_therm: # Create agents controlling per zone therm and blind
therm_state_length = state_length
if multi_agent:
therm_state_length -= 9 + (blinds and zone_blinds_multi and control_blinds_multi) * 4
# print(f"State_length: {state_length}, Forcasted_length: {len(forecast_vars) * planning_steps}")
# print(state_name)
therm_num_inputs = therm_state_length + len(forecast_vars) * planning_steps
agent_params['n_state'] = therm_num_inputs
agent_params['input_dims'] = (therm_num_inputs,)
agent_params["num_sat_actions"] = 0
agent_params["num_blind_actions"] = 0
if blinds:
agent_params["num_blind_actions"] = 1
if control_blinds_multi and not multi_agent:
agent_params["num_blind_actions"] = 4
if not multi_agent:
if control_sat:
agent_params["num_sat_actions"] = 1
agent_params["num_therm_actions"] = 5
agents.append(agent_map[agent_type](agent_params, network, chkpt_dir=chkpt_pth))
else:
agent_params["num_therm_actions"] = 1
for i in range(1, 6):
if i != 5:
if 'DuelingDQN' in agent_type:
agent_params['input_dims'] = (therm_num_inputs + 1,)
else:
agent_params['n_state'] = therm_num_inputs + 1
else:
agent_params["num_blind_actions"] = 0
if 'DuelingDQN' in agent_type:
agent_params['input_dims'] = (therm_num_inputs,)
else:
agent_params['n_state'] = therm_num_inputs
# print(f"Zone{i} {agent_params['n_state']}")
agents.append(agent_map[agent_type](agent_params, network,
chkpt_dir=os.path.join(chkpt_pth, f"Zone{i}")))
if control_sat and multi_agent or control_sat and not control_therm:
sat_num_inputs = state_length + len(forecast_vars) * planning_steps
blind_count = 0
for check_name in state_name:
if "Blind" in check_name:
blind_count += 1
if multi_agent:
sat_num_inputs -= blind_count
blind_count = 0
agent_params['n_state'] = sat_num_inputs
agent_params['input_dims'] = (sat_num_inputs,)
agent_params["num_sat_actions"] = 1
agent_params["num_blind_actions"] = blind_count
agent_params["num_therm_actions"] = 0
if load_sat:
sat_agent = agent_map[agent_type](agent_params, network, chkpt_dir=load_sat_path)
chkpt_max_iter = 0
for chkpt_name in glob.glob(os.path.join(load_sat_path, '*')):
chkpt_iter = int(chkpt_name.split('_')[-1])
chkpt_max_iter = max(chkpt_iter, chkpt_max_iter)
if chkpt_max_iter != 400:
exit(1)
sat_agent.load(chkpt_max_iter)
else:
sat_agent = agent_map[agent_type](agent_params, network, chkpt_dir=os.path.join(chkpt_pth, f"Main_SAT"))
if not os.path.exists(os.path.join(chkpt_pth, f"Main_SAT")):
os.makedirs(os.path.join(chkpt_pth, f"Main_SAT"))
agents.append(sat_agent)
print(len(agents))
# Set agent specific params
# if 'SAC' in agent_type:
# agent_params['start_steps'] = start_steps
# agent_params['alpha'] = alpha
# agent_params['automatic_entropy_tuning'] = automatic_entropy_tuning
# agent_params['n_state'] = num_inputs
# network = sac_network_map[network_type]
# if TESTING:
# # Make the agent smaller so that the tests run faster
# agent_params["hidden_size"] = 2
# agent_params["replay_size"] = 200
# agent_params["batch_size"] = 10
# elif 'DuelingDQN' in agent_type:
# # Remove DQN and DDQN for now because they are not being used
# agent_params['epsilon'] = epsilon
# agent_params['input_dims'] = (num_inputs,)
# network = branching_dueling_dqn_network_map[network_type]
# elif 'PPO' in agent_type:
# # Set the number of actions depending on use case
# network = ppo_network_map[network_type]
# agent_params['n_state'] = num_inputs
# else:
# raise ValueError(f'{agent_type} is not an acceptable agent_type')
#
# if multi_agent:
# agent = []
# for i in range(1, 6):
# if i != 5:
# if 'DuelingDQN' in agent_type:
# agent_params['input_dims'] = (num_inputs + 1,)
# else:
# agent_params['n_state'] = num_inputs + 1
# else:
# agent_params["num_blind_actions"] = 0
# if 'DuelingDQN' in agent_type:
# agent_params['input_dims'] = (num_inputs,)
# else:
# agent_params['n_state'] = num_inputs
# # print(f"Zone{i} {agent_params['n_state']}")
# agent.append(agent_map[agent_type](agent_params, network, chkpt_dir=os.path.join(chkpt_dir, f"Zone{i}")))
# else:
# # print(f"{agent_params['n_state']}")
# agent = agent_map[agent_type](agent_params, network, chkpt_dir=chkpt_pth)
# =============================
# SETUP MODEL
# =============================
Model.set_energyplus_folder(eplus_path)
if TESTING:
ep_model = Model(
idf_file_name=idf_path,
weather_file=epw_path,
eplus_naming_dict=eplus_naming_dict_test,
eplus_var_types=eplus_var_types_test,
reward=reward,
tmp_idf_path=os.path.join(RL_RESULTS_DIR, base_name)
)
else:
ep_model = Model(
idf_file_name=idf_path,
weather_file=epw_path,
eplus_naming_dict=eplus_naming_dict,
eplus_var_types=eplus_var_types,
reward=reward,
tmp_idf_path=os.path.join(RL_RESULTS_DIR, base_name)
)
ep_model.set_runperiod(*run_period)
ep_model.edit_configuration('SCHEDULE:COMPACT', {'Name': 'DaylightingAvail'}, {
'Field 4': dlight
})
ep_model.edit_configuration('SCHEDULE:COMPACT', {'Name': 'ReheatCoilAvailSched'}, {
'Field 4': reheat
})
if control_sat:
ep_model.edit_configuration('SCHEDULE:COMPACT', {'Name': 'HeatingCoilAvailSched'}, {
'Field 4': heat
})
ep_model.edit_configuration('SCHEDULE:COMPACT', {'Name': 'CoolingCoilAvailSched'}, {
'Field 4': cool
})
if zone_blinds_multi:
if blinds:
ep_model.set_blinds(
blind_object_list,
blind_material_name="White Painted Metal Blind",
agent_control=True
)
else:
ep_model.set_blinds(
blind_object_list,
blind_material_name="White Painted Metal Blind",
agent_control=False
)
else:
ep_model.edit_configuration('WINDOWSHADINGCONTROL', {'Name': 'CONTROL SHADE'}, {
'Shading Control Type': blind_type,
'Setpoint': stpt,
'Shading Control Is Scheduled': is_scheduled
})
if vav:
ep_model.delete_configuration("ZoneControl:Thermostat")
ep_model.delete_configuration("ThermostatSetpoint:DualSetpoint")
for air_terminal_name in ep_model.get_available_names_under_group("AirTerminal:SingleDuct:VAV:Reheat"):
ep_model.edit_configuration(
idf_header_name="AirTerminal:SingleDuct:VAV:Reheat",
identifier={"Name": air_terminal_name},
update_values={"Zone Minimum Air Flow Input Method": "Scheduled",
"Minimum Air Flow Fraction Schedule Name": f"{air_terminal_name} Customized Schedule"})
ep_model.add_configuration("Schedule:Constant",
{"Name": f"{air_terminal_name} Customized Schedule",
"Schedule Type Limits Name": "Fraction",
"Hourly Value": 0})
external_data = CsvImporter(forecasted_path, planstep=planning_steps)
forecast_state = external_data.get_output_states() # TODO - think about scaling
ep_model.add_state_modifier(external_data)
# =============================
# CREATE BASE DIRECTORIES AND SAVE EXPERIMENT STATE
# =============================
if not os.path.exists(f'logs/{base_name}'): os.makedirs(f'logs/{base_name}')
if not os.path.exists(CHCKPT_DIR): os.makedirs(CHCKPT_DIR)
if not os.path.exists(RL_RESULTS_DIR): os.makedirs(RL_RESULTS_DIR)
run_dir = os.path.join(RL_RESULTS_DIR, base_name)
if not os.path.exists(os.path.join(run_dir)): os.makedirs(run_dir)
if multi_agent:
for i in range(1, 6):
if not os.path.exists(os.path.join(chkpt_pth, f"Zone{i}")): os.makedirs(os.path.join(chkpt_pth, f"Zone{i}"))
exp_info_pth = os.path.join(run_dir, 'experiment_info.txt')
with open(exp_info_pth, 'w') as file:
file.write('\n' + idf_path)
file.write('\n' + epw_path)
file.write('\n' + season)
file.write('\nSAT Control Status: ' + str(control_sat))
file.write('\nSAT Control Loaded: ' + str(load_sat))
file.write('\nSAT Control Load Path: ' + str(load_sat_path))
file.write('\nTHERM Control Status: ' + str(control_therm))
file.write('\nMulti Agent: ' + str(multi_agent))
file.write('\nMulti Blind: ' + str(control_blinds_multi))
file.write('\nWith Blinds: ' + str(blinds))
file.write('\nAgent Type ' + agent_type + '\n')
file.write('\nNetwork Type ' + network_type + '\n')
# remove forecast state from dict (PPO Only)
write_dict = {k: agent_params[k] for k in agent_params.keys() - {'target', 'dist'}}
file.write(json.dumps(write_dict))
file.write('\nReward Type ' + reward_type + '\n')
file.write(json.dumps(reward_params))
base_name = os.path.join(RL_RESULTS_DIR, base_name)
if customize_occupancy:
OG(ep_model, random_seed=seed).generate_daily_schedule(add_to_model=True,
overwrite_dict={f"SPACE{i}-1": f"SPACE{i}-1 People 1"
for i in range(1, 6)})
ep_model.run_parameters[ep_model.run_parameters.index('-d') + 1] = os.path.join(base_name, "epresult")
# return ep_model, agent, forecast_state, agent_type, control_type, \
return ep_model, agents, forecast_state, agent_type, \
(start_run, end_run, base_name, blinds, TESTING, multi_agent, season, control_sat, load_sat, vav)
def run_episodic(ep_model, agent, args):
    """Drive `ep_model` with a single `agent` over a series of simulations,
    using the episodic agent API (agent_start / agent_step / agent_end).

    NOTE(review): `args` is unpacked into 4 items here, but the setup code
    above returns a 10-tuple of run args - confirm the caller adapts it.
    Relies on module-level globals not visible in this block: state_name,
    forecast_state, obs_to_state_values, stpt_action, blind_action.
    """
    start_run, end_run, base_name, blinds = args
    # LOAD CHECKPOINTS
    if start_run > 1:
        agent.load(start_run - 1)
    n_step = 96  # timesteps per day
    for i in range(start_run, end_run):
        print(f'\n============\nRunning simulation number {i}...\n==============\n')
        observations = []  # collected per run; not persisted in this block
        actions = []
        obs = ep_model.reset()
        observations.append(obs)
        # Flatten the observation dict into the agent's state vector.
        state = torch.tensor(obs_to_state_values(obs, state_name + forecast_state)).unsqueeze(0).double()
        ts = pd.to_datetime(obs["time"])
        ts = ts + pd.offsets.DateOffset(year=1991)  # TODO should not be hardcoded
        feeding_state = (state, obs, ts)
        for i_episode in range(agent.tol_eps):
            action = agent.agent_start(feeding_state, i_episode)
            for t in range(n_step):
                # Action layout appears to be (note, setpoint[, blind]) -
                # TODO confirm against the agent implementation.
                stpt_action['note'] = action[0]
                stpt_action['value'] = action[1]
                stpt_action['start_time'] = obs['timestep'] + 1
                env_actions = [stpt_action]
                if blinds:
                    blind_action['value'] = action[2]
                    blind_action['start_time'] = obs['timestep'] + 1
                    env_actions.append(blind_action)
                obs = ep_model.step(env_actions)
                observations.append(obs)
                state = torch.tensor(obs_to_state_values(obs, state_name + forecast_state)).unsqueeze(
                    0).double()
                ts = pd.to_datetime(obs["time"])
                ts = ts + pd.offsets.DateOffset(year=1991)  # TODO should not be hardcoded
                feeding_state = (state, obs, ts)
                if ep_model.is_terminate() or (t == (n_step - 1)):
                    # Episode boundary: hand the agent its final reward.
                    # NOTE(review): no `break` here - the loop keeps stepping
                    # with the stale `action` after agent_end; confirm intended.
                    agent.agent_end(obs["reward"], feeding_state, i_episode)
                else:
                    action = agent.agent_step(obs["reward"], feeding_state)
                    actions.append(action)
def save(run_dir, run_num, agents, observations, actions, TESTING, multi_agent, control_sat, load_sat):
print('Saving...')
# run_dir = os.path.join('rl_results', save_name)
# if not os.path.exists(run_dir): os.makedirs(run_dir)
if run_num % 100 == 0:
for i in range(len(agents)):
if i == len(agents) - 1 and control_sat and load_sat:
continue
agents[i].save(run_num)
all_obs_df = pd.DataFrame(observations)
if TESTING:
r = ['reward']
d = list(eplus_naming_dict_test.values()) + ['time']
else:
r = ['reward agent 1']
if multi_agent:
r = [f"reward agent {i + 1}" for i in range(5)]
d = list(eplus_naming_dict.values()) + ['time', 'total reward'] + r
obs_df = all_obs_df[d].copy()
obs_df['run'] = run_num
obs_df['time'] = obs_df['time'].mask(obs_df['time'].dt.year > 1, # Warn: hacky way of replacing year
obs_df['time'] + pd.offsets.DateOffset(year=1991))
sat_actions, therm_actions, blind_actions = actions
if sat_actions:
obs_df['Action'] = [a1 for a1, _ in sat_actions]
obs_df['SAT STPT'] = [a2.item() for _, a2 in sat_actions]
for i in range(0, len(therm_actions[0])):
obs_df[f'THERM STPT {i + 1}'] = [a1[i] for a1 in therm_actions]
for i in range(0, len(blind_actions[0])):
obs_df[f'Blind Action {i + 1}'] = [a1[i] for a1 in blind_actions]
mode = 'a' if run_num % 100 != 0 else 'w'
obs_df.to_csv(os.path.join(run_dir, f'run_{run_num // 100}.csv'), mode=mode, header=mode == 'w')
with open(os.path.join(run_dir, 'convergence.csv'), mode) as conv_file:
r_data = obs_df[r].iloc[-1].tolist()
if len(r_data) > | |
'45 Celsius',
'state': 'Normal'},
'V1: 12v': {'reading': '11835 mV',
'state': 'Normal'},
'V1: GP1': {'reading': '910 mV',
'state': 'Normal'},
'V1: GP2': {'reading': '1198 mV',
'state': 'Normal'},
'V1: VDD': {'reading': '3295 mV',
'state': 'Normal'},
'V1: VMA': {'reading': '1201 mV',
'state': 'Normal'},
'V1: VMB': {'reading': '2495 mV',
'state': 'Normal'},
'V1: VMC': {'reading': '3291 mV',
'state': 'Normal'},
'V1: VMD': {'reading': '2495 mV',
'state': 'Normal'},
'V1: VME': {'reading': '1796 mV',
'state': 'Normal'},
'V1: VMF': {'reading': '1528 mV',
'state': 'Normal'},
'V2: 12v': {'reading': '11850 mV',
'state': 'Normal'},
'V2: GP1': {'reading': '2497 mV',
'state': 'Normal'},
'V2: GP2': {'reading': '1196 mV',
'state': 'Normal'},
'V2: VDD': {'reading': '3300 mV',
'state': 'Normal'},
'V2: VMA': {'reading': '1049 mV',
'state': 'Normal'},
'V2: VMB': {'reading': '1098 mV',
'state': 'Normal'},
'V2: VMD': {'reading': '996 mV',
'state': 'Normal'},
'V2: VME': {'reading': '1098 mV',
'state': 'Normal'},
'V2: VMF': {'reading': '996 mV',
'state': 'Normal'}}},
'R1': {'sensor': {'Temp: C2D C0': {'reading': '36 Celsius',
'state': 'Normal'},
'Temp: C2D C1': {'reading': '33 Celsius',
'state': 'Normal'},
'Temp: CPU AIR': {'reading': '32 Celsius',
'state': 'Normal'},
'Temp: Inlet': {'reading': '25 Celsius',
'state': 'Normal'},
'Temp: MCH AIR': {'reading': '39 Celsius',
'state': 'Normal'},
'Temp: MCH DIE': {'reading': '53 Celsius',
'state': 'Normal'},
'Temp: Outlet': {'reading': '30 Celsius',
'state': 'Normal'},
'Temp: SCBY AIR': {'reading': '40 Celsius',
'state': 'Normal'},
'V1: 12v': {'reading': '11835 mV',
'state': 'Normal'},
'V1: GP1': {'reading': '910 mV',
'state': 'Normal'},
'V1: GP2': {'reading': '1198 mV',
'state': 'Normal'},
'V1: VDD': {'reading': '3305 mV',
'state': 'Normal'},
'V1: VMA': {'reading': '1201 mV',
'state': 'Normal'},
'V1: VMB': {'reading': '2495 mV',
'state': 'Normal'},
'V1: VMC': {'reading': '3291 mV',
'state': 'Normal'},
'V1: VMD': {'reading': '2495 mV',
'state': 'Normal'},
'V1: VME': {'reading': '1796 mV',
'state': 'Normal'},
'V1: VMF': {'reading': '1528 mV',
'state': 'Normal'},
'V2: 12v': {'reading': '11821 mV',
'state': 'Normal'},
'V2: GP1': {'reading': '2497 mV',
'state': 'Normal'},
'V2: GP2': {'reading': '1196 mV',
'state': 'Normal'},
'V2: VDD': {'reading': '3305 mV',
'state': 'Normal'},
'V2: VMA': {'reading': '1044 mV',
'state': 'Normal'},
'V2: VMB': {'reading': '1098 mV',
'state': 'Normal'},
'V2: VMD': {'reading': '991 mV',
'state': 'Normal'},
'V2: VME': {'reading': '1098 mV',
'state': 'Normal'},
'V2: VMF': {'reading': '1000 mV',
'state': 'Normal'}}},
'Slot': {'sensor': {'Sensor': {'reading': 'State Reading',
'state': 'Current'}}}}}
golden_output = {'execute.return_value': '''\
Router#show environment
Load for five secs: 4%/0%; one minute: 8%; five minutes: 6%
Time source is NTP, 17:41:24.716 EST Wed Oct 19 2016
Number of Critical alarms: 0
Number of Major alarms: 0
Number of Minor alarms: 0
Slot Sensor Current State Reading
---- ------ ------------- -------
F0 V1: VMA Normal 1796 mV
F0 V1: VMB Normal 1196 mV
F0 V1: VMC Normal 996 mV
F0 V1: VMD Normal 1044 mV
F0 V1: VME Normal 1020 mV
F0 V1: VMF Normal 1098 mV
F0 V1: 12v Normal 11821 mV
F0 V1: VDD Normal 3295 mV
F0 V1: GP1 Normal 908 mV
F0 V1: GP2 Normal 771 mV
F0 V2: VMA Normal 3291 mV
F0 V2: VMB Normal 2495 mV
F0 V2: VMC Normal 1499 mV
F0 V2: VMD Normal 1196 mV
F0 V2: VME Normal 1103 mV
F0 V2: VMF Normal 1000 mV
F0 V2: 12v Normal 11748 mV
F0 V2: VDD Normal 3295 mV
F0 V2: GP1 Normal 771 mV
F0 V2: GP2 Normal 1096 mV
F0 Temp: Inlet Normal 30 Celsius
F0 Temp: Pop Die Normal 43 Celsius
F0 Temp: Left Ext Normal 42 Celsius
F0 Temp: HKP Die Normal 47 Celsius
F0 Temp: CPP Rear Normal 40 Celsius
F0 Temp: Olv Die Normal 38 Celsius
F0 Temp: Rght Ext Normal 37 Celsius
F0 Temp: MCH Die Normal 53 Celsius
F0 V3: VMA Normal 3291 mV
F0 V3: VMB Normal 2495 mV
F0 V3: VMC Normal 1499 mV
F0 V3: VMD Normal 1000 mV
F0 V3: 12v Normal 11850 mV
F0 V3: VDD Normal 3300 mV
P0 Vin Normal 101 V AC
P0 Iin Normal 1 A
P0 Vout Normal 12 V AC
P0 Iout Normal 15 A
P0 Temp1 Normal 26 Celsius
P0 Temp2 Normal 31 Celsius
P0 Temp3 Normal 26 Celsius
P1 Vin Normal 101 V AC
P1 Iin Normal 2 A
P1 Vout Normal 12 V AC
P1 Iout Normal 16 A
P1 Temp1 Normal 26 Celsius
P1 Temp2 Normal 33 Celsius
P1 Temp3 Normal 26 Celsius
P6 Temp1 Normal 38 Celsius
P6 Temp: FC PWM1 Fan Speed 60% 26 Celsius
P7 Temp1 Normal 37 Celsius
P7 Temp: FC PWM1 Fan Speed 60% 26 Celsius
R0 V1: VMA Normal 1201 mV
R0 V1: VMB Normal 2495 mV
R0 V1: VMC Normal 3291 mV
R0 V1: VMD Normal 2495 mV
R0 V1: VME Normal 1796 mV
R0 V1: VMF Normal 1528 mV
R0 V1: 12v Normal 11835 mV
R0 V1: VDD Normal 3295 mV
R0 V1: GP1 Normal 910 mV
R0 V1: GP2 Normal 1198 mV
R0 V2: VMA Normal 1049 mV
R0 V2: VMB Normal 1098 mV
R0 V2: VMD Normal 996 mV
R0 V2: VME Normal 1098 mV
R0 V2: VMF Normal 996 mV
R0 V2: 12v Normal 11850 mV
R0 V2: VDD Normal 3300 mV
R0 V2: GP1 Normal 2497 mV
R0 V2: GP2 Normal 1196 mV
R0 Temp: Outlet Normal 30 Celsius
R0 Temp: CPU AIR Normal 32 Celsius
R0 Temp: Inlet Normal 26 Celsius
R0 Temp: SCBY AIR Normal 45 Celsius
R0 Temp: MCH DIE Normal 54 Celsius
R0 Temp: MCH AIR Normal 40 Celsius
R0 Temp: C2D C0 Normal 35 Celsius
R0 Temp: C2D C1 Normal 37 Celsius
R1 V1: VMA Normal 1201 mV
R1 V1: VMB Normal 2495 mV
R1 V1: VMC Normal 3291 mV
R1 V1: VMD Normal 2495 mV
R1 V1: VME Normal 1796 mV
R1 V1: VMF Normal 1528 mV
R1 V1: 12v Normal 11835 mV
R1 V1: VDD Normal 3305 mV
R1 V1: GP1 Normal 910 mV
R1 V1: GP2 Normal 1198 mV
R1 V2: VMA Normal 1044 mV
R1 V2: VMB Normal 1098 mV
R1 V2: VMD Normal 991 mV
R1 V2: VME Normal 1098 mV
R1 V2: VMF Normal 1000 mV
R1 V2: 12v Normal 11821 mV
R1 V2: VDD Normal 3305 mV
R1 V2: GP1 Normal 2497 mV
R1 V2: GP2 Normal 1196 mV
R1 Temp: Outlet Normal 30 Celsius
R1 Temp: CPU AIR Normal 32 Celsius
R1 Temp: Inlet Normal 25 Celsius
R1 Temp: SCBY AIR Normal 40 Celsius
R1 Temp: MCH DIE Normal 53 Celsius
R1 Temp: MCH AIR Normal 39 Celsius
R1 Temp: C2D C0 Normal 36 Celsius
R1 Temp: C2D C1 Normal 33 Celsius
0 V1: VMA Normal 1098 mV
0 V1: VMB Normal 1196 mV
0 V1: VMC Normal 1494 mV
0 V1: VMD Normal 1796 mV
0 V1: VME Normal 2490 mV
0 V1: VMF Normal 3286 mV
0 V1: 12v Normal 11894 mV
0 V1: VDD Normal 3295 mV
0 V1: GP1 Normal 749 mV
0 V1: GP2 Normal 898 mV
0 V2: VMB Normal 996 mV
0 V2: VME Normal 747 mV
0 V2: VMF Normal 747 mV
0 V2: 12v Normal 11865 mV
0 V2: VDD Normal 3295 mV
0 V2: GP2 Normal 747 mV
0 Temp: Left Normal 30 Celsius
0 Temp: Center Normal 37 Celsius
0 Temp: Asic1 Normal 50 Celsius
0 Temp: Right Normal 35 Celsius
F1 V1: VMA Normal 1796 mV
F1 V1: VMB Normal 1196 mV
F1 V1: VMC Normal 996 mV
F1 V1: VMD Normal 1049 mV
F1 V1: VME Normal 1035 mV
F1 V1: VMF Normal 1098 mV
F1 V1: 12v Normal 11821 mV
F1 V1: VDD Normal 3295 mV
F1 V1: GP1 Normal 903 mV
F1 V1: GP2 Normal 769 mV
F1 V2: VMA Normal 3291 mV
F1 V2: VMB Normal 2495 mV
F1 V2: VMC Normal 1499 mV
F1 V2: VMD Normal 1196 mV
F1 V2: VME Normal 1098 mV
F1 V2: VMF Normal 996 mV
F1 V2: 12v Normal 11762 mV
F1 V2: VDD Normal 3295 mV
F1 V2: GP1 Normal 771 mV
F1 V2: GP2 Normal | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from collections import OrderedDict
import dhtmlparser
from dhtmlparser import HTMLElement
from . import tools
from .structures import MARCSubrecord
# Functions & classes =========================================================
class MARCXMLParser(object):
"""
This class parses everything between ``<root>`` elements. It checks, if
there is root element, so please, give it full XML.
:attr:`controlfields` is simple dictionary, where keys are field
identificators (string, 3 chars). Value is always string.
:attr:`datafields` is little more complicated; it is dictionary made of
arrays of dictionaries, which consists of arrays of :class:`MARCSubrecord`
objects and two special parameters.
It sounds horrible, but it is not that hard to understand::
.datafields = {
"011": ["ind1": " ", "ind2": " "] # array of 0 or more dicts
"012": [
{
"a": ["a) subsection value"],
"b": ["b) subsection value"],
"ind1": " ",
"ind2": " "
},
{
"a": [
"multiple values in a) subsections are possible!",
"another value in a) subsection"
],
"c": [
"subsection identificator is always one character long"
],
"ind1": " ",
"ind2": " "
}
]
}
Attributes:
leader (string): Leader of MARC XML document.
oai_marc (bool): True/False, depending if doc is OAI doc or not
controlfields (dict): Controlfields stored in dict.
datafields (dict of arrays of dict of arrays of strings): Datafileds
stored in nested dicts/arrays.
"""
def __init__(self, xml=None, resort=True):
"""
Constructor.
Args:
xml (str/file, default None): XML to be parsed. May be file-like
object.
resort (bool, default True): Sort the output alphabetically?
"""
self.leader = None
self.oai_marc = False
self.controlfields = OrderedDict()
self.datafields = OrderedDict()
self.valid_i_chars = set(list(" 0123456789*"))
# resort output XML alphabetically
self.resorted = tools.resorted if resort else lambda x: x
# handle file-like objects
if hasattr(xml, "read"):
xml = xml.read()
# it is always possible to create blank object and add values into it
# piece by piece using .add_ctl_field()/.add_data_field() methods.
if xml is not None:
self._original_xml = xml
self._parse_string(xml)
def _parse_string(self, xml):
"""
Parse MARC XML document to dicts, which are contained in
self.controlfields and self.datafields.
Args:
xml (str or HTMLElement): input data
Also detect if this is oai marc format or not (see elf.oai_marc).
"""
if not isinstance(xml, HTMLElement):
xml = dhtmlparser.parseString(str(xml))
# check if there are any records
record = xml.find("record")
if not record:
raise ValueError("There is no <record> in your MARC XML document!")
record = record[0]
self.oai_marc = len(record.find("oai_marc")) > 0
# leader is separate only in marc21
if not self.oai_marc:
leader = record.find("leader")
if len(leader) >= 1:
self.leader = leader[0].getContent()
# parse body in respect of OAI MARC format possibility
if self.oai_marc:
self._parse_control_fields(record.find("fixfield"), "id")
self._parse_data_fields(record.find("varfield"), "id", "label")
else:
self._parse_control_fields(record.find("controlfield"), "tag")
self._parse_data_fields(record.find("datafield"), "tag", "code")
# for backward compatibility of MARC XML with OAI
if self.oai_marc and "LDR" in self.controlfields:
self.leader = self.controlfields["LDR"]
def _parse_control_fields(self, fields, tag_id="tag"):
"""
Parse control fields.
Args:
fields (list): list of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id".
"""
for field in fields:
params = field.params
# skip tags without parameters
if tag_id not in params:
continue
self.controlfields[params[tag_id]] = field.getContent().strip()
def _parse_data_fields(self, fields, tag_id="tag", sub_id="code"):
"""
Parse data fields.
Args:
fields (list): of HTMLElements
tag_id (str): parameter name, which holds the information, about
field name this is normally "tag", but in case of
oai_marc "id"
sub_id (str): id of parameter, which holds informations about
subfield name this is normally "code" but in case of
oai_marc "label"
"""
for field in fields:
params = field.params
if tag_id not in params:
continue
# take care of iX/indX (indicator) parameters
field_repr = OrderedDict([
[self.i1_name, params.get(self.i1_name, " ")],
[self.i2_name, params.get(self.i2_name, " ")],
])
# process all subfields
for subfield in field.find("subfield"):
if sub_id not in subfield.params:
continue
content = MARCSubrecord(
val=subfield.getContent().strip(),
i1=field_repr[self.i1_name],
i2=field_repr[self.i2_name],
other_subfields=field_repr
)
# add or append content to list of other contents
code = subfield.params[sub_id]
if code in field_repr:
field_repr[code].append(content)
else:
field_repr[code] = [content]
tag = params[tag_id]
if tag in self.datafields:
self.datafields[tag].append(field_repr)
else:
self.datafields[tag] = [field_repr]
    def add_ctl_field(self, name, value):
        """
        Add new control field `value` with under `name` into control field
        dictionary :attr:`controlfields`.

        Args:
            name (str): Field identifier -- must be exactly 3 characters
                long (e.g. "001").
            value (str): Content of the control field.

        Raises:
            ValueError: when `name` is not exactly 3 characters long.
        """
        if len(name) != 3:
            raise ValueError("name parameter have to be exactly 3 chars long!")
        self.controlfields[name] = value
def add_data_field(self, name, i1, i2, subfields_dict):
"""
Add new datafield into :attr:`datafields` and take care of OAI MARC
differencies.
Args:
name (str): Name of datafield.
i1 (char): Value of i1/ind1 parameter.
i2 (char): Value of i2/ind2 parameter.
subfields_dict (dict): Dictionary containing subfields (as list).
`subfields_dict` is expected to be in this format::
{
"field_id": ["subfield data",],
...
"z": ["X0456b"]
}
Warning:
For your own good, use OrderedDict for `subfields_dict`, or
constructor's `resort` parameter set to ``True`` (it is by
default).
Warning:
``field_id`` can be only one character long!
"""
if i1 not in self.valid_i_chars:
raise ValueError("Invalid i1 parameter '" + i1 + "'!")
if i2 not in self.valid_i_chars:
raise ValueError("Invalid i2 parameter '" + i2 + "'!")
if len(name) != 3:
raise ValueError(
"`name` parameter have to be exactly 3 chars long!"
)
if not subfields_dict:
raise ValueError(
"`subfields_dict` have to contain something!"
)
if not isinstance(subfields_dict, dict):
raise ValueError(
"`subfields_dict` parameter has to be dict instance!"
)
# check local keys, convert strings to MARCSubrecord instances
subrecords = []
for key, val in subfields_dict.items():
if len(key) > 1:
raise KeyError(
"`subfields_dict` can be only one character long!"
)
# convert other values to lists
if not isinstance(val, list):
val = [val]
subfields = map(
lambda x: MARCSubrecord(x, i1, i2, None),
val
)
subfields_dict[key] = subfields
subrecords.extend(subfields)
# save i/ind values
subfields_dict[self.i1_name] = i1
subfields_dict[self.i2_name] = i2
# append dict, or add new dict into self.datafields
if name in self.datafields:
self.datafields[name].append(subfields_dict)
else:
self.datafields[name] = [subfields_dict]
# to each subrecord add reference to list of all subfields in this
# datafield
other_subfields = self.datafields[name]
for record in subrecords:
record.other_subfields = other_subfields
def get_i_name(self, num, is_oai=None):
"""
This method is used mainly internally, but it can be handy if you work
with with raw MARC XML object and not using getters.
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
str: current name of ``i1``/``ind1`` parameter based on \
:attr:`oai_marc` property.
"""
if num not in (1, 2):
raise ValueError("`num` parameter have to be 1 or 2!")
if is_oai is None:
is_oai = self.oai_marc
i_name = "ind" if not is_oai else "i"
return i_name + str(num)
    @property
    def i1_name(self):
        """
        Property getter / alias for ``self.get_i_name(1)``.

        Returns:
            str: "i1" for OAI documents, "ind1" otherwise.
        """
        return self.get_i_name(1)
    @property
    def i2_name(self):
        """
        Property getter / alias for ``self.get_i_name(2)``.

        Returns:
            str: "i2" for OAI documents, "ind2" otherwise.
        """
        return self.get_i_name(2)
def get_ctl_field(self, controlfield, alt=None):
"""
Method wrapper over :attr:`.controlfields` dictionary.
Args:
controlfield (str): Name of the controlfield.
alt (object, default None): Alternative value of the `controlfield`
when `controlfield` couldn't be found.
Returns:
str: record from given `controlfield`
"""
if not alt:
return self.controlfields[controlfield]
return self.controlfields.get(controlfield, alt)
    def getDataRecords(self, datafield, subfield, throw_exceptions=True):
        """
        .. deprecated::
            Use :func:`get_subfields` instead.

        Backward-compatibility wrapper: `throw_exceptions` maps to
        :func:`get_subfields`'s `exception` argument.
        """
        return self.get_subfields(
            datafield=datafield,
            subfield=subfield,
            exception=throw_exceptions
        )
def get_subfields(self, datafield, subfield, i1=None, i2=None,
exception=False):
"""
Return content of given `subfield` in `datafield`.
Args:
datafield (str): Section name (for example "001", "100", "700").
subfield (str): Subfield name (for example "a", "1", etc..).
i1 (str, default None): Optional i1/ind1 parameter value, which
will be used for search.
i2 (str, default None): Optional i2/ind2 parameter value, which
will be used for search.
exception (bool): If ``True``, :exc:`~exceptions.KeyError` is
raised when method couldn't found given `datafield` /
`subfield`. If ``False``, blank array ``[]`` is returned.
Returns:
list: of :class:`.MARCSubrecord`.
Raises:
KeyError: If the subfield or datafield couldn't be found.
Note:
MARCSubrecord is practically same thing as string, but has defined
:meth:`.MARCSubrecord.i1` and :attr:`.MARCSubrecord.i2`
methods.
You may need to be able to get this, because MARC XML depends on
i/ind parameters from time to time (names of authors for example).
"""
if len(datafield) != 3:
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import numpy as np
import tensorflow.compat.v1 as tf
from utils import linear, log_sum_exp
class Poisson(object):
    """Poisson distribution parameterized by log rates.

    Computes the log probability of integer counts under the model.
    """

    def __init__(self, log_rates):
        """Create Poisson distributions with log_rates parameters.

        Args:
            log_rates: a tensor-like list of log rates underlying the Poisson
                dist.
        """
        self.logr = log_rates

    def logp(self, bin_counts):
        """Compute the log probability for the counts in the bin, under the model.

        Args:
            bin_counts: array-like integer counts

        Returns:
            The log-probability under the Poisson models for each element of
            bin_counts.
        """
        counts = tf.to_float(bin_counts)
        # log poisson(k, r)        = log(r^k * e^(-r) / k!) = k log(r) - r - log k!
        # log poisson(k, r=exp(x)) = k * x - exp(x) - lgamma(k + 1)
        return counts * self.logr - tf.exp(self.logr) - tf.lgamma(counts + 1)
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
    """Log-likelihood under a Gaussian distribution with diagonal covariance.

    Returns the log-likelihood for each dimension.  One should sum the
    results for the log-likelihood under the full multidimensional model.

    Args:
        z: The value to compute the log-likelihood.
        mu: The mean of the Gaussian.
        logvar: The log variance of the Gaussian.

    Returns:
        The log-likelihood under the Gaussian model.
    """
    # ln N(z; mu, sigma) = -0.5 * (ln 2pi + ln var + (z - mu)^2 / var)
    normalized = (z - mu) / tf.exp(0.5 * logvar)
    return -0.5 * (logvar + np.log(2 * np.pi) + tf.square(normalized))
def gaussian_pos_log_likelihood(unused_mean, logvar, noise):
    """Gaussian log-likelihood function for a posterior in VAE

    Note: This function is specialized for a posterior distribution, that has the
    form of z = mean + sigma * noise.

    Args:
        unused_mean: ignore
        logvar: The log variance of the distribution
        noise: The noise used in the sampling of the posterior.

    Returns:
        The log-likelihood under the Gaussian model.
    """
    # With z = mean + sigma * noise, (z - mean) / sigma reduces to `noise`:
    # ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2
    return - 0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise))
class Gaussian(object):
    """Base class for Gaussian distribution classes.

    Subclasses below expose `mean`, `logvar` and `sample` plus a `logp`
    method; this base carries no behavior of its own.
    """
    pass
class DiagonalGaussian(Gaussian):
    """Diagonal Gaussian with different constant mean and variances in each
    dimension.
    """

    def __init__(self, batch_size, z_size, mean, logvar):
        """Create a diagonal gaussian distribution.

        Args:
            batch_size: The size of the batch, i.e. 0th dim in 2D tensor of
                samples.  (Unused here; kept for interface compatibility.)
            z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
            mean: The N-D mean of the distribution.
            logvar: The N-D log variance of the diagonal distribution.
        """
        size__xz = [None, z_size]

        self.mean = mean            # bxn already
        self.logvar = logvar        # bxn already
        self.noise = noise = tf.random_normal(tf.shape(logvar))
        # Reparameterized sample: mean + sigma * eps with sigma = exp(logvar/2).
        self.sample = mean + tf.exp(0.5 * logvar) * noise
        mean.set_shape(size__xz)
        logvar.set_shape(size__xz)
        self.sample.set_shape(size__xz)

    def logp(self, z=None):
        """Compute the log-likelihood under the distribution.

        Args:
            z (optional): value to compute likelihood for, if None, use sample.

        Returns:
            The likelihood of z under the model.
        """
        if z is None:
            z = self.sample

        # This is needed to make sure that the gradients are simple.
        # The value of the function shouldn't change.
        # BUGFIX: identity check via `is`, not `==` -- overloaded tensor
        # equality would build a comparison op rather than a Python bool.
        if z is self.sample:
            return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)

        return diag_gaussian_log_likelihood(z, self.mean, self.logvar)
class LearnableDiagonalGaussian(Gaussian):
    """Diagonal Gaussian whose mean and variance are learned parameters."""

    def __init__(self, batch_size, z_size, name, mean_init=0.0,
                 var_init=1.0, var_min=0.0, var_max=1000000.0):
        """Create a learnable diagonal gaussian distribution.

        Args:
            batch_size: The size of the batch, i.e. 0th dim in 2D tensor of
                samples.
            z_size: The dimension of the distribution, i.e. 1st dim in 2D
                tensor.
            name: prefix name for the mean and log TF variables.
            mean_init (optional): The N-D mean initialization of the
                distribution.
            var_init (optional): The N-D variance initialization of the
                diagonal distribution.
            var_min (optional): The minimum value the learned variance can
                take in any dimension.
            var_max (optional): The maximum value the learned variance can
                take in any dimension.
        """
        size_1xn = [1, z_size]
        size__xn = [None, z_size]
        size_bx1 = tf.stack([batch_size, 1])
        # Sanity-check the requested variance range up front.
        assert var_init > 0.0, "Problems"
        assert var_max >= var_min, "Problems"
        assert var_init >= var_min, "Problems"
        assert var_max >= var_init, "Problems"

        z_mean_1xn = tf.get_variable(name=name+"/mean", shape=size_1xn,
                                     initializer=tf.constant_initializer(mean_init))
        self.mean_bxn = mean_bxn = tf.tile(z_mean_1xn, size_bx1)
        mean_bxn.set_shape(size__xn)  # tile loses shape

        log_var_init = np.log(var_init)
        # The variance is trainable only when the allowed range is non-empty.
        var_is_trainable = var_max > var_min

        z_logvar_1xn = \
            tf.get_variable(name=(name+"/logvar"), shape=size_1xn,
                            initializer=tf.constant_initializer(log_var_init),
                            trainable=var_is_trainable)

        if var_is_trainable:
            # Squash the raw variable through exp + sigmoid so the resulting
            # variance is always confined to [var_min, var_max].
            z_logit_var_1xn = tf.exp(z_logvar_1xn)
            z_var_1xn = tf.nn.sigmoid(z_logit_var_1xn)*(var_max-var_min) + var_min
            z_logvar_1xn = tf.log(z_var_1xn)

        logvar_bxn = tf.tile(z_logvar_1xn, size_bx1)
        self.logvar_bxn = logvar_bxn
        # Reparameterized sample: mean + sigma * eps with sigma = exp(logvar/2).
        self.noise_bxn = noise_bxn = tf.random_normal(tf.shape(logvar_bxn))
        self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn

    def logp(self, z=None):
        """Compute the log-likelihood under the distribution.

        Args:
            z (optional): value to compute likelihood for, if None, use sample.

        Returns:
            The likelihood of z under the model.
        """
        if z is None:
            z = self.sample

        # This is needed to make sure that the gradients are simple.
        # The value of the function shouldn't change.
        # BUGFIX: identity check via `is`, not `==` -- overloaded tensor
        # equality would build a comparison op rather than a Python bool.
        if z is self.sample_bxn:
            return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn,
                                               self.noise_bxn)

        return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)

    @property
    def mean(self):
        """Tiled batch-by-n mean tensor."""
        return self.mean_bxn

    @property
    def logvar(self):
        """Tiled batch-by-n log-variance tensor."""
        return self.logvar_bxn

    @property
    def sample(self):
        """Reparameterized sample tensor: mean + exp(logvar/2) * noise."""
        return self.sample_bxn
class DiagonalGaussianFromInput(Gaussian):
    """Diagonal Gaussian whose mean and variance are conditioned on other
    variables.

    Note: the parameters to convert from input to the learned mean and log
    variance are held in this class.
    """

    def __init__(self, x_bxu, z_size, name, var_min=0.0):
        """Create an input dependent diagonal Gaussian distribution.

        Args:
            x_bxu: The input tensor from which the mean and variance are
                computed, via a linear transformation of x.  I.e.
                mu = Wx + b, log(var) = Mx + c
            z_size: The size of the distribution.
            name: The name to prefix to learned variables.
            var_min (optional): Minimal variance allowed.  This is an
                additional way to control the amount of information getting
                through the stochastic layer.
        """
        size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
        self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name+"/mean"))
        logvar_bxn = linear(x_bxu, z_size, name=(name+"/logvar"))
        if var_min > 0.0:
            # Floor the variance at var_min (in variance space, not log space).
            logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)
        self.logvar_bxn = logvar_bxn

        # Reparameterized sample: mean + sigma * eps with sigma = exp(logvar/2).
        self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)
        self.noise_bxn.set_shape([None, z_size])
        self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn

    def logp(self, z=None):
        """Compute the log-likelihood under the distribution.

        Args:
            z (optional): value to compute likelihood for, if None, use sample.

        Returns:
            The likelihood of z under the model.
        """
        if z is None:
            z = self.sample

        # This is needed to make sure that the gradients are simple.
        # The value of the function shouldn't change.
        # BUGFIX: identity check via `is`, not `==` -- overloaded tensor
        # equality would build a comparison op rather than a Python bool.
        if z is self.sample_bxn:
            return gaussian_pos_log_likelihood(self.mean_bxn,
                                               self.logvar_bxn, self.noise_bxn)

        return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)

    @property
    def mean(self):
        """Input-conditioned mean tensor."""
        return self.mean_bxn

    @property
    def logvar(self):
        """Input-conditioned log-variance tensor (floored by var_min)."""
        return self.logvar_bxn

    @property
    def sample(self):
        """Reparameterized sample tensor: mean + exp(logvar/2) * noise."""
        return self.sample_bxn
class GaussianProcess:
    """Base class for Gaussian processes.

    Marker base with no behavior of its own.
    """
    pass
class LearnableAutoRegressive1Prior(GaussianProcess):
"""AR(1) model where autocorrelation and process variance are learned
parameters. Assumed zero mean.
"""
def __init__(self, batch_size, z_size,
autocorrelation_taus, noise_variances,
do_train_prior_ar_atau, do_train_prior_ar_nvar,
num_steps, name):
"""Create a learnable autoregressive (1) process.
Args:
batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
autocorrelation_taus: The auto correlation time constant of the AR(1)
process.
A value of 0 is uncorrelated gaussian noise.
noise_variances: The variance of the additive noise, *not* the process
variance.
do_train_prior_ar_atau: Train or leave as constant, the autocorrelation?
do_train_prior_ar_nvar: Train or leave as constant, the noise variance?
num_steps: Number of steps to run the process.
name: The name to prefix to learned TF variables.
"""
# Note the use of the plural in all of these quantities. This is intended
# to mark that even though a sample z_t from the posterior is thought of a
# single sample of a multidimensional gaussian, the prior is actually
# thought of as U AR(1) processes, where U is the dimension of the inferred
# input.
size_bx1 = tf.stack([batch_size, 1])
size__xu = [None, z_size]
# process variance, the | |
return [True, res.json()]
else:
return [False, 'Not found']
def create_dashboard_from_template(self, dashboard_name, template, scope, shared=False, annotations={}):
if scope is not None:
if isinstance(scope, basestring) == False:
return [False, 'Invalid scope format: Expected a string']
#
# Clean up the dashboard we retireved so it's ready to be pushed
#
template['id'] = None
template['version'] = None
template['schema'] = 1
template['name'] = dashboard_name
template['isShared'] = shared # make sure the dashboard is not shared
template['isPublic'] = False # reset public sharing
template['publicToken'] = None
#
# set dashboard scope to the specific parameter
# NOTE: Individual panels might override the dashboard scope, the override will NOT be reset
#
template['filterExpression'] = scope
if 'items' in template:
for chart in template['items']:
if 'overrideFilter' in chart and chart['overrideFilter'] == False:
# patch frontend bug to hide scope override warning even when it's not really overridden
chart['scope'] = scope
if 'annotations' in template:
template['annotations'].update(annotations)
else:
template['annotations'] = annotations
template['annotations']['createdByEngine'] = True
#
# Create the new dashboard
#
res = requests.post(self.url + '/ui/dashboards', headers=self.hdrs, data=json.dumps({'dashboard': template}), verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
else:
return [True, res.json()]
def create_dashboard_from_view(self, newdashname, viewname, filter, shared=False, annotations={}):
'''**Description**
Create a new dasboard using one of the Sysdig Monitor views as a template. You will be able to define the scope of the new dashboard.
**Arguments**
- **newdashname**: the name of the dashboard that will be created.
- **viewname**: the name of the view to use as the template for the new dashboard. This corresponds to the name that the view has in the Explore page.
- **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dasboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*.
- **shared**: if set to True, the new dashboard will be a shared one.
- **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons
**Success Return Value**
A dictionary showing the details of the new dashboard.
**Example**
`examples/create_dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_dashboard.py>`_
'''
#
# Find our template view
#
gvres = self.get_view(viewname)
if gvres[0] is False:
return gvres
view = gvres[1]['defaultDashboard']
view['timeMode'] = {'mode' : 1}
view['time'] = {'last' : 2 * 60 * 60 * 1000000, 'sampling' : 2 * 60 * 60 * 1000000}
#
# Create the new dashboard
#
return self.create_dashboard_from_template(newdashname, view, filter, shared, annotations)
def create_dashboard_from_dashboard(self, newdashname, templatename, filter, shared=False, annotations=None):
    '''**Description**
        Create a new dashboard using one of the existing dashboards as a template. You will be able to define the scope of the new dashboard.

    **Arguments**
        - **newdashname**: the name of the dashboard that will be created.
        - **templatename**: the name of the dashboard to use as the template, as it appears in the Sysdig Monitor dashboard page.
        - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dashboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*.
        - **shared**: if set to True, the new dashboard will be a shared one.
        - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons

    **Success Return Value**
        A dictionary showing the details of the new dashboard.

    **Example**
        `examples/create_dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/create_dashboard.py>`_
    '''
    # Fresh dict per call: a shared mutable default would be attached to the
    # new dashboard by the template machinery and mutated there.
    if annotations is None:
        annotations = {}
    #
    # Get the list of dashboards from the server
    #
    res = requests.get(self.url + '/ui/dashboards', headers=self.hdrs, verify=self.ssl_verify)
    if not self._checkResponse(res):
        return [False, self.lasterr]
    j = res.json()

    #
    # Find our template dashboard
    #
    dboard = None
    for db in j['dashboards']:
        if db['name'] == templatename:
            dboard = db
            break
    if dboard is None:
        self.lasterr = 'can\'t find dashboard ' + templatename + ' to use as a template'
        return [False, self.lasterr]

    #
    # Create the dashboard
    #
    return self.create_dashboard_from_template(newdashname, dboard, filter, shared, annotations)
def create_dashboard_from_file(self, newdashname, filename, filter, shared=False, annotations=None):
    '''
    **Description**
        Create a new dashboard using a dashboard template saved to disk.

    **Arguments**
        - **newdashname**: the name of the dashboard that will be created.
        - **filename**: name of a file containing a JSON object for a dashboard in the format of an array element returned by :func:`~SdcClient.get_dashboards`
        - **filter**: a boolean expression combining Sysdig Monitor segmentation criteria that defines what the new dashboard will be applied to. For example: *kubernetes.namespace.name='production' and container.image='nginx'*.
        - **shared**: if set to True, the new dashboard will be a shared one.
        - **annotations**: an optional dictionary of custom properties that you can associate to this dashboard for automation or management reasons

    **Success Return Value**
        A dictionary showing the details of the new dashboard.

    **Example**
        `examples/dashboard_save_load.py <https://github.com/draios/python-sdc-client/blob/master/examples/dashboard_save_load.py>`_
    '''
    # Fresh dict per call: a shared mutable default would be attached to the
    # new dashboard by the template machinery and mutated there.
    if annotations is None:
        annotations = {}
    #
    # Load the Dashboard
    #
    with open(filename) as data_file:
        dboard = json.load(data_file)

    dboard['timeMode'] = {'mode': 1}
    dboard['time'] = {'last': 2 * 60 * 60 * 1000000, 'sampling': 2 * 60 * 60 * 1000000}

    #
    # Create the new dashboard
    #
    return self.create_dashboard_from_template(newdashname, dboard, filter, shared, annotations)
def delete_dashboard(self, dashboard):
    '''**Description**
        Deletes a dashboard.

    **Arguments**
        - **dashboard**: the dashboard object as returned by :func:`~SdcClient.get_dashboards`.

    **Success Return Value**
        `None`.

    **Example**
        `examples/delete_dashboard.py <https://github.com/draios/python-sdc-client/blob/master/examples/delete_dashboard.py>`_
    '''
    # A dashboard object without an id cannot be addressed on the server.
    if 'id' not in dashboard:
        return [False, "Invalid dashboard format"]

    dashboard_url = self.url + '/ui/dashboards/' + str(dashboard['id'])
    res = requests.delete(dashboard_url, headers=self.hdrs, verify=self.ssl_verify)
    if self._checkResponse(res):
        return [True, None]
    return [False, self.lasterr]
def get_metrics(self):
    '''**Description**
        Return the metric list that can be used for data requests/alerts/dashboards.

    **Success Return Value**
        A dictionary containing the list of available metrics.

    **Example**
        `examples/list_metrics.py <https://github.com/draios/python-sdc-client/blob/master/examples/list_metrics.py>`_
    '''
    res = requests.get(self.url + '/api/data/metrics', headers=self.hdrs, verify=self.ssl_verify)
    if self._checkResponse(res):
        return [True, res.json()]
    return [False, self.lasterr]
def get_falco_rules(self):
    # Fetch the falco rules files currently configured for the agents.
    res = requests.get(self.url + '/api/agents/falco_rules', headers=self.hdrs, verify=self.ssl_verify)
    if not self._checkResponse(res):
        return [False, self.lasterr]
    return [True, res.json()]
def set_falco_rules_content_raw(self, raw_payload):
    # PUT the caller-supplied payload verbatim as the new falco rules
    # configuration for the agents.
    body = json.dumps(raw_payload)
    res = requests.put(self.url + '/api/agents/falco_rules', headers=self.hdrs, data=body, verify=self.ssl_verify)
    if self._checkResponse(res):
        return [True, res.json()]
    return [False, self.lasterr]
def set_falco_rules_content(self, filter, rules_content):
    # Wrap a single rules file (scoped by ``filter``) in the payload shape
    # expected by the raw endpoint.
    body = {"files": [{"filter": filter, "content": rules_content}]}
    return self.set_falco_rules_content_raw(body)
def set_falco_rules_filename(self, filter, rules_filename):
    # Read the rules file from disk, then delegate to
    # set_falco_rules_content for the actual upload.
    with open(rules_filename, 'r') as rules_file:
        content = rules_file.read()
    return self.set_falco_rules_content(filter, content)
def clear_falco_rules(self):
    # An empty file list removes every user-supplied falco rules file.
    return self.set_falco_rules_content_raw({'files': []})
# For backwards compatibility
SdcClient = SdMonitorClient
class SdSecureClient(_SdcCommon):
def __init__(self, token="", sdc_url='https://app.sysdigcloud.com', ssl_verify=True):
    # Secure-API client; authentication and request plumbing come from the
    # shared _SdcCommon base class.
    super(SdSecureClient, self).__init__(token, sdc_url, ssl_verify)
    # Not known at construction time; presumably populated by later API
    # calls that need it — TODO confirm against the rest of the class.
    self.customer_id = None
def _get_falco_rules(self, kind):
    # ``kind`` selects the endpoint: "system" or "user" rules file.
    endpoint = self.url + '/api/settings/falco/{}RulesFile'.format(kind)
    res = requests.get(endpoint, headers=self.hdrs, verify=self.ssl_verify)
    if self._checkResponse(res):
        return [True, res.json()]
    return [False, self.lasterr]
def get_system_falco_rules(self):
    '''**Description**
        Get the system falco rules file in use for this customer. See the `Falco wiki <https://github.com/draios/falco/wiki/Falco-Rules>`_ for documentation on the falco rules format.

    **Arguments**
        - None

    **Success Return Value**
        The contents of the system falco rules file.

    **Example**
        `examples/get_secure_system_falco_rules.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_system_falco_rules.py>`_
    '''
    # Thin wrapper: the shared helper parameterises the endpoint on "system"/"user".
    return self._get_falco_rules("system")
def get_user_falco_rules(self):
    '''**Description**
        Get the user falco rules file in use for this customer. See the `Falco wiki <https://github.com/draios/falco/wiki/Falco-Rules>`_ for documentation on the falco rules format.

    **Arguments**
        - None

    **Success Return Value**
        The contents of the user falco rules file.

    **Example**
        `examples/get_secure_user_falco_rules.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_user_falco_rules.py>`_
    '''
    # Thin wrapper: the shared helper parameterises the endpoint on "system"/"user".
    return self._get_falco_rules("user")
def _set_falco_rules(self, kind, rules_content):
    # Fetch the current payload first so its structure is preserved; only
    # the rules content is swapped in before PUTting it back.
    current = self._get_falco_rules(kind)
    if not current[0]:
        return current
    payload = current[1]
    payload["{}RulesFile".format(kind)]["content"] = rules_content
    endpoint = self.url + '/api/settings/falco/{}RulesFile'.format(kind)
    res = requests.put(endpoint, headers=self.hdrs, data=json.dumps(payload), verify=self.ssl_verify)
    if not self._checkResponse(res):
        return [False, self.lasterr]
    return [True, res.json()]
def set_system_falco_rules(self, rules_content):
    '''**Description**
        Set the system falco rules file in use for this customer. NOTE: This API endpoint can *only* be used in on-premise deployments. Generally the system falco rules file is only modified in conjunction with Sysdig support. See the `Falco wiki <https://github.com/draios/falco/wiki/Falco-Rules>`_ for documentation on the falco rules format.

    **Arguments**
        - A string containing the system falco rules.

    **Success Return Value**
        The contents of the system falco rules file that were just updated.

    **Example**
        `examples/set_secure_system_falco_rules.py <https://github.com/draios/python-sdc-client/blob/master/examples/set_secure_system_falco_rules.py>`_
    '''
    # Thin wrapper: the shared helper does the read-modify-write cycle.
    return self._set_falco_rules("system", rules_content)
def set_user_falco_rules(self, rules_content):
'''**Description**
Set the user falco rules file in use for this customer. See the `Falco wiki <https://github.com/draios/falco/wiki/Falco-Rules>`_ for documentation on the falco rules format.
**Arguments**
- A string containing the user falco rules.
**Success Return Value**
The contents of the user falco rules file that were just updated.
**Example**
`examples/set_secure_user_falco_rules.py <https://github.com/draios/python-sdc-client/blob/master/examples/get_secure_user_falco_rules.py>`_
'''
return self._set_falco_rules("user", | |
Reference.
def save(self, *args, **kwargs):
    # Persist first so a newly created row obtains a primary key, then
    # derive the lodgement number from that pk (e.g. pk 123 -> "P000123")
    # and save again.  The nested save() recurses exactly once: on re-entry
    # lodgement_number is no longer empty, so the branch is skipped.
    super(Proposal, self).save(*args,**kwargs)
    if self.lodgement_number == '':
        new_lodgment_id = 'P{0:06d}'.format(self.pk)
        self.lodgement_number = new_lodgment_id
        self.save()
@property
def fee_paid(self):
    """Whether the application fee is considered paid.

    Only apiary-group applications attract a fee; for those it counts as
    paid once a fee invoice reference exists, or when the proposal is an
    amendment.
    """
    if not self.apiary_group_application_type:
        return False
    # bool() replaces the redundant ``True if ... else False`` construct.
    return bool(self.fee_invoice_reference or self.proposal_type == 'amendment')
@property
def fee_amount(self):
    # Amount of the invoice backing this proposal's fee, or None when no
    # fee has been paid.  NOTE(review): raises Invoice.DoesNotExist if the
    # stored reference no longer matches an invoice — confirm callers cope.
    return Invoice.objects.get(reference=self.fee_invoice_reference).amount if self.fee_paid else None
@property
def relevant_applicant(self):
    """The party the proposal is on behalf of: the organisation applicant,
    else the proxy applicant, else the submitter."""
    return self.applicant or self.proxy_applicant or self.submitter
@property
def relevant_applicant_name(self):
    """Display name of the relevant applicant: organisation name, or the
    person's full name for proxy/submitter."""
    if self.applicant:
        return self.applicant.name
    person = self.proxy_applicant if self.proxy_applicant else self.submitter
    return person.get_full_name()
@property
def relevant_applicant_description(self):
    """Organisation name for org applicants, otherwise the person's
    first and last name."""
    if self.applicant:
        return self.applicant.organisation.name
    person = self.proxy_applicant if self.proxy_applicant else self.submitter
    return "{} {}".format(person.first_name, person.last_name)
@property
def relevant_applicant_email(self):
    """Best contact email: the applicant organisation's (when present and
    non-empty), else the proxy applicant's, else the submitter's."""
    if self.applicant and getattr(self.applicant.organisation, 'email', None):
        return self.applicant.organisation.email
    if self.proxy_applicant:
        return self.proxy_applicant.email
    return self.submitter.email
@property
def relevant_applicant_details(self):
    """Multi-line summary: name(s) on the first line, address on the next."""
    if self.applicant:
        return '{} \n{}'.format(
            self.applicant.organisation.name,
            self.applicant.address)
    person = self.proxy_applicant if self.proxy_applicant else self.submitter
    return "{} {}\n{}".format(
        person.first_name,
        person.last_name,
        person.addresses.all().first())
@property
def relevant_applicant_address(self):
    """Address of the relevant applicant: the organisation's registered
    address, or the person's residential address."""
    if self.applicant:
        return self.applicant.address
    person = self.proxy_applicant if self.proxy_applicant else self.submitter
    return person.residential_address
@property
def relevant_applicant_id(self):
    """ID of the applicant organisation or the proxy applicant person.

    Returns None for submitter-only proposals — the original deliberately
    did not expose the submitter id here.  The stray debug ``print()``
    statements have been removed.
    """
    if self.applicant:
        return self.applicant.id
    if self.proxy_applicant:
        return self.proxy_applicant.id
    return None
@property
def relevant_applicant_type(self):
    """One of the APPLICANT_TYPE_* constants matching relevant_applicant."""
    if self.applicant:
        return self.APPLICANT_TYPE_ORGANISATION
    if self.proxy_applicant:
        return self.APPLICANT_TYPE_PROXY
    return self.APPLICANT_TYPE_SUBMITTER
@property
def applicant_field(self):
    """Name of the model field that holds the relevant applicant."""
    if self.applicant:
        return 'applicant'
    if self.proxy_applicant:
        return 'proxy_applicant'
    return 'submitter'
@property
def reference(self):
    # Human-readable identifier, e.g. "P000123-0": lodgement number plus
    # the lodgement sequence of this version of the proposal.
    return '{}-{}'.format(self.lodgement_number, self.lodgement_sequence)
@property
def get_history(self):
    """Return previous proposal versions as a list of
    ``{'id': ..., 'modified': ...}`` dicts, newest first.

    The traversal only reads the ``previous_application`` chain, so the
    ``copy.deepcopy(self)`` the original started from was pure overhead
    and has been removed.
    """
    history = []
    node = self.previous_application
    while node:
        history.append(dict(id=node.id, modified=node.modified_date))
        node = node.previous_application
    return history
def _get_history(self):
    """Return ``[proposal id, previous proposal id]`` pairs along the
    ``previous_application`` chain (diagnostic counterpart of
    ``get_history``).

    The ``copy.deepcopy(self)`` was removed for the same reason as in
    ``get_history``: the chain is only read, never mutated.
    """
    pairs = []
    node = self
    while node.previous_application:
        pairs.append([node.id, node.previous_application.id])
        node = node.previous_application
    return pairs
@property
def is_assigned(self):
    # True once an officer has been assigned to process this proposal.
    return self.assigned_officer is not None
@property
def is_temporary(self):
    # True only while both statuses are the transient 'temp' placeholder.
    return self.customer_status == 'temp' and self.processing_status == 'temp'
@property
def can_user_edit(self):
    """
    :return: True if the application is in one of the editable status.
    """
    # CUSTOMER_EDITABLE_STATE is a class-level constant listing the
    # customer statuses in which the proponent may still change the form.
    return self.customer_status in self.CUSTOMER_EDITABLE_STATE
@property
def can_user_view(self):
    """
    :return: True if the application is in one of the approved status.
    """
    # CUSTOMER_VIEWABLE_STATE is a class-level constant listing the
    # customer statuses in which the proponent has read-only access.
    return self.customer_status in self.CUSTOMER_VIEWABLE_STATE
@property
def is_discardable(self):
    """
    An application can be discarded by a customer if:
    1 - It is a draft
    2 - or if the application has been pushed back to the user
        (processing status 'awaiting_applicant_response')
    """
    return self.customer_status == 'draft' or self.processing_status == 'awaiting_applicant_response'
@property
def is_deletable(self):
    """
    An application can be deleted only if it is a draft and it hasn't been
    lodged yet (an empty lodgement_number means save() never assigned one
    post-lodgement).
    :return: bool
    """
    return self.customer_status == 'draft' and not self.lodgement_number
@property
def latest_referrals(self):
    # The first two referrals per the Referral model's default ordering.
    # NOTE(review): presumably ordered newest-first — confirm the model Meta.
    return self.referrals.all()[:2]
@property
def regions_list(self):
    # Single-element list with the region's name; kept as a list because the
    # group lookups filter with region__name__in=self.regions_list.
    return [self.region.name] if self.region else []
@property
def permit(self):
    # URL of the issued licence document, or None while nothing is approved.
    # NOTE(review): reaches into the private ``_file`` attribute of the
    # document field — confirm that is intentional.
    return self.approval.licence_document._file.url if self.approval else None
@property
def allowed_assessors(self):
    """Members allowed to act on the proposal in its current state: the
    approver group while 'with_approver', the assessor group otherwise."""
    if self.processing_status == 'with_approver':
        responsible_group = self.__approver_group()
    else:
        responsible_group = self.__assessor_group()
    return responsible_group.members.all() if responsible_group else []
# Compliance and Approvals use the assessor group to show/hide
# compliance/approvals actions on the dashboard.
@property
def compliance_assessors(self):
    # Members of the (regional or apiary) assessor group; empty when no
    # group could be resolved.
    group = self.__assessor_group()
    return group.members.all() if group else []
# Approver group required to show/hide reissue actions on the Approval
# dashboard.
@property
def allowed_approvers(self):
    # Members of the (regional or apiary) approver group; empty when no
    # group could be resolved.
    group = self.__approver_group()
    return group.members.all() if group else []
@property
def can_officer_process(self):
    """
    :return: True if the application is in one of the processable status for Assessor role.

    Draft, terminal (approved/declined) and temp/discarded proposals are
    view-only for officers.
    """
    officer_view_state = ['draft', 'approved', 'declined', 'temp', 'discarded']
    # Direct membership test replaces the if/else that returned literals.
    return self.processing_status not in officer_view_state
@property
def amendment_requests(self):
    """All AmendmentRequest rows raised against this proposal."""
    return AmendmentRequest.objects.filter(proposal=self)
@property
def apiary_group_application_type(self):
    """True when the application belongs to the apiary family of types
    (apiary, temporary use, or site transfer)."""
    apiary_types = (
        ApplicationType.APIARY,
        ApplicationType.TEMPORARY_USE,
        ApplicationType.SITE_TRANSFER,
    )
    return bool(self.application_type and self.application_type.name in apiary_types)
def __assessor_group(self):
    """Resolve the assessor group for this proposal.

    Apiary-family applications use the (single) ApiaryAssessorGroup;
    other applications look up a region-specific ProposalAssessorGroup
    and fall back to the default group.
    """
    # Alternative logic for Apiary applications
    if self.apiary_group_application_type:
        group = ApiaryAssessorGroup.objects.first()
        if group:
            return group

    # TODO get list of assessor groups based on region and activity
    if self.region and self.activity:
        # ``filter()`` never raises DoesNotExist, so the try/except that
        # used to wrap this lookup was dead code and has been removed.
        check_group = ProposalAssessorGroup.objects.filter(
            #activities__name__in=[self.activity],
            region__name__in=self.regions_list
        ).distinct()
        if check_group:
            return check_group[0]

    # May raise DoesNotExist if no default group is configured (unchanged).
    return ProposalAssessorGroup.objects.get(default=True)
def __approver_group(self):
    """Resolve the approver group for this proposal.

    Apiary-family applications use the (single) ApiaryApproverGroup;
    other applications look up a region-specific ProposalApproverGroup
    and fall back to the default group.
    """
    # Alternative logic for Apiary applications
    if self.apiary_group_application_type:
        group = ApiaryApproverGroup.objects.first()
        if group:
            return group

    # TODO get list of approver groups based on region and activity
    if self.region and self.activity:
        # ``filter()`` never raises DoesNotExist, so the try/except that
        # used to wrap this lookup was dead code and has been removed.
        check_group = ProposalApproverGroup.objects.filter(
            #activities__name__in=[self.activity],
            region__name__in=self.regions_list
        ).distinct()
        if check_group:
            return check_group[0]

    # May raise DoesNotExist if no default group is configured (unchanged).
    return ProposalApproverGroup.objects.get(default=True)
def __check_proposal_filled_out(self):
    """Return the labels of required fields that are still empty; raise
    ProposalNotComplete when the proposal holds no data at all."""
    if not self.data:
        raise exceptions.ProposalNotComplete()
    required_fields = {
        'region':'Region/District',
        # 'title': 'Title',
        # 'activity': 'Activity'
    }
    return [label for attr, label in required_fields.items() if not getattr(self, attr)]
@property
def assessor_recipients(self):
    """Email addresses to notify for assessment.

    Apiary-family applications use the apiary assessor group; otherwise
    the region's assessor group, falling back to the default group.
    """
    # Alternative logic for Apiary applications
    if self.apiary_group_application_type:
        group = ApiaryAssessorGroup.objects.first()
        if group:
            return group.members_email

    # Proposal logic
    try:
        return ProposalAssessorGroup.objects.get(region=self.region).members_email
    except Exception:
        # Narrowed from a bare ``except:``: still falls back to the default
        # group on any lookup failure (missing or multiple region groups),
        # but no longer swallows KeyboardInterrupt/SystemExit.
        return ProposalAssessorGroup.objects.get(default=True).members_email
@property
def approver_recipients(self):
    """Email addresses to notify for approval.

    Apiary-family applications use the apiary approver group; otherwise
    the region's approver group, falling back to the default group.
    """
    # Alternative logic for Apiary applications
    if self.apiary_group_application_type:
        group = ApiaryApproverGroup.objects.first()
        if group:
            return group.members_email

    # Proposal logic
    try:
        return ProposalApproverGroup.objects.get(region=self.region).members_email
    except Exception:
        # Narrowed from a bare ``except:``: still falls back to the default
        # group on any lookup failure (missing or multiple region groups),
        # but no longer swallows KeyboardInterrupt/SystemExit.
        return ProposalApproverGroup.objects.get(default=True).members_email
@property
def hasAmendmentRequest(self):
    """True when at least one amendment request is still in the
    'requested' state."""
    # exists() asks the database for a cheap boolean instead of fetching
    # and materialising the matching rows just to test truthiness.
    return self.amendment_requests.filter(status='requested').exists()
def referral_email_list(self, user):
    """Comma-separated emails of the responsible officer (or ``user``
    when nobody is assigned) plus every referral recipient."""
    emails = [self.assigned_officer.email if self.assigned_officer else user.email]
    for ref in self.referrals.all():
        emails.append(ref.referral.email)
    return ', '.join(emails)
def can_assess(self, user):
    # Whether ``user`` may act on the proposal in its current processing
    # state: assessment states check assessor-group membership, the
    # approver state checks approver-group membership.  Apiary-family
    # applications consult the apiary groups instead of the regional ones.
    if self.processing_status == 'with_assessor' or self.processing_status == 'with_referral' or self.processing_status == 'with_assessor_requirements':
        if self.apiary_group_application_type:
            # Apiary logic
            return self.__assessor_group() in user.apiaryassessorgroup_set.all()
        else:
            # Proposal logic
            return self.__assessor_group() in user.proposalassessorgroup_set.all()
    elif self.processing_status == 'with_approver':
        if self.apiary_group_application_type:
            # Apiary logic
            return self.__approver_group() in user.apiaryapprovergroup_set.all()
        else:
            # Proposal logic
            return self.__approver_group() in user.proposalapprovergroup_set.all()
    else:
        # Any other state (draft, terminal, ...) is not assessable.
        return False
def assessor_comments_view(self, user):
    """Whether ``user`` may view assessor comments on this proposal.

    Allowed while the proposal is under assessment/approval (or already
    approved) for referees and for members of the assessor or approver
    groups.
    """
    viewable_states = (
        'with_assessor', 'with_referral', 'with_assessor_requirements',
        'with_approver', 'approved',
    )
    if self.processing_status not in viewable_states:
        return False
    try:
        referral = Referral.objects.get(proposal=self, referral=user)
    except Exception:
        # Narrowed from a bare ``except:``; any lookup failure simply means
        # the user is not a referee for this proposal.
        referral = None
    if referral:
        return True
    if self.__assessor_group() in user.proposalassessorgroup_set.all():
        return True
    return self.__approver_group() in user.proposalapprovergroup_set.all()
def has_assessor_mode(self, user):
    """Whether ``user`` should get assessor mode for this proposal.

    False in states that have no assessor; False when another officer is
    assigned; otherwise decided by assessor-group membership (the apiary
    group for apiary-family applications, the regional group otherwise).
    The original duplicated the membership check in three branches; guard
    clauses collapse it to one.
    """
    status_without_assessor = ['with_approver', 'approved', 'declined', 'draft']
    if self.processing_status in status_without_assessor:
        return False
    # If the proposal is assigned to somebody else, only that officer
    # gets assessor mode.
    if self.assigned_officer and self.assigned_officer != user:
        return False
    if self.apiary_group_application_type:
        # Apiary logic
        return self.__assessor_group() in user.apiaryassessorgroup_set.all()
    # Proposal logic
    return self.__assessor_group() in user.proposalassessorgroup_set.all()
def log_user_action(self, action, request):
    # Record ``action`` against this proposal, attributed to the request's
    # authenticated user.
    return ProposalUserAction.log_action(self, action, request.user)
def submit(self, request, viewset):
    """Lodge the proposal: persist the proponent's data, validate required
    fields, stamp the submitter and lodgement date, resolve pending
    amendment requests, write audit logs, send notifications and move the
    proposal to 'with_assessor'.  Everything runs in one transaction, so a
    failed notification rolls the whole lodgement back."""
    from disturbance.components.proposals.utils import save_proponent_data
    with transaction.atomic():
        if self.can_user_edit:
            # Save the data first
            save_proponent_data(self,request,viewset)
            # Apiary applications skip the region/title field check below.
            if self.application_type.name != ApplicationType.APIARY:
                # Check if the special fields have been completed
                missing_fields = self.__check_proposal_filled_out()
                if missing_fields:
                    error_text = 'The proposal has these missing fields, {}'.format(','.join(missing_fields))
                    raise exceptions.ProposalMissingFields(detail=error_text)
            self.submitter = request.user
            self.lodgement_date = timezone.now()
            # Any amendment requests answered by this lodgement are closed.
            if (self.amendment_requests):
                qs = self.amendment_requests.filter(status = "requested")
                if (qs):
                    for q in qs:
                        q.status = 'amended'
                        q.save()

            # Create a log entry for the proposal
            self.log_user_action(ProposalUserAction.ACTION_LODGE_APPLICATION.format(self.lodgement_number), request)
            # Create a log entry for the organisation
            if self.applicant:
                self.applicant.log_user_action(ProposalUserAction.ACTION_LODGE_APPLICATION.format(self.lodgement_number), request)

            # Both the internal and the external notification must succeed
            # before the status change is committed.
            ret1 = send_submit_email_notification(request, self)
            ret2 = send_external_submit_email_notification(request, self)
            if ret1 and ret2:
                self.processing_status = 'with_assessor'
                self.customer_status = 'with_assessor'
                # Lodged documents become read-only for the proponent.
                self.documents.all().update(can_delete=False)
                self.save()
            else:
                raise ValidationError('An error occurred while submitting proposal (Submit email notifications failed)')
        else:
            raise ValidationError('You can\'t edit this proposal at this moment')
    return self
def update(self, request, viewset):
    """Persist edits made by the proponent without lodging the proposal."""
    from disturbance.components.proposals.utils import save_proponent_data
    with transaction.atomic():
        if not self.can_user_edit:
            raise ValidationError('You can\'t edit this proposal at this moment')
        # Store the proponent-entered data, then persist the proposal row.
        save_proponent_data(self, request, viewset)
        self.save()
def send_referral(self,request,referral_email,referral_text):
with transaction.atomic():
try:
referral_email = referral_email.lower()
if self.processing_status == 'with_assessor' or self.processing_status == 'with_referral':
self.processing_status = 'with_referral'
self.save()
referral = None
# Check if the user is in ledger
try:
user = EmailUser.objects.get(email__icontains=referral_email)
except EmailUser.DoesNotExist:
# Validate if it is a deparment user
department_user = get_department_user(referral_email)
if not department_user:
raise ValidationError('The user you want to send the referral to is not a member of the department')
# Check if the user is in ledger or create
user,created = EmailUser.objects.get_or_create(email=department_user['email'].lower())
if created:
user.first_name = department_user['given_name']
user.last_name = department_user['surname']
user.save()
try:
Referral.objects.get(referral=user,proposal=self)
raise ValidationError('A referral has already been sent to this user')
except Referral.DoesNotExist:
# Create Referral
referral = Referral.objects.create(
proposal = self,
referral=user,
sent_by=request.user,
text=referral_text
| |
"""
This module defines some classes to perform a calculation using BigDFT
using binding (GIBinding) or using system call (SystemCalculator).
"""
# In our case for the class SystemCalculator which uses system calls:
# * We define posinp (equivalent of Atoms)
# * We have a python dictionary for the parameter
# * We define a calculator (equivalent of BFGS which is an Optimizer
# (a method to optimize))
# Then we perform the method run.
#
# For the class GIBinding using the Gobject Introspection bindings, two methods
# set and update are added.
#
# The goal is to have a light Calculator almost compatible with ASE
# (Atomic Simulation environment, see https://gitlab.com/ase/ase)
# .. todo::
#    In the future we will add our methods to ASE, which operates at a higher
#    level (workflows of simulations).
# :Example:
# >>> from ase import Atoms
# >>> from ase.optimize import BFGS
# >>> from ase.calculators.nwchem import NWChem
# >>> from ase.io import write
# >>> h2 = Atoms('H2',
# >>> positions=[[0, 0, 0],
# >>> [0, 0, 0.7]])
# >>> h2.calc = NWChem(xc='PBE')
# >>> opt = BFGS(h2, trajectory='h2.traj')
# >>> opt.run(fmax=0.02)
# >>> BFGS: 0 19:10:49 -31.435229 2.2691
# >>> BFGS: 1 19:10:50 -31.490773 0.3740
# >>> BFGS: 2 19:10:50 -31.492791 0.0630
# >>> BFGS: 3 19:10:51 -31.492848 0.0023
# >>> write('H2.xyz', h2)
# >>> h2.get_potential_energy() # ASE's units are eV and Ang
# >>> -31.492847800329216
##
import os
from futile.Utils import write as safe_print
import BigDFT.Logfiles as Lf
class GIBinding():
    """
    Calculator for BigDFT from Gobject Introspection bindings.
    """

    def __init__(self):
        # Import bindings about BigDFT (if the bindings are not generated,
        # this does not work at all).  runObj is pre-set to -1 so __del__
        # can tell whether MPI was ever initialised: if the import or
        # lib_init below fails, runObj keeps the sentinel and __del__ skips
        # lib_finalize().
        from gi.repository import BigDFT
        self.runObj = -1
        # MPI initialisation
        (ierr, self.iproc, self.nproc, igroup, ngroup) = BigDFT.lib_init(0)
        self.runObj = None

    def update(self, inputfile):
        # If the inputpsiid is not present in the inputfile, assume the user
        # wants to do a restart (inputpsiid 1) and inject it.
        # NOTE(review): the copy below is shallow, so an existing 'dft'
        # sub-dict could still be mutated by dict_merge — confirm.
        from futile.Utils import dict_merge
        if "dft" in inputfile and "inputpsiid" in inputfile["dft"]:
            var = inputfile
        else:
            var = inputfile.copy()
            dict_merge(var, {'dft': {'inputpsiid': 1}})
        from gi.repository import BigDFT
        self.runObj.update(BigDFT.Dict(var))

    def run(self):
        # Execute the calculation on the current MPI partition; the result
        # is cached on self.out and also returned.
        self.out = self.runObj.calculate(self.iproc, self.nproc)
        return self.out

    def set(self, inputfile=None):
        # (Re)create the run object from a (possibly empty) input dict,
        # releasing the previous run object and its results first.
        from gi.repository import BigDFT
        if inputfile is None:
            var = {}
        else:
            var = inputfile
        # Free memory first
        self.out = None
        self.runObj = None
        self.runObj = BigDFT.Run.new_from_dict(BigDFT.Dict(var))

    def __del__(self):
        # __init__ never completed MPI setup: nothing to finalise.
        if self.runObj == -1:
            return
        # MPI finalisation.
        self.out = None
        self.runObj = None
        from gi.repository import BigDFT
        BigDFT.lib_finalize()
class Runner():
    """Run of something.

    This object is associated with the concept of execution of an action.
    It may be customized to be used inside workflows and datasets.
    The central functionality is the `run` method that can be customized on
    subclasses of `Runner`. In this object there are global and local options
    of a run method. All arguments passed at the instantiation are stored as
    global options. For each call to `run`, these global options may be
    updated by the arguments of the run call.

    Args:
        **kwargs: global options of the runner. Deepcopied in the dictionary
            returned by :meth:`global_options`.

    Example:
        >>> torun=Runner(args1='one',args2='two')
        >>> print(torun.global_options())
        {'args1':'one','args2':'two'}
        >>> print(torun.get_global_option('args1'))
        'one'
    """

    def __init__(self, **kwargs):
        import copy
        # Deepcopy so later mutation of caller-owned objects cannot change
        # the stored options (and vice versa).
        self._global_options = copy.deepcopy(kwargs)

    def global_options(self):
        """
        Get all global options dict.

        Returns:
            :py:class:`dict`: The dictionary of the global options in its
            current status
        """
        return self._global_options

    def get_global_option(self, key):
        """
        Get one key in global options.

        Args:
            key (string): the global option key

        Returns:
            The value of the global options labelled by ``key``
        """
        return self._global_options[key]

    def update_global_options(self, **kwargs):
        """
        Update the global options by providing keyword arguments.

        Args:
            **kwargs: arguments to be updated in the global options
        """
        self._global_options.update(kwargs)

    def pop_global_option(self, key):
        """
        Remove a given global option from the global option dictionary.

        Args:
            key (string): the global option key

        Returns:
            The value of the global option
        """
        # Bug fix: the popped value was silently discarded although this
        # docstring has always promised to return it.
        return self._global_options.pop(key)

    def _run_options(self, **kwargs):
        """
        Create a local dictionary for a specific run.

        It combines the present status of global option with the local
        dictionary of the run.
        """
        import copy
        # First deepcopy from global_options and update from kwargs (warning: a
        # dictionary is not update)
        self.run_options = copy.deepcopy(self._global_options)
        """ :py:class`dict`: Local options of process_run.

        This dictionary can be accessed during the definition of the
        process_run method.
        It contains all the relevant keys for the definition of the runner.
        """
        self.run_options.update(kwargs)

    def run(self, **kwargs):
        """
        Run method of the class. It performs the following actions:

        * Constructs the local dictionary to be passed as ``**kwargs`` to the
          `process_run` function;
        * Calls the :meth:`pre_processing` method (intended to prepare some
          actions associated to the :meth:`process_run` method);
        * Calls :meth:`process_run` with the dictionary returned by
          :meth:`pre_processing` as `**kwargs`;
        * Update such dictionary with the results returned by
          :meth:`process_run` and call :meth:`post_processing`;
        * Returns the object passed by the call to :meth:`post_processing`
          class method

        Developers are therefore expected to override :meth:`pre_processing`
        :meth:`process_run` and :meth:`post_processing`,
        when subclassing :class:`Runner`.
        """
        from futile.Utils import dict_merge
        self._run_options(**kwargs)
        run_args = self.pre_processing()
        run_results = self.process_run(**run_args)
        dict_merge(dest=run_args, src=run_results)
        return self.post_processing(**run_args)

    def pre_processing(self):
        """
        Pre-treat the keyword arguments and the options, if needed.

        Returns:
            :py:class:`dict`: dictionary of the pre-treated keyword arguments
            that have to be actually considered by process_run.
        """
        return {}

    def process_run(self, **kwargs):
        """
        Main item of the runner, defines the information that have to be
        post_processed by post_processing.

        Args:
            **kwargs (:py:class:`dict`): keyword arguments as returned from the
                :meth:`pre_processing` method.

        Returns:
            :py:class:`dict`:
                dictionary objects to be passed to post_processing, once the
                dictionary returned by :meth:`pre_processing` has been updated
        """
        return kwargs

    def post_processing(self, **kwargs):
        """
        Post-processing, take the arguments as they are provided by the update
        of :meth:`process_run` and :meth:`pre_processing` methods.

        Returns:
            The final object that each call to the :meth:`run` method is
            supposed to provide.
        """
        return None
class SystemCalculator(Runner):
"""Define a BigDFT calculator.
Main calculator of BigDFT code. It performs :py:meth:`os.system` calls to
the main ``bigdft`` executable in the ``$BIGDFT_ROOT`` directory. It is
designed for two purposes:
* Run the code in a workstation-based environment, for example within
notebooks or scripts.
* Run the code from a python script that is submitted to a batch
          scheduler in a potentially large-scale supercomputer.
For triggering the execution, this code gets two variables from the
environment:
* The value of ``OMP_NUM_THREADS`` to set the number of
OMP_NUM_THREADS. If this variable is not present in the environment,
:class:`SystemCalculator` sets it to the value provided by the
``omp`` keyword at initialization.
* The value of ``BIGDFT_MPIRUN`` to define the MPI execution command.
If absent, the run is executed simply by ``$BIGDFT_ROOT/bigdft``,
followed by the command given by post-processing.
Arguments:
omp (int): number of OpenMP threads.
It defaults to the $OMP_NUM_THREADS variable in the environment, if
present, otherwise it fixes the run to 1 thread.
mpi_run (str): define the MPI command to be used.
It defaults to the value $BIGDFT_MPIRUN of the environment, if
present. When using this calculator into a job submission script,
the value of $BIGDFT_MPIRUN variable may be set appropriately to
launch the bigdft executable.
skip (bool): if ``True``, do not run the calculation if the
corresponding logfile exists.
verbose (bool): if ``True`` the class prints out informations about
the operations that are being performed by the calculator
dry_run (bool): check the input, estimate the memory but do not
perform the calculation.
dry_mpi (int): Number of MPI processes for the estimation of the
memory when ``dry_run`` is ``True`` (not yet implemented)
taskgroup_size (int): number of MPI processes of each of the
taskgroup in the case of a runs_file.
Warning:
At the initialization, `SystemCalculator` checks if the environment
variable $BIGDFT_ROOT is defined.
This would mean (although not guarantee) that the environment has been
properly set prior to the evaluation of the python command.
Also, it checks that the executable file ``bigdft`` might be found in
the ``$BIGDFT_ROOT/bigdft`` path.
Example:
>>> inpdict = { 'dft': { 'ixc': 'LDA' }} #a simple input file
>>> study = SystemCalculator(omp=1)
>>> logf = study.run(name="test",input=inpdict)
Executing command: $BIGDFT_MPIRUN <path_to_$BIGDFT_ROOT>/bigdft test
Methods:
run(name='',run_dir='.',outdir='',run_names='',input=None,posinp='posinp.xyz'):
Run a calculation building the input file from | |
#! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 <NAME> and <NAME>.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## <NAME>. and <NAME>. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Classes and Methods for working with tree reconciliation, fitting, embedding,
contained/containing etc.
"""
import dendropy
from dendropy.model import coalescent
class ContainingTree(dendropy.Tree):
"""
A "containing tree" is a (usually rooted) tree data structure within which
other trees are "contained". For example, species trees and their contained
gene trees; host trees and their contained parasite trees; biogeographical
"area" trees and their contained species or taxon trees.
"""
    def __init__(self,
            containing_tree,
            contained_taxon_namespace,
            contained_to_containing_taxon_map,
            contained_trees=None,
            fit_containing_edge_lengths=True,
            collapse_empty_edges=True,
            ultrametricity_precision=False,
            ignore_root_deep_coalescences=True,
            **kwargs):
        """
        __init__ converts ``self`` to ContainingTree class, embedding the trees
        given in the list, ``contained_trees.``

        Mandatory Arguments:

            ``containing_tree``
                A |Tree| or |Tree|-like object that describes the topological
                constraints or conditions of the containing tree (e.g., species,
                host, or biogeographical area trees).

            ``contained_taxon_namespace``
                A |TaxonNamespace| object that will be used to manage the taxa of
                the contained trees.

            ``contained_to_containing_taxon_map``
                A |TaxonNamespaceMapping| object mapping |Taxon| objects in the
                contained |TaxonNamespace| to corresponding |Taxon| objects in the
                containing tree.

        Optional Arguments:

            ``contained_trees``
                An iterable container of |Tree| or |Tree|-like objects that
                will be contained into ``containing_tree``; e.g. gene or
                parasite trees.

            ``fit_containing_edge_lengths``
                If |True| [default], then the branch lengths of
                ``containing_tree`` will be adjusted to fit the contained tree
                as they are added. Otherwise, the containing tree edge lengths
                will not be changed.

            ``collapse_empty_edges``
                If |True| [default], after edge lengths are adjusted,
                zero-length branches will be collapsed.

            ``ultrametricity_precision``
                If |False| [default], then trees will not be checked for
                ultrametricity. Otherwise this is the threshold within which
                all node to tip distances for sister nodes must be equal.

            ``ignore_root_deep_coalescences``
                If |True| [default], then deep coalescences in the root will
                not be counted.

        Other Keyword Arguments: Will be passed to Tree().
        """
        if "taxon_namespace" not in kwargs:
            kwargs["taxon_namespace"] = containing_tree.taxon_namespace
        # NOTE(review): ``kwargs`` is never forwarded to dendropy.Tree.__init__
        # below, so the "Other Keyword Arguments" promised by the docstring
        # are silently dropped -- confirm intent.
        dendropy.Tree.__init__(self,
                containing_tree,
                taxon_namespace=containing_tree.taxon_namespace)
        self.original_tree = containing_tree
        # Decorate every edge with the bookkeeping attributes maintained by
        # build_edge_taxa_sets() and embed_tree().
        for edge in self.postorder_edge_iter():
            edge.head_contained_edges = {}
            edge.tail_contained_edges = {}
            edge.containing_taxa = set()
            edge.contained_taxa = set()
        self._contained_taxon_namespace = contained_taxon_namespace
        self._contained_to_containing_taxon_map = None
        self._contained_trees = None
        # Also builds the per-edge taxa sets (see the setter).
        self._set_contained_to_containing_taxon_map(contained_to_containing_taxon_map)
        self.fit_containing_edge_lengths = fit_containing_edge_lengths
        self.collapse_empty_edges = collapse_empty_edges
        self.ultrametricity_precision = ultrametricity_precision
        self.ignore_root_deep_coalescences = ignore_root_deep_coalescences
        if contained_trees:
            self._set_contained_trees(contained_trees)
        if self.contained_trees:
            # Taxa sets were just built above; skip recomputing them.
            self.rebuild(rebuild_taxa=False)
    def _set_contained_taxon_namespace(self, taxon_namespace):
        # Internal setter; the public ``contained_taxon_namespace`` property
        # below is read-only.
        self._contained_taxon_namespace = taxon_namespace
    def _get_contained_taxon_namespace(self):
        # Lazily create a namespace if none was supplied at construction.
        if self._contained_taxon_namespace is None:
            self._contained_taxon_namespace = dendropy.TaxonNamespace()
        return self._contained_taxon_namespace
    # |TaxonNamespace| managing the taxa of the contained trees (read-only).
    contained_taxon_namespace = property(_get_contained_taxon_namespace)
    def _set_contained_to_containing_taxon_map(self, contained_to_containing_taxon_map):
        """
        Sets mapping of |Taxon| objects of the genes/parasite/etc. to that of
        the population/species/host/etc.

        Creates mapping (e.g., species to genes) and decorates edges of self
        with sets of both containing |Taxon| objects and the contained
        |Taxon| objects that map to them.

        Accepts either a ready-made |TaxonNamespaceMapping| (whose domain
        namespace must be the contained namespace) or a plain dict, which is
        wrapped into a |TaxonNamespaceMapping| here.
        """
        if isinstance(contained_to_containing_taxon_map, dendropy.TaxonNamespaceMapping):
            # Sanity check: the mapping must be keyed by the same namespace
            # that manages the contained trees' taxa.
            if self._contained_taxon_namespace is not contained_to_containing_taxon_map.domain_taxon_namespace:
                raise ValueError("Domain TaxonNamespace of TaxonNamespaceMapping ('domain_taxon_namespace') not the same as 'contained_taxon_namespace' TaxonNamespace")
            self._contained_to_containing_taxon_map = contained_to_containing_taxon_map
        else:
            self._contained_to_containing_taxon_map = dendropy.TaxonNamespaceMapping(
                    mapping_dict=contained_to_containing_taxon_map,
                    domain_taxon_namespace=self.contained_taxon_namespace,
                    range_taxon_namespace=self.taxon_namespace)
        # Refresh the per-edge containing/contained taxa sets to reflect the
        # new mapping.
        self.build_edge_taxa_sets()
    def _get_contained_to_containing_taxon_map(self):
        return self._contained_to_containing_taxon_map
    # Mapping from contained taxa to containing taxa (read-only).
    contained_to_containing_taxon_map = property(_get_contained_to_containing_taxon_map)
    def _set_contained_trees(self, trees):
        # If ``trees`` carries its own namespace (e.g. a TreeList), adopt it
        # when we have none, or insist that it match the one we have.
        if hasattr(trees, 'taxon_namespace'):
            if self._contained_taxon_namespace is None:
                self._contained_taxon_namespace = trees.taxon_namespace
            elif self._contained_taxon_namespace is not trees.taxon_namespace:
                raise ValueError("'contained_taxon_namespace' of ContainingTree is not the same TaxonNamespace object of 'contained_trees'")
        self._contained_trees = dendropy.TreeList(trees, taxon_namespace=self._contained_taxon_namespace)
        if self._contained_taxon_namespace is None:
            self._contained_taxon_namespace = self._contained_trees.taxon_namespace
    def _get_contained_trees(self):
        # Lazily create an empty TreeList so callers can append immediately.
        if self._contained_trees is None:
            self._contained_trees = dendropy.TreeList(taxon_namespace=self._contained_taxon_namespace)
        return self._contained_trees
    # |TreeList| of trees contained (embedded) in this tree (read-only).
    contained_trees = property(_get_contained_trees)
    def _get_containing_to_contained_taxa_map(self):
        # Reverse view of the contained-to-containing mapping: one containing
        # taxon maps to the collection of contained taxa assigned to it.
        return self._contained_to_containing_taxon_map.reverse
    containing_to_contained_taxa_map = property(_get_containing_to_contained_taxa_map)
def clear(self):
"""
Clears all contained trees and mapped edges.
"""
self.contained_trees = dendropy.TreeList(taxon_namespace=self._contained_to_containing_taxon_map.domain_taxa)
self.clear_contained_edges()
def clear_contained_edges(self):
"""
Clears all contained mapped edges.
"""
for edge in self.postorder_edge_iter():
edge.head_contained_edges = {}
edge.tail_contained_edges = {}
    def fit_edge_lengths(self, contained_trees):
        """
        Recalculate node ages / edge lengths of containing tree to accommodate
        contained trees.
        """
        # Set each internal node's age to the youngest age at which all the
        # contained lineages that must coalesce above it could have done so,
        # but never younger than any of its children.
        for node in self.postorder_node_iter():
            if node.is_internal():
                # One leaf-set bitmask per child subtree of this node.
                disjunct_leaf_set_list_split_bitmasks = []
                for i in node.child_nodes():
                    disjunct_leaf_set_list_split_bitmasks.append(self.taxon_namespace.taxa_bitmask(taxa=i.edge.containing_taxa))
                min_age = float('inf')
                for et in contained_trees:
                    # Helper defined elsewhere in this class (not in view);
                    # presumably returns min(min_age, youngest age at which
                    # the groups mix in ``et``) -- confirm against its source.
                    min_age = self._find_youngest_intergroup_age(et, disjunct_leaf_set_list_split_bitmasks, min_age)
                node.age = max( [min_age] + [cn.age for cn in node.child_nodes()] )
            else:
                # Leaves are placed at the present.
                node.age = 0
        # set the corresponding edge lengths
        self.set_edge_lengths_from_node_ages()
        # collapse 0-length branches
        if self.collapse_empty_edges:
            self.collapse_unweighted_edges()
    def rebuild(self, rebuild_taxa=True):
        """
        Recalculate edge taxa sets, node ages / edge lengths of containing
        tree, and embed edges of contained trees.

        ``rebuild_taxa``
            If True [default], the per-edge containing/contained taxa sets
            are recomputed before refitting and re-embedding.
        """
        if rebuild_taxa:
            self.build_edge_taxa_sets()
        if self.fit_containing_edge_lengths:
            self.fit_edge_lengths(self.contained_trees)
        # Drop all existing embeddings and re-embed every contained tree
        # against the (possibly refitted) containing tree.
        self.clear_contained_edges()
        for et in self.contained_trees:
            self.embed_tree(et)
    def embed_tree(self, contained_tree):
        """
        Map edges of contained tree into containing tree (i.e., self).

        For every edge of ``self``, records (keyed by ``contained_tree``) the
        set of contained-tree edges present at its head (tipward end) and at
        its tail (rootward end).
        """
        # Node ages are needed to walk both trees through time; compute lazily.
        if self.seed_node.age is None:
            self.calc_node_ages(ultrametricity_precision=self.ultrametricity_precision)
        if contained_tree not in self.contained_trees:
            self.contained_trees.append(contained_tree)
            if self.fit_containing_edge_lengths:
                self.fit_edge_lengths(self.contained_trees)
        if contained_tree.seed_node.age is None:
            contained_tree.calc_node_ages(ultrametricity_precision=self.ultrametricity_precision)
        contained_leaves = contained_tree.leaf_nodes()
        # Group the contained tree's leaf edges by the containing taxon each
        # leaf taxon maps to.
        taxon_to_contained = {}
        for nd in contained_leaves:
            containing_taxon = self.contained_to_containing_taxon_map[nd.taxon]
            x = taxon_to_contained.setdefault(containing_taxon, set())
            x.add(nd.edge)
        for containing_edge in self.postorder_edge_iter():
            # Head set: at a tip, the contained leaf edges mapped to the tip's
            # taxon; at an internal node, the union of the children's tail sets
            # (valid because we iterate in postorder).
            if containing_edge.is_terminal():
                containing_edge.head_contained_edges[contained_tree] = taxon_to_contained[containing_edge.head_node.taxon]
            else:
                containing_edge.head_contained_edges[contained_tree] = set()
                for nd in containing_edge.head_node.child_nodes():
                    containing_edge.head_contained_edges[contained_tree].update(nd.edge.tail_contained_edges[contained_tree])
            if containing_edge.tail_node is None:
                if containing_edge.length is not None:
                    target_age = containing_edge.head_node.age + containing_edge.length
                else:
                    # assume all coalesce?
                    containing_edge.tail_contained_edges[contained_tree] = set([contained_tree.seed_node.edge])
                    continue
            else:
                target_age = containing_edge.tail_node.age
            containing_edge.tail_contained_edges[contained_tree] = set()
            # Project each contained edge rootward through time until the
            # containing edge's tail age (``target_age``) is reached.
            for contained_edge in containing_edge.head_contained_edges[contained_tree]:
                if contained_edge.tail_node is not None:
                    remaining = target_age - contained_edge.tail_node.age
                elif contained_edge.length is not None:
                    remaining = target_age - (contained_edge.head_node.age + contained_edge.length)
                else:
                    # Rootless, lengthless edge: cannot be projected further.
                    continue
                while remaining > 0:
                    if contained_edge.tail_node is not None:
                        # Step rootward to the parent edge.
                        contained_edge = contained_edge.tail_node.edge
                    else:
                        # Reached the contained tree's root edge.
                        if contained_edge.length is not None and (remaining - contained_edge.length) <= 0:
                            # Root edge ends before target age: nothing to add.
                            contained_edge = None
                            remaining = 0
                            break
                        else:
                            remaining = 0
                            break
                    if contained_edge and remaining > 0:
                        remaining -= contained_edge.length
                if contained_edge is not None:
                    containing_edge.tail_contained_edges[contained_tree].add(contained_edge)
def build_edge_taxa_sets(self):
"""
Rebuilds sets of containing and corresponding contained taxa at each
edge.
"""
for edge in self.postorder_edge_iter():
if edge.is_terminal():
edge.containing_taxa = set([edge.head_node.taxon])
else:
edge.containing_taxa = set()
for i in edge.head_node.child_nodes():
edge.containing_taxa.update(i.edge.containing_taxa)
edge.contained_taxa = set()
for t in edge.containing_taxa:
edge.contained_taxa.update(self.containing_to_contained_taxa_map[t])
def num_deep_coalescences(self):
"""
Returns total number of deep coalescences of the contained trees.
"""
return sum(self.deep_coalescences().values())
def deep_coalescences(self):
"""
Returns dictionary where the contained trees are keys, and the number of
deep coalescences corresponding to the tree are values.
"""
dc = {}
for tree in self.contained_trees:
for edge in self.postorder_edge_iter():
if edge.tail_node is None and self.ignore_root_deep_coalescences:
continue
try:
dc[tree] += len(edge.tail_contained_edges[tree]) - 1
except KeyError:
dc[tree] = len(edge.tail_contained_edges[tree]) - 1
return dc
def embed_contained_kingman(self,
edge_pop_size_attr='pop_size',
default_pop_size=1,
label=None,
rng=None,
use_expected_tmrca=False):
"""
Simulates, *embeds*, and returns a "censored" (Kingman) neutral coalescence tree
conditional on self.
``rng``
Random number generator to use. If |None|, the default will
be used.
``edge_pop_size_attr``
Name of attribute of self's edges that specify the population
size. If this attribute does not exist, then the population
size is taken to be 1.
Note that all edge-associated taxon sets must be up-to-date (otherwise,
``build_edge_taxa_sets()`` should be called).
"""
et = self.simulate_contained_kingman(
edge_pop_size_attr=edge_pop_size_attr,
default_pop_size=default_pop_size,
label=label,
rng=rng,
use_expected_tmrca=use_expected_tmrca)
self.embed_tree(et)
return et
def simulate_contained_kingman(self,
edge_pop_size_attr='pop_size',
default_pop_size=1,
label=None,
rng=None,
use_expected_tmrca=False):
"""
Simulates and returns a "censored" (Kingman) neutral coalescence tree
conditional on self.
``rng``
Random number generator to use. If |None|, the default will
be used.
``edge_pop_size_attr``
Name of attribute of self's edges that specify the population
size. If this attribute does not exist, then the population
size is taken to be 1.
Note that all edge-associated taxon sets must be up-to-date (otherwise,
``build_edge_taxa_sets()`` should be called), and that the tree
is *not* added to the set of contained trees. For the latter, call
``embed_contained_kingman``.
"""
# Dictionary that maps nodes of containing tree to list of
# corresponding nodes on gene tree, initially populated with leaf
# nodes.
contained_nodes = {}
for nd in self.leaf_node_iter():
contained_nodes[nd] = []
for gt in nd.edge.contained_taxa:
gn = dendropy.Node(taxon=gt)
contained_nodes[nd].append(gn)
# Generate the tree structure
for edge in self.postorder_edge_iter():
if edge.head_node.parent_node is None:
# root: run unconstrained coalescence until just one gene node
# remaining
if hasattr(edge, edge_pop_size_attr):
pop_size = getattr(edge, edge_pop_size_attr)
else:
pop_size = default_pop_size
if len(contained_nodes[edge.head_node]) > 1:
final = coalescent.coalesce_nodes(nodes=contained_nodes[edge.head_node],
pop_size=pop_size,
period=None,
rng=rng,
use_expected_tmrca=use_expected_tmrca)
else:
final = contained_nodes[edge.head_node]
else:
# run until next coalescence event, as determined by this edge
# size.
if hasattr(edge, edge_pop_size_attr):
pop_size = getattr(edge, edge_pop_size_attr)
else:
pop_size = default_pop_size
remaining | |
"name"
dyndns_del(nameserver, name, type="ANY", ttl=10) -> result code (0=ok)
example: dyndns_del("ns1.toto.com", "dyn.toto.com")
RFC2136
"""
zone = name[name.find(".")+1:]
r=sr1(IP(dst=nameserver)/UDP()/DNS(opcode=5,
qd=[DNSQR(qname=zone, qtype="SOA")],
ns=[DNSRR(rrname=name, type=type,
rclass="ANY", ttl=0, rdata="")]),
verbose=0, timeout=5)
if r and r.haslayer(DNS):
return r.getlayer(DNS).rcode
else:
return -1
def is_promisc(ip, fake_bcast="ff:ff:00:00:00:00", **kargs):
    """Try to guess if target is in Promisc mode. The target is provided by its ip."""
    # A host that answers an ARP request sent to a bogus "broadcast" MAC
    # accepted a frame that was not addressed to it.
    probe = Ether(dst=fake_bcast) / ARP(op="who-has", pdst=ip)
    answer = srp1(probe, type=ETH_P_ARP, iface_hint=ip, timeout=1, verbose=0, **kargs)
    return answer is not None
def promiscping(net, timeout=2, fake_bcast="ff:ff:ff:ff:ff:fe", **kargs):
    """Send ARP who-has requests to determine which hosts are in promiscuous mode
promiscping(net, iface=conf.iface)"""
    answered, unanswered = srp(Ether(dst=fake_bcast)/ARP(pdst=net),
                               filter="arp and arp[7] = 2", timeout=timeout,
                               iface_hint=net, **kargs)
    answered = ARPingResult(answered.res, name="PROMISCPing")
    answered.display()
    return answered, unanswered
def ikescan(ip):
    """Probe for IKE/ISAKMP responders by sending a bare SA proposal."""
    isakmp_probe = ISAKMP(init_cookie=RandString(8), exch_type=2)
    isakmp_probe = isakmp_probe / ISAKMP_payload_SA(prop=ISAKMP_payload_Proposal())
    return sr(IP(dst=ip) / UDP() / isakmp_probe)
def dhcp_request(iface=None, **kargs):
    """Broadcast a DHCP discover request and return the first answer."""
    if conf.checkIPaddr != 0:
        warning("conf.checkIPaddr is not 0, I may not be able to match the answer")
    if iface is None:
        iface = conf.iface
    fam, hw = get_if_raw_hwaddr(iface)
    discover = (Ether(dst="ff:ff:ff:ff:ff:ff")
                / IP(src="0.0.0.0", dst="255.255.255.255")
                / UDP(sport=68, dport=67)
                / BOOTP(chaddr=hw)
                / DHCP(options=[("message-type", "discover"), "end"]))
    return srp1(discover, iface=iface, **kargs)
def snmpwalk(dst, oid="1", community="public"):
try:
while 1:
r = sr1(IP(dst=dst)/UDP(sport=RandShort())/SNMP(community=community, PDU=SNMPnext(varbindlist=[SNMPvarbind(oid=oid)])),timeout=2, chainCC=1, verbose=0, retry=2)
if ICMP in r:
print repr(r)
break
if r is None:
print "No answers"
break
print "%-40s: %r" % (r[SNMPvarbind].oid.val,r[SNMPvarbind].value)
oid = r[SNMPvarbind].oid
except KeyboardInterrupt:
pass
#####################
## Reporting stuff ##
#####################
def report_ports(target, ports):
    """portscan a target and output a LaTeX table
report_ports(target, ports) -> string"""
    ans,unans = sr(IP(dst=target)/TCP(dport=ports),timeout=5)
    rep = "\\begin{tabular}{|r|l|l|}\n\\hline\n"
    # First section: SYN/ACK (flags == 0x12) answers => open ports.
    for s,r in ans:
        if not r.haslayer(ICMP):
            if r.payload.flags == 0x12:
                rep += r.sprintf("%TCP.sport% & open & SA \\\\\n")
    rep += "\\hline\n"
    # Second section: ICMP errors and non-SYN/ACK TCP answers => closed.
    for s,r in ans:
        if r.haslayer(ICMP):
            rep += r.sprintf("%TCPerror.dport% & closed & ICMP type %ICMP.type%/%ICMP.code% from %IP.src% \\\\\n")
        elif r.payload.flags != 0x12:
            rep += r.sprintf("%TCP.sport% & closed & TCP %TCP.flags% \\\\\n")
    rep += "\\hline\n"
    # Final section: unanswered probes.
    for i in unans:
        rep += i.sprintf("%TCP.dport% & ? & unanswered \\\\\n")
    rep += "\\hline\n\\end{tabular}\n"
    return rep
def __make_table(yfmtfunc, fmtfunc, endline, list, fxyz, sortx=None, sorty=None, seplinefunc=None):
    # Generic 2D pivot-table printer (Python 2 print statements).
    #   yfmtfunc(width) -> printf format for the row-label column
    #   fmtfunc(width)  -> printf format for a data column
    #   endline         -> string appended after each printed row
    #   list            -> records; fxyz(record) yields (x, y, z): value z is
    #                      placed at column x, row y
    #   seplinefunc(label_width, col_widths) -> optional separator line
    vx = {}   # column label -> column width
    vy = {}   # row labels (dict used as a set)
    vz = {}   # (column, row) -> cell text
    vxf = {}  # column label -> printf format
    vyf = {}  # (unused)
    l = 0     # width of the row-label column
    for e in list:
        xx,yy,zz = map(str, fxyz(e))
        l = max(len(yy),l)
        vx[xx] = max(vx.get(xx,0), len(xx), len(zz))
        vy[yy] = None
        vz[(xx,yy)] = zz
    vxk = vx.keys()
    vyk = vy.keys()
    # Sort column labels: user comparator if given, else numerically (int,
    # then atol) when possible, else lexicographically.
    if sortx:
        vxk.sort(sortx)
    else:
        try:
            vxk.sort(lambda x,y:int(x)-int(y))
        except:
            try:
                vxk.sort(lambda x,y: cmp(atol(x),atol(y)))
            except:
                vxk.sort()
    # Same ordering rules for row labels.
    if sorty:
        vyk.sort(sorty)
    else:
        try:
            vyk.sort(lambda x,y:int(x)-int(y))
        except:
            try:
                vyk.sort(lambda x,y: cmp(atol(x),atol(y)))
            except:
                vyk.sort()
    # Header line (with optional separators).
    if seplinefunc:
        sepline = seplinefunc(l, map(lambda x:vx[x],vxk))
        print sepline
    fmt = yfmtfunc(l)
    print fmt % "",
    for x in vxk:
        vxf[x] = fmtfunc(vx[x])
        print vxf[x] % x,
    print endline
    if seplinefunc:
        print sepline
    # Data rows; missing cells are printed as "-".
    for y in vyk:
        print fmt % y,
        for x in vxk:
            print vxf[x] % vz.get((x,y), "-"),
        print endline
    if seplinefunc:
        print sepline
def make_table(*args, **kargs):
    """Print a plain 2D pivot table (see __make_table for arguments)."""
    __make_table(lambda l:"%%-%is" % l, lambda l:"%%-%is" % l, "", *args, **kargs)
def make_lined_table(*args, **kargs):
    """Print a 2D pivot table with column separators and horizontal rules."""
    __make_table(lambda l:"%%-%is |" % l, lambda l:"%%-%is |" % l, "",
                 seplinefunc=lambda a,x:"+".join(map(lambda y:"-"*(y+2), [a-1]+x+[-2])),
                 *args, **kargs)
def make_tex_table(*args, **kargs):
    """Print a 2D pivot table as LaTeX tabular rows separated by \\hline."""
    __make_table(lambda l: "%s", lambda l: "& %s", "\\\\", seplinefunc=lambda a,x:"\\hline", *args, **kargs)
######################
## Online doc stuff ##
######################
def lsc(cmd=None):
    """List user commands"""
    if cmd is None:
        # No argument: one summary line (first docstring line) per command.
        for c in user_commands:
            doc = "No doc. available"
            if c.__doc__:
                doc = c.__doc__.split("\n")[0]
            print "%-16s : %s" % (c.__name__, doc)
    else:
        # Show the full docstring of the given command.
        print cmd.__doc__
def ls(obj=None):
    """List available layers, or infos on a given layer"""
    if obj is None:
        # No argument: list every Packet subclass visible in the global and
        # builtin namespaces, sorted by name.
        import __builtin__
        all = __builtin__.__dict__.copy()
        all.update(globals())
        objlst = filter(lambda (n,o): isinstance(o,type) and issubclass(o,Packet), all.items())
        objlst.sort(lambda x,y:cmp(x[0],y[0]))
        for n,o in objlst:
            print "%-10s : %s" %(n,o.name)
    else:
        if isinstance(obj, type) and issubclass(obj, Packet):
            # Packet class: show each field with its default value.
            for f in obj.fields_desc:
                print "%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default))
        elif isinstance(obj, Packet):
            # Packet instance: show current and default field values, then
            # recurse into the payload layers.
            for f in obj.fields_desc:
                print "%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj,f.name)), repr(f.default))
            if not isinstance(obj.payload, NoPayload):
                print "--"
                ls(obj.payload)
        else:
            print "Not a packet class. Type 'ls()' to list packet classes."
# Commands listed by the interactive help command lsc().
user_commands = [ sr, sr1, srp, srp1, srloop, srploop, sniff, p0f, arpcachepoison, send, sendp, traceroute, arping, ls, lsc, queso, nmap_fp, report_ports, dyndns_add, dyndns_del, is_promisc, promiscping ]
##############
## Automata ##
##############
class ATMT:
    """Namespace holding the markers and decorators used to declare the
    states, transition conditions, timeouts, and actions of an Automaton."""
    STATE = "State"
    ACTION = "Action"
    CONDITION = "Condition"
    RECV = "Receive condition"
    TIMEOUT = "Timeout condition"
    class NewStateRequested(Exception):
        """Raised by state-method wrappers to ask the automaton's main loop
        to transition into the wrapped state."""
        def __init__(self, state_func, automaton, *args, **kargs):
            self.func = state_func
            self.state = state_func.atmt_state
            self.initial = state_func.atmt_initial
            self.error = state_func.atmt_error
            self.final = state_func.atmt_final
            Exception.__init__(self, "Request state [%s]" % self.state)
            self.automaton = automaton
            self.args = args
            self.kargs = kargs
            self.action_parameters() # init action parameters
        def action_parameters(self, *args, **kargs):
            # Record the arguments to pass to the actions associated with
            # the condition that requested this state.
            self.action_args = args
            self.action_kargs = kargs
            return self
        def run(self):
            # Execute the requested state method on the automaton instance.
            return self.func(self.automaton, *self.args, **self.kargs)
    @staticmethod
    def state(initial=0,final=0,error=0):
        # Decorator: mark a method as a state.  The returned wrapper does not
        # run the method; it returns a NewStateRequested carrying it, so the
        # automaton's main loop controls when the state body executes.
        def deco(f,initial=initial, final=final):
            f.atmt_type = ATMT.STATE
            f.atmt_state = f.func_name
            f.atmt_initial = initial
            f.atmt_final = final
            f.atmt_error = error
            def state_wrapper(self, *args, **kargs):
                return ATMT.NewStateRequested(f, self, *args, **kargs)
            state_wrapper.func_name = "%s_wrapper" % f.func_name
            state_wrapper.atmt_type = ATMT.STATE
            state_wrapper.atmt_state = f.func_name
            state_wrapper.atmt_initial = initial
            state_wrapper.atmt_final = final
            state_wrapper.atmt_error = error
            state_wrapper.atmt_origfunc = f
            return state_wrapper
        return deco
    @staticmethod
    def action(cond, prio=0):
        # Decorator: attach a method as an action of condition ``cond``;
        # ``prio`` orders multiple actions of the same condition.
        def deco(f,cond=cond):
            if not hasattr(f,"atmt_type"):
                f.atmt_cond = {}
            f.atmt_type = ATMT.ACTION
            f.atmt_cond[cond.atmt_condname] = prio
            return f
        return deco
    @staticmethod
    def condition(state, prio=0):
        # Decorator: mark a method as a transition condition of ``state``.
        def deco(f, state=state):
            f.atmt_type = ATMT.CONDITION
            f.atmt_state = state.atmt_state
            f.atmt_condname = f.func_name
            f.atmt_prio = prio
            return f
        return deco
    @staticmethod
    def receive_condition(state, prio=0):
        # Decorator: like condition(), but evaluated against received packets.
        def deco(f, state=state):
            f.atmt_type = ATMT.RECV
            f.atmt_state = state.atmt_state
            f.atmt_condname = f.func_name
            f.atmt_prio = prio
            return f
        return deco
    @staticmethod
    def timeout(state, timeout):
        # Decorator: mark a method to be run after ``timeout`` seconds spent
        # in ``state``.
        def deco(f, state=state, timeout=timeout):
            f.atmt_type = ATMT.TIMEOUT
            f.atmt_state = state.atmt_state
            f.atmt_timeout = timeout
            f.atmt_condname = f.func_name
            return f
        return deco
class Automaton_metaclass(type):
    def __new__(cls, name, bases, dct):
        """Collect the ATMT-decorated methods of the class (and its bases)
        into the state/condition/timeout/action registries."""
        cls = super(Automaton_metaclass, cls).__new__(cls, name, bases, dct)
        cls.states={}
        cls.state = None
        cls.recv_conditions={}
        cls.conditions={}
        cls.timeout={}
        cls.actions={}
        cls.initial_states=[]
        # Walk the base classes breadth-first, keeping the most-derived
        # definition of each attribute.
        members = {}
        classes = [cls]
        while classes:
            c = classes.pop(0) # order is important to avoid breaking method overloading
            classes += list(c.__bases__)
            for k,v in c.__dict__.iteritems():
                if k not in members:
                    members[k] = v
        decorated = [v for v in members.itervalues()
                     if type(v) is types.FunctionType and hasattr(v, "atmt_type")]
        # First pass: register states and create the per-state registries.
        for m in decorated:
            if m.atmt_type == ATMT.STATE:
                s = m.atmt_state
                cls.states[s] = m
                cls.recv_conditions[s]=[]
                cls.conditions[s]=[]
                cls.timeout[s]=[]
                if m.atmt_initial:
                    cls.initial_states.append(m)
            elif m.atmt_type in [ATMT.CONDITION, ATMT.RECV, ATMT.TIMEOUT]:
                cls.actions[m.atmt_condname] = []
        # Second pass: attach conditions/timeouts/actions to their states.
        for m in decorated:
            if m.atmt_type == ATMT.CONDITION:
                cls.conditions[m.atmt_state].append(m)
            elif m.atmt_type == ATMT.RECV:
                cls.recv_conditions[m.atmt_state].append(m)
            elif m.atmt_type == ATMT.TIMEOUT:
                cls.timeout[m.atmt_state].append((m.atmt_timeout, m))
            elif m.atmt_type == ATMT.ACTION:
                for c in m.atmt_cond:
                    cls.actions[c].append(m)
        # Sort timeouts chronologically; the trailing (None, None) acts as a
        # "no more timeouts" sentinel.
        for v in cls.timeout.itervalues():
            v.sort(lambda (t1,f1),(t2,f2): cmp(t1,t2))
            v.append((None, None))
        # Sort conditions and actions by their declared priority.
        for v in itertools.chain(cls.conditions.itervalues(),
                                 cls.recv_conditions.itervalues()):
            v.sort(lambda c1,c2: cmp(c1.atmt_prio,c2.atmt_prio))
        for condname,actlst in cls.actions.iteritems():
            actlst.sort(lambda c1,c2: cmp(c1.atmt_cond[condname], c2.atmt_cond[condname]))
        return cls
    def graph(self, **kargs):
        """Render the automaton as a Graphviz digraph via do_graph()."""
        # NOTE(review): ``self`` here is the class (instance of the
        # metaclass), so ``self.__class__.__name__`` is the metaclass name,
        # not the automaton's -- confirm whether ``self.__name__`` was meant.
        s = 'digraph "%s" {\n' % self.__class__.__name__
        se = "" # Keep initial nodes at the begining for better rendering
        for st in self.states.itervalues():
            if st.atmt_initial:
                se = ('\t"%s" [ style=filled, fillcolor=blue, shape=box, root=true];\n' % st.atmt_state)+se
            elif st.atmt_final:
                se += '\t"%s" [ style=filled, fillcolor=green, shape=octagon ];\n' % st.atmt_state
            elif st.atmt_error:
                se += '\t"%s" [ style=filled, fillcolor=red, shape=octagon ];\n' % st.atmt_state
        s += se
        # Heuristic edge discovery: a state method whose code object mentions
        # another state's name is drawn as a green edge.
        for st in self.states.values():
            for n in st.atmt_origfunc.func_code.co_names+st.atmt_origfunc.func_code.co_consts:
                if n in self.states:
                    s += '\t"%s" -> "%s" [ color=green ];\n' % (st.atmt_state,n)
        # Condition (purple) and receive-condition (red) transitions,
        # labelled with the actions they trigger.
        for c,k,v in [("purple",k,v) for k,v in self.conditions.items()]+[("red",k,v) for k,v in self.recv_conditions.items()]:
            for f in v:
                for n in f.func_code.co_names+f.func_code.co_consts:
                    if n in self.states:
                        l = f.atmt_condname
                        for x in self.actions[f.atmt_condname]:
                            l += "\\l>[%s]" % x.func_name
                        s += '\t"%s" -> "%s" [label="%s", color=%s];\n' % (k,n,l,c)
        # Timeout transitions (blue), labelled with their delay in seconds.
        for k,v in self.timeout.iteritems():
            for t,f in v:
                if f is None:
                    continue
                for n in f.func_code.co_names+f.func_code.co_consts:
                    if n in self.states:
                        l = "%s/%.1fs" % (f.atmt_condname,t)
                        for x in self.actions[f.atmt_condname]:
                            l += "\\l>[%s]" % x.func_name
                        s += '\t"%s" -> "%s" [label="%s",color=blue];\n' % (k,n,l)
        s += "}\n"
        return do_graph(s, **kargs)
class Automaton:
__metaclass__ = Automaton_metaclass
    def __init__(self, *args, **kargs):
        # Stash construction arguments, then let subclasses interpret them
        # via parse_args().
        self.debug_level=0
        self.init_args=args
        self.init_kargs=kargs
        self.parse_args(*args, **kargs)
    def debug(self, lvl, msg):
        # Log ``msg`` only when verbosity is at least ``lvl``.
        if self.debug_level >= lvl:
            log_interactive.debug(msg)
    class ErrorState(Exception):
        # Raised for error states; carries an optional partial ``result``.
        def __init__(self, msg, result=None):
            Exception.__init__(self, msg)
            self.result = result
    class Stuck(ErrorState):
        # Presumably raised when the automaton cannot make progress --
        # confirm against the (out-of-view) main loop.
        pass
    def parse_args(self, debug=0, store=1, **kargs):
        # Default argument parsing: verbosity level, whether to store
        # packets, and keyword arguments kept for the listening socket.
        self.debug_level=debug
        self.socket_kargs = kargs
        self.store_packets = store
    def master_filter(self, pkt):
        # Hook: subclasses may override to pre-filter received packets.
        return True
def run_condition(self, cond, *args, **kargs):
try:
cond(self,*args, **kargs)
except ATMT.NewStateRequested, state_req:
self.debug(2, "%s [%s] taken to state [%s]" % (cond.atmt_type, cond.atmt_condname, state_req.state))
if cond.atmt_type == ATMT.RECV:
self.packets.append(args[0])
for | |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow training Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
def _gen_learner_config(num_classes,
                        l1_reg,
                        l2_reg,
                        tree_complexity,
                        max_depth,
                        min_node_weight,
                        pruning_mode,
                        growing_mode,
                        dropout_probability=None,
                        dropout_learning_rate=None,
                        dropout_prob_of_skipping=None):
  """Builds a LearnerConfig proto with the desired settings."""
  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = num_classes
  # Regularization terms.
  regularization = learner_config.regularization
  regularization.l1 = l1_reg
  regularization.l2 = l2_reg
  regularization.tree_complexity = tree_complexity
  # Growth constraints.
  constraints = learner_config.constraints
  constraints.max_tree_depth = max_depth
  constraints.min_node_weight = min_node_weight
  learner_config.pruning_mode = pruning_mode
  learner_config.growing_mode = growing_mode
  # Optional dropout tuner settings; left at proto defaults when None.
  dropout = learner_config.learning_rate_tuner.dropout
  if dropout_probability is not None:
    dropout.dropout_probability = dropout_probability
  if dropout_learning_rate is not None:
    dropout.learning_rate = dropout_learning_rate
  if dropout_prob_of_skipping is not None:
    dropout.dropout_prob_of_skipping = dropout_prob_of_skipping
  return learner_config
def _gen_dense_split_info(fc, threshold, left_weight, right_weight):
  """Builds a serialized SplitInfo proto for a dense float binary split."""
  template = """
      split_node {
        dense_float_binary_split {
          feature_column: %d
          threshold: %f
        }
      }
      left_child {
        sparse_vector {
          index: 0
          value: %f
        }
      }
      right_child {
        sparse_vector {
          index: 0
          value: %f
        }
      }"""
  split_info = split_info_pb2.SplitInfo()
  text_format.Merge(template % (fc, threshold, left_weight, right_weight),
                    split_info)
  return split_info.SerializeToString()
def _gen_dense_oblivious_split_info(fc, threshold, leave_weights,
                                    children_parent_id):
  """Builds a serialized ObliviousSplitInfo proto for a dense split."""
  pieces = ["""
      split_node {
        oblivious_dense_float_binary_split {
          feature_column: %d
          threshold: %f
        }
      }""" % (fc, threshold)]
  # One child message per leaf weight.
  for weight in leave_weights:
    pieces.append("""
      children {
        vector {
          value: %f
        }
      }""" % weight)
  for parent_id in children_parent_id:
    pieces.append("""
      children_parent_id: %d""" % parent_id)
  oblivious_split = split_info_pb2.ObliviousSplitInfo()
  text_format.Merge("".join(pieces), oblivious_split)
  return oblivious_split.SerializeToString()
def _gen_categorical_split_info(fc, feat_id, left_weight, right_weight):
  """Builds a serialized SplitInfo proto for a categorical-id binary split."""
  template = """
      split_node {
        categorical_id_binary_split {
          feature_column: %d
          feature_id: %d
        }
      }
      left_child {
        sparse_vector {
          index: 0
          value: %f
        }
      }
      right_child {
        sparse_vector {
          index: 0
          value: %f
        }
      }"""
  split_info = split_info_pb2.SplitInfo()
  text_format.Merge(template % (fc, feat_id, left_weight, right_weight),
                    split_info)
  return split_info.SerializeToString()
def _get_bias_update(grads, hess):
  """Newton-step bias update: -grad/hess where hess > 0, else zero."""
  newton_step = -grads / hess
  zeros = array_ops.zeros_like(grads)
  return array_ops.where(hess > 0, newton_step, zeros)
class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase):
  """Tests for centering tree ensemble bias."""

  def testCenterBias(self):
    """Tests bias centering for multiple iterations."""
    with self.cached_session() as session:
      # Create empty ensemble.
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      # Prepare learner config.
      learner_config = _gen_learner_config(
          num_classes=3,
          l1_reg=0,
          l2_reg=0,
          tree_complexity=0,
          max_depth=4,
          min_node_weight=0,
          pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
          growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
          # Dropout does not change anything here.
          dropout_probability=0.5).SerializeToString()
      # Center bias for the initial step.
      grads = constant_op.constant([0.4, -0.3])
      hess = constant_op.constant([2.0, 1.0])
      continue_centering1 = training_ops.center_tree_ensemble_bias(
          tree_ensemble_handle,
          stamp_token=0,
          next_stamp_token=1,
          delta_updates=_get_bias_update(grads, hess),
          learner_config=learner_config)
      continue_centering = session.run(continue_centering1)
      # Updates are still large, so centering must continue.
      self.assertEqual(continue_centering, True)
      # Validate ensemble state.
      # dim 0 update: -0.4/2.0 = -0.2
      # dim 1 update: +0.3/1.0 = +0.3
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
      tree_ensemble_config.ParseFromString(serialized)
      # The ensemble now holds a single bias leaf with the Newton updates.
      expected_result = """
          trees {
            nodes {
              leaf {
                vector {
                  value: -0.2
                  value: 0.3
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_layers_grown: 1
          }
          growing_metadata {
            num_trees_attempted: 1
            num_layers_attempted: 1
          }
      """
      self.assertEqual(new_stamp, 1)
      self.assertEqual(stats.num_trees, 0)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
      # Center bias for another step.
      # dim 0 update: -0.06/0.5 = -0.12
      # dim 1 update: -0.01/0.5 = -0.02
      grads = constant_op.constant([0.06, 0.01])
      hess = constant_op.constant([0.5, 0.5])
      continue_centering2 = training_ops.center_tree_ensemble_bias(
          tree_ensemble_handle,
          stamp_token=1,
          next_stamp_token=2,
          delta_updates=_get_bias_update(grads, hess),
          learner_config=learner_config)
      continue_centering = session.run(continue_centering2)
      self.assertEqual(continue_centering, True)
      # Validate ensemble state: the bias leaf accumulates the new updates
      # (-0.2-0.12 = -0.32, 0.3-0.02 = 0.28).
      new_stamp, serialized = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=2))
      tree_ensemble_config.ParseFromString(serialized)
      expected_result = """
          trees {
            nodes {
              leaf {
                vector {
                  value: -0.32
                  value: 0.28
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_layers_grown: 1
          }
          growing_metadata {
            num_trees_attempted: 1
            num_layers_attempted: 1
          }
      """
      self.assertEqual(new_stamp, 2)
      self.assertEqual(stats.num_trees, 0)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
      self.assertProtoEquals(expected_result, tree_ensemble_config)
      # Center bias for another step, but this time updates are negligible.
      grads = constant_op.constant([0.0000001, -0.00003])
      hess = constant_op.constant([0.5, 0.0])
      continue_centering3 = training_ops.center_tree_ensemble_bias(
          tree_ensemble_handle,
          stamp_token=2,
          next_stamp_token=3,
          delta_updates=_get_bias_update(grads, hess),
          learner_config=learner_config)
      continue_centering = session.run(continue_centering3)
      # Negligible updates: centering stops and the bias tree is finalized.
      self.assertEqual(continue_centering, False)
      # Validate ensemble stamp.
      new_stamp, _ = session.run(
          model_ops.tree_ensemble_serialize(tree_ensemble_handle))
      stats = session.run(
          training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=3))
      self.assertEqual(new_stamp, 3)
      self.assertEqual(stats.num_trees, 1)
      self.assertEqual(stats.num_layers, 1)
      self.assertEqual(stats.active_tree, 1)
      self.assertEqual(stats.active_layer, 1)
      self.assertEqual(stats.attempted_trees, 1)
      self.assertEqual(stats.attempted_layers, 1)
class GrowTreeEnsembleOpTest(test_util.TensorFlowTestCase):
"""Tests for growing tree ensemble from split candidates."""
def testGrowEmptyEnsemble(self):
  """Test growing an empty ensemble.

  Three handlers propose root splits; handlers 1 and 3 tie on gain
  (7.62) but offer different split types. The dense split from
  handler 1 is expected to win, and with max_depth=1 the single grown
  layer finalizes the tree.
  """
  with self.cached_session() as session:
    # Create empty ensemble.
    tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
    tree_ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config=tree_ensemble_config.SerializeToString(),
        name="tree_ensemble")
    resources.initialize_resources(resources.shared_resources()).run()
    # Prepare learner config.
    learner_config = _gen_learner_config(
        num_classes=2,
        l1_reg=0,
        l2_reg=0,
        tree_complexity=0,
        max_depth=1,
        min_node_weight=0,
        pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
        growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE,
        # Dropout does not change anything here, tree is not finalized.
        dropout_probability=0.5)
    # Prepare handler inputs.
    # Note that handlers 1 & 3 have the same gain but different splits.
    handler1_partitions = np.array([0], dtype=np.int32)
    handler1_gains = np.array([7.62], dtype=np.float32)
    handler1_split = [_gen_dense_split_info(0, 0.52, -4.375, 7.143)]
    handler2_partitions = np.array([0], dtype=np.int32)
    handler2_gains = np.array([0.63], dtype=np.float32)
    handler2_split = [_gen_dense_split_info(0, 0.23, -0.6, 0.24)]
    handler3_partitions = np.array([0], dtype=np.int32)
    handler3_gains = np.array([7.62], dtype=np.float32)
    handler3_split = [_gen_categorical_split_info(0, 7, -4.375, 7.143)]
    # Grow tree ensemble: advance the stamp from 0 to 1.
    grow_op = training_ops.grow_tree_ensemble(
        tree_ensemble_handle,
        stamp_token=0,
        next_stamp_token=1,
        learning_rate=0.1,
        partition_ids=[
            handler1_partitions, handler2_partitions, handler3_partitions
        ],
        gains=[handler1_gains, handler2_gains, handler3_gains],
        splits=[handler1_split, handler2_split, handler3_split],
        learner_config=learner_config.SerializeToString(),
        dropout_seed=123,
        center_bias=True,
        max_tree_depth=learner_config.constraints.max_tree_depth,
        weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE)
    session.run(grow_op)
    # Expect the simpler split from handler 1 to be chosen.
    # The grown tree should be finalized as max tree depth is 1.
    # Serialize the ensemble and read stats back under the new stamp.
    new_stamp, serialized = session.run(
        model_ops.tree_ensemble_serialize(tree_ensemble_handle))
    stats = session.run(
        training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
    tree_ensemble_config.ParseFromString(serialized)
    # Expected: one finalized one-layer tree holding handler 1's dense
    # split, scaled by the 0.1 learning rate in tree_weights.
    expected_result = """
      trees {
        nodes {
          dense_float_binary_split {
            threshold: 0.52
            left_id: 1
            right_id: 2
          }
          node_metadata {
            gain: 7.62
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: -4.375
            }
          }
        }
        nodes {
          leaf {
            sparse_vector {
              index: 0
              value: 7.143
            }
          }
        }
      }
      tree_weights: 0.1
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 1
        is_finalized: true
      }
      growing_metadata {
        num_trees_attempted: 1
        num_layers_attempted: 1
      }
    """
    self.assertEqual(new_stamp, 1)
    self.assertEqual(stats.num_trees, 1)
    self.assertEqual(stats.num_layers, 1)
    self.assertEqual(stats.active_tree, 1)
    self.assertEqual(stats.active_layer, 1)
    self.assertEqual(stats.attempted_trees, 1)
    self.assertEqual(stats.attempted_layers, 1)
    self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowEmptyEnsembleObliviousCase(self):
  """Test growing an empty ensemble in the oblivious case.

  Handlers 1 and 3 tie on gain (7.62); per the expectation below, for
  oblivious trees the tie is broken toward the larger handler id, so
  handler 3's split wins. max_depth=1 finalizes the tree after one
  layer.
  """
  with self.cached_session() as session:
    # Create empty ensemble.
    tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
    tree_ensemble_handle = model_ops.tree_ensemble_variable(
        stamp_token=0,
        tree_ensemble_config=tree_ensemble_config.SerializeToString(),
        name="tree_ensemble")
    resources.initialize_resources(resources.shared_resources()).run()
    # Prepare learner config.
    learner_config = _gen_learner_config(
        num_classes=2,
        l1_reg=0,
        l2_reg=0,
        tree_complexity=0,
        max_depth=1,
        min_node_weight=0,
        pruning_mode=learner_pb2.LearnerConfig.PRE_PRUNE,
        growing_mode=learner_pb2.LearnerConfig.WHOLE_TREE)
    # Prepare handler inputs.
    # Note that handlers 1 & 3 have the same gain but different splits.
    handler1_partitions = np.array([0], dtype=np.int32)
    handler1_gains = np.array([7.62], dtype=np.float32)
    handler1_split = [
        _gen_dense_oblivious_split_info(0, 0.52, [-4.375, 7.143], [0])
    ]
    handler2_partitions = np.array([0], dtype=np.int32)
    handler2_gains = np.array([0.63], dtype=np.float32)
    handler2_split = [
        _gen_dense_oblivious_split_info(0, 0.23, [-0.6, 0.24], [0])
    ]
    handler3_partitions = np.array([0], dtype=np.int32)
    handler3_gains = np.array([7.62], dtype=np.float32)
    handler3_split = [
        _gen_dense_oblivious_split_info(0, 7, [-4.375, 7.143], [0])
    ]
    # Grow tree ensemble: advance the stamp from 0 to 1.
    grow_op = training_ops.grow_tree_ensemble(
        tree_ensemble_handle,
        stamp_token=0,
        next_stamp_token=1,
        learning_rate=0.1,
        partition_ids=[
            handler1_partitions, handler2_partitions, handler3_partitions
        ],
        gains=[handler1_gains, handler2_gains, handler3_gains],
        splits=[handler1_split, handler2_split, handler3_split],
        learner_config=learner_config.SerializeToString(),
        dropout_seed=123,
        center_bias=True,
        max_tree_depth=learner_config.constraints.max_tree_depth,
        weak_learner_type=learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE)
    session.run(grow_op)
    # Expect the split with bigger handler_id, i.e. handler 3 to be chosen.
    # The grown tree should be finalized as max tree depth is 1.
    # Serialize the ensemble and read stats back under the new stamp.
    new_stamp, serialized = session.run(
        model_ops.tree_ensemble_serialize(tree_ensemble_handle))
    stats = session.run(
        training_ops.tree_ensemble_stats(tree_ensemble_handle, stamp_token=1))
    tree_ensemble_config.ParseFromString(serialized)
    # Expected: handler 3's oblivious split (threshold 7) with the two
    # leaves it produced; tree_weights reflects the 0.1 learning rate.
    expected_result = """
      trees {
        nodes {
          oblivious_dense_float_binary_split {
            feature_column: 0
            threshold: 7
          }
          node_metadata {
            gain: 7.62
            original_oblivious_leaves {
            }
          }
        }
        nodes {
          leaf {
            vector {
              value: -4.375
            }
          }
        }
        nodes {
          leaf {
            vector {
              value: 7.143
            }
          }
        }
      }
      tree_weights: 0.1
      tree_metadata {
        num_tree_weight_updates: 1
        num_layers_grown: 1
        is_finalized: true
      }
      growing_metadata {
        num_trees_attempted: 1
        num_layers_attempted: 1
      }
    """
    self.assertEqual(new_stamp, 1)
    self.assertEqual(stats.num_trees, 1)
    self.assertEqual(stats.num_layers, 1)
    self.assertEqual(stats.active_tree, 1)
    self.assertEqual(stats.active_layer, 1)
    self.assertEqual(stats.attempted_trees, 1)
    self.assertEqual(stats.attempted_layers, 1)
    self.assertProtoEquals(expected_result, tree_ensemble_config)
def testGrowExistingEnsembleTreeNotFinalized(self):
"""Test growing an existing ensemble with the last tree not finalized."""
with self.cached_session() as session:
# Create existing ensemble with one root split
tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
text_format.Merge("""
trees {
nodes {
categorical_id_binary_split {
feature_id: 4
left_id: 1
right_id: 2
}
node_metadata {
| |
!
FUNCTION FV4A1FE 298.15 +2.35; 6000 N !
FUNCTION FV5A1FE 298.15 +5; 6000 N !
FUNCTION FV6A1FE 298.15 +1; 6000 N !
FUNCTION FV7A1FE 298.15 +10; 6000 N !
FUNCTION FV8A1FE 298.15 +3; 6000 N !
$ implementation of EOS for FCC_A1 Fe
FUNCTION PA1FE 298.15 +FV1A1FE#**(-1)*P; 6000 N !
FUNCTION BVA1FE 298.15 +FV0A1FE#*FV1A1FE#; 6000 N !
FUNCTION IAA1FE 298.15 +FV2A1FE#-.333333333; 6000 N !
FUNCTION FCA1FE 298.15 +1+1.33333333*IAA1FE#*PA1FE#; 6000 N !
FUNCTION LFCA1FE 298.15 +1*LN(FCA1FE#); 6000 N !
FUNCTION IXCA1FE 298.15 +1-IAA1FE#**(-1)+IAA1FE#**(-1)*EXP(.25*LFCA1FE#);
6000 N !
FUNCTION G2A1FE 298.15 +1.5*FV2A1FE#**3-6*FV2A1FE#**2+8*FV2A1FE#
-3.55555555; 6000 N !
FUNCTION G1A1FE 298.15 -9*FV2A1FE#**3+27*FV2A1FE#**2-24*FV2A1FE#
+5.33333333; 6000 N !
FUNCTION GLA1FE 298.15 +9*FV2A1FE#**3-18*FV2A1FE#**2+9*FV2A1FE#
-1.33333333; 6000 N !
FUNCTION GM1A1FE 298.15 +3*FV2A1FE#**3-3*FV2A1FE#**2+FV2A1FE#-.111111111;
6000 N !
FUNCTION GPCA1FE 298.15 +G2A1FE#*IXCA1FE#**(-2)+G1A1FE#*IXCA1FE#**(-1)
-GLA1FE#*LN(IXCA1FE#)+GM1A1FE#*IXCA1FE#-G2A1FE#-G1A1FE#-GM1A1FE#; 6000
N !
FUNCTION PTA1FE 298.15 +FV1A1FE#**(-1)*P+FV5A1FE#*FV1A1FE#**(-1)*P; 6000
N !
FUNCTION IATA1FE 298.15 +3*FV6A1FE#-1; 6000 N !
FUNCTION FTA1FE 298.15 +1+.666666667*IATA1FE#*PTA1FE#; 6000 N !
FUNCTION LTFA1FE 298.15 +1*LN(FTA1FE#); 6000 N !
FUNCTION IXTA1FE 298.15 +1-IATA1FE#**(-1)+IATA1FE#**(-1)*EXP(.5*LTFA1FE#);
6000 N !
FUNCTION GPTA1FE 298.15 +4.5*FV6A1FE#*IXTA1FE#**(-2)-3*IXTA1FE#**(-2)
-9*FV6A1FE#*IXTA1FE#**(-1)+3*IXTA1FE#**(-1)+4.5*FV6A1FE#; 6000 N !
FUNCTION PT2A1FE 298.15 +FV1A1FE#**(-1)*P+FV7A1FE#*FV1A1FE#**(-1)*P; 6000
N !
FUNCTION IYA1FE 298.15 +1+2*FV8A1FE#*PT2A1FE#; 6000 N !
FUNCTION LYA1FE 298.15 +.5*LN(IYA1FE#); 6000 N !
FUNCTION YA1FE 298.15 +1*EXP(LYA1FE#); 6000 N !
FUNCTION IBA1FE 298.15 +FV8A1FE#**(-1)-FV8A1FE#**(-1)*EXP(LYA1FE#); 6000
N !
FUNCTION GBPA1FE 298.15 +1+FV8A1FE#-FV8A1FE#*EXP(IBA1FE#)
-YA1FE#*EXP(IBA1FE#); 6000 N !
FUNCTION GBMA1FE 298.15 +1+FV8A1FE#; 6000 N !
FUNCTION GBRA1FE 298.15 +GBMA1FE#**(-1)*GBPA1FE#; 6000 N !
FUNCTION IGRA1FE 298.15 +FV5A1FE#*FV4A1FE#**(-1)+FV4A1FE#**(-1); 6000 N !
FUNCTION INTA1FE 298.15 +IGRA1FE#**(-1)*GPTA1FE#; 6000 N !
FUNCTION TA1FE 298.15 +FV3A1FE#*EXP(INTA1FE#); 6000 N !
FUNCTION IEA1FE 298.15 +1-1*EXP(-TA1FE#*T**(-1)); 6000 N !
FUNCTION IE0A1FE 298.15 +1-1*EXP(-FV3A1FE#*T**(-1)); 6000 N !
FUNCTION GQHA1FE 298.15 +24.9435*T*LN(IEA1FE#)-24.9435*T*LN(IE0A1FE#);
6000 N !
FUNCTION C0A1FE 298.15 +1-1*EXP(-.00335401644*FV3A1FE#); 6000 N !
FUNCTION C20A1FE 298.15
+1.12494262E-05*FV3A1FE#**2*EXP(-.00335401644*FV3A1FE#); 6000 N !
FUNCTION CP0A1FE 298.15 +24.9435*C20A1FE#*C0A1FE#**(-2); 6000 N !
FUNCTION IH0A1FE 298.15 +24.9435*FV3A1FE#*C0A1FE#**(-1); 6000 N !
FUNCTION H0A1FE 298.15 +IH0A1FE#*EXP(-.00335401644*FV3A1FE#); 6000 N !
FUNCTION S0A1FE 298.15
+.00335401644*IH0A1FE#*EXP(-.00335401644*FV3A1FE#)-24.9435*LN(C0A1FE#);
6000 N !
FUNCTION GT0A1FE 298.15 +24.9435*T*LN(IE0A1FE#)-H0A1FE#+S0A1FE#*T; 6000 N
!
FUNCTION DGTA1FE 1 -.00167700822*CSEA1FE#*T**2
+.00167700822*CP0A1FE#*T**2; 298.15 Y
-HSEA1FE#+SSEA1FE#*T+GA1FE#-GT0A1FE#+149.075*CSEA1FE#-149.075*CP0A1FE#
-CSEA1FE#*T+CP0A1FE#*T; 6000 N !
$ parameters for HCP_A3 Fe
FUNCTION FV0A3FE 298.15 +6.678E-06; 6000 N !
FUNCTION FV1A3FE 298.15 +1.8E+11; 6000 N !
FUNCTION FV2A3FE 298.15 +5; 6000 N !
FUNCTION FV3A3FE 298.15 +250; 6000 N !
FUNCTION FV4A3FE 298.15 +2.85; 6000 N !
FUNCTION FV5A3FE 298.15 +5.5; 6000 N !
FUNCTION FV6A3FE 298.15 +.7; 6000 N !
FUNCTION FV7A3FE 298.15 +10; 6000 N !
FUNCTION FV8A3FE 298.15 +5; 6000 N !
$ implementation of EOS for HCP_A3 Fe
FUNCTION PA3FE 298.15 +FV1A3FE#**(-1)*P; 6000 N !
FUNCTION BVA3FE 298.15 +FV0A3FE#*FV1A3FE#; 6000 N !
FUNCTION IAA3FE 298.15 +FV2A3FE#-.333333333; 6000 N !
FUNCTION FCA3FE 298.15 +1+1.33333333*IAA3FE#*PA3FE#; 6000 N !
FUNCTION LFCA3FE 298.15 +1*LN(FCA3FE#); 6000 N !
FUNCTION IXCA3FE 298.15 +1-IAA3FE#**(-1)+IAA3FE#**(-1)*EXP(.25*LFCA3FE#);
6000 N !
FUNCTION G2A3FE 298.15 +1.5*FV2A3FE#**3-6*FV2A3FE#**2+8*FV2A3FE#
-3.55555555; 6000 N !
FUNCTION G1A3FE 298.15 -9*FV2A3FE#**3+27*FV2A3FE#**2-24*FV2A3FE#
+5.33333333; 6000 N !
FUNCTION GLA3FE 298.15 +9*FV2A3FE#**3-18*FV2A3FE#**2+9*FV2A3FE#
-1.33333333; 6000 N !
FUNCTION GM1A3FE 298.15 +3*FV2A3FE#**3-3*FV2A3FE#**2+FV2A3FE#-.111111111;
6000 N !
FUNCTION GPCA3FE 298.15 +G2A3FE#*IXCA3FE#**(-2)+G1A3FE#*IXCA3FE#**(-1)
-GLA3FE#*LN(IXCA3FE#)+GM1A3FE#*IXCA3FE#-G2A3FE#-G1A3FE#-GM1A3FE#; 6000
N !
FUNCTION PTA3FE 298.15 +FV1A3FE#**(-1)*P+FV5A3FE#*FV1A3FE#**(-1)*P; 6000
N !
FUNCTION IATA3FE 298.15 +3*FV6A3FE#-1; 6000 N !
FUNCTION FTA3FE 298.15 +1+.666666667*IATA3FE#*PTA3FE#; 6000 N !
FUNCTION LTFA3FE 298.15 +1*LN(FTA3FE#); 6000 N !
FUNCTION IXTA3FE 298.15 +1-IATA3FE#**(-1)+IATA3FE#**(-1)*EXP(.5*LTFA3FE#);
6000 N !
FUNCTION GPTA3FE 298.15 +4.5*FV6A3FE#*IXTA3FE#**(-2)-3*IXTA3FE#**(-2)
-9*FV6A3FE#*IXTA3FE#**(-1)+3*IXTA3FE#**(-1)+4.5*FV6A3FE#; 6000 N !
FUNCTION PT2A3FE 298.15 +FV1A3FE#**(-1)*P+FV7A3FE#*FV1A3FE#**(-1)*P; 6000
N !
FUNCTION IYA3FE 298.15 +1+2*FV8A3FE#*PT2A3FE#; 6000 N !
FUNCTION LYA3FE 298.15 +.5*LN(IYA3FE#); 6000 N !
FUNCTION YA3FE 298.15 +1*EXP(LYA3FE#); 6000 N !
FUNCTION IBA3FE 298.15 +FV8A3FE#**(-1)-FV8A3FE#**(-1)*EXP(LYA3FE#); 6000
N !
FUNCTION GBPA3FE 298.15 +1+FV8A3FE#-FV8A3FE#*EXP(IBA3FE#)
-YA3FE#*EXP(IBA3FE#); 6000 N !
FUNCTION GBMA3FE 298.15 +1+FV8A3FE#; 6000 N !
FUNCTION GBRA3FE 298.15 +GBMA3FE#**(-1)*GBPA3FE#; 6000 N !
FUNCTION IGRA3FE 298.15 +FV5A3FE#*FV4A3FE#**(-1)+FV4A3FE#**(-1); 6000 N !
FUNCTION INTA3FE 298.15 +IGRA3FE#**(-1)*GPTA3FE#; 6000 N !
FUNCTION TA3FE 298.15 +FV3A3FE#*EXP(INTA3FE#); 6000 N !
FUNCTION IEA3FE 298.15 +1-1*EXP(-TA3FE#*T**(-1)); 6000 N !
FUNCTION IE0A3FE 298.15 +1-1*EXP(-FV3A3FE#*T**(-1)); 6000 N !
FUNCTION GQHA3FE 298.15 +24.9435*T*LN(IEA3FE#)-24.9435*T*LN(IE0A3FE#);
6000 N !
FUNCTION C0A3FE 298.15 +1-1*EXP(-.00335401644*FV3A3FE#); 6000 N !
FUNCTION C20A3FE 298.15
+1.12494262E-05*FV3A3FE#**2*EXP(-.00335401644*FV3A3FE#); 6000 N !
FUNCTION CP0A3FE 298.15 +24.9435*C20A3FE#*C0A3FE#**(-2); 6000 N !
FUNCTION IH0A3FE 298.15 +24.9435*FV3A3FE#*C0A3FE#**(-1); 6000 N !
FUNCTION H0A3FE 298.15 +IH0A3FE#*EXP(-.00335401644*FV3A3FE#); 6000 N !
FUNCTION S0A3FE 298.15
+.00335401644*IH0A3FE#*EXP(-.00335401644*FV3A3FE#)-24.9435*LN(C0A3FE#);
6000 N !
FUNCTION GT0A3FE 298.15 +24.9435*T*LN(IE0A3FE#)-H0A3FE#+S0A3FE#*T; 6000 N
!
FUNCTION DGTA3FE 1 -.00167700822*CSEA3FE#*T**2
+.00167700822*CP0A3FE#*T**2; 298.15 Y
-HSEA3FE#+SSEA3FE#*T+GA3FE#-GT0A3FE#+149.075*CSEA3FE#-149.075*CP0A3FE#
-CSEA3FE#*T+CP0A3FE#*T; 6000 N !
$ parameters for pure liquid Fe
FUNCTION FV0L1FE 298.15 +7.084E-06; 6000 N !
FUNCTION FV1L1FE 298.15 +1.5E+11; 6000 N !
FUNCTION FV2L1FE 298.15 +5.1; 6000 N !
FUNCTION FV3L1FE 298.15 +250; 6000 N !
FUNCTION FV4L1FE 298.15 +2.1; 6000 N !
FUNCTION FV5L1FE 298.15 +6; 6000 N !
FUNCTION FV6L1FE 298.15 +1; 6000 N !
FUNCTION FV7L1FE 298.15 +13; 6000 N !
FUNCTION FV8L1FE 298.15 +2.3; 6000 N !
$ implementation of EOS for pure liquid Fe
FUNCTION PL1FE 298.15 +FV1L1FE#**(-1)*P; 6000 N !
FUNCTION BVL1FE 298.15 +FV0L1FE#*FV1L1FE#; 6000 N !
FUNCTION IAL1FE 298.15 +FV2L1FE#-.333333333; 6000 N !
FUNCTION FCL1FE 298.15 +1+1.33333333*IAL1FE#*PL1FE#; 6000 N !
FUNCTION LFCL1FE 298.15 +1*LN(FCL1FE#); 6000 N !
FUNCTION IXCL1FE 298.15 +1-IAL1FE#**(-1)+IAL1FE#**(-1)*EXP(.25*LFCL1FE#);
6000 N !
FUNCTION G2L1FE 298.15 +1.5*FV2L1FE#**3-6*FV2L1FE#**2+8*FV2L1FE#
-3.55555555; 6000 N !
FUNCTION G1L1FE 298.15 -9*FV2L1FE#**3+27*FV2L1FE#**2-24*FV2L1FE#
+5.33333333; 6000 N !
FUNCTION GLL1FE 298.15 +9*FV2L1FE#**3-18*FV2L1FE#**2+9*FV2L1FE#
-1.33333333; 6000 N !
FUNCTION GM1L1FE 298.15 +3*FV2L1FE#**3-3*FV2L1FE#**2+FV2L1FE#-.111111111;
6000 N !
FUNCTION GPCL1FE 298.15 +G2L1FE#*IXCL1FE#**(-2)+G1L1FE#*IXCL1FE#**(-1)
-GLL1FE#*LN(IXCL1FE#)+GM1L1FE#*IXCL1FE#-G2L1FE#-G1L1FE#-GM1L1FE#; 6000
N !
FUNCTION PTL1FE 298.15 +FV1L1FE#**(-1)*P+FV5L1FE#*FV1L1FE#**(-1)*P; 6000
N !
FUNCTION IATL1FE 298.15 +3*FV6L1FE#-1; 6000 N !
FUNCTION FTL1FE 298.15 +1+.666666667*IATL1FE#*PTL1FE#; 6000 N !
FUNCTION LTFL1FE 298.15 +1*LN(FTL1FE#); 6000 N !
FUNCTION IXTL1FE 298.15 +1-IATL1FE#**(-1)+IATL1FE#**(-1)*EXP(.5*LTFL1FE#);
6000 N !
FUNCTION GPTL1FE 298.15 +4.5*FV6L1FE#*IXTL1FE#**(-2)-3*IXTL1FE#**(-2)
-9*FV6L1FE#*IXTL1FE#**(-1)+3*IXTL1FE#**(-1)+4.5*FV6L1FE#; 6000 N !
FUNCTION PT2L1FE 298.15 +FV1L1FE#**(-1)*P+FV7L1FE#*FV1L1FE#**(-1)*P; 6000
N !
FUNCTION IYL1FE 298.15 +1+2*FV8L1FE#*PT2L1FE#; 6000 N !
FUNCTION LYL1FE 298.15 +.5*LN(IYL1FE#); 6000 N !
FUNCTION YL1FE 298.15 +1*EXP(LYL1FE#); 6000 N !
FUNCTION IBL1FE 298.15 +FV8L1FE#**(-1)-FV8L1FE#**(-1)*EXP(LYL1FE#); 6000
N !
FUNCTION GBPL1FE 298.15 +1+FV8L1FE#-FV8L1FE#*EXP(IBL1FE#)
-YL1FE#*EXP(IBL1FE#); 6000 N !
FUNCTION GBML1FE 298.15 +1+FV8L1FE#; 6000 N !
FUNCTION GBRL1FE 298.15 +GBML1FE#**(-1)*GBPL1FE#; 6000 N !
FUNCTION IGRL1FE 298.15 +FV5L1FE#*FV4L1FE#**(-1)+FV4L1FE#**(-1); 6000 N !
FUNCTION INTL1FE 298.15 +IGRL1FE#**(-1)*GPTL1FE#; 6000 N !
FUNCTION TL1FE 298.15 +FV3L1FE#*EXP(INTL1FE#); 6000 N !
FUNCTION IEL1FE 298.15 +1-1*EXP(-TL1FE#*T**(-1)); 6000 N !
FUNCTION IE0L1FE 298.15 +1-1*EXP(-FV3L1FE#*T**(-1)); 6000 N !
FUNCTION GQHL1FE 298.15 +24.9435*T*LN(IEL1FE#)-24.9435*T*LN(IE0L1FE#);
6000 N !
FUNCTION C0L1FE 298.15 +1-1*EXP(-.00335401644*FV3L1FE#); 6000 N !
FUNCTION C20L1FE 298.15
+1.12494262E-05*FV3L1FE#**2*EXP(-.00335401644*FV3L1FE#); 6000 N !
FUNCTION CP0L1FE 298.15 +24.9435*C20L1FE#*C0L1FE#**(-2); 6000 N !
FUNCTION IH0L1FE 298.15 +24.9435*FV3L1FE#*C0L1FE#**(-1); 6000 N !
FUNCTION H0L1FE 298.15 +IH0L1FE#*EXP(-.00335401644*FV3L1FE#); 6000 N !
FUNCTION S0L1FE 298.15
+.00335401644*IH0L1FE#*EXP(-.00335401644*FV3L1FE#)-24.9435*LN(C0L1FE#);
6000 N !
FUNCTION GT0L1FE 298.15 +24.9435*T*LN(IE0L1FE#)-H0L1FE#+S0L1FE#*T; 6000 N
!
FUNCTION DGTL1FE 1 -.00167700822*CSEL1FE#*T**2
+.00167700822*CP0L1FE#*T**2; 298.15 Y
-HSEL1FE#+SSEL1FE#*T+GL1FE#-GT0L1FE#+149.075*CSEL1FE#-149.075*CP0L1FE#
-CSEL1FE#*T+CP0L1FE#*T; 6000 N !
$ parameters for Graphite
FUNCTION FV0A9C 298.15 +5.273E-06; 6000 N !
FUNCTION FV1A9C 298.15 +3.38E+10; 6000 N !
FUNCTION FV2A9C 298.15 +8.9; 6000 N !
FUNCTION FV3A9C1 298.15 +520; 6000 N !
FUNCTION FV4A9C1 298.15 +.7; 6000 N !
FUNCTION FV3A9C2 298.15 +1677; 6000 N !
FUNCTION FV4A9C2 298.15 +.01; 6000 N !
FUNCTION FV5A9C 298.15 +5; 6000 N !
FUNCTION FV6A9C 298.15 +1; 6000 N !
FUNCTION FV7A9C 298.15 +5; 6000 N !
FUNCTION FV8A9C 298.15 +10; 6000 N !
$ implementation of EOS for graphite
FUNCTION PA9C 298.15 +FV1A9C#**(-1)*P; 6000 N !
FUNCTION BVA9C 298.15 +FV0A9C#*FV1A9C#; 6000 N !
FUNCTION IAA9C 298.15 +FV2A9C#-.333333333; 6000 N !
FUNCTION FCA9C 298.15 +1+1.33333333*IAA9C#*PA9C#; 6000 N !
FUNCTION LFCA9C 298.15 +1*LN(FCA9C#); 6000 N !
FUNCTION IXCA9C 298.15 +1-IAA9C#**(-1)+IAA9C#**(-1)*EXP(.25*LFCA9C#);
6000 N !
FUNCTION G2A9C 298.15 +1.5*FV2A9C#**3-6*FV2A9C#**2+8*FV2A9C#-3.55555555;
6000 N !
FUNCTION G1A9C 298.15 -9*FV2A9C#**3+27*FV2A9C#**2-24*FV2A9C#+5.33333333;
6000 N !
FUNCTION GLA9C 298.15 +9*FV2A9C#**3-18*FV2A9C#**2+9*FV2A9C#-1.33333333;
6000 N !
FUNCTION GM1A9C 298.15 +3*FV2A9C#**3-3*FV2A9C#**2+FV2A9C#-.111111111;
6000 N !
FUNCTION GPCA9C 298.15 +G2A9C#*IXCA9C#**(-2)+G1A9C#*IXCA9C#**(-1)
-GLA9C#*LN(IXCA9C#)+GM1A9C#*IXCA9C#-G2A9C#-G1A9C#-GM1A9C#; 6000 N !
FUNCTION PTA9C 298.15 +FV1A9C#**(-1)*P+FV5A9C#*FV1A9C#**(-1)*P; 6000 N !
FUNCTION IATA9C 298.15 +3*FV6A9C#-1; 6000 N !
FUNCTION FTA9C 298.15 +1+.666666667*IATA9C#*PTA9C#; 6000 N !
FUNCTION LTFA9C 298.15 +1*LN(FTA9C#); 6000 N !
FUNCTION IXTA9C 298.15 +1-IATA9C#**(-1)+IATA9C#**(-1)*EXP(.5*LTFA9C#);
6000 N !
FUNCTION GPTA9C 298.15 +4.5*FV6A9C#*IXTA9C#**(-2)-3*IXTA9C#**(-2)
-9*FV6A9C#*IXTA9C#**(-1)+3*IXTA9C#**(-1)+4.5*FV6A9C#; 6000 N !
FUNCTION PT2A9C 298.15 +FV1A9C#**(-1)*P+FV7A9C#*FV1A9C#**(-1)*P; 6000 N !
FUNCTION IYA9C 298.15 +1+2*FV8A9C#*PT2A9C#; 6000 N !
FUNCTION LYA9C 298.15 +.5*LN(IYA9C#); 6000 N !
FUNCTION YA9C 298.15 +1*EXP(LYA9C#); 6000 N !
FUNCTION IBA9C 298.15 +FV8A9C#**(-1)-FV8A9C#**(-1)*EXP(LYA9C#); | |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from distutils.version import LooseVersion
import unittest
import numpy as np
import pandas as pd
from databricks import koalas as ks
from databricks.koalas.exceptions import SparkPandasIndexingError
from databricks.koalas.testing.utils import ComparisonTestBase, ReusedSQLTestCase, compare_both
class BasicIndexingTest(ComparisonTestBase):
    """set_index/reset_index parity checks between pandas and Koalas."""

    @property
    def pdf(self):
        # Fixture with three int columns; `month`/`year` serve as index
        # candidates throughout the tests below.
        return pd.DataFrame(
            {"month": [1, 4, 7, 10], "year": [2012, 2014, 2013, 2014], "sale": [55, 40, 84, 31]}
        )

    @compare_both(almost=False)
    def test_indexing(self, df):
        """Yield frames/series built via set_index/reset_index.

        The `compare_both` decorator evaluates each yielded object on
        both backends (pandas and Koalas) and asserts equality, so the
        yield order and the interleaved inplace mutations matter.
        """
        df1 = df.set_index("month")
        yield df1
        yield df.set_index("month", drop=False)
        yield df.set_index("month", append=True)
        yield df.set_index(["year", "month"])
        yield df.set_index(["year", "month"], drop=False)
        yield df.set_index(["year", "month"], append=True)
        yield df1.set_index("year", drop=False, append=True)

        df2 = df1.copy()
        df2.set_index("year", append=True, inplace=True)
        yield df2

        # Unknown labels raise KeyError, matching pandas.
        self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index("unknown"))
        self.assertRaisesRegex(KeyError, "unknown", lambda: df.set_index(["month", "unknown"]))

        for d in [df, df1, df2]:
            yield d.reset_index()
            yield d.reset_index(drop=True)

        # reset_index by level position and by level name.
        yield df1.reset_index(level=0)
        yield df2.reset_index(level=1)
        yield df2.reset_index(level=[1, 0])
        yield df1.reset_index(level="month")
        yield df2.reset_index(level="year")
        yield df2.reset_index(level=["month", "year"])
        yield df2.reset_index(level="month", drop=True)
        yield df2.reset_index(level=["month", "year"], drop=True)

        # Out-of-range or unknown levels fail with the pandas messages.
        self.assertRaisesRegex(
            IndexError,
            "Too many levels: Index has only 1 level, not 3",
            lambda: df1.reset_index(level=2),
        )
        self.assertRaisesRegex(
            IndexError,
            "Too many levels: Index has only 1 level, not 4",
            lambda: df1.reset_index(level=[3, 2]),
        )
        self.assertRaisesRegex(KeyError, "unknown.*month", lambda: df1.reset_index(level="unknown"))
        self.assertRaisesRegex(
            KeyError, "Level unknown not found", lambda: df2.reset_index(level="unknown")
        )

        df3 = df2.copy()
        df3.reset_index(inplace=True)
        yield df3

        # Series reset_index variants.
        yield df1.sale.reset_index()
        yield df1.sale.reset_index(level=0)
        yield df2.sale.reset_index(level=[1, 0])
        yield df1.sale.reset_index(drop=True)
        yield df1.sale.reset_index(name="s")
        yield df1.sale.reset_index(name="s", drop=True)

        s = df1.sale
        self.assertRaisesRegex(
            TypeError,
            "Cannot reset_index inplace on a Series to create a DataFrame",
            lambda: s.reset_index(inplace=True),
        )
        s.reset_index(drop=True, inplace=True)
        yield s
        # NOTE(review): df1 is yielded again after the inplace reset on
        # `s`, presumably to check df1 itself was not altered — confirm.
        yield df1

        # multi-index columns
        df4 = df.copy()
        df4.columns = pd.MultiIndex.from_tuples(
            [("cal", "month"), ("cal", "year"), ("num", "sale")]
        )
        df5 = df4.set_index(("cal", "month"))
        yield df5
        yield df4.set_index([("cal", "month"), ("num", "sale")])

        # A bare tuple level is rejected; it must be wrapped in a list.
        self.assertRaises(KeyError, lambda: df5.reset_index(level=("cal", "month")))
        yield df5.reset_index(level=[("cal", "month")])

        # non-string names
        df6 = df.copy()
        df6.columns = [10.0, 20.0, 30.0]
        df7 = df6.set_index(10.0)
        yield df7
        yield df6.set_index([10.0, 30.0])
        yield df7.reset_index(level=10.0)
        yield df7.reset_index(level=[10.0])

        df8 = df.copy()
        df8.columns = pd.MultiIndex.from_tuples([(10, "month"), (10, "year"), (20, "sale")])
        df9 = df8.set_index((10, "month"))
        yield df9
        yield df8.set_index([(10, "month"), (20, "sale")])
        yield df9.reset_index(level=[(10, "month")])

    def test_from_pandas_with_explicit_index(self):
        """from_pandas round-trips a frame with an explicit (multi)index."""
        pdf = self.pdf
        df1 = ks.from_pandas(pdf.set_index("month"))
        self.assertPandasEqual(df1.to_pandas(), pdf.set_index("month"))
        df2 = ks.from_pandas(pdf.set_index(["year", "month"]))
        self.assertPandasEqual(df2.to_pandas(), pdf.set_index(["year", "month"]))

    def test_limitations(self):
        """Known Koalas limitation: reset_index levels cannot mix int and str."""
        df = self.kdf.set_index("month")
        self.assertRaisesRegex(
            ValueError,
            "Level should be all int or all string.",
            lambda: df.reset_index([1, "month"]),
        )
class IndexingTest(ReusedSQLTestCase):
@property
def pdf(self):
    """pandas fixture: two int columns over a gapped index with duplicate 9s."""
    data = {
        "a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
        "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
    }
    return pd.DataFrame(data, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def kdf(self):
    """Koalas frame built from the same data as the `pdf` fixture."""
    source = self.pdf
    return ks.from_pandas(source)
@property
def pdf2(self):
    """Same fixture as `pdf` but with integer column labels 0 and 1."""
    data = {
        0: [1, 2, 3, 4, 5, 6, 7, 8, 9],
        1: [4, 5, 6, 3, 2, 1, 0, 0, 0],
    }
    return pd.DataFrame(data, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def kdf2(self):
    """Koalas frame built from the integer-labelled `pdf2` fixture."""
    source = self.pdf2
    return ks.from_pandas(source)
def test_at(self):
    """`.at` label-based scalar access on DataFrame and Series.

    Checks parity with pandas for unique and duplicated index labels,
    the accepted call signatures, KeyError cases, and that `.at`
    assignment is unsupported in Koalas.
    """
    pdf = self.pdf
    kdf = self.kdf
    # Create the equivalent of pdf.loc[3] as a Koalas Series
    # This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
    test_series = ks.Series([3, 6], index=["a", "b"], name="3")
    # Assert invalid signatures raise TypeError
    with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
        kdf.at[3]
    with self.assertRaises(TypeError, msg="Use DataFrame.at like .at[row_index, column_name]"):
        kdf.at["ab"]  # 'ab' is of length 2 but str type instead of tuple
    with self.assertRaises(TypeError, msg="Use Series.at like .at[column_name]"):
        test_series.at[3, "b"]
    # Assert .at for DataFrames
    self.assertEqual(kdf.at[3, "b"], 6)
    self.assertEqual(kdf.at[3, "b"], pdf.at[3, "b"])
    # The duplicated label 9 yields all matching values as an array.
    self.assert_eq(kdf.at[9, "b"], np.array([0, 0, 0]))
    self.assert_eq(kdf.at[9, "b"], pdf.at[9, "b"])
    # Assert .at for Series
    self.assertEqual(test_series.at["b"], 6)
    self.assertEqual(test_series.at["b"], pdf.loc[3].at["b"])
    # Assert multi-character indices
    self.assertEqual(
        ks.Series([0, 1], index=["ab", "cd"]).at["ab"],
        pd.Series([0, 1], index=["ab", "cd"]).at["ab"],
    )
    # Assert invalid column or index names result in a KeyError like with pandas
    with self.assertRaises(KeyError, msg="x"):
        kdf.at[3, "x"]
    with self.assertRaises(KeyError, msg=99):
        kdf.at[99, "b"]
    # A tuple row key against a flat index is rejected.
    with self.assertRaises(ValueError):
        kdf.at[(3, 6), "b"]
    with self.assertRaises(KeyError):
        kdf.at[3, ("x", "b")]
    # Assert setting values fails
    with self.assertRaises(TypeError):
        kdf.at[3, "b"] = 10
    # non-string column names: repeat the DataFrame checks on pdf2/kdf2.
    pdf = self.pdf2
    kdf = self.kdf2
    # Assert .at for DataFrames
    self.assertEqual(kdf.at[3, 1], 6)
    self.assertEqual(kdf.at[3, 1], pdf.at[3, 1])
    self.assert_eq(kdf.at[9, 1], np.array([0, 0, 0]))
    self.assert_eq(kdf.at[9, 1], pdf.at[9, 1])
def test_at_multiindex(self):
    """`.at` with a row MultiIndex: full and partial tuple keys."""
    pdf = self.pdf.set_index("b", append=True)
    kdf = self.kdf.set_index("b", append=True)
    # TODO: seems like a pandas' bug in pandas>=1.1.0
    if LooseVersion(pd.__version__) < LooseVersion("1.1.0"):
        # Pre-1.1.0: compare directly against pandas.
        self.assert_eq(kdf.at[(3, 6), "a"], pdf.at[(3, 6), "a"])
        self.assert_eq(kdf.at[(3,), "a"], pdf.at[(3,), "a"])
        self.assert_eq(list(kdf.at[(9, 0), "a"]), list(pdf.at[(9, 0), "a"]))
        self.assert_eq(list(kdf.at[(9,), "a"]), list(pdf.at[(9,), "a"]))
    else:
        # >=1.1.0: compare against hard-coded expected values instead.
        self.assert_eq(kdf.at[(3, 6), "a"], 3)
        self.assert_eq(kdf.at[(3,), "a"], np.array([3]))
        self.assert_eq(list(kdf.at[(9, 0), "a"]), [7, 8, 9])
        self.assert_eq(list(kdf.at[(9,), "a"]), [7, 8, 9])
    # A bare scalar row key is rejected on a MultiIndex.
    with self.assertRaises(ValueError):
        kdf.at[3, "a"]
def test_at_multiindex_columns(self):
    """`.at` under MultiIndex columns requires the full column tuple."""
    levels = [
        np.array(["bar", "bar", "baz", "baz"]),
        np.array(["one", "two", "one", "two"]),
    ]
    pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=levels)
    kdf = ks.from_pandas(pdf)
    self.assert_eq(kdf.at["B", ("bar", "one")], pdf.at["B", ("bar", "one")])
    # A partial (single-level) column label is rejected.
    with self.assertRaises(KeyError):
        kdf.at["B", "bar"]
    # Repeat the lookup with non-string column labels.
    levels = [np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2])]
    pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=levels)
    kdf = ks.from_pandas(pdf)
    self.assert_eq(kdf.at["B", (0, 1)], pdf.at["B", (0, 1)])
def test_iat(self):
    """`.iat` integer-position scalar access on DataFrame and Series.

    Covers valid positional lookups, invalid call signatures
    (TypeError/ValueError), out-of-range positions (KeyError), and the
    unsupported setter (TypeError).
    """
    pdf = self.pdf
    kdf = self.kdf
    # Create the equivalent of pdf.loc[3] as a Koalas Series
    # This is necessary because .loc[n] does not currently work with Koalas DataFrames (#383)
    test_series = ks.Series([3, 6], index=["a", "b"], name="3")
    # Assert invalid signatures raise TypeError
    # (fixed msg: said "DataFrame.at" with typo "row_interget_position")
    with self.assertRaises(
        TypeError,
        msg="Use DataFrame.iat like .iat[row_integer_position, column_integer_position]",
    ):
        kdf.iat[3]
    with self.assertRaises(
        ValueError, msg="iAt based indexing can only have integer indexers"
    ):
        kdf.iat[3, "b"]  # "b" is a label, not an integer position
    with self.assertRaises(TypeError, msg="Use Series.iat like .iat[row_integer_position]"):
        test_series.iat[3, "b"]
    # Assert .iat for DataFrames
    self.assertEqual(kdf.iat[7, 0], 8)
    self.assertEqual(kdf.iat[7, 0], pdf.iat[7, 0])
    # Assert .iat for Series
    self.assertEqual(test_series.iat[1], 6)
    self.assertEqual(test_series.iat[1], pdf.loc[3].iat[1])
    # Out-of-range positions raise KeyError; tuples are rejected.
    with self.assertRaises(KeyError, msg=99):
        kdf.iat[0, 99]
    with self.assertRaises(KeyError, msg=99):
        kdf.iat[99, 0]
    with self.assertRaises(ValueError):
        kdf.iat[(1, 1), 1]
    with self.assertRaises(ValueError):
        kdf.iat[1, (1, 1)]
    # Assert setting values fails
    with self.assertRaises(TypeError):
        kdf.iat[4, 1] = 10
def test_iat_multiindex(self):
    """`.iat` is purely positional, so a row MultiIndex changes nothing."""
    expected = self.pdf.set_index("b", append=True)
    actual = self.kdf.set_index("b", append=True)
    self.assert_eq(actual.iat[7, 0], expected.iat[7, 0])
    # A label-based column key must still be rejected by .iat.
    with self.assertRaises(ValueError):
        actual.iat[3, "a"]
def test_iat_multiindex_columns(self):
    """`.iat` positional access under MultiIndex columns."""
    levels = [
        np.array(["bar", "bar", "baz", "baz"]),
        np.array(["one", "two", "one", "two"]),
    ]
    expected = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=levels)
    actual = ks.from_pandas(expected)
    self.assert_eq(actual.iat[1, 3], expected.iat[1, 3])
    # Positions past either axis bound raise KeyError.
    with self.assertRaises(KeyError):
        actual.iat[0, 99]
    with self.assertRaises(KeyError):
        actual.iat[99, 0]
def test_loc(self):
kdf = self.kdf
pdf = self.pdf
self.assert_eq(kdf.loc[5:5], pdf.loc[5:5])
self.assert_eq(kdf.loc[3:8], pdf.loc[3:8])
self.assert_eq(kdf.loc[:8], pdf.loc[:8])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[[5]], pdf.loc[[5]])
self.assert_eq(kdf.loc[:], pdf.loc[:])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 8]], pdf.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.loc[[3, 4, 1, 9]], pdf.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.loc[np.array([3, 4, 1, 9])], pdf.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[5:5], pdf.a.loc[5:5])
self.assert_eq(kdf.a.loc[3:8], pdf.a.loc[3:8])
self.assert_eq(kdf.a.loc[:8], pdf.a.loc[:8])
self.assert_eq(kdf.a.loc[3:], pdf.a.loc[3:])
self.assert_eq(kdf.a.loc[[5]], pdf.a.loc[[5]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 8]], pdf.a.loc[[3, 4, 1, 8]])
# TODO?: self.assert_eq(kdf.a.loc[[3, 4, 1, 9]], pdf.a.loc[[3, 4, 1, 9]])
# TODO?: self.assert_eq(kdf.a.loc[np.array([3, 4, 1, 9])],
# pdf.a.loc[np.array([3, 4, 1, 9])])
self.assert_eq(kdf.a.loc[[]], pdf.a.loc[[]])
self.assert_eq(kdf.a.loc[np.array([])], pdf.a.loc[np.array([])])
self.assert_eq(kdf.loc[1000:], pdf.loc[1000:])
self.assert_eq(kdf.loc[-2000:-1000], pdf.loc[-2000:-1000])
self.assert_eq(kdf.loc[5], pdf.loc[5])
self.assert_eq(kdf.loc[9], pdf.loc[9])
self.assert_eq(kdf.a.loc[5], pdf.a.loc[5])
self.assert_eq(kdf.a.loc[9], pdf.a.loc[9])
self.assertRaises(KeyError, lambda: kdf.loc[10])
self.assertRaises(KeyError, lambda: kdf.a.loc[10])
# monotonically increasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[0, 1, 1, 2, 2, 2, 4, 5, 6])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:2], pdf.loc[:2])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[4:], pdf.loc[4:])
self.assert_eq(kdf.loc[3:2], pdf.loc[3:2])
self.assert_eq(kdf.loc[-1:2], pdf.loc[-1:2])
self.assert_eq(kdf.loc[3:10], pdf.loc[3:10])
# monotonically decreasing index test
pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]}, index=[6, 5, 5, 4, 4, 4, 2, 1, 0])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc[:4], pdf.loc[:4])
self.assert_eq(kdf.loc[:3], pdf.loc[:3])
self.assert_eq(kdf.loc[3:], pdf.loc[3:])
self.assert_eq(kdf.loc[2:], pdf.loc[2:])
self.assert_eq(kdf.loc[2:3], pdf.loc[2:3])
self.assert_eq(kdf.loc[2:-1], pdf.loc[2:-1])
self.assert_eq(kdf.loc[10:3], pdf.loc[10:3])
# test when type of key is string and given value is not included in key
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=["a", "b", "d"])
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.loc["a":"z"], pdf.loc["a":"z"])
# KeyError when index is not monotonic increasing or decreasing
# and specified values don't | |
Default: 0",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["-L", "seed_length"],
"Sets the length of the seed substrings to align during multiseed alignment. "
"Smaller values make alignment slower but more senstive. Default: the --sensitive preset is used "
"by default, which sets -L to 20 both in --end-to-end mode and in --local mode",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["-i", "i_func"],
"Sets a function governing the interval between seed substrings to use during multiseed alignment. "
"For instance, if the read has 30 characters, and seed length is 10, and the seed interval is 6, "
"the seeds extracted will be: Since it's best to use longer intervals for longer reads, this "
"parameter sets the interval as a function of the read length, rather than a single one-size-fits-"
"all number. For instance, specifying -i S,1,2.5 sets the interval "
"function f to f(x) = 1 + 2.5 * sqrt(x), where x is the read length. "
"See also: setting function options. If the function returns a result less than 1, it is rounded up"
" to 1. Default: the --sensitive preset is used by default, which sets -i to S,1,1.15 "
"in --end-to-end mode to -i S,1,0.75 in --local mode.",
checker_function=lambda value: re.match('^[CLSG],[-\d\.],[-\d\.]', value) is not None,
equate=False),
_Option(["--n-ceil", "n_ceil"],
"Sets a function governing the maximum number of ambiguous characters (usually Ns and/or .s) "
"allowed in a read as a function of read length. For instance, specifying -L,0,0.15 sets the "
"N-ceiling function f to f(x) = 0 + 0.15 * x, where x is the read length. See also: setting "
"function options. Reads exceeding this ceiling are filtered out. Default: L,0,0.15.",
checker_function=lambda value: re.match('^[CLSG],[-\d\.],[-\d\.]', value) is not None,
equate=False),
_Option(["--gbar", "gbar"],
"Disallow gaps within <int> positions of the beginning or end of the read. Default: 4.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--dpad", "dpad"],
"Pads dynamic programming problems by <int> columns on either side to allow gaps. Default: 15.",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["--ignore-quals", "ignore_quals"],
"When calculating a mismatch penalty, always consider the quality value at the mismatched position "
"to be the highest possible, regardless of the actual value. I.e. input is treated as though all "
"quality values are high. This is also the default behavior when the input doesn't specify quality "
"values (e.g. in -f, -r, or -c modes)"),
_Switch(["--nofw", "nofw"],
"If --nofw is specified, bowtie2 will not attempt to align unpaired reads to the forward (Watson) "
"reference strand. In paired-end mode, --nofw and --norc pertain to the fragments; i.e. specifying "
"--nofw causes bowtie2 to explore only those paired-end configurations corresponding to fragments "
"from the reverse-complement (Crick) strand. Default: both strands enabled"),
_Switch(["--norc", "norc"],
"If --norc is specified, bowtie2 will not attempt to align unpaired reads against the reverse-"
"complement Crick reference strand. In paired-end mode, --nofw and --norc pertain to the fragments;"
" i.e. specifying --nofw causes bowtie2 to explore only those paired-end configurations "
"corresponding to fragments from the reverse-complement (Crick) strand. Default: both strands"),
_Switch(["--no-1mm-upfront", "no_1mm_upfront"],
"By default, Bowtie 2 will attempt to find either an exact or a 1-mismatch end-to-end alignment"
" for the read before trying the multiseed heuristic. Such alignments can be found very quickly,"
" and many short read alignments have exact or near-exact end-to-end alignments. However, this can "
"lead to unexpected alignments when the user also sets options governing the multiseed heuristic, "
"like -L and -N. For instance, if the user specifies -N 0 and -L equal to the length of the read, "
"the user will be surprised to find 1-mismatch alignments reported. This option prevents Bowtie 2 "
"from searching for 1-mismatch end-to-end alignments before using the multiseed heuristic, which "
"leads to the expected behavior when combined with options such as -L and -N. This comes at the "
"expense of speed"),
_Switch(["--end-to-end", "end_to_end"],
"In this mode, Bowtie 2 requires that the entire read align from one end to the other, without any "
"trimming (or soft clipping) of characters from either end. The match bonus --ma always equals 0 in"
" this mode, so all alignment scores are less than or equal to 0, and the greatest possible "
"alignment score is 0. This is mutually exclusive with --local. --end-to-end is the default mode"),
_Switch(["--local", "local"],
"In this mode, Bowtie 2 does not require that the entire read align from one end to the other. "
"Rather, some characters may be omitted (soft clipped) from the ends in order to achieve the "
"greatest possible alignment score. The match bonus --ma is used in this mode, and the best "
"possible alignment score is equal to the match bonus (--ma) times the length of the read. "
"Specifying --local and one of the presets (e.g. --local --very-fast) is equivalent to specifying "
"the local version of the preset (--very-fast-local). This is mutually exclusive with --end-to-end."
" --end-to-end is the default mode"),
# Scoring Options
_Option(["--score-min", "score_min"],
"Sets a function governing the minimum alignment score needed for an alignment to be considered "
"valid (i.e. good enough to report). This is a function of read length. For instance, specifying "
"L,0,-0.6 sets the minimum-score function f to f(x) = 0 + -0.6 * x, where x is the read length."
" See also: setting function options. The default in --end-to-end mode is L,-0.6,-0.6 "
"and the default in --local mode is G,20,8.",
checker_function=lambda value: re.match('^[CLSG],[-\d\.],[-\d\.]', value) is not None,
equate=False),
_Option(["--ma", "ma"],
"Sets the match bonus. In --local mode <int> is added to the alignment score for each "
"position where a read character aligns to a reference character and the characters match. "
"Not used in --end-to-end mode. Default: 2.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--np", "np"],
"Sets penalty for positions where the read, reference, or both, contain an ambiguous "
"character such as N. Default: 1.",
checker_function=lambda value: type(value) is int,
equate=False),
_Option(["--rdg", "rdg"],
"Sets the read gap open (<int1>) and extend (<int2>) penalties. A read gap of length N gets"
" a penalty of <int1> + N * <int2>. Default: 5, 3.",
checker_function=lambda value: re.match('[-d.],[-d.]', value) is not None,
equate=False),
_Option(["--rfg", "rfg"],
"Sets the reference gap open (<int1>) and extend (<int2>) penalties. A reference gap of "
"length N gets a penalty of <int1> + N * <int2>. Default: 5, 3.",
checker_function=lambda value: re.match('[-d.],[-d.]', value) is not None,
equate=False),
_Option(["--mp", "mp"],
"Sets the maximum (MX) and minimum (MN) mismatch penalties, both integers. A number less "
"than or equal to MX and greater than or equal to MN is subtracted from the alignment score for "
"each position where a read character aligns to a reference character, the characters do not match,"
" and neither is an N. If --ignore-quals is specified, the number subtracted quals MX. "
"Otherwise, the number subtracted is MN + floor( (MX-MN)(MIN(Q, 40.0)/40.0) ) "
"where Q is the Phred quality value. Default: MX = 6, MN = 2.",
checker_function=lambda value: re.match('[-d.],[-d.]', value) is not None,
equate=False),
# Reporting Options
_Option(["-k", "k"],
"By default, bowtie2 searches for distinct, valid alignments for each read. When it finds a"
" valid alignment, it continues looking for alignments that are nearly as good or better. The best "
"alignment found is reported (randomly selected from among best if tied). Information about the "
"best alignments is used to estimate mapping quality and to set SAM optional fields, such as "
"AS:i and XS:i.",
checker_function=lambda value: type(value) is int,
equate=False),
_Switch(["-a", "a"],
"Like -k but with no upper limit on number of alignments to search for. "
"-a is mutually exclusive with -k."),
            # Effort Options
_Option(["-D", "D"],
"Up to <int> consecutive seed extension attempts can fail before Bowtie | |
<filename>facebook_business/api.py
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.session import FacebookSession
from facebook_business import apiconfig
from facebook_business.exceptions import (
FacebookRequestError,
FacebookBadObjectError,
FacebookUnavailablePropertyException,
FacebookBadParameterError,
)
from facebook_business.utils import api_utils
from facebook_business.utils import urls
from contextlib import contextmanager
import copy
from six.moves import http_client
import os
import json
import six
import collections
import re
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.typechecker import TypeChecker
"""
api module contains classes that make http requests to Facebook's graph API.
"""
class FacebookResponse(object):
    """Encapsulates an http response from Facebook's Graph API."""

    def __init__(self, body=None, http_status=None, headers=None, call=None):
        """Initializes the object's internal data.
        Args:
            body (optional): The response body as text.
            http_status (optional): The http status code.
            headers (optional): The http headers.
            call (optional): The original call that was made.
        """
        self._body = body
        self._http_status = http_status
        self._headers = headers or {}
        self._call = call

    def body(self):
        """Returns the response body."""
        return self._body

    def json(self):
        """Returns the response body -- in json if possible."""
        try:
            return json.loads(self._body)
        except (TypeError, ValueError):
            # Body is None or not valid JSON; return it unparsed.
            return self._body

    def headers(self):
        """Return the response headers."""
        return self._headers

    def etag(self):
        """Returns the ETag header value if it exists."""
        return self._headers.get('ETag')

    def status(self):
        """Returns the http status code of the response."""
        return self._http_status

    def is_success(self):
        """Returns boolean indicating if the call was successful."""
        json_body = self.json()
        # collections.Mapping was removed in Python 3.10; collections.abc.Mapping
        # exists since 3.3 and the getattr() fallback keeps Python 2 working.
        mapping_type = getattr(collections, 'abc', collections).Mapping
        if isinstance(json_body, mapping_type) and 'error' in json_body:
            # Is a dictionary, has error in it
            return False
        elif bool(json_body):
            # Has body and no error
            if 'success' in json_body:
                return json_body['success']
            # API can return a success 200 when service unavailable occurs
            return 'Service Unavailable' not in json_body
        elif self._http_status == http_client.NOT_MODIFIED:
            # ETAG Hit
            return True
        elif self._http_status == http_client.OK:
            # HTTP Okay
            return True
        else:
            # Something else
            return False

    def is_failure(self):
        """Returns boolean indicating if the call failed."""
        return not self.is_success()

    def error(self):
        """
        Returns a FacebookRequestError (located in the exceptions module) with
        an appropriate debug message.
        """
        if self.is_failure():
            return FacebookRequestError(
                "Call was not successful",
                self._call,
                self.status(),
                self.headers(),
                self.body(),
            )
        else:
            return None
class FacebookAdsApi(object):
    """Encapsulates session attributes and methods to make API calls.
    Attributes:
        SDK_VERSION (class): indicating sdk version.
        API_VERSION (class): default Graph API version used for calls.
        HTTP_METHOD_GET (class): HTTP GET method name.
        HTTP_METHOD_POST (class): HTTP POST method name
        HTTP_METHOD_DELETE (class): HTTP DELETE method name
        HTTP_DEFAULT_HEADERS (class): Default HTTP headers for requests made by
            this sdk.
    """
    SDK_VERSION = apiconfig.ads_api_config['SDK_VERSION']
    API_VERSION = apiconfig.ads_api_config['API_VERSION']

    HTTP_METHOD_GET = 'GET'
    HTTP_METHOD_POST = 'POST'
    HTTP_METHOD_DELETE = 'DELETE'

    HTTP_DEFAULT_HEADERS = {
        'User-Agent': "fbbizsdk-python-%s" % SDK_VERSION,
    }

    # Process-wide defaults used by objects created without an explicit api.
    _default_api = None
    _default_account_id = None

    def __init__(self, session, api_version=None, enable_debug_logger=False):
        """Initializes the api instance.
        Args:
            session: FacebookSession object that contains a requests interface
                and attribute GRAPH (the Facebook GRAPH API URL).
            api_version: API version string (e.g. 'v10.0'); falls back to
                API_VERSION when not given.
            enable_debug_logger: when True, each request is echoed as a curl
                command via the curlify package.
        """
        self._session = session
        self._num_requests_succeeded = 0
        self._num_requests_attempted = 0
        self._api_version = api_version or self.API_VERSION
        self._enable_debug_logger = enable_debug_logger

    def get_num_requests_attempted(self):
        """Returns the number of calls attempted."""
        return self._num_requests_attempted

    def get_num_requests_succeeded(self):
        """Returns the number of calls that succeeded."""
        return self._num_requests_succeeded

    @classmethod
    def init(
        cls,
        app_id=None,
        app_secret=None,
        access_token=None,
        account_id=None,
        api_version=None,
        proxies=None,
        timeout=None,
        debug=False,
        crash_log=True,
    ):
        """Creates a session and api instance and installs them as the
        process-wide defaults.
        Returns:
            The newly created FacebookAdsApi instance.
        """
        session = FacebookSession(app_id, app_secret, access_token, proxies,
                                  timeout)
        api = cls(session, api_version, enable_debug_logger=debug)
        cls.set_default_api(api)

        if account_id:
            cls.set_default_account_id(account_id)

        if crash_log:
            # Imported lazily to avoid a hard dependency at module import time.
            from facebook_business.crashreporter import CrashReporter
            if debug:
                CrashReporter.enableLogging()
            CrashReporter.enable()
        return api

    @classmethod
    def set_default_api(cls, api_instance):
        """Sets the default api instance.
        When making calls to the api, objects will revert to using the default
        api if one is not specified when initializing the objects.
        Args:
            api_instance: The instance which to set as default.
        """
        cls._default_api = api_instance

    @classmethod
    def get_default_api(cls):
        """Returns the default api instance."""
        return cls._default_api

    @classmethod
    def set_default_account_id(cls, account_id):
        """Validates and stores the default ad account id.
        Raises:
            ValueError: if account_id does not begin with 'act_'.
        """
        account_id = str(account_id)
        # The previous check (find('act_') == -1) only required that 'act_'
        # appeared *somewhere* in the string; the documented contract — and
        # the error message below — require the id to *begin* with it.
        if not account_id.startswith('act_'):
            raise ValueError(
                "Account ID provided in FacebookAdsApi.set_default_account_id "
                "expects a string that begins with 'act_'",
            )
        cls._default_account_id = account_id

    @classmethod
    def get_default_account_id(cls):
        """Returns the default ad account id, if one was set."""
        return cls._default_account_id

    def call(
        self,
        method,
        path,
        params=None,
        headers=None,
        files=None,
        url_override=None,
        api_version=None,
    ):
        """Makes an API call.
        Args:
            method: The HTTP method name (e.g. 'GET').
            path: A tuple of path tokens or a full URL string. A tuple will
                be translated to a url as follows:
                    graph_url/tuple[0]/tuple[1]...
                It will be assumed that if the path is not a string, it will be
                iterable.
            params (optional): A mapping of request parameters where a key
                is the parameter name and its value is a string or an object
                which can be JSON-encoded.
            headers (optional): A mapping of request headers where a key is the
                header name and its value is the header value.
            files (optional): An optional mapping of file names to binary open
                file objects. These files will be attached to the request.
        Returns:
            A FacebookResponse object containing the response body, headers,
            http status, and summary of the call that was made.
        Raises:
            FacebookResponse.error() if the request failed.
        """
        if not params:
            params = {}
        if not headers:
            headers = {}
        if not files:
            files = {}

        api_version = api_version or self._api_version

        # Fail fast on malformed version strings such as '10.0' or 'vX'.
        # Raw string avoids the invalid-escape DeprecationWarning for '\.'.
        if api_version and not re.search(r'v[0-9]+\.[0-9]+', api_version):
            raise FacebookBadObjectError(
                'Please provide the API version in the following format: %s'
                % self.API_VERSION,
            )

        self._num_requests_attempted += 1

        if not isinstance(path, six.string_types):
            # Path is not a full path; join the tokens into a graph URL.
            path = "/".join((
                url_override or self._session.GRAPH,
                api_version,
                '/'.join(map(str, path)),
            ))

        # Include api headers in http request
        headers = headers.copy()
        headers.update(FacebookAdsApi.HTTP_DEFAULT_HEADERS)

        if params:
            params = _top_level_param_json_encode(params)

        # Get request response and encapsulate it in a FacebookResponse.
        # GET/DELETE carry params in the query string; others in the body.
        if method in ('GET', 'DELETE'):
            response = self._session.requests.request(
                method,
                path,
                params=params,
                headers=headers,
                files=files,
                timeout=self._session.timeout
            )
        else:
            response = self._session.requests.request(
                method,
                path,
                data=params,
                headers=headers,
                files=files,
                timeout=self._session.timeout
            )
        if self._enable_debug_logger:
            import curlify
            print(curlify.to_curl(response.request))
        fb_response = FacebookResponse(
            body=response.text,
            headers=response.headers,
            http_status=response.status_code,
            call={
                'method': method,
                'path': path,
                'params': params,
                'headers': headers,
                'files': files,
            },
        )

        if fb_response.is_failure():
            raise fb_response.error()
        self._num_requests_succeeded += 1
        return fb_response

    def new_batch(self):
        """
        Returns a new FacebookAdsApiBatch, which when executed will go through
        this api.
        """
        return FacebookAdsApiBatch(api=self)
class FacebookAdsApiBatch(object):
"""
Exposes methods to build a sequence of calls which can be executed with
a single http request.
Note: Individual exceptions won't be thrown for each call that fails.
The success and failure callback functions corresponding to a call
should handle its success or failure.
"""
def __init__(self, api, success=None, failure=None):
    """Initializes the batch.
    Args:
        api: The FacebookAdsApi instance the batch executes through.
        success (optional): Callback appended to the success-callback list;
            per the class docstring it handles each call's success.
        failure (optional): Callback appended to the failure-callback list.
    """
    self._api = api
    self._files = []  # per-call file attachments
    self._batch = []  # queued call descriptions
    self._success_callbacks = []
    self._failure_callbacks = []
    if success is not None:
        self._success_callbacks.append(success)
    if failure is not None:
        self._failure_callbacks.append(failure)
    self._requests = []

def __len__(self):
    """Number of calls queued in the batch."""
    return len(self._batch)
def add(
self,
method,
relative_path,
params=None,
headers=None,
files=None,
success=None,
failure=None,
request=None,
):
"""Adds a call to the batch.
Args:
method: The HTTP method name (e.g. 'GET').
relative_path: A tuple of path tokens or a relative URL string.
A tuple will be translated to a url as follows:
<graph url>/<tuple[0]>/<tuple[1]>...
It will be assumed that if the path is not a string, it will be
iterable.
params (optional): A mapping of request parameters where a key
is the parameter name and its value is a string or an object
which can be JSON-encoded.
headers (optional): A mapping of request headers where a key is the
header name and its value is the header value.
files (optional): An optional mapping of file names to binary open
file objects. These files will be attached to the request.
success (optional): | |
if self.state_update_callbacks and self.doStateUpdate:
await self.do_state_update_callbacks()
async def set_inputs_state(self, extinputs):
    """Replace the cached external-input table and notify state subscribers."""
    # Re-key the raw list of input payloads by each entry's "appId".
    self._extinputs = {entry["appId"]: entry for entry in extinputs}
    if self.state_update_callbacks and self.doStateUpdate:
        await self.do_state_update_callbacks()
async def set_sound_output_state(self, sound_output):
    """Cache the latest sound-output value and notify state subscribers."""
    self._sound_output = sound_output
    if self.state_update_callbacks and self.doStateUpdate:
        await self.do_state_update_callbacks()

async def set_picture_settings_state(self, picture_settings):
    """Merge or replace cached picture settings and notify state subscribers."""
    # Presumably updates arrive as partial dicts: merge into the cache when
    # both sides are dicts, otherwise replace wholesale — TODO confirm against
    # the subscription payload format.
    if isinstance(self._picture_settings, dict) and isinstance(picture_settings, dict):
        self._picture_settings.update(picture_settings)
    else:
        self._picture_settings = picture_settings
    if self.state_update_callbacks and self.doStateUpdate:
        await self.do_state_update_callbacks()
# low level request handling
async def command(self, request_type, uri, payload=None, uid=None):
    """Build an SSAP message and send it over the main websocket.

    Raises:
        PyLGTVCmdException: if there is no active connection.
    """
    # Allocate a fresh message id from the running counter when none is given.
    if uid is None:
        uid = self.command_count
        self.command_count += 1

    envelope = {
        "id": uid,
        "type": request_type,
        "uri": f"ssap://{uri}",
        "payload": {} if payload is None else payload,
    }

    if self.connection is None:
        raise PyLGTVCmdException("Not connected, can't execute command.")
    await self.connection.send(json.dumps(envelope))
async def request(self, uri, payload=None, cmd_type="request", uid=None):
    """Send a request and wait for response.

    A Future keyed by the message id is registered *before* the command is
    sent; presumably the receive loop (outside this view) resolves it with
    the matching response message — TODO confirm.

    Raises:
        PyLGTVCmdException: on send failure or malformed/failed responses.
        PyLGTVServiceNotFoundError / PyLGTVCmdError: on error responses.
    """
    if uid is None:
        uid = self.command_count
        self.command_count += 1
    res = asyncio.Future()
    self.futures[uid] = res
    try:
        await self.command(cmd_type, uri, payload, uid)
    except (asyncio.CancelledError, PyLGTVCmdException):
        # Sending failed: drop the orphaned future before re-raising.
        del self.futures[uid]
        raise
    try:
        response = await res
    except asyncio.CancelledError:
        # The future may already have been removed elsewhere; guard the del.
        if uid in self.futures:
            del self.futures[uid]
        raise
    del self.futures[uid]
    payload = response.get("payload")
    if payload is None:
        raise PyLGTVCmdException(f"Invalid request response {response}")
    # Subscriptions report "subscribed" instead of "returnValue".
    returnValue = payload.get("returnValue") or payload.get("subscribed")

    if response.get("type") == "error":
        error = response.get("error")
        if error == "404 no such service or method":
            raise PyLGTVServiceNotFoundError(error)
        else:
            raise PyLGTVCmdError(response)
    elif returnValue is None:
        raise PyLGTVCmdException(f"Invalid request response {response}")
    elif not returnValue:
        raise PyLGTVCmdException(f"Request failed with response {response}")

    return payload
async def subscribe(self, callback, uri, payload=None):
    """Subscribe to updates.

    The callback is registered under the allocated message id first so
    update messages arriving for that id can be dispatched to it, then a
    "subscribe" request is issued on *uri*.
    """
    uid = self.command_count
    self.command_count += 1
    self.callbacks[uid] = callback
    try:
        return await self.request(
            uri, payload=payload, cmd_type="subscribe", uid=uid
        )
    except Exception:
        # Roll back the callback registration if the subscribe failed.
        del self.callbacks[uid]
        raise
async def input_command(self, message):
    """Send a raw command string over the separate input websocket,
    opening that websocket lazily on first use."""
    inputws = None
    try:
        # open additional connection needed to send button commands
        # the url is dynamically generated and returned from the ep.INPUT_SOCKET
        # endpoint on the main connection
        if self.input_connection is None:
            sockres = await self.request(ep.INPUT_SOCKET)
            inputsockpath = sockres.get("socketPath")
            inputws = await asyncio.wait_for(
                websockets.connect(
                    inputsockpath,
                    ping_interval=None,
                    close_timeout=self.timeout_connect,
                ),
                timeout=self.timeout_connect,
            )

            # Keep the input socket alive with the client's own ping handler
            # (built-in pings are disabled above via ping_interval=None).
            if self.ping_interval is not None and self.ping_timeout is not None:
                self.handler_tasks.add(asyncio.create_task(self.ping_handler(inputws)))
            self.input_connection = inputws

        if self.input_connection is None:
            raise PyLGTVCmdException("Couldn't execute input command.")

        await self.input_connection.send(message)
    except Exception as ex:
        # Surface errors through the pending connect future if still open.
        # NOTE(review): errors raised after connect completes appear to be
        # swallowed here — confirm this is intended.
        if not self.connect_result.done():
            self.connect_result.set_exception(ex)
# high level request handling
async def button(self, name, checkValid=True):
    """Send button press command."""
    # Reject unknown button names unless the caller opts out of validation.
    if checkValid and str(name) not in btn.BUTTONS:
        raise ValueError(
            f"button {name} is not valid, use checkValid=False to try a new one"
        )

    message = f"type:button\nname:{name}\n\n"
    await self.input_command(message)

async def move(self, dx, dy, down=0):
    """Send cursor move command."""
    message = f"type:move\ndx:{dx}\ndy:{dy}\ndown:{down}\n\n"
    await self.input_command(message)

async def click(self):
    """Send cursor click command."""
    message = f"type:click\n\n"
    await self.input_command(message)

async def scroll(self, dx, dy):
    """Send scroll command."""
    message = f"type:scroll\ndx:{dx}\ndy:{dy}\n\n"
    await self.input_command(message)
async def send_message(self, message, icon_path=None):
    """Show a floating message, optionally with a base64-encoded icon."""
    icon_encoded_string = ""
    icon_extension = ""

    if icon_path is not None:
        # Extension without the leading dot, e.g. "png".
        icon_extension = os.path.splitext(icon_path)[1][1:]
        with open(icon_path, "rb") as icon_file:
            icon_encoded_string = base64.b64encode(icon_file.read()).decode("ascii")

    return await self.request(
        ep.SHOW_MESSAGE,
        {
            "message": message,
            "iconData": icon_encoded_string,
            "iconExtension": icon_extension,
        },
    )
async def get_power_state(self):
    """Get current power state."""
    return await self.request(ep.GET_POWER_STATE)

async def subscribe_power(self, callback):
    """Subscribe to current power state."""
    return await self.subscribe(callback, ep.GET_POWER_STATE)
# Apps
async def get_apps(self):
    """Return all apps."""
    res = await self.request(ep.GET_APPS)
    return res.get("launchPoints")

async def subscribe_apps(self, callback):
    """Subscribe to changes in available apps."""
    return await self.subscribe(callback, ep.GET_APPS)

async def get_apps_all(self):
    """Return all apps, including hidden ones."""
    res = await self.request(ep.GET_APPS_ALL)
    return res.get("apps")

async def get_current_app(self):
    """Get the current app id."""
    res = await self.request(ep.GET_CURRENT_APP_INFO)
    return res.get("appId")

async def subscribe_current_app(self, callback):
    """Subscribe to changes in the current app id."""

    async def current_app(payload):
        # Unwrap the payload so subscribers receive just the app id.
        await callback(payload.get("appId"))

    return await self.subscribe(current_app, ep.GET_CURRENT_APP_INFO)

async def launch_app(self, app):
    """Launch an app."""
    return await self.request(ep.LAUNCH, {"id": app})

async def launch_app_with_params(self, app, params):
    """Launch an app with parameters."""
    return await self.request(ep.LAUNCH, {"id": app, "params": params})

async def launch_app_with_content_id(self, app, contentId):
    """Launch an app with contentId."""
    return await self.request(ep.LAUNCH, {"id": app, "contentId": contentId})

async def close_app(self, app):
    """Close the current app."""
    return await self.request(ep.LAUNCHER_CLOSE, {"id": app})
# Services
async def get_services(self):
    """Get all services."""
    res = await self.request(ep.GET_SERVICES)
    return res.get("services")

async def get_software_info(self):
    """Return the current software status."""
    return await self.request(ep.GET_SOFTWARE_INFO)

async def get_system_info(self):
    """Return the system information."""
    return await self.request(ep.GET_SYSTEM_INFO)

async def get_hello_info(self):
    """Return hello information."""
    # Returns cached state; presumably populated during the connection
    # handshake (set outside this view) — no request is made here.
    return self._hello_info
async def power_off(self):
    """Power off TV."""
    # protect against turning tv back on if it is off
    power_state = await self.get_power_state()
    self._power_state = {"state": power_state.get("state", "Unknown")}

    if not self.is_on:
        return

    # if tv is shutting down and standby+ option is not enabled,
    # response is unreliable, so don't wait for one,
    # hence command() (fire-and-forget) instead of request().
    await self.command("request", ep.POWER_OFF)
async def power_on(self):
    """Power on TV."""
    return await self.request(ep.POWER_ON)
async def turn_screen_off(self, webos_ver=""):
    """Turn TV Screen off. standbyMode values: 'active' or 'passive',
    passive cannot turn screen back on, need to pull TV plug.
    """
    # The endpoint name varies by webOS version, e.g. TURN_OFF_SCREEN_WO4.
    epName = f"TURN_OFF_SCREEN_WO{webos_ver}" if webos_ver else "TURN_OFF_SCREEN"

    if not hasattr(ep, epName):
        raise ValueError(f"there's no {epName} endpoint")

    return await self.request(getattr(ep, epName), {"standbyMode": "active"})

async def turn_screen_on(self, webos_ver=""):
    """Turn TV Screen on. standbyMode values: 'active' or 'passive',
    passive cannot turn screen back on, need to pull TV plug.
    """
    # Same version-dependent endpoint resolution as turn_screen_off.
    epName = f"TURN_ON_SCREEN_WO{webos_ver}" if webos_ver else "TURN_ON_SCREEN"

    if not hasattr(ep, epName):
        raise ValueError(f"there's no {epName} endpoint")

    return await self.request(getattr(ep, epName), {"standbyMode": "active"})
# 3D Mode
async def turn_3d_on(self):
    """Turn 3D on."""
    return await self.request(ep.SET_3D_ON)

async def turn_3d_off(self):
    """Turn 3D off."""
    return await self.request(ep.SET_3D_OFF)
# Inputs
async def get_inputs(self):
    """Get all inputs."""
    res = await self.request(ep.GET_INPUTS)
    return res.get("devices")

async def subscribe_inputs(self, callback):
    """Subscribe to changes in available inputs."""

    async def inputs(payload):
        await callback(payload.get("devices"))

    return await self.subscribe(inputs, ep.GET_INPUTS)

async def get_input(self):
    """Get current input."""
    # Delegates to the current app id (inputs surface as apps on webOS).
    return await self.get_current_app()

async def set_input(self, input):
    """Set the current input."""
    return await self.request(ep.SET_INPUT, {"inputId": input})
# Audio
async def get_audio_status(self):
    """Get the current audio status"""
    return await self.request(ep.GET_AUDIO_STATUS)

async def get_muted(self):
    """Get mute status."""
    # Derived from the full audio-status payload; no dedicated endpoint.
    status = await self.get_audio_status()
    return status.get("mute")

async def subscribe_muted(self, callback):
    """Subscribe to changes in the current mute status."""

    async def muted(payload):
        # Forward only the mute flag to the subscriber.
        await callback(payload.get("mute"))

    return await self.subscribe(muted, ep.GET_AUDIO_STATUS)

async def set_mute(self, mute):
    """Set mute."""
    return await self.request(ep.SET_MUTE, {"mute": mute})

async def get_volume(self):
    """Get the current volume."""
    # Some payloads nest the value under "volumeStatus"; fall back to the
    # top-level payload otherwise.
    res = await self.request(ep.GET_VOLUME)
    return res.get("volumeStatus", res).get("volume")

async def subscribe_volume(self, callback):
    """Subscribe to changes in the current volume."""

    async def volume(payload):
        # Same nested-or-flat payload handling as get_volume().
        await callback(payload.get("volumeStatus", payload).get("volume"))

    return await self.subscribe(volume, ep.GET_VOLUME)

async def set_volume(self, volume):
    """Set volume."""
    # Clamp negative values to zero before sending.
    volume = max(0, volume)
    return await self.request(ep.SET_VOLUME, {"volume": volume})

async def volume_up(self):
    """Volume up."""
    return await self._volume_step(ep.VOLUME_UP)

async def volume_down(self):
    """Volume down."""
    return await self._volume_step(ep.VOLUME_DOWN)
async def _volume_step(self, endpoint):
    """Volume step and conditionally sleep afterwards if a consecutive volume step shouldn't be possible to perform immediately after."""
    if (
        self.sound_output in SOUND_OUTPUTS_TO_DELAY_CONSECUTIVE_VOLUME_STEPS
        and self._volume_step_delay is not None
    ):
        # Sleep while still holding the lock so a concurrent step cannot
        # fire before the delay elapses.
        async with self._volume_step_lock:
            response = await self.request(endpoint)
            await asyncio.sleep(self._volume_step_delay.total_seconds())
        return response
    else:
        return await self.request(endpoint)
# TV Channel
async def channel_up(self):
    """Step the TV channel up by one."""
    response = await self.request(ep.TV_CHANNEL_UP)
    return response
async def channel_down(self):
    """Step the TV channel down by one."""
    response = await self.request(ep.TV_CHANNEL_DOWN)
    return response
async def get_channels(self):
    """Return the list of TV channels from the TV."""
    response = await self.request(ep.GET_TV_CHANNELS)
    return response.get("channelList")
async def subscribe_channels(self, callback):
    """Invoke `callback` with the channel list whenever it changes."""
    async def handler(payload):
        await callback(payload.get("channelList"))
    return await self.subscribe(handler, ep.GET_TV_CHANNELS)
async def get_current_channel(self):
    """Return the current TV channel payload."""
    channel = await self.request(ep.GET_CURRENT_CHANNEL)
    return channel
async def subscribe_current_channel(self, callback):
    """Invoke `callback` with the raw payload whenever the channel changes."""
    subscription = await self.subscribe(callback, ep.GET_CURRENT_CHANNEL)
    return subscription
async def get_channel_info(self):
    """Return info about the current channel."""
    info = await self.request(ep.GET_CHANNEL_INFO)
    return info
async def subscribe_channel_info(self, callback):
    """Invoke `callback` with the raw payload when channel info changes."""
    subscription = await self.subscribe(callback, ep.GET_CHANNEL_INFO)
    return subscription
async def set_channel(self, channel):
    """Tune the TV to the given channel id."""
    payload = {"channelId": channel}
    return await self.request(ep.SET_CHANNEL, payload)
async def get_sound_output(self):
    """Return the currently active audio output."""
    response = await self.request(ep.GET_SOUND_OUTPUT)
    return response.get("soundOutput")
async def subscribe_sound_output(self, callback):
    """Invoke `callback` with the sound output whenever it changes."""
    async def handler(payload):
        await callback(payload.get("soundOutput"))
    return await self.subscribe(handler, ep.GET_SOUND_OUTPUT)
async def change_sound_output(self, output):
    """Switch the TV's audio output to `output`."""
    payload = {"output": output}
    return await self.request(ep.CHANGE_SOUND_OUTPUT, payload)
# Media control
async def play(self):
    """Resume playback of the current media."""
    response = await self.request(ep.MEDIA_PLAY)
    return response
async def pause(self):
    """Pause playback of the current media."""
    response = await self.request(ep.MEDIA_PAUSE)
    return response
async def stop(self):
    """Stop playback of the current media."""
    response = await self.request(ep.MEDIA_STOP)
    return response
async def close(self):
"""Close | |
"""설정 GUI."""
import time
from configparser import ConfigParser
from copy import deepcopy
from collections import defaultdict
from functools import partial
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter.simpledialog import askstring
from pyathena import connect
from datepicker import Datepicker
from dialog import Dialog, ModelessDlg, ConfirmDlg, VersionDlg, TableDlg
from util import *
# Main window and notebook geometry (pixels).
WIN_WIDTH = 370
WIN_HEIGHT = 720
NB_WIDTH = 330
NB_HEIGHT = 630
# Lazily-initialized DB connection state (filled in after connect).
databases = cursor = None
# Parsed configuration; org_cfg holds the last-saved copy for change detection.
cfg = ConfigParser()
org_cfg = None
# Maps profile name -> Profile instance, one per notebook tab.
profiles = {}
def save_config(cfg):
    """Write `cfg` to disk, stamping the local version, and keep it as the baseline."""
    global org_cfg
    if 'default' not in cfg:
        cfg['default'] = {}
    cfg['default']['version'] = get_local_version()
    info("Save config file to {}".format(cfg_path))
    # Log every section except the AWS credentials.
    info({name: dict(cfg[name]) for name in cfg.sections() if name != 'aws'})
    with open(cfg_path, 'w') as fobj:
        cfg.write(fobj)
    org_cfg = deepcopy(cfg)
class AWSConfigDlg(Dialog):
    """Modal dialog for entering the AWS access/secret key and S3 staging dir."""

    def body(self, master):
        # Build three labeled Entry rows; the secret key is masked on screen.
        self.need_connect = False
        Label(master, text="Access Key :").grid(row=0)
        Label(master, text="Secret Key :").grid(row=1)
        Label(master, text="S3 Stage Dir :").grid(row=2)
        self.acval = StringVar()
        self.ackey = Entry(master, textvariable=self.acval, width=30)
        self.scval = StringVar()
        self.sckey = Entry(master, textvariable=self.scval, width=30)
        self.sckey.configure(show="*")  # mask the secret key
        self.s3val = StringVar()
        self.s3dir = Entry(master, textvariable=self.s3val, width=30)
        try:
            # Pre-fill from the existing config; a missing 'aws' section is fine.
            self.acval.set(cfg['aws']['access_key'])
            self.scval.set(cfg['aws']['secret_key'])
            self.s3val.set(cfg['aws']['s3_stage_dir'])
        except Exception as e:
            pass
        self.ackey.grid(row=0, column=1)
        self.sckey.grid(row=1, column=1)
        self.s3dir.grid(row=2, column=1)
        return self.ackey  # initial focus

    def validate(self):
        """Return 1 when all three fields are non-empty; otherwise show an
        error dialog, refocus the offending field and return 0."""
        access_key = self.ackey.get()
        secret_key = self.sckey.get()
        s3_stage_dir = self.s3dir.get()
        if len(access_key) == 0:
            messagebox.showerror("에러", "Access Key를 입력해주세요.")
            self.initial_focus = self.ackey
            return 0
        if len(secret_key) == 0:
            messagebox.showerror("에러", "Secret Key를 입력해주세요.")
            self.initial_focus = self.sckey
            return 0
        if len(s3_stage_dir) == 0:
            messagebox.showerror("에러", "S3 Stage 경로를 입력해주세요.")
            self.initial_focus = self.s3dir
            return 0
        return 1

    def apply(self):
        """Store the entered values under cfg['aws'] and persist the config."""
        global cfg
        cfg['aws'] = {}
        cfg['aws']['access_key'] = self.ackey.get()
        cfg['aws']['secret_key'] = self.scval.get() if False else self.sckey.get()
        cfg['aws']['s3_stage_dir'] = self.s3dir.get()
        # NOTE(review): ConfigParser does not define __eq__, so `!=` compares
        # object identity here and is effectively always True — confirm whether
        # a content comparison was intended.
        self.need_connect = org_cfg != cfg
        save_config(cfg)
def get_current_profile():
    """Return the Profile object behind the currently selected notebook tab."""
    tab_id = notebook.select()
    tab_text = notebook.tab(tab_id)['text']
    return profiles[tab_text]
def on_ttype():
    """Time-type radio handler: swap the absolute/relative date frames."""
    get_current_profile().switch_absrel_frame()
def disable_controls():
    """Disable all per-profile and global controls while a long task runs.

    Also remembers the currently selected notebook tab in `prev_tab` so it
    can be restored afterwards.
    """
    info("disable_controls")
    global prev_tab, win
    for pro in profiles.values():
        pro.disable_controls()
    for ctrl in global_disable_targets:
        ctrl['state'] = 'disabled'
    pt = notebook.select()
    if len(pt) > 0:
        prev_tab = pt
    # for item in notebook.tabs():
    #     notebook.tab(item, state='disabled')
    # Force a repaint so the disabled state is visible immediately.
    win.update_idletasks()
def on_db_sel(eobj):
    """<<ComboboxSelected>> handler: load the chosen database's tables.

    Shows a wait cursor and disables the UI first, then defers the
    (potentially slow) table fetch via `win.after` so tkinter can repaint
    before the blocking work starts.
    """
    set_wait_cursor()
    disable_controls()
    def _db_set():
        curpro = get_current_profile()
        curpro.db_set()
        enable_controls()
        unset_wait_cursor()
    win.after(10, _db_set)
def on_all_table():
    """Check every table checkbox in the current profile."""
    profile = get_current_profile()
    for var in profile.tbl_cvs:
        var.set(1)
    on_check()
def on_no_table():
    """Uncheck every table checkbox in the current profile."""
    profile = get_current_profile()
    for var in profile.tbl_cvs:
        var.set(0)
    on_check()
def on_del_cache():
    """Delete the current profile's local cache and tell the user."""
    profile = get_current_profile()
    del_cache(profile.name)
    ConfirmDlg(win, "정보", "프로파일 '{}' 의 캐쉬를 제거했습니다.".format(profile.name), width=300, x=30)
def on_save():
    """Validate every profile and persist the combined configuration.

    The actual work is deferred with `win.after` so the wait cursor and
    disabled controls take effect first. On success the window is closed;
    on any profile validation failure the UI is re-enabled and nothing is
    saved.
    """
    global cfg, profiles
    set_wait_cursor()
    disable_controls()
    def _save(cfg):
        _cfg = ConfigParser()
        _cfg['aws'] = cfg['aws']
        # Validate the settings of every profile.
        for pro in profiles.values():
            pcfg = pro.validate_cfg()
            if pcfg is None:
                # Validation failed — abort without saving.
                enable_controls()
                unset_wait_cursor()
                return
            sname = "profile.{}".format(pro.name)
            _cfg[sname] = pcfg
        cfg = _cfg
        save_config(cfg)
        enable_controls()
        unset_wait_cursor()
        win.destroy()
    win.after(100, lambda: _save(cfg))
class Profile:
def __init__(self, name, win, notebook, proidx):
    """Build one settings-profile notebook tab.

    Args:
        name: profile name (also used as the tab label).
        win: the toplevel Tk window the tab's widgets belong to.
        notebook: the ttk.Notebook this profile's frame is added to.
        proidx: ordinal index of this profile.
    """
    # NOTE(review): `global pro` is declared but `pro` is never assigned in
    # this method — confirm whether this is leftover code.
    global pro
    self.name = name
    # Per-DB selections: db -> [table, ...] and db -> table -> [column, ...].
    self.selected_tables = {}
    self.selected_columns = defaultdict(lambda: defaultdict(lambda: list()))
    self.first_sel_db = self.org_pcfg = None
    self.proidx = proidx
    #
    # Document Data (tk variables backing the widgets)
    #
    warning("Init document data")
    self.ttype = StringVar()   # time-range type: 'rel' or 'abs'
    self.ttype.set('rel')
    self.lct_val = IntVar()    # local cache validity, in hours
    self.lct_val.set(6)
    self.rel_bg_var = IntVar()  # relative range: start N days ago
    self.rel_bg_var.set(1)
    self.rel_off_var = IntVar()  # relative range: span of M days
    self.rel_off_var.set(1)
    self.st_dp_val = self.ed_dp_val = None
    self.db_val = StringVar()
    #
    # UI
    #
    self.notebook = notebook
    self.pro_frame = Frame(win)
    notebook.add(self.pro_frame, text=name)
    self.abs_frame = LabelFrame(self.pro_frame, text="날자 선택")
    self.rel_frame = LabelFrame(self.pro_frame, text="범위 선택")
    # Time-type radio buttons (relative vs absolute)
    self.ttype_frame = Frame(self.pro_frame)
    self.rel_rbt = Radiobutton(self.ttype_frame, text="상대 시간", variable=self.ttype, value="rel", command=on_ttype)
    self.rel_rbt.pack(side=LEFT, expand=True, padx=(20, 10))
    self.abs_rbt = Radiobutton(self.ttype_frame, text="절대 시간", variable=self.ttype, value="abs", command=on_ttype)
    self.abs_rbt.pack(side=LEFT, expand=True, padx=(10, 20))
    self.ttype_frame.pack(side=TOP, pady=(20, 0))
    # Relative-time entries ("N days ago, M days of data")
    self.rel_bg_etr = Entry(self.rel_frame, textvariable=self.rel_bg_var, width=5, justify=CENTER)
    self.rel_bg_etr.pack(side=LEFT, padx=(15, 5), pady=10)
    Label(self.rel_frame, text="일 전부터").pack(side=LEFT, pady=10)
    self.rel_off_etr = Entry(self.rel_frame, textvariable=self.rel_off_var, width=5, justify=CENTER)
    self.rel_off_etr.pack(side=LEFT, padx=(10, 5), pady=10)
    Label(self.rel_frame, text="일치 데이터").pack(side=LEFT, padx=(0, 15), pady=10)
    self.pack_rel_frame()
    # Absolute-time start/end date pickers
    self.dts_frame = Frame(self.abs_frame)
    st_lbl = Label(self.dts_frame, justify="left", text="시작일")
    st_lbl.grid(row=0, column=0, stick=W, padx=(10, 20), pady=3)
    self.st_dp = Datepicker(self.dts_frame)
    self.st_dp.grid(row=0, column=1, padx=(10, 20), pady=3)
    self.dts_frame.pack(side=TOP)
    self.dte_frame = Frame(self.abs_frame)
    ed_lbl = Label(self.dte_frame, justify="left", text="종료일")
    ed_lbl.grid(row=0, column=0, stick=W, padx=(10, 20), pady=3)
    self.ed_dp = Datepicker(self.dte_frame)
    self.ed_dp.grid(row=0, column=1, padx=(10, 20), pady=3)
    self.dte_frame.pack(side=TOP, pady=(7, 10))
    self.pack_abs_frame()
    # Frame holding everything below the date controls
    self.after_dt_frame = Frame(self.pro_frame)
    self.sel_frame = Frame(self.after_dt_frame)
    # Database selection UI
    self.db_frame = LabelFrame(self.sel_frame, text="DB 선택")
    self.db_combo = ttk.Combobox(self.db_frame, width=20, textvariable=self.db_val, state="readonly")
    self.cur_db = None
    self.db_combo.bind("<<ComboboxSelected>>", on_db_sel)
    self.db_combo.pack(padx=10, pady=(10,))
    self.db_frame.pack(side=TOP)
    # Table selection UI: a Text widget used as a scrollable checkbox list
    self.tbl_frame = LabelFrame(self.sel_frame, text="테이블 선택")
    self.tbl_text = Text(self.tbl_frame, wrap="none", height=10, background=self.tbl_frame.cget('bg'), bd=0)
    self.tbl_text.grid_propagate(False)
    self.tbl_vsb = Scrollbar(self.tbl_frame, orient="vertical", command=self.tbl_text.yview)
    self.tbl_text.configure(yscrollcommand=self.tbl_vsb.set)
    self.tbl_vsb.pack(side="right", fill="y")
    self.tbl_text.pack(fill='both', expand=True, padx=15, pady=(5, 20))
    self.tbl_frame.pack(side=TOP, fill=None, expand=False, padx=20, pady=(10, 5))
    self.sel_frame.pack(side=TOP)
    # Parallel lists: checkbox widgets, '+' buttons, and their IntVars.
    self.tbl_ckbs = []
    self.tbl_ckbbs = []
    self.tbl_cvs = []
    # Select-all / clear-all table buttons
    self.tbb_frame = Frame(self.after_dt_frame)
    self.all_btn = ttk.Button(self.tbb_frame, text="전체 선택", width=8, command=on_all_table)
    self.all_btn.pack(side=LEFT, expand=YES)
    self.none_btn = ttk.Button(self.tbb_frame, text="전체 취소", width=8, command=on_no_table)
    self.none_btn.pack(side=LEFT, expand=YES)
    self.tbb_frame.pack(fill=BOTH, expand=YES)
    self.switch_absrel_frame()
    # Summary box listing all selected targets (read-only)
    self.target_frame = LabelFrame(self.after_dt_frame, text="모든 선택된 대상")
    self.target_text = Text(self.target_frame, wrap="none", height=3.5, background=self.target_frame.cget('bg'), bd=0)
    self.target_text.grid_propagate(False)
    self.target_vsb = Scrollbar(self.target_frame, orient="vertical", command=self.target_text.yview)
    self.target_vsb.pack(side='right', fill='y')
    self.target_hsb = Scrollbar(self.target_frame, orient="horizontal", command=self.target_text.xview)
    self.target_hsb.pack(side="bottom", fill="x")
    self.target_text.configure(xscrollcommand=self.target_hsb.set, yscrollcommand=self.target_vsb.set)
    self.target_text.pack(fill='both', expand=True, padx=15, pady=(5, 20))
    self.target_text['state'] = 'disabled'
    self.target_frame.pack(side=TOP, fill=None, expand=False, padx=20, pady=(10, 0))
    self.update_targets_text()
    # Local cache validity (hours)
    self.lct_frame = Frame(self.after_dt_frame)
    Label(self.lct_frame, text="로컬 캐쉬 유효 시간:").pack(side=LEFT)
    self.lct_etr = Entry(self.lct_frame, textvariable=self.lct_val, width=3, justify="center")
    self.lct_etr.pack(side=LEFT, padx=(5, 2))
    Label(self.lct_frame, text="시간").pack(side=LEFT)
    self.lct_frame.pack(side=TOP, pady=(10, 0))
    self.confirm_frame = Frame(self.after_dt_frame)
    self.flush_btn = ttk.Button(self.confirm_frame, text="로컬 캐쉬 제거", width=15, command=on_del_cache)
    self.flush_btn.pack(side=LEFT, expand=YES, padx=10, pady=10)
    self.confirm_frame.pack(fill=BOTH, expand=YES)
    # Controls toggled by disable_controls()/enable_controls().
    self.disable_targets = [self.st_dp, self.ed_dp, self.all_btn, self.none_btn, self.lct_etr,
                            self.db_combo, self.rel_rbt, self.abs_rbt, self.rel_bg_etr, self.rel_off_etr, self.flush_btn]
def select_table(self, table):
    """Check the checkbox of `table` in the current DB's list and refresh state."""
    for idx, ckb in enumerate(self.tbl_ckbs):
        if ckb['text'] == table:
            self.tbl_cvs[idx].set(1)
    self.update_sel_tables()
    self.update_targets_text()
def unselect_table(self, table):
    """Uncheck the checkbox of `table` in the current DB's list and refresh state."""
    for idx, ckb in enumerate(self.tbl_ckbs):
        if ckb['text'] == table:
            self.tbl_cvs[idx].set(0)
    self.update_sel_tables()
    self.update_targets_text()
def set_databases(self, databases):
    """Populate the DB combobox and select the remembered first database."""
    self.db_combo['values'] = databases
    # Select the first (remembered) DB.
    db_idx = self.find_first_db_idx()
    info("set_databases set db idx {}".format(db_idx))
    # NOTE(review): Combobox.current(newindex) SETS the selection and returns
    # an empty string, so this assignment clobbers self.first_sel_db — confirm
    # whether storing the return value was intended.
    self.first_sel_db = self.db_combo.current(db_idx)
def db_set(self):
    """Adopt the combobox's selection as the current DB and load its tables."""
    assert len(self.db_combo['values']) > 0
    chosen = self.db_combo.get()
    self.cur_db = chosen
    info("Read tables from '{}'.".format(chosen))
    self.fill_tables(chosen)
def find_first_db_idx(self):
    """Index of the remembered first-selected DB in the global list, or 0."""
    matches = (idx for idx, db in enumerate(databases) if db == self.first_sel_db)
    return next(matches, 0)
def set_wait_cursor(self):
    """Show a busy cursor over the table list (no-op when it doesn't exist)."""
    if self.tbl_text is None:
        return
    self.tbl_text.config(cursor='wait')
def unset_wait_cursor(self):
    """Restore the default cursor over the table list (no-op when absent)."""
    if self.tbl_text is None:
        return
    self.tbl_text.config(cursor='')
def pack_rel_frame(self):
    """Show the relative-time frame."""
    self.rel_frame.pack(pady=7, side=TOP)
def pack_abs_frame(self):
    """Show the absolute-time frame."""
    self.abs_frame.pack(pady=7, side=TOP)
def switch_absrel_frame(self):
    """Show the date frame matching the selected time type, hiding the other.

    The after-date frame is unpacked first and re-packed last so it always
    ends up below whichever date frame is visible.
    """
    self.after_dt_frame.pack_forget()
    if self.ttype.get() == 'rel':
        self.abs_frame.pack_forget()
        self.pack_rel_frame()
    else:
        self.rel_frame.pack_forget()
        self.pack_abs_frame()
    self.after_dt_frame.pack(side=TOP)
def apply_profile_cfg(self, pcfg):
    """Apply a profile section read from the config file to this profile.

    Restores the time-range type, relative/absolute date values, per-DB
    selected tables (and per-table column selections), and the local cache
    validity hours, then refreshes the targets summary box.

    Args:
        pcfg: a ConfigParser section (mapping of option name -> string).
    """
    from ast import literal_eval  # safe parser for the stored table lists

    info("apply_profile_cfg")
    self.org_pcfg = deepcopy(pcfg)
    # Target time type ('rel' or 'abs') and its associated values.
    if 'ttype' in pcfg:
        self.ttype.set(pcfg['ttype'])
        on_ttype()
    if 'before' in pcfg:
        self.rel_bg_var.set(int(pcfg['before']))
    if 'offset' in pcfg:
        self.rel_off_var.set(int(pcfg['offset']))
    if 'start' in pcfg:
        self.st_dp_val = parse(pcfg['start']).date()
    if 'end' in pcfg:
        self.ed_dp_val = parse(pcfg['end']).date()
    if self.st_dp_val is not None:
        self.st_dp.current_date = self.st_dp_val
    if self.ed_dp_val is not None:
        self.ed_dp.current_date = self.ed_dp_val
    # Walk the per-database table lists (option keys look like 'db_<name>').
    for key in pcfg.keys():
        if not key.startswith('db_'):
            continue
        db = key[3:]
        # SECURITY: the stored value is a Python literal (list of table names
        # or (table, columns) tuples). literal_eval parses it without
        # executing arbitrary code, unlike the eval() used previously.
        tables = literal_eval(pcfg[key])
        if self.first_sel_db is None and len(tables) > 0:
            self.first_sel_db = db
        self.selected_tables[db] = []
        for tbl in tables:
            # Entries carrying column selections are (table, columns) tuples.
            if type(tbl) is not str:
                tbl, tcols = tbl
                self.selected_columns[db][tbl] = tcols
            self.selected_tables[db].append(tbl)
    if 'cache_valid_hour' in pcfg:
        cache_valid_hour = int(pcfg['cache_valid_hour'])
        self.lct_val.set(cache_valid_hour)
    self.update_targets_text()
def fill_tables(self, db):
    """Rebuild the table checkbox list for database `db`.

    Tables previously selected for this DB (self.selected_tables) come back
    checked. Each row is a checkbox plus a small '+' button opening the
    per-table column selection.
    """
    info("fill_tables for {}".format(db))
    tables = get_tables(db)
    # Tables previously selected for this DB.
    if db in self.selected_tables:
        selected = self.selected_tables[db]
        info(" selected: {}".format(selected))
    else:
        selected = []
    # Tear down the widgets left over from the previous DB.
    for ckb in self.tbl_ckbs:
        ckb.destroy()
    for ckbb in self.tbl_ckbbs:
        ckbb.destroy()
    self.tbl_ckbs = []
    self.tbl_ckbbs = []
    self.tbl_cvs = []
    self.tbl_text.configure(state='normal')
    self.tbl_text.delete('1.0', END)
    def make_on_table(tbl):
        # Bind tbl by value; a bare lambda in the loop would late-bind.
        return lambda: on_table(self, db, tbl)
    for i, tbl in enumerate(tables):
        cv = IntVar()
        if tbl in selected:
            cv.set(1)
        ckbf = Frame(self.tbl_text)
        ckb = Checkbutton(ckbf, text=tbl, variable=cv, command=on_check)
        ckb.pack(side=LEFT)
        ckbb = Button(ckbf, text='+', command=make_on_table(tbl), width=1, height=1, font="Helvetica 6")
        ckbb.pack(side=RIGHT, padx=(10,0))
        # Embed the row frame into the Text widget to get a scrollable list.
        self.tbl_text.window_create("end", window=ckbf)
        self.tbl_text.insert("end", "\n")
        self.tbl_ckbs.append(ckb)
        self.tbl_ckbbs.append(ckbb)
        self.tbl_cvs.append(cv)
    self.tbl_text.configure(state='disabled')
    self.tbl_frame.update()
def update_sel_tables(self):
    """Record which tables are currently checked for the current DB."""
    db = self.cur_db
    info("update_sel_tables for {}".format(db))
    chosen = []
    for idx, var in enumerate(self.tbl_cvs):
        name = self.tbl_ckbs[idx]['text']
        if var.get() == 1:
            chosen.append(name)
        else:
            # Unchecked table: drop any remembered column selection too.
            self.selected_columns[db][name] = []
    self.selected_tables[db] = chosen
def get_ui_target_date(self):
    """Read the target date settings from the UI.

    Returns:
        ('rel', before_days, offset_days) when relative time is selected,
        ('abs', start_date, end_date) otherwise.
    """
    if self.ttype.get() != 'rel':
        # Absolute range: parse both date-picker strings into dates.
        start_date = parse(self.st_dp.get()).date()
        end_date = parse(self.ed_dp.get()).date()
        return 'abs', start_date, end_date
    # Relative range: N days ago, spanning M days.
    return 'rel', self.rel_bg_var.get(), self.rel_off_var.get()
def validate_cfg(self):
"""프로파일 설정 값 확인.
프로파일 UI에 설정된 값들을 확인하고, dict 넣어 반환
Returns:
dict
"""
| |
>= 1:
grid = grid[0]
data = __convertSvgStringTo(grid.dendogram, request.POST['convertTo'])
if request.POST.has_key('fileName'):
data.fileName = request.POST['fileName']
else:
data.fileName = generateRandomString()
# return the file
return createFileResponse(data)
else:
if not request.POST['gridUSID']:
raise ValueError('gridUSID had an invalid value: ' + request.POST['gridUSID'])
if not request.POST['convertTo']:
raise ValueError('convertTo had an invalid value: ' + request.POST['convertTo'])
else:
if not request.POST.has_key('gridUSID'):
raise Exception('gridUSID key was not received')
if not request.POST.has_key('convertTo'):
raise Exception('convertTo key was not received')
except:
if DEBUG:
print "Exception in user code:"
print '-' * 60
traceback.print_exc(file=sys.stdout)
print '-' * 60
# in case of an error or failing of one the checks return an image error
errorImageData = getImageError()
# send the file
response = HttpResponse(errorImageData, content_type='image/jpg')
response['Content-Disposition'] = 'attachment; filename=error.jpg'
return response
def __convertSvgStringTo(svgString=None, convertTo=None):
    """
    Convert a string that contains svg data into image data that can be
    sent over the internet and be downloaded as a file.

    Parameters:
        svgString: string containing the svg data.
        convertTo: string; currently only 'svg' is fully supported, other
            values are delegated to convertSvgTo() (old, untested path).

    Return:
        rgt/gridMng/fileData.FileData

    Raises:
        ValueError: when either argument is falsy.
        Exception: when the external conversion produced no file.
    """
    imgData = FileData()
    if svgString and convertTo:
        if convertTo == 'svg':
            # No conversion needed: serve the SVG text as-is.
            imgData.data = svgString
            imgData.contentType = 'image/svg+xml'
            imgData.fileExtension = 'svg'
            return imgData
        #################################################################
        ## Warning, old code that wasn't tested with the new functions ##
        #################################################################
        else:
            (imageFileName, mimeType, fileExtention) = convertSvgTo(svgString, convertTo)
            imgData.contentType = mimeType
            imgData.fileExtension = fileExtention
            if imageFileName is not None:
                try:
                    fp = open(imageFileName, "rb")
                    try:
                        # Read the converted file in one call. The previous
                        # implementation read one byte at a time (very slow)
                        # and compared bytes against '' — a loop that would
                        # never terminate under Python 3 semantics.
                        data = fp.read()
                    finally:
                        fp.close()
                finally:
                    # Remove the temp file even if reading failed, so failed
                    # conversions no longer leak files on disk.
                    os.remove(imageFileName)
                imgData.data = data
                imgData.length = len(data)
                return imgData
            else:
                raise Exception('Error image file name was None')
    else:
        raise ValueError('svgString or convertTo was None')
def __validateInputForGrid(request, isConcernAlternativeResponseGrid):
    """
    Validate the POSTed concern/alternative/rating data used to build a grid.

    Arguments:
        request: HttpRequest carrying the concerns, alternatives, ratings and
            weights as POST fields.
        isConcernAlternativeResponseGrid: boolean; when True the grid is an
            alternative/concern response grid, which carries no weights and
            no ratios, so those checks are skipped.

    Return:
        A 5-tuple (nConcerns, nAlternatives, concernValues, alternativeValues,
        ratioValues). concernValues is a list of (leftPole, rightPole, weight)
        tuples; ratioValues is a list (one per concern) of per-alternative
        float ratings.

    Raises:
        KeyError: when an expected POST field is missing.
        ValueError: for duplicate/empty names or non-numeric values.
    """
    concernValues = []  # this will contain a tuple with 3 values, (leftPole, rightPole, weight)
    alternativeValues = []
    ratioValues = []
    usedConcernNames = []
    i = 0
    j = 0
    nAlternatives = int(request.POST['nAlternatives'])
    nConcerns = int(request.POST['nConcerns'])
    # check if the keys with the alternatives are present
    while i < nAlternatives:
        keyName = 'alternative_' + str((i + 1)) + '_name'
        if not request.POST.has_key(keyName):
            raise KeyError('Invalid request, request is missing argument(s)', 'Error key not found: ' + keyName)
        else:
            # alternative names should be unique in a grid, so lets check for that
            temp = request.POST[keyName].strip()
            if temp != '':
                if not temp in alternativeValues:
                    alternativeValues.append(temp)
                else:
                    raise ValueError("The name " + request.POST[keyName] + " is being used more than one time")
                    # return HttpResponse(createXmlErrorResponse("The name " + request.POST[keyName] + " is being used more than one time"))
            else:
                raise ValueError("No empty values are allowed for alternatives")
                # return HttpResponse(createXmlErrorResponse("No empty values are allowed for alternatives"), content_type='application/xml')
        i += 1
    i = 0
    # check if all the keys for the left and right pole are present
    while i < nConcerns:
        leftPole = None
        rightPole = None
        # check the left pole first
        keyName = 'concern_' + str((i + 1)) + '_left'
        if not request.POST.has_key(keyName):
            raise KeyError('Invalid request, request is missing argument(s)', 'Error key not found: ' + keyName)
            # return HttpResponse(createXmlErrorResponse("Invalid request, request is missing argument(s)"), content_type='application/xml')
        else:
            # the right and left pole can be None so convert the empty string into None
            # NOTE(review): unlike rightPole and the alternative names, leftPole
            # is NOT .strip()ped, so a whitespace-only left pole passes as
            # non-empty — confirm whether that is intended.
            leftPole = request.POST[keyName]
            if leftPole == '':
                leftPole = None
            # the names of the left and right pole should be unique in a grid, so lets check for that. If the left pole is none, allow it to be saved
            if not leftPole in usedConcernNames or leftPole == None:
                usedConcernNames.append(leftPole)
            else:
                raise ValueError("The name " + request.POST[keyName] + " is being used more than one time")
                # return HttpResponse(createXmlErrorResponse("The name " + request.POST[keyName] + " is being used more than one time"), content_type='application/xml')
        # check the right pole
        keyName = 'concern_' + str((i + 1)) + '_right'
        if not request.POST.has_key(keyName):
            raise KeyError('Invalid request, request is missing argument(s)', 'Error key not found: ' + keyName)
            # return HttpResponse(createXmlErrorResponse("Invalid request, request is missing argument(s)"), content_type='application/xml')
        else:
            # the right and left pole can be None so convert the empty string into None
            rightPole = request.POST[keyName].strip()
            if rightPole == '':
                rightPole = None
            # the names of the left and right pole should be unique in a grid, so lets check for that. If the right pole is none, allow it to be saved
            if not rightPole in usedConcernNames or rightPole == None:
                usedConcernNames.append(rightPole)
            else:
                raise ValueError("The name " + request.POST[keyName] + " is being used more than one time")
        # if it is a response grid of the alternative.concern we don't need to check for the weights as they will not be there
        if not isConcernAlternativeResponseGrid:
            # lets check if the weight key is present
            keyName = 'weight_concern' + str((i + 1))
            if not request.POST.has_key(keyName):
                raise KeyError('Invalid request, request is missing argument(s)', 'Error key not found: ' + keyName)
            else:
                # allowed values for the values are None, '', ' ' and numbers
                # NOTE(review): despite the comment above, the else branch below
                # raises for None/' '/'' values — comment and code disagree.
                keyValue = request.POST[keyName]
                if not (keyValue == None or keyValue == ' ' or keyValue == ''):
                    try:
                        value = float(keyValue)
                        concernValues.append((leftPole, rightPole, value))
                    except:
                        raise ValueError("Invalid input " + keyValue)
                        # return HttpResponse(createXmlErrorResponse("Invalid input " + keyValue), content_type='application/xml')
                else:
                    raise KeyError('Invalid request, request is missing argument(s)', 'Error key not found: ' + keyName)
        else:
            # Response grids carry no weight; store None in its place.
            concernValues.append((leftPole, rightPole, None))
        i += 1
    i = 0
    # we are going to check the ratios now, because the response grid for the alternative/concern doesn't have ratios we don't need to check for them
    if not isConcernAlternativeResponseGrid:
        i = 0
        j = 0
        hasEmptyConcern = False;
        while i < nConcerns:
            ratios = []
            # it is not allowed to have rations in an concern that has no leftPole or rightPole
            if concernValues[i][0] is not None and concernValues[i][1] is not None:
                hasEmptyConcern = False
            else:
                hasEmptyConcern = True
            while j < nAlternatives:
                keyName = 'ratio_concer' + str((i + 1)) + '_alternative' + str((j + 1))
                if not request.POST.has_key(keyName):
                    raise KeyError('Invalid request, request is missing argument(s)', 'Error key not found: ' + keyName)
                else:
                    keyValue = request.POST[keyName].strip()
                    # valid values for the they are None, ' ', '' and numbers, anything else is not allowed
                    # NOTE(review): as with the weights, empty values actually
                    # raise in the else branch below — comment and code disagree.
                    if not (keyValue == None or keyValue == ''):
                        if hasEmptyConcern:
                            raise ValueError('It is not allowed to have ratings while the concern is empty')
                            # return HttpResponse(createXmlErrorResponse('It is not allowed to have ratings while the concern is empty'), content_type='application/xml')
                        else:
                            try:
                                value = float(keyValue)
                                ratios.append(value)
                            except:
                                raise ValueError("Invalid value: " + keyValue)
                                # return HttpResponse(createXmlErrorResponse("Invalid value: " + keyValue), content_type='application/xml')
                    else:
                        raise KeyError('Invalid request, request is missing argument(s)',
                                       'Error rating not found: ' + keyName)
                j += 1
            ratioValues.append(ratios)
            j = 0
            i += 1
    return nConcerns, nAlternatives, concernValues, alternativeValues, ratioValues
def updateGrid(gridObj, nConcerns, nAlternatives, concernValues, alternativeValues, ratioValues,
isConcernAlternativeResponseGrid):
"""
This function is used to update a grid that was previouly saved in the database.
Arguments:
gridObj: rgt/gridMng/models.Grid
nConcerns: int
nAlternatives: int
concernValues: | |
style or V2 style. In this binary
we only support V2 style (object-based) checkpoints.
input_dataset: The tf.data Dataset the model is being trained on. Needed
to get the shapes for the dummy loss computation.
unpad_groundtruth_tensors: A parameter passed to unstack_batch.
Raises:
IOError: if `checkpoint_path` does not point at a valid object-based
checkpoint
ValueError: if `checkpoint_version` is not train_pb2.CheckpointVersion.V2
"""
if not is_object_based_checkpoint(checkpoint_path):
raise IOError('Checkpoint is expected to be an object-based checkpoint.')
if checkpoint_version == train_pb2.CheckpointVersion.V1:
raise ValueError('Checkpoint version should be V2')
features, labels = iter(input_dataset).next()
@tf.function
def _dummy_computation_fn(features, labels):
model._is_training = False # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(False)
labels = model_lib.unstack_batch(
labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)
return _compute_losses_and_predictions_dicts(
model,
features,
labels)
strategy = tf.compat.v2.distribute.get_strategy()
strategy.experimental_run_v2(
_dummy_computation_fn, args=(
features,
labels,
))
restore_from_objects_dict = model.restore_from_objects(
fine_tune_checkpoint_type=checkpoint_type)
validate_tf_v2_checkpoint_restore_map(restore_from_objects_dict)
ckpt = tf.train.Checkpoint(**restore_from_objects_dict)
ckpt.restore(checkpoint_path).assert_existing_objects_matched()
def get_filepath(strategy, filepath):
    """Return the checkpoint path this worker should write to.

    The chief (checkpointing) worker uses `filepath` directly; every other
    worker gets a per-task temporary subdirectory so writes do not collide.

    Args:
      strategy: A tf.distribute.Strategy object.
      filepath: A path to where the Checkpoint object is stored.

    Returns:
      A temporary filepath for non-chief workers to use or the original
      filepath for the chief.
    """
    if not strategy.extended.should_checkpoint:
        # TODO(vighneshb) Replace with the public API when TF exposes it.
        task = strategy.extended._task_id  # pylint:disable=protected-access
        return os.path.join(filepath, 'temp_worker_{:03d}'.format(task))
    return filepath
def clean_temporary_directories(strategy, filepath):
    """Remove a non-chief worker's temporary checkpoint directory.

    Needed for all non-chief workers under MultiWorker Mirrored Strategy;
    the chief (checkpointing) worker keeps its directory.

    Args:
      strategy: A tf.distribute.Strategy object.
      filepath: The filepath for the temporary directory.
    """
    if strategy.extended.should_checkpoint:
        return
    if tf.io.gfile.exists(filepath) and tf.io.gfile.isdir(filepath):
        tf.io.gfile.rmtree(filepath)
def train_loop(
pipeline_config_path,
model_dir,
config_override=None,
train_steps=None,
use_tpu=False,
save_final_config=False,
checkpoint_every_n=1000,
checkpoint_max_to_keep=7,
**kwargs):
"""Trains a model using eager + functions.
This method:
1. Processes the pipeline configs
2. (Optionally) saves the as-run config
3. Builds the model & optimizer
4. Gets the training input data
5. Loads a fine-tuning detection or classification checkpoint if requested
6. Loops over the train data, executing distributed training steps inside
tf.functions.
7. Checkpoints the model every `checkpoint_every_n` training steps.
8. Logs the training metrics as TensorBoard summaries.
Args:
pipeline_config_path: A path to a pipeline config file.
model_dir:
The directory to save checkpoints and summaries to.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override the config from `pipeline_config_path`.
train_steps: Number of training steps. If None, the number of training steps
is set from the `TrainConfig` proto.
use_tpu: Boolean, whether training and evaluation should run on TPU.
save_final_config: Whether to save final config (obtained after applying
overrides) to `model_dir`.
checkpoint_every_n:
Checkpoint every n training steps.
checkpoint_max_to_keep:
int, the number of most recent checkpoints to keep in the model directory.
**kwargs: Additional keyword arguments for configuration override.
"""
## Parse the configs
get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[
'get_configs_from_pipeline_file']
merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[
'merge_external_params_with_configs']
create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[
'create_pipeline_proto_from_configs']
configs = get_configs_from_pipeline_file(
pipeline_config_path, config_override=config_override)
kwargs.update({
'train_steps': train_steps,
'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu
})
configs = merge_external_params_with_configs(
configs, None, kwargs_dict=kwargs)
model_config = configs['model']
train_config = configs['train_config']
train_input_config = configs['train_input_config']
unpad_groundtruth_tensors = train_config.unpad_groundtruth_tensors
add_regularization_loss = train_config.add_regularization_loss
clip_gradients_value = None
if train_config.gradient_clipping_by_norm > 0:
clip_gradients_value = train_config.gradient_clipping_by_norm
# update train_steps from config but only when non-zero value is provided
if train_steps is None and train_config.num_steps != 0:
train_steps = train_config.num_steps
if kwargs['use_bfloat16']:
tf.compat.v2.keras.mixed_precision.experimental.set_policy('mixed_bfloat16')
if train_config.load_all_detection_checkpoint_vars:
raise ValueError('train_pb2.load_all_detection_checkpoint_vars '
'unsupported in TF2')
config_util.update_fine_tune_checkpoint_type(train_config)
fine_tune_checkpoint_type = train_config.fine_tune_checkpoint_type
fine_tune_checkpoint_version = train_config.fine_tune_checkpoint_version
# Write the as-run pipeline config to disk.
if save_final_config:
pipeline_config_final = create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_config_final, model_dir)
# Build the model, optimizer, and training input
strategy = tf.compat.v2.distribute.get_strategy()
with strategy.scope():
detection_model = model_builder.build(
model_config=model_config, is_training=True)
def train_dataset_fn(input_context):
    """Callable to create train input.

    Args:
        input_context: the tf.distribute.InputContext handed in by the
            distribution strategy when this function is dispatched per
            replica/worker (see experimental_distribute_datasets_from_function).

    Returns:
        A repeated tf.data dataset of training batches.
    """
    # Create the inputs.
    train_input = inputs.train_input(
        train_config=train_config,
        train_input_config=train_input_config,
        model_config=model_config,
        model=detection_model,
        input_context=input_context)
    # Repeat indefinitely: the outer loop is bounded by `train_steps`,
    # not by dataset epochs.
    train_input = train_input.repeat()
    return train_input
train_input = strategy.experimental_distribute_datasets_from_function(
train_dataset_fn)
global_step = tf.Variable(
0, trainable=False, dtype=tf.compat.v2.dtypes.int64, name='global_step',
aggregation=tf.compat.v2.VariableAggregation.ONLY_FIRST_REPLICA)
optimizer, (learning_rate,) = optimizer_builder.build(
train_config.optimizer, global_step=global_step)
if callable(learning_rate):
learning_rate_fn = learning_rate
else:
learning_rate_fn = lambda: learning_rate
## Train the model
# Get the appropriate filepath (temporary or not) based on whether the worker
# is the chief.
summary_writer_filepath = get_filepath(strategy,
os.path.join(model_dir, 'train'))
summary_writer = tf.compat.v2.summary.create_file_writer(
summary_writer_filepath)
if use_tpu:
num_steps_per_iteration = 100
else:
# TODO(b/135933080) Explore setting to 100 when GPU performance issues
# are fixed.
num_steps_per_iteration = 1
with summary_writer.as_default():
with strategy.scope():
with tf.compat.v2.summary.record_if(
lambda: global_step % num_steps_per_iteration == 0):
# Load a fine-tuning checkpoint.
if train_config.fine_tune_checkpoint:
load_fine_tune_checkpoint(detection_model,
train_config.fine_tune_checkpoint,
fine_tune_checkpoint_type,
fine_tune_checkpoint_version,
train_input,
unpad_groundtruth_tensors)
ckpt = tf.compat.v2.train.Checkpoint(
step=global_step, model=detection_model, optimizer=optimizer)
manager_dir = get_filepath(strategy, model_dir)
if not strategy.extended.should_checkpoint:
checkpoint_max_to_keep = 1
manager = tf.compat.v2.train.CheckpointManager(
ckpt, manager_dir, max_to_keep=checkpoint_max_to_keep)
# We use the following instead of manager.latest_checkpoint because
# manager_dir does not point to the model directory when we are running
# in a worker.
latest_checkpoint = tf.train.latest_checkpoint(model_dir)
ckpt.restore(latest_checkpoint)
def train_step_fn(features, labels):
    """Single train step.

    Runs one forward/backward pass via `eager_train_step` and advances
    `global_step` by one.

    Args:
        features: per-replica batch of input features.
        labels: per-replica batch of groundtruth labels.

    Returns:
        The (scalar) loss for this step, as returned by `eager_train_step`.
    """
    # The learning rate is re-evaluated on every step so that
    # step-dependent schedules take effect.
    loss = eager_train_step(
        detection_model,
        features,
        labels,
        unpad_groundtruth_tensors,
        optimizer,
        learning_rate=learning_rate_fn(),
        add_regularization_loss=add_regularization_loss,
        clip_gradients_value=clip_gradients_value,
        global_step=global_step,
        num_replicas=strategy.num_replicas_in_sync)
    # One optimizer update == one global step.
    global_step.assign_add(1)
    return loss
def _sample_and_train(strategy, train_step_fn, data_iterator):
    """Pull one batch from the iterator and run a distributed train step.

    Args:
        strategy: the tf.distribute strategy used to run the step.
        train_step_fn: per-replica step function (features, labels) -> loss.
        data_iterator: distributed dataset iterator providing
            (features, labels) tuples.

    Returns:
        The SUM-reduction of the per-replica losses.
    """
    features, labels = data_iterator.next()
    # NOTE(review): experimental_run_v2 is the pre-TF2.2 spelling of
    # Strategy.run; the step fn runs once on every replica.
    per_replica_losses = strategy.experimental_run_v2(
        train_step_fn, args=(features, labels))
    # TODO(anjalisridhar): explore if it is safe to remove the
    ## num_replicas scaling of the loss and switch this to a ReduceOp.Mean
    return strategy.reduce(tf.distribute.ReduceOp.SUM,
                           per_replica_losses, axis=None)
@tf.function
def _dist_train_step(data_iterator):
    """A distributed train step.

    Runs `num_steps_per_iteration` training steps inside a single
    tf.function trace and returns only the loss of the last step.
    `tf.range` (rather than Python `range`) is used so the first N-1
    steps become a graph-level loop instead of being unrolled.
    """
    if num_steps_per_iteration > 1:
        for _ in tf.range(num_steps_per_iteration - 1):
            _sample_and_train(strategy, train_step_fn, data_iterator)
    return _sample_and_train(strategy, train_step_fn, data_iterator)
train_input_iter = iter(train_input)
if int(global_step.value()) == 0:
manager.save()
checkpointed_step = int(global_step.value())
logged_step = global_step.value()
last_step_time = time.time()
for _ in range(global_step.value(), train_steps,
num_steps_per_iteration):
loss = _dist_train_step(train_input_iter)
time_taken = time.time() - last_step_time
last_step_time = time.time()
tf.compat.v2.summary.scalar(
'steps_per_sec', num_steps_per_iteration * 1.0 / time_taken,
step=global_step)
if global_step.value() - logged_step >= 100:
tf.logging.info(
'Step {} per-step time {:.3f}s loss={:.3f}'.format(
global_step.value(), time_taken / num_steps_per_iteration,
loss))
logged_step = global_step.value()
if ((int(global_step.value()) - checkpointed_step) >=
checkpoint_every_n):
manager.save()
checkpointed_step = int(global_step.value())
# Remove the checkpoint directories of the non-chief workers that
# MultiWorkerMirroredStrategy forces us to save during sync distributed
# training.
clean_temporary_directories(strategy, manager_dir)
clean_temporary_directories(strategy, summary_writer_filepath)
def eager_eval_loop(
detection_model,
configs,
eval_dataset,
use_tpu=False,
postprocess_on_cpu=False,
global_step=None):
"""Evaluate the model eagerly on the evaluation dataset.
This method will compute the evaluation metrics specified in the configs on
the entire evaluation dataset, then return the metrics. It will also log
the metrics to TensorBoard.
Args:
detection_model: A DetectionModel (based on Keras) to evaluate.
configs: Object detection configs that specify the evaluators that should
be used, as well as whether regularization loss should be included and
if bfloat16 should be used on TPUs.
eval_dataset: Dataset containing evaluation data.
use_tpu: Whether a TPU is being used to execute the model for evaluation.
postprocess_on_cpu: Whether model postprocessing should happen on
the CPU when using a TPU to execute the model.
global_step: A variable containing the training step this model was trained
to. Used for logging purposes.
Returns:
A dict of evaluation metrics representing the results of this evaluation.
"""
train_config = configs['train_config']
eval_input_config = configs['eval_input_config']
eval_config = configs['eval_config']
add_regularization_loss = train_config.add_regularization_loss
is_training = False
detection_model._is_training = is_training # pylint: disable=protected-access
tf.keras.backend.set_learning_phase(is_training)
evaluator_options = eval_util.evaluator_options_from_eval_config(
eval_config)
class_agnostic_category_index = (
label_map_util.create_class_agnostic_category_index())
class_agnostic_evaluators = eval_util.get_evaluators(
eval_config,
list(class_agnostic_category_index.values()),
evaluator_options)
class_aware_evaluators = None
if eval_input_config.label_map_path:
class_aware_category_index = (
label_map_util.create_category_index_from_labelmap(
eval_input_config.label_map_path))
class_aware_evaluators = eval_util.get_evaluators(
eval_config,
list(class_aware_category_index.values()),
evaluator_options)
evaluators = None
loss_metrics = {}
@tf.function
def compute_eval_dict(features, labels):
    """Compute the evaluation result on an image.

    Args:
        features: batched input features dict (keyed by fields.InputDataFields).
        labels: batched groundtruth dict (keyed by fields.InputDataFields).

    Returns:
        A tuple (eval_dict, losses_dict, class_agnostic) where eval_dict is
        suitable for the per-image evaluators, losses_dict holds the model
        losses, and class_agnostic indicates whether detections carry no
        class labels.
    """
    # For evaling on train data, it is necessary to check whether groundtruth
    # must be unpadded. Unpadding only works when the boxes' second dim is
    # statically known and we are not on TPU.
    boxes_shape = (
        labels[fields.InputDataFields.groundtruth_boxes].get_shape().as_list())
    unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu
    labels = model_lib.unstack_batch(
        labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)

    losses_dict, prediction_dict = _compute_losses_and_predictions_dicts(
        detection_model, features, labels, add_regularization_loss)

    def postprocess_wrapper(args):
        # Unpacks (prediction_dict, true_image_shapes) for postprocess().
        return detection_model.postprocess(args[0], args[1])

    # TODO(kaftan): Depending on how postprocessing will work for TPUS w/
    ## TPUStrategy, may be good to move wrapping to a utility method
    if use_tpu and postprocess_on_cpu:
        # Postprocessing runs outside the TPU computation, on the host.
        detections = contrib_tpu.outside_compilation(
            postprocess_wrapper,
            (prediction_dict, features[fields.InputDataFields.true_image_shape]))
    else:
        detections = postprocess_wrapper(
            (prediction_dict, features[fields.InputDataFields.true_image_shape]))

    # A model is class-agnostic when it emits no detection_classes field.
    class_agnostic = (
        fields.DetectionResultFields.detection_classes not in detections)
    # TODO(kaftan) (or anyone): move `_prepare_groundtruth_for_eval to eval_util
    ## and call this from there.
    groundtruth = model_lib._prepare_groundtruth_for_eval(  # pylint: disable=protected-access
        detection_model, class_agnostic, eval_input_config.max_number_of_boxes)
    use_original_images = fields.InputDataFields.original_image in features
    if use_original_images:
        eval_images = features[fields.InputDataFields.original_image]
        # Keep only [height, width, channels] of the true image shape.
        true_image_shapes = tf.slice(
            features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])
        original_image_spatial_shapes = features[
            fields.InputDataFields.original_image_spatial_shape]
    else:
        eval_images = features[fields.InputDataFields.image]
        true_image_shapes = None
        original_image_spatial_shapes = None

    eval_dict = eval_util.result_dict_for_batched_example(
        eval_images,
        features[inputs.HASH_KEY],
        detections,
        groundtruth,
        class_agnostic=class_agnostic,
        scale_to_absolute=True,
        original_image_spatial_shapes=original_image_spatial_shapes,
        true_image_shapes=true_image_shapes)

    return eval_dict, losses_dict, class_agnostic
agnostic_categories | |
import nnef
import os, fnmatch
import argparse
import struct
from bitarray import bitarray
import numpy as np
import string
'''
Compiler for 3PXNet. Compiles a neural network stored in NNEF format to C using inference engine.
'''
'''
NOTIFICATIONS:
variablen is a dictionary; its keys are variable_# and each value is a list
[non-pruned inputs, the operation object whose output name is variable_#].
variables is also a dictionary; its keys are variable_# and each value is the file name of this data.
batchn is also a dictionary; its keys are the indices of the graph object where there is a batchnorm operation, and the values are the variable_# that are input parameters to this operation.
The workflow is as follows: given a graph, this program first reads all of its operations and determines whether each operation can be compiled or not.
Then it reads the data files and puts the values into header files, i.e., decoding_data. After that, the thresholds and signs needed for some batchnorm layers are computed.
Then it starts writing the source file. total_ops stores the indices of the graph where matrix multiplication, whether conv or fc, takes place.
To decide whether a given layer is followed by a batchnorm, it looks ahead for the next matrix multiplication; if there is a batchnorm operation between these two, then there is a batchnorm, and vice versa.
'''
class convert(object):
def __init__(self,input_dir,dataset,test_start_id,test_end_id):
    '''
    initialize a convert object
    :param input_dir: the input directory, its name should end with .nnef
    :param dataset: dataset to test against
    :param test_start_id: testing dataset start index
    :param test_end_id: testing dataset end index
    '''
    self.input_dir=input_dir
    self.dataset=dataset
    self.test_start_id=int(test_start_id)
    self.test_end_id=int(test_end_id)
    # NOTE(review): this stores the nnef.Graph *class* as a placeholder;
    # loadgraph() replaces it with the loaded graph instance.
    self.graph=nnef.Graph
    # batch norm variables
    self.var = {}
    self.mean = {}
    self.gamma = {}
    self.beta = {}
    # store variables with their names as keys
    # for specific information, please see NOTIFICATIONS above
    self.variablen = {}
    self.variables = {}
    # store index of propagation in the graph
    self.matrixmul = []
    self.conv = []
    self.batchn = {}
    #input shape
    self.in_shape = []
    self.rank = []
    # which batch norm layer is the last one (label set by find_batch_last)
    self.batch_last=" "
    # source code we are writing to (file handle set by write_source_first)
    self.source=0
    #permutation list. For specific information, please see training engine
    #as well as the paper the whole project is based on.
    self.list=[]
    # scratch state carried between per-layer compilation passes
    self.tempweight=[]
    self.tempsparse=False
    self.tempoutput=0
    self.name=" "
    self.lastlist=[]
    self.tempvar=" "
    self.tempmean=""
    self.tempgamma=""
    self.tempbeta=""
def replace_non_ascii(self, stri):
    '''
    Replace every character that is not an ASCII letter or digit
    (including . and ,) in a file name with _
    :param stri: input string
    :return: the input string with each non-letter, non-digit character
             replaced by _
    '''
    allowed = string.ascii_letters + string.digits
    cleaned = []
    for ch in stri:
        cleaned.append(ch if ch in allowed else '_')
    return ''.join(cleaned)
def search_non_ascii(self, stri):
    '''
    Find the first character that is not a letter or digit
    Needed to determine the last layer of batch norm (e.g. where the
    ".mean" suffix of a label starts)
    :param stri: input string
    :return: index of the first non-letter, non-digit character, or
             None implicitly when every character is alphanumeric
    '''
    for idx, ch in enumerate(stri):
        if not (ch in string.ascii_letters or ch in string.digits):
            return idx
def loadgraph(self):
    '''
    load the nnef graph into compiler
    Side effect: changes the process working directory to self.input_dir
    (later stages rely on this). Exits the process if the directory does
    not contain a graph.nnef file.
    '''
    print(self.input_dir)
    os.chdir(self.input_dir)
    if "graph.nnef" not in os.listdir("."):
        print("ERROR: BAD NNEF DIRECTORY!")
        exit()
    else:
        self.graph = nnef.load_graph('graph.nnef')
        print("Graph loaded")
def find_batch_last(self):
    '''
    Determines the last layer
    The last layer will not be binarized, so batchnorm has to be dealt with differently
    Requires NNEF graph to be loaded (using loadgraph())
    :return: NA (sets self.batch_last to the label of the last batchnorm
             layer, or to " " when there is none)
    '''
    #find out the last matrix multiplication or convolution operation
    batch_last = next(i for i in reversed(range(len(self.graph.operations))) if
                      self.graph.operations[i].name == 'matmul' or self.graph.operations[i].name == 'conv')
    # If True, last layer is batch norm, otherwise false
    lastBnFound=False
    #if there is batch norm after the last matmul or conv, then that is the last batch norm layer
    for i in range(batch_last,len(self.graph.operations)):
        if self.graph.operations[i].name=='batch_normalization':
            for ops in self.graph.operations:
                # Get the variable which is the input to the batch_normalization layer
                # (batch_last is rebound from an index to the label string here)
                if ops.outputs['output']==self.graph.operations[i].inputs['mean']: #get the name for the last batch norm layer
                    batch_last=ops.attribs['label']
                    lastBnFound=True
                    break
    if lastBnFound:
        # If found, save the label for the last batchnorm layer
        self.batch_last=batch_last[0:self.search_non_ascii(batch_last)] #cutoff the ".mean" part from batch_last
    else:
        self.batch_last=" "
def write_source_first(self):
    '''
    Write to the source file include headers for variables
    Creates ../3pxnet-compiler/autogen/source.c, writes the generic and
    model-specific #include preamble, and leaves the open file handle in
    self.source (later code-generation stages keep appending to it).
    Side effect: temporarily changes the working directory and returns to
    self.input_dir at the end.
    :return: NA
    '''
    os.chdir("../3pxnet-compiler")
    if "autogen" not in os.listdir("."):
        os.mkdir("autogen")
    os.chdir("autogen")
    # Kept open on purpose: self.source is used by the rest of codegen.
    source = open("source.c", 'w+')
    self.source = source
    # Write generic headers
    for std_header in ("stdio.h", "stdlib.h", "stdint.h", "string.h",
                       "math.h", "time.h", "errno.h"):
        source.write("#include <%s>\n" % std_header)
    # Write model specific headers
    # TODO: don't include headers that are not needed for a particular model
    # (fix: the original emitted a redundant duplicate of "xnor_fc.h")
    for model_header in ("datatypes.h", "utils.h", "xnor_base.h",
                         "xnor_fc.h", "3pxnet_fc.h", "3pxnet_cn.h",
                         "bwn_dense_cn.h"):
        source.write("#include \"%s\"\n" % model_header)
    os.chdir("..")
    os.chdir(self.input_dir)
def writefc(self, write, rank, temp_array, sparse, output, name):
    '''
    Write fc layer's data into C headers
    Weights are sign-binarized (>= 0 -> bit 1, < 0 -> bit 0) and packed
    into 32-bit words, one output column at a time. For sparse layers,
    packs whose 32 weights are all exactly zero are dropped (pruned) and
    the surviving pack indices are emitted as a companion _indices macro.
    :param write: whether to write or not(related to permutation issue)
    :param rank: weight's shape
    :param temp_array: weight data
    :param sparse: whether the layer is sparse or not
    :param output: IO object, corresponding to the header file it's writing to
    :param name: name of the header file
    :return: indices: if it is a sparse layer, indices are used to calculate # non-pruned inputs
    '''
    indices = []
    if write:
        print("Writing to header " + name + ".h ...")
        output.write("#define _" + name + " {\\\n")
    # NNEF format weight values are stored in row-major order.
    # So for a fc layer, its shape is [input, output]
    for i in range(rank[1]):
        # outtemp is used to store packs
        # mask is used to check whether a given pack is all zero
        outtemp = bitarray()
        mask = bitarray()
        for j in range(rank[0]):
            temp = temp_array[j, i]
            if temp >= 0:
                outtemp.append(1)
            else:
                outtemp.append(0)
            mask.append(temp == 0)
            if j % 32 == 31:
                # a full 32-bit pack has been collected
                if sparse:
                    # a pack is all zero
                    if int(mask.to01(), 2) == 2 ** 32 - 1:
                        outtemp = bitarray()
                        mask = bitarray()
                    else:
                        if write:
                            output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
                        # NOTE(review): j % rank[0] == j here (j < rank[0]),
                        # so this is just the pack index j // 32.
                        indices.append(int(j % rank[0] / 32))
                        outtemp = bitarray()
                        mask = bitarray()
                else:
                    if write:
                        output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
                    outtemp = bitarray()
                    mask = bitarray()
    if write:
        output.write("}\n")
        if sparse:
            output.write("#define _" + name + "_indices {\\\n")
            for i in range(len(indices)):
                output.write(str(indices[i]) + ", \\\n")
            output.write("}\n")
        output.close()
    return indices
def writecn(self, write, rank, temp_array, sparse, output, name):
'''
Write conv layer's data into C headers
The same as fc layer, NNEF format stores value in row-major order
So for a conv layer, the shape is [n,z,y,x]
But, I modified this order during decoding data time.
So now the input rank has a shape [x,y,z,n]
:param write: whether to write or not(related to permutation issue)
:param rank: weight's shape
:param temp_array: weight data
:param sparse: whether the layer is sparse or not
:param output: IO object, corresponding to the header file it's writing to
:param name: name of the header file
:return: indices: if it is a sparse layer, indices are used to calculate # non-pruned inputs
'''
indices = []
if write:
print("Writing to header " + name + '.h ...')
output.write("#define _" + name + " {\\\n")
for n in range(rank[0]):
# outtemp is used to store packs
# mask is used to check whether a given pack is all zero
outtemp = bitarray()
mask = bitarray()
for y in range(rank[2]):
for x in range(rank[3]):
for z in range(rank[1]):
temp = temp_array[x, y, z, n]
if temp >= 0:
outtemp.append(1)
else:
outtemp.append(0)
mask.append(temp == 0)
if z % 32 == 31:
if sparse:
# a pack is all zero
if int(mask.to01(), 2) == 2 ** 32 - 1:
outtemp = bitarray()
mask = bitarray()
else:
if write:
output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
indices.append(int(z / 32) + x * int(rank[1] / 32) + y * rank[3] * int(
rank[1] / 32))
outtemp = bitarray()
mask = bitarray()
else:
if write:
output.write(str("0x%x" % int(outtemp.to01(), 2)) + ", \\\n")
outtemp = bitarray()
mask = bitarray()
if write:
output.write("}\n")
if sparse:
output.write("#define _" + name + "_indices {\\\n")
for i in range(len(indices)):
output.write(str(indices[i]) | |
# this is the "Job Entry Subsystem" (including the "Initiator")
# this is currently only working for simple JCL
from zPE.util import *
from zPE.util.global_config import *
import zPE.util.sysmsg as sysmsg
import zPE.util.spool as spool
import zPE.base.core.SPOOL as core_SPOOL
import zPE.base.conf
import sys
import re
from time import time, localtime, strftime, strptime
def parse(job):
    """Parse a JCL job stream into the global JCL dictionary.

    Reads the JOB card first, then loops over EXEC / DD / comment cards,
    appending JobStep objects to JCL['step'] and echoing the cards into
    the JESMSGLG / JESJCL / JESYSMSG spools.

    :param job: path of the JCL file, or '-' to read from stdin
    :return: 'ok' on success, 'label' when invalid labels were found;
             fatal JCL errors abort the job via abort()
    """
    invalid_lable = []          # record all invalid lables

    if job == '-':
        fp = sys.stdin
    else:
        fp = open(job, 'r')     # this is not under the control of SMS

    sp1 = spool.retrieve('JESMSGLG') # SPOOL No. 01
    sp2 = spool.retrieve('JESJCL')   # SPOOL No. 02

    ctrl = '1'                  # control character
    sp1.append(ctrl, '{0:>27} J O B L O G -'.format(SYSTEM['JEST']),
               '- S Y S T E M {0} -'.format(SYSTEM['SYST']),
               '- N O D E {0}\n'.format(SYSTEM['NODET']))
    sp1.append('0', '\n')
    ctrl = ' '

    # initial read (for JOB card)
    line = fp.readline()
    JCL['read_cnt'] += 1
    JCL['card_cnt'] += 1
    if debug_mode():
        print line,

    if not line.startswith('//'): # invalid JCL JOB card
        abort(9, 'Error: ', line[:-1],
              ':\n Not a valid JCL JOB card.\n')
    if len(line) > 73:          # 72+ char + '\n'
        abort(9, 'Error: line ', str(JCL['read_cnt']),
              ': Statement cannot exceed colomn 72.\n')

    # field_0 field_1 field_2
    # -------- ------- ------------------------------------
    # //label JOB <args>
    # // EXEC <args>
    # //maxlabel DD <args>
    field = re.split('\s+', line[2:], 2)

    # check lable
    if bad_label(field[0]):
        invalid_lable.append(JCL['read_cnt'])

    # parse JOB card
    # currently supported parameter: region
    if field[1] != 'JOB':
        abort(9, 'Error: No JOB card found.\n')
    JCL['jobname'] = field[0]
    if len(JCL['jobname']) != 8:
        abort(9, 'Error: JOB name is not 8 charactors long.\n')
    JCL['owner'] = JCL['jobname'][:7]
    JCL['class'] = JCL['jobname'][-1]
    JCL['jobid'] = 'JOB{0:0>5}'.format(Config['job_id'])
    JCL['spool_path'] = '{0}.{1}.{2}'.format(
        JCL['owner'],
        JCL['jobname'],
        JCL['jobid']
        )

    # args_0,args_1,args_2
    # AccInfo,'pgmer'[,parameters]
    args = resplit_sq(',', field[2], 2)
    if len(args) < 2:
        abort(9, 'Error: Invalid JOB card: missing parameter(s).\n')
    # parse AccInfo
    JCL['accinfo'] = args[0]
    if args[1][0] != '\'' or args[1][-1] != '\'':
        abort(9, 'Error: ', args[1],
              ':\n The programmer\'s name need to be ',
              'surrounded by single quotes.\n')
    # parse pgmer
    JCL['pgmer'] = args[1][1:-1]
    if len(JCL['pgmer']) > 20:
        abort(9, 'Error: ', args[1],
              ':\n The programmer\'s name cannot be exceed ',
              '20 characters.\n')
    # parse parameters
    JCL['time'] = Config['time_limit']
    JCL['region'] = Config['memory_sz']
    if len(args) == 3:
        for part in resplit_pp(',', args[2]):
            if part[:5] == 'TIME=':
                try:
                    JCL['time'] = parse_time(part[5:])
                except SyntaxError:
                    abort(9, 'Error: ', part,
                          ': Invalid time specification.\n')
            elif part[:7] == 'REGION=':
                try:
                    JCL['region'] = parse_region(part[7:])
                except SyntaxError:
                    abort(9, 'Error: ', part,
                          ': Invalid region size.\n')
                except ValueError:
                    abort(9, 'Error: ', part,
                          ': Region must be divisible by 4K.\n')
            # elif part[:9] == 'MSGCLASS=':

    sp1.append(ctrl, strftime('%H.%M.%S '), JCL['jobid'],
               '{0:<16}'.format(strftime(' ---- %A,').upper()),
               strftime(' %d %b %Y ----').upper(), '\n')
    sp1.append(ctrl, strftime('%H.%M.%S '), JCL['jobid'],
               ' IRR010I USERID {0:<8}'.format(JCL['owner']),
               ' IS ASSIGNED TO THIS JOB.\n')

    # ctrl for 1st line will be modified in finish_job()
    sp2.append('c', '{0:>9}'.format(1), # should always be the first JCL card
               ' {0:<72}{1}\n'.format(line[:-1], JCL['jobid']))
    ctrl = ' '

    # main read loop
    nextline = None             # for `__READ_UNTIL()` look-ahead buffer
    last_dd = None              # for DD concatenation
    card_lbl = None             # the label the JCL card
    jcl_continue = None         # the action the JCL card continues
    while True:
        if nextline == None:    # no left over line in the look-ahead buffer
            line = fp.readline()
        else:                   # line exist in the look-ahead buffer
            line = nextline
            nextline = None

        # check JCL card length
        if not line:
            break
        if len(line) > 73:      # 72+ char + '\n'
            abort(9, 'Error: line ', str(JCL['read_cnt']),
                  ': Statement cannot exceed colomn 72.\n')
        JCL['read_cnt'] += 1

        # check implicit instream data
        if not line.startswith('//'):
            # synthesize a SYSIN DD * card and re-process the data line
            nextline = line     # store the line in the look-ahead buffer
            line = '//SYSIN DD * GENERATED STATEMENT\n'
        # check end of JCL marker
        elif len(line) == 2 or line[2:].isspace():
            JCL['read_cnt'] -= 1 # END mark does not count
            break
        # check comment
        elif line.startswith('//*') or jcl_continue == '*':
            if debug_mode():
                print line,
            sp2.append(ctrl, '{0:>9} {1}'.format('', line))
            if len(line) == 73 and line[71] != ' ':
                # 72+ char + '\n' col 72 is non-space
                jcl_continue = '*'
            else:
                jcl_continue = None
            continue

        # starting from here, line will be guaranteed to start with "//"

        # increment line counter only for non-comment lines
        if not jcl_continue:
            JCL['card_cnt'] += 1
        if debug_mode():
            print line,

        field = re.split('\s+', line[2:])

        # check lable
        if jcl_continue and field[0]: # JCL concatenation cannot start at col 3
            abort(9, 'Error: line ', str(JCL['read_cnt']),
                  ': Cannot have labels on continuation lines.\n')
        elif bad_label(field[0]):
            invalid_lable.append(JCL['read_cnt'])
        if jcl_continue:
            if line.index(field[1]) > 15:
                abort(9, 'Error: line ', str(JCL['read_cnt']),
                      ': JCL continuation must start before col 16.\n')
        else:
            card_lbl = field[0]

        # parse EXEC card
        # currently supported parameter: parm, time, region
        # currently assumed parameter: cond=(0,NE)
        # see also: __COND_FAIL(step)
        if field[1] == 'EXEC' or jcl_continue == 'EXEC':
            if jcl_continue not in [ None, 'EXEC' ]:
                abort(9, 'Error: line ', str(JCL['read_cnt']),
                      ': Invalid JCL continuation.\n')
            last_dd = None

            if not jcl_continue:
                args = re.split(',', field[2], 1)
                pgm = ''
                proc = ''
                if args[0][:4] == 'PGM=':
                    pgm = args[0][4:]
                elif args[0][:5] == 'PROC=':
                    proc = args[0][5:]
                else:
                    proc = args[0]
                parm = ''           # parameter list
                time = JCL['time']
                region = JCL['region']
            else:
                # continuation card: fake args so the parameter loop below
                # processes field[1]
                args = [ 'continuation', field[1] ]
                jcl_continue = None

            if len(args) == 2:
                for part in resplit_pp(',', args[1]):
                    if jcl_continue: # jcl_continue can only be set by last part
                        abort(9, 'Error: line ', str(JCL['read_cnt']),
                              ': Invalid JCL card\n')
                    elif part[:5] == 'PARM=':
                        parm = part[5:]
                    elif part[:5] == 'TIME=':
                        try:
                            time = parse_time(part[5:])
                        except SyntaxError:
                            abort(9, 'Error: ', part,
                                  ': Invalid time specification.\n')
                    elif part[:7] == 'REGION=':
                        try:
                            region = parse_region(part[7:])
                        except SyntaxError:
                            abort(9, 'Error: ', part,
                                  ': Invalid region size.\n')
                        except ValueError:
                            abort(9, 'Error: ', part,
                                  ': Region must be divisible ',
                                  'by 4K.\n')
                    elif part[:5] == 'COND=':
                        pass    # assume COND=(0,NE)
                    elif part == '':
                        # trailing comma: card continues on the next line
                        jcl_continue = 'EXEC'
                    else:
                        abort(9, 'Error: ', part,
                              ': Parameter not supported.\n')

            if not jcl_continue:
                JCL['step'].append(
                    JobStep(
                        name = card_lbl,
                        pgm = pgm,
                        proc = proc,
                        time = time,
                        region = region,
                        parm = parm
                        ))

        # parse DD card
        # currently supported parameter: dsn, disp, sysout, */data
        elif field[1] == 'DD' or jcl_continue == 'DD':
            if jcl_continue not in [ None, 'DD' ]:
                abort(9, 'Error: line ', str(JCL['read_cnt']),
                      ': Invalid JCL continuation.\n')

            if not jcl_continue:
                sysout = ''
                dsn = []
                disp = ''
                sp_in = None    # will hold the tmp SPOOL if instream

                if not card_lbl:        # concatenated DD card
                    pass
                else:                   # new DD card
                    last_dd = card_lbl  # record the dd name
                args = field[2]
            else:
                args = field[1]
                jcl_continue = None

            if args == '*' or args == 'DATA':
                # instream data, terminated by '/*'
                ( nextline, sp_in ) = __READ_UNTIL(fp, last_dd, '/*', nextline)
            elif args[:9] == 'DATA,DLM=\'':
                # instream data with a custom 2-char delimiter
                ( nextline, sp_in ) = __READ_UNTIL(fp, last_dd, args[9:11])
            elif args[:7] == 'SYSOUT=':
                sysout = args[7:]
            else:
                for part in resplit_pp(',', args):
                    if jcl_continue: # jcl_continue can only be set by last part
                        abort(9, 'Error: line ', str(JCL['read_cnt']),
                              ': Invalid JCL card\n')
                    elif part[:4] == 'DSN=':
                        dsn = conv_path(part[4:])
                    elif part[:5] == 'DISP=':
                        disp = part[5:]
                    elif part == '':
                        # trailing comma: card continues on the next line
                        jcl_continue = 'DD'
                    else:
                        abort(9, 'Error: ', part,
                              ': Parameter not supported.\n')
                if not jcl_continue and disp == '':
                    abort(9, 'Error: ', line[:-1],
                          ': Need DISP=[disp].\n')

            if not jcl_continue:
                JCL['step'][-1].dd.append(
                    card_lbl, { # do not use last_dd, since it will be flaged
                                # as duplicated DD names
                        'SYSOUT' : sysout,
                        'DSN' : dsn,
                        'DISP' : disp,
                        },
                    sp_in
                    )

        # ignore other types of cards
        else:
            mark4future(field[1])

        sp2.append(ctrl, '{0:>9} {1}'.format(JCL['card_cnt'], line))
    # end of the main read loop

    sp3 = spool.retrieve('JESYSMSG') # SPOOL No. 03
    if len(invalid_lable) != 0:
        # ctrl for 1st line will be modified in finish_job()
        sp3.append('c', ' STMT NO. MESSAGE\n')
        ctrl = ' '
        for indx in invalid_lable:
            sp3.append(ctrl, '{0:>9} IEFC662I INVALID LABEL\n'.format(indx))
        sp3.append(ctrl, '\n')
        return 'label'
    return 'ok'
def init_job():
sp1 = spool.retrieve('JESMSGLG') # SPOOL No. 01
| |
__copyright__ = \
"""
Copyright © 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact <NAME> (<EMAIL>) at Purdue University.
Last Modified: 10/02/2019
"""
__license__ = "CC BY-NC-SA 4.0"
__authors__ = "<NAME>, <NAME>, <NAME>, <NAME>"
__version__ = "1.6.0"
import numpy as np
import os
import argparse
from parse import parse
import torch
def parse_command_args(training_or_testing):
"""
Parse the arguments passed by the user from the command line.
Also performs some sanity checks.
:param training_or_testing: 'training' or 'testing'.
Returns: args object containing the arguments
as properties (args.argument_name)
"""
if training_or_testing == 'training':
# Training settings
parser = argparse.ArgumentParser(
description='BoundingBox-less Location with PyTorch.',
formatter_class=CustomFormatter)
optional_args = parser._action_groups.pop()
required_args = parser.add_argument_group('MANDATORY arguments')
required_args.add_argument('--train-dir',
required=True,
help='Directory with training images. '
'Must contain image files (any format), and '
'a CSV or XML file containing groundtruth, '
'as described in the README.')
optional_args.add_argument('--val-dir',
help="Directory with validation images and GT. "
"If 'auto', 20%% of the training samples "
"will be removed from training "
"and used for validation. "
"If left blank, no validation "
"will be done.")
optional_args.add_argument('--imgsize',
type=str,
default='256x256',
metavar='HxW',
help="Size of the input images "
"(height x width).")
optional_args.add_argument('--batch-size',
type=strictly_positive_int,
default=1,
metavar='N',
help="Input batch size for training.")
optional_args.add_argument('--epochs',
type=strictly_positive_int,
default=np.inf,
metavar='N',
help="Number of epochs to train.")
optional_args.add_argument('--nThreads', '-j',
default=4,
type=strictly_positive_int,
metavar='N',
help="Number of threads to create "
"for data loading. "
"Must be a striclty positive int")
optional_args.add_argument('--lr',
type=strictly_positive,
default=4e-5,
metavar='LR',
help="Learning rate (step size).")
optional_args.add_argument('-p',
type=float,
default=-1,
metavar='P',
help="alpha in the generalized mean "
"(-inf => minimum)")
optional_args.add_argument('--no-cuda',
action='store_true',
default=False,
help="Disables CUDA training")
optional_args.add_argument('--no-data-augm',
action='store_true',
default=False,
help="Disables data augmentation "
"(random vert+horiz flip)")
optional_args.add_argument('--drop-last-batch',
action='store_true',
default=False,
help="Drop the last batch during training, "
"which may be incomplete. "
"If the dataset size is not "
"divisible by the batch size, "
"then the last batch will be smaller.")
optional_args.add_argument('--seed',
type=int,
default=1,
metavar='S',
help="Random seed.")
optional_args.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help="Path to latest checkpoint.")
optional_args.add_argument('--save',
default='',
type=str,
metavar='PATH',
help="Where to save the model "
"after each epoch.")
optional_args.add_argument('--log-interval',
type=strictly_positive,
default=3,
metavar='N',
help="Time to wait between every "
" time the losses are printed "
"(in seconds).")
optional_args.add_argument('--max-trainset-size',
type=strictly_positive_int,
default=np.inf,
metavar='N',
help="Only use the first N "
"images of the training dataset.")
optional_args.add_argument('--max-valset-size',
type=strictly_positive_int,
default=np.inf,
metavar='N',
help="Only use the first N images "
"of the validation dataset.")
optional_args.add_argument('--val-freq',
default=1,
type=int,
metavar='F',
help="Run validation every F epochs. "
"If 0, no validation will be done. "
"If no validation is done, "
"a checkpoint will be saved "
"every F epochs.")
optional_args.add_argument('--visdom-env',
default='default_environment',
type=str,
metavar='NAME',
help="Name of the environment in Visdom.")
optional_args.add_argument('--visdom-server',
default=None,
metavar='SRV',
help="Hostname of the Visdom server. "
"If not provided, nothing will "
"be sent to Visdom.")
optional_args.add_argument('--visdom-port',
default=8989,
metavar='PRT',
help="Port of the Visdom server.")
optional_args.add_argument('--optimizer', '--optim',
default='sgd',
type=str.lower,
metavar='OPTIM',
choices=['sgd', 'adam'],
help="SGD or Adam.")
optional_args.add_argument('--replace-optimizer',
action='store_true',
default=False,
help="Replace optimizer state "
"when resuming from checkpoint. "
"If True, the optimizer "
"will be replaced using the "
"arguments of this scripts. "
"If not resuming, it has no effect.")
optional_args.add_argument('--max-mask-pts',
type=strictly_positive_int,
default=np.infty,
metavar='M',
help="Subsample this number of points "
"from the mask, so that GMM fitting "
"runs faster.")
optional_args.add_argument('--paint',
default=False,
action="store_true",
help="Paint red circles at the "
"estimated locations in validation. "
"This maskes it run much slower!")
optional_args.add_argument('--radius',
type=strictly_positive,
default=5,
metavar='R',
help="Detections at dist <= R to a GT point"
"are considered True Positives.")
optional_args.add_argument('--n-points',
type=strictly_positive_int,
default=None,
metavar='N',
help="If you know the number of points "
"(e.g, just one pupil), then set it. "
"Otherwise it will be estimated.")
optional_args.add_argument('--ultrasmallnet',
default=False,
action="store_true",
help="If True, the 5 central layers are removed,"
"resulting in a much smaller UNet. "
"This is used for example for the pupil dataset."
"Make sure to enable this if your are restoring "
"a checkpoint that was trained using this option enabled.")
optional_args.add_argument('--lambdaa',
type=strictly_positive,
default=1,
metavar='L',
help="Weight that will increase the "
"importance of estimating the "
"right number of points.")
parser._action_groups.append(optional_args)
args = parser.parse_args()
# Force batchsize == 1 for validation
args.eval_batch_size = 1
if args.eval_batch_size != 1:
raise NotImplementedError('Only a batch size of 1 is implemented for now, got %s'
% args.eval_batch_size)
# Convert to full path
if args.save != '':
args.save = os.path.abspath(args.save)
if args.resume != '':
args.resume = os.path.abspath(args.resume)
# Check we are not overwriting a checkpoint without resuming from it
if args.save != '' and os.path.isfile(args.save) and \
not (args.resume and args.resume == args.save):
print("E: Don't overwrite a checkpoint without resuming from it. "
"Are you sure you want to do that? "
"(if you do, remove it manually).")
exit(1)
args.cuda = not args.no_cuda and torch.cuda.is_available()
elif training_or_testing == 'testing':
# Testing settings
parser = argparse.ArgumentParser(
description='BoundingBox-less Location with PyTorch (inference/test only)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
optional_args = parser._action_groups.pop()
required_args = parser.add_argument_group('MANDATORY arguments')
required_args.add_argument('--dataset',
required=True,
help='Directory with test images. '
'Must contain image files (any format), and '
'(optionally) a CSV or XML file containing '
'groundtruth, as described in the README.')
required_args.add_argument('--out',
type=str,
required=True,
help='Directory where results will be stored (images+CSV).')
optional_args.add_argument('--model',
type=str,
metavar='PATH',
help='Checkpoint with the CNN model.\n')
optional_args.add_argument('--evaluate',
action='store_true',
default=False,
help='Evaluate metrics (Precision/Recall, RMSE, MAPE, etc.)')
optional_args.add_argument('--no-cuda', '--no-gpu',
action='store_true',
default=False,
help='Use CPU only, no GPU.')
optional_args.add_argument('--imgsize',
type=str,
default='256x256',
metavar='HxW',
help='Size of the input images (heightxwidth).')
optional_args.add_argument('--radii',
type=str,
default=range(0, 15 + 1),
metavar='Rs',
help='Detections at dist <= R to a GT pt are True Positives.'
'If not selected, R=0, ..., 15 will be tested.')
optional_args.add_argument('--taus',
type=str,
# default=np.linspace(0, 1, 25).tolist() + [-1, -2],
default=-2,
metavar='Ts',
help='Detection threshold between 0 and 1. '
# 'If not selected, 25 thresholds in [0, 1] will be tested. '
'tau=-1 means dynamic Otsu thresholding. '
'tau=-2 means Beta Mixture Model-based thresholding.')
optional_args.add_argument('--n-points',
type=int,
default=None,
metavar='N',
help='If you know the exact number of points in the image, then set it. '
'Otherwise it will be estimated by adding a L1 cost term.')
optional_args.add_argument('--max-mask-pts',
type=int,
default=np.infty,
metavar='M',
help='Subsample this number of points from the mask, '
'so GMM fitting runs faster.')
optional_args.add_argument('--no-paint',
default=False,
action="store_true",
help='Don\'t paint a red circle at each estimated location.')
optional_args.add_argument('--force', '-f',
default=False,
action="store_true",
help='Overwrite output files if they exist. '
'In fact, it removes the output directory first')
optional_args.add_argument('--seed',
type=int,
default=0,
metavar='S',
help='Random seed.')
optional_args.add_argument('--max-testset-size',
type=int,
default=np.inf,
metavar='N',
help='Only use the first N images of the testing dataset.')
optional_args.add_argument('--nThreads', '-j',
default=4,
type=int,
metavar='N',
help='Number of data loading threads.')
optional_args.add_argument('--ultrasmallnet',
default=False,
action="store_true",
help="If True, the 5 central layers are removed,"
"resulting in a much smaller UNet. "
"This is used for example for the pupil dataset."
"Make sure to enable this if your are restoring "
"a checkpoint that was trained using this option enabled.")
parser._action_groups.append(optional_args)
args = parser.parse_args()
if not args.no_cuda and not torch.cuda.is_available():
print(
'W: No GPU (CUDA) devices detected in your system, running with --no-gpu option...')
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.paint = not args.no_paint
# String/Int -> List
if isinstance(args.taus, (list, range)):
pass
elif isinstance(args.taus, str) and ',' in args.taus:
args.taus = [float(tau)
for tau in args.taus.replace('[', '').replace(']', '').split(',')]
else:
args.taus = [float(args.taus)]
if isinstance(args.radii, (list, range)):
pass
elif isinstance(args.radii, str) and ',' in args.radii:
args.radii = [int(r) for r in args.radii.replace('[', '').replace(']', '').split(',')]
else:
args.radii = [int(args.radii)]
else:
raise ValueError('Only \'training\' or \'testing\' allowed, got %s'
% training_or_testing)
# imgsize -> height x width
try:
args.height, args.width = parse('{}x{}', args.imgsize)
args.height, args.width = int(args.height), int(args.width)
except TypeError as e:
print("\__ E: The input --imgsize must be in format WxH, got '{}'".format(args.imgsize))
exit(-1)
return args
class CustomFormatter(argparse.RawDescriptionHelpFormatter):
    """Help formatter that appends '(default: ...)' to option help texts.

    Behaves like RawDescriptionHelpFormatter but, similarly to
    ArgumentDefaultsHelpFormatter, advertises each action's default value,
    and separates entries with a blank line.
    """

    def _get_help_string(self, action):
        """Return *action*'s help text, annotated with its default value.

        The default is only appended when it is meaningful: not SUPPRESS,
        not None or '', not already referenced via '%(default)', and only
        for optionals (or positionals whose nargs make a default apply).
        """
        # action.help may legitimately be None; the original code crashed
        # here with "argument of type 'NoneType' is not iterable".
        help = action.help or ''
        if '%(default)' not in help:
            if action.default is not argparse.SUPPRESS:
                defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
                if action.option_strings or action.nargs in defaulting_nargs:
                    if action.default is not None and action.default != '':
                        help += ' (default: ' + str(action.default) + ')'
        help += '\n\n'  # blank line between help entries
        return help
def strictly_positive_int(val):
"""Convert to | |
self.textout += ptxt
# print protocol result, optionally a cell header.
self.print_formatted_script_output(script_header)
script_header = False
self.auto_updater = True # restore function
print('\nDone')
def get_window_analysisPars(self):
    """
    Retrieve the settings of the linear-region windows from the display,
    plus some other general values, in preparation for analysis.

    Rebuilds self.analysis_parameters from scratch and echoes each
    window's start/stop times back to the on-screen spin boxes (in ms).
    :return: Nothing.
    """
    # Start from an empty dict so stale values from a prior run cannot leak in.
    self.analysis_parameters = {}
    for name in ['lrwin0', 'lrwin1', 'lrwin2', 'lrrmp']:
        widgets = self.regions[name]
        span = widgets['region'].getRegion()  # current region from the display (seconds)
        widgets['start'].setValue(span[0] * 1.0e3)  # report values to screen in ms
        widgets['stop'].setValue(span[1] * 1.0e3)
        self.analysis_parameters[name] = {'times': span}
    for name in ['lrwin1', 'lrwin2']:
        self.analysis_parameters[name]['mode'] = self.regions[name]['mode'].currentText()
    self.analysis_parameters['lrwin0']['mode'] = 'Mean'  # window 0 always averages
    # Fold the remaining general settings into the analysisPars dictionary.
    self.get_alternation()
    self.get_baseline()
    self.get_junction()
def get_script_analysisPars(self, script_globals, thiscell):
    """
    Set the analysis times and modes from the script, updating both
    self.analysis_parameters and the corresponding Qt window widgets.

    :param script_globals: dict of script-wide settings (junction potential,
        default win1/win2 analysis modes).
    :param thiscell: per-cell dict; may override the windows, window modes,
        junction potential and alternation flag.
    :return: Nothing.
    """
    self.analysis_parameters = {}
    self.analysis_parameters['baseline'] = False
    self.analysis_parameters['lrwin1'] = {}
    self.analysis_parameters['lrwin2'] = {}
    self.analysis_parameters['lrwin0'] = {}
    self.analysis_parameters['lrrmp'] = {}
    self.auto_updater = False  # turn off the updates while the script drives the GUI
    # Map script-global keys to their slot in analysis_parameters:
    # a 1-element list is a direct key, a 2-element list is key + subkey.
    scriptg = {'global_jp': ['junction'], 'global_win1_mode': ['lrwin1', 'mode'],
               'global_win2_mode': ['lrwin2', 'mode']}
    for k in scriptg.keys():  # set globals first
        if len(scriptg[k]) == 1:
            self.analysis_parameters[scriptg[k][0]] = script_globals[k]
        else:
            self.analysis_parameters[scriptg[k][0]] = {scriptg[k][1]: script_globals[k]}
    # Per-cell overrides of the global settings.
    if 'junctionpotential' in thiscell:
        self.analysis_parameters['junction'] = thiscell['junctionpotential']
    if 'alternation' in thiscell:
        self.analysis_parameters['alternation'] = thiscell['alternation']
    else:
        self.analysis_parameters['alternation'] = True  # alternation defaults to on
    for n in range(0, 3):  # get the current region definitions
        # Script times appear to be in ms: the widgets take ms directly and
        # the analysis times are scaled by 1e-3 — TODO confirm against script files.
        self.regions['lrwin%d'%n]['region'].setRegion([x*1e-3 for x in thiscell['win%d'%n]])
        self.regions['lrwin%d'%n]['start'].setValue(thiscell['win%d'%n][0])
        self.regions['lrwin%d'%n]['stop'].setValue(thiscell['win%d'%n][1])
        self.analysis_parameters['lrwin%d'%n]['times'] = [t*1e-3 for t in thiscell['win%d'%n]] # convert to sec
        self.show_or_hide('lrwin%d'%n, forcestate=True)
    for win in ['win1', 'win2']: # set the modes for the 2 windows
        winmode = win+'_mode'
        lrwinx = 'lr'+win
        if winmode in thiscell:
            thiswin = thiscell[winmode]
            r = self.regions[lrwinx]['mode'].findText(thiswin)
            if r >= 0:
                print('setting %s mode to %s ' % (win, thiswin))
                self.regions[lrwinx]['mode'].setCurrentIndex(r)
                self.analysis_parameters[lrwinx]['mode'] = thiswin
            else:
                print('%s analysis mode not recognized: %s' % (win, thiswin))
        else:
            # No per-cell mode: push the (global) mode back into the widget.
            r = self.regions[lrwinx]['mode'].findText(self.analysis_parameters[lrwinx]['mode'])
            if r >= 0:
                self.regions[lrwinx]['mode'].setCurrentIndex(r)
    return
def print_script_output(self):
    """
    Print a clean version of the accumulated script results to the
    terminal, with any HTML markup stripped.
    :return: Nothing.
    """
    print(self.remove_html_markup(self.textout))
def copy_script_output(self):
    """
    Copy the script output (results) text widget contents to the system
    clipboard, using the Qt widget's own copy().
    :return: Nothing.
    """
    self.scripts_form.PSPReversal_ScriptResults_text.copy()
def print_formatted_script_output(self, script_header=True, copytoclipboard=False):
    """
    Print a nicely formatted, tab-separated version of the analysis output
    to the terminal, so it can be pasted into another program (Excel,
    Prism) for further analysis.

    :param script_header: if True, print the column-header line first
        (written anew for each cell).
    :param copytoclipboard: if True, also place the data line on the
        system clipboard.
    :return: Nothing.
    """
    # Column name -> format spec for the value in self.analysis_summary.
    data_template = OrderedDict([
        ('ElapsedTime', '{:>8.2f}'), ('Drugs', '{:<8s}'), ('HoldV', '{:>5.1f}'), ('JP', '{:>5.1f}'),
        ('Rs', '{:>6.2f}'), ('Cm', '{:>6.1f}'), ('Ru', '{:>6.2f}'),
        ('Erev', '{:>6.2f}'),
        ('gsyn_Erev', '{:>9.2f}'), ('gsyn_60', '{:>7.2f}'), ('gsyn_13', '{:>7.2f}'),
        ('I_ionic+', '{:>8.3f}'), ('I_ionic-', '{:>8.3f}'), ('ILeak', '{:>7.3f}'),
        ('win1Start', '{:>9.3f}'), ('win1End', '{:>7.3f}'),
        ('win2Start', '{:>9.3f}'), ('win2End', '{:>7.3f}'),
        ('win0Start', '{:>9.3f}'), ('win0End', '{:>7.3f}'),
    ])
    if script_header:
        # BUG FIX: the original ended these print calls with a bare comma —
        # the Python-2 idiom to suppress the newline. In Python 3 that comma
        # merely builds a throwaway tuple and every column landed on its own
        # line; end='' restores the intended single-line header.
        print('{:34s}\t{:24s}\t'.format("Cell", "Protocol"), end='')
        for k in data_template.keys():
            print('{:<s}\t'.format(k), end='')
        print('')
    ltxt = ''
    ltxt += ('{:34s}\t{:24s}\t'.format(self.analysis_summary['CellID'], self.analysis_summary['Protocol']))
    for a in data_template.keys():
        if a in self.analysis_summary.keys():
            ltxt += ((data_template[a] + '\t').format(self.analysis_summary[a]))
        else:
            ltxt += '< >\t'  # placeholder for a measurement that was not computed
    print(ltxt)
    if copytoclipboard:
        clipb = Qt.QApplication.clipboard()
        clipb.clear(mode=clipb.Clipboard)
        clipb.setText(ltxt, mode=clipb.Clipboard)
# fill table with current information
def update_win_analysis(self, region=None, clear=True, pw=False):
"""
Compute the current-voltage relationship from the selected time window
The IV curve is only valid when there are no spikes detected in
the window. In voltage-clamp mode, this is assumed to always
be true.
In current clamp mode, the results of the spike detection (count_spikes)
are used to remove traces with spikes in them.
The values in the curve are taken according to the "mode" of the window
as selected in the gui. This can be mean, min, max, sum, or the largest of the
abs(min) and max (as -abs(min)).
Subtraction of one window from another is also possible - this currently only
works in one direction: win1 can be subtracted from win2; if win1 has not been
analyzed, then the subtraction will not be done.
Alternation: if the data have been collected in an alternation mode,
then the data is split into "on" and "off" groups, and the current-voltage
relationship is computed for each group.
We can also compute the input resistance (although this does not always make sense)
For voltage clamp data, we can optionally remove the "leak" current.
The resulting IV curve is plotted at the end of the analysis.
:param region: which region of the linearRegion elements are used for
the time window.
:param clear: a boolean flag that originally allowed accumulation of plots
presently, ignored.
:param pw: print window flag = current ignored.
:return: Nothing
:modifies:
ivss, yleak, ivss_cmd, cmd.
dictionary of measurement window data in self.measure
"""
# the first action of this routine is to set the text boxes correctly to represent the status of the
# current LR region
# if not self.auto_updater: # do nothing if auto update is off
# return 'no auto updater'
window = region
region = 'lr' + window
if window is None:
return 'no window'
if self.traces is None:
return 'no traces'
if window == 'win0':
return 'window 0 called' # we don't use for calculations, just marking times
wincmd = window + 'cmd'
winoff = window + 'off'
winon = window + 'on'
windowsd = window + 'std'
winaltcmd = window + 'altcmd'
winunordered = window + '_unordered'
winlinfit = window + '_linfit'
winraw_i = window + 'rawI' # save the raw (uncorrected) voltage as well
winraw_v = window + 'rawV'
winorigcmd = window + 'origcmd'
winbkgd = window + 'bkgd' # background current (calculated from win 1 fit)
# these will always be filled
self.measure[window] = []
self.measure[wincmd] = []
# The next ones will only be set if the alt flag is on
self.measure[winoff] = []
self.measure[winon] = []
self.measure[winaltcmd] = []
self.measure[winunordered] = []
self.measure[windowsd] = []
self.measure[winraw_i] = []
self.measure[winraw_v] = []
self.measure[winorigcmd] = []
self.measure[winbkgd] = []
# import pprint
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(self.analysis_parameters)
mode = self.analysis_parameters[region]['mode']
rgninfo = self.analysis_parameters[region]['times']
data1 = self.traces['Time': rgninfo[0]:rgninfo[1]] # extract analysis region
tx1 = ma.compressed(ma.masked_outside(self.time_base, rgninfo[0], rgninfo[1])) # time to match data1
if tx1.shape[0] > data1.shape[1]:
tx1 = tx1[0:-1] # clip extra point. Rules must be different between traces clipping and masking.
if window == 'win1': # check if win1 overlaps with win0, and select data
# print '***** WINDOW 1 SETUP *****'
r0 = self.analysis_parameters['lrwin0']['times'] #regions['lrwin0']['region'].getRegion()
tx = ma.masked_inside(tx1, r0[0], r0[1]) #
if tx.mask.all(): # handle case where win1 is entirely inside win2
print('update_win_analysis: Window 1 is entirely inside Window 0: No analysis possible')
print('rgninfo: ', rgninfo)
print('r0: ', r0)
return 'bad window1/0 relationship'
data1 = ma.array(data1, mask=ma.resize(ma.getmask(tx), data1.shape))
self.txm = ma.compressed(tx) # now compress tx as well
self.win1fits = None # reset the fits
if data1.shape[1] == 0 or data1.shape[0] == 1:
print('no data to analyze?')
return 'no data' # skip it
commands = np.array(self.values) # get clamp specified command levels
if self.data_mode in self.ic_modes:
self.count_spikes()
if mode in ['Mean-Win1', 'Sum-Win1']:
if 'win1_unordered' not in self.measure.keys() or len(
self.measure['win1_unordered']) == 0: # Window not analyzed yet, but needed: do it
self.update_win_analysis(region='win1')
if mode == 'Min':
self.measure[window] = data1.min(axis=1)
elif mode == 'Max':
self.measure[window] = data1.max(axis=1)
elif mode == 'Mean' or mode is None:
self.measure[window] = data1.mean(axis=1)
self.measure[windowsd] = np.std(np.array(data1), axis=1)
elif mode == 'Sum':
self.measure[window] = np.sum(data1, axis=1)
elif mode == 'Abs': # find largest regardless of the sign ('minormax')
x1 = data1.min(axis=1)
x2 = data1.max(axis=1)
self.measure[window] = np.zeros(data1.shape[0])
for i in range(data1.shape[0]):
if -x1[i] > x2[i]:
self.measure[window][i] = x1[i]
else:
self.measure[window][i] = x2[i]
elif mode == 'Linear' and window == 'win1':
ntr = data1.shape[0]
d1 = np.resize(data1.compressed(), (ntr, self.txm.shape[0]))
p = np.polyfit(self.txm, d1.T, 1)
self.win1fits = p
txw1 = ma.compressed(ma.masked_inside(self.time_base, rgninfo[0], rgninfo[1]))
fits = np.zeros((data1.shape[0], txw1.shape[0]))
for j in range(data1.shape[0]): # polyval only does 1d
fits[j, :] = np.polyval(self.win1fits[:, j], txw1)
self.measure[winbkgd] = fits.mean(axis=1)
self.measure[window] = data1.mean(axis=1)
elif mode == 'Poly2' and window == 'win1':
# fit time course of data
ntr = | |
<reponame>naxa-developers/mes-core<filename>onadata/apps/core/views.py
from django.views.generic import View, TemplateView, ListView, DetailView
from django.contrib.auth import views
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from rest_framework.response import Response
from django.contrib.gis.geos import Point
from django.template import RequestContext
from django.utils.translation import ugettext as _
from onadata.libs.utils.viewer_tools import enketo_url
from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework import viewsets, views
import pandas as pd
from django.db.models import Q
from django.db.models import Avg, Count, Sum
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User, Group
from django.core.exceptions import PermissionDenied, ValidationError
from itertools import chain
from rest_framework.authtoken import views as restviews
import json
from django.utils.decorators import method_decorator
from datetime import datetime
from django.db import transaction
from django.core.mail import EmailMessage
from django.utils.encoding import force_bytes, force_text
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .signup_tokens import account_activation_token
from django.views.generic.list import MultipleObjectMixin
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.shortcuts import render, redirect
from django.core.serializers import serialize
import json
import ast
from django.contrib.gis.geos import Point
from onadata.apps.logger.models import Instance, XForm
from .serializers import ActivityGroupSerializer, ActivitySerializer, OutputSerializer, ProjectSerializer, \
ClusterSerializer, BeneficiarySerialzier, ConfigSerializer, ClusterActivityGroupSerializer, CASerializer
from .models import Project, Output, ActivityGroup, Activity, Cluster, Beneficiary, UserRole, ClusterA, ClusterAG, \
Submission, Config, ProjectTimeInterval, ClusterAHistory, District, Municipality, ActivityAggregate, ActivityAggregateHistory
from .forms import LoginForm, SignUpForm, ProjectForm, OutputForm, ActivityGroupForm, ActivityForm, ClusterForm, \
BeneficiaryForm, UserRoleForm, ConfigForm, ChangePasswordform
from .mixin import LoginRequiredMixin, CreateView, UpdateView, DeleteView, ProjectView, ProjectRequiredMixin, \
ProjectMixin, group_required, ManagerMixin, AdminMixin
from .utils import get_beneficiaries, get_clusters, get_cluster_activity_data, get_progress_data, \
get_form_location_label, image_urls_dict, inject_instanceid, create_db_table, fill_cseb_table
from onadata.libs.utils.viewer_tools import _get_form_url
from django.db import transaction, connection
def logout_view(request):
    """Log the user out, discarding any project selection kept in the session."""
    request.session.pop('project_id', None)
    logout(request)
    return HttpResponseRedirect('/core/sign-in/')
class ProjectSelectView(LoginRequiredMixin, TemplateView):
    """Let a non-admin user pick one of their projects; super-admins go straight home."""
    template_name = 'core/project-dashboard.html'

    def get(self, request):
        # Super-admins do not select a project; send them to the home page.
        if self.request.group.name in ['super-admin', ]:
            return HttpResponseRedirect(reverse('home'))
        roles = UserRole.objects.filter(user=self.request.user).select_related('project')
        return render(request, self.template_name, {'roles': roles})
class HomeView(LoginRequiredMixin, TemplateView):
    """Role-aware landing page.

    Field staff are redirected to their cluster list and donors to the
    donor dashboard; project managers see counts scoped to the active
    project, super-admins see global counts, anyone else gets a 404.
    """
    template_name = 'core/index.html'

    @staticmethod
    def _summary_context(outputs, activities, activity_groups, clusters, beneficiaries):
        """Build the count context shared by the manager and admin branches."""
        return {
            'output_count': outputs.count(),
            'activity_count': activities.count(),
            'ag_count': activity_groups.count(),
            'cluster': clusters.count(),
            'beneficiary': beneficiaries.count(),
        }

    def get(self, request):
        group_name = self.request.group.name
        if group_name in ['project-coordinator', 'social-mobilizer']:
            return HttpResponseRedirect(reverse('user_cluster_list', kwargs={'pk': self.request.user.pk}))
        elif group_name in ['project-manager', 'project-management-unit']:
            # Prefer the project chosen in this session over the default one.
            if 'project_id' in request.session:
                project = Project.objects.get(id=self.request.session['project_id'])
            else:
                project = request.project
            context = self._summary_context(
                Output.objects.filter(project=project),
                Activity.objects.filter(activity_group__project=project),
                ActivityGroup.objects.filter(project=project),
                Cluster.objects.filter(project=project),
                Beneficiary.objects.filter(cluster__project=project),
            )
            return render(request, self.template_name, context)
        elif group_name in ['super-admin', ]:
            # Super-admins see unscoped, system-wide counts.
            context = self._summary_context(
                Output.objects.all(),
                Activity.objects.all(),
                ActivityGroup.objects.all(),
                Cluster.objects.all(),
                Beneficiary.objects.all(),
            )
            return render(request, self.template_name, context)
        elif group_name in ['donor']:
            return HttpResponseRedirect(reverse('dashboard-1'))
        else:
            return HttpResponseRedirect(reverse('404_error'))
class ProjectDashboardView(LoginRequiredMixin, TemplateView):
    """Render the project dashboard template."""
    template_name = 'core/project-dashboard.html'

    def get_context_data(self, **kwargs):
        """Return the template context.

        BUG FIX: the original built the context but never returned it, so
        TemplateView received None and template rendering failed.
        """
        context = super().get_context_data(**kwargs)
        return context
def select_project(request, *args, **kwargs):
    """Remember the chosen project (URL kwarg 'pk') in the session, then go home."""
    request.session['project_id'] = kwargs.get('pk')
    return HttpResponseRedirect(reverse('home'))
def web_authenticate(username=None, password=None):
    """Authenticate by username *or* e-mail address.

    BUG FIX: the original default was the redacted placeholder
    ``<PASSWORD>``, which is not valid Python; ``None`` restores the intent.

    :return: a ``(user, account_found)`` tuple. ``user`` is None when
        authentication fails; the boolean lets the caller distinguish a
        wrong password (account exists) from an unknown username/e-mail.
    """
    try:
        if "@" in username:
            user = User.objects.get(email__iexact=username)
        else:
            user = User.objects.get(username__iexact=username)
        if user.check_password(password):
            return authenticate(username=user.username, password=password), False
        else:
            return None, True  # account exists, password is wrong
    except User.DoesNotExist:
        return None, False  # unknown username / e-mail
def signin(request):
    """Render and process the sign-in form.

    Distinguishes three failure modes for the template: unknown
    username/e-mail, wrong password, and a deactivated account.
    """
    if request.user.is_authenticated():
        return redirect('home')
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            pwd = form.cleaned_data['password']
            # BUG FIX: the original passed the redacted placeholder
            # '<PASSWORD>' here (not valid Python); forward the submitted
            # password instead.
            user, valid_email = web_authenticate(username=username, password=pwd)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return HttpResponseRedirect(reverse('project-dashboard'))
                else:
                    return render(request, 'core/sign-in.html',
                                  {'form': form,
                                   'email_error': "Your Account is Deactivated, Please Contact Administrator.",
                                   'valid_email': valid_email,
                                   'login_username': username
                                   })
            else:
                if valid_email:
                    # Account exists; the password was wrong.
                    email_error = False
                    password_error = True
                else:
                    password_error = False
                    email_error = "Invalid Username, please check your username."
                return render(request, 'core/sign-in.html',
                              {'form': form,
                               'valid_email': valid_email,
                               'email_error': email_error,
                               'password_error': password_error,
                               'login_username': username
                               })
        else:
            # Invalid form: echo back the attempted username if present.
            if request.POST.get('login_username') is not None:
                login_username = request.POST.get('login_username')
            else:
                login_username = ''
            return render(request, 'core/sign-in.html', {
                'form': form,
                'valid_email': False,
                'email_error': "Your username and password did not match.",
                'login_username': login_username
            })
    else:
        form = LoginForm()
        return render(request, 'core/sign-in.html', {'form': form, 'valid_email': True, 'email_error': False})
def signup(request):
    """Create an inactive account and e-mail the user an activation link."""
    if request.user.is_authenticated():
        return redirect('/core')
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            email = form.cleaned_data.get('email')
            # BUG FIX: the original read the redacted placeholder field
            # '<PASSWORD>' and called set_password(<PASSWORD>) — both
            # invalid Python; use the real cleaned 'password' value.
            password = form.cleaned_data.get('password')
            # Create without the raw password so it is never persisted
            # unhashed; set_password() hashes it before the final save().
            user = User.objects.create(username=username, email=email)
            user.set_password(password)
            user.is_active = False  # stays inactive until the e-mail link is used
            user.save()
            mail_subject = 'Activate your account.'
            message = render_to_string('core/acc_active_email.html', {
                'user': user,
                'domain': settings.SITE_URL,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': account_activation_token.make_token(user),
            })
            to_email = email
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            email.send()
            return render(request, 'core/emailnotify.html', {'email': user.email})
        else:
            # Invalid form: echo the attempted values back into the template.
            username = request.POST.get('username')
            email = request.POST.get('email')
            return render(request, 'core/sign-up.html', {
                'form': form,
                'username': username,
                'email': email,
                'valid_email': True,
                'email_error': False
            })
    else:
        form = SignUpForm()
        return render(request, 'core/sign-up.html', {
            'form': form,
            'valid_email': True,
            'email_error': False
        })
def activate(request, uidb64, token):
    """Confirm an e-mail activation link and enable the corresponding account."""
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    # Guard clause: bad uid or stale/forged token means an invalid link.
    if user is None or not account_activation_token.check_token(user, token):
        return HttpResponse('Activation link is invalid!')
    user.is_active = True
    user.save()
    user.backend = 'django.contrib.auth.backends.ModelBackend'
    login(request, user)
    return redirect(reverse_lazy('sign_in'))
class ForgotView(TemplateView):
    """Static 'forgot password' page."""
    template_name = 'core/forgot-password.html'
class ErrorView(TemplateView):
    """Static 404 error page."""
    template_name = 'core/404.html'
class Dashboard1View(LoginRequiredMixin, TemplateView):
    """Donor dashboard: per-project beneficiary/activity counts and district list."""
    template_name = 'core/dashboard-1new.html'

    def get(self, request):
        # Prefer the project chosen in this session over the default one.
        if 'project_id' in self.request.session:
            project = Project.objects.get(id=self.request.session['project_id'])
        else:
            project = request.project
        # Districts that actually have beneficiaries, plus a JSON copy for
        # the page's JavaScript.
        districts = District.objects.filter(beneficiary__isnull=False).distinct()
        district_values = District.objects.filter(beneficiary__isnull=False).distinct().values('id', 'name')
        context = {
            'activity_count': Activity.objects.filter(activity_group__project=project).count(),
            'beneficiary_count': Beneficiary.objects.filter(cluster__project=project).count(),
            'districts': districts,
            'script_district': json.dumps(list(district_values)),
            'project': project,
        }
        return render(request, self.template_name, context)
def get_district_progress(request):
    """AJAX endpoint: average beneficiary progress per beneficiary type.

    For the district given by ``?id=...`` — optionally restricted to the
    municipalities listed in ``request_data[]`` — return a JSON object of
    the form ``{type: {'sum': mean_progress, 'total': n_beneficiaries}}``.

    Improvements over the original: the two near-identical aggregation
    branches are deduplicated into one helper, and the bare ``except:``
    clauses (which swallowed every error, including KeyboardInterrupt)
    are narrowed to the exceptions they actually guarded against.
    """
    district = District.objects.get(pk=request.GET.get('id'))
    types = Beneficiary.objects.filter(district=district, cluster__project=request.project).values('Type').distinct()
    if 'project_id' in request.session:
        project = Project.objects.get(id=request.session['project_id'])
    else:
        project = request.project

    def summarize(beneficiaries):
        """Mean progress over *beneficiaries*; unset progress counts as 0."""
        total_progress = 0
        for b in beneficiaries:
            # progress may be None/missing on some records; treat as 0.
            try:
                total_progress += b.progress
            except (TypeError, AttributeError):
                pass
        count = len(beneficiaries)
        # With no beneficiaries the mean is just the (zero) accumulated sum,
        # matching the original's ZeroDivision fallback of "sum / 1".
        mean = round(total_progress / count, 2) if count else total_progress
        return {'sum': mean, 'total': count}

    municipality_ids = None
    if 'request_data[]' in request.GET:
        municipality_ids = [int(m) for m in request.GET.getlist('request_data[]')]

    progress_data = {}
    for item in types:
        if municipality_ids is not None:
            qs = Beneficiary.objects.filter(municipality__id__in=municipality_ids,
                                            Type=item['Type'], cluster__project=project).distinct()
        else:
            qs = Beneficiary.objects.filter(district=district,
                                            Type=item['Type'], cluster__project=project).distinct()
        progress_data[item['Type']] = summarize(qs)
    return JsonResponse(progress_data)
def get_phase_data(request, *args, **kwargs):
    """AJAX endpoint: beneficiaries that completed each house-construction
    phase (activity group), optionally filtered by ``?district=...``.

    Returns ``{phase_name: {'number': n, 'percentage': p}}``, presented in
    ascending order of completion percentage.
    """
    # BUG FIX: the original tested  'project_id' in request.session['project_id'],
    # which raises KeyError when no project is selected and performs a
    # substring test when one is; the session itself is the container to test.
    if 'project_id' in request.session:
        project = Project.objects.get(id=request.session['project_id'])
    else:
        project = request.project
    types = Beneficiary.objects.filter(cluster__project=project)
    clusters = Cluster.objects.filter(project=project).order_by('name')
    district = None
    if 'district' in request.GET:
        district = District.objects.get(id=int(request.GET.get('district')))

    # The two original branches differed only by the district restriction;
    # build the filters once and add the district clause when present.
    ag_filter = dict(project=project, output__name='House Construction',
                     activity__is_registration=False, activity__is_entry=False)
    if district is not None:
        ag_filter['clusterag__cluster__municipality__district'] = district
    activity_groups = ActivityGroup.objects.filter(**ag_filter).distinct()

    construction_phases = {}
    for ag in activity_groups:
        activities = Activity.objects.filter(activity_group=ag)
        beneficiary_filter = dict(
            submissions__cluster_activity__cag__activity_group=ag,
            submissions__cluster_activity__cag__cluster__in=clusters)
        if district is not None:
            beneficiary_filter['district'] = district
        beneficiary = Beneficiary.objects.filter(**beneficiary_filter).distinct()
        completed_count = 0
        for item in beneficiary:
            # A beneficiary completed the phase only if every submission for
            # every activity in the group was approved.
            completed = True
            for activity in activities:
                for s in Submission.objects.filter(beneficiary=item, cluster_activity__activity=activity):
                    if s.status != 'approved':
                        completed = False
            if completed:
                completed_count += 1
        construction_phases[ag.name] = {
            'number': completed_count,
            'percentage': round((float(completed_count) / len(types)) * 100, 2),
        }

    # Present the phases sorted by completion percentage (ascending).
    ordered = sorted(construction_phases.items(), key=lambda x: x[1]['percentage'])
    construction_phases = {str(name): data for name, data in ordered}
    return JsonResponse(construction_phases, safe=False)
# class Dashboard1View(LoginRequiredMixin, TemplateView):
# template_name = 'core/dashboard-1.html'
# def get(self, request):
# project = request.project
# # data required for charts and drop down menus
# districts = District.objects.filter(id__in=Beneficiary.objects.values('district__id').distinct())
# municipalities = Municipality.objects.filter(id__in=Beneficiary.objects.values('municipality__id').distinct())
# select_cluster = Cluster.objects.filter(project=project)
# types = Beneficiary.objects.filter(cluster__project=project).values('Type').distinct()
# # intervals = ProjectTimeInterval.objects.values('label').order_by('label')
# beneficiary_count = Beneficiary.objects.filter(cluster__project=project).count()
# activity_count = Activity.objects.filter(activity_group__project=project).count()
# interval = []
# # time intervals for activity progress data
# # for item in intervals:
# # interval.append(str(item['label']))
# # for beneficiary type pie data
# pie_data = {}
# beneficiary_types = types.annotate(total=Count('Type'))
# for item in beneficiary_types:
# pie_data[str(item['Type'])] = [round((float(item['total']) / beneficiary_count) * 100, 2)]
# # get cluster activity overview data on basis of filter used
# # if 'cluster_activity' in request.GET:
# # checked = [(name, value) for name, value | |
def get_positiveReference(self): return self.positiveReference
def set_positiveReference(self, positiveReference): self.positiveReference = positiveReference
def get_negativeReference(self): return self.negativeReference
def set_negativeReference(self, negativeReference): self.negativeReference = negativeReference
def validate_AlertType(self, value):
# Validate type AlertType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['BEEP', 'SILENT', 'RING_5', 'RING_15', 'RING_30', 'RING_60']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AlertType' % {"value" : value.encode("utf-8")} )
def validate_AlertIntervalType(self, value):
# Validate type AlertIntervalType, a restriction on xs:string.
if value is not None and Validate_simpletypes_:
value = str(value)
enumerations = ['NONE', 'INTERVAL_5', 'INTERVAL_15', 'INTERVAL_30', 'INTERVAL_60', 'INTERVAL_300', 'INTERVAL_900', 'INTERVAL_3600']
enumeration_respectee = False
for enum in enumerations:
if value == enum:
enumeration_respectee = True
break
if not enumeration_respectee:
warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AlertIntervalType' % {"value" : value.encode("utf-8")} )
def hasContent_(self):
if (
self.content is not None or
self.form is not None or
self.attachment or
super(FormMessage, self).hasContent_()
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', name_='FormMessage', namespacedef_='', pretty_print=True):
        """Write this FormMessage as an XML element to *outfile*.

        Opens the tag (honouring any namespace definition registered in
        GenerateDSNamespaceDefs_ and the tag name this element was parsed
        from), writes attributes, then either exports children inside an
        open/close tag pair or emits a self-closing tag when empty.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('FormMessage')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        # eol_ separates tags only in pretty-printed output.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Round-trip fidelity: re-export under the tag name we parsed.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FormMessage')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='FormMessage', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='FormMessage'):
        """Write FormMessage's XML attributes to *outfile*.

        Inherited attributes are written first via the superclass; each
        attribute is emitted only when set and not already recorded in
        *already_processed* (which guards against duplicates).
        """
        super(FormMessage, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='FormMessage')
        if self.member is not None and 'member' not in already_processed:
            already_processed.add('member')
            outfile.write(' member=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.member), input_name='member')), ))
        if self.brandingKey is not None and 'brandingKey' not in already_processed:
            already_processed.add('brandingKey')
            outfile.write(' brandingKey=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.brandingKey), input_name='brandingKey')), ))
        if self.autoLock is not None and 'autoLock' not in already_processed:
            already_processed.add('autoLock')
            outfile.write(' autoLock="%s"' % self.gds_format_boolean(self.autoLock, input_name='autoLock'))
        if self.vibrate is not None and 'vibrate' not in already_processed:
            already_processed.add('vibrate')
            outfile.write(' vibrate="%s"' % self.gds_format_boolean(self.vibrate, input_name='vibrate'))
        if self.alertType is not None and 'alertType' not in already_processed:
            already_processed.add('alertType')
            outfile.write(' alertType=%s' % (quote_attrib(self.alertType), ))
        if self.alertIntervalType is not None and 'alertIntervalType' not in already_processed:
            already_processed.add('alertIntervalType')
            outfile.write(' alertIntervalType=%s' % (quote_attrib(self.alertIntervalType), ))
        if self.positiveReference is not None and 'positiveReference' not in already_processed:
            already_processed.add('positiveReference')
            outfile.write(' positiveReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.positiveReference), input_name='positiveReference')), ))
        if self.negativeReference is not None and 'negativeReference' not in already_processed:
            already_processed.add('negativeReference')
            outfile.write(' negativeReference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.negativeReference), input_name='negativeReference')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='FormMessage', fromsubclass_=False, pretty_print=True):
super(FormMessage, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.content is not None:
self.content.export(outfile, level, namespaceprefix_, name_='content', pretty_print=pretty_print)
if self.form is not None:
self.form.export(outfile, level, namespaceprefix_, name_='form', pretty_print=pretty_print)
for attachment_ in self.attachment:
attachment_.export(outfile, level, namespaceprefix_, name_='attachment', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace prefix from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read FormMessage's XML attributes from *node*.

        Each handled attribute name is recorded in *already_processed* so
        the superclass (called last) does not process it again.  Boolean
        attributes accept only the canonical XML literals; anything else
        is a parse error.
        """
        value = find_attr_value_('member', node)
        if value is not None and 'member' not in already_processed:
            already_processed.add('member')
            self.member = value
        value = find_attr_value_('brandingKey', node)
        if value is not None and 'brandingKey' not in already_processed:
            already_processed.add('brandingKey')
            self.brandingKey = value
        value = find_attr_value_('autoLock', node)
        if value is not None and 'autoLock' not in already_processed:
            already_processed.add('autoLock')
            if value in ('true', '1'):
                self.autoLock = True
            elif value in ('false', '0'):
                self.autoLock = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('vibrate', node)
        if value is not None and 'vibrate' not in already_processed:
            already_processed.add('vibrate')
            if value in ('true', '1'):
                self.vibrate = True
            elif value in ('false', '0'):
                self.vibrate = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('alertType', node)
        if value is not None and 'alertType' not in already_processed:
            already_processed.add('alertType')
            self.alertType = value
            self.validate_AlertType(self.alertType)    # validate type AlertType
        value = find_attr_value_('alertIntervalType', node)
        if value is not None and 'alertIntervalType' not in already_processed:
            already_processed.add('alertIntervalType')
            self.alertIntervalType = value
            self.validate_AlertIntervalType(self.alertIntervalType)    # validate type AlertIntervalType
        value = find_attr_value_('positiveReference', node)
        if value is not None and 'positiveReference' not in already_processed:
            already_processed.add('positiveReference')
            self.positiveReference = value
        value = find_attr_value_('negativeReference', node)
        if value is not None and 'negativeReference' not in already_processed:
            already_processed.add('negativeReference')
            self.negativeReference = value
        super(FormMessage, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch one parsed child element to the matching member slot.

        `content` and `form` are single-valued; `attachment` accumulates
        into a list.  The superclass handler runs afterwards for any
        inherited child elements.
        """
        if nodeName_ == 'content':
            obj_ = contentType1.factory()
            obj_.build(child_)
            self.content = obj_
            obj_.original_tagname_ = 'content'
        elif nodeName_ == 'form':
            obj_ = Form.factory()
            obj_.build(child_)
            self.form = obj_
            obj_.original_tagname_ = 'form'
        elif nodeName_ == 'attachment':
            obj_ = Attachment.factory()
            obj_.build(child_)
            self.attachment.append(obj_)
            obj_.original_tagname_ = 'attachment'
        super(FormMessage, self).buildChildren(child_, node, nodeName_, True)
# end class FormMessage
class Outlet(GeneratedsSuper):
    """Generated binding for the ``Outlet`` element.

    Outlet carries only the ``value``, ``name`` and ``reference`` XML
    attributes; it has no child elements and no text content.
    """
    subclass = None
    superclass = None
    def __init__(self, value=None, name=None, reference=None):
        self.original_tagname_ = None
        self.value = _cast(None, value)
        self.name = _cast(None, name)
        self.reference = _cast(None, reference)
    def factory(*args_, **kwargs_):
        """Create an Outlet, honouring any registered subclass override."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Outlet)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Outlet.subclass:
            return Outlet.subclass(*args_, **kwargs_)
        else:
            return Outlet(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_value(self): return self.value
    def set_value(self, value): self.value = value
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_reference(self): return self.reference
    def set_reference(self, reference): self.reference = reference
    def hasContent_(self):
        # BUG FIX: the generated code contained "if (\n):" -- an empty
        # condition, which is a SyntaxError.  Outlet is attribute-only
        # (no children, no text), so it never has content to export.
        return False
    def export(self, outfile, level, namespaceprefix_='', name_='Outlet', namespacedef_='', pretty_print=True):
        """Write this Outlet as an XML element to *outfile*."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('Outlet')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='Outlet')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='Outlet', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Outlet'):
        """Write the value/name/reference attributes when set."""
        if self.value is not None and 'value' not in already_processed:
            already_processed.add('value')
            outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), ))
        if self.reference is not None and 'reference' not in already_processed:
            already_processed.add('reference')
            outfile.write(' reference=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.reference), input_name='reference')), ))
    def exportChildren(self, outfile, level, namespaceprefix_='', name_='Outlet', fromsubclass_=False, pretty_print=True):
        # Outlet defines no child elements.
        pass
    def build(self, node):
        """Populate this object from an ElementTree *node* and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read the value/name/reference attributes from *node*."""
        value = find_attr_value_('value', node)
        if value is not None and 'value' not in already_processed:
            already_processed.add('value')
            self.value = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('reference', node)
        if value is not None and 'reference' not in already_processed:
            already_processed.add('reference')
            self.reference = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Outlet defines no child elements.
        pass
# end class Outlet
class End(FlowElement):
subclass = None
superclass = FlowElement
    def __init__(self, id=None, waitForFollowUpMessage=False):
        """Initialize an End flow element.

        *waitForFollowUpMessage* is a boolean XML attribute (cast via
        ``_cast``); *id* is forwarded to the FlowElement base.
        """
        self.original_tagname_ = None
        super(End, self).__init__(id, )
        self.waitForFollowUpMessage = _cast(bool, waitForFollowUpMessage)
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, End)
if subclass is not None:
return subclass(*args_, **kwargs_)
if End.subclass:
return End.subclass(*args_, **kwargs_)
else:
return End(*args_, **kwargs_)
factory = staticmethod(factory)
def get_waitForFollowUpMessage(self): return self.waitForFollowUpMessage
def set_waitForFollowUpMessage(self, waitForFollowUpMessage): self.waitForFollowUpMessage = waitForFollowUpMessage
def hasContent_(self):
if (
super(End, self).hasContent_()
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', name_='End', namespacedef_='', pretty_print=True):
        """Write this End element as XML to *outfile*.

        Opens the tag, writes attributes, then either exports children
        inside an open/close tag pair or emits a self-closing tag when
        there is no content.
        """
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('End')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        # eol_ separates tags only in pretty-printed output.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Round-trip fidelity: re-export under the tag name we parsed.
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='End')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_='', name_='End', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='End'):
        """Write End's XML attributes after the inherited FlowElement ones."""
        super(End, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='End')
        # Emitted only when truthy: a False value is omitted from the output.
        if self.waitForFollowUpMessage and 'waitForFollowUpMessage' not in already_processed:
            already_processed.add('waitForFollowUpMessage')
            outfile.write(' waitForFollowUpMessage="%s"' % self.gds_format_boolean(self.waitForFollowUpMessage, input_name='waitForFollowUpMessage'))
def exportChildren(self, outfile, level, namespaceprefix_='', name_='End', fromsubclass_=False, pretty_print=True):
super(End, self).exportChildren(outfile, level, namespaceprefix_, name_, True, pretty_print=pretty_print)
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child | |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Callable, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from monai.networks import one_hot
from monai.utils import LossReduction, Weight
class DiceLoss(_Loss):
    """
    Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.
    Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
    Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
    while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added to the intersection and union components of the inter-over-union calculation to smooth results
    respectively, these values should be small. The `include_background` class attribute can be set to False for
    an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
    background. If the non-background segmentations are small compared to the total image size they can get
    overwhelmed by the signal from the background so excluding it in such cases helps convergence.

    Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
    """
    def __init__(
        self,
        include_background: bool = True,
        to_onehot_y: bool = False,
        sigmoid: bool = False,
        softmax: bool = False,
        other_act: Optional[Callable] = None,
        squared_pred: bool = False,
        jaccard: bool = False,
        reduction: Union[LossReduction, str] = LossReduction.MEAN,
        smooth_nr: float = 1e-5,
        smooth_dr: float = 1e-5,
        batch: bool = False,
    ) -> None:
        """
        Args:
            include_background: if False channel index 0 (background category) is excluded from the calculation.
            to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
            sigmoid: if True, apply a sigmoid function to the prediction.
            softmax: if True, apply a softmax function to the prediction.
            other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute
                other activation layers, Defaults to ``None``. for example:
                `other_act = torch.tanh`.
            squared_pred: use squared versions of targets and predictions in the denominator or not.
            jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
            reduction: {``"none"``, ``"mean"``, ``"sum"``}
                Specifies the reduction to apply to the output. Defaults to ``"mean"``.
                - ``"none"``: no reduction will be applied.
                - ``"mean"``: the sum of the output will be divided by the number of elements in the output.
                - ``"sum"``: the output will be summed.
            smooth_nr: a small constant added to the numerator to avoid zero.
            smooth_dr: a small constant added to the denominator to avoid nan.
            batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
                Defaults to False, a Dice loss value is computed independently from each item in the batch
                before any `reduction`.
        Raises:
            TypeError: When ``other_act`` is not an ``Optional[Callable]``.
            ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
                Incompatible values.
        """
        super().__init__(reduction=LossReduction(reduction).value)
        if other_act is not None and not callable(other_act):
            raise TypeError(f"other_act must be None or callable but is {type(other_act).__name__}.")
        if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
            raise ValueError("Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].")
        self.include_background = include_background
        self.to_onehot_y = to_onehot_y
        self.sigmoid = sigmoid
        self.softmax = softmax
        self.other_act = other_act
        self.squared_pred = squared_pred
        self.jaccard = jaccard
        self.smooth_nr = float(smooth_nr)
        self.smooth_dr = float(smooth_dr)
        self.batch = batch
    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].
        Raises:
            AssertionError: When ``target`` and ``input`` (after optional one-hot
                conversion / background removal) have differing shapes.
            ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
        """
        if self.sigmoid:
            input = torch.sigmoid(input)
        n_pred_ch = input.shape[1]
        if self.softmax:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `softmax=True` ignored.")
            else:
                input = torch.softmax(input, 1)
        if self.other_act is not None:
            input = self.other_act(input)
        if self.to_onehot_y:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `to_onehot_y=True` ignored.")
            else:
                target = one_hot(target, num_classes=n_pred_ch)
        if not self.include_background:
            if n_pred_ch == 1:
                warnings.warn("single channel prediction, `include_background=False` ignored.")
            else:
                # if skipping background, removing first channel
                target = target[:, 1:]
                input = input[:, 1:]
        # `assert` statements are stripped under `python -O`, which would silently
        # disable this shape check; raise explicitly instead (AssertionError is
        # kept so existing callers catching it are unaffected).
        if target.shape != input.shape:
            raise AssertionError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
        # reducing only spatial dimensions (not batch nor channels)
        reduce_axis = list(range(2, len(input.shape)))
        if self.batch:
            # reducing spatial dimensions and batch
            reduce_axis = [0] + reduce_axis
        # Intersection is computed before the optional squaring so that
        # `squared_pred` affects only the denominator terms.
        intersection = torch.sum(target * input, dim=reduce_axis)
        if self.squared_pred:
            target = torch.pow(target, 2)
            input = torch.pow(input, 2)
        ground_o = torch.sum(target, dim=reduce_axis)
        pred_o = torch.sum(input, dim=reduce_axis)
        denominator = ground_o + pred_o
        if self.jaccard:
            denominator = 2.0 * (denominator - intersection)
        f: torch.Tensor = 1.0 - (2.0 * intersection + self.smooth_nr) / (denominator + self.smooth_dr)
        if self.reduction == LossReduction.MEAN.value:
            f = torch.mean(f)  # the batch and channel average
        elif self.reduction == LossReduction.SUM.value:
            f = torch.sum(f)  # sum over the batch and channel dims
        elif self.reduction == LossReduction.NONE.value:
            pass  # returns [N, n_classes] losses
        else:
            raise ValueError(f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].')
        return f
class MaskedDiceLoss(DiceLoss):
    """
    Add an additional `masking` process before `DiceLoss`, accept a binary mask ([0, 1]) indicating a region,
    `input` and `target` will be masked by the region: region with mask `1` will keep the original value,
    region with `0` mask will be converted to `0`. Then feed `input` and `target` to normal `DiceLoss` computation.
    This has the effect of ensuring only the masked region contributes to the loss computation and
    hence gradient calculation.
    """
    def forward(self, input: torch.Tensor, target: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Args:
            input: the shape should be BNH[WD].
            target: the shape should be BNH[WD].
            mask: the shape should B1H[WD] or 11H[WD].
        Raises:
            AssertionError: When ``mask`` is incompatible with ``input`` in
                rank, batch size, channel count, or spatial size.
        """
        if mask is not None:
            # checking if mask is of proper shape.  `assert` statements are
            # stripped under `python -O`, so the checks are raised explicitly
            # (AssertionError kept for backward compatibility with callers).
            if input.dim() != mask.dim():
                raise AssertionError(f"dim of input ({input.shape}) is different from mask ({mask.shape})")
            if not (input.shape[0] == mask.shape[0] or mask.shape[0] == 1):
                raise AssertionError(f" batch size of mask ({mask.shape}) must be 1 or equal to input ({input.shape})")
            if target.dim() > 1:
                if mask.shape[1] != 1:
                    raise AssertionError(f"mask ({mask.shape}) must have only 1 channel")
                if input.shape[2:] != mask.shape[2:]:
                    raise AssertionError(f"spatial size of input ({input.shape}) is different from mask ({mask.shape})")
            input = input * mask
            target = target * mask
        else:
            warnings.warn("no mask value specified for the MaskedDiceLoss.")
        return super().forward(input=input, target=target)
class GeneralizedDiceLoss(_Loss):
"""
Compute the generalised Dice loss defined in:
Sudre, C. et. al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
def __init__(
self,
include_background: bool = True,
to_onehot_y: bool = False,
sigmoid: bool = False,
softmax: bool = False,
other_act: Optional[Callable] = None,
w_type: Union[Weight, str] = Weight.SQUARE,
reduction: Union[LossReduction, str] = LossReduction.MEAN,
smooth_nr: float = 1e-5,
smooth_dr: float = 1e-5,
batch: bool = False,
) -> None:
"""
Args:
include_background: If False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute
other activation layers, Defaults to ``None``. for example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
w_type: {``"square"``, ``"simple"``, ``"uniform"``}
Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the | |
MoloSurveyPage.objects.get(
slug='french-translation-of-test-survey')
translated_survey.save_revision().publish()
response = self.client.get(self.section.url)
self.assertContains(response,
'<h1 class="surveys__title">Test Survey</h1>')
self.assertNotContains(
response,
'<h1 class="surveys__title">French translation of Test Survey</h1>'
)
response = self.client.get('/locale/fr/')
response = self.client.get(self.section.url)
self.assertNotContains(
response,
'<h1 class="surveys__title">Test Survey</h1>')
self.assertContains(
response,
'<h1 class="surveys__title">French translation of Test Survey</h1>'
)
def test_survey_template_tag_on_article_page(self):
molo_survey_page, molo_survey_form_field = \
self.create_molo_survey_page_with_field(parent=self.article)
response = self.client.get(self.article.url)
self.assertContains(response,
'Take The Survey</a>'.format(
molo_survey_page.url))
self.assertContains(response, molo_survey_page.homepage_introduction)
def test_survey_list_display_direct_logged_out(self):
molo_survey_page, molo_survey_form_field = \
self.create_molo_survey_page_with_field(
parent=self.surveys_index,
display_survey_directly=True)
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
self.assertContains(response, 'Please log in to take this survey')
self.assertNotContains(response, molo_survey_form_field.label)
def test_survey_list_display_direct_logged_in(self):
molo_survey_page, molo_survey_form_field = \
self.create_molo_survey_page_with_field(
parent=self.surveys_index,
display_survey_directly=True)
self.user = self.login()
response = self.client.get('/')
self.assertEquals(response.status_code, 200)
self.assertNotContains(response, 'Please log in to take this survey')
self.assertContains(response, molo_survey_form_field.label)
response = self.client.post(molo_survey_page.url, {
molo_survey_form_field.label.lower().replace(' ', '-'): 'python'
}, follow=True)
self.assertContains(response, molo_survey_page.thank_you_text)
response = self.client.get('/')
self.assertNotContains(response, molo_survey_form_field.label)
self.assertContains(response,
'You have already completed this survey.')
def test_anonymous_submissions_option_display_direct(self):
molo_survey_page, molo_survey_form_field = \
self.create_molo_survey_page_with_field(
parent=self.surveys_index,
display_survey_directly=True,
allow_anonymous_submissions=True,
)
response = self.client.get('/')
self.assertContains(response, molo_survey_form_field.label)
response = self.client.post(molo_survey_page.url, {
molo_survey_form_field.label.lower().replace(' ', '-'): 'python'
}, follow=True)
self.assertContains(response, molo_survey_page.thank_you_text)
response = self.client.get('/')
self.assertNotContains(response, molo_survey_form_field.label)
self.assertContains(response,
'You have already completed this survey.')
def test_multiple_submissions_display_direct(self):
molo_survey_page, molo_survey_form_field = \
self.create_molo_survey_page_with_field(
parent=self.surveys_index,
display_survey_directly=True,
allow_multiple_submissions_per_user=True,
)
self.user = self.login()
response = self.client.post(molo_survey_page.url, {
molo_survey_form_field.label.lower().replace(' ', '-'): 'python'
}, follow=True)
self.assertContains(response, molo_survey_page.thank_you_text)
response = self.client.get('/')
self.assertContains(response, molo_survey_form_field.label)
self.assertNotContains(response,
'You have already completed this survey.')
class TestDeleteButtonRemoved(TestCase, MoloTestCaseMixin):
    """The Wagtail admin must never offer a Delete action for the
    surveys index page."""

    def setUp(self):
        self.mk_main()
        self.main = Main.objects.first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.login()
        self.surveys_index = SurveysIndexPage(
            title='Security Questions',
            slug='security-questions')
        self.main.add_child(instance=self.surveys_index)
        self.surveys_index.save_revision().publish()

    def test_delete_btn_removed_for_surveys_index_page_in_main(self):
        """The page-listing row for the index offers Edit but no Delete."""
        root_page = Main.objects.first()
        response = self.client.get('/admin/pages/{0}/'
                                   .format(str(root_page.pk)))
        self.assertEquals(response.status_code, 200)
        index_title = SurveysIndexPage.objects.first().title
        soup = BeautifulSoup(response.content, 'html.parser')
        listing_rows = soup.find_all('tbody')[0].find_all('tr')
        for listing_row in listing_rows:
            if listing_row.h2.a.string == index_title:
                self.assertTrue(listing_row.find('a', string='Edit'))
                self.assertFalse(listing_row.find('a', string='Delete'))

    def test_delete_button_removed_from_dropdown_menu(self):
        """The page explorer dropdown omits the Delete link."""
        index_page = SurveysIndexPage.objects.first()
        response = self.client.get('/admin/pages/{0}/'
                                   .format(str(index_page.pk)))
        self.assertEquals(response.status_code, 200)
        delete_link = ('<a href="/admin/pages/{0}/delete/" '
                       'title="Delete this page" class="u-link '
                       'is-live ">Delete</a>'
                       .format(str(index_page.pk)))
        self.assertNotContains(response, delete_link, html=True)

    def test_delete_button_removed_in_edit_menu(self):
        """The edit view's shortcut menu omits the Delete entry."""
        index_page = SurveysIndexPage.objects.first()
        response = self.client.get('/admin/pages/{0}/edit/'
                                   .format(str(index_page.pk)))
        self.assertEquals(response.status_code, 200)
        delete_button = ('<li><a href="/admin/pages/{0}/delete/" '
                         'class="shortcut">Delete</a></li>'
                         .format(str(index_page.pk)))
        self.assertNotContains(response, delete_button, html=True)
class TestSkipLogicSurveyView(TestCase, MoloTestCaseMixin):
    def setUp(self):
        """Build two published surveys and three form fields for the
        skip-logic tests.

        `molo_survey_page` carries three questions ordered by sort_order:
        1) a dropdown with skip logic targeting 'next', 'end', the other
        survey, and a specific later question; 2) a plain text question;
        3) the final text question the skip logic can jump to.
        `another_molo_survey_page` has a single question of its own.
        """
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.molo_survey_page = self.new_survey('Test Survey')
        self.another_molo_survey_page = self.new_survey('Another Test Survey')
        # Created first because the skip-logic field below references it
        # as a jump target.
        self.last_molo_survey_form_field = MoloSurveyFormField.objects.create(
            page=self.molo_survey_page,
            sort_order=3,
            label='Your favourite actor',
            field_type='singleline',
            required=True
        )
        self.choices = ['next', 'end', 'survey', 'question']
        self.skip_logic_form_field = MoloSurveyFormField.objects.create(
            page=self.molo_survey_page,
            sort_order=1,
            label='Where should we go',
            field_type='dropdown',
            skip_logic=skip_logic_data(
                self.choices,
                self.choices,
                survey=self.another_molo_survey_page,
                question=self.last_molo_survey_form_field,
            ),
            required=True
        )
        self.molo_survey_form_field = MoloSurveyFormField.objects.create(
            page=self.molo_survey_page,
            sort_order=2,
            label='Your favourite animal',
            field_type='singleline',
            required=True
        )
        self.another_molo_survey_form_field = (
            MoloSurveyFormField.objects.create(
                page=self.another_molo_survey_page,
                sort_order=1,
                label='Your favourite actress',
                field_type='singleline',
                required=True
            )
        )
def new_survey(self, name):
survey = MoloSurveyPage(
title=name, slug=slugify(name),
introduction='Introduction to {}...'.format(name),
thank_you_text='Thank you for taking the {}'.format(name),
submit_text='survey submission text for {}'.format(name),
allow_anonymous_submissions=True,
)
self.section_index.add_child(instance=survey)
survey.save_revision().publish()
return survey
def assertSurveyAndQuestions(self, response, survey, questions):
self.assertContains(response, survey.title)
self.assertContains(response, survey.introduction)
for question in questions:
self.assertContains(response, question.label)
self.assertContains(response, question.label)
    def test_skip_logic_next_question(self):
        """Choosing the 'next' option advances to the following question
        and the survey finishes normally."""
        # Page 1: only the skip-logic dropdown is shown.
        response = self.client.get(self.molo_survey_page.url)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.skip_logic_form_field]
        )
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label
        )
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, 'Next Question')
        # Answer 'next' -> the remaining two questions are presented.
        response = self.client.post(self.molo_survey_page.url + '?p=2', {
            self.skip_logic_form_field.clean_name: self.choices[0],
        })
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.molo_survey_form_field, self.last_molo_survey_form_field]
        )
        self.assertNotContains(response, self.skip_logic_form_field.label)
        self.assertContains(response, self.molo_survey_page.submit_text)
        # Submit the final page and land on the thank-you text.
        response = self.client.post(self.molo_survey_page.url + '?p=3', {
            self.molo_survey_form_field.clean_name: 'python',
            self.last_molo_survey_form_field.clean_name: '<NAME> ;)',
        }, follow=True)
        self.assertContains(response, self.molo_survey_page.thank_you_text)
    def test_skip_logic_to_end(self):
        """Choosing the 'end' skip-logic option finishes the survey at once,
        without enforcing the later required fields."""
        response = self.client.get(self.molo_survey_page.url)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.skip_logic_form_field]
        )
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label,
        )
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, 'Next Question')
        # choices[1] is the branch that terminates the survey early.
        response = self.client.post(self.molo_survey_page.url + '?p=2', {
            self.skip_logic_form_field.clean_name: self.choices[1],
        }, follow=True)
        # Should end the survey and not complain about required
        # field for the last field
        self.assertContains(response, self.molo_survey_page.title)
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label
        )
        self.assertNotContains(response, self.molo_survey_page.submit_text)
        self.assertContains(response, self.molo_survey_page.thank_you_text)
    def test_skip_logic_to_another_survey(self):
        """Choosing the 'survey' skip-logic option redirects the user into the
        linked survey instead of continuing this one."""
        response = self.client.get(self.molo_survey_page.url)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.skip_logic_form_field]
        )
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label
        )
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, 'Next Question')
        response = self.client.post(self.molo_survey_page.url + '?p=2', {
            self.skip_logic_form_field.clean_name: self.choices[2],
        }, follow=True)
        # Should end the survey and progress to the new survey
        self.assertSurveyAndQuestions(
            response,
            self.another_molo_survey_page,
            [self.another_molo_survey_form_field],
        )
    def test_skip_logic_to_another_question(self):
        """Choosing the 'question' skip-logic option jumps directly to the
        chosen later question within the same survey."""
        response = self.client.get(self.molo_survey_page.url)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.skip_logic_form_field]
        )
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label,
        )
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, 'Next Question')
        response = self.client.post(self.molo_survey_page.url + '?p=2', {
            self.skip_logic_form_field.clean_name: self.choices[3],
        }, follow=True)
        # Should skip ahead to the targeted question in the same survey
        # (the intervening molo_survey_form_field page is bypassed).
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.last_molo_survey_form_field],
        )
    def test_skip_logic_checkbox_with_data(self):
        """A checked checkbox follows the skip logic of its 'true' branch and
        continues through the remaining questions."""
        # Reconfigure the skip-logic field as a checkbox whose two branches
        # map onto the first two choices.
        self.skip_logic_form_field.field_type = 'checkbox'
        self.skip_logic_form_field.skip_logic = skip_logic_data(
            ['', ''],
            self.choices[:2],
        )
        self.skip_logic_form_field.save()
        response = self.client.get(self.molo_survey_page.url)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.skip_logic_form_field]
        )
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label,
        )
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, 'Next Question')
        # 'on' is the browser's posted value for a checked checkbox.
        response = self.client.post(self.molo_survey_page.url + '?p=2', {
            self.skip_logic_form_field.clean_name: 'on',
        }, follow=True)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.molo_survey_form_field, self.last_molo_survey_form_field]
        )
        self.assertNotContains(response, self.skip_logic_form_field.label)
        self.assertContains(response, self.molo_survey_page.submit_text)
        response = self.client.post(self.molo_survey_page.url + '?p=3', {
            self.molo_survey_form_field.clean_name: 'python',
            self.last_molo_survey_form_field.clean_name: '<NAME> ;)',
        }, follow=True)
        self.assertContains(response, self.molo_survey_page.thank_you_text)
    def test_skip_logic_checkbox_no_data(self):
        """An unchecked checkbox (absent from POST data) follows the 'false'
        branch, which here ends the survey immediately."""
        self.skip_logic_form_field.field_type = 'checkbox'
        self.skip_logic_form_field.skip_logic = skip_logic_data(
            ['', ''],
            self.choices[:2],
        )
        self.skip_logic_form_field.save()
        response = self.client.get(self.molo_survey_page.url)
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.skip_logic_form_field]
        )
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label,
        )
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, 'Next Question')
        # Unchecked checkboxes have no data sent to the backend
        # Data cannot be empty as we will be submitting the csrf token
        response = self.client.post(
            self.molo_survey_page.url + '?p=2',
            {'csrf': 'dummy'},
            follow=True,
        )
        self.assertContains(response, self.molo_survey_page.title)
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertNotContains(
            response,
            self.last_molo_survey_form_field.label
        )
        self.assertNotContains(response, self.molo_survey_page.submit_text)
        self.assertContains(response, self.molo_survey_page.thank_you_text)
    def test_skip_logic_missed_required_with_checkbox(self):
        """Skipping over a required question via checkbox logic still enforces
        the required field on the page the user lands on."""
        self.skip_logic_form_field.field_type = 'checkbox'
        self.skip_logic_form_field.skip_logic = skip_logic_data(
            ['', ''],
            [self.choices[3], self.choices[2]],  # question, survey
            survey=self.another_molo_survey_page,
            question=self.last_molo_survey_form_field,
        )
        self.skip_logic_form_field.save()
        # Skip a required question
        response = self.client.post(
            self.molo_survey_page.url + '?p=2',
            {self.skip_logic_form_field.clean_name: 'on'},
            follow=True,
        )
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.last_molo_survey_form_field]
        )
        self.assertNotContains(response, self.skip_logic_form_field.label)
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, self.molo_survey_page.submit_text)
        # Dont answer last required question: trigger error messages
        response = self.client.post(
            self.molo_survey_page.url + '?p=3',
            {self.last_molo_survey_form_field.clean_name: ''},
            follow=True,
        )
        # Go back to the same page with validation errors showing
        self.assertSurveyAndQuestions(
            response,
            self.molo_survey_page,
            [self.last_molo_survey_form_field]
        )
        self.assertContains(response, 'required')
        self.assertNotContains(response, self.skip_logic_form_field.label)
        self.assertNotContains(response, self.molo_survey_form_field.label)
        self.assertContains(response, self.molo_survey_page.submit_text)
    def test_skip_logic_required_with_radio_button_field(self):
        """A required radio field with skip logic must be answered before the
        survey advances; posting without it re-renders with 'required'."""
        self.user = User.objects.create_user(
            username='tester',
            email='<EMAIL>',
            password='<PASSWORD>')
        self.client.login(username='tester', password='<PASSWORD>')
        survey = MoloSurveyPage(
            title='Test Survey With Redio Button',
            slug='testw-survey-with-redio-button',
        )
        another_survey = MoloSurveyPage(
            title='Anotherw Test Survey',
            slug='anotherw-test-survey',
        )
        self.section_index.add_child(instance=survey)
        survey.save_revision().publish()
        self.section_index.add_child(instance=another_survey)
        another_survey.save_revision().publish()
        field_choices = ['next', 'end']
        # sort_order 4: a later question that the radio field can jump to.
        third_field = MoloSurveyFormField.objects.create(
            page=survey,
            sort_order=4,
            label='A random animal',
            field_type='dropdown',
            skip_logic=skip_logic_data(
                field_choices,
                field_choices,
            ),
            required=True
        )
        # The required radio field under test; its skip logic can also jump
        # to third_field or to another_survey.
        first_field = MoloSurveyFormField.objects.create(
            page=survey,
            sort_order=1,
            label='Your other favourite animal',
            field_type='radio',
            skip_logic=skip_logic_data(
                field_choices + ['question', 'survey'],
                field_choices + ['question', 'survey'],
                question=third_field,
                survey=another_survey,
            ),
            required=True
        )
        second_field = MoloSurveyFormField.objects.create(
            page=survey,
            sort_order=2,
            label='Your favourite animal',
            field_type='dropdown',
            skip_logic=skip_logic_data(
                field_choices,
                field_choices,
            ),
            required=True
        )
        # NOTE(review): this POST uses the ``another_survey`` page object
        # itself as the dict key rather than a field clean_name. The required
        # error still triggers because first_field receives no value, but this
        # looks like a copy-paste mistake — confirm the intended key.
        response = self.client.post(
            survey.url + '?p=2',
            {another_survey: ''},
            follow=True,
        )
        self.assertContains(response, 'required')
        self.assertNotContains(response, second_field.label)
        self.assertContains(response, first_field.label)
class TestPositiveNumberView(TestCase, MoloTestCaseMixin):
    """Validation tests for the ``positive_number`` survey field type."""

    def setUp(self):
        # Build the site scaffolding: main page, language, surveys index.
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.surveys_index = SurveysIndexPage(
            title='Surveys',
            slug='surveys')
        self.main.add_child(instance=self.surveys_index)
        self.surveys_index.save_revision().publish()

    def test_positive_number_field_validation(self):
        """A negative value is rejected with a validation message; a
        non-negative value submits the survey successfully."""
        self.user = User.objects.create_user(
            username='tester',
            email='<EMAIL>',
            password='<PASSWORD>')
        self.client.login(username='tester', password='<PASSWORD>')
        survey = MoloSurveyPage(
            title='Test Survey With Positive Number',
            slug='testw-survey-with-positive-number',
            thank_you_text='Thank you for taking the survey',
        )
        self.surveys_index.add_child(instance=survey)
        survey.save_revision().publish()
        positive_number_field = MoloSurveyFormField.objects.create(
            page=survey,
            sort_order=1,
            label='Your lucky number?',
            field_type='positive_number',
            required=True
        )
        # Negative input: form is re-rendered with the min-value error.
        response = self.client.post(
            survey.url + '?p=2',
            {positive_number_field.clean_name: '-1'},
            follow=True,
        )
        self.assertContains(response, positive_number_field.label)
        self.assertContains(
            response, 'Ensure this value is greater than or equal to 0')
        # Valid input: submission completes and the thank-you text is shown.
        response = self.client.post(
            survey.url + '?p=2',
            {positive_number_field.clean_name: '1'},
            follow=True,
        )
        self.assertContains(
            response, survey.thank_you_text)
class SegmentCountView(TestCase, MoloTestCaseMixin):
    """Tests for the /surveys/count/ segment-user-count endpoint."""

    def setUp(self):
        self.mk_main()
        self.user = User.objects.create_user(
            username='tester', email='<EMAIL>', password='<PASSWORD>')
        # Create survey
        self.personalisable_survey = PersonalisableSurvey(title='Test Survey')
        SurveysIndexPage.objects.first().add_child(
            instance=self.personalisable_survey
        )
        self.personalisable_survey.save_revision()
        PersonalisableSurveyFormField.objects.create(
            field_type='singleline', label='Singleline Text',
            page=self.personalisable_survey
        )

    def submit_survey(self, survey, user):
        """Store a submission for *survey* answering every field with dummy text.

        NOTE(review): the submission's ``page`` is hard-coded to
        ``self.personalisable_survey`` while the form fields come from the
        *survey* argument — fine while both are the same object, but the
        parameter is misleading; confirm intent.
        """
        submission = survey.get_submission_class()
        data = {field.clean_name: 'super random text'
                for field in survey.get_form_fields()}
        submission.objects.create(user=user, page=self.personalisable_survey,
                                  form_data=json.dumps(data))

    def test_segment_user_count(self):
        """The endpoint reports one matching user after one submission."""
        self.submit_survey(self.personalisable_survey, self.user)
        response = self.client.post('/surveys/count/', SEGMENT_FORM_DATA)
        self.assertDictEqual(response.json(), {"segmentusercount": 1})

    def test_segment_user_count_returns_errors(self):
        """Invalid form data yields per-field error messages in the response."""
        self.submit_survey(self.personalisable_survey, self.user)
        data = SEGMENT_FORM_DATA
        data['name'] = [""]
        data['surveys_surveyresponserule_related-0-survey'] = ['20']
        response = self.client.post('/surveys/count/', data)
        self.assertDictEqual(response.json(), {"errors": {
            "surveys_surveyresponserule_related-0-survey": [
                "Select a valid choice. That choice is not one of the "
                "available choices."],
            "name": ["This field is required."]}})
class TestPollsViaSurveysView(TestCase, MoloTestCaseMixin):
"""
Tests to check if polls are not
being paginated when they include fields with skip_logic_data.
Also test that page_break is not causing any pagination on the surveys
"""
def setUp(self):
self.mk_main()
self.choices = ['next', 'end', 'survey']
self.surveys_index = SurveysIndexPage.objects.first()
def test_molo_poll(self):
survey = create_molo_survey_page(
self.surveys_index, display_survey_directly=True)
drop_down_field = create_molo_dropddown_field(
self.surveys_index, survey, self.choices)
response = self.client.post(
survey.url + '?p=1',
{drop_down_field.clean_name: 'next'},
follow=True,
)
self.assertContains(response, survey.thank_you_text)
self.assertNotContains(response, 'That page number is less than 1')
def test_molo_poll_with_page_break(self):
survey = create_molo_survey_page(
self.surveys_index, display_survey_directly=True)
drop_down_field = create_molo_dropddown_field(
self.surveys_index, survey, self.choices, page_break=True)
response = self.client.post(
survey.url + '?p=1',
{drop_down_field.clean_name: | |
import abc
from .aes import aes
from .stats import StatCount, StatIdentity, StatBin, StatNone, StatFunction
class FigureAttribute(abc.ABC):
    """Abstract marker base class for anything addable to a ggplot figure."""
    pass
class Geom(FigureAttribute):
    """Abstract base class for geometric plot layers (geoms)."""

    def __init__(self, aes):
        # Geom-local aesthetic mapping; subclasses consult it alongside the
        # parent plot's aesthetics.
        self.aes = aes

    @abc.abstractmethod
    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        """Mutate ``fig_so_far`` by adding this geom's traces from ``agg_result``."""
        pass

    @abc.abstractmethod
    def get_stat(self):
        """Return the statistical transformation this geom applies to its data."""
        pass
class GeomLineBasic(Geom):
    """Shared implementation for line geoms.

    Draws one "lines"-mode scatter trace per color group, or a single trace
    when an explicit ``color`` was supplied (or black when no color aesthetic
    exists at all).
    """

    def __init__(self, aes, color):
        super().__init__(aes)
        # An explicit color overrides any "color" aesthetic grouping.
        self.color = color

    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        def plot_group(data, color):
            scatter_args = {
                "x": [element["x"] for element in data],
                "y": [element["y"] for element in data],
                "mode": "lines",
                "line_color": color
            }
            if "color_legend" in data[0]:
                scatter_args["name"] = data[0]["color_legend"]
            if "tooltip" in parent.aes or "tooltip" in self.aes:
                scatter_args["hovertext"] = [element["tooltip"] for element in data]
            fig_so_far.add_scatter(**scatter_args)

        if self.color is not None:
            plot_group(agg_result, self.color)
        elif "color" in parent.aes or "color" in self.aes:
            # Idiomatic set comprehension (was ``set([... for ...])``).
            groups = {element["group"] for element in agg_result}
            for group in groups:
                just_one_group = [element for element in agg_result if element["group"] == group]
                plot_group(just_one_group, just_one_group[0]["color"])
        else:
            plot_group(agg_result, "black")

    @abc.abstractmethod
    def get_stat(self):
        return ...
class GeomPoint(Geom):
    """Scatter-point geom: adds one markers-mode trace per color group."""

    def __init__(self, aes, color=None):
        super().__init__(aes)
        self.color = color

    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        def add_trace(rows, marker_color=None):
            # With no explicit color, fall back to per-row color values.
            if marker_color is None:
                marker_color = [row["color"] for row in rows]
            trace = {
                "x": [row["x"] for row in rows],
                "y": [row["y"] for row in rows],
                "mode": "markers",
                "marker_color": marker_color
            }
            first = rows[0]
            if "color_legend" in first:
                trace["name"] = first["color_legend"]
            if "size" in parent.aes or "size" in self.aes:
                trace["marker_size"] = [row["size"] for row in rows]
            if "tooltip" in parent.aes or "tooltip" in self.aes:
                trace["hovertext"] = [row["tooltip"] for row in rows]
            fig_so_far.add_scatter(**trace)

        if self.color is not None:
            add_trace(agg_result, self.color)
        elif "color" in parent.aes or "color" in self.aes:
            for group in {row["group"] for row in agg_result}:
                add_trace([row for row in agg_result if row["group"] == group])
        else:
            add_trace(agg_result, "black")

    def get_stat(self):
        return StatIdentity()
def geom_point(mapping=aes(), *, color=None):
    """Create a scatter plot.

    Supported aesthetics: ``x``, ``y``, ``color``, ``size``, ``tooltip``

    Parameters
    ----------
    mapping: :class:`Aesthetic`
        Any aesthetics specific to this geom.
    color:
        A single color for all points, overrides the ``color`` aesthetic.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomPoint(mapping, color=color)
class GeomLine(GeomLineBasic):
    """Concrete line geom using the identity stat (plots data as given)."""

    def __init__(self, aes, color=None):
        # GeomLineBasic.__init__ already stores ``color``; the previous
        # duplicate ``self.color = color`` assignment and the pass-through
        # ``apply_to_fig`` override were redundant and have been removed.
        super().__init__(aes, color)

    def get_stat(self):
        return StatIdentity()
def geom_line(mapping=aes(), *, color=None):
    """Create a line plot.

    Supported aesthetics: ``x``, ``y``, ``color``, ``tooltip``

    Parameters
    ----------
    mapping: :class:`Aesthetic`
        Any aesthetics specific to this geom.
    color:
        A single color for all lines, overrides the ``color`` aesthetic.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomLine(mapping, color=color)
class GeomText(Geom):
    """Geom rendering each data point as its ``label`` text."""

    def __init__(self, aes, color=None):
        super().__init__(aes)
        self.color = color

    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        def draw(rows, text_color=None):
            trace = {
                "x": [row["x"] for row in rows],
                "y": [row["y"] for row in rows],
                "text": [row["label"] for row in rows],
                "mode": "text",
                "textfont_color": text_color
            }
            if "color_legend" in rows[0]:
                trace["name"] = rows[0]["color_legend"]
            if "tooltip" in parent.aes or "tooltip" in self.aes:
                trace["hovertext"] = [row["tooltip"] for row in rows]
            if "size" in parent.aes or "size" in self.aes:
                trace["marker_size"] = [row["size"] for row in rows]
            fig_so_far.add_scatter(**trace)

        if self.color is not None:
            draw(agg_result, self.color)
        elif "color" in parent.aes or "color" in self.aes:
            for group in {row["group"] for row in agg_result}:
                members = [row for row in agg_result if row["group"] == group]
                draw(members, members[0]["color"])
        else:
            draw(agg_result, "black")

    def get_stat(self):
        return StatIdentity()
def geom_text(mapping=aes(), *, color=None):
    """Create a scatter plot where each point is text from the ``label`` aesthetic.

    Supported aesthetics: ``x``, ``y``, ``label``, ``color``, ``size``, ``tooltip``

    Parameters
    ----------
    mapping: :class:`Aesthetic`
        Any aesthetics specific to this geom.
    color:
        A single text color for all points, overrides the ``color`` aesthetic.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomText(mapping, color=color)
class GeomBar(Geom):
    """Bar-chart geom.

    The default stat counts occurrences of ``x``; passing
    ``stat=StatIdentity()`` (as :func:`geom_col` does) uses ``y`` values
    directly.
    """

    # ggplot position names -> plotly barmode names; hoisted to a class
    # attribute so it is not rebuilt on every apply_to_fig call.
    _GGPLOT_TO_PLOTLY = {'dodge': 'group', 'stack': 'stack'}

    def __init__(self, aes, fill=None, color=None, position="stack", size=None, stat=None):
        super().__init__(aes)
        self.fill = fill
        self.color = color
        self.position = position
        self.size = size
        # Default to counting occurrences when no stat is supplied.
        self.stat = StatCount() if stat is None else stat

    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        def plot_group(data):
            # Precedence for bar fill: explicit fill > per-row "fill" > black.
            if self.fill is not None:
                fill = self.fill
            elif "fill" in data[0]:
                fill = [element["fill"] for element in data]
            else:
                fill = "black"
            bar_args = {
                "x": [element["x"] for element in data],
                "y": [element["y"] for element in data],
                "marker_color": fill
            }
            if "color_legend" in data[0]:
                bar_args["name"] = data[0]["color_legend"]
            # ``color`` styles the bar outline (ggplot semantics).
            if self.color is None and "color" in data[0]:
                bar_args["marker_line_color"] = [element["color"] for element in data]
            elif self.color is not None:
                bar_args["marker_line_color"] = self.color
            if self.size is not None:
                bar_args["marker_line_width"] = self.size
            fig_so_far.add_bar(**bar_args)

        groups = {element["group"] for element in agg_result}
        for group in groups:
            plot_group([element for element in agg_result if element["group"] == group])

        fig_so_far.update_layout(barmode=self._GGPLOT_TO_PLOTLY[self.position])

    def get_stat(self):
        return self.stat
def geom_bar(mapping=aes(), *, fill=None, color=None, position="stack", size=None):
    """Create a bar chart that counts occurrences of the various values of the ``x`` aesthetic.

    Supported aesthetics: ``x``, ``color``, ``fill``

    Parameters
    ----------
    mapping: :class:`Aesthetic`
        Any aesthetics specific to this geom.
    fill:
        A single fill color for all bars, overrides the ``fill`` aesthetic.
    color:
        A single outline color for all bars, overrides the ``color`` aesthetic.
    position: :class:`str`
        How to draw overlapping groups: "stack" or "dodge".
    size:
        Width of the bar outline line.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomBar(mapping, fill=fill, color=color, position=position, size=size)
def geom_col(mapping=aes(), *, fill=None, color=None, position="stack", size=None):
    """Create a bar chart that uses bar heights specified in y aesthetic.

    Supported aesthetics: ``x``, ``y``, ``color``, ``fill``

    Parameters
    ----------
    mapping: :class:`Aesthetic`
        Any aesthetics specific to this geom.
    fill:
        A single fill color for all bars, overrides the ``fill`` aesthetic.
    color:
        A single outline color for all bars, overrides the ``color`` aesthetic.
    position: :class:`str`
        How to draw overlapping groups: "stack" or "dodge".
    size:
        Width of the bar outline line.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomBar(mapping, stat=StatIdentity(), fill=fill, color=color, position=position, size=size)
class GeomHistogram(Geom):
    """Histogram geom: bins ``x`` values via :class:`StatBin` and draws bars.

    Supports stacked or dodged bars per fill group.
    """

    def __init__(self, aes, min_val=None, max_val=None, bins=None, fill=None, color=None, position='stack', size=None):
        super().__init__(aes)
        # Explicit bounds/bin-count override the precomputed/default values
        # resolved inside apply_to_fig.
        self.min_val = min_val
        self.max_val = max_val
        self.bins = bins
        self.fill = fill
        self.color = color
        self.position = position
        self.size = size

    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        # Fall back to data-derived bounds when none were given explicitly.
        min_val = self.min_val if self.min_val is not None else precomputed.min_val
        max_val = self.max_val if self.max_val is not None else precomputed.max_val
        # This assumes it doesn't really make sense to use another stat for geom_histogram
        bins = self.bins if self.bins is not None else self.get_stat().DEFAULT_BINS
        bin_width = (max_val - min_val) / bins

        def plot_group(data, num_groups):
            # NOTE(review): rows are read both via attributes (element.x,
            # element.group) and via subscripts (element["y"]); presumably the
            # row type supports both access styles — confirm.
            x = []
            left_xs = []
            right_xs = []
            for element in data:
                left_x = element.x
                # Bar centers: dodged bars share each bin, stacked bars sit at
                # the bin center. NOTE(review): ``center_x`` is only bound for
                # 'dodge'/'stack'; any other position would raise NameError.
                if self.position == "dodge":
                    group = element.group
                    center_x = left_x + bin_width * (2 * group + 1) / (2 * num_groups)

                elif self.position == "stack":
                    center_x = left_x + bin_width / 2

                left_xs.append(left_x)
                x.append(center_x)
                right_xs.append(left_x + bin_width)

            if self.fill is None:
                if "fill" in data[0]:
                    fill = [element["fill"] for element in data]
                else:
                    fill = "black"
            else:
                fill = self.fill

            hist_args = {
                "x": x,
                "y": [element["y"] for element in data],
                "marker_color": fill,
                # Bin edges ride along for the hover template below.
                "customdata": list(zip(left_xs, right_xs)),
                "hovertemplate":
                    "Range: [%{customdata[0]:.3f}-%{customdata[1]:.3f})<br>"
                    "Count: %{y}<br>"
                    "<extra></extra>",
            }

            if self.color is None and "color" in data[0]:
                hist_args["marker_line_color"] = [element["color"] for element in data]
            elif self.color is not None:
                hist_args["marker_line_color"] = self.color

            if self.size is not None:
                hist_args["marker_line_width"] = self.size

            if "fill_legend" in data[0]:
                hist_args["name"] = data[0]["fill_legend"]

            # Dodged bars split each bin's width among the groups.
            width = bin_width if self.position == 'stack' else bin_width / num_groups
            hist_args["width"] = [width] * len(data)
            fig_so_far.add_bar(**hist_args)

        groups = set([element["group"] for element in agg_result])

        for group in groups:
            just_one_group = [element for element in agg_result if element["group"] == group]
            plot_group(just_one_group, len(groups))

        ggplot_to_plotly = {'dodge': 'group', 'stack': 'stack'}
        fig_so_far.update_layout(barmode=ggplot_to_plotly[self.position])

    def get_stat(self):
        return StatBin(self.min_val, self.max_val, self.bins)
def geom_histogram(mapping=aes(), *, min_val=None, max_val=None, bins=None, fill=None, color=None, position='stack',
                   size=None):
    """Creates a histogram.

    Note: this function currently does not support same interface as R's ggplot.

    Supported aesthetics: ``x``, ``color``, ``fill``

    Parameters
    ----------
    mapping: :class:`Aesthetic`
        Any aesthetics specific to this geom.
    min_val: `int` or `float`
        Minimum value to include in histogram
    max_val: `int` or `float`
        Maximum value to include in histogram
    bins: `int`
        Number of bins to plot. 30 by default.
    fill:
        A single fill color for all bars of histogram, overrides ``fill`` aesthetic.
    color:
        A single outline color for all bars of histogram, overrides ``color`` aesthetic.
    position: :class:`str`
        Tells how to deal with different groups of data at same point. Options are "stack" and "dodge".
    size:
        Width of the bar outline line.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomHistogram(mapping, min_val, max_val, bins, fill, color, position, size)
# Maps ggplot linetype names to their plotly dash-style equivalents.
linetype_dict = {
    "solid": "solid",
    "dashed": "dash",
    "dotted": "dot",
    "longdash": "longdash",
    "dotdash": "dashdot"
}
class GeomHLine(Geom):
    """Geom drawing a horizontal reference line at a fixed y value."""

    def __init__(self, yintercept, linetype="solid", color=None):
        # Consistency fix: initialize through Geom.__init__ (which stores the
        # aesthetic mapping) like every other geom, instead of assigning
        # self.aes directly. Behavior is unchanged.
        super().__init__(aes())
        self.yintercept = yintercept
        self.linetype = linetype
        self.color = color

    def apply_to_fig(self, parent, agg_result, fig_so_far, precomputed):
        line_attributes = {
            "y": self.yintercept,
            "line_dash": linetype_dict[self.linetype]
        }
        # Omitting line_color leaves the plotting backend's default.
        if self.color is not None:
            line_attributes["line_color"] = self.color

        fig_so_far.add_hline(**line_attributes)

    def get_stat(self):
        return StatNone()
def geom_hline(yintercept, *, linetype="solid", color=None):
    """Plots a horizontal line at ``yintercept``.

    Parameters
    ----------
    yintercept : :class:`float`
        Location to draw line.
    linetype : :class:`str`
        Type of line to draw. Choose from "solid", "dashed", "dotted", "longdash", "dotdash".
    color : :class:`str`
        Color of line to draw. If ``None``, the plotting backend's default
        color is used.

    Returns
    -------
    :class:`FigureAttribute`
        The geom to be applied.
    """
    return GeomHLine(yintercept, linetype=linetype, color=color)
class GeomVLine(Geom):
def | |
= self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{hdsId}/instances/{instanceId}/alertsettings/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDataSourceInstanceAlertSetting', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_datasource_instance_alert_setting_list(self, device_id, hds_id, instance_id, **kwargs): # noqa: E501
"""get a list of alert settings for a device # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_alert_setting_list(device_id, hds_id, instance_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: Device-DataSource ID (required)
:param int instance_id: (required)
:return: DeviceDataSourceInstanceAlertSettingPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_datasource_instance_alert_setting_list_with_http_info(device_id, hds_id, instance_id, **kwargs) # noqa: E501
else:
(data) = self.get_device_datasource_instance_alert_setting_list_with_http_info(device_id, hds_id, instance_id, **kwargs) # noqa: E501
return data
def get_device_datasource_instance_alert_setting_list_with_http_info(self, device_id, hds_id, instance_id, **kwargs): # noqa: E501
"""get a list of alert settings for a device # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_alert_setting_list_with_http_info(device_id, hds_id, instance_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: Device-DataSource ID (required)
:param int instance_id: (required)
:return: DeviceDataSourceInstanceAlertSettingPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'hds_id', 'instance_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_datasource_instance_alert_setting_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `get_device_datasource_instance_alert_setting_list`") # noqa: E501
# verify the required parameter 'hds_id' is set
if ('hds_id' not in params or
params['hds_id'] is None):
raise ValueError("Missing the required parameter `hds_id` when calling `get_device_datasource_instance_alert_setting_list`") # noqa: E501
# verify the required parameter 'instance_id' is set
if ('instance_id' not in params or
params['instance_id'] is None):
raise ValueError("Missing the required parameter `instance_id` when calling `get_device_datasource_instance_alert_setting_list`") # noqa: E501
if 'device_id' in params and not re.search('\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `get_device_datasource_instance_alert_setting_list`, must conform to the pattern `/\d+/`") # noqa: E501
if 'hds_id' in params and not re.search('\d+', params['hds_id'] if type(params['hds_id']) is str else str(params['hds_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `hds_id` when calling `get_device_datasource_instance_alert_setting_list`, must conform to the pattern `/\d+/`") # noqa: E501
if 'instance_id' in params and not re.search('\d+', params['instance_id'] if type(params['instance_id']) is str else str(params['instance_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `instance_id` when calling `get_device_datasource_instance_alert_setting_list`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'hds_id' in params:
path_params['hdsId'] = params['hds_id'] # noqa: E501
if 'instance_id' in params:
path_params['instanceId'] = params['instance_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{hdsId}/instances/{instanceId}/alertsettings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDataSourceInstanceAlertSettingPaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_datasource_instance_by_id(self, device_id, hds_id, id, **kwargs): # noqa: E501
"""get device instance # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_by_id(device_id, hds_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: The device-datasource ID (required)
:param int id: (required)
:param str fields:
:return: DeviceDataSourceInstance
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_device_datasource_instance_by_id_with_http_info(device_id, hds_id, id, **kwargs) # noqa: E501
else:
(data) = self.get_device_datasource_instance_by_id_with_http_info(device_id, hds_id, id, **kwargs) # noqa: E501
return data
def get_device_datasource_instance_by_id_with_http_info(self, device_id, hds_id, id, **kwargs): # noqa: E501
"""get device instance # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_by_id_with_http_info(device_id, hds_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: The device-datasource ID (required)
:param int id: (required)
:param str fields:
:return: DeviceDataSourceInstance
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device_id', 'hds_id', 'id', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_device_datasource_instance_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'device_id' is set
if ('device_id' not in params or
params['device_id'] is None):
raise ValueError("Missing the required parameter `device_id` when calling `get_device_datasource_instance_by_id`") # noqa: E501
# verify the required parameter 'hds_id' is set
if ('hds_id' not in params or
params['hds_id'] is None):
raise ValueError("Missing the required parameter `hds_id` when calling `get_device_datasource_instance_by_id`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_device_datasource_instance_by_id`") # noqa: E501
if 'device_id' in params and not re.search('\d+', params['device_id'] if type(params['device_id']) is str else str(params['device_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `device_id` when calling `get_device_datasource_instance_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
if 'hds_id' in params and not re.search('\d+', params['hds_id'] if type(params['hds_id']) is str else str(params['hds_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `hds_id` when calling `get_device_datasource_instance_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_device_datasource_instance_by_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'device_id' in params:
path_params['deviceId'] = params['device_id'] # noqa: E501
if 'hds_id' in params:
path_params['hdsId'] = params['hds_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/devices/{deviceId}/devicedatasources/{hdsId}/instances/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DeviceDataSourceInstance', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_device_datasource_instance_data(self, device_id, hds_id, id, **kwargs):  # noqa: E501
    """get device instance data  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_device_datasource_instance_data(device_id, hds_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int device_id: (required)
    :param int hds_id: The device-datasource ID (required)
    :param int id: (required)
    :param float period:
    :param int start:
    :param int end:
    :param str datapoints:
    :param str format:
    :return: DeviceDataSourceInstanceData
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the lower-level call for the payload only.  Both the
    # async and the sync branch of the generated code simply forwarded
    # the result of the *_with_http_info call, so a single delegation
    # is behaviorally identical.
    kwargs['_return_http_data_only'] = True
    return self.get_device_datasource_instance_data_with_http_info(
        device_id, hds_id, id, **kwargs)  # noqa: E501
def get_device_datasource_instance_data_with_http_info(self, device_id, hds_id, id, **kwargs): # noqa: E501
"""get device instance data # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_datasource_instance_data_with_http_info(device_id, hds_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int device_id: (required)
:param int hds_id: The device-datasource ID (required)
:param int id: (required)
:param float period:
:param int start:
:param int end:
:param str datapoints:
:param | |
__all__ = ["PrettyHelp"]
from typing import Any, List, Optional, Union
import discord
from discord.ext import commands
from discord.ext.commands.help import HelpCommand
from .menu import DefaultMenu, PrettyMenu
class Paginator:
    """Builds the list of embed pages that PrettyHelp sends.

    For internal use only.  Pages are discord.Embed objects; new pages are
    started automatically whenever Discord's embed size limits would be
    exceeded.
    """

    def __init__(self, color, pretty_help: "PrettyHelp"):
        """A class that creates pages for PrettyHelp.

        For internal use only.

        Args:
            color (discord.Color): The color to use for embeds.
            pretty_help (PrettyHelp): The PrettyHelp instance.
        """
        self.pretty_help = pretty_help
        # Footer text; populated by PrettyHelp.prepare_help_command via
        # get_ending_note() before pages are built.
        self.ending_note = None
        self.color = color
        # Discord hard limits: 6000 characters and 25 fields per embed.
        self.char_limit = 6000
        self.field_limit = 25
        # Wrappers placed around command descriptions and alias lists.
        self.prefix = ""
        self.suffix = ""
        # Wrappers placed around the usage/signature string (code block).
        self.usage_prefix = "```"
        self.usage_suffix = "```"
        self.clear()

    def clear(self):
        """Clears the paginator to have no pages."""
        self._pages = []

    def _check_embed(self, embed: discord.Embed, *chars: str):
        """
        Check if the embed is too big to be sent on discord.

        Args:
            embed (discord.Embed): The embed to check
            *chars (str): Additional strings about to be added to the embed

        Returns:
            bool: True if the embed (plus the pending strings) still fits
                within the character and field limits
        """
        check = (
            len(embed) + sum(len(char) for char in chars if char)
            < self.char_limit
            and len(embed.fields) < self.field_limit
        )
        return check

    def _new_page(self, title: str, description: str):
        """
        Create a new page and append it to the page list.

        Args:
            title (str): The title of the new page
            description (str): The description of the new page

        Returns:
            discord.Embed: Returns an embed with the title and color set
        """
        embed = discord.Embed(
            title=title, description=description, color=self.color
        )
        self._add_page(embed)
        return embed

    def _add_page(self, page: discord.Embed):
        """
        Add a page to the paginator, stamping the footer on it.

        Args:
            page (discord.Embed): The page to add
        """
        page.set_footer(text=self.ending_note)
        self._pages.append(page)

    def add_cog(
        self,
        title: Union[str, commands.Cog],
        commands_list: List[commands.Command],
    ):
        """
        Add a cog page to the help menu.

        Args:
            title (Union[str, commands.Cog]): The cog itself, or a plain
                title for commands that have no cog
            commands_list (List[commands.Command]): List of commands
        """
        cog = isinstance(title, commands.Cog)
        # Cogs with no (visible) commands get no page at all.
        if not commands_list:
            return
        page_title = title.qualified_name if cog else title
        embed = self._new_page(
            page_title, (title.description or "") if cog else ""
        )
        self._add_command_fields(embed, page_title, commands_list)

    def _add_command_fields(
        self,
        embed: discord.Embed,
        page_title: str,
        commands: List[commands.Command],
    ):
        """
        Adds command fields to Category/Cog and Command Group pages.

        Note: the ``commands`` parameter shadows the discord.ext.commands
        module inside this method; only the list is used here.

        Args:
            embed (discord.Embed): The page to add command descriptions
            page_title (str): The title of the page
            commands (List[commands.Command]): The list of commands for
                the fields
        """
        for command in commands:
            # Start a continuation page when the next field would push the
            # embed over Discord's size limits.
            if not self._check_embed(
                embed,
                self.ending_note,
                command.name,
                command.short_doc,
                self.prefix,
                self.suffix,
            ):
                embed = self._new_page(page_title, embed.description)

            embed.add_field(
                name=command.name,
                value=(
                    f'{self.prefix}{command.short_doc or "No Description"}'
                    f'{self.suffix}'
                ),
            )

    @staticmethod
    def __command_info(
        command: Union[commands.Command, commands.Group]
    ) -> str:
        """Return the long help text for ``command`` (description + help),
        or the literal string "None" when neither is set."""
        info = ""
        if command.description:
            info += command.description + "\n\n"
        if command.help:
            info += command.help
        if not info:
            info = "None"
        return info

    def add_command(
        self, command: commands.Command, signature: str
    ) -> discord.Embed:
        """
        Add a command help page.

        Args:
            command (commands.Command): The command to get help for
            signature (str): The command signature/usage string

        Returns:
            discord.Embed: The page that was created (reused by add_group)
        """
        # NOTE: the f-string is always truthy, so the trailing `or ""`
        # can never trigger; it is vestigial.
        page = self._new_page(
            command.qualified_name,
            f"{self.prefix}{self.__command_info(command)}{self.suffix}" or "",
        )
        if command.aliases:
            aliases = ", ".join(command.aliases)
            page.add_field(
                name=self.pretty_help.aliases_string,
                value=f"{self.prefix}{aliases}{self.suffix}",
                inline=False,
            )
        page.add_field(
            name=self.pretty_help.usage_string,
            value=f"{self.usage_prefix}{signature}{self.usage_suffix}",
            inline=False,
        )
        # The __bot_perms__/__channel_perms__/__guild_perms__ attributes are
        # presumably set on the callback by permission-check decorators —
        # TODO confirm against the decorators used by this project.
        if self.pretty_help.show_bot_perms:
            try:
                perms = command.callback.__bot_perms__
            except AttributeError:
                pass
            else:
                if perms:
                    page.add_field(
                        name=self.pretty_help.bot_perms_title,
                        value=", ".join(perms),
                        inline=False,
                    )
        if self.pretty_help.show_user_perms:
            try:
                chan_perms = command.callback.__channel_perms__
            except AttributeError:
                pass
            else:
                if chan_perms:
                    page.add_field(
                        name=self.pretty_help.user_channel_perms_title,
                        value=", ".join(chan_perms),
                        inline=False,
                    )
            try:
                guild_perms = command.callback.__guild_perms__
            except AttributeError:
                pass
            else:
                if guild_perms:
                    page.add_field(
                        name=self.pretty_help.user_guild_perms_title,
                        value=", ".join(guild_perms),
                        inline=False,
                    )
        return page

    def add_group(
        self, group: commands.Group, commands_list: List[commands.Command]
    ):
        """
        Add a group help page.

        Args:
            group (commands.Group): The command group to get help for
            commands_list (List[commands.Command]): The list of commands in
                the group
        """
        page = self.add_command(
            group, self.pretty_help.get_command_signature(group)
        )
        self._add_command_fields(page, group.name, commands_list)

    def add_index(self, include: bool, title: str, bot: commands.Bot):
        """
        Add an index page to the response of the bot_help command.

        Args:
            include (bool): Include the index page or not
            title (str): The title of the index page
            bot (commands.Bot): The bot instance
        """
        if include:
            index = self._new_page(title, bot.description or "")
            # _new_page appended the index at the end; pull it back out so
            # it can be inserted as the first page after it is filled in.
            self._pages.pop(-1)
            for page_no, page in enumerate(self._pages, start=1):
                index.add_field(
                    name=f"{page_no}) {page.title}",
                    value=(
                        f'{self.prefix}{page.description or "No Description"}'
                        f'{self.suffix}'
                    ),
                )
            index.set_footer(text=self.ending_note)
            self._pages.insert(0, index)
        else:
            # No index: show the bot description on the first page instead.
            self._pages[0].description = bot.description

    @property
    def pages(self):
        """Returns the rendered list of pages."""
        return self._pages
class PrettyHelp(HelpCommand):
def __init__(
    self,
    color: Optional[discord.Color] = None,
    dm_help: Optional[bool] = False,
    menu: Optional[PrettyMenu] = None,
    sort_commands: bool = True,
    show_index: bool = True,
    show_bot_perms: bool = False,
    show_user_perms: bool = False,
    bot_perms_title: str = "Required Bot Permissions",
    user_guild_perms_title: str = "Required User Permissions",
    user_channel_perms_title: str =
    "Required User Permissions (channel specific)",
    ending_note: Optional[str] = None,
    index_title: str = "Index",
    no_category: str = "No Category",
    aliases_string: str = "Aliases",
    usage_string: str = "Usage",
    **options: Any,
):
    """PrettyHelp constructor.

    Args:
        color (Optional[discord.Color], optional):
            The color to use for help embeds. Defaults to a color chosen
            randomly *per instance* via discord.Color.random().
        dm_help (Optional[bool], optional):
            A tribool for whether the bot should dm help or not. Defaults
            to False.
        menu (Optional[PrettyMenu], optional):
            A customizable menu. Defaults to a fresh DefaultMenu() per
            instance.
        sort_commands (bool, optional):
            Whether or not commands should be sorted. Defaults to True.
        show_index (bool, optional):
            Whether or not to show the index page. Defaults to True.
        show_bot_perms (bool, optional):
            Whether or not to show required bot permissions. Defaults to
            False.
        show_user_perms (bool, optional):
            Whether or not to show required user permissions. Defaults to
            False.
        bot_perms_title (str, optional):
            The embed field name for required bot permissions. Defaults to
            "Required Bot Permissions".
        user_guild_perms_title (str, optional):
            The embed field name for guild-wide required user permissions.
            Defaults to "Required User Permissions".
        user_channel_perms_title (str, optional):
            The embed field name for channel-specific required user
            permissions. Defaults to "Required User Permissions (channel
            specific)".
        ending_note (Optional[str], optional):
            The ending note to put in the footer of embeds. Defaults to
            None.
        index_title (str, optional):
            The string to use for the index embed title. Defaults to
            "Index".
        no_category (str, optional):
            The string to use for commands not in a cog. Defaults to "No
            Category".
        aliases_string (str, optional):
            The string to use for the aliases field. Defaults to "Aliases".
        usage_string (str, optional):
            The string to use for the usage field. Defaults to "Usage".
    """
    # BUG FIX: the former defaults ``color=discord.Color.random()`` and
    # ``menu=DefaultMenu()`` were evaluated once at class-definition time,
    # so every PrettyHelp instance shared the same "random" color and the
    # same menu object.  Use None sentinels and resolve per instance.
    self.color = discord.Color.random() if color is None else color
    self.dm_help = dm_help
    self.sort_commands = sort_commands
    self.show_index = show_index
    self.menu = DefaultMenu() if menu is None else menu
    self.paginator = Paginator(self.color, self)
    self.show_user_perms = show_user_perms
    self.show_bot_perms = show_bot_perms
    self.bot_perms_title = bot_perms_title
    self.user_guild_perms_title = user_guild_perms_title
    self.user_channel_perms_title = user_channel_perms_title
    self.index_title = index_title
    self.no_category = no_category
    self.ending_note = ending_note or ""
    self.usage_string = usage_string
    self.aliases_string = aliases_string
    super().__init__(**options)
async def prepare_help_command(
    self, ctx: commands.Context, command: commands.Command
):
    """Prepares the help command. Designed for internal call only.

    Args:
        ctx (commands.Context): The context help was invoked in.
        command (commands.Command): The command help was invoked for.

    Raises:
        commands.BotMissingPermissions:
            The bot is missing permissions needed to run the paginator.
    """
    # Permission checks only make sense in a guild channel; DMs grant all.
    if ctx.guild is not None:
        perms = ctx.channel.permissions_for(ctx.guild.me)
        required = (
            (perms.embed_links, "Embed Links"),
            (perms.read_message_history, "Read Message History"),
            (perms.add_reactions, "Add Reactions"),
        )
        missing: List[str] = [
            label for granted, label in required if not granted
        ]
        if missing:
            raise commands.BotMissingPermissions(missing)
    # Start from a clean slate for every invocation.
    self.paginator.clear()
    self.paginator.ending_note = self.get_ending_note()
    await super().prepare_help_command(ctx, command)
def get_ending_note(self) -> str:
    """Gets the ending note for the bot.

    Uses the user-supplied note when one was given, otherwise a default
    that explains how to ask for command/category help.

    Returns:
        str: The ending note.
    """
    default_note = (
        "Type {help.clean_prefix}{help.invoked_with} command for more "
        "info on a command.\nYou can also type {help.clean_prefix}"
        "{help.invoked_with} category for more info on a category."
    )
    template = self.ending_note if self.ending_note else default_note
    return template.format(ctx=self.context, help=self)
async def send_pages(self):
    """Hand the rendered embed pages to the configured menu for sending."""
    await self.menu.send_pages(
        self.context, self.get_destination(), self.paginator.pages
    )
def get_destination(self) -> discord.abc.Messageable:
    """Gets the destination to send help to.

    Returns:
        discord.abc.Messageable: The invoker's DM when dm_help is exactly
            True, otherwise the channel help was invoked in.
    """
    ctx = self.context
    # dm_help is a tribool; only a literal True routes help to DMs.
    return ctx.author if self.dm_help is True else ctx.channel
async def send_bot_help(self, mapping: dict):
    """Sends help for the entire bot.

    For internal use only.

    Args:
        mapping (dict): A dictionary of Cogs and Commands.
    """
    bot = self.context.bot
    channel = self.get_destination()
    async with channel.typing():
        # Rebuild the mapping with empty lists so only commands that
        # survive filter_commands (hidden/permission checks) appear.
        mapping = dict((name, []) for name in mapping)
        for cmd in await self.filter_commands(
            bot.commands,
            sort=self.sort_commands,
        ):
            mapping[cmd.cog].append(cmd)
        # NOTE(review): assumes the incoming mapping always contains a
        # None key (commands without a cog); pop(None) would raise
        # KeyError otherwise — confirm against the HelpCommand caller.
        self.paginator.add_cog(self.no_category, mapping.pop(None))
        # Sort cog pages alphabetically by qualified name (str() for any
        # non-Cog keys).
        sorted_map = sorted(
            mapping.items(),
            key=lambda cg: cg[0].qualified_name
            if isinstance(cg[0], commands.Cog)
            else str(cg[0]),
        )
        for cog, command_list in sorted_map:
            self.paginator.add_cog(cog, command_list)
        self.paginator.add_index(self.show_index, self.index_title, bot)
    await self.send_pages()
async def send_command_help(self, command: commands.Command):
    """Sends help for a single command.

    Args:
        command (commands.Command): The command to send help for.
    """
    # Only build a page if the invoker is allowed to see the command;
    # pages (possibly none) are sent either way.
    if await self.filter_commands([command]):
        self.paginator.add_command(
            command, self.get_command_signature(command)
        )
    await self.send_pages()
async def send_group_help(self, group: commands.Group):
"""Sends help | |
<filename>amulet/world_interface/formats/anvil/anvil_format.py
from __future__ import annotations
import os
import struct
import zlib
import gzip
from typing import Tuple, Any, Dict, Union, Generator
import numpy
import time
import re
import amulet_nbt as nbt
from amulet.world_interface.formats import Format
from amulet.utils import world_utils
from amulet.utils.format_utils import check_all_exist, check_one_exists, load_leveldat
from amulet.api.errors import (
ChunkDoesNotExist,
LevelDoesNotExist,
ChunkLoadError,
ChunkSaveError,
)
class AnvilRegion:
    """A class wrapper for a single Anvil region (.mca) file.

    Chunk payloads are held as raw (possibly compressed) bytes; committed
    but unsaved edits live in ``_committed_chunks`` until :meth:`save`
    merges them into ``_chunks`` and rewrites the file.
    """

    # matches e.g. "r.-1.3.mca" and captures the region coordinates
    region_regex = re.compile(r"r\.(?P<rx>-?\d+)\.(?P<rz>-?\d+)\.mca")

    @staticmethod
    def get_coords(file_path: str) -> Tuple[Union[int, None], Union[int, None]]:
        """Parse the region coordinates out of a region file name.

        Returns (None, None) if the name does not match the r.X.Z.mca
        pattern.
        """
        file_path = os.path.basename(file_path)
        match = AnvilRegion.region_regex.fullmatch(file_path)
        if match is None:
            return None, None
        return int(match.group("rx")), int(match.group("rz"))

    def __init__(self, file_path: str, create=False, mcc=False):
        """
        A class wrapper for a region file
        :param file_path: The file path of the region file
        :param create: bool - if true will create the region from scratch. If false will try loading from disk
        :param mcc: bool - if true, chunks larger than ~1MiB are written to
            external .mcc files instead of being dropped
        """
        self._file_path = file_path
        self.rx, self.rz = self.get_coords(file_path)
        self._mcc = mcc  # create mcc file if the chunk is greater than 1MiB

        # [dirty, mod_time, data_length, data] feel free to extend if you want to implement modifying in place and defragging
        # value tuples are (mod_time, raw_bytes); raw_bytes == b"" means
        # "known to exist but not loaded" in _chunks and "deleted" in
        # _committed_chunks.
        self._chunks: Dict[Tuple[int, int], Tuple[int, bytes]] = {}
        self._committed_chunks: Dict[Tuple[int, int], Tuple[int, bytes]] = {}

        if create:
            # create the region from scratch.
            self._loaded = True
        else:
            # mark the region to be loaded when needed
            self._loaded = False
            # shallow load the data: read only the 4KiB offset table to
            # learn which chunks exist, without reading any payloads.
            with open(self._file_path, "rb") as fp:
                offsets = numpy.fromfile(fp, dtype=">u4", count=1024).reshape(32, 32)
                for x in range(32):
                    for z in range(32):
                        offset = offsets[z, x]
                        if offset != 0:
                            self._chunks[(x, z)] = (0, b"")

    def all_chunk_coords(self) -> Generator[Tuple[int, int], None, None]:
        """Yield world-space chunk coordinates of every chunk in this
        region, with committed (unsaved) edits taking precedence."""
        for (cx, cz), (_, chunk_) in self._committed_chunks.items():
            # empty payload marks a committed deletion — skip it
            if chunk_:
                yield cx + self.rx * 32, cz + self.rz * 32
        for (cx, cz), (_, chunk_) in self._chunks.items():
            if (cx, cz) not in self._committed_chunks:
                yield cx + self.rx * 32, cz + self.rz * 32

    def _load(self):
        """Fully load every chunk payload from disk (idempotent)."""
        if not self._loaded:
            with open(self._file_path, "rb+") as fp:
                # check that the file is a multiple of 4096 bytes and extend if not
                # TODO: perhaps rewrite this in a way that is more readable
                file_size = os.path.getsize(self._file_path)
                if file_size & 0xFFF:
                    file_size = (file_size | 0xFFF) + 1
                    fp.truncate(file_size)

                # if the length of the region file is 0 extend it to 8KiB TODO (perhaps have some error)
                if file_size < world_utils.SECTOR_BYTES * 2:
                    file_size = world_utils.SECTOR_BYTES * 2
                    fp.truncate(file_size)

                # read the file and populate the internal database
                # self._file_size = file_size
                fp.seek(0)

                # offsets = fp.read(world_utils.SECTOR_BYTES)
                # mod_times = fp.read(world_utils.SECTOR_BYTES)

                # self._free_sectors = free_sectors = numpy.full(
                #     file_size // world_utils.SECTOR_BYTES, True, bool
                # )
                # self._free_sectors[0:2] = False, False

                # the first array is made of 3 byte sector offset and 1 byte sector count
                sectors = (
                    numpy.fromfile(fp, dtype=">u4", count=1024).reshape(32, 32) >> 8
                )
                mod_times = numpy.fromfile(fp, dtype=">u4", count=1024).reshape(32, 32)

                # for offset in offsets:
                #     sector = offset >> 8
                #     count = offset & 0xFF
                #
                #     for i in range(sector, sector + count):
                #         if i >= len(free_sectors):
                #             return False
                #
                #         free_sectors[i] = False

                self._chunks.clear()
                for x in range(32):
                    for z in range(32):
                        sector = sectors[z, x]
                        if sector:
                            fp.seek(world_utils.SECTOR_BYTES * sector)
                            # read int value and then read that amount of data
                            buffer_size = struct.unpack(">I", fp.read(4))[0]
                            self._chunks[(x, z)] = (
                                mod_times[z, x],
                                fp.read(buffer_size),
                            )

            self._loaded = True

    def unload(self):
        """Drop all loaded payloads (keeping existence markers) so they are
        re-read from disk on next access."""
        for key in self._chunks.keys():
            self._chunks[key] = (0, b"")
        self._loaded = False

    def save(self):
        """Merge committed edits into the region and rewrite the file.

        No-op when there are no committed chunks.  Oversized chunks are
        spilled to .mcc side files when mcc support is enabled; orphaned
        .mcc files are removed afterwards.
        """
        if self._committed_chunks:
            self._load()
            if self._mcc:
                # candidates for orphan cleanup; pruned as chunks are written
                mcc_chunks = {(cx, cz) for cx in range(32) for cz in range(32)}
            else:
                mcc_chunks = set()
            # fold committed edits into the main database
            for key, val in self._committed_chunks.items():
                if val[1]:
                    if self._mcc or len(val[1]) <= 2 ** 20 - 4:
                        self._chunks[key] = val
                elif key in self._chunks:
                    # empty payload == deletion
                    del self._chunks[key]
            self._committed_chunks.clear()

            offsets = numpy.zeros(1024, dtype=">u4")
            mod_times = numpy.zeros(1024, dtype=">u4")
            offset = 2  # first two 4KiB sectors hold the offset/time tables
            data = []
            for ((cx, cz), (mod_time, buffer)) in self._chunks.items():
                if buffer:
                    index = cx + (cz << 5)
                    buffer_size = len(buffer)
                    if (
                        buffer_size > 2 ** 20 - 4
                    ):  # if mcc is false the chunks that are too large should have already been removed.
                        mcc_chunks.remove((cx, cz))
                        with open(
                            os.path.join(
                                os.path.dirname(self._file_path),
                                f"c.{cx+self.rx*32}.{cz+self.rz*32}.mcc",
                            ),
                            "wb",
                        ) as f:
                            f.write(buffer[1:])
                        buffer = bytes([buffer[0] | 128])  # set the external flag
                        buffer_size = 1
                    # round (size + 4-byte length prefix) up to whole sectors
                    sector_count = ((buffer_size + 4 | 0xFFF) + 1) >> 12
                    offsets[index] = (offset << 8) + sector_count
                    mod_times[index] = mod_time
                    data.append(
                        struct.pack(">I", buffer_size)
                        + buffer
                        + b"\x00" * ((sector_count << 12) - buffer_size - 4)
                    )
                    offset += sector_count
            os.makedirs(os.path.dirname(self._file_path), exist_ok=True)
            with open(self._file_path, "wb") as fp:
                fp.write(
                    struct.pack(">1024I", *offsets)
                )  # there is probably a prettier way of doing this
                fp.write(
                    struct.pack(">1024I", *mod_times)
                )  # but I could not work it out with Numpy
                fp.write(b"".join(data))

            # remove orphaned mcc files
            for cx, cz in mcc_chunks:
                mcc_path = os.path.join(
                    os.path.dirname(self._file_path),
                    f"c.{cx + self.rx * 32}.{cz + self.rz * 32}.mcc",
                )
                if os.path.isfile(mcc_path):
                    os.remove(mcc_path)

    def get_chunk_data(self, cx: int, cz: int) -> nbt.NBTFile:
        """Get chunk data. Coords are in region space."""
        if (cx, cz) in self._committed_chunks:
            # if the chunk exists in the committed but unsaved chunks return that
            data = self._committed_chunks[(cx, cz)][1]
            # first byte is the compression type, rest is the payload
            compress_type, data = data[0], data[1:]
            if data:
                return self._decompress(compress_type, data)
        elif (cx, cz) in self._chunks:
            # otherwise if the chunk exists in the main database return that
            self._load()
            data = self._chunks[(cx, cz)][1]
            compress_type, data = data[0], data[1:]
            if (
                self._mcc and compress_type & 128
            ):  # if the mcc file is supported and the mcc bit is set
                mcc_path = os.path.join(
                    os.path.dirname(self._file_path),
                    f"c.{cx+self.rx*32}.{cz+self.rz*32}.mcc",
                )
                if os.path.isfile(mcc_path):
                    with open(mcc_path, "rb") as f:
                        data = f.read()
                    # strip the external-file flag bit
                    compress_type = compress_type & 127
            if data:
                return self._decompress(compress_type, data)
        raise ChunkDoesNotExist

    def put_chunk_data(self, cx: int, cz: int, data: nbt.NBTFile):
        """compress the data and put it in the class database"""
        bytes_data = self._compress(data)
        self._committed_chunks[(cx, cz)] = (int(time.time()), bytes_data)

    def delete_chunk_data(self, cx: int, cz: int):
        """Mark the chunk as deleted (empty payload) pending save."""
        self._committed_chunks[(cx, cz)] = (0, b"")

    @staticmethod
    def _compress(data: nbt.NBTFile) -> bytes:
        """Convert an NBTFile into a compressed bytes object.

        Always writes zlib compression (type byte 0x02)."""
        data = data.save_to(compressed=False)
        return b"\x02" + zlib.compress(data)

    @staticmethod
    def _decompress(compress_type: int, data: bytes) -> nbt.NBTFile:
        """Convert a bytes object into an NBTFile"""
        if compress_type == world_utils.VERSION_GZIP:
            return nbt.load(buffer=gzip.decompress(data), compressed=False)
        elif compress_type == world_utils.VERSION_DEFLATE:
            return nbt.load(buffer=zlib.decompress(data), compressed=False)
        raise ChunkLoadError(f"Invalid compression type {compress_type}")
class AnvilLevelManager:
    """Manages all region files for one dimension of an Anvil world.

    Region classes are shallow-loaded up front; chunk payloads are read
    lazily on first access.
    """

    # matches dimension directories such as "DIM-1" / "DIM1"
    level_regex = re.compile(r"DIM(?P<level>-?\d+)")

    def __init__(self, directory: str, mcc=False):
        """
        :param directory: the dimension directory (containing "region")
        :param mcc: forwarded to AnvilRegion — enable .mcc side files for
            oversized chunks
        """
        self._directory = directory
        self._regions: Dict[Tuple[int, int], AnvilRegion] = {}
        self._mcc = mcc

        # shallow load all of the existing region classes
        region_dir = os.path.join(self._directory, "region")
        if os.path.isdir(region_dir):
            for region_file_name in os.listdir(region_dir):
                rx, rz = AnvilRegion.get_coords(region_file_name)
                if rx is None:
                    # not an r.X.Z.mca file — ignore
                    continue
                self._regions[(rx, rz)] = AnvilRegion(
                    os.path.join(self._directory, "region", region_file_name),
                    mcc=self._mcc,
                )

    def all_chunk_coords(self) -> Generator[Tuple[int, int], None, None]:
        """Yield world-space coordinates of every chunk in the dimension."""
        for region in self._regions.values():
            yield from region.all_chunk_coords()

    def save(self, unload=True):
        # use put_chunk_data to actually upload modified chunks
        # run this to push those changes to disk
        for region in self._regions.values():
            region.save()
            if unload:
                region.unload()

    def close(self):
        """No resources are held open between calls; nothing to close."""
        pass

    def unload(self):
        """Drop all loaded chunk payloads to free memory."""
        for region in self._regions.values():
            region.unload()

    def get_chunk_data(self, cx: int, cz: int) -> nbt.NBTFile:
        """Get an NBTFile of a chunk from the database.
        Will raise ChunkDoesNotExist if the region or chunk does not exist
        """
        # get the region key
        return self._get_region(cx, cz).get_chunk_data(cx & 0x1F, cz & 0x1F)

    def _get_region(self, cx: int, cz: int, create=False) -> AnvilRegion:
        """Return the AnvilRegion containing chunk (cx, cz), optionally
        creating it; raises ChunkDoesNotExist when absent and not creating."""
        key = rx, rz = world_utils.chunk_coords_to_region_coords(cx, cz)
        if key in self._regions:
            return self._regions[key]
        if create:
            file_path = os.path.join(self._directory, "region", f"r.{rx}.{rz}.mca")
            self._regions[key] = AnvilRegion(file_path, True, mcc=self._mcc)
        else:
            raise ChunkDoesNotExist
        return self._regions[key]

    def put_chunk_data(self, cx: int, cz: int, data: nbt.NBTFile):
        """pass data to the region file class"""
        # get the region key
        self._get_region(cx, cz, create=True).put_chunk_data(cx & 0x1F, cz & 0x1F, data)

    def delete_chunk(self, cx: int, cz: int):
        """Delete a chunk if it exists; silently ignore a missing region."""
        try:
            self._get_region(cx, cz).delete_chunk_data(cx & 0x1F, cz & 0x1F)
        except ChunkDoesNotExist:
            pass
class AnvilFormat(Format):
_level_names = {-1: "nether", 0: "overworld", 1: "end"}
def __init__(self, directory: str):
    """Wrap the Anvil world at ``directory``: load level.dat and prepare
    the (lazily created) per-dimension level managers."""
    super().__init__(directory)
    self.root_tag: nbt.NBTFile = nbt.NBTFile()
    self._load_level_dat()
    # dimension id -> AnvilLevelManager; populated on first access
    self._levels: Dict[int, AnvilLevelManager] = {}
    # session-lock token; None until the world is locked
    self._lock = None
def _load_level_dat(self):
    """Load the level.dat file and check the image file"""
    self.root_tag = nbt.load(filename=os.path.join(self._world_path, "level.dat"))
    # prefer the world's own icon; fall back to the bundled placeholder
    if os.path.isfile(os.path.join(self._world_path, "icon.png")):
        self._world_image_path = os.path.join(self._world_path, "icon.png")
    else:
        self._world_image_path = self._missing_world_icon
@staticmethod
def is_valid(directory) -> bool:
"""
Returns whether this format is | |
<reponame>gdsports/NSGadget_Pi
#!/usr/bin/python3
"""
MIT License
Copyright (c) 2020 <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------------------------------------------------------
Adapt various USB joystick controllers for use with a Nintendo Switch (NS)
console. All controllers are active but are seen by the console as one
controller so co-pilot mode is always active.
Read from joysticks and write to NSGadget device. The following joysticks are
supported
* Hori HoriPad gamepad
* Xbox One gamepads
* PS4 gamepad
* Logitech Extreme 3D Pro flightstick
* Thrustmaster T.16000M flightstick
* Dragon Rise arcade joysticks
NSGadget is an Adafruit Trinket M0 emulating an NS compatible gamepad. The
connection to between the Pi and NSGadget is 2 Mbits/sec UART.
"""
import os
import time
import sys
import signal
import getopt
from struct import unpack
import threading
import array
from fcntl import ioctl
import serial
from gpiozero import Button
from nsgpadserial import NSGamepadSerial, NSButton, NSDPad
import mido
# Map the 4 direction buttons (up, right, down, left) to NS direction pad values
# The index is a 4-bit mask (bit0=Up, bit1=Right, bit2=Down, bit3=Left);
# combinations with opposing directions pressed resolve to CENTERED.
BUTTONS_MAP_DPAD = array.array('B', [
    # U = Up button, R = right button, etc
    # LDRU
    NSDPad.CENTERED,    # 0000
    NSDPad.UP,          # 0001
    NSDPad.RIGHT,       # 0010
    NSDPad.UP_RIGHT,    # 0011
    NSDPad.DOWN,        # 0100
    NSDPad.CENTERED,    # 0101
    NSDPad.DOWN_RIGHT,  # 0110
    NSDPad.CENTERED,    # 0111
    NSDPad.LEFT,        # 1000
    NSDPad.UP_LEFT,     # 1001
    NSDPad.CENTERED,    # 1010
    NSDPad.CENTERED,    # 1011
    NSDPad.DOWN_LEFT,   # 1100
    NSDPad.CENTERED,    # 1101
    NSDPad.CENTERED,    # 1110
    NSDPad.CENTERED     # 1111
])
NSG = NSGamepadSerial()
# Probe the two possible NSGadget serial ports.  Only port-open failures
# (OSError / SerialException) should trigger the fallback or the exit;
# the previous bare `except:` also swallowed KeyboardInterrupt/SystemExit.
try:
    # Raspberry Pi UART on pins 14,15
    NS_SERIAL = serial.Serial('/dev/ttyAMA0', 2000000, timeout=0)
    print("Found ttyAMA0")
except (OSError, serial.SerialException):
    try:
        # CP210x is capable of 2,000,000 bits/sec
        NS_SERIAL = serial.Serial('/dev/ttyUSB0', 2000000, timeout=0)
        print("Found ttyUSB0")
    except (OSError, serial.SerialException):
        print("NSGadget serial port not found")
        sys.exit(1)
NSG.begin(NS_SERIAL)
def read_horipad(jsdev):
    """
    The Hori HoriPad is a Nintendo Switch compatible gamepad, so button
    numbers and axes pass straight through to NSGadget unchanged.
    Runs as a thread; exits when the device read fails (e.g. unplug).
    """
    # joystick axis number -> NSGadget axis setter (bound methods)
    axis_out = {
        0: NSG.leftXAxis,   # left stick X
        1: NSG.leftYAxis,   # left stick Y
        2: NSG.rightXAxis,  # right stick X
        3: NSG.rightYAxis,  # right stick Y
        4: NSG.dPadXAxis,   # directional pad X
        5: NSG.dPadYAxis,   # directional pad Y
    }
    while True:
        try:
            event = jsdev.read(8)
        except:
            jsdev.close()
            break
        if not event:
            continue
        _, value, ev_type, number = unpack('IhBB', event)
        if ev_type == 0x01:  # button event: pass the number straight through
            (NSG.press if value else NSG.release)(number)
        if ev_type == 0x02:  # axis event: rescale -32768..32767 -> 0..255
            scaled = (value + 32768) >> 8
            handler = axis_out.get(number)
            if handler is not None:
                handler(scaled)
def read_hori_wheel(jsdev):
    """
    The Hori Mario Kart racing wheel is a Nintendo Switch compatible device.
    Buttons are remapped through BUTTON_MAP; the analog throttle pedals are
    converted to the NS on/off throttle buttons.  Runs as a thread; exits
    when the device read fails (e.g. unplug).
    """
    BUTTON_MAP = array.array('B', [
        NSButton.A,
        NSButton.B,
        NSButton.X,
        NSButton.Y,
        NSButton.LEFT_TRIGGER,
        NSButton.RIGHT_TRIGGER,
        NSButton.MINUS,
        NSButton.PLUS,
        NSButton.HOME,
        NSButton.LEFT_STICK,
        NSButton.RIGHT_STICK])
    # Remember the last value of each filtered axis so repeated identical
    # events are not re-sent over the UART.
    last_left_throttle = 0
    last_right_throttle = 0
    last_wheel = 128
    while True:
        try:
            evbuf = jsdev.read(8)
        except:
            jsdev.close()
            break
        if evbuf:
            timestamp, value, ev_type, number = unpack('IhBB', evbuf)
            if ev_type == 0x01:  # button event
                button_out = BUTTON_MAP[number]
                if value:
                    NSG.press(button_out)
                else:
                    NSG.release(button_out)
            if ev_type == 0x02:  # axis event, rescaled to 0..255
                axis = ((value + 32768) >> 8)
                # Axis 0: the steering wheel
                if number == 0:
                    if last_wheel != axis:
                        # BUG FIX: last_wheel was never updated, so the
                        # change filter was ineffective (and events equal
                        # to the stale initial value were dropped).
                        last_wheel = axis
                        NSG.leftXAxis(axis)
                elif number == 1:
                    NSG.leftYAxis(axis)
                # Axis 2: left pedal -> NS left throttle button
                elif number == 2:
                    if last_left_throttle != axis:
                        last_left_throttle = axis
                        if axis > 64:
                            NSG.press(NSButton.LEFT_THROTTLE)
                        else:
                            NSG.release(NSButton.LEFT_THROTTLE)
                # Axes 3,4 right stick X,Y
                elif number == 3:
                    NSG.rightXAxis(axis)
                elif number == 4:
                    NSG.rightYAxis(axis)
                # Axis 5: right pedal -> NS right throttle button
                elif number == 5:
                    if last_right_throttle != axis:
                        last_right_throttle = axis
                        if axis > 64:
                            NSG.press(NSButton.RIGHT_THROTTLE)
                        else:
                            NSG.release(NSButton.RIGHT_THROTTLE)
                # Axes 6,7 directional pad X,Y
                elif number == 6:
                    NSG.dPadXAxis(axis)
                elif number == 7:
                    NSG.dPadYAxis(axis)
def read_xbox1(jsdev):
    """
    Forward Xbox One controller events to NSGadget. Runs as a thread;
    exits when the device read fails (e.g. unplug).

    Axes: 0/1 left stick X,Y; 2 left throttle; 3/4 right stick X,Y;
    5 right throttle; 6/7 dPad X,Y.
    Buttons (Xbox -> NS): A->B, B->A, X->Y, Y->X (positional swap),
    left/right trigger -> left/right trigger, windows -> minus,
    lines -> plus, logo -> home, stick buttons -> stick buttons.
    The Xbox throttles report 0..255 but the NS throttles are on/off
    buttons, so they are thresholded at the halfway point.
    """
    xbox_to_ns = array.array('B', [
        NSButton.B,
        NSButton.A,
        NSButton.Y,
        NSButton.X,
        NSButton.LEFT_TRIGGER,
        NSButton.RIGHT_TRIGGER,
        NSButton.MINUS,
        NSButton.PLUS,
        NSButton.HOME,
        NSButton.LEFT_STICK,
        NSButton.RIGHT_STICK])
    # plain pass-through axes: joystick axis number -> NSGadget setter
    stick_axes = {
        0: NSG.leftXAxis,
        1: NSG.leftYAxis,
        3: NSG.rightXAxis,
        4: NSG.rightYAxis,
        6: NSG.dPadXAxis,
        7: NSG.dPadYAxis,
    }
    # analog throttle axes mapped to on/off NS buttons
    throttle_buttons = {
        2: NSButton.LEFT_THROTTLE,
        5: NSButton.RIGHT_THROTTLE,
    }
    while True:
        try:
            event = jsdev.read(8)
        except:
            jsdev.close()
            break
        if not event:
            continue
        _, value, ev_type, number = unpack('IhBB', event)
        if ev_type == 0x01:  # button event
            target = xbox_to_ns[number]
            (NSG.press if value else NSG.release)(target)
        if ev_type == 0x02:  # axis event, rescaled to 0..255
            scaled = (value + 32768) >> 8
            if number in stick_axes:
                stick_axes[number](scaled)
            elif number in throttle_buttons:
                if scaled > 128:
                    NSG.press(throttle_buttons[number])
                else:
                    NSG.release(throttle_buttons[number])
def read_ps4ds(jsdev):
    """
    The Sony Playstation 4 controller has fewer buttons. The throttles are
    analog (see axes) and binary (see buttons). Runs as a thread.
    axis 0: left stick X
         1: left stick Y
         2: left throttle
         3: right stick X
         4: right stick Y
         5: right throttle
         6: dPad X
         7: dPad Y
    button 0: cross               NS B
           1: circle              NS A
           2: triangle            NS X
           3: square              NS Y
           4: left trigger        NS left trigger
           5: right trigger       NS right trigger
           6: left throttle       NS left throttle
           7: right throttle      NS right throttle
           8: share               NS minus
           9: options             NS plus
          10: logo                NS home
          11: left stick button   NS left stick button
          12: right stick button  NS right stick button
        share                     options
                   triangle
              square   circle
                    cross
    """
    BUTTON_MAP = array.array('B', [
        NSButton.B,
        NSButton.A,
        # BUG FIX: entries 2 and 3 were swapped (Y, X), contradicting the
        # mapping documented above and the positional convention used in
        # read_xbox1: PS4 triangle (top) -> NS X (top), square (left) ->
        # NS Y (left).
        NSButton.X,
        NSButton.Y,
        NSButton.LEFT_TRIGGER,
        NSButton.RIGHT_TRIGGER,
        NSButton.LEFT_THROTTLE,
        NSButton.RIGHT_THROTTLE,
        NSButton.MINUS,
        NSButton.PLUS,
        NSButton.HOME,
        NSButton.LEFT_STICK,
        NSButton.RIGHT_STICK])
    while True:
        try:
            evbuf = jsdev.read(8)
        except:
            jsdev.close()
            break
        if evbuf:
            timestamp, value, ev_type, number = unpack('IhBB', evbuf)
            if ev_type == 0x01:  # button event
                button_out = BUTTON_MAP[number]
                if value:
                    NSG.press(button_out)
                else:
                    NSG.release(button_out)
            if ev_type == 0x02:  # axis event, rescaled to 0..255
                axis = ((value + 32768) >> 8)
                # Axes 0,1 left stick X,Y
                if number == 0:
                    NSG.leftXAxis(axis)
                elif number == 1:
                    NSG.leftYAxis(axis)
                # axis 2 (analog left throttle) is ignored; the binary
                # throttle arrives as button 6
                # Axes 3,4 right stick X,Y
                elif number == 3:
                    NSG.rightXAxis(axis)
                elif number == 4:
                    NSG.rightYAxis(axis)
                # axis 5 (analog right throttle) is ignored; the binary
                # throttle arrives as button 7
                # Axes 6,7 directional pad X,Y
                elif number == 6:
                    NSG.dPadXAxis(axis)
                elif number == 7:
                    NSG.dPadYAxis(axis)
def read_dragon_rise(jsdev, right_side):
"""
Map two Dragon Rise arcade joysticks to one NS gamepad
The Dragon Rise arcade joystick has 1 stick and up to 10 buttons. Two are required
to make 1 gamepad with 2 sticks plus 18 buttons. The right_side flag determines
which joystick is the left or right side of the gamepad.
"""
BUTTON_MAP_LEFT = array.array('B', [
NSButton.LEFT_THROTTLE,
NSButton.LEFT_TRIGGER,
NSButton.MINUS,
255, # DPAD Up
255, # DPAD Right
255, # DPAD Down
255, # DPAD Left
| |
<reponame>haroonf/azure-cli-extensions<filename>src/providerhub/azext_providerhub/tests/latest/example_steps.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.cli.testsdk import (live_only)
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from .. import try_manual
# EXAMPLE: /CustomRollouts/put/CustomRollouts_CreateOrUpdate
@try_manual
def step_custom_rollout_create(test, checks=None):
    """Create/update a custom rollout and verify with the supplied checks."""
    if checks is None:
        checks = []
    test.cmd('az providerhub custom-rollout create '
             '--provider-namespace "{providerNamespace}" '
             '--rollout-name "{customRolloutName}" '
             '--canary regions="EastUS2EUAP" regions="centraluseuap"',
             # BUG FIX: previously passed checks=[] here, silently discarding
             # the caller-supplied checks; pass them through like every
             # sibling step does.
             checks=checks)
# EXAMPLE: /CustomRollouts/get/CustomRollouts_Get
@try_manual
def step_custom_rollout_show(test, checks=None):
    """Show the custom rollout for the test provider namespace."""
    command = ('az providerhub custom-rollout show '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{customRolloutName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /CustomRollouts/get/CustomRollouts_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_custom_rollout_list(test, checks=None):
    """List custom rollouts under the test provider registration."""
    command = ('az providerhub custom-rollout list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /DefaultRollouts/put/DefaultRollouts_CreateOrUpdate
@AllowLargeResponse()
@live_only()
# CONSISTENCY: @try_manual added — this was the only step lacking it, so a
# manual override for this step would never be picked up like the siblings'.
@try_manual
def step_default_rollout_create(test, checks=None):
    """Create/update the default rollout (live only: rollout provisioning)."""
    if checks is None:
        checks = []
    test.cmd('az providerhub default-rollout create '
             '--provider-namespace "{providerNamespace}" '
             '--rollout-name "{defaultRolloutName}" '
             '--rest-of-the-world-group-two wait-duration="PT2H" '
             '--canary skip-regions="centraluseuap"',
             checks=checks)
# EXAMPLE: /DefaultRollouts/get/DefaultRollouts_Get
@AllowLargeResponse()
@try_manual
def step_default_rollout_show(test, checks=None):
    """Show the default rollout for the test provider namespace."""
    command = ('az providerhub default-rollout show '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{defaultRolloutName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /DefaultRollouts/get/DefaultRollouts_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_default_rollout_list(test, checks=None):
    """List default rollouts under the test provider registration."""
    command = ('az providerhub default-rollout list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /DefaultRollouts/post/DefaultRollouts_Stop
@try_manual
def step_default_rollout_stop(test, checks=None):
    """Stop the in-progress default rollout."""
    command = ('az providerhub default-rollout stop '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{defaultRolloutName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /DefaultRollouts/delete/DefaultRollouts_Delete
@try_manual
def step_default_rollout_delete(test, checks=None):
    """Delete the default rollout without prompting (-y)."""
    command = ('az providerhub default-rollout delete -y '
               '--provider-namespace "{providerNamespace}" '
               '--rollout-name "{defaultRolloutName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Operations/put/Operations_CreateOrUpdate
@try_manual
def step_operation_create(test, checks=None):
    """Create the Employees/Read operation on the test provider namespace."""
    # The JSON payload is escaped for the CLI; {{ }} are literal braces for
    # the testsdk format step.
    command = ('az providerhub operation create '
               '--contents "[{{\\"name\\":\\"Microsoft.Contoso/Employees/Read\\",\\"display\\":{{\\"description\\":\\"Rea'
               'd employees\\",\\"operation\\":\\"Gets/List employee resources\\",\\"provider\\":\\"Microsoft.Contoso\\",'
               '\\"resource\\":\\"Employees\\"}}}}]" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Operations/get/Operations_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_operation_list(test, checks=None):
    """List operations under the test provider registration."""
    command = ('az providerhub operation list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Operations/delete/Operations_Delete
@try_manual
def step_operation_delete(test, checks=None):
    """Delete the provider's operations without prompting (-y)."""
    command = ('az providerhub operation delete -y '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /providerhub/post/CheckinManifest
@try_manual
def step_manifest_checkin(test, checks=None):
    """Check in the provider manifest against the Prod environment."""
    command = ('az providerhub manifest checkin '
               '--environment "Prod" '
               '--baseline-arm-manifest-location "EastUS2EUAP" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /providerhub/post/GenerateManifest
@AllowLargeResponse()
@try_manual
def step_manifest_generate(test, checks=None):
    """Generate the provider manifest for the test namespace."""
    command = ('az providerhub manifest generate '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ProviderRegistrations/put/ProviderRegistrations_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_provider_registration_create(test, checks=None):
    """Create/update the provider registration with full metadata (auth, service tree, capabilities, incident routing)."""
    command = ('az providerhub provider-registration create '
               '--providerhub-metadata-authorizations application-id="3d834152-5efa-46f7-85a4-a18c2b5d46f9" '
               'role-definition-id="760505bf-dcfa-4311-b890-18da392a00b2" '
               '--providerhub-metadata-authentication allowed-audiences="https://management.core.windows.net/" '
               '--service-tree-infos service-id="6f53185c-ea09-4fc3-9075-318dec805303" '
               'component-id="6f53185c-ea09-4fc3-9075-318dec805303" '
               '--capabilities effect="Allow" quota-id="CSP_2015-05-01" '
               '--capabilities effect="Allow" quota-id="CSP_MG_2017-12-01" '
               '--manifest-owners "SPARTA-PlatformServiceAdministrator" '
               '--incident-contact-email "<EMAIL>" '
               '--incident-routing-service "Contoso Resource Provider" '
               '--incident-routing-team "Contoso Triage" '
               '--provider-type "Internal, Hidden" '
               '--provider-version "2.0" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ProviderRegistrations/get/ProviderRegistrations_Get
@AllowLargeResponse()
@try_manual
def step_provider_registration_show(test, checks=None):
    """Show the provider registration for the test namespace."""
    command = ('az providerhub provider-registration show '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ProviderRegistrations/get/ProviderRegistrations_List
@AllowLargeResponse()
@try_manual
def step_provider_registration_list(test, checks=None):
    """List all provider registrations visible to the test account."""
    test.cmd('az providerhub provider-registration list',
             checks=checks if checks is not None else [])
# EXAMPLE: /ProviderRegistrations/post/ProviderRegistrations_GenerateOperations
@AllowLargeResponse()
@try_manual
def step_provider_registration_generate_operation(test, checks=None):
    """Generate operations for the test provider registration."""
    command = ('az providerhub provider-registration generate-operation '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ProviderRegistrations/delete/ProviderRegistrations_Delete
@try_manual
def step_provider_registration_delete(test, checks=None):
    """Delete the provider registration without prompting (-y)."""
    command = ('az providerhub provider-registration delete -y '
               '--provider-namespace "{providerNamespace}" ')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ResourceTypeRegistration/put/ResourceTypeRegistration_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_resource_type_registration_create(test, checks=None):
    """Create/update the 'extensionresourcetype' resource type registration."""
    command = ('az providerhub resource-type-registration create '
               '--endpoints api-versions="2020-01-01-preview" '
               'locations="" required-features="Microsoft.Contoso/RPaaSSampleApp" '
               '--regionality "Global" '
               '--routing-type "Proxyonly, Extension" '
               '--swagger-specifications api-versions="2020-01-01-preview" swagger-spec-folder-uri="https://github.com/Azure/azure-rest-api-specs-pr/blob/RPSaaSMaster/specification/contoso/resource-manager/Microsoft.Contoso/" '
               '--provider-namespace "{providerNamespace}" '
               '--enable-async-operation false '
               '--enable-third-party-s2s false '
               '--resource-type "extensionresourcetype"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ResourceTypeRegistrations/get/ResourceTypeRegistrations_ListByProviderRegistration
@AllowLargeResponse()
@try_manual
def step_resource_type_registration_list(test, checks=None):
    """List resource type registrations under the test provider."""
    command = ('az providerhub resource-type-registration list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ResourceTypeRegistrations/get/ResourceTypeRegistrations_Get
@AllowLargeResponse()
@try_manual
def step_resource_type_registration_show(test, checks=None):
    """Show the 'employees' resource type registration."""
    command = ('az providerhub resource-type-registration show '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "employees"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ResourceTypeRegistration/put/ResourceTypeRegistration_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_nested_resource_type_registration_create(test, checks=None):
    """Create a nested resource type registration; default checks verify name, routing type and regionality."""
    if checks is None:
        checks = [
            test.check("properties.name", "employees/NestedResourceType", case_sensitive=False),
            test.check("properties.routingType", "ProxyOnly", case_sensitive=False),
            test.check("properties.regionality", "Global", case_sensitive=False)
        ]
    command = ('az providerhub resource-type-registration create '
               '--endpoints api-versions="2019-01-01" locations="Global" '
               'required-features="Microsoft.Contoso/RPaaSSampleApp" extension-endpoint-uri="https://contoso-test-extension-endpoint.com/" extension-categories="ResourceReadValidate" extension-categories="ResourceDeletionValidate" '
               '--regionality "Global" '
               '--routing-type "ProxyOnly" '
               '--swagger-specifications api-versions="2019-01-01" swagger-spec-folder-uri="https://github.com/Azure/azure-rest-api-specs-pr/tree/RPSaaSMaster/specification/rpsaas/resource-manager/Microsoft.Contoso/" '
               '--provider-namespace "{providerNamespace}" '
               '--enable-async-operation false '
               '--template-deployment-options preflight-supported="true" preflight-options="DefaultValidationOnly" preflight-options="continueDeploymentOnFailure" '
               '--resource-type "{resourceType}/{nestedResourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ResourceTypeRegistration/put/ResourceTypeRegistration_CreateOrUpdate
@AllowLargeResponse()
@try_manual
def step_nested_resource_type_registration_extensions_create(test, checks=None):
    """Create a nested resource type registration using the inline `extensions=` JSON form; same default checks."""
    if checks is None:
        checks = [
            test.check("properties.name", "employees/NestedResourceType", case_sensitive=False),
            test.check("properties.routingType", "ProxyOnly", case_sensitive=False),
            test.check("properties.regionality", "Global", case_sensitive=False)
        ]
    command = ('az providerhub resource-type-registration create '
               '--endpoints api-versions="2019-01-01" locations="Global" '
               'required-features="Microsoft.Contoso/RPaaSSampleApp" extensions=[{{\\"endpointUri\\":\\"https://contoso-test-extension-endpoint.com/\\",\\"extensionCategories\\":[\\"ResourceReadValidate\\",\\"ResourceDeletionValidate\\"]}}] '
               '--regionality "Global" '
               '--routing-type "ProxyOnly" '
               '--swagger-specifications api-versions="2019-01-01" swagger-spec-folder-uri="https://github.com/Azure/azure-rest-api-specs-pr/tree/RPSaaSMaster/specification/rpsaas/resource-manager/Microsoft.Contoso/" '
               '--provider-namespace "{providerNamespace}" '
               '--enable-async-operation false '
               '--template-deployment-options preflight-supported="true" preflight-options="DefaultValidationOnly" preflight-options="continueDeploymentOnFailure" '
               '--resource-type "{resourceType}/{nestedResourceType}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /ResourceTypeRegistration/delete/ResourceTypeRegistration_Delete
@try_manual
def step_nested_resource_type_registration_delete(test, checks=None):
    """Delete the nested resource type registration without prompting (-y)."""
    command = ('az providerhub resource-type-registration delete -y '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}/{nestedResourceType}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /ResourceTypeRegistrations/get/ResourceTypeRegistrations_Get
@AllowLargeResponse()
@try_manual
def step_nested_resource_type_registration_show(test, checks=None):
    """Show the nested resource type registration."""
    command = ('az providerhub resource-type-registration show '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}/{nestedResourceType}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /NotificationRegistrations/put/NotificationRegistrations_CreateOrUpdate
@try_manual
def step_notification_registration_create(test, checks=None):
    """Create an EventHub notification registration; default checks verify name, scope and mode."""
    if checks is None:
        checks = [
            test.check("name", "{notificationRegistration}", case_sensitive=False),
            test.check("properties.messageScope", "RegisteredSubscriptions", case_sensitive=False),
            test.check("properties.notificationMode", "EventHub", case_sensitive=False)
        ]
    command = ('az providerhub notification-registration create '
               '--name "{notificationRegistration}" '
               '--included-events "*/write" "Microsoft.Contoso/employees/delete" '
               '--message-scope "RegisteredSubscriptions" '
               '--notification-endpoints locations="" locations="East US" notification-destination="/subscriptions/ac6bcfb5-3dc1-491f-95a6-646b89bf3e88/resourceGroups/mgmtexp-eastus/providers/Microsoft.EventHub/namespaces/unitedstates-mgmtexpint/eventhubs/armlinkednotifications" '
               '--notification-endpoints locations="East US" notification-destination="/subscriptions/{subscription_'
               'id}/resourceGroups/providers/Microsoft.EventHub/namespaces/europe-mgmtexpint/eventhubs/armlinkedno'
               'tifications" '
               '--notification-mode "EventHub" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /NotificationRegistrations/get/NotificationRegistrations_Get
@try_manual
def step_notification_registration_show(test, checks=None):
    """Show the notification registration; default checks verify name, scope and mode."""
    if checks is None:
        checks = [
            test.check("name", "{notificationRegistration}", case_sensitive=False),
            test.check("properties.messageScope", "RegisteredSubscriptions", case_sensitive=False),
            test.check("properties.notificationMode", "EventHub", case_sensitive=False),
        ]
    command = ('az providerhub notification-registration show '
               '--name "{notificationRegistration}" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /NotificationRegistrations/get/NotificationRegistrations_ListByProviderRegistration
@try_manual
def step_notification_registration_list(test, checks=None):
    """List notification registrations; by default expects exactly 2 entries."""
    if checks is None:
        checks = [
            test.check('length(@)', 2),
        ]
    command = ('az providerhub notification-registration list '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks)
# EXAMPLE: /NotificationRegistrations/delete/NotificationRegistrations_Delete
@try_manual
def step_notification_registration_delete(test, checks=None):
    """Delete the notification registration without prompting (-y)."""
    command = ('az providerhub notification-registration delete -y '
               '--name "{notificationRegistration}" '
               '--provider-namespace "{providerNamespace}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/put/Skus_CreateOrUpdate
@try_manual
def step_sku_create(test, checks=None):
    """Create a SKU with a single free setting on the top-level resource type."""
    command = ('az providerhub sku create '
               '--sku-settings "[{{\\"name\\":\\"freeSku\\"}}]" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/put/Skus_CreateOrUpdateNestedResourceTypeFirst
@try_manual
def step_sku_create2(test, checks=None):
    """Create free + premium SKU settings on the first nested resource type."""
    command = ('az providerhub sku create '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--sku-settings "[{{\\"name\\":\\"freeSku\\",\\"kind\\":\\"Standard\\",\\"tier\\":\\"Tier1\\"}},{{\\"name'
               '\\":\\"premiumSku\\",\\"costs\\":[{{\\"meterId\\":\\"xxx\\"}}],\\"kind\\":\\"Premium\\",\\"tier\\":\\"Tie'
               'r2\\"}}]" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/put/Skus_CreateOrUpdateNestedResourceTypeSecond
@try_manual
def step_sku_create3(test, checks=None):
    """Create free + premium SKU settings on the second nested resource type."""
    command = ('az providerhub sku create '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--sku-settings "[{{\\"name\\":\\"freeSku\\",\\"kind\\":\\"Standard\\",\\"tier\\":\\"Tier1\\"}},{{\\"name'
               '\\":\\"premiumSku\\",\\"costs\\":[{{\\"meterId\\":\\"xxx\\"}}],\\"kind\\":\\"Premium\\",\\"tier\\":\\"Tie'
               'r2\\"}}]" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/put/Skus_CreateOrUpdateNestedResourceTypeThird
@try_manual
def step_sku_create4(test, checks=None):
    """Create free + premium SKU settings on the third nested resource type."""
    command = ('az providerhub sku create '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--nested-resource-type-third "nestedResourceTypeThird" '
               '--sku-settings "[{{\\"name\\":\\"freeSku\\",\\"kind\\":\\"Standard\\",\\"tier\\":\\"Tier1\\"}},{{\\"name'
               '\\":\\"premiumSku\\",\\"costs\\":[{{\\"meterId\\":\\"xxx\\"}}],\\"kind\\":\\"Premium\\",\\"tier\\":\\"Tie'
               'r2\\"}}]" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_Get
@try_manual
def step_sku_show(test, checks=None):
    """Show the SKU on the top-level resource type."""
    command = ('az providerhub sku show '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_GetNestedResourceTypeFirst
@try_manual
def step_sku_show_nested_resource_type_first(test, checks=None):
    """Show the SKU on the first nested resource type."""
    command = ('az providerhub sku show-nested-resource-type-first '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_GetNestedResourceTypeSecond
@try_manual
def step_sku_show_nested_resource_type_second(test, checks=None):
    """Show the SKU on the second nested resource type."""
    command = ('az providerhub sku show-nested-resource-type-second '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_GetNestedResourceTypeThird
@try_manual
def step_sku_show_nested_resource_type_third(test, checks=None):
    """Show the SKU on the third nested resource type."""
    command = ('az providerhub sku show-nested-resource-type-third '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--nested-resource-type-third "nestedResourceTypeThird" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrations
@try_manual
def step_sku_list(test, checks=None):
    """List SKUs on the top-level resource type."""
    command = ('az providerhub sku list '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrationsNestedResourceTypeFirst
@try_manual
def step_sku_list2(test, checks=None):
    """List SKUs on the first nested resource type."""
    command = ('az providerhub sku list '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrationsNestedResourceTypeSecond
@try_manual
def step_sku_list3(test, checks=None):
    """List SKUs on the second nested resource type."""
    command = ('az providerhub sku list '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/get/Skus_ListByResourceTypeRegistrationsNestedResourceTypeThird
@try_manual
def step_sku_list4(test, checks=None):
    """List SKUs on the third nested resource type."""
    command = ('az providerhub sku list '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--nested-resource-type-second "nestedResourceTypeSecond" '
               '--nested-resource-type-third "nestedResourceTypeThird" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/delete/Skus_Delete
@try_manual
def step_sku_delete(test, checks=None):
    """Delete the SKU on the top-level resource type without prompting (-y)."""
    command = ('az providerhub sku delete -y '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/delete/Skus_DeleteNestedResourceTypeFirst
@try_manual
def step_sku_delete2(test, checks=None):
    """Delete the SKU on the first nested resource type without prompting (-y)."""
    command = ('az providerhub sku delete -y '
               '--nested-resource-type-first "nestedResourceTypeFirst" '
               '--provider-namespace "{providerNamespace}" '
               '--resource-type "{resourceType}" '
               '--sku "{skuName}"')
    test.cmd(command, checks=checks if checks is not None else [])
# EXAMPLE: /Skus/delete/Skus_DeleteNestedResourceTypeSecond
@try_manual
def step_sku_delete3(test, | |
<gh_stars>0
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import os
import sys
import unittest
from iptest import IronPythonTestCase, skipUnlessIronPython, is_cli, is_netcoreapp, is_mono, is_osx, is_posix
from shutil import copyfile
@skipUnlessIronPython()
class ClrLoadTest(IronPythonTestCase):
    def setUp(self):
        """Per-test setup: load IronPythonTest.dll so tests can import its types."""
        super(ClrLoadTest, self).setUp()
        self.load_iron_python_test()
    def test_loadtest(self):
        """Values exposed by IronPythonTest.LoadTest match at both global and nested scope."""
        import IronPythonTest.LoadTest as lt
        self.assertEqual(lt.Name1.Value, lt.Values.GlobalName1)
        self.assertEqual(lt.Name2.Value, lt.Values.GlobalName2)
        self.assertEqual(lt.Nested.Name1.Value, lt.Values.NestedName1)
        self.assertEqual(lt.Nested.Name2.Value, lt.Values.NestedName2)
    def test_negative_assembly_names(self):
        """Bad assembly names/paths raise IOError (missing assembly), TypeError (wrong argument type) or ValueError (empty/invalid path)."""
        import clr
        # NOTE(review): the same nonexistent-path assertion appears three times
        # below — looks like copy-paste; confirm whether three distinct paths
        # were intended.
        self.assertRaises(IOError, clr.AddReferenceToFileAndPath, os.path.join(self.test_dir, 'this_file_does_not_exist.dll'))
        self.assertRaises(IOError, clr.AddReferenceToFileAndPath, os.path.join(self.test_dir, 'this_file_does_not_exist.dll'))
        self.assertRaises(IOError, clr.AddReferenceToFileAndPath, os.path.join(self.test_dir, 'this_file_does_not_exist.dll'))
        self.assertRaises(IOError, clr.AddReferenceByName, 'bad assembly name', 'WellFormed.But.Nonexistent, Version=9.9.9.9, Culture=neutral, PublicKeyToken=<PASSWORD>, processorArchitecture=6502')
        self.assertRaises(IOError, clr.AddReference, 'this_assembly_does_not_exist_neither_by_file_name_nor_by_strong_name')
        self.assertRaises(TypeError, clr.AddReference, 35)
        # every Add*/Load* entry point must reject None...
        for method in [
            clr.AddReference,
            clr.AddReferenceToFile,
            clr.AddReferenceToFileAndPath,
            clr.AddReferenceByName,
            clr.AddReferenceByPartialName,
            clr.LoadAssemblyFromFileWithPath,
            clr.LoadAssemblyFromFile,
            clr.LoadAssemblyByName,
            clr.LoadAssemblyByPartialName,
            ]:
            self.assertRaises(TypeError, method, None)
        # ...including when passed as one of several arguments
        for method in [
            clr.AddReference,
            clr.AddReferenceToFile,
            clr.AddReferenceToFileAndPath,
            clr.AddReferenceByName,
            clr.AddReferenceByPartialName,
            ]:
            self.assertRaises(TypeError, method, None, None)
        import System
        # LoadAssemblyFromFile wants a bare file name, not a path or empty string
        self.assertRaises(ValueError, clr.LoadAssemblyFromFile, System.IO.Path.DirectorySeparatorChar)
        self.assertRaises(ValueError, clr.LoadAssemblyFromFile, '')
    def test_get_type(self):
        """clr.GetClrType(None) maps to None; clr.GetPythonType rejects None."""
        import clr
        self.assertEqual(clr.GetClrType(None), None)
        self.assertRaises(TypeError, clr.GetPythonType, None)
    #TODO:@skip("multiple_execute")
    def test_ironpythontest_from_alias(self):
        """Loading the test assembly under an alias exposes 'IronPythonTest' exactly once in its dir()."""
        IPTestAlias = self.load_iron_python_test(True)
        self.assertEqual(dir(IPTestAlias).count('IronPythonTest'), 1)
    def test_references(self):
        """clr.References supports tuple concatenation and stringifies in its documented '(<...>,\\n...)' form."""
        import clr
        refs = clr.References
        # the result is deliberately unused: the expression itself is the test
        atuple = refs + (clr.GetClrType(int).Assembly, ) # should be able to append to references_tuple
        #self.assertRaises(TypeError, refs.__add__, "I am not a tuple")
        s = str(refs)
        temp = ',' + os.linesep
        self.assertEqual(s, '(' + temp.join(map((lambda x:'<'+x.ToString()+'>'), refs)) + ')' + os.linesep)
@unittest.skipIf(is_netcoreapp, "no GAC")
def test_gac(self):
import clr
import System
def get_gac():
process = System.Diagnostics.Process()
if is_osx:
process.StartInfo.FileName = "/Library/Frameworks/Mono.framework/Versions/Current/Commands/gacutil"
elif is_posix:
process.StartInfo.FileName = "/usr/bin/gacutil"
else:
process.StartInfo.FileName = System.IO.Path.Combine(System.Runtime.InteropServices.RuntimeEnvironment.GetRuntimeDirectory(), "gacutil.exe")
process.StartInfo.Arguments = "/nologo /l"
process.StartInfo.CreateNoWindow = True
process.StartInfo.UseShellExecute = False
process.StartInfo.RedirectStandardInput = True
process.StartInfo.RedirectStandardOutput = True
process.StartInfo.RedirectStandardError = True
try:
process.Start()
except WindowsError:
return []
result = process.StandardOutput.ReadToEnd()
process.StandardError.ReadToEnd()
process.WaitForExit()
if process.ExitCode == 0:
try:
divByNewline = result.split(newline + ' ')[1:]
divByNewline[-1] = divByNewline[-1].split(newline + newline)[0]
return divByNewline
except Exception:
return []
return []
gaclist = get_gac()
if (len(gaclist) > 0):
clr.AddReferenceByName(gaclist[-1])
    def test_nonamespaceloadtest(self):
        """A CLR type declared without a namespace is importable and instantiable by its bare name."""
        import NoNamespaceLoadTest
        a = NoNamespaceLoadTest()
        self.assertEqual(a.HelloWorld(), 'Hello World')
    #TODO:@skip("multiple_execute")
    def test_addreferencetofileandpath_conflict(self):
        """verify AddReferenceToFileAndPath picks up the path specified, not some arbitrary assembly somewhere in your path already"""
        # Two same-named assemblies whose Result() strings differ, so we can
        # tell which copy actually got loaded.
        code1 = """
using System;
public class CollisionTest {
public static string Result(){
return "Test1";
}
}
"""
        code2 = """
using System;
public class CollisionTest {
public static string Result(){
return "Test2";
}
}
"""
        import clr
        tmp = self.temporary_dir
        # test1 goes to the temp dir; test2 goes to sys.prefix (already on the load path)
        test1_cs, test1_dll = os.path.join(tmp, 'test1.cs'), os.path.join(tmp, 'CollisionTest.dll')
        test2_cs, test2_dll = os.path.join(tmp, 'test2.cs'), os.path.join(sys.prefix, 'CollisionTest.dll')
        self.write_to_file(test1_cs, code1)
        self.write_to_file(test2_cs, code2)
        self.assertEqual(self.run_csc("/nologo /target:library /out:" + test2_dll + ' ' + test2_cs), 0)
        self.assertEqual(self.run_csc("/nologo /target:library /out:" + test1_dll + ' ' + test1_cs), 0)
        clr.AddReferenceToFileAndPath(test1_dll)
        import CollisionTest
        # must resolve to the explicitly-referenced copy, not the sys.prefix one
        self.assertEqual(CollisionTest.Result(), "Test1")
    #TODO:@skip("multiple_execute")
    def test_addreferencetofile_verification(self):
        """AddReferenceToFile loads only the named assembly; its dependency is resolved lazily from sys.path when first used."""
        import clr
        tmp = self.temporary_dir
        sys.path.append(tmp)
        # test1.dll depends on test2.dll
        code1 = """
using System;
public class test1{
public static string Test1(){
test2 t2 = new test2();
return t2.DoSomething();
}
public static string Test2(){
return "test1.test2";
}
}
"""
        code2 = """
using System;
public class test2{
public string DoSomething(){
return "hello world";
}
}
"""
        test1_dll_along_with_ipy = os.path.join(sys.prefix, 'test1.dll') # this dll is need for peverify
        # delete the old test1.dll if exists
        self.delete_files(test1_dll_along_with_ipy)
        test1_cs, test1_dll = os.path.join(tmp, 'test1.cs'), os.path.join(tmp, 'test1.dll')
        test2_cs, test2_dll = os.path.join(tmp, 'test2.cs'), os.path.join(tmp, 'test2.dll')
        self.write_to_file(test1_cs, code1)
        self.write_to_file(test2_cs, code2)
        self.assertEqual(self.run_csc("/nologo /target:library /out:"+ test2_dll + ' ' + test2_cs), 0)
        self.assertEqual(self.run_csc("/nologo /target:library /r:" + test2_dll + " /out:" + test1_dll + ' ' + test1_cs), 0)
        clr.AddReferenceToFile('test1')
        self.assertEqual(len([x for x in clr.References if x.FullName.startswith("test1")]), 1)
        # test 2 shouldn't be loaded yet...
        self.assertEqual(len([x for x in clr.References if x.FullName.startswith("test2")]), 0)
        import test1
        # should create test1 (even though we're a top-level namespace)
        a = test1()
        self.assertEqual(a.Test2(), 'test1.test2')
        # should load test2 from path
        self.assertEqual(a.Test1(), 'hello world')
        # NOTE(review): the count stays 0 even after test2 was used — confirm
        # that lazily-resolved dependencies intentionally don't appear in
        # clr.References.
        self.assertEqual(len([x for x in clr.References if x.FullName.startswith("test2")]), 0)
        # this is to make peverify happy, apparently snippetx.dll referenced to test1
        copyfile(test1_dll, test1_dll_along_with_ipy)
    #TODO: @skip("multiple_execute")
    @unittest.skipIf(is_mono, "mono may have a bug here...need to investigate https://github.com/IronLanguages/main/issues/1595")
    @unittest.skipIf(is_netcoreapp, "TODO: figure out")
    def test_assembly_resolve_isolation(self):
        """Two hosted engines with different search paths each resolve their own same-named assembly (no cross-engine leakage)."""
        import clr, os
        clr.AddReference("IronPython")
        clr.AddReference("Microsoft.Scripting")
        from IronPython.Hosting import Python
        from Microsoft.Scripting import SourceCodeKind
        tmp = self.temporary_dir
        tmp1 = os.path.join(tmp, 'resolve1')
        tmp2 = os.path.join(tmp, 'resolve2')
        if not os.path.exists(tmp1):
            os.mkdir(tmp1)
        if not os.path.exists(tmp2):
            os.mkdir(tmp2)
        # Identical ResolveTestA sources; the ResolveTestB dependency differs
        # only in the string it returns, which identifies which copy loaded.
        code1a = """
using System;
public class ResolveTestA {
public static string Test() {
ResolveTestB test = new ResolveTestB();
return test.DoSomething();
}
}
"""
        code1b = """
using System;
public class ResolveTestB {
public string DoSomething() {
return "resolve test 1";
}
}
"""
        code2a = """
using System;
public class ResolveTestA {
public static string Test() {
ResolveTestB test = new ResolveTestB();
return test.DoSomething();
}
}
"""
        code2b = """
using System;
public class ResolveTestB {
public string DoSomething() {
return "resolve test 2";
}
}
"""
        # same script runs in both engines; only the search path differs
        script_code = """import clr
clr.AddReferenceToFile("ResolveTestA")
from ResolveTestA import Test
result = Test()
"""
        test1a_cs, test1a_dll, test1b_cs, test1b_dll = map(
            lambda x: os.path.join(tmp1, x),
            ['ResolveTestA.cs', 'ResolveTestA.dll', 'ResolveTestB.cs', 'ResolveTestB.dll']
        )
        test2a_cs, test2a_dll, test2b_cs, test2b_dll = map(
            lambda x: os.path.join(tmp2, x),
            ['ResolveTestA.cs', 'ResolveTestA.dll', 'ResolveTestB.cs', 'ResolveTestB.dll']
        )
        self.write_to_file(test1a_cs, code1a)
        self.write_to_file(test1b_cs, code1b)
        self.write_to_file(test2a_cs, code2a)
        self.write_to_file(test2b_cs, code2b)
        self.assertEqual(self.run_csc("/nologo /target:library /out:" + test1b_dll + ' ' + test1b_cs), 0)
        self.assertEqual(self.run_csc("/nologo /target:library /r:" + test1b_dll + " /out:" + test1a_dll + ' ' + test1a_cs), 0)
        self.assertEqual(self.run_csc("/nologo /target:library /out:" + test2b_dll + ' ' + test2b_cs), 0)
        self.assertEqual(self.run_csc("/nologo /target:library /r:" + test2b_dll + " /out:" + test2a_dll + ' ' + test2a_cs), 0)
        engine1 = Python.CreateEngine()
        paths1 = engine1.GetSearchPaths()
        paths1.Add(tmp1)
        engine1.SetSearchPaths(paths1)
        scope1 = engine1.CreateScope()
        script1 = engine1.CreateScriptSourceFromString(script_code, SourceCodeKind.Statements)
        script1.Execute(scope1)
        result1 = scope1.GetVariable("result")
        self.assertEqual(result1, "resolve test 1")
        engine2 = Python.CreateEngine()
        paths2 = engine2.GetSearchPaths()
        paths2.Add(tmp2)
        engine2.SetSearchPaths(paths2)
        scope2 = engine2.CreateScope()
        script2 = engine2.CreateScriptSourceFromString(script_code, SourceCodeKind.Statements)
        script2.Execute(scope2)
        result2 = scope2.GetVariable("result")
        self.assertEqual(result2, "resolve test 2")
    def test_addreference_sanity(self):
        """clr.AddReference accepts an Assembly object, a partial name, and a fully qualified name."""
        import clr
        # add reference directly to assembly
        clr.AddReference(''.GetType().Assembly)
        # add reference via partial name
        clr.AddReference('System.Xml')
        # add a reference via a fully qualified name
        clr.AddReference(''.GetType().Assembly.FullName)
def get_local_filename(self, base):
if __file__.count(os.sep):
return os.path.join(__file__.rsplit(os.sep, 1)[0], base)
else:
return base
def compileAndLoad(self, name, filename, *args):
import clr
sys.path.append(sys.exec_prefix)
self.assertEqual(self.run_csc("/nologo /t:library " + ' '.join(args) + " /out:\"" + os.path.join(sys.exec_prefix, name +".dll") + "\" \"" + filename + "\""), 0)
return clr.LoadAssemblyFromFile(name)
#TODO: @skip("multiple_execute")
def test_classname_same_as_ns(self):
import clr
sys.path.append(sys.exec_prefix)
self.assertEqual(self.run_csc("/nologo /t:library /out:\"" + os.path.join(sys.exec_prefix, "c4.dll") + "\" \"" + self.get_local_filename('c4.cs') + "\""), 0)
clr.AddReference("c4")
import c4
self.assertTrue(not c4 is c4.c4)
self.assertTrue(c4!=c4.c4)
#TODO: @skip("multiple_execute")
    def test_local_dll(self):
        """Compile c3.cs locally and verify CLR assembly / namespace reflection behaviour.

        Pins the exact reprs of the assembly, its types and CLS namespace
        modules, dictionary access, and the read-only nature of namespaces.
        """
        x = self.compileAndLoad('c3', self.get_local_filename('c3.cs') )
        # Exact reprs pin the assembly identity and the CLS module wrappers.
        self.assertEqual(repr(x), "<Assembly c3, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null>")
        self.assertEqual(repr(x.Foo), "<type 'Foo'>")
        self.assertEqual(repr(x.BarNamespace), "<module 'BarNamespace' (CLS module from c3, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null)>")
        self.assertEqual(repr(x.BarNamespace.NestedNamespace), "<module 'NestedNamespace' (CLS module from c3, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null)>")
        self.assertEqual(repr(x.BarNamespace.Bar.NestedBar), "<type 'NestedBar'>")
        # Namespace members must be reachable both as attributes and via __dict__.
        self.assertEqual(x.__dict__["BarNamespace"], x.BarNamespace)
        self.assertEqual(x.BarNamespace.__dict__["Bar"], x.BarNamespace.Bar)
        self.assertEqual(x.BarNamespace.__dict__["NestedNamespace"], x.BarNamespace.NestedNamespace)
        self.assertEqual(x.BarNamespace.NestedNamespace.__name__, "NestedNamespace")
        self.assertEqual(x.BarNamespace.NestedNamespace.__file__, "c3, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null")
        self.assertRaises(AttributeError, lambda: x.BarNamespace.NestedNamespace.not_exist)
        self.assertRaises(AttributeError, lambda: x.Foo2) # assembly c3 has no type Foo2
        self.assertTrue(set(['NestedNamespace', 'Bar']) <= set(dir(x.BarNamespace)))
        # CLS namespaces are read-only: setting or deleting attributes must fail.
        def f(): x.BarNamespace.Bar = x.Foo
        self.assertRaises(AttributeError, f)
        def f(): del x.BarNamespace.NotExist
        self.assertRaises(AttributeError, f)
        def f(): del x.BarNamespace
        self.assertRaises(AttributeError, f)
    #TODO:@skip("multiple_execute")
    @unittest.skipIf(is_netcoreapp, "TODO: figure out")
    def test_namespaceimport(self):
        """Types from later-referenced assemblies appear in an already-imported CLR namespace."""
        import clr
        tmp = self.temporary_dir
        if tmp not in sys.path:
            sys.path.append(tmp)
        # Two assemblies contributing distinct types to the same namespace.
        code1 = "namespace TestNamespace { public class Test1 {} }"
        code2 = "namespace TestNamespace { public class Test2 {} }"
        test1_cs, test1_dll = os.path.join(tmp, 'testns1.cs'), os.path.join(tmp, 'testns1.dll')
        test2_cs, test2_dll = os.path.join(tmp, 'testns2.cs'), os.path.join(tmp, 'testns2.dll')
        self.write_to_file(test1_cs, code1)
        self.write_to_file(test2_cs, code2)
        self.assertEqual(self.run_csc("/nologo /target:library /out:"+ test1_dll + ' ' + test1_cs), 0)
        self.assertEqual(self.run_csc("/nologo /target:library /out:"+ test2_dll + ' ' + test2_cs), 0)
        clr.AddReference('testns1')
        import TestNamespace
        self.assertEqual(dir(TestNamespace), ['Test1'])
        clr.AddReference('testns2')
        # verify that you don't need to import TestNamespace again to see Test2
        self.assertEqual(dir(TestNamespace), ['Test1', 'Test2'])
def test_no_names_provided(self):
import clr
self.assertRaises(TypeError, clr.AddReference, None)
self.assertRaises(TypeError, clr.AddReferenceToFile, None)
self.assertRaises(TypeError, clr.AddReferenceByName, None)
self.assertRaises(TypeError, clr.AddReferenceByPartialName, None)
self.assertRaises(ValueError, clr.AddReference)
self.assertRaises(ValueError, clr.AddReferenceToFile)
self.assertRaises(ValueError, clr.AddReferenceByName)
self.assertRaises(ValueError, clr.AddReferenceByPartialName)
#TODO: @skip("multiple_execute")
def test_load_count(self):
# verify loading an assembly updates the | |
= 'High'
col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
condition_ratio = (cross_condition['Bad Loan'] / cross_condition['Good Loan'])
by_dti = df.groupby(['region', 'addr_state'], as_index=False).dti.mean()
state_codes = sorted(states)
default_ratio = condition_ratio.values.tolist()
average_dti = by_dti['dti'].values.tolist()
average_emp_length = by_emp_length['emp_length_int'].values.tolist()
number_of_badloans = loan_condition_bystate['Bad Loan'].values.tolist()
percentage_ofall_badloans = percentage_loan_contributor['Bad Loan'].values.tolist()
risk_data = OrderedDict([('state_codes', state_codes), ('default_ratio', default_ratio), ('badloans_amount', number_of_badloans), ('percentage_of_badloans', percentage_ofall_badloans), ('average_dti', average_dti), ('average_emp_length', average_emp_length)])
risk_df = pd.DataFrame.from_dict(risk_data)
return risk_df
#=============
# Function 260
def cleaning_func_50(col,df):
    """Assemble a per-state loan-risk summary (risk_df) with a plotly-style 'text' hover column.

    col -- DataFrame mutated in place with derived columns (emp_length_int,
           income_category, loan_condition_int); presumably the same object as
           df, since derived columns are set on col but read from df -- TODO
           confirm this aliasing (auto-generated code).
    df  -- raw LendingClub-style loan table.
    """
    # additional context code from user definitions
    def loan_condition(status):
        # 'bad_loan' is resolved at call time from the enclosing scope
        # (late binding), so the assignment further below suffices.
        if (status in bad_loan):
            return 'Bad Loan'
        else:
            return 'Good Loan'
    # core cleaning code
    import pandas as pd
    import numpy as np
    # df = pd.read_csv('../input/loan.csv', low_memory=False)
    df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
    bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
    df['loan_condition'] = np.nan
    df['loan_condition'] = df['loan_status'].apply(loan_condition)
    df['emp_length_int'] = np.nan
    # Translate the textual employment-length buckets into numeric years (set on col).
    col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
    col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
    col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
    col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
    col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
    col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
    col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
    col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
    col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
    col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
    col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
    col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
    by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
    states = by_loan_amount['addr_state'].values.tolist()
    from collections import OrderedDict
    # Bucket annual income into three coarse categories.
    col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
    col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
    col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
    col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
    col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
    # Per-state aggregates that feed the summary table.
    by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
    loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
    cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
    percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
    condition_ratio = (cross_condition['Bad Loan'] / cross_condition['Good Loan'])
    by_dti = df.groupby(['region', 'addr_state'], as_index=False).dti.mean()
    state_codes = sorted(states)
    default_ratio = condition_ratio.values.tolist()
    average_dti = by_dti['dti'].values.tolist()
    average_emp_length = by_emp_length['emp_length_int'].values.tolist()
    number_of_badloans = loan_condition_bystate['Bad Loan'].values.tolist()
    percentage_ofall_badloans = percentage_loan_contributor['Bad Loan'].values.tolist()
    risk_data = OrderedDict([('state_codes', state_codes), ('default_ratio', default_ratio), ('badloans_amount', number_of_badloans), ('percentage_of_badloans', percentage_ofall_badloans), ('average_dti', average_dti), ('average_emp_length', average_emp_length)])
    risk_df = pd.DataFrame.from_dict(risk_data)
    risk_df = risk_df.round(decimals=3)
    # NOTE(review): the next four lines are auto-generation artifacts -- 'col' is a
    # DataFrame, so risk_df[col] and assigning to the .astype attribute look wrong;
    # verify against the source notebook before relying on this function.
    risk_df[col] = risk_df[col].astype(str)
    risk_df[col] = risk_df[col]
    risk_df[col].astype = risk_df[col].astype
    risk_df[col] = risk_df[col].astype(str)
    risk_df['text'] = (((((((((((((risk_df['state_codes'] + '<br>') + 'Number of Bad Loans: ') + risk_df['badloans_amount']) + '<br>') + 'Percentage of all Bad Loans: ') + risk_df['percentage_of_badloans']) + '%') + '<br>') + 'Average Debt-to-Income Ratio: ') + risk_df['average_dti']) + '<br>') + 'Average Length of Employment: ') + risk_df['average_emp_length'])
    return risk_df
#=============
# Function 261
def cleaning_func_51(col,df):
    """Return total loan_amount grouped by (region, addr_state).

    col -- DataFrame mutated in place with a numeric emp_length_int column;
           presumably the same object as df (the column is initialised on df
           but populated on col) -- TODO confirm aliasing (auto-generated code).
    df  -- raw LendingClub-style loan table.
    """
    # additional context code from user definitions
    def loan_condition(status):
        # 'bad_loan' is resolved at call time from the enclosing scope (late binding).
        if (status in bad_loan):
            return 'Bad Loan'
        else:
            return 'Good Loan'
    # core cleaning code
    import pandas as pd
    import numpy as np
    # df = pd.read_csv('../input/loan.csv', low_memory=False)
    df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
    bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
    df['loan_condition'] = np.nan
    df['loan_condition'] = df['loan_status'].apply(loan_condition)
    df['emp_length_int'] = np.nan
    # Translate the textual employment-length buckets into numeric years (set on col).
    col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
    col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
    col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
    col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
    col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
    col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
    col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
    col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
    col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
    col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
    col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
    col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
    by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
    return by_loan_amount
#=============
# Function 262
def cleaning_func_53(col,df):
    """Return the per-state percentage contribution to each loan condition.

    The result is the addr_state x loan_condition crosstab with each column
    scaled to percentages. col is mutated in place with derived columns;
    presumably the same object as df -- TODO confirm aliasing (auto-generated code).
    """
    # additional context code from user definitions
    def loan_condition(status):
        # 'bad_loan' is resolved at call time from the enclosing scope (late binding).
        if (status in bad_loan):
            return 'Bad Loan'
        else:
            return 'Good Loan'
    # core cleaning code
    import pandas as pd
    import numpy as np
    # df = pd.read_csv('../input/loan.csv', low_memory=False)
    df = df.rename(columns={'loan_amnt': 'loan_amount', 'funded_amnt': 'funded_amount', 'funded_amnt_inv': 'investor_funds', 'int_rate': 'interest_rate', 'annual_inc': 'annual_income'})
    bad_loan = ['Charged Off', 'Default', 'Does not meet the credit policy. Status:Charged Off', 'In Grace Period', 'Late (16-30 days)', 'Late (31-120 days)']
    df['loan_condition'] = np.nan
    df['loan_condition'] = df['loan_status'].apply(loan_condition)
    df['emp_length_int'] = np.nan
    # Translate the textual employment-length buckets into numeric years (set on col).
    col.loc[((col['emp_length'] == '10+ years'), 'emp_length_int')] = 10
    col.loc[((col['emp_length'] == '9 years'), 'emp_length_int')] = 9
    col.loc[((col['emp_length'] == '8 years'), 'emp_length_int')] = 8
    col.loc[((col['emp_length'] == '7 years'), 'emp_length_int')] = 7
    col.loc[((col['emp_length'] == '6 years'), 'emp_length_int')] = 6
    col.loc[((col['emp_length'] == '5 years'), 'emp_length_int')] = 5
    col.loc[((col['emp_length'] == '4 years'), 'emp_length_int')] = 4
    col.loc[((col['emp_length'] == '3 years'), 'emp_length_int')] = 3
    col.loc[((col['emp_length'] == '2 years'), 'emp_length_int')] = 2
    col.loc[((col['emp_length'] == '1 year'), 'emp_length_int')] = 1
    col.loc[((col['emp_length'] == '< 1 year'), 'emp_length_int')] = 0.5
    col.loc[((col['emp_length'] == 'n/a'), 'emp_length_int')] = 0
    by_loan_amount = df.groupby(['region', 'addr_state'], as_index=False).loan_amount.sum()
    states = by_loan_amount['addr_state'].values.tolist()
    from collections import OrderedDict
    # Bucket annual income into three coarse categories.
    col.loc[((col['annual_income'] <= 100000), 'income_category')] = 'Low'
    col.loc[(((col['annual_income'] > 100000) & (col['annual_income'] <= 200000)), 'income_category')] = 'Medium'
    col.loc[((col['annual_income'] > 200000), 'income_category')] = 'High'
    col.loc[((df['loan_condition'] == 'Bad Loan'), 'loan_condition_int')] = 0
    col.loc[((df['loan_condition'] == 'Good Loan'), 'loan_condition_int')] = 1
    by_emp_length = df.groupby(['region', 'addr_state'], as_index=False).emp_length_int.mean().sort_values(by='addr_state')
    loan_condition_bystate = pd.crosstab(df['addr_state'], df['loan_condition'])
    cross_condition = pd.crosstab(df['addr_state'], df['loan_condition'])
    percentage_loan_contributor = pd.crosstab(df['addr_state'], df['loan_condition']).apply((lambda x: ((x / x.sum()) * 100)))
    return percentage_loan_contributor
#=============
# Function 263
def cleaning_func_0(data):
    """Return the loans that are not fully paid off."""
    import pandas as pd
    # data = pd.read_csv('../input/loan.csv', parse_dates=True)
    active = data.loc[data['loan_status'] != 'Fully Paid']
    return active
#=============
# Function 264
def cleaning_func_1(data):
    """Drop closed loans and parse next_pymnt_d into datetimes."""
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    data.next_pymnt_d = pd.to_datetime(data.next_pymnt_d)
    return data
#=============
# Function 265
def cleaning_func_2(data):
    """Drop closed loans and parse issue_d into datetimes."""
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    data.issue_d = pd.to_datetime(data.issue_d)
    return data
#=============
# Function 266
def cleaning_func_3(data):
    """Drop closed loans and parse last_pymnt_d into datetimes."""
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    data.last_pymnt_d = pd.to_datetime(data.last_pymnt_d)
    return data
#=============
# Function 267
def cleaning_func_4(data):
    """Drop closed loans and parse last_credit_pull_d into datetimes."""
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    data.last_credit_pull_d = pd.to_datetime(data.last_credit_pull_d)
    return data
#=============
# Function 268
def cleaning_func_5(data):
    """Drop closed loans and reduce earliest_cr_line to the year the credit line opened."""
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    opened = pd.to_datetime(data.earliest_cr_line)
    data.earliest_cr_line = opened.dt.year
    return data
#=============
# Function 269
def cleaning_func_7(data):
    """Drop closed loans and flag every non-'Current' loan with rating = 1."""
    import numpy as np
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    not_current = (data.loan_status != 'Current')
    data['rating'] = np.where(not_current, 1, 0)
    return data
#=============
# Function 270
def cleaning_func_8(data):
    """Drop closed loans and flag loans with any recovered amount (recovery = 1)."""
    import numpy as np
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    has_recoveries = (data.recoveries != 0.0)
    data['recovery'] = np.where(has_recoveries, 1, 0)
    return data
#=============
# Function 271
def cleaning_func_9(data):
    """Drop closed loans and replace missing emp_length values with 0."""
    import numpy as np
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    filled = data.emp_length.replace(np.nan, 0)
    data.emp_length = filled
    return data
#=============
# Function 272
def cleaning_func_10(data):
    """Drop closed loans and replace missing dti_joint values with 0."""
    import numpy as np
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    filled = data.dti_joint.replace(np.nan, 0)
    data.dti_joint = filled
    return data
#=============
# Function 273
def cleaning_func_11(data):
    """Drop closed loans and replace missing annual_inc_joint values with 0."""
    import numpy as np
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    filled = data.annual_inc_joint.replace(np.nan, 0)
    data.annual_inc_joint = filled
    return data
#=============
# Function 274
def cleaning_func_12(data):
    """Drop closed loans and replace missing verification_status_joint with the string 'None'."""
    import numpy as np
    import pandas as pd
    closed_statuses = ('Fully Paid', 'Does not meet the credit policy. Status:Fully Paid')
    for closed in closed_statuses:
        data = data[data.loan_status != closed]
    filled = data.verification_status_joint.replace(np.nan, 'None')
    data.verification_status_joint = filled
    return data
#=============
# Function 275
def cleaning_func_13(data):
    """Drop closed loans and add a binary 'delinq' indicator column.

    NOTE(review): 'e' is never defined in this auto-generated function, so the
    data[e] lines raise NameError unless a global 'e' exists -- verify against
    the source notebook before using.
    """
    # core cleaning code
    import numpy as np
    import pandas as pd
    # data = pd.read_csv('../input/loan.csv', parse_dates=True)
    data = data[(data.loan_status != 'Fully Paid')]
    data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
    data[e] = data[e].replace(np.nan, 0)
    # NOTE(review): the next three lines are generator artifacts -- self-assignments
    # and rebinding np.nan to itself have no effect.
    data[e] = data[e]
    data[e].replace = data[e].replace
    np.nan = np.nan
    data[e] = data[e].replace(np.nan, 0)
    # delinq = 1 when a months-since-last-delinquency value is present, else 0.
    data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
    data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
    return data
#=============
# Function 276
def cleaning_func_14(data):
    """Drop closed loans and add binary 'delinq' and 'derog' indicator columns.

    NOTE(review): 'e' is never defined in this auto-generated function, so the
    data[e] lines raise NameError unless a global 'e' exists -- verify against
    the source notebook before using.
    """
    # core cleaning code
    import numpy as np
    import pandas as pd
    # data = pd.read_csv('../input/loan.csv', parse_dates=True)
    data = data[(data.loan_status != 'Fully Paid')]
    data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
    data[e] = data[e].replace(np.nan, 0)
    # NOTE(review): the next three lines are generator artifacts -- self-assignments
    # and rebinding np.nan to itself have no effect.
    data[e] = data[e]
    data[e].replace = data[e].replace
    np.nan = np.nan
    data[e] = data[e].replace(np.nan, 0)
    # delinq = 1 when a months-since-last-delinquency value is present, else 0.
    data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = 1
    data.loc[(data.mths_since_last_delinq.isnull(), 'delinq')] = 0
    # derog = 1 when a months-since-last-major-derogatory value is present, else 0.
    data.loc[(data.mths_since_last_major_derog.notnull(), 'derog')] = 1
    data.loc[(data.mths_since_last_major_derog.isnull(), 'derog')] = 0
    return data
#=============
# Function 277
def cleaning_func_15(data):
# core cleaning code
import numpy as np
import pandas as pd
# data = pd.read_csv('../input/loan.csv', parse_dates=True)
data = data[(data.loan_status != 'Fully Paid')]
data = data[(data.loan_status != 'Does not meet the credit policy. Status:Fully Paid')]
data[e] = data[e].replace(np.nan, 0)
data[e] = data[e]
data[e].replace = data[e].replace
np.nan = np.nan
data[e] = data[e].replace(np.nan, 0)
data.loc[(data.mths_since_last_delinq.notnull(), 'delinq')] = | |
image_id):
""" Reads the JSON metadata for specified layer / image id """
for layer in self.docker.history(image_id):
layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
def _write_json_metadata(self, metadata, metadata_file):
with open(metadata_file, 'w') as f:
f.write(metadata)
def _read_old_metadata(self, old_json_file):
self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)
# Read original metadata
with open(old_json_file, 'r') as f:
metadata = json.load(f)
return metadata
def _move_layers(self, layers, src, dest):
"""
This moves all the layers that should be copied as-is.
In other words - all layers that are not meant to be squashed will be
moved from the old image to the new image untouched.
"""
for layer in layers:
layer_id = layer.replace('sha256:', '')
self.log.debug("Moving unmodified layer '%s'..." % layer_id)
shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
    def _marker_files(self, tar, members):
        """
        Searches for marker files in the specified archive.
        Docker marker files are files that have the .wh. prefix in the name.
        These files mark the corresponding file to be removed (hidden) when
        we start a container from the image.

        Returns a dict mapping each marker member (TarInfo) to its extracted
        file object from *tar*.
        """
        marker_files = {}
        self.log.debug(
            "Searching for marker files in '%s' archive..." % tar.name)
        for member in members:
            # Any '.wh.' in the member name marks a whiteout entry.
            if '.wh.' in member.name:
                self.log.debug("Found '%s' marker file" % member.name)
                marker_files[member] = tar.extractfile(member)
        self.log.debug("Done, found %s files" % len(marker_files))
        return marker_files
    def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
        """
        This method is responsible for adding back all markers that were not
        added to the squashed layer AND files they refer to can be found in layers
        we do not squash.

        markers: dict of marker TarInfo -> extracted file object (see _marker_files).
        tar: the squashed tar archive being written; mutated via addfile().
        files_in_layers: dict keyed by layer whose values are collections of
            normalized paths present in the layers that are kept.
        added_symlinks: nested per-layer lists of symlink paths already added.
        """
        if markers:
            self.log.debug("Marker files to add: %s" %
                           [o.name for o in markers.keys()])
        else:
            # No marker files to add
            return
        # https://github.com/goldmann/docker-squash/issues/108
        # Some tar archives do have the filenames prefixed with './'
        # which does not have any effect when we unpack the tar archive,
        # but when processing tar content - we see this.
        tar_files = [self._normalize_path(x) for x in tar.getnames()]
        for marker, marker_file in six.iteritems(markers):
            # The marker's target is its own name with the whiteout prefix removed.
            actual_file = marker.name.replace('.wh.', '')
            normalized_file = self._normalize_path(actual_file)
            should_be_added_back = False
            if self._file_should_be_skipped(normalized_file, added_symlinks):
                self.log.debug(
                    "Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
                continue
            if normalized_file in tar_files:
                self.log.debug(
                    "Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
                continue
            if files_in_layers:
                # Re-add the marker only when the file it hides still exists
                # in one of the layers we keep.
                for files in files_in_layers.values():
                    if normalized_file in files:
                        should_be_added_back = True
                        break
            else:
                # There are no previous layers, so we need to add it back
                # In fact this shouldn't happen since having a marker file
                # where there is no previous layer does not make sense.
                should_be_added_back = True
            if should_be_added_back:
                self.log.debug(
                    "Adding '%s' marker file back..." % marker.name)
                # Marker files on AUFS are hardlinks, we need to create
                # regular files, therefore we need to recreate the tarinfo
                # object
                tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
                # Add the file name to the list too to avoid re-reading all files
                # in tar archive
                tar_files.append(normalized_file)
            else:
                self.log.debug(
                    "Skipping '%s' marker file..." % marker.name)
def _normalize_path(self, path):
return os.path.normpath(os.path.join("/", path))
    def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
        """
        Re-add hard links that were deferred during squashing, but only when
        neither the link nor its target is on a skip list, the link is not
        already squashed, and the target did make it into the squashed tar.

        skipped_hard_links: per-layer list of dicts of hard-link TarInfo members.
        squashed_files: list of normalized names already in the archive; mutated.
        """
        for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
            # We need to start from 1, that's why we bump it here
            current_layer = layer + 1
            for member in six.itervalues(hardlinks_in_layer):
                normalized_name = self._normalize_path(member.name)
                normalized_linkname = self._normalize_path(member.linkname)
                # Find out if the name is on the list of files to skip - if it is - get the layer number
                # where it was found
                layer_skip_name = self._file_should_be_skipped(
                    normalized_name, to_skip)
                # Do the same for linkname
                layer_skip_linkname = self._file_should_be_skipped(
                    normalized_linkname, to_skip)
                # We need to check if we should skip adding back the hard link
                # This can happen in the following situations:
                # 1. hard link is on the list of files to skip
                # 2. hard link target is on the list of files to skip
                # 3. hard link is already in squashed files
                # 4. hard link target is NOT in already squashed files
                if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
                    self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                        normalized_name, normalized_linkname))
                else:
                    if self.debug:
                        self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
                            normalized_name, normalized_linkname))
                    squashed_files.append(normalized_name)
                    squashed_tar.addfile(member)
    def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
        """
        Append a single tar *member* (with optional *content* stream) to the
        squashed archive unless it was already added or is marked to be
        skipped. On success the normalized name is recorded in squashed_files.
        """
        normalized_name = self._normalize_path(member.name)
        if normalized_name in squashed_files:
            self.log.debug(
                "Skipping file '%s' because it is already squashed" % normalized_name)
            return
        if self._file_should_be_skipped(normalized_name, to_skip):
            self.log.debug(
                "Skipping '%s' file because it's on the list to skip files" % normalized_name)
            return
        if content:
            squashed_tar.addfile(member, content)
        else:
            # Special case: other(?) files, we skip the file
            # itself
            squashed_tar.addfile(member)
        # We added a file to the squashed tar, so let's note it
        squashed_files.append(normalized_name)
    def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
        """
        Re-add symbolic links that were deferred during squashing.

        skipped_sym_links: per-layer list of dicts of symlink TarInfo members.
        squashed_files is mutated with each added name. Returns the nested
        list of symlink paths that were added back (consumed by _add_markers).
        """
        added_symlinks = []
        for layer, symlinks_in_layer in enumerate(skipped_sym_links):
            # We need to start from 1, that's why we bump it here
            current_layer = layer + 1
            for member in six.itervalues(symlinks_in_layer):
                # Handling symlinks. This is similar to hard links with one
                # difference. Sometimes we do want broken symlinks to be
                # added either way, because these can point to locations
                # that will become available after adding volumes for example.
                normalized_name = self._normalize_path(member.name)
                normalized_linkname = self._normalize_path(member.linkname)
                # File is already in squashed files, skipping
                if normalized_name in squashed_files:
                    self.log.debug(
                        "Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
                    continue
                if self._file_should_be_skipped(normalized_name, added_symlinks):
                    self.log.debug(
                        "Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
                    continue
                # Find out if the name is on the list of files to skip - if it is - get the layer number
                # where it was found
                layer_skip_name = self._file_should_be_skipped(
                    normalized_name, to_skip)
                # Do the same for linkname
                layer_skip_linkname = self._file_should_be_skipped(
                    normalized_linkname, to_skip)
                # If name or linkname was found in the lists of files to be
                # skipped or it's not found in the squashed files
                if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
                    self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                        normalized_name, normalized_linkname))
                else:
                    if self.debug:
                        self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
                            normalized_name, normalized_linkname))
                    added_symlinks.append([normalized_name])
                    squashed_files.append(normalized_name)
                    squashed_tar.addfile(member)
        return added_symlinks
def _squash_layers(self, layers_to_squash, layers_to_move):
self.log.info("Starting squashing...")
# Reverse the layers to squash - we begin with the newest one
# to make the tar lighter
layers_to_squash.reverse()
# | |
#!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is not supported by Google
#
"""Classes for administering the Google Search Appliance.
Currently, the classes available can be used to import/export/rewrite
the config file. This module is designed to be expandable so that
other functionality can be easily included.
gsaConfig: Class for handling XML from GSA export/import.
gsaWebInterface: Class for interacting with GSA Web Interface.
Example usage:
1. Export the config file:
gsa_admin.py -n <host> --port 8000 -u admin -p <pw> -e --sign-password
<PASSWORD> -o ~/tmp/o.xml -v
2. Make a change to the config file and sign:
./gsa_admin.py -n <host> -u admin -p <pw> -s --sign-password hellohello
-f ~/tmp/o.xml -v -o ~/tmp/o2.xml
3. Import the new config file:
 ./gsa_admin.py -n <host> --port 8000 -u admin -p <pw> -i --sign-password
 hellohello -f ~/tmp/o2.xml -v
Note that you must use the same password to sign a file that you used
when exporting. You will get an error when importing if you do not do
this.
4. Export all the URLs to a file:
./gsa_admin.py --hostname=<host> --username=admin
--password=<pw> --all_urls --output=/tmp/all_urls
5. Retrieve GSA^n (mirroring) status from the admin console
./gsa_admin.py -z -n <host> -u admin -p <pw>
6. Trigger database synchronization
./gsa_admin.py -n YOUR_GSA --port 8000 -u admin -p YOUR_PASSWORD --database_sync --sources=DB_NAME
7. Run custom support script provided by Google Support
./gsa_admin.py -n YOUR_GSA --port 8000 -u admin -p YOUR_PASSWORD -m -f ./sscript.txt -o ./out.txt -t 300
8. Pause crawl
./gsa_admin.py -n YOUR_GSA --port 8000 -u admin -p YOUR_PASSWORD --pause_crawl
9. Resume crawl
./gsa_admin.py -n YOUR_GSA --port 8000 -u admin -p YOUR_PASSWORD --resume_crawl
TODO(jlowry): add in functionality from adminconsole.py:
get crawl status, shutdown.
"""
__author__ = "<EMAIL> (<NAME>)"
import cgi
import os.path
import logging
import sys
import xml.dom.minidom
import hashlib
import hmac
import json
import codecs
import urllib2
import urllib
import cookielib
import re
import time
import urlparse
from optparse import OptionParser, OptionGroup
# Required for utf-8 file compatibility
# NOTE(review): reload(sys) + sys.setdefaultencoding is a Python 2-only hack;
# it changes the process-wide default codec (hidden from normal code because
# site.py deletes the function) and does not exist in Python 3.
reload(sys)
sys.setdefaultencoding("utf-8")
del sys.setdefaultencoding
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    Installed as the module's default handler so library users do not get
    "no handlers could be found" warnings unless they configure logging.
    """
    def emit(self, record):
        pass
# Default verbosity for the tool; DEBUG is noisy — presumably overridden by
# the command-line option handling elsewhere in this file (verify).
DEFAULTLOGLEVEL=logging.DEBUG
log = logging.getLogger(__name__)
log.addHandler(NullHandler())  # stay silent until the caller configures logging
class gsaConfig:
    """Google Search Appliance XML configuration tool.

    Holds an exported configuration file as a string and supports loading,
    HMAC-SHA1 signing of the <config> node, signature verification, and
    saving back to disk. Written for Python 2 (str payloads passed to hmac).
    """
    configXMLString = None  # raw XML of the exported configuration

    def __init__(self, fileName=None):
        """Optionally load configuration XML from fileName."""
        if fileName:
            self.openFile(fileName)

    def __str__(self):
        return self.configXMLString

    def openFile(self, fileName):
        """Read the configuration file into memory; exit(1) if it is missing."""
        if not os.path.exists(fileName):
            log.error("Input file does not exist")
            sys.exit(1)
        # Context manager guarantees the handle is released even if read()
        # raises (the original closed it manually on the success path only).
        with open(fileName) as configXMLdoc:
            self.configXMLString = configXMLdoc.read()

    def setXMLContents(self, xmlString):
        """Set the runtime XML contents (stored utf-8 encoded)."""
        self.configXMLString = xmlString.encode("utf-8")
        #log.warning("Signature maybe invalid. Please verify before uploading or saving")

    def getXMLContents(self):
        """Return the contents of the XML file, utf-8 encoded."""
        return self.configXMLString.encode("utf-8")

    def computeSignature(self, password):
        """Return the hex HMAC-SHA1 of the <config> node, keyed by password.

        Replicates the appliance's pre-signing transformation: the <uam_dir>
        node is removed and, for exports that carry UAR data (7.0+), the
        <uar_data> payload is replaced by "<tmp dir>,<payload hash>".
        """
        configXMLString = self.getXMLContents()
        # ugly removal of spaces because minidom cannot remove them
        # automatically when removing a node
        configXMLString = re.sub(' <uam_dir>', '<uam_dir>', configXMLString)
        configXMLString = re.sub('</uam_dir>\n', '</uam_dir>', configXMLString)
        doc = xml.dom.minidom.parseString(configXMLString)
        # Remove <uam_dir> node because new GSAs expect so
        uamdirNode = doc.getElementsByTagName("uam_dir").item(0)
        uamdirNode.parentNode.removeChild(uamdirNode)
        uardataNode = doc.getElementsByTagName("uar_data").item(0)
        uardataB64contents = uardataNode.firstChild.nodeValue.strip() + '\n'
        if uardataB64contents != "\n":
            log.debug("UAR data contains data. Must be 7.0 or newer")
            # replace <uar_data> node with "/tmp/tmp_uar_data_dir,hash"
            # 1: Strip additional spaces at the end but we need the new line
            #    to compute hash.
            #    "AAAAAAAAAA==\n ]]></uar_data>" <-- 10 spaces
            uardataHash = hmac.new(password, uardataB64contents, hashlib.sha1).hexdigest()
            # 2: Replace to <dummy file name, hash> with additional whitespaces.
            uardataNode.firstChild.nodeValue = ("\n/tmp/tmp_uar_data_dir,"
                + "%s\n ") % ('' + uardataHash)
            log.debug("uar_data is replaced to %s" % uardataNode.toxml())
        # Get <config> node
        configNode = doc.getElementsByTagName("config").item(0)
        # get string of Node and children (as utf-8)
        configNodeXML = configNode.toxml()
        # Create new HMAC using user password and configXML as sum contents
        return hmac.new(password, configNodeXML, hashlib.sha1).hexdigest()

    def sign(self, password):
        """Recompute the signature and store it in the <signature> node."""
        computedSignature = self.computeSignature(password)
        configXMLString = self.getXMLContents()
        doc = xml.dom.minidom.parseString(configXMLString)
        # Get <signature> node
        signatureNode = doc.getElementsByTagName("signature").item(0)
        signatureCDATANode = signatureNode.firstChild
        # Set CDATA/Text area to new HMAC
        signatureCDATANode.nodeValue = computedSignature
        self.setXMLContents(doc.toxml())

    def writeFile(self, filename):
        """Write the XML to filename; exit(1) rather than overwrite."""
        if os.path.exists(filename):
            log.error("Output file exists")
            sys.exit(1)
        doc = xml.dom.minidom.parseString(self.configXMLString)
        log.debug("Writing XML to %s" % filename)
        # GSA newer than 6.? expects '<eef>' to be on the second line.
        # Context manager fixes the original's leaked file handle, which
        # could leave the output unflushed on interpreter exit.
        with codecs.open(filename, 'w', "utf-8") as outputXMLFile:
            outputXMLFile.write(doc.toxml().replace("<eef>", "\n<eef>", 1))

    def verifySignature(self, password):
        """Return 1 if the stored signature matches the recomputed HMAC, else None.

        Keeps the original truthy/falsy contract (1/None) for callers.
        """
        computedSignature = self.computeSignature(password)
        configXMLString = self.getXMLContents()
        doc = xml.dom.minidom.parseString(configXMLString)
        # Get <signature> node
        signatureNode = doc.getElementsByTagName("signature").item(0)
        signatureValue = signatureNode.firstChild.nodeValue
        # signatureValue may contain whitespace and linefeeds so we'll just
        # ensure that our HMAC is found within
        if signatureValue.count(computedSignature):
            log.debug("Signature matches")
            return 1
        else:
            log.debug("Signature does not match %s vs %s" %
                      (signatureValue, computedSignature))
            return None
class gsaWebInterface:
    """Google Search Appliance Web Interface Wrapper"""
    # Class-level defaults; real values are assigned per-instance in __init__.
    baseURL = None        # admin-console EnterpriseController endpoint
    username = None
    password = <PASSWORD>  # NOTE(review): dataset redaction placeholder — original value unknown
    hostName = None
    loggedIn = None       # truthy once _login() has authenticated this session
    _url_opener = None    # urllib2 opener carrying this instance's cookie jar
    def __init__(self, hostName, username, password, port=8000, use_ssl=False):
        """Build the admin-console base URL and a cookie-aware URL opener."""
        protocol = 'https' if use_ssl else 'http'
        self.baseURL = '%s://%s:%s/EnterpriseController' % (protocol, hostName, port)
        self.hostName = hostName
        self.username = username
        self.password = password
        log.debug("Using a base URL of '%s'" % self.baseURL)
        # build cookie jar for this web instance only. Should allow for GSAs
        # port mapped behind a reverse proxy.
        cookieJar = cookielib.CookieJar()
        self._url_opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
    def _openurl(self, request):
        """Open a request through this instance's cookie-aware opener.

        Args:
          request: urllib2 request object or URL string.
        """
        return self._url_opener.open(request)
def _encode_multipart_formdata(self, fields, files):
"""
fields: a sequence of (name, value) elements for regular form fields.
files: a sequence of (name, filename, value) elements for data to be uploaded as files
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
lines.append('Content-Type: text/xml')
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
    def _login(self):
        """Authenticate against the admin console (no-op if already logged in).

        Detects the console generation from the response (HTML pre-7.2,
        JSON-ish from 7.2 on), sets self.is72 accordingly and self.loggedIn
        on success; exits with status 2 if neither form is recognised.
        """
        if not self.loggedIn:
            log.debug("Fetching initial page for new cookie")
            # Initial GET seeds the cookie jar with a session cookie.
            self._openurl(self.baseURL)
            request = urllib2.Request(self.baseURL,
                urllib.urlencode(
                    {'actionType' : 'authenticateUser',
                     # for 7.0 or older
                     'userName' : self.username,
                     'password' : <PASSWORD>,  # NOTE(review): redaction placeholder — presumably self.password
                     # for 7.2 and newer. Having both doesn't hurt
                     'reqObj' : json.dumps([None, self.username, self.password, None, 1]),
                     }))
            log.debug("Logging in as %s..." % self.username)
            result = self._openurl(request)
            resultString = result.read()
            # Pre 7.2 has "Google Search Appliance >Home"
            # 7.2 and later returns JSON like object
            home = re.compile("Google Search Appliance\s*>\s*Home")
            home72 = re.compile('"xsrf": \[null,"security_token","')
            if home.search(resultString):
                log.debug("7.0 or older")
                self.is72 = False
            elif home72.search(resultString):
                log.debug("7.2 or newer")
                # The first line is junk to prevent some action on browsers: )]}',
                # Just skip it.
                response = json.loads(resultString[5:])
                log.info("Security token is: " + response["xsrf"][2])
                self.is72 = True
            else:
                log.error("Login failed: " + resultString)
                sys.exit(2)
            log.debug("Successfully logged in")
            self.loggedIn = True
    def _logout(self):
        """Terminate the admin-console session and clear the login flag."""
        request = urllib2.Request(self.baseURL + "?" + urllib.urlencode({'actionType' : 'logout'}))
        self._openurl(request)
        self.loggedIn = False
def __del__(self):
self._logout()
    def importConfig(self, gsaConfig, configPassword):
        """Upload a gsaConfig object's XML to the appliance.

        configPassword must match the passphrase used when the file was
        exported/signed. Exits the process with status 2 on any recognised
        failure message in the response HTML.
        """
        fields = [("actionType", "importExport"), ("passwordIn", configPassword),
                  ("import", " Import Configuration ")]
        files = [("importFileName", "config.xml", gsaConfig.getXMLContents() )]
        content_type, body = self._encode_multipart_formdata(fields,files)
        headers = {'User-Agent': 'python-urllib2', 'Content-Type': content_type}
        self._login()
        security_token = self.getSecurityToken('cache')
        request = urllib2.Request(self.baseURL + "?" +
            urllib.urlencode({'actionType': 'importExport',
                              'export': ' Import Configuration ',
                              'security_token' : security_token,
                              'a' : '1',
                              'passwordIn': configPassword}),
            body, headers)
        log.info("Sending XML...")
        result = self._openurl(request)
        content = result.read()
        # The console reports problems as human-readable strings in the
        # response page; map each known message to a log line and exit code.
        if content.count("Invalid file"):
            log.error("Invalid configuration file")
            sys.exit(2)
        elif content.count("Wrong passphrase or the file is corrupt"):
            log.error("Wrong passphrase or the file is corrupt. Try ")
            sys.exit(2)
        elif content.count("Passphrase should be at least 8 characters long"):
            log.error("Passphrase should be at least 8 characters long")
            sys.exit(2)
        elif content.count("File does not exist"):
            log.error("Configuration file does not exist")
            sys.exit(2)
        elif not content.count("Configuration imported successfully"):
            log.error("Import failed")
            sys.exit(2)
        else:
            log.info("Import successful")
    def exportConfig(self, configPassword):
        """Download the appliance configuration and return it as a gsaConfig.

        configPassword is the signing passphrase; the same passphrase must be
        supplied when re-importing the file.
        """
        self._login()
        security_token = self.getSecurityToken('cache')
        request = urllib2.Request(self.baseURL + "?" +
            urllib.urlencode({'actionType': 'importExport',
                              'export': ' Export Configuration ',
                              'security_token': security_token,
                              'a': '1',
                              'password1': configPassword,
                              'password2': <PASSWORD>}))  # NOTE(review): redaction placeholder — presumably configPassword
        log.debug("Fetching config XML")
        result = self._openurl(request)
        content = result.read()
        if content.count("Passphrase should be at least 8 characters long"):
            log.error("Passphrase should be at least 8 characters long. You entered: '%s'" % (configPassword))
            sys.exit(2)
        gsac = gsaConfig()
        log.debug("Returning gsaConfig object")
        gsac.setXMLContents(content)
        return gsac
def getSecurityTokenFromContents(self, content):
"""Gets the value of the security_token hidden form parameter.
Args:
content: | |
0, 0, 0, 0],
[983, 44.0, 0, 9999, -9999, 1.0, 100, 1, 44.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[984, 465.0, 0, 9999, -9999, 1.0, 100, 1, 465.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[985, 22.0, 0, 9999, -9999, 1.0, 100, 1, 22.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[986, 11.2, 0, 9999, -9999, 1.0, 100, 1, 11.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[987, 164.5, 0, 9999, -9999, 1.0, 100, 1, 164.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[988, 5.1, 0, 9999, -9999, 1.0, 100, 1, 5.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[990, 300.0, 0, 9999, -9999, 1.0, 100, 1, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[992, 150.0, 0, 9999, -9999, 1.0, 100, 1, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[993, 392.0, 0, 9999, -9999, 1.0, 100, 1, 392.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[994, 33.0, 0, 9999, -9999, 1.0, 100, 1, 33.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[995, 4.2, 0, 9999, -9999, 1.0, 100, 1, 4.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[996, 11.5, 0, 9999, -9999, 1.0, 100, 1, 11.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[997, 18.8, 0, 9999, -9999, 1.0, 100, 1, 18.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[998, 423.0, 0, 9999, -9999, 1.0, 100, 1, 423.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[999, 15.6, 0, 9999, -9999, 1.0, 100, 1, 15.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1000, 49.0, 0, 9999, -9999, 1.0, 100, 1, 49.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1002, 9.9, 0, 9999, -9999, 1.0, 100, 1, 9.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1003, 900.0, 0, 9999, -9999, 1.0, 100, 1, 900.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1006, 122.0, 0, 9999, -9999, 1.0, 100, 1, 122.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1007, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1008, 49.0, 0, 9999, -9999, 1.0, 100, 1, 49.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1010, 750.0, 0, 9999, -9999, 1.0, 100, 1, 750.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1011, 18.7, 0, 9999, -9999, 1.0, 100, 1, 18.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1012, 2508.35776, 0, 9999, -9999, 1.0, 100, 1, 2835.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1014, 750.0, 0, 9999, -9999, 1.0, 100, 1, 750.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1016, 41.674213, 0, 9999, -9999, 1.0, 100, 1, 323.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1018, 175.9, 0, 9999, -9999, 1.0, 100, 1, 175.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1019, 120.0, 0, 9999, -9999, 1.0, 100, 1, 120.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1023, 0.2, 0, 9999, -9999, 1.0, 100, 1, 0.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1025, 113.6, 0, 9999, -9999, 1.0, 100, 1, 113.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1026, 655.6, 0, 9999, -9999, 1.0, 100, 1, 655.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1027, 12.670638, 0, 9999, -9999, 1.0, 100, 1, 48.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1028, 32.306759, 0, 9999, -9999, 1.0, 100, 1, 400.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1030, 73.497878, 0, 9999, -9999, 1.0, 100, 1, 1018.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1031, 213.617515, 0, 9999, -9999, 1.0, 100, 1, 1447.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1032, 2.251274, 0, 9999, -9999, 1.0, 100, 1, 153.510391, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1033, 0.001296, 0, 9999, -9999, 1.0, 100, 1, 50.164506, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1034, 2.182378, 0, 9999, -9999, 1.0, 100, 1, 84.262779, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1035, 0.590176, 0, 9999, -9999, 1.0, 100, 1, 49.886469, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1036, 0.028033, 0, 9999, -9999, 1.0, 100, 1, 67.223077, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1037, 6.223385, 0, 9999, -9999, 1.0, 100, 1, 94.684044, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1038, 5.820552, 0, 9999, -9999, 1.0, 100, 1, 85.798525, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1039, 20.502062, 0, 9999, -9999, 1.0, 100, 1, 132.724114, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1040, 0.00444, 0, 9999, -9999, 1.0, 100, 1, 0.064179, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1041, 3.867196, 0, 9999, -9999, 1.0, 100, 1, 204.187624, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1042, 12.017683, 0, 9999, -9999, 1.0, 100, 1, 52.70053, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1043, 0.52234, 0, 9999, -9999, 1.0, 100, 1, 6.035538, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1044, 2.648144, 0, 9999, -9999, 1.0, 100, 1, 36.163532, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1045, 3.180681, 0, 9999, -9999, 1.0, 100, 1, 61.836204, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1046, 0.998236, 0, 9999, -9999, 1.0, 100, 1, 106.787063, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1047, 1.007449, 0, 9999, -9999, 1.0, 100, 1, 13.029581, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1048, 2.894351, 0, 9999, -9999, 1.0, 100, 1, 71.656883, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1049, 12.866289, 0, 9999, -9999, 1.0, 100, 1, 293.755375, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1051, 0.127628, 0, 9999, -9999, 1.0, 100, 1, 304.42978, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1052, 0.031187, 0, 9999, -9999, 1.0, 100, 1, 20.66869, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1053, 0.022357, 0, 9999, -9999, 1.0, 100, 1, 16.368087, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1054, 0.103136, 0, 9999, -9999, 1.0, 100, 1, 273.855776, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1055, 0.46288, 0, 9999, -9999, 1.0, 100, 1, 2.856069, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1056, 20.999543, 0, 9999, -9999, 1.0, 100, 1, 603.943953, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1057, 53.049883, 0, 9999, -9999, 1.0, 100, 1, 426.979979, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1058, 44.457402, 0, 9999, -9999, 1.0, 100, 1, 1055.735174, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1059, 35.358156, 0, 9999, -9999, 1.0, 100, 1, 414.871332, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1060, 2.10207, 0, 9999, -9999, 1.0, 100, 1, 10.351632, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1061, 31.180405, 0, 9999, -9999, 1.0, 100, 1, 161.862597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1062, 0.634638, 0, 9999, -9999, 1.0, 100, 1, 2.878561, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1063, 2.079167, 0, 9999, -9999, 1.0, 100, 1, 8.670916, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1064, 1.426024, 0, 9999, -9999, 1.0, 100, 1, 209.786524, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1065, 0.083906, 0, 9999, -9999, 1.0, 100, 1, 339.421643, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1066, 0.599031, 0, 9999, -9999, 1.0, 100, 1, 134.399019, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1067, 6.634113, 0, 9999, -9999, 1.0, 100, 1, 32.653526, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1068, 0.26593, 0, 9999, -9999, 1.0, 100, 1, 5.009022, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1069, 0.199144, 0, 9999, -9999, 1.0, 100, 1, 3.190759, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1070, 0.050813, 0, 9999, -9999, 1.0, 100, 1, 0.788599, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1071, 0.587626, 0, 9999, -9999, 1.0, 100, 1, 4.328696, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1072, 3.151748, 0, 9999, -9999, 1.0, 100, 1, 112.606433, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1073, 2.668898, 0, 9999, -9999, 1.0, 100, 1, 77.81765, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1074, 7.894042, 0, 9999, -9999, 1.0, 100, 1, 153.592986, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1075, 2.530305, 0, 9999, -9999, 1.0, 100, 1, 15.783448, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1076, 0.196968, 0, 9999, -9999, 1.0, 100, 1, 2.29551, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1077, 3.166384, 0, 9999, -9999, 1.0, 100, 1, 26.120041, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1078, 4.982008, 0, 9999, -9999, 1.0, 100, 1, 34.413246, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1079, 3.40533, 0, 9999, -9999, 1.0, 100, 1, 72.327992, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1080, 13.320313, 0, 9999, -9999, 1.0, 100, 1, 132.149983, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1081, 6.403037, 0, 9999, -9999, 1.0, 100, 1, 405.642115, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1082, 0.25017, 0, 9999, -9999, 1.0, 100, 1, 510.054159, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1083, 99.895654, 0, 9999, -9999, 1.0, 100, 1, 633.681488, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1087, 24.929132, 0, 9999, -9999, 1.0, 100, 1, 116.66597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1088, 8.142923, 0, 9999, -9999, 1.0, 100, 1, 36.782492, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1089, 116.529933, 0, 9999, -9999, 1.0, 100, 1, 384.449592, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1090, 0.666879, 0, 9999, -9999, 1.0, 100, 1, 89.140897, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1091, 1.798315, 0, 9999, -9999, 1.0, 100, 1, 45.7939, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1092, 2.133875, 0, 9999, -9999, 1.0, 100, 1, 54.002032, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1093, 0.00814, 0, 9999, -9999, 1.0, 100, 1, 155.605298, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1094, 0.064948, 0, 9999, -9999, 1.0, 100, 1, 3.759038, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1095, 0.003213, 0, 9999, -9999, 1.0, 100, 1, 0.204951, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1097, 0.000284, 0, 9999, -9999, 1.0, 100, 1, 4.601122, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1098, 0.044441, 0, 9999, -9999, 1.0, 100, 1, 71.025499, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1099, 0.326881, 0, 9999, -9999, 1.0, 100, 1, 290.937198, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1100, 0.001554, 0, 9999, -9999, 1.0, 100, 1, 0.026696, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1101, 10.756219, 0, 9999, -9999, 1.0, 100, 1, 83.930665, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1102, 18.335148, 0, 9999, -9999, 1.0, 100, 1, 350.979988, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1103, 15.674245, 0, 9999, -9999, 1.0, 100, 1, 245.381701, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1104, 1.4e-05, 0, 9999, -9999, 1.0, 100, 1, 0.206918, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1105, 4.1e-05, 0, 9999, -9999, 1.0, 100, 1, 2.178593, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1106, 0.000239, 0, 9999, -9999, 1.0, 100, 1, 2.289793, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1110, 6.2e-05, 0, 9999, -9999, 1.0, 100, 1, 1.654557, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1112, 0.045137, 0, 9999, -9999, 1.0, 100, 1, 69.53429, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1113, 0.005007, 0, 9999, -9999, 1.0, 100, 1, 3.536361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1114, 1.745806, 0, 9999, -9999, 1.0, 100, 1, 13.446889, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1115, 0.014667, 0, 9999, -9999, 1.0, 100, 1, 50.575278, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1116, 0.005493, 0, 9999, -9999, 1.0, 100, 1, 32.601142, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1117, 3.150074, 0, 9999, -9999, 1.0, 100, 1, 90.792541, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1118, 0.00706, 0, 9999, -9999, 1.0, 100, 1, 8.725012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1119, 0.194231, 0, 9999, -9999, 1.0, 100, 1, 43.254023, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1120, 0.005842, 0, 9999, -9999, 1.0, 100, 1, 2.416001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1121, 0.000957, 0, 9999, -9999, 1.0, 100, 1, 0.540589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1122, 0.002075, 0, 9999, -9999, 1.0, 100, 1, 1.462883, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1123, 0.000406, 0, 9999, -9999, 1.0, 100, 1, 1.464336, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1124, 0.001668, 0, 9999, -9999, 1.0, 100, 1, 1.288283, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1127, 1.806913, 0, 9999, -9999, 1.0, 100, 1, 105.296621, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1130, 0.00015, 0, 9999, -9999, 1.0, 100, 1, 1.025754, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1131, 1.1e-05, 0, 9999, -9999, 1.0, 100, 1, 2.897078, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1132, 6.3e-05, 0, 9999, -9999, 1.0, 100, 1, 0.359497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1133, 0.001273, 0, 9999, -9999, 1.0, 100, 1, 0.719597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1134, 0.0009, 0, 9999, -9999, 1.0, 100, 1, 0.508453, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1135, 0.000196, 0, 9999, -9999, 1.0, 100, 1, 8.117819, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1136, 0.000586, 0, 9999, -9999, 1.0, 100, 1, 0.4027, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1137, 0.004718, 0, 9999, -9999, 1.0, 100, 1, 3.669012, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
# data/data-pipeline/data_pipeline/score/field_names.py
# Column-name constants shared across the score pipeline so that dataframe
# fields are referenced by one canonical string everywhere.

# Suffixes
PERCENTILE_FIELD_SUFFIX = " (percentile)"
PERCENTILE_URBAN_RURAL_FIELD_SUFFIX = " (percentile urban/rural)"
MIN_MAX_FIELD_SUFFIX = " (min-max normalized)"
TOP_25_PERCENTILE_SUFFIX = " (top 25th percentile)"
# Geographic field names
GEOID_TRACT_FIELD = "GEOID10_TRACT"
STATE_FIELD = "State Name"
COUNTY_FIELD = "County Name"
# Score file field names
SCORE_A = "Score A"
SCORE_B = "Score B"
SCORE_C = "Score C"
C_SOCIOECONOMIC = "Socioeconomic Factors"
C_SENSITIVE = "Sensitive populations"
C_ENVIRONMENTAL = "Environmental effects"
C_EXPOSURES = "Exposures"
SCORE_D = "Score D"
SCORE_E = "Score E"
SCORE_F_COMMUNITIES = "Score F (communities)"
SCORE_G = "Score G"
SCORE_G_COMMUNITIES = "Score G (communities)"
SCORE_H = "Score H"
SCORE_H_COMMUNITIES = "Score H (communities)"
SCORE_I = "Score I"
SCORE_I_COMMUNITIES = "Score I (communities)"
# NOTE(review): SCORE_K's value does not follow the "Score K" pattern of its
# neighbours — confirm "NMTC (communities)" is intentional.
SCORE_K = "NMTC (communities)"
SCORE_K_COMMUNITIES = "Score K (communities)"
SCORE_L = "Definition L"
SCORE_L_COMMUNITIES = "Definition L (communities)"
L_CLIMATE = "Climate Factor (Definition L)"
L_ENERGY = "Energy Factor (Definition L)"
L_TRANSPORTATION = "Transportation Factor (Definition L)"
L_HOUSING = "Housing Factor (Definition L)"
L_POLLUTION = "Pollution Factor (Definition L)"
L_WATER = "Water Factor (Definition L)"
L_HEALTH = "Health Factor (Definition L)"
L_WORKFORCE = "Workforce Factor (Definition L)"
L_NON_WORKFORCE = "Any Non-Workforce Factor (Definition L)"
# Percentile thresholds used in the factor-definition field names below.
PERCENTILE = 90
MEDIAN_HOUSE_VALUE_PERCENTILE = 90
# Poverty / Income
POVERTY_FIELD = "Poverty (Less than 200% of federal poverty line)"
POVERTY_LESS_THAN_200_FPL_FIELD = (
    "Percent of individuals < 200% Federal Poverty Line"
)
POVERTY_LESS_THAN_150_FPL_FIELD = (
    "Percent of individuals < 150% Federal Poverty Line"
)
POVERTY_LESS_THAN_100_FPL_FIELD = (
    "Percent of individuals < 100% Federal Poverty Line"
)
STATE_MEDIAN_INCOME_FIELD = (
    "Median household income (State; 2019 inflation-adjusted dollars)"
)
MEDIAN_INCOME_FIELD = "Median household income in the past 12 months"
MEDIAN_INCOME_AS_PERCENT_OF_STATE_FIELD = (
    "Median household income (% of state median household income)"
)
PERSISTENT_POVERTY_FIELD = "Persistent Poverty Census Tract"
AMI_FIELD = "Area Median Income (State or metropolitan)"
COLLEGE_ATTENDANCE_FIELD = "Percent enrollment in college or graduate school"
MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD = (
    "Median household income as a percent of area median income"
)
LOW_MEDIAN_INCOME_AS_PERCENT_OF_AMI_FIELD = (
    "Low median household income as a percent of area median income"
)
# Climate
FEMA_RISK_FIELD = "FEMA Risk Index Expected Annual Loss Score"
EXPECTED_BUILDING_LOSS_RATE_FIELD = (
    "Expected building loss rate (Natural Hazards Risk Index)"
)
EXPECTED_AGRICULTURE_LOSS_RATE_FIELD = (
    "Expected agricultural loss rate (Natural Hazards Risk Index)"
)
EXPECTED_POPULATION_LOSS_RATE_FIELD = (
    "Expected population loss rate (Natural Hazards Risk Index)"
)
# Environment
DIESEL_FIELD = "Diesel particulate matter"
PM25_FIELD = "Particulate matter (PM2.5)"
OZONE_FIELD = "Ozone"
TRAFFIC_FIELD = "Traffic proximity and volume"
LEAD_PAINT_FIELD = "Percent pre-1960s housing (lead paint indicator)"
WASTEWATER_FIELD = "Wastewater discharge"
AGGREGATION_POLLUTION_FIELD = "Pollution Burden"
RMP_FIELD = "Proximity to Risk Management Plan (RMP) facilities"
TSDF_FIELD = "Proximity to TSDF sites"
NPL_FIELD = "Proximity to NPL sites"
AIR_TOXICS_CANCER_RISK_FIELD = "Air toxics cancer risk"
RESPIRATORY_HAZARD_FIELD = "Respiratory hazard index"
# Housing
HOUSING_BURDEN_FIELD = "Housing burden (percent)"
HT_INDEX_FIELD = (
    "Housing + Transportation Costs % Income for the Regional Typical Household"
)
# Energy
ENERGY_BURDEN_FIELD = "Energy burden"
# Health
DIABETES_FIELD = "Diagnosed diabetes among adults aged >=18 years"
ASTHMA_FIELD = "Current asthma among adults aged >=18 years"
HEART_DISEASE_FIELD = "Coronary heart disease among adults aged >=18 years"
CANCER_FIELD = "Cancer (excluding skin cancer) among adults aged >=18 years"
HEALTH_INSURANCE_FIELD = (
    "Current lack of health insurance among adults aged 18-64 years"
)
PHYS_HEALTH_NOT_GOOD_FIELD = (
    "Physical health not good for >=14 days among adults aged >=18 years"
)
LIFE_EXPECTANCY_FIELD = "Life expectancy (years)"
LOW_LIFE_EXPECTANCY_FIELD = "Low life expectancy"
# Other Demographics
TOTAL_POP_FIELD = "Total population"
UNEMPLOYMENT_FIELD = "Unemployed civilians (percent)"
LINGUISTIC_ISO_FIELD = "Linguistic isolation (percent)"
HOUSEHOLDS_LINGUISTIC_ISO_FIELD = (
    "Percent of households in linguistic isolation"
)
HIGH_SCHOOL_ED_FIELD = (
    "Percent individuals age 25 or over with less than high school degree"
)
AGGREGATION_POPULATION_FIELD = "Population Characteristics"
UNDER_5_FIELD = "Individuals under 5 years old"
OVER_64_FIELD = "Individuals over 64 years old"
# Fields from 2010 decennial census (generally only loaded for the territories)
CENSUS_DECENNIAL_MEDIAN_INCOME_2009 = "Median household income in 2009 ($)"
CENSUS_DECENNIAL_POVERTY_LESS_THAN_100_FPL_FIELD_2009 = (
    "Percentage households below 100% of federal poverty line in 2009"
)
CENSUS_DECENNIAL_HIGH_SCHOOL_ED_FIELD_2009 = "Percent individuals age 25 or over with less than high school degree in 2009"
CENSUS_DECENNIAL_UNEMPLOYMENT_FIELD_2009 = (
    "Unemployed civilians (percent) in 2009"
)
CENSUS_DECENNIAL_TOTAL_POPULATION_FIELD_2009 = "Total population in 2009"
CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009 = (
    "Median household income as a percent of territory median income in 2009"
)
LOW_CENSUS_DECENNIAL_AREA_MEDIAN_INCOME_PERCENT_FIELD_2009 = "Low median household income as a percent of territory median income in 2009"
# Fields from 2010 ACS (loaded for comparison with the territories)
CENSUS_UNEMPLOYMENT_FIELD_2010 = "Unemployed civilians (percent) in 2010"
CENSUS_POVERTY_LESS_THAN_100_FPL_FIELD_2010 = (
    "Percent of individuals < 100% Federal Poverty Line in 2010"
)
# Combined fields that merge island areas and states data
COMBINED_CENSUS_TOTAL_POPULATION_2010 = (
    "Total population in 2009 (island areas) and 2019 (states and PR)"
)
COMBINED_UNEMPLOYMENT_2010 = "Unemployed civilians (percent) in 2009 (island areas) and 2010 (states and PR)"
COMBINED_POVERTY_LESS_THAN_100_FPL_FIELD_2010 = (
    "Percentage households below 100% of federal poverty line in 2009 (island areas) "
    "and 2010 (states and PR)"
)
# Urban Rural Map
URBAN_HEURISTIC_FIELD = "Urban Heuristic Flag"
# Housing value
MEDIAN_HOUSE_VALUE_FIELD = "Median value ($) of owner-occupied housing units"
# EJSCREEN Areas of Concern
EJSCREEN_AREAS_OF_CONCERN_NATIONAL_70TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, National, 70th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_NATIONAL_75TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, National, 75th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_NATIONAL_80TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, National, 80th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_NATIONAL_85TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, National, 85th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_NATIONAL_90TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, National, 90th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_NATIONAL_95TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, National, 95th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_STATE_70TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, State, 70th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_STATE_75TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, State, 75th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_STATE_80TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, State, 80th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_STATE_85TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, State, 85th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_STATE_90TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, State, 90th percentile (communities)"
)
EJSCREEN_AREAS_OF_CONCERN_STATE_95TH_PERCENTILE_COMMUNITIES_FIELD = (
    "EJSCREEN Areas of Concern, State, 95th percentile (communities)"
)
# Mapping inequality data.
HOLC_GRADE_D_TRACT_PERCENT_FIELD: str = "Percent of tract that is HOLC Grade D"
HOLC_GRADE_D_TRACT_20_PERCENT_FIELD: str = "Tract is >20% HOLC Grade D"
HOLC_GRADE_D_TRACT_50_PERCENT_FIELD: str = "Tract is >50% HOLC Grade D"
HOLC_GRADE_D_TRACT_75_PERCENT_FIELD: str = "Tract is >75% HOLC Grade D"
# Child Opportunity Index data
# Summer days with maximum temperature above 90F.
EXTREME_HEAT_FIELD = "Summer days above 90F"
# Percentage households without a car located further than a half-mile from the
# nearest supermarket.
HEALTHY_FOOD_FIELD = "Percent low access to healthy food"
# Percentage impenetrable surface areas such as rooftops, roads or parking lots.
IMPENETRABLE_SURFACES_FIELD = "Percent impenetrable surface areas"
# Percentage third graders scoring proficient on standardized reading tests,
# converted to NAEP scale score points.
READING_FIELD = "Third grade reading proficiency"
LOW_READING_FIELD = "Low third grade reading proficiency"
# Alternative energy-related definition of DACs
ENERGY_RELATED_COMMUNITIES_DEFINITION_ALTERNATIVE = (
    "Energy-related alternative definition of communities"
)
COAL_EMPLOYMENT = "Coal employment"
OUTAGE_EVENTS = "Outage Events"
HOMELESSNESS = "Homelessness"
DISABLED_POPULATION = "Disabled population"
OUTAGE_DURATION = "Outage Duration"
JOB_ACCESS = "Job Access"
FOSSIL_ENERGY_EMPLOYMENT = "Fossil energy employment"
FOOD_DESERT = "Food Desert"
INCOMPLETE_PLUMBING = "Incomplete Plumbing"
NON_GRID_CONNECTED_HEATING_FUEL = "Non-grid-connected heating fuel"
PARKS = "Parks"
GREATER_THAN_30_MIN_COMMUTE = "Greater than 30 min commute"
INTERNET_ACCESS = "Internet Access"
MOBILE_HOME = "Mobile Home"
SINGLE_PARENT = "Single Parent"
TRANSPORTATION_COSTS = "Transportation Costs"
#####
# Names for individual factors being exceeded
# Climate Change
EXPECTED_POPULATION_LOSS_RATE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for expected population loss rate and is low income"
EXPECTED_AGRICULTURE_LOSS_RATE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for expected agriculture loss rate and is low income"
EXPECTED_BUILDING_LOSS_RATE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for expected building loss rate and is low income"
# Clean energy and efficiency
PM25_EXPOSURE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for PM2.5 exposure and is low income"
ENERGY_BURDEN_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for energy burden and is low income"
# Clean transportation
DIESEL_PARTICULATE_MATTER_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for diesel particulate matter and is low income"
TRAFFIC_PROXIMITY_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for traffic proximity and is low income"
# Affordable and Sustainable Housing
LEAD_PAINT_MEDIAN_HOUSE_VALUE_LOW_INCOME_FIELD = (
    f"At or above the {PERCENTILE}th percentile for lead paint and"
    f" the median house value is less than {MEDIAN_HOUSE_VALUE_PERCENTILE}th "
    f"percentile and is low income"
)
HOUSING_BURDEN_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for housing burden and is low income"
# Remediation and Reduction of Legacy Pollution
RMP_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for proximity to RMP sites and is low income"
SUPERFUND_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for proximity to superfund sites and is low income"
HAZARDOUS_WASTE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for proximity to hazardous waste facilities and is low income"
# Critical Clean Water and Waste Infrastructure
WASTEWATER_DISCHARGE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for wastewater discharge and is low income"
# Health Burdens
DIABETES_LOW_INCOME_FIELD = (
    f"At or above the {PERCENTILE}th percentile for diabetes and is low income"
)
ASTHMA_LOW_INCOME_FIELD = (
    f"At or above the {PERCENTILE}th percentile for asthma and is low income"
)
HEART_DISEASE_LOW_INCOME_FIELD = f"At or above the {PERCENTILE}th percentile for heart disease and is low income"
LOW_LIFE_EXPECTANCY_LOW_INCOME_FIELD = (
f"At or above the {PERCENTILE}th percentile | |
closefd: bool = True,
opener: Optional[Callable[[str, int], int]] = None,
tempdir: Optional[str] = None
) -> Iterator[IO]:
"""Save a file with a temporary name and rename it into place when ready.
This is a context manager which is meant for saving data to files.
The data is written to a temporary file, which gets renamed to the target
name when the context is closed. This avoids readers of the file from
getting an incomplete file.
**Example:**
.. code:: python
with save_file_atomic('/path/to/foo', 'w') as f:
f.write(stuff)
The file will be called something like ``tmpCAFEBEEF`` until the
context block ends, at which point it gets renamed to ``foo``. The
temporary file will be created in the same directory as the output file.
The ``filename`` parameter must be an absolute path.
If an exception occurs or the process is terminated, the temporary file will
be deleted.
"""
# This feature has been proposed for upstream Python in the past, e.g.:
# https://bugs.python.org/issue8604
assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path"
if tempdir is None:
tempdir = os.path.dirname(filename)
fd, tempname = tempfile.mkstemp(dir=tempdir)
# Apply mode allowed by umask
os.fchmod(fd, 0o666 & ~_UMASK)
os.close(fd)
f = open(
tempname,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
closefd=closefd,
opener=opener,
)
def cleanup_tempfile():
f.close()
try:
os.remove(tempname)
except FileNotFoundError:
pass
except OSError as e:
raise UtilError("Failed to cleanup temporary file {}: {}".format(tempname, e)) from e
try:
with _signals.terminator(cleanup_tempfile):
# Disable type-checking since "IO[Any]" has no attribute "real_filename"
f.real_filename = filename # type: ignore
yield f
f.close()
# This operation is atomic, at least on platforms we care about:
# https://bugs.python.org/issue8828
os.replace(tempname, filename)
except Exception:
cleanup_tempfile()
raise
# get_umask():
#
# Get the process's file mode creation mask without changing it.
#
# Returns:
# (int) The process's file mode creation mask.
#
def get_umask():
    """Return the process's file mode creation mask without modifying it.

    Returns the module-level ``_UMASK`` value rather than calling
    ``os.umask()`` directly, which would momentarily change the process
    umask in order to read it — presumably captured once at import time;
    see ``_UMASK``'s definition.
    """
    return _UMASK
# _get_dir_size():
#
# Get the disk usage of a given directory in bytes.
#
# This function assumes that files do not inadvertently
# disappear while this function is running.
#
# Arguments:
# (str) The path whose size to check.
#
# Returns:
# (int) The size on disk in bytes.
#
def _get_dir_size(path):
path = os.path.abspath(path)
def get_size(path):
total = 0
for f in os.scandir(path):
total += f.stat(follow_symlinks=False).st_size
if f.is_dir(follow_symlinks=False):
total += get_size(f.path)
return total
return get_size(path)
# _get_volume_size():
#
# Gets the overall usage and total size of a mounted filesystem in bytes.
#
# Args:
# path (str): The path to check
#
# Returns:
# (int): The total number of bytes on the volume
# (int): The number of available bytes on the volume
#
def _get_volume_size(path):
try:
usage = shutil.disk_usage(path)
except OSError as e:
raise UtilError("Failed to retrieve stats on volume for path '{}': {}".format(path, e)) from e
return usage.total, usage.free
# _parse_size():
#
# Convert a string representing data size to a number of
# bytes. E.g. "2K" -> 2048.
#
# This uses the same format as systemd's
# [resource-control](https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#).
#
# Arguments:
# size (str) The string to parse
# volume (str) A path on the volume to consider for percentage
# specifications
#
# Returns:
# (int|None) The number of bytes, or None if 'infinity' was specified.
#
# Raises:
# UtilError if the string is not a valid data size.
#
def _parse_size(size, volume):
if size == "infinity":
return None
matches = re.fullmatch(r"([0-9]+\.?[0-9]*)([KMGT%]?)", size)
if matches is None:
raise UtilError("{} is not a valid data size.".format(size))
num, unit = matches.groups()
if unit == "%":
num = float(num)
if num > 100:
raise UtilError("{}% is not a valid percentage value.".format(num))
disk_size, _ = _get_volume_size(volume)
return disk_size * (num / 100)
units = ("", "K", "M", "G", "T")
return int(num) * 1024 ** units.index(unit)
# _pretty_size()
#
# Converts a number of bytes into a string representation in KiB, MiB, GiB, TiB
# represented as K, M, G, T etc.
#
# Args:
# size (int): The size to convert in bytes.
# dec_places (int): The number of decimal places to output to.
#
# Returns:
#     (str): The string representation of the number of bytes in the largest
#            unit it fits in (B, K, M, G or T).
def _pretty_size(size, dec_places=0):
psize = size
unit = "B"
units = ("B", "K", "M", "G", "T")
for unit in units:
if psize < 1024:
break
if unit != units[-1]:
psize /= 1024
return "{size:g}{unit}".format(size=round(psize, dec_places), unit=unit)
# _is_in_main_thread()
#
# Return whether we are running in the main thread or not
#
def _is_in_main_thread():
return threading.current_thread() is threading.main_thread()
# Remove a path and any empty directories leading up to it.
#
# Args:
# basedir - The basedir at which to stop pruning even if
# it is empty.
# path - A path relative to basedir that should be pruned.
#
# Raises:
# FileNotFoundError - if the path itself doesn't exist.
# OSError - if something else goes wrong
#
def _remove_path_with_parents(basedir: Union[Path, str], path: Union[Path, str]):
assert not os.path.isabs(path), "The path ({}) should be relative to basedir ({})".format(path, basedir)
path = os.path.join(basedir, path)
# Start by removing the path itself
os.unlink(path)
# Now walk up the directory tree and delete any empty directories
path = os.path.dirname(path)
while path != basedir:
try:
os.rmdir(path)
except FileNotFoundError:
# The parent directory did not exist (race conditions can
# cause this), but it's parent directory might still be
# ready to prune
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# The parent directory was not empty, so we
# cannot prune directories beyond this point
break
raise
path = os.path.dirname(path)
# Recursively remove directories, ignoring file permissions as much as
# possible.
def _force_rmtree(rootpath):
def fix_permissions(function, path, info):
parent = os.path.dirname(path)
try:
os.chmod(parent, 0o755)
except OSError as e:
raise UtilError("Failed to ensure write permission on directory '{}': {}".format(parent, e))
# Directories need to be removed with `rmdir`, though
# `os.path.isdir` will follow symlinks, so make sure it's
# not a symlink first
if not os.path.islink(path) and os.path.isdir(path):
os.rmdir(path)
else:
os.remove(path)
try:
shutil.rmtree(rootpath, onerror=fix_permissions)
except OSError as e:
raise UtilError("Failed to remove cache directory '{}': {}".format(rootpath, e))
# Recursively make directories in target area
def _copy_directories(srcdir, destdir, target):
    """Recursively create, under `destdir`, the chain of parent directories
    that `target` (a path relative to both trees) has under `srcdir`.

    Generator: yields a ``(directory, mode)`` tuple for every destination
    directory it creates or makes writable, so that the caller (e.g.
    `_process_list`) can restore the original permissions after populating
    the tree.

    Raises:
       UtilError: if the source tree has a non-directory where a directory
                  is expected, or an existing destination directory cannot
                  be made writable.
    """
    this_dir = os.path.dirname(target)
    new_dir = os.path.join(destdir, this_dir)
    old_dir = os.path.join(srcdir, this_dir)

    if not os.path.lexists(new_dir):
        # Create ancestors first: the deepest recursion handles the
        # top-most missing directory.
        if this_dir:
            yield from _copy_directories(srcdir, destdir, this_dir)

        if os.path.lexists(old_dir):
            dir_stat = os.lstat(old_dir)
            mode = dir_stat.st_mode

            if stat.S_ISDIR(mode) or stat.S_ISLNK(mode):
                os.makedirs(new_dir)
                yield (new_dir, mode)
            else:
                raise UtilError("Source directory tree has file where " "directory expected: {}".format(old_dir))
    else:
        if not os.access(new_dir, os.W_OK):
            # If the destination directory is not writable, change permissions to make it
            # writable. Callers of this method (like `_process_list`) must
            # restore the original permissions towards the end of their processing.
            try:
                os.chmod(new_dir, 0o755)
                yield (new_dir, os.lstat(old_dir).st_mode)
            except PermissionError:
                raise UtilError("Directory {} is not writable".format(destdir))
# _ensure_real_directory()
#
# Ensure `path` is a real directory and there are no symlink components.
#
# Symlink components are allowed in `root`.
#
def _ensure_real_directory(root, path):
    """Ensure `path` (relative to `root`) exists as a real directory.

    Symlink components are allowed in `root` itself; the destination must
    not resolve to a symlink, regular file or special file.  Missing
    directories are created with ``os.makedirs``.

    Raises:
       UtilError: if an existing destination component is not a directory.
    """
    destpath = root
    # NOTE(review): os.path.split() returns only a (head, tail) 2-tuple, so
    # this loop inspects at most two joined prefixes of `path`, not every
    # individual component — confirm whether per-component symlink checking
    # was intended here.
    for name in os.path.split(path):
        destpath = os.path.join(destpath, name)
        try:
            deststat = os.lstat(destpath)
            if not stat.S_ISDIR(deststat.st_mode):
                relpath = destpath[len(root) :]

                if stat.S_ISLNK(deststat.st_mode):
                    filetype = "symlink"
                elif stat.S_ISREG(deststat.st_mode):
                    filetype = "regular file"
                else:
                    filetype = "special file"

                raise UtilError("Destination is a {}, not a directory: {}".format(filetype, relpath))
        except FileNotFoundError:
            os.makedirs(destpath)
# _process_list()
#
# Internal helper for copying/moving/linking file lists
#
# This will handle directories, symlinks and special files
# internally, the `actionfunc` will only be called for regular files.
#
# Args:
# srcdir: The source base directory
# destdir: The destination base directory
# actionfunc: The function to call for regular files
# result: The FileListResult
# filter_callback: Optional callback to invoke for every directory entry
# ignore_missing: Dont raise any error if a source file is missing
#
#
def _process_list(
srcdir, destdir, actionfunc, result, filter_callback=None, ignore_missing=False, report_written=False
):
# Keep track of directory permissions, since these need to be set
# *after* files have been written.
permissions = []
filelist = list_relative_paths(srcdir)
if filter_callback:
filelist = [path for path in filelist if filter_callback(path)]
# Now walk the list
for path in filelist:
srcpath = os.path.join(srcdir, path)
destpath = os.path.join(destdir, path)
# Ensure that the parent of the destination path exists without symlink
# components.
_ensure_real_directory(destdir, os.path.dirname(path))
# Add to the results the list of files written
if report_written:
result.files_written.append(path)
# Collect overlaps
if os.path.lexists(destpath) and not os.path.isdir(destpath):
result.overwritten.append(path)
# The destination directory may not have been created separately
permissions.extend(_copy_directories(srcdir, destdir, path))
try:
file_stat = os.lstat(srcpath)
mode = file_stat.st_mode
except FileNotFoundError as e:
# Skip | |
import json
import unittest
from datetime import datetime
from unittest.mock import MagicMock, patch
from werkzeug.datastructures import MultiDict
from alerta.app import create_app, db, qb
# service, tags (=, !=, =~, !=~)
# attributes (=, !=, =~, !=~)
# everything else (=, !=, =~, !=~)
class SearchTestCase(unittest.TestCase):
    """Exercise query-builder (``qb``) parsing of HTTP request parameters.

    For each resource type (alerts, blackouts, heartbeats, keys, users,
    groups, perms, customers) there are up to three kinds of tests:

    - ``*_filter``: every supported filter parameter parses without raising
    - ``*_sort_by``: every supported sort field parses without raising
    - ``*_query``: the generated query matches the expected Postgres SQL or
      MongoDB query document, depending on ``DATABASE_URL``
    """

    def setUp(self):
        self.app = create_app()

    def test_alerts_query(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('status', 'open'),
            ('status', 'ack'),
            ('environment', '~DEV'),
            ('group!', 'Network'),
            ('sort-by', '-severity'),
            ('sort-by', '-lastReceiveTime'),
        ])
        with self.app.test_request_context():
            query = qb.alerts.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, '1=1\nAND "status"=ANY(%(status)s)\nAND "environment" ILIKE %(environment)s\nAND "group"!=%(not_group)s')
            self.assertEqual(query.vars, {'status': ['open', 'ack'], 'environment': '%DEV%', 'not_group': 'Network'})
            self.assertEqual(query.sort, 's.code DESC,last_receive_time ASC')
        else:
            import re
            self.assertEqual(query.where, {'status': {'$in': ['open', 'ack']}, 'environment': {'$regex': re.compile('DEV', re.IGNORECASE)}, 'group': {'$ne': 'Network'}})
            self.assertEqual(query.sort, [('code', -1), ('lastReceiveTime', 1)])

    def test_alerts_attributes(self):
        search_params = MultiDict([('attributes.country_code', 'US')])
        with self.app.test_request_context():
            query = qb.alerts.from_params(search_params)

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertIn('AND attributes @> %(attr_country_code)s', query.where)
            self.assertEqual(query.vars, {'attr_country_code': {'country_code': 'US'}})
        else:
            self.assertEqual(query.where, {'attributes.country_code': 'US'})

    def test_blackouts_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('priority', '1'),
            ('environment', 'Development'),
            ('service', 'svc1'),
            ('resource', 'res1'),
            ('event', 'evt1'),
            ('group', 'grp1'),
            ('tag', 'tag1'),
            ('customer', 'cust1'),
            ('startTime', ''),
            ('endTime', ''),
            ('duration', '100'),
            ('status', 'pending'),
            ('remaining', '100'),
            ('user', '<EMAIL>'),
            ('createTime', ''),
            ('text', 'reason'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.blackouts.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in blackout filter query: {e}')

    def test_blackouts_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'priority'),
            ('sort-by', 'environment'),
            ('sort-by', 'service'),
            ('sort-by', 'resource'),
            ('sort-by', 'event'),
            ('sort-by', 'group'),
            ('sort-by', 'tags'),
            ('sort-by', 'customer'),
            ('sort-by', 'startTime'),
            ('sort-by', 'endTime'),
            ('sort-by', 'duration'),
            ('sort-by', 'status'),
            ('sort-by', 'remaining'),
            ('sort-by', 'user'),
            ('sort-by', 'createTime'),
            ('sort-by', 'text'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.blackouts.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in blackouts sort-by query: {e}')

    @patch('alerta.database.backends.mongodb.utils.datetime')
    def test_blackouts_query(self, mock_datetime):
        # mock datetime.utcnow()
        now = datetime(2021, 1, 17, 20, 58, 0)
        mock_datetime.utcnow = MagicMock(return_value=now)
        mock_datetime.strftime = datetime.strftime

        # ?status=expired&status=pending&page=2&page-size=20&sort-by=-startTime
        search_params = MultiDict([
            ('status', 'expired'),
            ('status', 'pending'),
            ('sort-by', '-startTime'),
            ('page', '2'),
            ('page-size', '20'),
        ])
        with self.app.test_request_context():
            query = qb.blackouts.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, "1=1\nAND (start_time > NOW() at time zone 'utc' OR end_time <= NOW() at time zone 'utc')")
            self.assertEqual(query.vars, {})
            self.assertEqual(query.sort, 'start_time ASC')
        else:
            self.assertEqual(query.where, {'$or': [{'startTime': {'$gt': now}}, {'endTime': {'$lte': now}}]})
            self.assertEqual(query.sort, [('startTime', 1)])

    def test_heartbeats_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('origin', 'origin/foo'),
            ('tag', 'tag1'),
            ('tag', 'tag2'),
            ('attributes', 'attributes.foo'),
            ('attributes', 'attributes.bar'),
            ('type', 'exceptionAlert'),
            ('createTime', ''),
            ('timeout', '3600'),
            ('receiveTime', ''),
            ('customer', 'cust1'),
            ('latency', '200'),
            ('since', ''),
            ('status', 'expired'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.heartbeats.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in heartbeats filter query: {e}')

    def test_heartbeats_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'origin'),
            ('sort-by', 'tags'),
            ('sort-by', 'attributes'),
            ('sort-by', 'type'),
            ('sort-by', 'createTime'),
            ('sort-by', 'timeout'),
            ('sort-by', 'receiveTime'),
            ('sort-by', 'customer'),
            ('sort-by', 'latency'),
            ('sort-by', 'since'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.heartbeats.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in heartbeats sort-by query: {e}')

    def test_heartbeats_query(self):
        self.maxDiff = None

        # ?status=slow&page=1&page-size=20&sort-by=-latency
        search_params = MultiDict([
            ('status', 'slow'),
            ('sort-by', '-latency'),
            ('page', '1'),
            ('page-size', '20'),
        ])
        with self.app.test_request_context():
            query = qb.heartbeats.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, '1=1')
            self.assertEqual(query.vars, {})
            self.assertEqual(query.sort, 'latency DESC')
        else:
            self.assertEqual(query.where, {})  # heartbeat status is a special case
            self.assertEqual(query.sort, [('latency', -1)])

    def test_keys_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('key', '<KEY>'),
            ('status', 'expired'),
            ('user', '<EMAIL>'),
            ('scope', 'read:alerts'),
            ('type', 'read-write'),
            ('text', 'test key'),
            ('expireTime', ''),
            ('count', '123'),
            ('lastUsedTime', ''),
            ('customer', 'cust1'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.keys.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in API keys filter query: {e}')

    def test_keys_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'key'),
            ('sort-by', 'status'),
            ('sort-by', 'user'),
            ('sort-by', 'scopes'),
            ('sort-by', 'type'),
            ('sort-by', 'text'),
            ('sort-by', 'expireTime'),
            ('sort-by', 'count'),
            ('sort-by', 'lastUsedTime'),
            ('sort-by', 'customer'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.keys.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in API keys sort-by query: {e}')

    @patch('alerta.database.backends.mongodb.utils.datetime')
    def test_keys_query(self, mock_datetime):
        # mock datetime.utcnow()
        now = datetime(2021, 1, 17, 20, 58, 0)
        mock_datetime.utcnow = MagicMock(return_value=now)
        mock_datetime.strftime = datetime.strftime

        self.maxDiff = None

        # ?status=active&page=1&page-size=20&sort-by=count
        search_params = MultiDict([
            ('status', 'active'),
            ('sort-by', '-count'),
            ('page', '2'),
            ('page-size', '20'),
        ])
        with self.app.test_request_context():
            query = qb.keys.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, "1=1\nAND (expire_time >= NOW() at time zone 'utc')")
            self.assertEqual(query.vars, {})
            self.assertEqual(query.sort, 'count DESC')
        else:
            self.assertEqual(query.where, {'$or': [{'expireTime': {'$gte': now}}]}, query.where)
            self.assertEqual(query.sort, [('count', -1)])

    def test_users_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('name', '<NAME>'),
            ('login', 'ops'),
            ('email', '<EMAIL>'),
            ('domain', 'alerta.dev'),
            ('status', 'inactive'),
            ('role', 'ops'),
            ('attributes.prefs', ''),
            ('createTime', ''),
            ('lastLogin', ''),
            ('text', 'devops'),
            ('updateTime', ''),
            ('email_verified', 'true'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.users.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in users filter query: {e}')

    def test_users_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'name'),
            ('sort-by', 'login'),
            ('sort-by', 'email'),
            ('sort-by', 'domain'),
            ('sort-by', 'status'),
            ('sort-by', 'roles'),
            ('sort-by', 'attributes'),
            ('sort-by', 'createTime'),
            ('sort-by', 'lastLogin'),
            ('sort-by', 'text'),
            ('sort-by', 'updateTime'),
            ('sort-by', 'email_verified'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.users.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in users sort-by query: {e}')

    def test_users_query(self):
        self.maxDiff = None

        # ?status=inactive&page=1&page-size=20&sort-by=lastLogin
        search_params = MultiDict([
            ('status', 'inactive'),
            ('sort-by', '-lastLogin'),
            ('page', '1'),
            ('page-size', '20'),
        ])
        with self.app.test_request_context():
            query = qb.users.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, '1=1\nAND "status"=%(status)s')
            self.assertEqual(query.vars, {'status': 'inactive'})
            self.assertEqual(query.sort, 'last_login ASC')
        else:
            self.assertEqual(query.where, {'status': 'inactive'})
            self.assertEqual(query.sort, [('lastLogin', 1)])

    def test_groups_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('name', 'devops-team'),
            ('text', 'Devops Team'),
            ('count', '5'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.groups.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in groups filter query: {e}')

    def test_groups_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'name'),
            ('sort-by', 'text'),
            ('sort-by', 'count'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.groups.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in groups sort-by query: {e}')

    def test_groups_query(self):
        self.maxDiff = None

        # ?page=1&page-size=20&sort-by=count
        search_params = MultiDict([
            ('sort-by', '-count'),
            ('page', '1'),
            ('page-size', '20'),
        ])
        with self.app.test_request_context():
            query = qb.groups.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, '1=1')
            self.assertEqual(query.vars, {})
            self.assertEqual(query.sort, 'count DESC')
        else:
            self.assertEqual(query.where, {})
            self.assertEqual(query.sort, [('count', -1)])

    def test_perms_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('match', 'read-write'),  # FIXME: role, group, org?
            ('scope', 'read'),
            ('scope', 'write'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.perms.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in perms filter query: {e}')

    def test_perms_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'match'),  # FIXME: role, group, org?
            ('sort-by', 'scopes'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.perms.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in perms sort-by query: {e}')

    def test_perms_query(self):
        self.maxDiff = None

        # ?scope=read&page=1&page-size=20&sort-by=match
        search_params = MultiDict([
            ('scope', 'read'),
            ('sort-by', 'match'),  # FIXME: role, group, org?
            ('page', '1'),
            ('page-size', '20'),
        ])
        with self.app.test_request_context():
            query = qb.perms.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, '1=1\nAND "scopes"=%(scopes)s')
            self.assertEqual(query.vars, {'scopes': 'read'})
            self.assertEqual(query.sort, 'match ASC')
        else:
            self.assertEqual(query.where, {'scopes': 'read'})
            self.assertEqual(query.sort, [('match', 1)])

    def test_customers_filter(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('id', 'd1340d76-2277-4d47-937f-571bc1da6411'),
            ('match', 'read-write'),  # FIXME: ??
            ('customer', 'cust1'),
            ('customer', 'cust2'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.customers.from_params(search_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in customers filter query: {e}')

    def test_customers_sort_by(self):
        sort_params = MultiDict([
            ('sort-by', 'match'),  # FIXME: ??
            ('sort-by', 'customer'),
        ])
        try:
            with self.app.test_request_context():
                query = qb.customers.from_params(sort_params)  # noqa
        except Exception as e:
            self.fail(f'Unexpected exception in customers sort-by query: {e}')

    def test_customers_query(self):
        self.maxDiff = None

        search_params = MultiDict([
            ('match', 'keycloak-role'),
            ('match', 'github-org'),
            ('match', 'gitlab-group'),
            ('sort-by', '-customer'),
            ('page', '2'),
            ('page-size', '50'),
        ])
        with self.app.test_request_context():
            query = qb.customers.from_params(search_params)  # noqa

        if self.app.config['DATABASE_URL'].startswith('postgres'):
            self.assertEqual(query.where, '1=1\nAND "match"=ANY(%(match)s)')
            self.assertEqual(query.vars, {'match': ['keycloak-role', 'github-org', 'gitlab-group']})
            self.assertEqual(query.sort, 'customer DESC')
        else:
            self.assertEqual(query.where, {'match': {'$in': ['keycloak-role', 'github-org', 'gitlab-group']}})
            self.assertEqual(query.sort, [('customer', -1)])
class QueryParserTestCase(unittest.TestCase):
def setUp(self):
test_config = {
'TESTING': True,
'AUTH_REQUIRED': False,
'ALERT_TIMEOUT': 120,
'HISTORY_LIMIT': 5,
'DEBUG': False,
}
self.app = create_app(test_config)
self.client = self.app.test_client()
alerts = [
{
'resource': 'net01',
'event': 'node_marginal',
'environment': 'Production',
'severity': 'major',
'correlate': ['node_down', 'node_marginal', 'node_up'],
'status': 'open',
'service': ['Network', 'Core'],
'group': 'Network',
'value': 'johno',
'text': 'panic: this is a foo alert',
'tags': ['aaa', 'bbb', 'ccc'],
'attributes': {'region': 'EMEA', 'partition': '7.0'},
'origin': 'alpha',
'timeout': 100,
'rawData': ''
},
{
'resource': 'network02',
'event': 'node_down',
'environment': 'Production',
'severity': 'major',
'correlate': ['node_down', 'node_marginal', 'node_up'],
'status': 'ack',
'service': ['Network', 'Core', 'Shared'],
'group': 'Network',
'value': 'jonathon',
'text': 'Kernel Panic: this is a bar test alert',
'tags': ['bbb', 'ccc', 'ddd'],
'attributes': {'region': 'LATAM', 'partition': '72'},
'origin': 'bravo',
'timeout': 200,
'rawData': ''
},
{
'resource': 'netwrk03',
'event': 'node_up',
'environment': 'Production',
'severity': | |
in conjugation with 2Q RB results, see :func:`calculate_2q_epg`.
Note:
This function presupposes the basis gate consists
of ``u1``, ``u2`` and ``u3``.
Args:
gate_per_cliff: dictionary of gate per Clifford. see :func:`gates_per_clifford`.
epc_1q: EPC fit from 1Q RB experiment data.
qubit: index of qubit to calculate EPGs.
Returns:
Dictionary of EPGs of single qubit basis gates.
Raises:
QiskitError: when ``u2`` or ``u3`` is not found, ``cx`` gate count is nonzero,
or specified qubit is not included in the gate count dictionary.
"""
if qubit not in gate_per_cliff:
raise QiskitError('Qubit %d is not included in the `gate_per_cliff`' % qubit)
gpc_per_qubit = gate_per_cliff[qubit]
if 'u3' not in gpc_per_qubit or 'u2' not in gpc_per_qubit:
raise QiskitError('Invalid basis set is given. Use `u1`, `u2`, `u3` for basis gates.')
n_u2 = gpc_per_qubit['u2']
n_u3 = gpc_per_qubit['u3']
if gpc_per_qubit.get('cx', 0) > 0:
raise QiskitError('Two qubit gate is included in the RB sequence.')
return {'u1': 0, 'u2': epc_1q / (n_u2 + 2 * n_u3), 'u3': 2 * epc_1q / (n_u2 + 2 * n_u3)}
def calculate_2q_epg(gate_per_cliff: Dict[int, Dict[str, float]],
                     epc_2q: float,
                     qubit_pair: List[int],
                     list_epgs_1q: Optional[List[Dict[str, float]]] = None,
                     two_qubit_name: Optional[str] = 'cx') -> float:
    r"""Convert the error per Clifford (EPC) of a 2Q RB experiment into the
    error per gate (EPG) of the two qubit gate.

    Without ``list_epgs_1q`` this reduces to the rough approximation
    :math:`EPG_{CX} = EPC / N_{CX}` (single qubit gate errors ignored).
    When the single qubit EPGs are supplied — typically obtained from a
    separate 1Q RB experiment via :func:`calculate_1q_epg` — their
    depolarizing contribution is divided out first, giving a more accurate
    estimate when the two qubit gate fidelity approaches that of the single
    qubit gates. See Phys. Rev. Lett. 122, 200502 (arXiv:1712.06550).

    Note:
        Presupposes the basis gates ``u1``, ``u2``, ``u3`` and ``cx``.

    Args:
        gate_per_cliff: dictionary of gate per Clifford. see :func:`gates_per_clifford`.
        epc_2q: EPC fit from 2Q RB experiment data.
        qubit_pair: index of two qubits to calculate EPG.
        list_epgs_1q: list of single qubit EPGs of qubit listed in ``qubit_pair``.
        two_qubit_name: name of two qubit gate in ``basis gates``.

    Returns:
        EPG of 2Q gate.

    Raises:
        QiskitError: when ``cx`` is not found, specified ``qubit_pair`` is not included
            in the gate count dictionary, or length of ``qubit_pair`` is not 2.
    """
    if len(qubit_pair) != 2:
        raise QiskitError('Number of qubit is not 2.')

    # Depolarizing parameter contributed by the single qubit gates on each
    # qubit of the pair (1.0 when no 1Q EPGs are supplied).
    alphas = [1.0, 1.0]
    for ind, (qubit, epgs) in enumerate(zip(qubit_pair, list_epgs_1q or [])):
        if qubit not in gate_per_cliff:
            raise QiskitError('Qubit %d is not included in the `gate_per_cliff`' % qubit)
        counts = gate_per_cliff[qubit]
        for gate_name, epg in epgs.items():
            alphas[ind] *= (1 - 2 * epg) ** counts.get(gate_name, 0)

    # Combined single qubit depolarizing parameter, then isolate the two
    # qubit contribution from the measured EPC.
    alpha_c_1q = 1 / 5 * (alphas[0] + alphas[1] + 3 * alphas[0] * alphas[1])
    alpha_c_2q = (1 - 4 / 3 * epc_2q) / alpha_c_1q

    n_gate_2q = gate_per_cliff[qubit_pair[0]].get(two_qubit_name, 0)
    if n_gate_2q <= 0:
        raise QiskitError('Two qubit gate %s is not included in the `gate_per_cliff`. '
                          'Set correct `two_qubit_name` or use 2Q RB gate count.' % two_qubit_name)
    return 3 / 4 * (1 - alpha_c_2q) / n_gate_2q
def calculate_1q_epc(gate_per_cliff: Dict[int, Dict[str, float]],
                     epg_1q: Dict[str, float],
                     qubit: int) -> float:
    r"""Convert error per gate (EPG) into error per Clifford (EPC) of single
    qubit basis gates.

    Given the number of gates per Clifford :math:`N_i` and their EPGs, the
    EPC of the RB sequence is predicted as

    .. math::

        EPC = 1 - \prod_i \left( 1 - EPG_i \right)^{N_i}

    This lets you estimate the EPC of a prepared 1Q RB sequence from an
    error model without running the experiment.

    Args:
        gate_per_cliff: dictionary of gate per Clifford. see :func:`gates_per_clifford`.
        epg_1q: EPG of single qubit gates estimated by error model.
        qubit: index of qubit to calculate EPC.

    Returns:
        EPC of the single qubit RB sequence.

    Raises:
        QiskitError: when specified ``qubit`` is not included in the gate count dictionary
    """
    if qubit not in gate_per_cliff:
        raise QiskitError('Qubit %d is not included in the `gate_per_cliff`' % qubit)

    counts = gate_per_cliff[qubit]
    fidelity = 1
    for gate_name, epg in epg_1q.items():
        fidelity *= (1 - epg) ** counts.get(gate_name, 0)
    return 1 - fidelity
def calculate_2q_epc(gate_per_cliff: Dict[int, Dict[str, float]],
epg_2q: float,
qubit_pair: List[int],
list_epgs_1q: List[Dict[str, float]],
two_qubit_name: Optional[str] = 'cx') -> float:
r"""
Convert error per gate (EPG) into error per Clifford (EPC) of two qubit ``cx`` gates.
Given that we know the number of gates per Clifford :math:`N_i` and those EPGs,
we can predict EPC of that RB sequence:
.. math::
EPC = 1 - \prod_i \left( 1 - EPG_i \right)^{N_i}
This function isolates the contribution of two qubit gate to the EPC [1].
This will give you more accurate estimation of EPC, especially when the ``cx``
gate fidelity is close to that of single qubit gate.
To run this function, you need to know EPG of both single and two qubit gates.
For example, when you prepare 2Q RB experiment with appropriate error model,
you can define EPG of those basis gate set. Then you can estimate the EPC of
prepared RB sequence without running experiment.
.. jupyter-execute::
import qiskit.ignis.verification.randomized_benchmarking as rb
# gate counts of your 2Q RB experiment
gpc = {0: {'cx': 1.49, 'u1': 0.25, 'u2': 0.95, 'u3': 0.56},
1: {'cx': 1.49, 'u1': 0.24, 'u2': 0.98, 'u3': 0.49}}
# EPGs from error model
epgs_q0 = {'u1': 0, 'u2': 0.001, 'u3': 0.002}
epgs_q1 = {'u1': 0, 'u2': 0.002, 'u3': 0.004}
epg_q01 = 0.03
# calculate 2Q EPC
epc_2q = rb.rb_utils.calculate_2q_epc(
gate_per_cliff=gpc,
epg_2q=epg_q01,
qubit_pair=[0, 1],
list_epgs_1q=[epgs_q0, epgs_q1])
# calculate EPC according to the definition
fid = 1
for qubit in (0, 1):
for epgs in (epgs_q0, epgs_q1):
for gate, val in epgs.items():
fid *= (1 - val) ** gpc[qubit][gate]
fid *= (1 - epg_q01) ** 1.49
epc = 1 - fid
print('Total sequence EPC: %f, 2Q gate contribution: %f' % (epc, epc_2q))
As you can see two qubit gate contribution is dominant in this RB sequence.
References:
[1] <NAME>, <NAME>, | |
self.___NB_s___(s)[1]
NFs = self.___NF_s___(s)[1]
SBs = self.___SB_s___(s)[1]
SFs = self.___SF_s___(s)[1]
WB = self.___WB___(r)[1]
WF = self.___WF___(r)[1]
EB = self.___EB___(r)[1]
EF = self.___EF___(r)[1]
NWB = self.___N_W_B___[1]
NWF = self.___N_W_F___[1]
NEB = self.___N_E_B___[1]
NEF = self.___N_E_F___[1]
SWB = self.___S_W_B___[1]
SWF = self.___S_W_F___[1]
SEB = self.___S_E_B___[1]
SEF = self.___S_E_F___[1]
x1 = (1 - r) * Ns + r * Ss
x2 = -_1_ * W + _1_ * E
x3 = (1 - t) * Bs + t * Fs
x12 = -(1 - r) * NW + (1 - r) * NE - r * SW + r * SE
x13 = (1 - r) * (1 - t) * NBs + (1 - r) * t * NFs + r * (1 - t) * SBs + r * t * SFs
x23 = -t * WF - (1 - t) * WB + (1 - t) * EB + t * EF
x123 = - (1 - r) * (1 - t) * NWB \
- (1 - r) * t * NWF \
+ (1 - r) * (1 - t) * NEB \
+ (1 - r) * t * NEF \
- r * (1 - t) * SWB \
- r * t * SWF \
+ r * (1 - t) * SEB \
+ r * t * SEF
ys = x1 + x2 + x3 - x12 - x13 - x23 + x123
return ys
def Jacobian_Yt(self, r, s, t):
""" r, s, t be in [0, 1]. """
r, s, t = self.___check_rst___(r, s, t)
_1_ = np.ones(np.shape(r))
Nt = self.___Nt_Nt___(s, t)[1]
St = self.___St_St___(s, t)[1]
Wt = self.___Wt_Wt___(r, t)[1]
Et = self.___Et_Et___(r, t)[1]
B = self.___BbB___(r, s)[1]
F = self.___FfF___(r, s)[1]
NWt = self.___NW_t___(t)[1]
NEt = self.___NE_t___(t)[1]
SWt = self.___SW_t___(t)[1]
SEt = self.___SE_t___(t)[1]
NB = self.___NB___(s)[1]
NF = self.___NF___(s)[1]
SB = self.___SB___(s)[1]
SF = self.___SF___(s)[1]
WB = self.___WB___(r)[1]
WF = self.___WF___(r)[1]
EB = self.___EB___(r)[1]
EF = self.___EF___(r)[1]
NWB = self.___N_W_B___[1]
NWF = self.___N_W_F___[1]
NEB = self.___N_E_B___[1]
NEF = self.___N_E_F___[1]
SWB = self.___S_W_B___[1]
SWF = self.___S_W_F___[1]
SEB = self.___S_E_B___[1]
SEF = self.___S_E_F___[1]
x1 = (1 - r) * Nt + r * St
x2 = (1 - s) * Wt + s * Et
x3 = -_1_ * B + _1_ * F
x12 = (1 - r) * (1 - s) * NWt + (1 - r) * s * NEt + r * (1 - s) * SWt + r * s * SEt
x13 = -(1 - r) * NB + (1 - r) * NF - r * SB + r * SF
x23 = -(1 - s) * WB + (1 - s) * WF - s * EB + s * EF
x123 = - (1 - r) * (1 - s) * NWB \
+ (1 - r) * (1 - s) * NWF \
- (1 - r) * s * NEB \
+ (1 - r) * s * NEF \
- r * (1 - s) * SWB \
+ r * (1 - s) * SWF \
- r * s * SEB \
+ r * s * SEF
yt = x1 + x2 + x3 - x12 - x13 - x23 + x123
return yt
def Jacobian_Zr(self, r, s, t):
""" r, s, t be in [0, 1]. """
r, s, t = self.___check_rst___(r, s, t)
_1_ = np.ones(np.shape(r))
N = self.___NnN___(s, t)[2]
S = self.___SsS___(s, t)[2]
Wr = self.___Wr_Wr___(r, t)[2]
Er = self.___Er_Er___(r, t)[2]
Br = self.___Br_Br___(r, s)[2]
Fr = self.___Fr_Fr___(r, s)[2]
NW = self.___NW___(t)[2]
NE = self.___NE___(t)[2]
SW = self.___SW___(t)[2]
SE = self.___SE___(t)[2]
NB = self.___NB___(s)[2]
NF = self.___NF___(s)[2]
SB = self.___SB___(s)[2]
SF = self.___SF___(s)[2]
WBr = self.___WB_r___(r)[2]
WFr = self.___WF_r___(r)[2]
EBr = self.___EB_r___(r)[2]
EFr = self.___EF_r___(r)[2]
NWB = self.___N_W_B___[2]
NWF = self.___N_W_F___[2]
NEB = self.___N_E_B___[2]
NEF = self.___N_E_F___[2]
SWB = self.___S_W_B___[2]
SWF = self.___S_W_F___[2]
SEB = self.___S_E_B___[2]
SEF = self.___S_E_F___[2]
x1 = -_1_ * N + _1_ * S
x2 = (1 - s) * Wr + s * Er
x3 = (1 - t) * Br + t * Fr
x12 = -s * NE - (1 - s) * NW + (1 - s) * SW + s * SE
x13 = -t * NF - (1 - t) * NB + (1 - t) * SB + t * SF
x23 = (1 - s) * (1 - t) * WBr + (1 - s) * t * WFr + s * (1 - t) * EBr + s * t * EFr
x123 = - (1 - s) * (1 - t) * NWB \
- (1 - s) * t * NWF \
- s * (1 - t) * NEB \
- s * t * NEF \
+ (1 - s) * (1 - t) * SWB \
+ (1 - s) * t * SWF \
+ s * (1 - t) * SEB \
+ s * t * SEF
zr = x1 + x2 + x3 - x12 - x13 - x23 + x123
return zr
def Jacobian_Zs(self, r, s, t):
""" r, s, t be in [0, 1]. """
r, s, t = self.___check_rst___(r, s, t)
_1_ = np.ones(np.shape(r))
Ns = self.___Ns_Ns___(s, t)[2]
Ss = self.___Ss_Ss___(s, t)[2]
W = self.___WwW___(r, t)[2]
E = self.___EeE___(r, t)[2]
Bs = self.___Bs_Bs___(r, s)[2]
Fs = self.___Fs_Fs___(r, s)[2]
NW = self.___NW___(t)[2]
NE = self.___NE___(t)[2]
SW = self.___SW___(t)[2]
SE = self.___SE___(t)[2]
NBs = self.___NB_s___(s)[2]
NFs = self.___NF_s___(s)[2]
SBs = self.___SB_s___(s)[2]
SFs = self.___SF_s___(s)[2]
WB = self.___WB___(r)[2]
WF = self.___WF___(r)[2]
EB = self.___EB___(r)[2]
EF = self.___EF___(r)[2]
NWB = self.___N_W_B___[2]
NWF = self.___N_W_F___[2]
NEB = self.___N_E_B___[2]
NEF = self.___N_E_F___[2]
SWB = self.___S_W_B___[2]
SWF = self.___S_W_F___[2]
SEB = self.___S_E_B___[2]
SEF = self.___S_E_F___[2]
x1 = (1 - r) * Ns + r * Ss
x2 = -_1_ * W + _1_ * E
x3 = (1 - t) * Bs + t * Fs
x12 = -(1 - r) * NW + (1 - r) * NE - r * SW + r * SE
x13 = (1 - r) * (1 - t) * NBs + (1 - r) * t * NFs + r * (1 - t) * SBs + r * t * SFs
x23 = -t * WF - (1 - t) * WB + (1 - t) * EB + t * EF
x123 = - (1 - r) * (1 - t) * NWB \
- (1 - r) * t * NWF \
+ (1 - r) * (1 - t) * NEB \
+ (1 - r) * t * NEF \
- r * (1 - t) * SWB \
- r * t * SWF \
+ r * (1 - t) * SEB \
+ r * t * SEF
zs = x1 + x2 + x3 - x12 - x13 - x23 + x123
return zs
def Jacobian_Zt(self, r, s, t):
""" r, s, t be in [0, 1]. """
r, s, t = self.___check_rst___(r, s, t)
_1_ = np.ones(np.shape(r))
Nt = self.___Nt_Nt___(s, t)[2]
St = self.___St_St___(s, t)[2]
Wt = self.___Wt_Wt___(r, t)[2]
Et = self.___Et_Et___(r, t)[2]
B = self.___BbB___(r, s)[2]
F = self.___FfF___(r, s)[2]
NWt = self.___NW_t___(t)[2]
NEt = self.___NE_t___(t)[2]
SWt = self.___SW_t___(t)[2]
SEt = self.___SE_t___(t)[2]
NB = self.___NB___(s)[2]
NF = self.___NF___(s)[2]
SB = self.___SB___(s)[2]
SF = self.___SF___(s)[2]
WB = self.___WB___(r)[2]
WF = self.___WF___(r)[2]
EB = self.___EB___(r)[2]
EF = self.___EF___(r)[2]
NWB = self.___N_W_B___[2]
NWF = self.___N_W_F___[2]
NEB = self.___N_E_B___[2]
NEF = self.___N_E_F___[2]
SWB = self.___S_W_B___[2]
SWF = self.___S_W_F___[2]
SEB = self.___S_E_B___[2]
SEF = self.___S_E_F___[2]
x1 = (1 - r) * Nt + r * St
x2 = (1 - s) * Wt | |
datetime objects used to
put the data in the database, but we can check that it's
basically the same time.
:param python: A datetime from the Python part of this test.
:param postgres: A float from the Postgres part.
"""
expect = (
python - datetime.datetime.utcfromtimestamp(0)
).total_seconds()
eq_(int(expect), int(postgres))
search_doc = work.to_search_document()
eq_(work.id, search_doc['_id'])
eq_(work.id, search_doc['work_id'])
eq_(work.title, search_doc['title'])
eq_(edition.subtitle, search_doc['subtitle'])
eq_(edition.series, search_doc['series'])
eq_(edition.series_position, search_doc['series_position'])
eq_(edition.language, search_doc['language'])
eq_(work.sort_title, search_doc['sort_title'])
eq_(work.author, search_doc['author'])
eq_(work.sort_author, search_doc['sort_author'])
eq_(edition.publisher, search_doc['publisher'])
eq_(edition.imprint, search_doc['imprint'])
eq_(edition.permanent_work_id, search_doc['permanent_work_id'])
eq_("Nonfiction", search_doc['fiction'])
eq_("YoungAdult", search_doc['audience'])
eq_(work.summary_text, search_doc['summary'])
eq_(work.quality, search_doc['quality'])
eq_(work.rating, search_doc['rating'])
eq_(work.popularity, search_doc['popularity'])
eq_(work.presentation_ready, search_doc['presentation_ready'])
assert_time_match(work.last_update_time, search_doc['last_update_time'])
eq_(dict(lower=7, upper=8), search_doc['target_age'])
# Each LicensePool for the Work is listed in
# the 'licensepools' section.
licensepools = search_doc['licensepools']
eq_(2, len(licensepools))
eq_(set([x.id for x in work.license_pools]),
set([x['licensepool_id'] for x in licensepools]))
# Each item in the 'licensepools' section has a variety of useful information
# about the corresponding LicensePool.
for pool in work.license_pools:
[match] = [x for x in licensepools if x['licensepool_id'] == pool.id]
eq_(pool.open_access, match['open_access'])
eq_(pool.collection_id, match['collection_id'])
eq_(pool.suppressed, match['suppressed'])
eq_(pool.data_source_id, match['data_source_id'])
assert isinstance(match['available'], bool)
eq_(pool.licenses_available > 0, match['available'])
assert isinstance(match['licensed'], bool)
eq_(pool.licenses_owned > 0, match['licensed'])
# The work quality is stored in the main document, but
# it's also stored in the license pool subdocument so that
# we can apply a nested filter that includes quality +
# information from the subdocument.
eq_(work.quality, match['quality'])
assert_time_match(
pool.availability_time, match['availability_time']
)
# The medium of the work's presentation edition is stored
# in the main document, but it's also stored in the
# license poolsubdocument, so that we can filter out
# license pools that represent audiobooks from unsupported
# sources.
eq_(edition.medium, search_doc['medium'])
eq_(edition.medium, match['medium'])
# Each identifier that could, with high confidence, be
# associated with the work, is in the 'identifiers' section.
#
# This includes each identifier associated with a LicensePool
# for the work, and the ISBN associated with one of those
# LicensePools through a high-confidence equivalency. It does
# not include the low-confidence ISBN, or any of the
# identifiers not tied to a LicensePool.
expect = [
dict(identifier=identifier1.identifier, type=identifier1.type),
dict(identifier=pool1.identifier.identifier,
type=pool1.identifier.type),
]
def s(x):
# Sort an identifier dictionary by its identifier value.
return sorted(x, key = lambda b: b['identifier'])
eq_(s(expect), s(search_doc['identifiers']))
# Each custom list entry for the work is in the 'customlists'
# section.
not_featured, featured = sorted(
search_doc['customlists'], key = lambda x: x['featured']
)
assert_time_match(appeared_1, not_featured.pop('first_appearance'))
eq_(dict(featured=False, list_id=l1.id), not_featured)
assert_time_match(appeared_2, featured.pop('first_appearance'))
eq_(dict(featured=True, list_id=l2.id), featured)
contributors = search_doc['contributors']
eq_(2, len(contributors))
[contributor1_doc] = [c for c in contributors if c['sort_name'] == contributor1.sort_name]
[contributor2_doc] = [c for c in contributors if c['sort_name'] == contributor2.sort_name]
eq_(contributor1.display_name, contributor1_doc['display_name'])
eq_(None, contributor2_doc['display_name'])
eq_(contributor1.family_name, contributor1_doc['family_name'])
eq_(None, contributor2_doc['family_name'])
eq_(contributor1.viaf, contributor1_doc['viaf'])
eq_(None, contributor2_doc['viaf'])
eq_(contributor1.lc, contributor1_doc['lc'])
eq_(None, contributor2_doc['lc'])
eq_(Contributor.PRIMARY_AUTHOR_ROLE, contributor1_doc['role'])
eq_(Contributor.AUTHOR_ROLE, contributor2_doc['role'])
classifications = search_doc['classifications']
eq_(3, len(classifications))
[classification1_doc] = [c for c in classifications if c['scheme'] == Subject.uri_lookup[Subject.BISAC]]
[classification2_doc] = [c for c in classifications if c['scheme'] == Subject.uri_lookup[Subject.OVERDRIVE]]
[classification3_doc] = [c for c in classifications if c['scheme'] == Subject.uri_lookup[Subject.FAST]]
eq_("FICTION Science Fiction Time Travel", classification1_doc['term'])
eq_(float(6 + 1)/(6 + 1 + 2 + 7), classification1_doc['weight'])
eq_("Romance", classification2_doc['term'])
eq_(float(2)/(6 + 1 + 2 + 7), classification2_doc['weight'])
eq_("Sea Stories", classification3_doc['term'])
eq_(float(7)/(6 + 1 + 2 + 7), classification3_doc['weight'])
genres = search_doc['genres']
eq_(2, len(genres))
[genre1_doc] = [g for g in genres if g['name'] == genre1.name]
[genre2_doc] = [g for g in genres if g['name'] == genre2.name]
eq_(Subject.SIMPLIFIED_GENRE, genre1_doc['scheme'])
eq_(genre1.id, genre1_doc['term'])
eq_(1, genre1_doc['weight'])
eq_(Subject.SIMPLIFIED_GENRE, genre2_doc['scheme'])
eq_(genre2.id, genre2_doc['term'])
eq_(0, genre2_doc['weight'])
target_age_doc = search_doc['target_age']
eq_(work.target_age.lower, target_age_doc['lower'])
eq_(work.target_age.upper, target_age_doc['upper'])
# If a book stops being available through a collection
# (because its LicensePool loses all its licenses or stops
# being open access), it will no longer be listed
# in its Work's search document.
[pool] = collection1.licensepools
pool.licenses_owned = 0
self._db.commit()
search_doc = work.to_search_document()
eq_([collection2.id],
[x['collection_id'] for x in search_doc['licensepools']])
# If the book becomes available again, the collection will
# start showing up again.
pool.open_access = True
self._db.commit()
search_doc = work.to_search_document()
eq_(set([collection1.id, collection2.id]),
set([x['collection_id'] for x in search_doc['licensepools']]))
def test_age_appropriate_for_patron(self):
work = self._work()
work.audience = Classifier.AUDIENCE_YOUNG_ADULT
work.target_age = tuple_to_numericrange((12, 15))
patron = self._patron()
# If no Patron is specified, the method always returns True.
eq_(True, work.age_appropriate_for_patron(None))
# Otherwise, this method is a simple passthrough for
# Patron.work_is_age_appropriate.
patron.work_is_age_appropriate = MagicMock(return_value="value")
eq_("value", work.age_appropriate_for_patron(patron))
patron.work_is_age_appropriate.assert_called_with(
work.audience, work.target_age
)
def test_age_appropriate_for_patron_end_to_end(self):
# A test of age_appropriate_for_patron without any mocks.
# More detailed unit tests are in test_patron.py.
#
# Some end-to-end examples are useful because the
# 'age-appropriate' logic is quite complicated, and because
# target age ranges are sometimes passed around as tuples and
# sometimes as NumericRange objects.
patron = self._patron()
patron.external_type = "a"
# This Lane contains books at the old end of the "children"
# range and the young end of the "young adult" range.
lane = self._lane()
lane.root_for_patron_type = ["a"]
# A patron with this root lane can see children's and YA
# titles in the age range 9-14.
# NOTE: setting target_age sets .audiences to appropriate values,
# so setting .audiences here is purely demonstrative.
lane.audiences = [
Classifier.AUDIENCE_CHILDREN, Classifier.AUDIENCE_YOUNG_ADULT
]
lane.target_age = (9,14)
# This work is a YA title within the age range.
work = self._work()
work.audience = Classifier.AUDIENCE_YOUNG_ADULT
work.target_age = tuple_to_numericrange((12, 15))
eq_(True, work.age_appropriate_for_patron(patron))
# Bump up the target age of the work, and it stops being
# age-appropriate.
work.target_age = tuple_to_numericrange((16, 17))
eq_(False, work.age_appropriate_for_patron(patron))
# Bump up the lane to match, and it's age-appropriate again.
lane.target_age = (9,16)
eq_(True, work.age_appropriate_for_patron(patron))
# Change the audience to AUDIENCE_ADULT, and the work stops being
# age-appropriate.
work.audience = Classifier.AUDIENCE_ADULT
eq_(False, work.age_appropriate_for_patron(patron))
def test_unlimited_access_books_are_available_by_default(self):
# Set up an edition and work.
edition, pool = self._edition(authors=[self._str, self._str], with_license_pool=True)
work = self._work(presentation_edition=edition)
pool.open_access = False
pool.self_hosted = False
pool.unlimited_access = True
# Make sure all of this will show up in a database query.
self._db.flush()
search_doc = work.to_search_document()
# Each LicensePool for the Work is listed in
# the 'licensepools' section.
licensepools = search_doc['licensepools']
eq_(1, len(licensepools))
eq_(licensepools[0]['open_access'], False)
eq_(licensepools[0]['available'], True)
def test_self_hosted_books_are_available_by_default(self):
# Set up an edition and work.
edition, pool = self._edition(authors=[self._str, self._str], with_license_pool=True)
work = self._work(presentation_edition=edition)
pool.licenses_owned = 0
pool.licenses_available = 0
pool.self_hosted = True
# Make sure all of this will show up in a database query.
self._db.flush()
search_doc = work.to_search_document()
# Each LicensePool for the Work is listed in
# the 'licensepools' section.
licensepools = search_doc['licensepools']
eq_(1, len(licensepools))
eq_(licensepools[0]['open_access'], False)
eq_(licensepools[0]['available'], True)
def test_target_age_string(self):
work = self._work()
work.target_age = NumericRange(7, 8, '[]')
eq_("7-8", work.target_age_string)
work.target_age = NumericRange(0, 8, '[]')
eq_("0-8", work.target_age_string)
work.target_age = NumericRange(8, None, '[]')
eq_("8", work.target_age_string)
work.target_age = NumericRange(None, 8, '[]')
eq_("8", work.target_age_string)
work.target_age = NumericRange(7, 8, '[)')
eq_("7", work.target_age_string)
work.target_age = NumericRange(0, 8, '[)')
eq_("0-7", work.target_age_string)
work.target_age = NumericRange(7, 8, '(]')
eq_("8", work.target_age_string)
work.target_age = NumericRange(0, 8, '(]')
eq_("1-8", work.target_age_string)
work.target_age = NumericRange(7, 9, '()')
eq_("8", work.target_age_string)
work.target_age = NumericRange(0, 8, '()')
eq_("1-7", work.target_age_string)
work.target_age = NumericRange(None, None, '()')
eq_("", work.target_age_string)
work.target_age = None
eq_("", work.target_age_string)
def test_reindex_on_availability_change(self):
# A change in a LicensePool's availability creates a
# WorkCoverageRecord indicating that the work needs to be
# re-indexed.
def find_record(work):
"""Find the Work's 'update search index operation'
WorkCoverageRecord.
"""
records = [
x for x in work.coverage_records
if x.operation.startswith(
WorkCoverageRecord.UPDATE_SEARCH_INDEX_OPERATION
)
]
if records:
return records[0]
return None
registered = WorkCoverageRecord.REGISTERED
success = WorkCoverageRecord.SUCCESS
# A Work with no LicensePool isn't registered as needing
# indexing. (It will be indexed anyway, but it's not registered
# as needing it.)
no_licensepool = self._work()
eq_(None, find_record(no_licensepool))
# A Work with a LicensePool starts off in a state where it
# needs to be indexed.
work = self._work(with_open_access_download=True)
[pool] = work.license_pools
record = find_record(work)
eq_(registered, record.status)
# If it stops being open-access, it needs to be reindexed.
record.status = success
pool.open_access = False
record = find_record(work)
eq_(registered, record.status)
# If it becomes open-access again, it needs to be reindexed.
record.status = success
pool.open_access = True
record = | |
Union[str, HospitalizationId] = None
category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, HospitalizationId):
self.id = HospitalizationId(self.id)
super().__post_init__(**kwargs)
@dataclass
class SocioeconomicAttribute(Attribute):
    """
    Attributes relating to a socioeconomic manifestation
    """
    # NOTE(review): matches LinkML gen-python output — presumably
    # auto-generated; prefer editing the schema over this class.
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.SocioeconomicAttribute
    class_class_curie: ClassVar[str] = "biolink:SocioeconomicAttribute"
    class_name: ClassVar[str] = "socioeconomic attribute"
    class_model_uri: ClassVar[URIRef] = BIOLINK.SocioeconomicAttribute
    # Required slot; the None default is a placeholder — presumably
    # enforced by Attribute's __post_init__ (confirm).
    has_attribute_type: Union[dict, OntologyClass] = None
@dataclass
class Case(IndividualOrganism):
    """
    An individual (human) organism that has a patient role in some clinical context.
    """
    # Class metadata binding this dataclass to biolink:Case.
    _inherited_slots: ClassVar[List[str]] = ["in_taxon"]
    class_class_uri: ClassVar[URIRef] = BIOLINK.Case
    class_class_curie: ClassVar[str] = "biolink:Case"
    class_name: ClassVar[str] = "case"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Case
    # Required slots; None defaults are placeholders (id is enforced below).
    id: Union[str, CaseId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Reject a missing/empty id, then coerce plain strings to CaseId.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, CaseId):
            self.id = CaseId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class Cohort(StudyPopulation):
    """
    A group of people banded together or treated as a group who share common characteristics. A cohort 'study' is a
    particular form of longitudinal study that samples a cohort, performing a cross-section at intervals through time.
    """
    # Class metadata binding this dataclass to biolink:Cohort.
    _inherited_slots: ClassVar[List[str]] = ["in_taxon"]
    class_class_uri: ClassVar[URIRef] = BIOLINK.Cohort
    class_class_curie: ClassVar[str] = "biolink:Cohort"
    class_name: ClassVar[str] = "cohort"
    class_model_uri: ClassVar[URIRef] = BIOLINK.Cohort
    # Required slots; None defaults are placeholders (id is enforced below).
    id: Union[str, CohortId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Reject a missing/empty id, then coerce plain strings to CohortId.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, CohortId):
            self.id = CohortId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class ExposureEvent(YAMLRoot):
    """
    A (possibly time bounded) incidence of a feature of the environment of an organism that influences one or more
    phenotypic features of that organism, potentially mediated by genes
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.ExposureEvent
    class_class_curie: ClassVar[str] = "biolink:ExposureEvent"
    class_name: ClassVar[str] = "exposure event"
    class_model_uri: ClassVar[URIRef] = BIOLINK.ExposureEvent
    # Optional slot: when the exposure applies.
    timepoint: Optional[Union[str, TimeType]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce any non-None, non-TimeType value (e.g. a raw string)
        # into TimeType; None is left untouched.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        super().__post_init__(**kwargs)
@dataclass
class GenomicBackgroundExposure(YAMLRoot):
    """
    A genomic background exposure is where an individual's specific genomic background of genes, sequence variants or
    other pre-existing genomic conditions constitute a kind of 'exposure' to the organism, leading to or influencing
    an outcome.
    """
    _inherited_slots: ClassVar[List[str]] = ["in_taxon"]
    class_class_uri: ClassVar[URIRef] = BIOLINK.GenomicBackgroundExposure
    class_class_curie: ClassVar[str] = "biolink:GenomicBackgroundExposure"
    class_name: ClassVar[str] = "genomic background exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.GenomicBackgroundExposure
    # Optional slots; has_gene_or_gene_product is multivalued.
    timepoint: Optional[Union[str, TimeType]] = None
    has_gene_or_gene_product: Optional[Union[Union[str, GeneId], List[Union[str, GeneId]]]] = empty_list()
    has_biological_sequence: Optional[Union[str, BiologicalSequence]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-None timepoint into TimeType.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        # Normalize has_gene_or_gene_product to a list (None -> empty),
        # then coerce every element to GeneId.
        if not isinstance(self.has_gene_or_gene_product, list):
            self.has_gene_or_gene_product = [self.has_gene_or_gene_product] if self.has_gene_or_gene_product is not None else []
        self.has_gene_or_gene_product = [v if isinstance(v, GeneId) else GeneId(v) for v in self.has_gene_or_gene_product]
        # Coerce a non-None sequence into BiologicalSequence.
        if self.has_biological_sequence is not None and not isinstance(self.has_biological_sequence, BiologicalSequence):
            self.has_biological_sequence = BiologicalSequence(self.has_biological_sequence)
        super().__post_init__(**kwargs)
class PathologicalEntityMixin(YAMLRoot):
    """
    A pathological (abnormal) structure or process.
    """
    # Mixin: carries only class metadata, no slots of its own.
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.PathologicalEntityMixin
    class_class_curie: ClassVar[str] = "biolink:PathologicalEntityMixin"
    class_name: ClassVar[str] = "pathological entity mixin"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PathologicalEntityMixin
@dataclass
class PathologicalProcess(BiologicalProcess):
    """
    A biologic function or a process having an abnormal or deleterious effect at the subcellular, cellular,
    multicellular, or organismal level.
    """
    # Class metadata binding this dataclass to biolink:PathologicalProcess.
    _inherited_slots: ClassVar[List[str]] = ["has_input", "has_output", "enabled_by"]
    class_class_uri: ClassVar[URIRef] = BIOLINK.PathologicalProcess
    class_class_curie: ClassVar[str] = "biolink:PathologicalProcess"
    class_name: ClassVar[str] = "pathological process"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PathologicalProcess
    # Required slots; None defaults are placeholders (id is enforced below).
    id: Union[str, PathologicalProcessId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Reject a missing/empty id, then coerce plain strings to the id type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, PathologicalProcessId):
            self.id = PathologicalProcessId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class PathologicalProcessExposure(YAMLRoot):
    """
    A pathological process, when viewed as an exposure, representing a precondition, leading to or influencing an
    outcome, e.g. autoimmunity leading to disease.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.PathologicalProcessExposure
    class_class_curie: ClassVar[str] = "biolink:PathologicalProcessExposure"
    class_name: ClassVar[str] = "pathological process exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PathologicalProcessExposure
    # Optional slot: when the exposure applies.
    timepoint: Optional[Union[str, TimeType]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-None timepoint into TimeType.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        super().__post_init__(**kwargs)
@dataclass
class PathologicalAnatomicalStructure(AnatomicalEntity):
    """
    An anatomical structure with the potential of have an abnormal or deleterious effect at the subcellular, cellular,
    multicellular, or organismal level.
    """
    # Class metadata binding this dataclass to biolink:PathologicalAnatomicalStructure.
    _inherited_slots: ClassVar[List[str]] = ["in_taxon"]
    class_class_uri: ClassVar[URIRef] = BIOLINK.PathologicalAnatomicalStructure
    class_class_curie: ClassVar[str] = "biolink:PathologicalAnatomicalStructure"
    class_name: ClassVar[str] = "pathological anatomical structure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PathologicalAnatomicalStructure
    # Required slots; None defaults are placeholders (id is enforced below).
    id: Union[str, PathologicalAnatomicalStructureId] = None
    category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Reject a missing/empty id, then coerce plain strings to the id type.
        if self._is_empty(self.id):
            self.MissingRequiredField("id")
        if not isinstance(self.id, PathologicalAnatomicalStructureId):
            self.id = PathologicalAnatomicalStructureId(self.id)
        super().__post_init__(**kwargs)
@dataclass
class PathologicalAnatomicalExposure(YAMLRoot):
    """
    An abnormal anatomical structure, when viewed as an exposure, representing an precondition, leading to or
    influencing an outcome, e.g. thrombosis leading to an ischemic disease outcome.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.PathologicalAnatomicalExposure
    class_class_curie: ClassVar[str] = "biolink:PathologicalAnatomicalExposure"
    class_name: ClassVar[str] = "pathological anatomical exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.PathologicalAnatomicalExposure
    # Optional slot: when the exposure applies.
    timepoint: Optional[Union[str, TimeType]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-None timepoint into TimeType.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        super().__post_init__(**kwargs)
@dataclass
class DiseaseOrPhenotypicFeatureExposure(YAMLRoot):
    """
    A disease or phenotypic feature state, when viewed as an exposure, represents an precondition, leading to or
    influencing an outcome, e.g. HIV predisposing an individual to infections; a relative deficiency of skin
    pigmentation predisposing an individual to skin cancer.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.DiseaseOrPhenotypicFeatureExposure
    class_class_curie: ClassVar[str] = "biolink:DiseaseOrPhenotypicFeatureExposure"
    class_name: ClassVar[str] = "disease or phenotypic feature exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.DiseaseOrPhenotypicFeatureExposure
    # Optional slot: when the exposure applies.
    timepoint: Optional[Union[str, TimeType]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-None timepoint into TimeType.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        super().__post_init__(**kwargs)
@dataclass
class ChemicalExposure(YAMLRoot):
    """
    A chemical exposure is an intake of a particular chemical entity.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.ChemicalExposure
    class_class_curie: ClassVar[str] = "biolink:ChemicalExposure"
    class_name: ClassVar[str] = "chemical exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.ChemicalExposure
    # Optional slot: when the exposure applies.
    timepoint: Optional[Union[str, TimeType]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-None timepoint into TimeType.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        super().__post_init__(**kwargs)
class ComplexChemicalExposure(YAMLRoot):
    """
    A complex chemical exposure is an intake of a chemical mixture (e.g. gasoline), other than a drug.
    """
    # Metadata-only class: no slots beyond those of YAMLRoot.
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.ComplexChemicalExposure
    class_class_curie: ClassVar[str] = "biolink:ComplexChemicalExposure"
    class_name: ClassVar[str] = "complex chemical exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.ComplexChemicalExposure
@dataclass
class DrugExposure(ChemicalExposure):
    """
    A drug exposure is an intake of a particular drug.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.DrugExposure
    class_class_curie: ClassVar[str] = "biolink:DrugExposure"
    class_name: ClassVar[str] = "drug exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.DrugExposure
    # Optional slot (redeclared from ChemicalExposure): when the exposure applies.
    timepoint: Optional[Union[str, TimeType]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Coerce a non-None timepoint into TimeType.
        if self.timepoint is not None and not isinstance(self.timepoint, TimeType):
            self.timepoint = TimeType(self.timepoint)
        super().__post_init__(**kwargs)
@dataclass
class DrugToGeneInteractionExposure(DrugExposure):
    """
    drug to gene interaction exposure is a drug exposure is where the interactions of the drug with specific genes are
    known to constitute an 'exposure' to the organism, leading to or influencing an outcome.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = BIOLINK.DrugToGeneInteractionExposure
    class_class_curie: ClassVar[str] = "biolink:DrugToGeneInteractionExposure"
    class_name: ClassVar[str] = "drug to gene interaction exposure"
    class_model_uri: ClassVar[URIRef] = BIOLINK.DrugToGeneInteractionExposure
    # Optional multivalued slot: genes the drug interacts with.
    has_gene_or_gene_product: Optional[Union[Union[str, GeneId], List[Union[str, GeneId]]]] = empty_list()
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Normalize to a list (None -> empty), then coerce elements to GeneId.
        if not isinstance(self.has_gene_or_gene_product, list):
            self.has_gene_or_gene_product = [self.has_gene_or_gene_product] if self.has_gene_or_gene_product is not None else []
        self.has_gene_or_gene_product = [v if isinstance(v, GeneId) else GeneId(v) for v in self.has_gene_or_gene_product]
        super().__post_init__(**kwargs)
@dataclass
class Treatment(NamedThing):
"""
A treatment is targeted at a disease or phenotype and may involve multiple drug 'exposures', medical devices
and/or procedures
"""
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = BIOLINK.Treatment
class_class_curie: ClassVar[str] = "biolink:Treatment"
class_name: ClassVar[str] = "treatment"
class_model_uri: ClassVar[URIRef] = BIOLINK.Treatment
id: Union[str, TreatmentId] = None
category: Union[Union[str, NamedThingId], List[Union[str, NamedThingId]]] = None
has_drug: Optional[Union[Union[str, DrugId], List[Union[str, DrugId]]]] = empty_list()
has_device: Optional[Union[Union[str, DeviceId], List[Union[str, DeviceId]]]] = empty_list()
has_procedure: Optional[Union[Union[str, ProcedureId], List[Union[str, ProcedureId]]]] = empty_list()
timepoint: Optional[Union[str, TimeType]] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.id):
self.MissingRequiredField("id")
if not isinstance(self.id, TreatmentId):
self.id = TreatmentId(self.id)
if not isinstance(self.has_drug, list):
self.has_drug = [self.has_drug] if self.has_drug is not None else []
self.has_drug = [v if isinstance(v, DrugId) else DrugId(v) for v in self.has_drug]
if not isinstance(self.has_device, list):
self.has_device = [self.has_device] if self.has_device is not None else []
self.has_device = [v if isinstance(v, DeviceId) else DeviceId(v) for v in self.has_device]
if not isinstance(self.has_procedure, list):
self.has_procedure = [self.has_procedure] if self.has_procedure is not None else []
self.has_procedure = | |
#!/usr/bin/env python
# cardinal_pythonlib/psychiatry/drugs.py
"""
===============================================================================
Original code copyright (C) 2009-2021 <NAME> (<EMAIL>).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Drug information, with an emphasis on psychotropic drugs, including
translating specific to generic names.**
**Examples**
Test within Python:
.. code-block:: python
from cardinal_pythonlib.psychiatry.drugs import *
drug_name_to_generic("UNKNOWN")
drug_name_to_generic("UNKNOWN", unknown_to_default=True)
drug_names_to_generic([
"citalopram", "Citalopram", "Cipramil", "Celexa",
"olanzepine", # typo
"dextroamphetamine",
"amitryptyline",
])
**Antidepressants**
As of 2018-07-01, this is a functional superset of the SLAM
antidepressant-finding SQL (see ``dep_med_v1``), though mainly a superset in
non-antidepressant respects; the only antidepressants it adds are:
- bupropion, maprotiline
The SLAM antidepressant finder finds:
- tricyclic (category)
- amitriptyline, clomipramine, dosulepin, doxepin, imipramine, lofepramine,
nortriptyline, trimipramine
- mianserin, trazodone, phenelzine, isocarboxazid, tranylcypromine, moclobemide
- citalopram, escitalopram, fluoxetine, fluvoxamine, paroxetine, sertraline
- mirtazapine, reboxetine, venlafaxine, agomelatine, duloxetine
- flupentixol, tryptophan
Sorted, that is:
.. code-block:: none
agomelatine
amitriptyline
citalopram
clomipramine
dosulepin
doxepin
duloxetine
escitalopram
fluoxetine
flupentixol
fluvoxamine
imipramine
isocarboxazid
lofepramine
mianserin
mirtazapine
moclobemide
nortriptyline
paroxetine
phenelzine
reboxetine
sertraline
tranylcypromine
trazodone
tricyclic
trimipramine
tryptophan
venlafaxine
Compare that against the output of:
.. code-block:: python
[x.generic_name for x in all_drugs_where(slam_antidepressant_finder=True,
include_categories=True)]
**Using this code from R via reticulate**
Test within R:
.. code-block:: r
# -------------------------------------------------------------------------
# Load libraries
# -------------------------------------------------------------------------
RUN_ONCE_ONLY <- '
library(devtools)
devtools::install_github("rstudio/reticulate") # get latest version
'
library(data.table)
library(reticulate)
# -------------------------------------------------------------------------
# Set up reticulate
# -------------------------------------------------------------------------
VENV <- "~/dev/venvs/cardinal_pythonlib" # or your preferred virtualenv
PYTHON_EXECUTABLE <- ifelse(
.Platform$OS.type == "windows",
file.path(VENV, "Scripts", "python.exe"), # Windows
file.path(VENV, "bin", "python") # Linux
)
reticulate::use_python(PYTHON_EXECUTABLE, required=TRUE)
# ... it is CRITICAL to use required=TRUE, or it might fail silently
# Unnecessary now reticulate::use_python() works:
#
# PYTHON_VERSION <- "python3.5"
# CARDINAL_PYTHONLIB_BASEDIR <- ifelse(
# .Platform$OS.type == "windows",
# file.path(VENV, "lib", "site-packages/cardinal_pythonlib"),
# file.path(VENV, "lib", PYTHON_VERSION, "site-packages/cardinal_pythonlib")
# )
# reticulate::use_virtualenv(VENV, required=TRUE)
#
# cpl_fileops <- reticulate::import_from_path("fileops", CARDINAL_PYTHONLIB_BASEDIR)
# cpl_drugs <- reticulate::import_from_path("drugs", file.path(CARDINAL_PYTHONLIB_BASEDIR, "psychiatry"))
#
# ... this is NOT WORKING properly; dotted imports via reticulate::import() fail; also, imports from
# within the Python code fail even if you use reticulate::import_from_path(); this suggests the virtualenv is not set up
# properly; use reticulate::use_python() instead.
# -------------------------------------------------------------------------
# Import Python modules
# -------------------------------------------------------------------------
cardinal_pythonlib <- reticulate::import("cardinal_pythonlib")
cpl_fileops <- reticulate::import("cardinal_pythonlib.fileops")
cpl_drugs <- reticulate::import("cardinal_pythonlib.psychiatry.drugs")
# -------------------------------------------------------------------------
# Do something useful
# -------------------------------------------------------------------------
testnames <- c("citalopram", "Cipramil", "Prozac", "fluoxetine")
# Works for simple variables:
cpl_drugs$drug_names_to_generic(testnames)
# Also works for data table replacements:
dt <- data.table(
subject = c("Alice", "Bob", "Charles", "Dawn", "Egbert", "Flora"),
drug = c("citalopram", "Cipramil", "Prozac", "fluoxetine", "Priadel", "Haldol")
)
dt[, drug_generic := cpl_drugs$drug_names_to_generic(drug)]
dt[, is_antidepressant := cpl_drugs$drug_names_match_criteria(
drug_generic,
names_are_generic=TRUE,
antidepressant=TRUE)]
dt[, is_antidepressant_not_ssri := cpl_drugs$drug_names_match_criteria(
drug_generic,
names_are_generic=TRUE,
antidepressant=TRUE,
ssri=FALSE)]
dt[, is_conventional_antidepressant := cpl_drugs$drug_names_match_criteria(
drug_generic,
names_are_generic=TRUE,
conventional_antidepressant=TRUE)]
dt[, slam_antidepressant_finder := cpl_drugs$drug_names_match_criteria(
drug_generic,
names_are_generic=TRUE,
slam_antidepressant_finder=TRUE,
include_categories=TRUE)]
**Use for SQL finding**
.. code-block:: python
from typing import List
from cardinal_pythonlib.psychiatry.drugs import *
colname = "somecol"
antidepressants = all_drugs_where(conventional_antidepressant=True) # type: List[Drug]
antidep_sql_parts = [drug.sql_column_like_drug(colname) for drug in antidepressants]
antidep_sql = " OR ".join(antidep_sql_parts)
antipsychotics = all_drugs_where(antipsychotic=True) # type: List[Drug]
antipsy_sql_parts = [drug.sql_column_like_drug(colname) for drug in antipsychotics]
antipsy_sql = " OR ".join(antipsy_sql_parts)
alldrugs = all_drugs_where()
alldrug_sql_parts = [drug.sql_column_like_drug(colname) for drug in alldrugs]
alldrug_sql = " OR ".join(alldrug_sql_parts)
lithium = get_drug("lithium")
lithium_sql = lithium.sql_column_like_drug(colname)
# HOWEVER, NOTE THAT LITHIUM IS CURRENTLY OVER-INCLUSIVE and will include
# lithium chloride for LiDCO measurement.
""" # noqa
import re
from typing import Dict, List, Optional, Pattern, Union
from cardinal_pythonlib.sql.literals import sql_string_literal
# =============================================================================
# Regex constants
# =============================================================================
# Regex fragments used when assembling drug-name matching patterns below.
WILDCARD = ".*" # if re.DOTALL is set, this also matches newlines
# Shorthand alias: WB and WORD_BOUNDARY both name the regex word boundary.
WB = WORD_BOUNDARY = r"\b"
# =============================================================================
# Class to capture drug information
# =============================================================================
class Drug(object):
"""
Class to describe a specific drug, or a drug category.
Also embodies knowledge about brand names and common misspellings.
See the :const:`DRUGS` list for an example of use.
"""
def __init__(
self,
# Names
generic: Union[str, List[str]],
alternatives: List[str] = None,
category_not_drug: bool = False,
add_preceding_wildcards: bool = True,
add_preceding_word_boundary: bool = True,
add_following_wildcards: bool = True,
# Psychiatry
psychotropic: bool = None, # special; can be used as override if False # noqa
antidepressant: bool = False,
conventional_antidepressant: bool = False,
ssri: bool = False,
non_ssri_modern_antidepressant: bool = False,
tricyclic_antidepressant: bool = False,
tetracyclic_and_related_antidepressant: bool = False,
monoamine_oxidase_inhibitor: bool = False,
antipsychotic: bool = False,
first_generation_antipsychotic: bool = False,
second_generation_antipsychotic: bool = False,
stimulant: bool = False,
anticholinergic: bool = False,
benzodiazepine: bool = False,
z_drug: bool = False,
non_benzodiazepine_anxiolytic: bool = False,
gaba_a_functional_agonist: bool = False,
gaba_b_functional_agonist: bool = False,
mood_stabilizer: bool = False,
# Endocrinology
antidiabetic: bool = False,
sulfonylurea: bool = False,
biguanide: bool = False,
glifozin: bool = False,
glp1_agonist: bool = False,
dpp4_inhibitor: bool = False,
meglitinide: bool = False,
thiazolidinedione: bool = False,
# Cardiovascular
cardiovascular: bool = False,
beta_blocker: bool = False,
ace_inhibitor: bool = False,
statin: bool = False,
# Respiratory
respiratory: bool = False,
beta_agonist: bool = False,
# Gastrointestinal
gastrointestinal: bool = False,
proton_pump_inhibitor: bool = False,
nonsteroidal_anti_inflammatory: bool = False,
# Nutritional
vitamin: bool = False,
# Special flags:
slam_antidepressant_finder: bool = False) -> None:
# noinspection PyUnresolvedReferences
"""
Initialize and determine/store category knowledge.
``alternatives`` can include regexes (as text).
We add front/back wildcards by default; this handles all situations
like "depot X", etc. We also add a preceding word boundary (after the
wildcard); thus the usual transformation is ``XXX`` -> ``.*\bXXX.*``.
Args:
generic: generic name, or list of names
alternatives: can include regexes (as text)
category_not_drug: is this a drug category, not a specific drug?
add_preceding_wildcards: when making a regex (etc.), add a wildcard
to the start of all possibilities (generic + alternative names)
that don't already have one?
add_preceding_word_boundary: when making a regex (etc.), add word
boundaries to the start of all possibilities (generic +
alternative names) that don't already have one?
add_following_wildcards: when making a regex (etc.), add a wildcard
to the end of all possibilities (generic + alternative names)
that don't already have one?
psychotropic: a psychotropic drug?
antidepressant: an antidepressant?
conventional_antidepressant: a traditional antidepressant?
ssri: a selective serotonin reuptake inhibitor (SSRI)?
non_ssri_modern_antidepressant: a non-SSRI "modern" antidepressant?
tricyclic_antidepressant: a tricyclic?
tetracyclic_and_related_antidepressant: a tetracyclic or related?
monoamine_oxidase_inhibitor: a MAO-I?
antipsychotic: an antipsychotic?
first_generation_antipsychotic: an FGA?
second_generation_antipsychotic: an SGA?
stimulant: a psychostimulant?
anticholinergic: an anticholinergic?
benzodiazepine: a benzodiazepine?
z_drug: a "Z" drug (e.g. zopiclone, zolpidem, ...)
non_benzodiazepine_anxiolytic: a non-BZ anxiolytic?
gaba_a_functional_agonist: a GABA-A functional agonist?
gaba_b_functional_agonist: a GABA-B functional agonist?
mood_stabilizer: a "mood stabilizer"?
antidiabetic: treats diabetes?
sulfonylurea: a sulfonylurea (sulphonylurea), for diabetes?
biguanide: a biguanide, for diabetes?
glifozin: a glifozin, for diabetes?
glp1_agonist: a GLP-1 agonist, for diabetes?
dpp4_inhibitor: a DPP4 inhibitor, for diabetes?
meglitinide: a meglitinide, for diabetes?
thiazolidinedione: a thiazolidinedione, for diabetes?
cardiovascular: a cardiovascular drug?
beta_blocker: a beta adrenoceptor antagonist?
ace_inhibitor: an ACE inhibitor?
statin: a statin?
respiratory: a respiratory drug?
beta_agonist: a beta adrenoceptor agonist?
gastrointestinal: a gastrointestinal drug?
proton_pump_inhibitor: a PPI?
nonsteroidal_anti_inflammatory: an NSAID?
vitamin: a vitamin?
slam_antidepressant_finder: a drug found by the SLAM
antidepressant-finding code? (A bit specialized, this one!)
Attributes:
mixture (bool): is this a mixture of more than one drug?
Will be set if more than one generic name is given.
all_generics (List[str]): list of all generic names in lower case
generic_name: generic name (or combination name like ``a_with_b``
for mixtures of ``a`` and ``b``)
regex: compiled case-insensitive regular expression to match
possible names
"""
self.add_preceding_word_boundary = add_preceding_word_boundary
self.add_preceding_wildcards = add_preceding_wildcards
self.add_following_wildcards = add_following_wildcards
# ---------------------------------------------------------------------
# Name handling
# ---------------------------------------------------------------------
if isinstance(generic, list):
self.mixture = True
self.all_generics = [x.lower().strip() for x in generic]
self.generic_name = "_with_".join(self.all_generics)
elif isinstance(generic, str):
self.mixture = False
self.generic_name = generic.lower().strip()
self.all_generics = [self.generic_name]
else:
raise ValueError(f"Bad generic_name: {generic!r}")
self.alternatives | |
# PerceptualLoss.py
import torch
import torch.nn as nn
from torchvision import models
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from DeepImageDenoiser import LR_THRESHOLD, DIMENSION, LEARNING_RATE
from NeuralModels import SpectralNorm
# Number of discriminator updates between learning-rate reviews in
# AdaptivePerceptualLoss.meta_optimize.
ITERATION_LIMIT = int(1e6)
# Backbone configurations: 'dnn' holds a pretrained torchvision feature
# Sequential, 'features' the layer indices at which activations are tapped.
# NOTE(review): building these at import time instantiates (and may download)
# all five pretrained networks even when only one is used — consider lazy
# construction.
SQUEEZENET_CONFIG = {'dnn' : models.squeezenet1_1(pretrained=True).features, 'features' : [2, 5, 8, 13]}
VGG_16_CONFIG = {'dnn' : models.vgg16(pretrained=True).features, 'features' : [4, 9, 16, 23]}
VGG_16_BN_CONFIG = {'dnn' : models.vgg16_bn(pretrained=True).features, 'features' : [6, 13, 23, 33] }
VGG_19_CONFIG = {'dnn' : models.vgg19(pretrained=True).features, 'features' : [ 4, 9, 18, 36] }
VGG_19_BN_CONFIG = {'dnn': models.vgg19_bn(pretrained=True).features, 'features' : [6, 13, 23, 52]}
class BasicFeatureExtractor(nn.Module):
    """Truncated pretrained backbone producing a single feature map.

    Normalises the input with ImageNet channel statistics (for 1- or
    3-channel inputs) and runs it through the first ``feature_limit``
    layers of the configured pretrained network.
    """

    def __init__(self, vgg_config, feature_limit=9):
        """
        Args:
            vgg_config: dict with a 'dnn' entry holding a pretrained
                torchvision ``features`` Sequential (see the *_CONFIG
                module constants).
            feature_limit: exclusive index of the last pretrained layer
                to keep in ``slice1``.
        """
        super(BasicFeatureExtractor, self).__init__()
        # ImageNet statistics, stored as Parameters so they travel with the
        # module on .to()/.cuda().
        if DIMENSION == 3:
            self.mean = Parameter(torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1))
            self.std = Parameter(torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1))
        elif DIMENSION == 1:
            # Grayscale: averaged RGB statistics.
            self.mean = Parameter(torch.tensor([0.449]).view(-1, 1, 1))
            self.std = Parameter(torch.tensor([0.226]).view(-1, 1, 1))
        else:
            # Unknown channel count: identity normalisation.
            self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
            self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))
        vgg_pretrained = vgg_config['dnn']
        conv = BasicFeatureExtractor.configure_input(DIMENSION, vgg_pretrained)
        self.slice1 = nn.Sequential(conv)
        for x in range(1, feature_limit):
            self.slice1.add_module(str(x), vgg_pretrained[x])

    @staticmethod
    def configure_input(dimension, vgg):
        """Build the input conv layer, adapting pretrained RGB kernels.

        For 1-channel input the pretrained RGB kernels are averaged over
        the colour axis; for 3-channel input they are copied verbatim; any
        other channel count keeps a randomly initialised conv.
        """
        conv = nn.Conv2d(dimension, 64, kernel_size=3, padding=1)
        if dimension == 1 or dimension == 3:
            # BUGFIX: consistently use the `dimension` argument instead of
            # the module-level DIMENSION global that used to shadow it.
            weight = torch.FloatTensor(64, dimension, 3, 3)
            parameters = list(vgg.parameters())
            for i in range(64):
                if dimension == 1:
                    weight[i, :, :, :] = parameters[0].data[i].mean(0)
                else:
                    weight[i, :, :, :] = parameters[0].data[i]
            conv.weight.data.copy_(weight)
            conv.bias.data.copy_(parameters[1].data)
        return conv

    def forward(self, x):
        if DIMENSION == 1 or DIMENSION == 3:
            # BUGFIX: Parameter.to() is not in-place; the old code discarded
            # its result, so mean/std never moved and a cross-device input
            # crashed on the subtraction below.
            mean = self.mean.to(x.device)
            std = self.std.to(x.device)
            x = (x - mean) / std
        return self.slice1(x)
class BasicMultiFeatureExtractor(BasicFeatureExtractor):
    """Feature extractor exposing four intermediate activation maps.

    ``slice1`` (built by the parent) covers the layers up to the first
    tap index; ``slice2``..``slice4`` cover the remaining three spans
    listed in ``vgg_config['features']``.
    """

    def __init__(self, vgg_config, requires_grad):
        bounds = vgg_config['features']
        super(BasicMultiFeatureExtractor, self).__init__(vgg_config, bounds[0])
        backbone = vgg_config['dnn']
        # Build slice2..slice4 from the consecutive [start, stop) layer spans.
        for slice_no, (start, stop) in enumerate(zip(bounds, bounds[1:]), start=2):
            stage = torch.nn.Sequential()
            for layer_no in range(start, stop):
                stage.add_module(str(layer_no), backbone[layer_no])
            setattr(self, 'slice%d' % slice_no, stage)
        if not requires_grad:
            # Loss network: freeze every weight.
            for weight in self.parameters():
                weight.requires_grad = False

    def forward(self, x):
        """Return the activations after each of the four slices."""
        outputs = [super(BasicMultiFeatureExtractor, self).forward(x)]
        for stage in (self.slice2, self.slice3, self.slice4):
            outputs.append(stage(outputs[-1]))
        return tuple(outputs)
class FastNeuralStyleExtractor(BasicMultiFeatureExtractor):
    """Frozen VGG-16 multi-feature extractor for fast-neural-style losses."""

    def __init__(self, requires_grad=False, bn=True):
        # Use the batch-norm flavour of VGG-16 unless plain conv is requested.
        chosen_config = VGG_16_BN_CONFIG if bn else VGG_16_CONFIG
        super(FastNeuralStyleExtractor, self).__init__(chosen_config, requires_grad)
class FastNeuralStylePerceptualLoss(nn.Module):
    """Perceptual (content + style) loss in the style of Johnson et al.

    Content term: weighted MSE between the VGG feature maps of the two
    images. Style term: MSE between Gram matrices of the same maps,
    scaled by ``weight`` (set ``weight=0`` to disable it).
    """

    def __init__(self, weight: float = 1e-3):
        super(FastNeuralStylePerceptualLoss, self).__init__()
        # Per-slice weighting: shallow features dominate, deep ones refine.
        self.factors = [1e0 , 1e-1, 1e-2 , 1e-3]
        self.weight = weight
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.cudas = list(range(torch.cuda.device_count()))
        self.features = FastNeuralStyleExtractor()
        self.features.eval()
        self.features.to(self.device)
        self.criterion = nn.MSELoss()

    def compute_gram_matrix(self, x):
        """Return the (b, ch, ch) Gram matrix of feature map ``x``,
        normalised by channels times spatial size."""
        b, ch, h, w = x.size()
        f = x.view(b, ch, w * h)
        f_T = f.transpose(1, 2)
        G = f.bmm(f_T) / (h * w * ch)
        return G

    def forward(self, actual, desire):
        """Compute (and cache in ``self.loss``) content + style loss."""
        actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
        desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
        closs = 0.0
        sloss = 0.0
        for i in range(len(actuals)):
            closs += self.factors[i] * self.criterion(actuals[i], desires[i])
            # BUGFIX: the style term used to be computed once outside the
            # loop and its result thrown away, so sloss was always 0.
            # Accumulate it per feature level instead.
            if self.weight != 0:
                sloss += self.weight * self.criterion(
                    self.compute_gram_matrix(actuals[i]),
                    self.compute_gram_matrix(desires[i]))
        self.loss = closs + sloss
        return self.loss

    def backward(self, retain_variables=True):
        # BUGFIX: Tensor.backward takes `retain_graph`; the old
        # `retain_variables` keyword was removed from PyTorch long ago.
        return self.loss.backward(retain_graph=retain_variables)
class FluentExtractor(BasicMultiFeatureExtractor):
    """Trainable from-scratch CNN with the same four-slice interface as the
    VGG extractors; used by AdaptivePerceptualLoss as a learnable loss net.
    """

    def __init__(self):
        # NOTE: intentionally bypasses BasicMultiFeatureExtractor /
        # BasicFeatureExtractor.__init__ (which would load a pretrained VGG)
        # by jumping straight to nn.Module.__init__.
        super(BasicFeatureExtractor, self).__init__()
        # Identity normalisation: zero mean, unit std.
        self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
        self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))

        def conv_lrelu(cin, cout, stride, norm=True):
            # conv -> (optional BN) -> LeakyReLU, mirroring the original layout.
            layers = [nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=3,
                                stride=stride, padding=1, bias=False)]
            if norm:
                layers.append(nn.BatchNorm2d(cout))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.slice1 = torch.nn.Sequential(*conv_lrelu(DIMENSION, 64, 1, norm=False))
        self.slice2 = torch.nn.Sequential(*(conv_lrelu(64, 128, 2)
                                            + conv_lrelu(128, 128, 1)
                                            + conv_lrelu(128, 256, 2)))
        self.slice3 = torch.nn.Sequential(*(conv_lrelu(256, 256, 1)
                                            + conv_lrelu(256, 512, 2)))
        self.slice4 = torch.nn.Sequential(*(conv_lrelu(512, 512, 1)
                                            + conv_lrelu(512, 512, 2)))
class AdaptivePerceptualLoss(nn.Module):
    """Adversarially-trained perceptual loss.

    Instead of a frozen VGG, the feature network (FluentExtractor) and a
    small real/fake predictor head are trained online while the loss is
    being used: ``forward`` returns content + perceptual + adversarial
    terms for the generator and, as a side effect, performs one
    discriminator update via ``fit``.
    """

    def __init__(self):
        super(AdaptivePerceptualLoss, self).__init__()
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.cudas = list(range(torch.cuda.device_count()))
        self.features = FluentExtractor()
        # Per-slice weights for the perceptual term (shallow -> deep).
        self.factors = [1e0, 1e-1, 1e-2, 1e-3]
        # Real/fake head applied to the deepest feature map.
        self.predictor = nn.Sequential()
        self.predictor.add_module('conv_9', nn.Conv2d(in_channels=512, out_channels=8, kernel_size=3, stride=2, padding=1, bias=False))
        self.predictor.add_module('lrelu_9', nn.LeakyReLU(0.2))
        self.predictor.add_module('fc', nn.Conv2d(8, 1, 1, 1, 0, bias=False))
        self.predictor.add_module('sigmoid', nn.Sigmoid())
        self.features.to(self.device)
        self.predictor.to(self.device)
        self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
        self.ContentCriterion = nn.L1Loss()
        self.AdversarialCriterion = nn.BCELoss()
        self.loss = None
        self.counter = int(0)
        self.best_loss = float(100500)  # arbitrarily large initial "best"
        self.current_loss = float(0)
        self.relu = nn.ReLU()
        self.margin = 1.0  # hinge margin for the perceptual term in fit()

    def evaluate(self, actual, desire):
        """Extract features for both images and return them together with
        the weighted perceptual (L1 feature) loss."""
        actual_features = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
        desire_features = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
        ploss = 0.0
        for i in range(len(desire_features)):
            ploss += self.factors[i]*self.ContentCriterion(actual_features[i], desire_features[i])
        return actual_features, desire_features, ploss

    def meta_optimize(self, lossD, length):
        """Track a running discriminator loss; every ITERATION_LIMIT steps,
        decay the LR (x0.2, down to LR_THRESHOLD) if the averaged loss
        stopped improving."""
        self.current_loss += float(lossD.item()) / length
        if self.counter > ITERATION_LIMIT:
            self.current_loss = self.current_loss / float(ITERATION_LIMIT)
            if self.current_loss < self.best_loss:
                self.best_loss = self.current_loss
                print('! best_loss !', self.best_loss)
            else:
                for param_group in self.optimizer.param_groups:
                    lr = param_group['lr']
                    if lr >= LR_THRESHOLD:
                        param_group['lr'] = lr * 0.2
                        print('! Decrease LearningRate in Perceptual !', lr)
            # Reset the averaging window.
            self.counter = int(0)
            self.current_loss = float(0)
        self.counter += int(1)

    def pretrain(self, dataloaders, num_epochs=20):
        """Supervised warm-up: train features + predictor as a binary
        real/fake classifier over the 'train'/'val' dataloaders, then reset
        the optimizer."""
        best_acc = 0.0
        for epoch in range(num_epochs):
            print('Epoch {}/{}'.format(epoch, num_epochs - 1))
            print('-' * 10)
            for phase in ['train', 'val']:
                if phase == 'train':
                    self.features.train(True)
                    self.predictor.train(True)
                else:
                    self.features.train(False)
                    self.predictor.train(False)
                running_loss = 0.0
                running_corrects = 0
                for data in dataloaders[phase]:
                    inputs, targets = data
                    targets = targets.float()
                    inputs = Variable(inputs.to(self.device))
                    targets = Variable(targets.to(self.device))
                    self.optimizer.zero_grad()
                    features = torch.nn.parallel.data_parallel(module=self.features, inputs=inputs, device_ids=self.cudas)
                    # Predictor sees the deepest feature map, detached so only
                    # the predictor is updated by this loss.
                    outputs = torch.nn.parallel.data_parallel(module=self.predictor, inputs=features[-1].detach(), device_ids=self.cudas).view(-1)
                    loss = self.AdversarialCriterion(outputs, targets)
                    if phase == 'train':
                        loss.backward()
                        self.optimizer.step()
                    running_loss += loss.item() * inputs.size(0)
                    running_corrects += torch.sum(torch.round(outputs.data) == targets.data)
                    self.meta_optimize(loss, float(targets.size(0)))
                epoch_loss = float(running_loss) / float(len(dataloaders[phase].dataset))
                epoch_acc = float(running_corrects) / float(len(dataloaders[phase].dataset))
                print(' epoch_acc ', epoch_acc, ' epoch_loss ', epoch_loss)
                if phase == 'val' and epoch_acc > best_acc:
                    best_acc = epoch_acc
                    print('curent best_acc ', best_acc)
        # Fresh optimizer state for the subsequent adversarial phase.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)

    def fit(self, actual, desire):
        """One BCE discriminator update on (actual=fake, desire=real)."""
        self.features.train()
        self.predictor.train()
        self.optimizer.zero_grad()
        actual_features, desire_features, ploss = self.evaluate(actual, desire)
        fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),device_ids=self.cudas).view(-1)
        zeros = Variable(torch.zeros(fake.shape).to(self.device))
        real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(), device_ids=self.cudas).view(-1)
        ones = Variable(torch.ones(real.shape).to(self.device))
        lossDreal = self.AdversarialCriterion(real, ones)
        lossDfake = self.AdversarialCriterion(fake, zeros)
        # Hinge term keeps the learned features discriminative (ploss large).
        lossD = lossDreal + lossDfake + self.relu(self.margin - ploss).mean()
        lossD.backward(retain_graph=True)
        self.optimizer.step()
        self.meta_optimize(lossD, float(actual.size(0)))

    def forward(self, actual, desire):
        """Generator-side loss: perceptual + adversarial + pixel L1.
        Also performs one discriminator update (side effect via fit)."""
        self.predictor.eval()
        self.features.eval()
        actual_features, _, ploss = self.evaluate(actual, desire)
        rest = self.predictor(actual_features[-1]).view(-1)
        ones = Variable(torch.ones(rest.shape).to(self.device))
        aloss = self.AdversarialCriterion(rest, ones)
        self.loss = ploss + aloss + self.ContentCriterion(actual, desire)
        self.fit(actual, desire)
        return self.loss

    def backward(self, retain_variables=True):
        # BUGFIX: modern PyTorch spells the keyword `retain_graph`; the old
        # `retain_variables` name raises TypeError.
        return self.loss.backward(retain_graph=retain_variables)
class MobileExtractor(BasicMultiFeatureExtractor):
    """Frozen VGG-19 multi-feature extractor (batch-norm variant by default)."""

    def __init__(self, requires_grad=False, bn=True):
        chosen_config = VGG_19_BN_CONFIG if bn else VGG_19_CONFIG
        super(MobileExtractor, self).__init__(chosen_config, requires_grad)
class MobilePerceptualLoss(nn.Module):
    """Multi-scale VGG-19 perceptual (content-only) loss."""

    def __init__(self):
        super(MobilePerceptualLoss, self).__init__()
        # Per-slice weights, shallow features weighted highest.
        self.factors = [1e0, 1e-1, 1e-2, 1e-3]
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.cudas = list(range(torch.cuda.device_count()))
        self.features = MobileExtractor()
        self.features.eval()
        self.features.to(self.device)
        self.criterion = nn.MSELoss()

    def forward(self, actual, desire):
        """Weighted MSE between the four feature maps of the two images;
        caches the result in ``self.loss``."""
        actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
        desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
        loss = 0.0
        for i in range(len(actuals)):
            loss += self.factors[i]*self.criterion(actuals[i], desires[i])
        self.loss = loss
        return self.loss

    def backward(self, retain_variables=True):
        # BUGFIX: `retain_variables` was renamed `retain_graph` in PyTorch;
        # the old keyword raises TypeError on any modern version.
        return self.loss.backward(retain_graph=retain_variables)
class SimpleExtractor(BasicFeatureExtractor):
    """Single-slice VGG-19 extractor truncated at the ``feat``-th tap index."""

    def __init__(self, feat=1, bn=True):
        # BUGFIX: the network and the feature indices must come from the SAME
        # config; previously bn=True took the BN layer indices but still
        # built the plain VGG-19, truncating it at a meaningless depth.
        config = VGG_19_BN_CONFIG if bn else VGG_19_CONFIG
        # Generalisation: honour the `feat` argument (it was previously
        # ignored and hard-coded to index 1, which remains the default).
        features_limit = config['features'][feat]
        super(SimpleExtractor, self).__init__(config, features_limit)
class SimplePerceptualLoss(nn.Module):
    """Single-scale VGG-19 perceptual loss (one truncated feature slice)."""

    def __init__(self, feat: int = 2):
        super(SimplePerceptualLoss, self).__init__()
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.cudas = list(range(torch.cuda.device_count()))
        self.features = SimpleExtractor(feat)
        self.features.eval()
        self.features.to(self.device)
        self.criterion = nn.MSELoss()

    def forward(self, actual, desire):
        """MSE between the (single) feature maps of the two images;
        caches the result in ``self.loss``."""
        actuals = torch.nn.parallel.data_parallel(module=self.features, inputs=actual, device_ids=self.cudas)
        desires = torch.nn.parallel.data_parallel(module=self.features, inputs=desire, device_ids=self.cudas)
        loss = self.criterion(actuals, desires)
        self.loss = loss
        return self.loss

    def backward(self, retain_variables=True):
        # BUGFIX: Tensor.backward's keyword is `retain_graph`; the old
        # `retain_variables` spelling raises TypeError.
        return self.loss.backward(retain_graph=retain_variables)
class SqueezeExtractor(BasicMultiFeatureExtractor):
    """Multi-scale feature extractor backed by pretrained SqueezeNet 1.1."""

    def __init__(self, requires_grad=False):
        backbone_config = SQUEEZENET_CONFIG
        super(SqueezeExtractor, self).__init__(backbone_config, requires_grad)
class SqueezeAdaptivePerceptualLoss(AdaptivePerceptualLoss):
    """AdaptivePerceptualLoss whose feature net is a trainable SqueezeNet."""

    def __init__(self):
        super(SqueezeAdaptivePerceptualLoss, self).__init__()
        self.features = SqueezeExtractor(requires_grad=True)
        self.features.to(self.device)
        self.predictor.to(self.device)
        # BUGFIX: the optimizer built in the parent constructor still pointed
        # at the discarded FluentExtractor's parameters; rebuild it so the
        # replacement feature network is actually trained.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)
class SpectralFluentExtractor(BasicMultiFeatureExtractor):
    """From-scratch four-slice extractor with spectral-normalised convolutions
    (no batch norm), feeding SpectralAdaptivePerceptualLoss.

    Like FluentExtractor, it deliberately skips the VGG-loading parent
    initialisers by jumping straight to nn.Module.__init__.
    """

    def __init__(self):
        super(BasicFeatureExtractor, self).__init__()
        # Identity normalisation: zero mean, unit std.
        self.mean = Parameter(torch.zeros(DIMENSION).view(-1, 1, 1))
        self.std = Parameter(torch.ones(DIMENSION).view(-1, 1, 1))

        def sn_conv_lrelu(cin, cout, stride):
            # SpectralNorm(conv) -> LeakyReLU, matching the original layout.
            return [SpectralNorm(nn.Conv2d(in_channels=cin, out_channels=cout,
                                           kernel_size=3, stride=stride,
                                           padding=1, bias=False)),
                    nn.LeakyReLU(0.2, inplace=True)]

        # The stem is a plain (non-spectral) convolution.
        self.slice1 = torch.nn.Sequential(
            nn.Conv2d(in_channels=DIMENSION, out_channels=64, kernel_size=3,
                      stride=1, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.slice2 = torch.nn.Sequential(*(sn_conv_lrelu(64, 128, 2)
                                            + sn_conv_lrelu(128, 128, 1)
                                            + sn_conv_lrelu(128, 256, 2)))
        self.slice3 = torch.nn.Sequential(*(sn_conv_lrelu(256, 256, 1)
                                            + sn_conv_lrelu(256, 512, 2)))
        self.slice4 = torch.nn.Sequential(*(sn_conv_lrelu(512, 512, 1)
                                            + sn_conv_lrelu(512, 8, 2)))
class SpectralAdaptivePerceptualLoss(AdaptivePerceptualLoss):
    """Adaptive perceptual loss with spectral normalisation and hinge
    (rather than BCE) adversarial objectives."""

    def __init__(self):
        super(SpectralAdaptivePerceptualLoss, self).__init__()
        self.features = SpectralFluentExtractor()
        self.predictor = nn.Sequential()
        self.predictor.add_module('fc', SpectralNorm(nn.Conv2d(8, 1, 1, 1, 0, bias=False)))
        self.features.to(self.device)
        self.predictor.to(self.device)
        # BUGFIX: the optimizer created by the parent constructor referenced
        # the modules replaced above; rebuild it over the current parameters,
        # otherwise neither the new features nor the new predictor train.
        self.optimizer = torch.optim.Adam(self.parameters(), lr=LEARNING_RATE)

    def fit(self, actual, desire):
        """One hinge-loss discriminator update on (actual=fake, desire=real)."""
        self.features.train()
        self.predictor.train()
        self.optimizer.zero_grad()
        actual_features, desire_features, ploss = self.evaluate(actual, desire)
        fake = torch.nn.parallel.data_parallel(module=self.predictor, inputs=actual_features[-1].detach(),
                                               device_ids=self.cudas).view(-1)
        real = torch.nn.parallel.data_parallel(module=self.predictor, inputs=desire_features[-1].detach(),
                                               device_ids=self.cudas).view(-1)
        # Hinge objectives: real scores pushed above +1, fake below -1.
        lossDreal = self.relu(1.0 - real).mean()
        lossDfake = self.relu(1.0 + fake).mean()
        lossD = lossDreal + lossDfake + self.relu(self.margin - ploss).mean()
        lossD.backward(retain_graph=True)
        self.optimizer.step()
        self.meta_optimize(lossD, float(actual.size(0)))

    def forward(self, actual, desire):
        """Generator-side loss: perceptual term, negative critic score, and
        pixel L1; also performs one discriminator update via fit()."""
        self.predictor.eval()
        self.features.eval()
        actual_features, _, ploss = self.evaluate(actual, desire)
        self.loss = ploss - self.predictor(actual_features[-1]).view(-1).mean() + self.ContentCriterion(actual, desire)
        self.fit(actual, desire)
        return self.loss
class WassersteinAdaptivePerceptualLoss(SpectralAdaptivePerceptualLoss):
def | |
{
'constant': ConstantDetModel,
'scaled': ScaledDetModel,
}
return text_to_type[spec['det_model']['type']](spec['det_model'])
class ModelInput(ABC):
    """Base class bundling the inputs a household epidemic model needs.

    Reads the population pyramid and contact matrices named in ``spec``,
    aggregates them to the model's coarse age classes, and stores the
    household composition list/distribution plus derived summary sizes.
    """
    def __init__(self,
                spec,
                composition_list,
                composition_distribution,
                header=None):
        # Deep-copy so later mutation of the caller's spec dict cannot leak in.
        self.spec = deepcopy(spec)
        self.compartmental_structure = spec['compartmental_structure']
        # NOTE(review): index [2] of subsystem_key assumed to be the list of
        # infectious compartments for this structure — confirm against its
        # definition.
        self.inf_compartment_list = subsystem_key[self.compartmental_structure][2]
        self.no_inf_compartments = len(self.inf_compartment_list)
        self.fine_bds = spec['fine_bds']      # fine age-class boundaries
        self.coarse_bds = spec['coarse_bds']  # coarse (model) age-class boundaries
        self.no_age_classes = len(self.coarse_bds)
        self.pop_pyramid = read_csv(
            spec['pop_pyramid_file_name'], index_col=0)
        # Combine female and male counts into a single age pyramid.
        self.pop_pyramid = (self.pop_pyramid['F'] + self.pop_pyramid['M']).to_numpy()
        if self.no_age_classes==1:
            self.k_home = array([[1]]) # If we have no age structure, we use a 1x1 array as the contact "matrix"
            self.k_ext = array([[1]])
        else:
            self.k_home = read_excel(
                spec['k_home']['file_name'],
                sheet_name=spec['k_home']['sheet_name'],
                header=header).to_numpy()
            self.k_all = read_excel(
                spec['k_all']['file_name'],
                sheet_name=spec['k_all']['sheet_name'],
                header=header).to_numpy()
            # Aggregate fine-resolution matrices to the coarse age classes,
            # weighting by the population pyramid.
            self.k_home = aggregate_contact_matrix(
                self.k_home, self.fine_bds, self.coarse_bds, self.pop_pyramid)
            self.k_all = aggregate_contact_matrix(
                self.k_all, self.fine_bds, self.coarse_bds, self.pop_pyramid)
            # External contacts = all contacts minus within-household contacts.
            self.k_ext = self.k_all - self.k_home
        self.density_expo = spec['density_expo']
        self.composition_list = composition_list
        self.composition_distribution = composition_distribution
        self.ave_hh_size = \
            composition_distribution.T.dot(
                composition_list.sum(axis=1)) # Average household size
        self.dens_adj_ave_hh_size = \
            composition_distribution.T.dot((
                composition_list.sum(axis=1))**self.density_expo) # Average household size adjusted for density, needed to get internal transmission rate from secondary attack prob
        self.ave_hh_by_class = composition_distribution.T.dot(composition_list)
class SIRInput(ModelInput):
    """Model input for the age-structured household SIR model."""

    def __init__(self, spec, composition_list, composition_distribution):
        super().__init__(spec, composition_list, composition_distribution)
        self.expandables = ['sus']
        self.sus = spec['sus']
        # Single infectious compartment, so a single unit scaling vector.
        self.inf_scales = [ones((self.no_age_classes,))]
        infectious_period = 1 / spec['recovery_rate']
        # Leading eigenvalues of the within/between-household candidates.
        spec_rad_home = max(eig(
            self.sus * (infectious_period *
            (self.k_home))
            )[0])
        spec_rad_ext = max(eig(
            self.sus * (infectious_period *
            (self.k_ext))
            )[0])
        # NOTE(review): presumably calibrates the within-household scale to
        # the attack ratio spec['AR'] — confirm against the model derivation.
        R_int = - log(1 - spec['AR']) * self.dens_adj_ave_hh_size
        self.k_home = R_int * self.k_home / spec_rad_home
        # Scale external mixing to reproduce the target R*.
        external_scale = spec['R*']/(self.ave_hh_size*spec['AR'])
        self.k_ext = external_scale * self.k_ext / spec_rad_ext
    @property
    def gamma(self):
        # Recovery rate.
        return self.spec['recovery_rate']
class SEIRInput(ModelInput):
    # Model input for the household SEIR model.
    # NOTE(review): the calibration below is identical to SIRInput and uses
    # only the recovery rate; the incubation stage does not enter the scaling.
    def __init__(self, spec, composition_list, composition_distribution):
        super().__init__(spec, composition_list, composition_distribution)
        self.expandables = ['sus']  # parameters expandable by age class
        self.sus = spec['sus']
        # Single infectious compartment -> single unit scaling vector.
        self.inf_scales = [ones((self.no_age_classes,))]
        # Leading eigenvalue of the within-household candidate matrix.
        home_eig = max(eig(
            self.sus * ((1/spec['recovery_rate']) *
            (self.k_home))
            )[0])
        # Leading eigenvalue of the between-household candidate matrix.
        ext_eig = max(eig(
            self.sus * ((1/spec['recovery_rate']) *
            (self.k_ext))
            )[0])
        # NOTE(review): presumably calibrates internal transmission to the
        # household attack ratio spec['AR'] — confirm against the derivation.
        R_int = - log(1 - spec['AR']) * self.dens_adj_ave_hh_size
        self.k_home = R_int * self.k_home / home_eig
        # Scale external mixing to reproduce the target spec['R*'].
        external_scale = spec['R*']/(self.ave_hh_size*spec['AR'])
        self.k_ext = external_scale * self.k_ext / ext_eig
    @property
    def alpha(self):
        # Rate of leaving the exposed (latent) compartment.
        return self.spec['incubation_rate']
    @property
    def gamma(self):
        # Recovery rate.
        return self.spec['recovery_rate']
class SEPIRInput(ModelInput):
    # Model input for the household SEPIR model (adds a prodromal stage P
    # between exposure and full infectiousness).
    def __init__(self, spec, composition_list, composition_distribution):
        super().__init__(spec, composition_list, composition_distribution)
        # Both susceptibility and the infectivity scalings expand by age class.
        self.expandables = ['sus',
                           'inf_scales']
        self.sus = spec['sus']
        # Per-compartment transmission scaling: prodromal stage down-weighted,
        # fully symptomatic stage transmits at full (unit) strength.
        self.inf_scales = [spec['prodromal_trans_scaling'],
                 ones(shape(spec['prodromal_trans_scaling']))]
        # Leading eigenvalue of the within-household candidate, summing the
        # symptomatic and (scaled) prodromal contributions.
        home_eig = max(eig(
            self.sus * ((1/spec['recovery_rate']) *
            (self.k_home) + \
            (1/spec['symp_onset_rate']) *
            (self.k_home ) * spec['prodromal_trans_scaling'])
            )[0])
        # Same for the between-household candidate.
        ext_eig = max(eig(
            self.sus * ((1/spec['recovery_rate']) *
            (self.k_ext) + \
            (1/spec['symp_onset_rate']) *
            (self.k_ext ) * spec['prodromal_trans_scaling'])
            )[0])
        # NOTE(review): presumably calibrates internal transmission to the
        # household attack ratio spec['AR'] — confirm against the derivation.
        R_int = - log(1 - spec['AR']) * self.dens_adj_ave_hh_size
        self.k_home = R_int * self.k_home / home_eig
        # External scaling either targets spec['R*'] or leaves a unit-scaled
        # baseline, depending on the chosen fit method.
        if spec['fit_method'] == 'R*':
            external_scale = spec['R*'] / (self.ave_hh_size*spec['AR'])
        else:
            external_scale = 1 / (self.ave_hh_size*spec['AR'])
        self.k_ext = external_scale * self.k_ext / ext_eig
    @property
    def alpha_1(self):
        # Incubation rate: E -> P transition.
        return self.spec['incubation_rate']
    @property
    def alpha_2(self):
        # Symptom onset rate: P -> I transition.
        return self.spec['symp_onset_rate']
    @property
    def gamma(self):
        # Recovery rate.
        return self.spec['recovery_rate']
class SEPIRQInput(ModelInput):
    """Input for the SEPIRQ model: SEPIR plus isolation/quarantine (Q)."""
    def __init__(self, spec, composition_list, composition_distribution):
        super().__init__(spec, composition_list, composition_distribution)
        self.expandables = ['sus',
                            'inf_scales',
                            'iso_rates']
        self.sus = spec['sus']
        # Transmission scalings for [prodromal, fully infectious, isolated].
        self.inf_scales = [spec['prodromal_trans_scaling'],
                           ones(shape(spec['prodromal_trans_scaling'])),
                           spec['iso_trans_scaling']]
        # Spectral radii of the unscaled within/between-household
        # next-generation matrices (prodromal + fully infectious terms);
        # both are computed before k_home/k_ext are rescaled below.
        home_eig = max(eig(
            self.sus * ((1/spec['recovery_rate']) *
                        (self.k_home) + \
                        (1/spec['symp_onset_rate']) *
                        (self.k_home ) * spec['prodromal_trans_scaling'])
            )[0])
        ext_eig = max(eig(
            self.sus * ((1/spec['recovery_rate']) *
                        (self.k_ext) + \
                        (1/spec['symp_onset_rate']) *
                        (self.k_ext ) * spec['prodromal_trans_scaling'])
            )[0])
        # Within-household reproduction number implied by the secondary
        # attack probability spec['AR'], adjusted for household density.
        R_int = - log(1 - spec['AR']) * self.dens_adj_ave_hh_size
        self.k_home = R_int * self.k_home / home_eig
        if spec['fit_method'] == 'R*':
            external_scale = spec['R*'] / (self.ave_hh_size*spec['AR'])
        else:
            external_scale = 1 / (self.ave_hh_size*spec['AR'])
        self.k_ext = external_scale * self.k_ext / ext_eig
        # To define the iso_rates property, we add some zeros which act as dummy
        # entries so that the index of the isolation rates match the
        # corresponding compartmental indices.
        self.iso_rates = [ zeros((self.no_age_classes,)),
                           spec['exp_iso_rate'],
                           spec['pro_iso_rate'],
                           spec['inf_iso_rate'],
                           zeros((self.no_age_classes,)),
                           zeros((self.no_age_classes,)) ]
        # Isolation behaviour parameters, taken directly from the spec.
        self.adult_bd = spec['adult_bd']
        self.class_is_isolating = spec['class_is_isolating']
        self.iso_method = spec['iso_method']
        self.ad_prob = spec['ad_prob']
        self.discharge_rate = spec['discharge_rate']
    @property
    def alpha_1(self):
        """Incubation (E -> P) rate."""
        return self.spec['incubation_rate']
    @property
    def alpha_2(self):
        """Symptom onset (P -> I) rate."""
        return self.spec['symp_onset_rate']
    @property
    def gamma(self):
        """Recovery rate."""
        return self.spec['recovery_rate']
class StandardModelInput(ModelInput):
    """Age-structured model input aggregated from five-year contact data.

    BUG FIX: the original ``__init__`` accepted only ``spec`` but passed
    the undefined names ``composition_list`` and ``composition_distribution``
    to ``super().__init__``, so construction always raised ``NameError``.
    They are now explicit parameters; the defaults of ``None`` keep the old
    single-argument call signature accepted.
    """
    def __init__(self, spec, composition_list=None, composition_distribution=None):
        super().__init__(spec, composition_list, composition_distribution)
        # Five-year bands; upper bound 81 so that 80 is included as well.
        fine_bds = arange(0, 81, 5)
        # Coarse bands: five-year bands below 30, one wide middle band,
        # then five-year bands from 60 upward.
        self.coarse_bds = concatenate((fine_bds[:6], fine_bds[12:]))
        pop_pyramid = read_csv(
            spec['pop_pyramid_file_name'], index_col=0)
        pop_pyramid = (pop_pyramid['F'] + pop_pyramid['M']).to_numpy()
        self.k_home = aggregate_contact_matrix(
            self.k_home, fine_bds, self.coarse_bds, pop_pyramid)
        self.k_all = aggregate_contact_matrix(
            self.k_all, fine_bds, self.coarse_bds, pop_pyramid)
        self.k_ext = self.k_all - self.k_home
        # rho comes in ten-year blocks; expand to the five-year grid, then
        # aggregate to the model's coarse age classes.
        rho = read_csv(
            spec['rho_file_name'], header=None).to_numpy().flatten()
        cdc_bds = arange(0, 81, 10)
        aggregator = make_aggregator(cdc_bds, fine_bds)
        # This is in five year blocks
        rho = sparse((
            rho[aggregator],
            (arange(len(aggregator)), [0]*len(aggregator))))
        rho = spec['recovery_rate'] * spec['R0'] * aggregate_vector_quantities(
            rho, fine_bds, self.coarse_bds, pop_pyramid).toarray().squeeze()
        det_model = det_from_spec(self.spec)
        self.det = det_model(rho)
        self.tau = spec['asymp_trans_scaling'] * ones(rho.shape)
        self.sus = rho / self.det
    @property
    def alpha(self):
        """Incubation rate."""
        return self.spec['incubation_rate']
    @property
    def gamma(self):
        """Recovery rate."""
        return self.spec['recovery_rate']
class VoInput(ModelInput):
    """Model input built from the Vo' study specification."""
    def __init__(self, spec):
        super().__init__(spec, header=0)
        # Five-year bands up to 95, aggregated to ten-year coarse bands.
        fine_bds = arange(0, 96, 5)
        self.coarse_bds = arange(0, 96, 10)
        pop_pyramid = read_csv(
            spec['pop_pyramid_file_name'], index_col=0)
        pop_pyramid = (pop_pyramid['F'] + pop_pyramid['M']).to_numpy()
        '''We need to add an extra row to contact matrix to split 75+ class
        into 75-90 and 90+'''
        # Population shares used to split the 75+ contact class across the
        # extra old-age bands (presumably 75-80, 80-85, 85-90 and 90+ from
        # the pyramid's five-year rows — TODO confirm the indexing).
        proportions_75_plus = append(
            pop_pyramid[15:18],
            sum(pop_pyramid[18:]))
        proportions_75_plus = proportions_75_plus/sum(proportions_75_plus)
        # premultiplier duplicates the last (75+) row three times;
        # postmultiplier spreads the 75+ column by the population shares.
        premultiplier = vstack((
            identity(16),
            tile(identity(16)[15, :], (3, 1))))
        postmultiplier = hstack((
            identity(16),
            zeros((16, 3))))
        postmultiplier[15, 15:] = proportions_75_plus
        k_home = (premultiplier.dot(self.k_home)).dot(postmultiplier)
        k_all = (premultiplier.dot(self.k_all)).dot(postmultiplier)
        self.k_home = aggregate_contact_matrix(
            k_home, fine_bds, self.coarse_bds, pop_pyramid)
        self.k_all = aggregate_contact_matrix(
            k_all, fine_bds, self.coarse_bds, pop_pyramid)
        self.k_ext = self.k_all - self.k_home
        no_age_classes = self.k_home.shape[0]
        # Now construct a matrix to map the age-stratified quantities from the
        # specs to the age boundaries used in the model.
        self.age_quant_bounds = spec['age_quant_bounds']
        age_quant_map = []
        min_now = 0
        for i in range(len(self.age_quant_bounds)):
            # Index of the first coarse boundary above this bound.
            max_now = where(self.coarse_bds>self.age_quant_bounds[i])[0][0]
            # The additions in the expression below are list additions, not
            # array additions. We convert to an array after construction
            age_quant_map.append(
                [0] * min_now
                + [1] * (max_now - min_now)
                + [0] * (no_age_classes-max_now))
            min_now = max_now
        # Final row covers all classes above the last bound.
        age_quant_map.append([0]*min_now + [1]*(no_age_classes - min_now))
        age_quant_map = array(age_quant_map)
        # Expand spec-level age-stratified quantities onto model classes.
        self.det = array(spec['symptom_prob']).dot(age_quant_map)
        self.tau = array(spec['asymp_trans_scaling']).dot(age_quant_map)
        self.sus = array(spec['sus']).dot(age_quant_map)
        self.import_model = import_model_from_spec(spec, self.det)
    @property
    def alpha(self):
        """Incubation rate."""
        return self.spec['incubation_rate']
    @property
    def gamma(self):
        """Recovery rate."""
        return self.spec['recovery_rate']
class TwoAgeWithVulnerableInput:
    """Two age classes (children, adults) plus a vulnerable-adult class.

    NOTE(review): does not inherit from ModelInput; contact matrices are
    read and expanded from two to three classes directly here.
    """
    def __init__(self, spec):
        self.spec = deepcopy(spec)
        # Scaling applied to between-household transmission.
        self.epsilon = spec['external_trans_scaling']
        # Proportion of adults classed as vulnerable.
        self.vuln_prop = spec['vuln_prop']
        left_expander = vstack((
            identity(2),
            [0, 1]))
        # Add copy of bottom row - vulnerables behave identically to adults
        right_expander = array([
            [1, 0, 0],
            [0, 1-self.vuln_prop, self.vuln_prop]
            ])
        # Add copy of right row, scaled by vulnerables, and scale adult column
        # by non-vuln proportion
        k_home = read_excel(
            spec['k_home']['file_name'],
            sheet_name=spec['k_home']['sheet_name'],
            header=None).to_numpy()
        k_all = read_excel(
            spec['k_all']['file_name'],
            sheet_name=spec['k_all']['sheet_name'],
            header=None).to_numpy()
        # Aggregate five-year-band matrices to two classes: under/over 20.
        fine_bds = arange(0, 81, 5)
        self.coarse_bds = array([0, 20])
        # pop_pyramid = read_csv(
        #     'inputs/United Kingdom-2019.csv', index_col=0)
        pop_pyramid = read_csv(
            spec['pop_pyramid_file_name'], index_col=0)
        pop_pyramid = (pop_pyramid['F'] + pop_pyramid['M']).to_numpy()
        self.k_home = aggregate_contact_matrix(
            k_home, fine_bds, self.coarse_bds, pop_pyramid)
        self.k_all = aggregate_contact_matrix(
            k_all, fine_bds, self.coarse_bds, pop_pyramid)
        self.k_ext = self.k_all - self.k_home
        # Expand the 2x2 matrices to 3x3 (child, adult, vulnerable adult).
        self.k_home = left_expander.dot(self.k_home.dot(right_expander))
        self.k_all = left_expander.dot(self.k_all.dot(right_expander))
        self.k_ext = left_expander.dot(self.k_ext.dot(right_expander))
        self.sus = spec['sus']
        self.tau = spec['prodromal_trans_scaling']
        # Spectral radius of the next-generation matrix, used to rescale
        # all contact matrices so the leading eigenvalue matches spec R0.
        eigenvalue = max(eig(
            self.sus * (
                (1.0/spec['recovery_rate'])
                * (self.k_home + self.epsilon * self.k_ext)
                + (1.0/spec['symp_onset_rate']) *
                (self.k_home + self.epsilon * self.k_ext) * self.tau)
            )[0])
        self.k_home = (spec['R0']/eigenvalue)*self.k_home
        self.k_all = (spec['R0']/eigenvalue)*self.k_all
        self.k_ext = (spec['R0']/eigenvalue)*self.k_ext
        # Vulnerable class makes no external contacts (presumably models
        # shielding — TODO confirm).
        self.k_ext[2, :] = 0 * self.k_ext[2, :]
    @property
    def alpha_1(self):
        """Incubation (E -> P) rate."""
        return self.spec['incubation_rate']
    @property
    def alpha_2(self):
        """Symptom onset (P -> I) rate."""
        return self.spec['symp_onset_rate']
    @property
    def gamma(self):
        """Recovery rate."""
        return self.spec['recovery_rate']
class CareHomeInput(ModelInput):
    """Model input for care-home transmission (patients and carers)."""
    def __init__(self, spec):
        # The ModelInput constructor is deliberately skipped: all arrays
        # are constructed by hand below.
        self.spec = deepcopy(spec)
        # Within-home mixing for patients and carers (full time and
        # agency): only patient-patient contact.
        self.k_home = array([
            [1, 0, 0],
            [0, 0, 0],
            [0, 0, 0]])
        # Mixing with other care homes - agency staff may work in more
        # than one home.
        self.k_ext = array([
            [0, 0, 0],
            [0, 0.01, 0.01],
            [0.5, 0.5, 0.5]])
        # Rate of contact with the general outside population.
        self.import_rate = array([0.5, 0.5, 0.5])
        self.sus = spec['sus']
        self.tau = spec['prodromal_trans_scaling']
        # Spectral radius of the within-home next-generation matrix
        # (prodromal + fully infectious contributions).
        spectral_radius = max(eig(
            self.sus * ((1 / spec['recovery_rate']) * (self.k_home) +
                        (1 / spec['symp_onset_rate']) * (self.k_home) * self.tau)
            )[0])
        # Rescale so the within-home reproduction number equals the
        # R_carehome defined in the spec.
        self.k_home = (spec['R_carehome'] / spectral_radius) * self.k_home
        self.mu = spec['empty_rate']
        self.mu_cov = spec['covid_mortality_rate']
        self.b = spec['refill_rate']
        self.epsilon = spec['inter_home_coupling']
    @property
    def alpha_1(self):
        """Incubation (E -> P) rate."""
        return self.spec['incubation_rate']
    @property
    def alpha_2(self):
        """Symptom onset (P -> I) rate."""
        return self.spec['symp_onset_rate']
    @property
    def gamma(self):
        """Recovery rate."""
        return self.spec['recovery_rate']
def get_multiplier(r, Q_int, FOI_by_state, index_prob, starter_mat):
    """Return ``starter_mat @ (r*I - Q_int)^-1 @ FOI_by_state @ index_prob``.

    ``r`` is a scalar growth rate and ``Q_int`` a square matrix; the other
    three arguments are matrices conformable with the product above.
    """
    resolvent = inv(r * identity(Q_int.shape[0]) - Q_int)
    return starter_mat.dot(resolvent.dot(FOI_by_state.dot(index_prob)))
def estimate_growth_rate(household_population,rhs,interval=[0.01,0.1],tol=1e-3):
reverse_comp_dist = | |
a DDE element to delegate
# the :py:meth:`Usage.size` and :py:meth:`Usage.create_func()` operations to this class.
#
# The :py:meth:`Usage.size` method returns the number
# of bytes used by the data element.
#
# - For usage ``DISPLAY``, the size is computed directly from the picture clause.
#
# - For usage ``COMP``, the size is 2, 4 or 8 bytes based on the picture clause.
#
# - For usage ``COMP-3``, the picture clause digits are packed two per byte
# with an extra half-byte for sign information. This must be rounded up.
# COMP-3 fields often have an odd number of digits to reflect this.
#
# - When usage is not provided, it is inherited from the parent
# structure. The top-most parent has a default usage of DISPLAY.
#
# The :py:meth:`Usage.create_func()` method returns a :py:class:`cell.Cell` type
# that should be built from the raw bytes.
#
# .. code-block:: none
#
# http://yuml.me/diagram/scruffy;/class/
# #cobol_loader_usage,
# [RecordFactory]<>-[DDE],
# [DDE]<>-[DDE],
# [DDE]-[Usage],
# [Usage]^[UsageDisplay],
# [Usage]^[UsageComp]
# [Usage]^[UsageComp3]
#
# .. image:: cobol_usage.png
#
# .. py:class:: Usage
#
# The Usage class provides detailed representation and conversion support
# for a given DDE. A :py:class:`schema.Attribute` will refer to a
# :py:class:`cobol.defs.DDE`. This DDE will have a :py:class:`Usage` object that shows
# how to create the underlying ``Cell`` instance from the raw data
# in the :py:class:`cobol.COBOL_File` subclass of ``Workbook``.
#
# For numeric types, this may mean a fallback from creating a :py:class:`NumberCell`
# to creating a :py:class:`ErrorCell`. If the number is invalid in some way, then
# an error is required.
#
# The superclass of ``Usage`` is abstract and doesn't compute a proper size.
#
# .. TODO::
#
# This is regrettably stateful.
#
# ::
class Usage:
    """Convert numeric data based on the Usage clause.

    Holds type information parsed from a PICTURE clause plus a weak
    reference back to the owning DDE.
    """
    def __init__( self, source ):
        # Original COBOL source text of the USAGE clause.
        self.source_= source
        # Stateful type information bound in by picture clause
        self.picture = None
        self.final= ""
        self.numeric= None # is the picture all digits?
        self.length= None
        self.scale= None
        self.precision= None
        self.signed= None
        self.decimal= None
        # Stateful context bound in during parsing.
        self.dde = None
    def setTypeInfo( self, picture ):
        """Details from parsing a PICTURE clause."""
        # Copy the picture's derived attributes onto this Usage instance.
        self.picture = picture
        self.final= picture.final
        self.numeric = not picture.alpha
        self.length = picture.length
        self.scale = picture.scale
        self.precision = picture.precision
        self.signed = picture.signed
        self.decimal = picture.decimal
# ..  py:method:: Usage.source()
#
# ::
    def source( self ):
        """Return the original USAGE clause source text."""
        return self.source_
# ..  py:method:: Usage.resolve()
#
# ::
    def resolve( self, aDDE ):
        """Associate back to the owning DDE."""
        self.dde= weakref.ref(aDDE)
# ..  py:method:: Usage.create_func()
#
#     Create a CELL object. Use the raw bytes to build a Cell described
#     by the given Attribute.
#
# ::
    def create_func( self, raw, workbook, attr ):
        """Converts bytes to a proper Cell object.
        NOTE: EBCDIC->ASCII conversion handled by the Workbook object.
        """
        return stingray.cobol.TextCell( raw, workbook, attr )
# ..  py:method:: Usage.size( picture )
#
#     The count is in bytes. Not characters.
#
# ::
    def size( self, picture ):
        """Default for group-level items."""
        return 0
# .. py:class:: UsageDisplay
#
# Usage "DISPLAY" is the COBOL language default. It's also assumed for group-level items.
#
# ::
class UsageDisplay( Usage ):
    """Ordinary character (DISPLAY) data, which may be numeric."""
    def __init__( self, source ):
        super().__init__( source )
    def create_func( self, raw, workbook, attr ):
        """Build a NumberDisplayCell for numeric pictures (falling back to
        an ErrorCell on conversion failure) or a TextCell otherwise."""
        if not self.numeric:
            return stingray.cobol.TextCell( raw, workbook, attr )
        try:
            return NumberDisplayCell( raw, workbook, attr )
        except Exception as e:
            return ErrorCell( raw, workbook, attr, exception=e )
    def size( self ):
        """Return the actual size of this data, based on PICTURE and SIGN."""
        return len(self.final)
# .. py:class:: UsageComp
#
# Usage "COMPUTATIONAL" is binary-encoded data.
#
# ::
class UsageComp( Usage ):
    """Binary-encoded (COMP) numeric data."""
    def __init__( self, source ):
        super().__init__( source )
    def create_func( self, raw, workbook, attr ):
        """Build a NumberCompCell, falling back to an ErrorCell on failure."""
        try:
            return NumberCompCell( raw, workbook, attr )
        except Exception as e:
            return ErrorCell( raw, workbook, attr, exception=e )
    def size( self ):
        """COMP is a binary halfword, word or doubleword by digit count."""
        digits = len(self.final)
        if digits <= 4:
            return 2
        if digits <= 9:
            return 4
        return 8
# .. py:class:: UsageComp3
#
# Usage "COMP-3" is packed-decimal encoded data.
#
# ::
class UsageComp3( Usage ):
    """Packed-decimal (COMP-3) numeric data."""
    def __init__( self, source ):
        super().__init__( source )
    def create_func( self, raw, workbook, attr ):
        """Build a NumberComp3Cell, falling back to an ErrorCell on failure."""
        try:
            return NumberComp3Cell(raw, workbook, attr)
        except Exception as e:
            return ErrorCell( raw, workbook, attr, exception=e )
    def size( self ):
        """Two digits packed per byte plus a sign half-byte, rounded up."""
        return len(self.final) // 2 + 1
# .. py:class:: UsageParent
#
# This is a bit more complex situation. Unless otherwise specified, all DDE's inherit usage
# from their parent. At the top, it's a default UsageDisplay("").
#
# When a Usage clause is created, it must contain *both* the source text **and** a link to the containing
# DDE so the parent can be located by a walk up the structure.
#
# The DDE links, however, are added when the parent is built.
#
# ::
class UsageParent(Usage):
    """Inherit Usage from parent. Or default to UsageDisplay("") if there is no parent.

    Placeholder usage: both operations must be delegated to the parent
    DDE's usage, so they raise NotImplementedError here.

    BUG FIX: ``create_func`` had its first parameter misspelled as
    ``selfself``, which silently shifted the meaning of the remaining
    positional parameters by one.
    """
    def __init__(self):
        super().__init__("")
    def size(self):
        """Not provided here. Depends on parent!"""
        raise NotImplementedError
    def create_func(self, raw, workbook, attr):
        """Depends on parent!"""
        raise NotImplementedError
# Allocation Strategy Hierarchy
# ------------------------------
#
# We actually have three kinds of allocation relationships among DDE items.
#
# - Predecessor/Successor
#
# - Group/Elementary
#
# - Redefines
#
# [*Formerly, we had only two subclasses.*]
#
# This leads to a **Strategy** class hierarchy to handle the various algorithmic
# choices.
#
# The Pred/Succ strategy computes the offset to a specific item based on the predecessor.
# This is the default for non-head items in a group.
#
# The Group/Elem strategy computes the offset based on the offset to the parent group.
# This is the default for the head item in a group.
#
# The Redefines strategy depends on another element: not its immediate predecessor.
# This element will be assigned the same offset as the element on which it depends.
#
# The **Strategy** design pattern allows an element to delegate the
# :py:meth:`Redefines.offset`,
# and :py:meth:`Redefines.totalSize` methods.
#
# .. code-block:: none
#
# http://yuml.me/diagram/scruffy;/class/
# #cobol_loader_redefines,
# [RecordFactory]<>-[DDE],
# [DDE]<>-[DDE],
# [DDE]-[Allocation],
# [Allocation]^[Redefines],
# [Allocation]^[Pred-Succ],
# [Allocation]^[Group-Elem]
#
# .. image:: cobol_redefines.png
#
# .. py:class:: Allocation
#
# The :py:class:`Allocation` superclass defines an abstract base
# class for the various allocation strategies.
#
# ::
class Allocation:
    """Abstract base class for DDE offset-allocation strategies."""
    def __init__( self ):
        # Weak reference to the owning DDE; set by resolve().
        self.dde= None
    def resolve( self, aDDE ):
        """Associate back to the owning DDE."""
        self.dde= weakref.ref(aDDE)
# .. py:class:: Redefines
#
# The :py:class:`Redefines` subclass depends on another element. It uses
# the referenced name to look up the offset and total size information.
#
# For this to work, the name must be resolved via the :py:meth:`Redefines.resolve` method.
# The :py:func:`resolver` function applies the :py:meth:`Redefines.resolve` method throughout the structure.
#
# ::
class Redefines(Allocation):
    """Lookup size and offset from another field we refer to."""
    def __init__( self, name, refers_to=None ):
        super().__init__()
        # Name of the DDE element this item redefines.
        self.name= name
        self.refers_to= refers_to # Used for unit testing
    def source( self ):
        """Return the COBOL source form of this clause."""
        return "REDEFINES {0}".format( self.refers_to.name )
# ..  py:method:: Redefines.resolve( aDDE )
#
#     Resolve a DDE name. Set our ``self.refers_to`` to refer to a DDE within
#     the given structure.
#
# ::
    def resolve( self, aDDE ):
        """Search the structure for the referenced name.
        Must be done before sizing can be done.
        """
        super().resolve( aDDE )
        # Look the name up from the top-level DDE of the structure.
        self.refers_to= aDDE.top().get( self.name )
# ..  py:method:: Redefines.offset( offset )
#
#     For a redefines, this uses the resolved ``refers_to`` name and fetches
#     the offset.
#
# ::
    def offset( self, offset ):
        """:param offset: computed offset for this relative position.
        :return: named DDE element offset instead.
        """
        # The computed offset is deliberately ignored: a REDEFINES item
        # occupies the same offset as the element it redefines.
        return self.refers_to.offset
# ..  py:method:: Redefines.totalSize()
#
#     Returns the total size.
#
# ::
    def totalSize( self ):
        """:return: total size of this DDE include all children and occurs.
        """
        # Deprecated: a REDEFINES contributes nothing extra to the size.
        warnings.warn("totalSize method is deprecated", DeprecationWarning )
        return 0
# Note that ``01`` level items may have a REDEFINES.
# However, this can never meaningfully redefine anything.
# All ``01`` level definitions start at an offset of 0 by definition.
# A copybook may include multiple ``01`` levels with REDEFINES clauses;
# an 01-level REDEFINES is irrelevant with respect to offset and size calculations.
#
# .. py:class:: Successor
#
# The :py:class:`Successor`
# subclass does not depend on a named element, it depends on the immediate
# predecessor. It uses that contextual offset and size information provided by
# the :py:func:`setSizeAndOffset` function.
#
# ::
class Successor(Allocation):
"""More typical case is that | |
mėnesį", "12 month ago"),
param('lt', "po 2 valandų", "in 2 hour"),
# lu
param('lu', "lelu", "0 day ago"),
param('lu', "makelela", "1 day ago"),
# luo
param('luo', "nyoro", "1 day ago"),
param('luo', "kiny", "in 1 day"),
# luy
param('luy', "mgorova", "1 day ago"),
param('luy', "lero", "0 day ago"),
# lv
param('lv', "pēc 67 minūtes", "in 67 minute"),
param('lv', "pirms 5 nedēļām", "5 week ago"),
param('lv', "nākamajā gadā", "in 1 year"),
# mas
param('mas', "tááisérè", "in 1 day"),
param('mas', "ŋolé", "1 day ago"),
# mer
param('mer', "ĩgoro", "1 day ago"),
param('mer', "narua", "0 day ago"),
# mfe
param('mfe', "zordi", "0 day ago"),
param('mfe', "demin", "in 1 day"),
# mg
param('mg', "rahampitso", "in 1 day"),
param('mg', "omaly", "1 day ago"),
# mgh
param('mgh', "lel'lo", "0 day ago"),
param('mgh', "n'chana", "1 day ago"),
# mgo
param('mgo', "ikwiri", "1 day ago"),
param('mgo', "isu", "in 1 day"),
# mk
param('mk', "пред 4 минута", "4 minute ago"),
param('mk', "за 6 месеци", "in 6 month"),
param('mk', "минатата година", "1 year ago"),
# ml
param('ml', "ഈ മിനിറ്റിൽ", "0 minute ago"),
param('ml', "7 മണിക്കൂറിൽ", "in 7 hour"),
param('ml', "2 വർഷം മുമ്പ്", "2 year ago"),
# mn
param('mn', "5 цагийн өмнө", "5 hour ago"),
param('mn', "10 жилийн дараа", "in 10 year"),
param('mn', "өнгөрсөн долоо хоног", "1 week ago"),
# mr
param('mr', "2 मिनिटांमध्ये", "in 2 minute"),
param('mr', "5 महिन्यापूर्वी", "5 month ago"),
param('mr', "हे वर्ष", "0 year ago"),
# ms
param('ms', "dalam 7 hari", "in 7 day"),
param('ms', "3 thn lalu", "3 year ago"),
param('ms', "bulan depan", "in 1 month"),
# mt
param('mt', "ix-xahar li għadda", "1 month ago"),
param('mt', "2 sena ilu", "2 year ago"),
param('mt', "il-ġimgħa d-dieħla", "in 1 week"),
# mua
param('mua', "tǝ'nahko", "0 day ago"),
param('mua', "tǝ'nane", "in 1 day"),
# my
param('my', "ပြီးခဲ့သည့် 7 မိနစ်", "7 minute ago"),
param('my', "12 လအတွင်း", "in 12 month"),
param('my', "ယခု သီတင်းပတ်", "0 week ago"),
# nb
param('nb', "om 6 timer", "in 6 hour"),
param('nb', "om 2 måneder", "in 2 month"),
param('nb', "forrige uke", "1 week ago"),
param('nb', "for 3 dager siden", "3 day ago"),
param('nb', "for 3 timer siden", "3 hour ago"),
param('nb', '3 dager siden', '3 day ago'),
param('nb', "3 mnd siden", "3 month ago"),
param('nb', "2 uker siden", "2 week ago"),
param('nb', "1 uke siden", "1 week ago"),
param('nb', "10 timer siden", "10 hour ago"),
# nd
param('nd', "kusasa", "in 1 day"),
param('nd', "izolo", "1 day ago"),
# ne
param('ne', "5 वर्ष अघि", "5 year ago"),
param('ne', "35 मिनेटमा", "in 35 minute"),
param('ne', "यो हप्ता", "0 week ago"),
# nl
param('nl', "15 dgn geleden", "15 day ago"),
param('nl', "over 2 maand", "in 2 month"),
param('nl', "vorige jaar", "1 year ago"),
# nmg
param('nmg', "nakugú", "1 day ago"),
param('nmg', "namáná", "in 1 day"),
# nn
param('nn', "for 5 minutter siden", "5 minute ago"),
param('nn', "om 3 uker", "in 3 week"),
param('nn', "i morgon", "in 1 day"),
# nnh
param('nnh', "jǔɔ gẅie à ne ntóo", "in 1 day"),
param('nnh', "jǔɔ gẅie à ka tɔ̌g", "1 day ago"),
# nus
param('nus', "ruun", "in 1 day"),
param('nus', "walɛ 06:23 tŋ", "0 day ago 06:23 pm"),
# nyn
param('nyn', "nyomwabazyo", "1 day ago"),
param('nyn', "erizooba", "0 day ago"),
# os
param('os', "3 боны размӕ", "3 day ago"),
param('os', "47 сахаты фӕстӕ", "in 47 hour"),
param('os', "знон", "1 day ago"),
# pa-Guru
param('pa-Guru', "ਅਗਲਾ ਹਫ਼ਤਾ", "in 1 week"),
param('pa-Guru', "5 ਮਹੀਨੇ ਪਹਿਲਾਂ", "5 month ago"),
param('pa-Guru', "22 ਮਿੰਟਾਂ ਵਿੱਚ", "in 22 minute"),
# pa
param('pa', "15 ਘੰਟੇ ਪਹਿਲਾਂ", "15 hour ago"),
param('pa', "16 ਸਕਿੰਟ ਵਿੱਚ", "in 16 second"),
param('pa', "ਅਗਲਾ ਸਾਲ", "in 1 year"),
# pl
param('pl', "6 tygodni temu", "6 week ago"),
param('pl', "za 8 lat", "in 8 year"),
param('pl', "ta minuta", "0 minute ago"),
# rm
param('rm', "damaun", "in 1 day"),
param('rm', "ier", "1 day ago"),
# ro
param('ro', "acum 2 de ore", "2 hour ago"),
param('ro', "peste 5 de ani", "in 5 year"),
param('ro', "săptămâna trecută", "1 week ago"),
# rof
param('rof', "linu", "0 day ago"),
param('rof', "ng'ama", "in 1 day"),
# ru
param('ru', "12 секунд назад", "12 second ago"),
param('ru', "через 8 месяцев", "in 8 month"),
param('ru', "в прошлом году", "1 year ago"),
# rwk
param('rwk', "ukou", "1 day ago"),
param('rwk', "ngama", "in 1 day"),
# sah
param('sah', "20 чаас ынараа өттүгэр", "20 hour ago"),
param('sah', "50 сылынан", "in 50 year"),
param('sah', "ааспыт нэдиэлэ", "1 week ago"),
# saq
param('saq', "duo", "0 day ago"),
param('saq', "taisere", "in 1 day"),
# sbp
param('sbp', "pamulaawu", "in 1 day"),
param('sbp', "ineng'uni", "0 day ago"),
# se
param('se', "51 minuhta maŋŋilit", "in 51 minute"),
param('se', "3 jahkki árat", "3 year ago"),
param('se', "ihttin", "in 1 day"),
# seh
param('seh', "manguana", "in 1 day"),
param('seh', "zuro", "1 day ago"),
# ses
param('ses', "suba", "in 1 day"),
param('ses', "hõo", "0 day ago"),
# sg
param('sg', "bîrï", "1 day ago"),
param('sg', "lâsô", "0 day ago"),
# shi-Latn
param('shi-Latn', "iḍlli", "1 day ago"),
param('shi-Latn', "askka 06:15 tifawt", "in 1 day 06:15 am"),
# shi-Tfng
param('shi-Tfng', "ⴰⵙⴽⴽⴰ", "in 1 day"),
param('shi-Tfng', "ⴰⵙⵙⴰ", "0 day ago"),
# shi
param('shi', "ⵉⴹⵍⵍⵉ", "1 day ago"),
# si
param('si', "තත්පර 14කින්", "in 14 second"),
param('si', "වසර 2කට පෙර", "2 year ago"),
param('si', "මෙම සතිය", "0 week ago"),
# sk
param('sk', "pred 11 týždňami", "11 week ago"),
param('sk', "o 25 rokov", "in 25 year"),
param('sk', "v tejto hodine", "0 hour ago"),
# sl
param('sl', "pred 4 dnevom", "4 day ago"),
param('sl', "čez 76 leto", "in 76 year"),
param('sl', "naslednji mesec", "in 1 month"),
# sn
param('sn', "mangwana", "in 1 day"),
param('sn', "nhasi", "0 day ago"),
# so
param('so', "berri", "in 1 day"),
param('so', "shalay", "1 day ago"),
# sq
param('sq', "pas 6 muajsh", "in 6 month"),
param('sq', "72 orë më parë", "72 hour ago"),
param('sq', "javën e ardhshme", "in 1 week"),
# sr-Cyrl
param('sr-Cyrl', "пре 5 година", "5 year ago"),
param('sr-Cyrl', "за 52 нед", "in 52 week"),
param('sr-Cyrl', "данас", "0 day ago"),
# sr-Latn
param('sr-Latn', "za 120 sekundi", "in 120 second"),
param('sr-Latn', "pre 365 dana", "365 day ago"),
param('sr-Latn', "prošle nedelje", "1 week ago"),
# sr
param('sr', "пре 40 сати", "40 hour ago"),
param('sr', "за 100 год", "in 100 year"),
param('sr', "овог месеца", "0 month ago"),
# sv
param('sv', "för 15 vecka sedan", "15 week ago"),
param('sv', "om 2 sekunder", "in 2 second"),
param('sv', "förra året", "1 year ago"),
# sw
param('sw', "sekunde 25 zilizopita", "25 second ago"),
param('sw', "miezi 5 iliyopita", "5 month ago"),
param('sw', "mwaka uliopita", "1 year ago"),
# ta
param('ta', "7 நாட்களுக்கு முன்", "7 day ago"),
param('ta', "45 ஆண்டுகளில்", "in 45 year"),
param('ta', "இப்போது", "0 second ago"),
# te
param('te', "12 గంటల క్రితం", "12 hour ago"),
param('te', "25 సంవత్సరాల్లో", "in 25 year"),
param('te', "గత వారం", "1 week ago"),
# teo
param('teo', "moi", "in 1 day"),
param('teo', "lolo", "0 day ago"),
# to
param('to', "miniti 'e 5 kuo'osi", "5 minute ago"),
param('to', "'i he ta'u 'e 6", "in 6 year"),
param('to', "'aneafi", "1 day ago"),
# tr
param('tr', "11 saat önce", "11 hour ago"),
param('tr', "10 yıl sonra", "in 10 year"),
param('tr', "geçen ay", "1 month ago"),
# twq
param('twq', "hõo", "0 day ago"),
param('twq', "suba", "in 1 day"),
# tzm
param('tzm', "assenaṭ", "1 day ago"),
param('tzm', "asekka", "in 1 day"),
# uk
param('uk', "18 хвилин тому", "18 minute ago"),
param('uk', "через 22 роки", "in 22 year"),
param('uk', "цього тижня", "0 week ago"),
param('uk', "півгодини тому", "30 minute ago"),
param('uk', "пів години тому", "30 minute ago"),
param('uk', "півроку тому", "6 month ago"),
param('uk', "за півтора року", "in 18 month"),
# uz-Cyrl
param('uz-Cyrl', "кейинги ой", "in 1 month"),
param('uz-Cyrl', "30 йил аввал", "30 year ago"),
param('uz-Cyrl', "59 сониядан сўнг", "in 59 second"),
# uz-Latn
param('uz-Latn', "3 haftadan keyin", "in 3 week"),
param('uz-Latn', "5 soat oldin", "5 hour ago"),
param('uz-Latn', "shu yil", "0 year ago"),
# uz
param('uz', "25 soat oldin", "25 hour ago"),
param('uz', "8 yildan keyin", "in 8 year"),
param('uz', "bugun", | |
self.filling_values = None
self.columns_with_null = None
if strategy not in ("mean", "median", "fix"):
raise ValueError("I don't know that type of strategy '%s' " % self.strategy)
    def fit(self, X, y=None):
        """Learn per-column filling values from X.

        The input's container type is remembered and enforced at
        transform time. Returns ``self``.
        """
        type_of_data = get_type(X)
        self._expected_type = type_of_data
        self.filling_values = {}
        self.columns_with_null = []
        self.columns_mapping = {}
        # Column slicing of sparse arrays is only efficient in CSC format.
        if type_of_data == DataTypes.SparseArray and not isinstance(X, sps.csc_matrix):
            X = sps.csc_matrix(X)
        for col, Xc in _gen_column_iterator(X, type_of_data=type_of_data):
            # Normalize the column to a plain 1-D numpy array.
            if type_of_data == DataTypes.SparseArray:
                Xca = Xc.todense().view(np.ndarray)[:, 0]
            elif type_of_data in (DataTypes.DataFrame, DataTypes.SparseDataFrame):
                Xca = Xc.values
            else:
                Xca = Xc
            # Here Xca is an array
            ii_not_null = ~pd.isnull(Xca)
            # For non-numeric dtypes, only entries that look like numbers
            # count as present.
            if Xca.dtype.kind not in ("f", "i"):
                ii_contain_number = _index_with_number(Xca)
                ii_not_null = np.logical_and(ii_not_null, ii_contain_number)
            any_not_null = ii_not_null.any()
            all_not_null = ii_not_null.all()
            if self.strategy == "fix":
                m = self.fix_value
            elif any_not_null:
                ### There are things that are NOT null
                if not self.allow_unseen_null and all_not_null:
                    m = None
                    # No need to compute mean/median because
                    # 1) I dont have any missing value in that column
                    # 2) I wont allow missing value in testing, if there weren't any missing value in train
                elif self.strategy == "mean":
                    m = Xca[ii_not_null].mean()
                elif self.strategy == "median":
                    m = np.median(Xca[ii_not_null])
                else:
                    raise ValueError("unknown strategy %s" % self.strategy)
            else:
                ### Column is null everywhere...
                m = self.fix_value
            if not all_not_null:
                self.columns_with_null.append(col)
            if m is not None:
                self.filling_values[col] = m
        # cols = _get_columns(X)
        self._Xcolumns = _get_columns(X)
        return self
    def transform(self, X):
        """Fill missing values learned at fit time.

        If ``add_is_null`` is True, a ``<col>_isnull`` indicator column is
        appended for every column that contained nulls during fit.
        Raises ValueError if not fitted, if X's container type differs from
        the one seen at fit time, or (when ``allow_unseen_null`` is False)
        if a column has unseen nulls.
        """
        type_of_data = get_type(X)
        if type_of_data != self._expected_type:
            raise ValueError("I'm expecting a type %s" % self._expected_type)
        if self.filling_values is None:
            raise ValueError("model isn't fitted yet")
        if self.copy_df:
            Xcopy = None
            # Delay the copy until actually needed: if nothing is missing,
            # no copy ever has to be made.
        else:
            Xcopy = X
        if self.add_is_null:
            new_columns = []
        # Column slicing of sparse arrays is only efficient in CSC format.
        if type_of_data == DataTypes.SparseArray and not isinstance(X, sps.csc_matrix):
            X = sps.csc_matrix(X) #
        for col, Xc in _gen_column_iterator(X, type_of_data=type_of_data):
            # if type_of_data == DataTypes.SparseArray:
            #    Xca = Xc.todense().view(np.ndarray)[:,0]
            # TODO : directly optimized way to get NaN index without converting to sparse
            if type_of_data == DataTypes.DataFrame:
                Xca = Xc.values
            elif type_of_data == DataTypes.SparseDataFrame:
                raise NotImplementedError("I didn't code it yet")
            else:
                Xca = Xc
            if type_of_data == DataTypes.SparseArray:
                ### WARNING: this only works for a CSC matrix!
                ii_null = Xca.indices[pd.isnull(Xca.data)]
                has_null = ii_null.shape[0] > 0
            # elif isinstance(Xca,sps.csr_matrix):
            # INDEXES of NON EMPTY things
            # Directly look within non empty things
            else:
                ii_null = pd.isnull(Xca)
                # For non-numeric dtypes, non-number entries count as null.
                if Xca.dtype.kind not in ("f", "i"):
                    ii_contain_number = _index_with_number(Xca)
                    ii_null = np.logical_or(ii_null, np.logical_not(ii_contain_number))
                has_null = ii_null.any()
            if has_null:
                if not self.allow_unseen_null and col not in self.columns_with_null:
                    raise ValueError(
                        "This column %s add a null value but it wasn't null anywhere in training set" % str(col)
                    )
                if Xcopy is None:
                    # Now a copy is explicitly needed, since the data will
                    # be modified in place below.
                    Xcopy = X.copy()
                    if type_of_data == DataTypes.SparseArray and not isinstance(X, sps.csc_matrix):
                        Xcopy = sps.csc_matrix(Xcopy) # coo_matrix can't be be subsetted
                if type_of_data in (DataTypes.DataFrame, DataTypes.SparseDataFrame):
                    Xcopy.loc[ii_null, col] = self.filling_values[col]
                    if Xcopy.dtypes[col].kind not in ("f", "i"):
                        Xcopy[col] = Xcopy[col].astype(np.number)
                else:
                    Xcopy[ii_null, col] = self.filling_values[col]
            if self.add_is_null and col in self.columns_with_null:
                if type_of_data in (DataTypes.DataFrame, DataTypes.SparseDataFrame):
                    if col + "_isnull" in list(X.columns):
                        raise ValueError("column %s already exists" % (col + "_isnull"))
                    new_columns.append(pd.DataFrame(1 * ii_null, index=X.index, columns=[col + "_isnull"]))
                elif type_of_data == DataTypes.SparseArray:
                    # Direct creation of a sparse vector of 1
                    _nb = ii_null.shape[0]
                    _data = np.ones(_nb, dtype=np.int32)
                    _col = np.zeros(_nb, dtype=np.int32)
                    _row = ii_null
                    new_columns.append(sps.csc_matrix((_data, (_row, _col)), shape=(X.shape[0], 1)))
                    # TODO : maybe use 'coo_matrix' ? (more efficient to concatenate after ?)
                    # sps.csr_matrix((np.array([1,1]),(np.array([1,4]),np.array([0,0])))).todense()
                else:
                    new_columns.append(1 * make2dimensions(ii_null))
        if self.add_is_null:
            if Xcopy is None:
                # No copy needed: the hstack below creates a new object anyway.
                Xcopy = X
            Xcopy = generic_hstack([Xcopy] + new_columns, output_type=type_of_data)
        else:
            if Xcopy is None:
                Xcopy = X # If I'm here, it means that nothing was actually Null... and so I don't need a copy
        return Xcopy
def get_feature_names(self, input_features=None):
    """Return the output column names, including the '_isnull' indicators.

    Parameters
    ----------
    input_features : list of str or None
        names of the input columns; defaults to the columns seen at fit
        time (``self._Xcolumns``)

    Returns
    -------
    list of str
    """
    cols = self._Xcolumns if input_features is None else input_features
    names = [str(c) for c in cols]
    if self.add_is_null:
        # one indicator per input column that contained nulls during fit
        names.extend(
            str(given) + "_isnull"
            for given, fitted in zip(cols, self._Xcolumns)
            if fitted in self.columns_with_null
        )
    return names
class NumImputer(ModelWrapper):
    """Missing value imputer for numerical features.

    Parameters
    ----------
    strategy : str, default = 'mean'
        how to fill missing value, possibilities ('mean', 'fix' or 'median')

    add_is_null : boolean, default = True
        if this is True an 'is_null' indicator column will be added to the
        result for each column that contained missing values during fit

    fix_value : float, default = 0
        the fix value to use if needed (only used when strategy == 'fix')

    allow_unseen_null : boolean, default = True
        if not True an error will be generated on testing data if a column has
        missing value in test but didn't have one in train

    columns_to_use : list of str or None
        the columns to use

    regex_match : boolean, default = False
        if True, use regex to match columns
    """

    def __init__(
        self,
        strategy="mean",
        add_is_null=True,
        fix_value=0,
        allow_unseen_null=True,
        columns_to_use=None,
        regex_match=False,
    ):
        # sklearn convention: store parameters verbatim, no validation here
        self.strategy = strategy
        self.add_is_null = add_is_null
        self.fix_value = fix_value
        self.allow_unseen_null = allow_unseen_null

        super(NumImputer, self).__init__(
            columns_to_use=columns_to_use,
            regex_match=regex_match,
            work_on_one_column_only=False,
            all_columns_at_once=True,
            accepted_input_types=None,
            column_prefix=None,
            desired_output_type=None,
            must_transform_to_get_features_name=False,
            dont_change_columns=False,
        )

    def _get_model(self, X, y=None):
        # underlying (unwrapped) imputer; copy_df=True so the wrapped model
        # never mutates the caller's data
        return _NumImputer(
            strategy=self.strategy,
            add_is_null=self.add_is_null,
            fix_value=self.fix_value,
            allow_unseen_null=self.allow_unseen_null,
            copy_df=True,
        )

    def can_cv_transform(self):
        """ this method tells if a given transformer can be used to return out-sample prediction

        If this returns True, a call to approx_cross_validation(self, X , y , return_predict = True, no_scoring = True, method = "transform") will works
        Otherwise it will generate an error

        If the model is part of a GraphPipeline it will tell the GraphPipeline object how to cross-validate this node

        Method should be overrided if needed

        Return
        ------
        boolean, True or False depending on the model
        """
        # the '_isnull' indicator columns depend on which nulls were seen in
        # each fold, so out-of-sample transform is only safe without them
        return not self.add_is_null
# class RandomForestTransformer(BaseEstimator,TransformerMixin):
# def __init__(self):
# pass
#
# def fit(self,X, y):
# self.rf = RandomForestClassifier()
# self.one_hot = OneHotEncoder()
#
# self.rf.fit(X,y)
# Xnode = self.rf.apply(X)
# self.one_hot.fit(Xnode)
# return self
#
# def transform(self,X):
# Xnode = self.rf.apply(X)
# result = self.one_hot.transform(Xnode)
# return result
#
# def fit_transform(self,X,y):
# self.rf = RandomForestClassifier()
# self.one_hot = OneHotEncoder()
#
# self.rf.fit(X,y)
# Xnode = self.rf.apply(X)
# result = self.one_hot.fit_transform(Xnode)
# return result
#
# In[]
# In[] : Scaler
class _CdfScaler(BaseEstimator, TransformerMixin):
""" Scaler using the CDF of a law """
def __init__(
    self,
    distribution="auto-kernel",
    output_distribution="uniform",
    copy=True,
    verbose=False,
    sampling_number=1000,
    random_state=None,
):
    """Scaler that maps each column through the CDF of a fitted law.

    Parameters are stored as-is (sklearn convention: no validation in
    ``__init__``; see ``_prepare_attributes``).
    """
    # law(s) to fit per column; a single str is broadcast to all columns
    self.distribution = distribution
    # target distribution of the transformed data (e.g. 'uniform')
    self.output_distribution = output_distribution
    # whether to copy the input before transforming
    self.copy = copy
    self.verbose = verbose
    # number of samples drawn when estimating the law numerically
    self.sampling_number = sampling_number
    self.random_state = random_state
def _prepare_attributes(self, X):
    """Build ``self.distributions``: one distribution name per column of X.

    A single string in ``self.distribution`` is broadcast to every column;
    a list/tuple must already have one entry per column.

    Raises
    ------
    ValueError
        if ``self.distribution`` is a list/tuple whose length does not
        match the number of columns of X
    TypeError
        if ``self.distribution`` has an unsupported type
    """
    nbcols = _nbcols(X)
    if isinstance(self.distribution, str):
        self.distributions = [self.distribution] * nbcols
    elif isinstance(self.distribution, (list, tuple)):
        # BUG FIX: the original tested len(self.distributions) — the
        # attribute being *built*, which doesn't exist yet — instead of the
        # len of the `distribution` parameter, so a mismatched list raised
        # AttributeError instead of the intended ValueError
        if len(self.distribution) != nbcols:
            raise ValueError("If distribution is a list it should have the same number of column has X")
        self.distributions = self.distribution
        # TODO : dico of distributions
    else:
        raise TypeError("I don't know how to handle that type of distribution %s" % type(self.distribution))
def _guess_distribution(self, X, type_of_data):
""" method to guess which distribution to use in the case of "auto-kernel" or "auto-param"
The guessing uses the following rules :
* if less than 5 differents values : use "none" <=> no transformation applied
* otherwise if "auto-kernel" : uses "kernel" <=> fit a kernel density
* otherwise if "auto-param" :
* if negative and positive values : use "normal" <=> fit a normal law
* if positive values only and values above 1 : use 'gamma' <=> fit a gamma law
* if values between 0 and 1 : use 'beta' <=> fit a betta law
"""
if len({"auto-param", "auto-kernel", "auto-rank", "auto-nonparam"}.intersection(self.distributions)) == 0:
return
modified_distributions = []
for dist, (col, Xc) in zip(self.distributions, _gen_column_iterator(X, type_of_data=type_of_data)):
if dist not in ("auto-param", "auto-kernel", "auto-rank", "auto-nonparam"):
modified_distributions.append(dist)
continue
if type_of_data == DataTypes.SparseArray:
Xca = Xc.data # => only non zero elements
# Xc.todense().view(np.ndarray)[:,0] # everything
elif type_of_data in (DataTypes.DataFrame, DataTypes.SparseDataFrame):
Xca = Xc.values
else:
Xca = Xc
### Less than 5 elements => no scaling ###
if len(np.unique(Xca)) <= 5:
modified_distributions.append("none")
else:
if dist == "auto-kernel":
guess = "kernel"
elif dist == "auto-rank":
guess = "rank"
elif dist == "auto-nonparam":
if len(Xca) <= 1000:
guess = "kernel"
# When too | |
# fiftyone/utils/kitti.py
"""
Utilities for working with datasets in
`KITTI format <http://www.cvlibs.net/datasets/kitti/eval_object.php>`_.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import csv
import logging
import os
import eta.core.utils as etau
import eta.core.web as etaw
import fiftyone.core.labels as fol
import fiftyone.core.metadata as fom
import fiftyone.utils.data as foud
logger = logging.getLogger(__name__)
class KITTIDetectionDatasetImporter(
    foud.LabeledImageDatasetImporter, foud.ImportPathsMixin
):
    """Importer for KITTI detection datasets stored on disk.

    See :ref:`this page <KITTIDetectionDataset-import>` for format details.

    Args:
        dataset_dir (None): the dataset directory. If omitted, ``data_path``
            and/or ``labels_path`` must be provided
        data_path (None): an optional parameter that enables explicit control
            over the location of the media. Can be any of the following:

            -   a folder name like ``"data"`` or ``"data/"`` specifying a
                subfolder of ``dataset_dir`` where the media files reside
            -   an absolute directory path where the media files reside. In
                this case, the ``dataset_dir`` has no effect on the location of
                the data
            -   a filename like ``"data.json"`` specifying the filename of the
                JSON data manifest file in ``dataset_dir``
            -   an absolute filepath specifying the location of the JSON data
                manifest. In this case, ``dataset_dir`` has no effect on the
                location of the data

            If None, this parameter will default to whichever of ``data/`` or
            ``data.json`` exists in the dataset directory
        labels_path (None): an optional parameter that enables explicit control
            over the location of the labels. Can be any of the following:

            -   a folder name like ``"labels"`` or ``"labels/"`` specifying the
                location of the labels in ``dataset_dir``
            -   an absolute folder path to the labels. In this case,
                ``dataset_dir`` has no effect on the location of the labels

            If None, the parameter will default to ``labels/``
        include_all_data (False): whether to generate samples for all images in
            the data directory (True) rather than only creating samples for
            images with label entries (False)
        extra_attrs (True): whether to load extra annotation attributes onto
            the imported labels. Supported values are:

            -   ``True``: load all extra attributes found
            -   ``False``: do not load extra attributes
            -   a name or list of names of specific attributes to load
        shuffle (False): whether to randomly shuffle the order in which the
            samples are imported
        seed (None): a random seed to use when shuffling
        max_samples (None): a maximum number of samples to import. By default,
            all samples are imported
    """

    def __init__(
        self,
        dataset_dir=None,
        data_path=None,
        labels_path=None,
        include_all_data=False,
        extra_attrs=True,
        shuffle=False,
        seed=None,
        max_samples=None,
    ):
        if dataset_dir is None and data_path is None and labels_path is None:
            raise ValueError(
                "At least one of `dataset_dir`, `data_path`, and "
                "`labels_path` must be provided"
            )

        # resolve relative folder/manifest specs against dataset_dir
        data_path = self._parse_data_path(
            dataset_dir=dataset_dir, data_path=data_path, default="data/",
        )
        labels_path = self._parse_labels_path(
            dataset_dir=dataset_dir,
            labels_path=labels_path,
            default="labels/",
        )

        super().__init__(
            dataset_dir=dataset_dir,
            shuffle=shuffle,
            seed=seed,
            max_samples=max_samples,
        )

        self.data_path = data_path
        self.labels_path = labels_path
        self.include_all_data = include_all_data
        self.extra_attrs = extra_attrs

        # populated by setup()
        self._image_paths_map = None
        self._labels_paths_map = None
        self._uuids = None
        self._iter_uuids = None
        self._num_samples = None

    def __iter__(self):
        self._iter_uuids = iter(self._uuids)
        return self

    def __len__(self):
        return self._num_samples

    def __next__(self):
        uuid = next(self._iter_uuids)

        try:
            image_path = self._image_paths_map[uuid]
        except KeyError:
            raise ValueError("No image found for sample '%s'" % uuid)

        image_metadata = fom.ImageMetadata.build_for(image_path)

        labels_path = self._labels_paths_map.get(uuid, None)
        if labels_path:
            # Labeled image: frame size is needed to convert the absolute
            # pixel coordinates in the TXT file to relative coordinates
            frame_size = (image_metadata.width, image_metadata.height)
            detections = load_kitti_detection_annotations(
                labels_path, frame_size, extra_attrs=self.extra_attrs
            )
        else:
            # Unlabeled image
            detections = None

        return image_path, image_metadata, detections

    @property
    def has_dataset_info(self):
        return False

    @property
    def has_image_metadata(self):
        return True

    @property
    def label_cls(self):
        return fol.Detections

    def setup(self):
        # uuid -> image path, for all media found under data_path
        self._image_paths_map = self._load_data_map(
            self.data_path, ignore_exts=True, recursive=True
        )

        if self.labels_path is not None and os.path.isdir(self.labels_path):
            # uuid -> label TXT path (uuid = filename without extension)
            self._labels_paths_map = {
                os.path.splitext(p)[0]: os.path.join(self.labels_path, p)
                for p in etau.list_files(self.labels_path, recursive=True)
            }
        else:
            self._labels_paths_map = {}

        uuids = set(self._labels_paths_map.keys())

        if self.include_all_data:
            uuids.update(self._image_paths_map.keys())

        # applies the shuffle/seed/max_samples settings from __init__
        self._uuids = self._preprocess_list(sorted(uuids))
        self._num_samples = len(self._uuids)

    @staticmethod
    def _get_num_samples(dataset_dir):
        # Used only by dataset zoo
        return len(etau.list_files(os.path.join(dataset_dir, "data")))
class KITTIDetectionDatasetExporter(
    foud.LabeledImageDatasetExporter, foud.ExportPathsMixin
):
    """Exporter that writes KITTI detection datasets to disk.

    See :ref:`this page <KITTIDetectionDataset-export>` for format details.

    Args:
        export_dir (None): the directory to write the export. This has no
            effect if ``data_path`` and ``labels_path`` are absolute paths
        data_path (None): an optional parameter that enables explicit control
            over the location of the exported media. Can be any of the
            following:

            -   a folder name like ``"data"`` or ``"data/"`` specifying a
                subfolder of ``export_dir`` in which to export the media
            -   an absolute directory path in which to export the media. In
                this case, the ``export_dir`` has no effect on the location of
                the data
            -   a JSON filename like ``"data.json"`` specifying the filename of
                the manifest file in ``export_dir`` generated when
                ``export_media`` is ``"manifest"``
            -   an absolute filepath specifying the location to write the JSON
                manifest file when ``export_media`` is ``"manifest"``. In this
                case, ``export_dir`` has no effect on the location of the data

            If None, the default value of this parameter will be chosen based
            on the value of the ``export_media`` parameter
        labels_path (None): an optional parameter that enables explicit control
            over the location of the exported labels. Can be any of the
            following:

            -   a folder name like ``"labels"`` or ``"labels/"`` specifying the
                location in ``export_dir`` in which to export the labels
            -   an absolute folder path to which to export the labels. In this
                case, the ``export_dir`` has no effect on the location of the
                labels

            If None, the labels will be exported into ``export_dir`` using the
            default folder name
        export_media (None): controls how to export the raw media. The
            supported values are:

            -   ``True``: copy all media files into the output directory
            -   ``False``: don't export media
            -   ``"move"``: move all media files into the output directory
            -   ``"symlink"``: create symlinks to the media files in the output
                directory
            -   ``"manifest"``: create a ``data.json`` in the output directory
                that maps UUIDs used in the labels files to the filepaths of
                the source media, rather than exporting the actual media

            If None, the default value of this parameter will be chosen based
            on the value of the ``data_path`` parameter
        image_format (None): the image format to use when writing in-memory
            images to disk. By default, ``fiftyone.config.default_image_ext``
            is used
    """

    def __init__(
        self,
        export_dir=None,
        data_path=None,
        labels_path=None,
        export_media=None,
        image_format=None,
    ):
        # resolve relative folder/manifest specs against export_dir; may also
        # infer the export_media mode from the shape of data_path
        data_path, export_media = self._parse_data_path(
            export_dir=export_dir,
            data_path=data_path,
            export_media=export_media,
            default="data/",
        )
        labels_path = self._parse_labels_path(
            export_dir=export_dir, labels_path=labels_path, default="labels/",
        )

        super().__init__(export_dir=export_dir)

        self.data_path = data_path
        self.labels_path = labels_path
        self.export_media = export_media
        self.image_format = image_format

        # created in setup()
        self._writer = None
        self._media_exporter = None

    @property
    def requires_image_metadata(self):
        return True

    @property
    def label_cls(self):
        return fol.Detections

    def setup(self):
        self._writer = KITTIAnnotationWriter()
        self._media_exporter = foud.ImageExporter(
            self.export_media,
            export_path=self.data_path,
            default_ext=self.image_format,
            ignore_exts=True,
        )
        self._media_exporter.setup()

        etau.ensure_dir(self.labels_path)

    def export_sample(self, image_or_path, detections, metadata=None):
        _, uuid = self._media_exporter.export(image_or_path)

        if detections is None:
            # unlabeled sample: media exported, no annotation file written
            return

        out_anno_path = os.path.join(self.labels_path, uuid + ".txt")

        if metadata is None:
            # frame size is required to write absolute pixel coordinates
            metadata = fom.ImageMetadata.build_for(image_or_path)

        self._writer.write(detections, metadata, out_anno_path)

    def close(self, *args):
        self._media_exporter.close()
class KITTIAnnotationWriter(object):
    """Writes object detections to disk in KITTI detection format.

    See :ref:`this page <KITTIDetectionDataset-export>` for format details.
    """

    def write(self, detections, metadata, txt_path):
        """Writes the detections to disk.

        Args:
            detections: a :class:`fiftyone.core.labels.Detections` instance
            metadata: a :class:`fiftyone.core.metadata.ImageMetadata` instance
            txt_path: the path to write the annotation TXT file
        """
        frame_size = (metadata.width, metadata.height)
        rows = [
            _make_kitti_detection_row(detection, frame_size)
            for detection in detections.detections
        ]
        etau.write_file("\n".join(rows), txt_path)
def load_kitti_detection_annotations(txt_path, frame_size, extra_attrs=True):
    """Loads the KITTI detection annotations from the given TXT file.

    See :ref:`this page <KITTIDetectionDataset-import>` for format details.

    Args:
        txt_path: the path to the annotations TXT file
        frame_size: the ``(width, height)`` of the image
        extra_attrs (True): whether to load extra annotation attributes onto
            the imported labels. Supported values are:

            -   ``True``: load all extra attributes found
            -   ``False``: do not load extra attributes
            -   a name or list of names of specific attributes to load

    Returns:
        a :class:`fiftyone.core.labels.Detections` instance
    """
    # FIX: use identity checks rather than `== True` / `== False` (PEP 8,
    # E712); equality would also silently accept truthy non-bool values
    # like 1 that the documented API does not support
    if extra_attrs is True:
        extra_attrs = {
            "truncated",
            "occluded",
            "alpha",
            "dimensions",
            "location",
            "rotation_y",
        }
    elif extra_attrs is False:
        extra_attrs = set()
    elif etau.is_str(extra_attrs):
        extra_attrs = {extra_attrs}
    else:
        extra_attrs = set(extra_attrs)

    detections = []
    with open(txt_path) as f:
        # KITTI rows are space-delimited fields, one object per line
        reader = csv.reader(f, delimiter=" ")
        for row in reader:
            detections.append(
                _parse_kitti_detection_row(row, frame_size, extra_attrs)
            )

    return fol.Detections(detections=detections)
# KITTI object-detection archives hosted on the official AWS mirror
_LABELS_ZIP_URL = (
    "https://s3.eu-central-1.amazonaws.com/avg-kitti/data_object_label_2.zip"
)
_IMAGES_ZIP_URL = (
    "https://s3.eu-central-1.amazonaws.com/avg-kitti/data_object_image_2.zip"
)

# unused: devkit and calibration archives, kept for reference only
_DEVKIT_ZIP_URL = (
    "https://s3.eu-central-1.amazonaws.com/avg-kitti/devkit_object.zip"
)
_CALIB_ZIP_URL = (
    "https://s3.eu-central-1.amazonaws.com/avg-kitti/data_object_calib.zip"
)
def download_kitti_detection_dataset(
dataset_dir, overwrite=True, cleanup=True
):
"""Downloads the KITTI object detection dataset from the web.
The | |
:param values: matrix of values if range given, if a value is None its unchanged
:param majordim: major dimension of given data
"""
if cell_list:
crange = 'A1:' + str(format_addr((self.rows, self.cols)))
# @TODO fit the minimum rectangle than whole array
values = [[None for x in range(self.cols)] for y in range(self.rows)]
for cell in cell_list:
values[cell.col-1][cell.row-1] = cell.value
body = dict()
estimate_size = False
if type(crange) == str:
if crange.find(':') == -1:
estimate_size = True
elif type(crange) == tuple:
estimate_size = True
else:
raise InvalidArgumentValue('crange')
if estimate_size:
start_r_tuple = format_addr(crange, output='tuple')
if majordim == 'ROWS':
end_r_tuple = (start_r_tuple[0]+len(values), start_r_tuple[1]+len(values[0]))
else:
end_r_tuple = (start_r_tuple[0] + len(values[0]), start_r_tuple[1] + len(values))
body['range'] = self._get_range(crange, format_addr(end_r_tuple))
else:
body['range'] = self._get_range(*crange.split(':'))
body['majorDimension'] = majordim
body['values'] = values
self.client.sh_update_range(self.spreadsheet.id, body, self.spreadsheet.batch_mode)
def update_col(self, index, values):
    """Update an existing column with the given values.

    :param index: index of the column to update (1-based)
    :param values: list of values to write down the column
    """
    first = format_addr((1, index), 'label')
    last = format_addr((len(values), index), 'label')
    self.update_cells(crange=first + ":" + last, values=[values], majordim='COLUMNS')
def update_row(self, index, values):
    """Update an existing row with the given values.

    :param index: index of the row to update (1-based)
    :param values: list of values to write across the row
    """
    cell_range = "{}:{}".format(
        format_addr((index, 1), 'label'),
        format_addr((index, len(values)), 'label'),
    )
    self.update_cells(crange=cell_range, values=[values], majordim='ROWS')
def resize(self, rows=None, cols=None):
    """Resize the worksheet to the given dimensions.

    :param rows: new number of rows
    :param cols: new number of columns
    """
    # unlink first so both dimension changes are pushed together on link()
    self.unlink()
    self.rows = rows
    self.cols = cols
    self.link()
def add_rows(self, rows):
    """Append extra rows to the worksheet.

    :param rows: number of rows to add
    """
    self.resize(rows=self.rows + rows, cols=self.cols)
def add_cols(self, cols):
    """Append extra columns to the worksheet.

    :param cols: number of columns to add
    """
    self.resize(rows=self.rows, cols=self.cols + cols)
def delete_cols(self, index, number=1):
    """Delete a number of columns starting at ``index`` (1-based).

    :param index: index of the first column to delete
    :param number: number of columns to delete
    :raises InvalidArgumentValue: if ``number`` is less than 1
    """
    if number < 1:
        raise InvalidArgumentValue('number')
    start = index - 1  # the API uses 0-based, end-exclusive indices
    request = {'deleteDimension': {'range': {
        'sheetId': self.id,
        'dimension': 'COLUMNS',
        'startIndex': start,
        'endIndex': start + number,
    }}}
    self.client.sh_batch_update(self.spreadsheet.id, request, batch=self.spreadsheet.batch_mode)
    self.jsonSheet['properties']['gridProperties']['columnCount'] = self.cols - number
def delete_rows(self, index, number=1):
    """Delete a number of rows starting at ``index`` (1-based).

    :param index: index of the first row to delete
    :param number: number of rows to delete
    :raises InvalidArgumentValue: if ``number`` is less than 1
    """
    index -= 1  # the API uses 0-based, end-exclusive indices
    if number < 1:
        # FIX: raise an instance naming the offending argument, consistent
        # with delete_cols (the original raised the bare exception class
        # with no detail)
        raise InvalidArgumentValue('number')
    request = {'deleteDimension': {'range': {'sheetId': self.id, 'dimension': 'ROWS',
                                             'endIndex': (index + number), 'startIndex': index}}}
    self.client.sh_batch_update(self.spreadsheet.id, request, batch=self.spreadsheet.batch_mode)
    self.jsonSheet['properties']['gridProperties']['rowCount'] = self.rows - number
def insert_cols(self, col, number=1, values=None, inherit=False):
    """Insert columns after column ``col`` and optionally fill them.

    Lengthens the worksheet if there are more values than rows.

    :param col: column after which the new columns should be inserted
    :param number: number of columns to insert
    :param values: values matrix to fill into the new column
    :param inherit: whether dimension properties are extended from the
        dimensions before (True) or after (False) the inserted ones
    """
    request = {'insertDimension': {
        'inheritFromBefore': inherit,
        'range': {
            'sheetId': self.id,
            'dimension': 'COLUMNS',
            'startIndex': col,
            'endIndex': col + number,
        },
    }}
    self.client.sh_batch_update(self.spreadsheet.id, request, batch=self.spreadsheet.batch_mode)
    self.jsonSheet['properties']['gridProperties']['columnCount'] = self.cols + number
    # filling is only supported for a single inserted column
    if values and number == 1:
        if len(values) > self.rows:
            self.rows = len(values)
        self.update_col(col + 1, values)
def insert_rows(self, row, number=1, values=None, inherit=False):
    """Insert rows after row ``row`` and optionally fill them.

    Widens the worksheet if there are more values than columns.

    :param row: row after which the new rows should be inserted
    :param number: number of rows to insert
    :param values: values matrix to fill into the new row
    :param inherit: whether dimension properties are extended from the
        dimensions before (True) or after (False) the inserted ones
    """
    request = {'insertDimension': {
        'inheritFromBefore': inherit,
        'range': {
            'sheetId': self.id,
            'dimension': 'ROWS',
            'startIndex': row,
            'endIndex': row + number,
        },
    }}
    self.client.sh_batch_update(self.spreadsheet.id, request, batch=self.spreadsheet.batch_mode)
    self.jsonSheet['properties']['gridProperties']['rowCount'] = self.rows + number
    # @TODO fore multiple rows inserted change
    if values and number == 1:
        if len(values) > self.cols:
            self.cols = len(values)
        self.update_row(row + 1, values)
def clear(self, start='A1', end=None):
    """Clear the whole worksheet, or only the given range.

    :param start: top-left cell of the range to clear
    :param end: bottom-right cell of the range; defaults to the sheet's
        last cell
    """
    if not end:
        end = (self.rows, self.cols)
    body = {'ranges': [self._get_range(start, end)]}
    self.client.sh_batch_clear(self.spreadsheet.id, body)
def append_row(self, start='A1', end=None, values=None):
    """Search for a table in the given range and append ``values`` to it.

    :param start: start cell of the range
    :param end: end cell of the range; defaults to the sheet's last cell
    :param values: list of values for the new row (or matrix of rows)
    """
    if type(values[0]) != list:
        # promote a single row to a one-row matrix
        values = [values]
    if not end:
        end = (self.rows, self.cols)
    # NOTE(review): 'rranage' is the (misspelled) keyword the client API
    # expects — do not "fix" it here without changing the client too
    self.client.sh_append(self.spreadsheet.id, body={"values": values}, rranage=self._get_range(start, end))
def find(self, query, replace=None, force_fetch=True):
    """Find all cells matching ``query``, optionally replacing their value.

    :param query: a text string or compiled regular expression
    :param replace: if given, this value is written into every matching cell
    :param force_fetch: update the local data grid before searching, even if
        the file is not modified
    :return: list of matching cells
    """
    self._update_grid(force_fetch)
    if isinstance(query, type(re.compile(""))):
        predicate = lambda cell: query.search(cell.value)
    else:
        predicate = lambda cell: cell.value == query
    matches = []
    for row in self.data_grid:
        matches.extend(cell for cell in row if predicate(cell))
    if replace:
        for cell in matches:
            cell.value = replace
    return matches
def create_named_range(self, name, start, end):
    """Create a named range covering ``start``..``end`` in this sheet.

    :param name: name of the named range
    :param start: top-left cell address, 1-based (row, col)
    :param end: bottom-right cell address, 1-based (row, col)
    :return: :class:`DataRange`
    """
    # the API expects 0-based, end-exclusive grid indices
    grid_range = {
        "sheetId": self.id,
        "startRowIndex": start[0] - 1,
        "endRowIndex": end[0],
        "startColumnIndex": start[1] - 1,
        "endColumnIndex": end[1],
    }
    named_range = {"name": name, "range": grid_range}
    request = {"addNamedRange": {"namedRange": named_range}}
    self.client.sh_batch_update(self.spreadsheet.id, request, batch=self.spreadsheet.batch_mode)
    self.spreadsheet.named_ranges.append(named_range)
    return DataRange(start, end, self, name)
def get_named_range(self, name):
    """Retrieve a named range of this sheet by name.

    :param name: name of the named range to be retrieved
    :return: :class:`DataRange`
    :raises RangeNotFound: if no named range with this name exists on this
        sheet
    """
    matches = [x for x in self.spreadsheet.named_ranges
               if x['name'] == name and x['range']['sheetId'] == self.id]
    if len(matches) == 0:
        # fetch
        raise RangeNotFound
    # FIX: the original indexed the *list* with string keys (TypeError at
    # runtime) and never returned the constructed DataRange; it also reused
    # the row index for the column coordinate in both tuples
    nrange = matches[0]
    # inverse of the mapping used in create_named_range: the API stores
    # 0-based, end-exclusive indices, DataRange takes 1-based inclusive ones
    return DataRange(
        start=(nrange['range']['startRowIndex'] + 1,
               nrange['range']['startColumnIndex'] + 1),
        end=(nrange['range']['endRowIndex'],
             nrange['range']['endColumnIndex']),
        name_id=nrange['namedRangeId'],
        worksheet=self,
    )
def delete_named_range(self, name):
    """Delete a named range by name.

    .. note:: not implemented yet — currently a no-op.

    :param name: name of named range to be deleted
    """
    pass
# @TODO optimize with unlink
def set_dataframe(self, df, start, copy_index=False, copy_head=True, fit=False, escape_formulae=False):
    """Write the values of a pandas dataframe at cell ``start``.

    :param df: pandas dataframe
    :param start: top-left cell address from where values are inserted
    :param copy_index: if the index should be copied (multi index supported)
    :param copy_head: if headers should be copied
    :param fit: if the worksheet should be resized to fit the dataframe
    :param escape_formulae: if any value starts with an equals sign =, it will
        be prefixed with an apostrophe ', to avoid being interpreted as a
        formula
    """
    start = format_addr(start, 'tuple')
    values = df.values.tolist()
    (df_rows, df_cols) = df.shape

    if copy_index:
        if isinstance(df.index, MultiIndex):
            # prepend every level of the multi-index, innermost level first
            for i, indexes in enumerate(df.index):
                for index_item in reversed(indexes):
                    values[i].insert(0, index_item)
            df_cols += len(df.index[0])
        else:
            for i, val in enumerate(df.index):
                values[i].insert(0, val)
            df_cols += 1

    if copy_head:
        head = []
        # pad the header row with blanks above the index column(s)
        if isinstance(df.index, MultiIndex) and copy_index:
            head = [""] * len(df.index[0])
        elif copy_index:
            head = [""]
        head.extend(df.columns.tolist())
        values.insert(0, head)
        df_rows += 1

    end = format_addr((start[0] + df_rows, start[1] + df_cols))

    if fit:
        self.cols = start[1] - 1 + df_cols
        self.rows = start[0] - 1 + df_rows
    # FIX: removed stray debug print() calls that dumped the sheet
    # dimensions and the entire data matrix to stdout on every call

    # @TODO optimize this
    if escape_formulae:
        for row in values:
            for i in range(len(row)):
                if type(row[i]) == str and row[i].startswith('='):
                    row[i] = "'" + str(row[i])

    crange = format_addr(start) + ':' + end
    self.update_cells(crange=crange, values=values)
def get_as_df(self, has_header=True, index_colum=None, start=None, end=None, numerize=True, empty_value=''):
"""
get value of worksheet as a pandas dataframe
:param has_header: If is True intrept first row as DF header
:param index_colum: worksheet column number to use as DF index
:param numerize: If True, cell values will be numerized
:param empty_value: value used to indicate empty cell value
:param start: top left cell of dataframe, if not set whole sheet will be fetched
:param end: bottom right cell of dataframe, if not set whole sheet will be fetched
:returns: pandas.Dataframe
"""
if not DataFrame:
raise ImportError("pandas")
if start is not None and end is not None:
values = self.get_values(start, end, include_empty=True)
else:
values = self.get_all_values(returnas='matrix', include_empty=True)
if numerize:
values = [numericise_all(row[:len(values[0])], empty_value) for row in values]
if has_header:
keys = values[0]
values = [row[:len(values[0])] for row in values[1:]]
df = DataFrame(values, columns=keys)
else:
df = DataFrame(values)
if index_colum:
if index_colum < 1 or index_colum > len(df.columns):
raise ValueError("index_column %s not found" % index_colum)
| |
If True, the regressors X are normalized
solver : {'auto', 'dense_cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'dense_cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a direct regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
             copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
             solver="auto"):
    # forward all ridge hyper-parameters to the regression base class;
    # only class_weight is specific to the classifier
    super(RidgeClassifier, self).__init__(alpha=alpha,
            fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
    self.class_weight = class_weight
def fit(self, X, y, solver=None):
    """Fit Ridge regression model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples,n_features]
        Training data

    y : array-like, shape = [n_samples]
        Target values

    solver : str, optional
        Deprecated; pass ``solver`` to the constructor instead.

    Returns
    -------
    self : returns an instance of self.
    """
    if self.class_weight is None:
        class_weight = {}
    else:
        class_weight = self.class_weight

    if solver is None:
        solver = self.solver
    else:
        warnings.warn("""solver option in fit is deprecated and will be
                removed in v0.14.""")
    # per-sample weights derived from the class weights (default 1.0)
    sample_weight_classes = np.array([class_weight.get(k, 1.0) for k in y])
    # one-vs-all: each class becomes a +1/-1 column of Y, handled by the
    # multi-target support in the ridge regression base class
    self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
    Y = self._label_binarizer.fit_transform(y)
    _BaseRidge.fit(self, X, Y, solver=solver,
                   sample_weight=sample_weight_classes)
    return self
@property
def classes_(self):
    # class labels discovered by the LabelBinarizer during fit()
    return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0], fit_intercept=True,
             normalize=False, score_func=None, loss_func=None,
             copy_X=True, gcv_mode=None, store_cv_values=False):
    # NOTE(review): mutable default for `alphas` — harmless here because it
    # is immediately copied by np.asarray, but a tuple would be cleaner
    self.alphas = np.asarray(alphas)
    self.fit_intercept = fit_intercept
    self.normalize = normalize
    self.score_func = score_func
    self.loss_func = loss_func
    self.copy_X = copy_X
    # 'eigen', 'svd', or None/'auto' (chosen from the data shape in fit)
    self.gcv_mode = gcv_mode
    # if True, fit() keeps the per-alpha leave-one-out values/errors
    self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
    """Eigendecompose the kernel matrix K = X X^T for the 'eigen' GCV path."""
    # K is usually very dense even when X is very sparse
    gram = safe_sparse_dot(X, X.T, dense_output=True)
    eigvals, eigvecs = linalg.eigh(gram)
    return eigvals, eigvecs, np.dot(eigvecs.T, y)
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
    """Leave-one-out squared errors for one alpha (eigendecomposition path)."""
    # apply G = Q diag(w) Q^T to y, and take its diagonal, without ever
    # materialising G
    w = 1.0 / (v + alpha)
    c = np.dot(Q, self._diag_dot(w, QT_y))
    G_diag = self._decomp_diag(w, Q)
    if len(y.shape) != 1:
        # y is 2-d: broadcast the diagonal over the targets axis
        G_diag = G_diag[:, np.newaxis]
    looe = c / G_diag
    return looe ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
    """Leave-one-out predictions for one alpha (eigendecomposition path)."""
    # apply G = Q diag(w) Q^T to y, and take its diagonal, without ever
    # materialising G
    w = 1.0 / (v + alpha)
    c = np.dot(Q, self._diag_dot(w, QT_y))
    G_diag = self._decomp_diag(w, Q)
    if len(y.shape) != 1:
        # y is 2-d: broadcast the diagonal over the targets axis
        G_diag = G_diag[:, np.newaxis]
    return y - c / G_diag, c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X) and hasattr(X, 'toarray'):
X = X.toarray()
U, s, _ = np.linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
    """Leave-one-out squared errors for one alpha (SVD path)."""
    # w encodes (V + alpha I)^-1 - alpha^-1; the alpha^-1 terms are added
    # back explicitly so the null space of X is handled correctly
    inv_alpha = alpha ** -1
    w = ((v + alpha) ** -1) - inv_alpha
    c = np.dot(U, self._diag_dot(w, UT_y)) + inv_alpha * y
    G_diag = self._decomp_diag(w, U) + inv_alpha
    if len(y.shape) != 1:
        # y is 2-d: broadcast the diagonal over the targets axis
        G_diag = G_diag[:, np.newaxis]
    return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
    """Leave-one-out predictions for one alpha ('svd' path)."""
    alpha_inv = alpha ** -1
    w = ((v + alpha) ** -1) - alpha_inv
    c = np.dot(U, self._diag_dot(w, UT_y)) + alpha_inv * y
    G_diag = self._decomp_diag(w, U) + alpha_inv
    if y.ndim != 1:
        # Handle case where y is 2-d: broadcast over target columns.
        G_diag = G_diag[:, np.newaxis]
    return y - (c / G_diag), c
def fit(self, X, y, sample_weight=1.0):
    """Fit Ridge regression model with leave-one-out generalized CV.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training data
    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values
    sample_weight : float or array-like of shape [n_samples]
        Sample weight

    Returns
    -------
    self : Returns self.
    """
    # BUGFIX: ``np.float`` was deprecated (NumPy 1.20) and removed
    # (NumPy 1.24); the builtin ``float`` requests the same float64 dtype.
    X = safe_asarray(X, dtype=float)
    y = np.asarray(y, dtype=float)
    n_samples, n_features = X.shape
    X, y, X_mean, y_mean, X_std = LinearModel._center_data(
        X, y, self.fit_intercept, self.normalize, self.copy_X)
    gcv_mode = self.gcv_mode
    # Non-scalar sample_weight (ndim > 0) forces the eigen path below.
    with_sw = len(np.shape(sample_weight))
    if gcv_mode is None or gcv_mode == 'auto':
        # 'eigen' decomposes the n_samples x n_samples Gram matrix,
        # 'svd' the n_samples x n_features design matrix: pick the
        # cheaper one for the data at hand.
        if n_features > n_samples or with_sw:
            gcv_mode = 'eigen'
        else:
            gcv_mode = 'svd'
    elif gcv_mode == "svd" and with_sw:
        # FIXME non-uniform sample weights not yet supported
        warnings.warn("non-uniform sample weights unsupported for svd, "
                      "forcing usage of eigen")
        gcv_mode = 'eigen'
    if gcv_mode == 'eigen':
        _pre_compute = self._pre_compute
        _errors = self._errors
        _values = self._values
    elif gcv_mode == 'svd':
        # assert n_samples >= n_features
        _pre_compute = self._pre_compute_svd
        _errors = self._errors_svd
        _values = self._values_svd
    else:
        raise ValueError('bad gcv_mode "%s"' % gcv_mode)
    v, Q, QT_y = _pre_compute(X, y)
    n_y = 1 if len(y.shape) == 1 else y.shape[1]
    cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
    C = []
    # With no custom score/loss, select alpha on squared LOO residuals.
    error = self.score_func is None and self.loss_func is None
    for i, alpha in enumerate(self.alphas):
        if error:
            out, c = _errors(sample_weight * alpha, y, v, Q, QT_y)
        else:
            out, c = _values(sample_weight * alpha, y, v, Q, QT_y)
        cv_values[:, i] = out.ravel()
        C.append(c)
    if error:
        best = cv_values.mean(axis=0).argmin()
    else:
        # A score_func is maximized, a loss_func is minimized.
        func = self.score_func if self.score_func else self.loss_func
        out = [func(y.ravel(), cv_values[:, i])
               for i in range(len(self.alphas))]
        best = np.argmax(out) if self.score_func else np.argmin(out)
    self.alpha_ = self.alphas[best]
    self.dual_coef_ = C[best]
    self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
    self._set_intercept(X_mean, y_mean, X_std)
    if self.store_cv_values:
        if len(y.shape) == 1:
            cv_values_shape = n_samples, len(self.alphas)
        else:
            cv_values_shape = n_samples, n_y, len(self.alphas)
        self.cv_values_ = cv_values.reshape(cv_values_shape)
    return self
@property
def best_alpha(self):
    """Deprecated alias for ``alpha_`` (kept for pre-0.12 callers).

    Emits a ``DeprecationWarning`` pointing at the caller and returns
    the selected regularization strength.
    """
    # BUGFIX: the original string concatenation lacked a trailing space,
    # producing the garbled message "...is deprecatedsince version 0.12".
    warnings.warn("Use alpha_. Using best_alpha is deprecated "
                  "since version 0.12, and backward compatibility "
                  "won't be maintained from version 0.14 onward. ",
                  DeprecationWarning, stacklevel=2)
    return self.alpha_
class _BaseRidgeCV(LinearModel):
    """Shared plumbing for ridge cross-validation estimators.

    With ``cv=None`` the efficient leave-one-out GCV path (_RidgeGCV)
    is used; otherwise a grid search over ``alphas`` is run with the
    requested cross-validation splitter.
    """

    def __init__(self, alphas=np.array([0.1, 1.0, 10.0]),
                 fit_intercept=True, normalize=False, score_func=None,
                 loss_func=None, cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.score_func = score_func
        self.loss_func = loss_func
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def fit(self, X, y, sample_weight=1.0):
        """Fit Ridge regression model

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : Returns self.
        """
        if self.cv is None:
            # Leave-one-out GCV: cheap closed form, no explicit splits.
            gcv = _RidgeGCV(self.alphas,
                            fit_intercept=self.fit_intercept,
                            normalize=self.normalize,
                            score_func=self.score_func,
                            loss_func=self.loss_func,
                            gcv_mode=self.gcv_mode,
                            store_cv_values=self.store_cv_values)
            gcv.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = gcv.alpha_
            if self.store_cv_values:
                self.cv_values_ = gcv.cv_values_
            fitted = gcv
        else:
            if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 " are incompatible")
            # FIXME: sample_weight must be split into training/validation
            # data too!
            # fit_params = {'sample_weight' : sample_weight}
            search = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
                                  {'alpha': self.alphas},
                                  fit_params={}, cv=self.cv)
            search.fit(X, y)
            fitted = search.best_estimator_
            self.alpha_ = fitted.alpha
        self.coef_ = fitted.coef_
        self.intercept_ = fitted.intercept_
        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient | |
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from climateeconomics.core.core_witness.climateeco_discipline import ClimateEcoDiscipline
from climateeconomics.core.core_witness.population_model import Population
from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart
from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter
from os.path import join, dirname
from pathlib import Path
from copy import deepcopy
import pandas as pd
import numpy as np
class PopulationDiscipline(ClimateEcoDiscipline):
    """World population evolution discipline (WITNESS population model)."""
    # ontology information
    _ontology_data = {
        'label': 'WITNESS Population Model',
        'type': 'Research',
        'source': 'SoSTrades Project',
        'validated': '',
        'validated_by': 'SoSTrades Project',
        'last_modification_date': '',
        'category': '',
        'definition': '',
        'icon': 'fas fa-users fa-fw',
        'version': '',
    }
    # Historical + projection horizon used for the default year range.
    years = np.arange(1960, 2101)
    list_years = years.tolist()
    # Calibration data shipped with the package (three levels up, /data).
    global_data_dir = join(Path(__file__).parents[3], 'data')
    # Initial population pyramid (2020) used as the model starting point.
    pop_init_df = pd.read_csv(
        join(global_data_dir, 'population_by_age_2020.csv'))
    # Fitted death-rate parameters per age range.
    default_death_rate_params_df = pd.read_csv(
        join(global_data_dir, 'death_rate_params_v2.csv'))
    # Provided by WHO. (2014). Quantitative risk assessment of the effects of climate
    # change on selected causes of death, 2030s and 2050s. Geneva:
    # World Health Organization.
    default_climate_mortality_param_df = pd.read_csv(
        join(global_data_dir, 'climate_additional_deaths_V2.csv'))
    # ADD DICTIONARY OF VALUES FOR DEATH RATE
    # Discipline inputs; the inline numeric comments below each birth-rate
    # parameter are alternative calibration values kept for reference.
    DESC_IN = {
        'year_start': ClimateEcoDiscipline.YEAR_START_DESC_IN,
        'year_end': ClimateEcoDiscipline.YEAR_END_DESC_IN,
        'time_step': ClimateEcoDiscipline.TIMESTEP_DESC_IN,
        'population_start': {'type': 'dataframe', 'default': pop_init_df, 'unit': 'millions of people'},
        'economics_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness'},
        'temperature_df': {'type': 'dataframe', 'visibility': 'Shared', 'namespace': 'ns_witness', 'unit': '°C'},
        'climate_mortality_param_df': {'type': 'dataframe', 'default': default_climate_mortality_param_df, 'user_level': 3, 'unit': '-'},
        'calibration_temperature_increase': {'type': 'float', 'default': 2.5, 'user_level': 3 , 'unit': '°C'},
        'theta': {'type': 'float', 'default': 2, 'user_level': 3, 'unit': '-'},
        'death_rate_param': {'type': 'dataframe', 'default': default_death_rate_params_df, 'user_level': 3, 'unit': '-'},
        'birth_rate_upper': {'type': 'float', 'default': 1.12545946e-01, 'user_level': 3, 'unit': '-'},
        # 2.2e-2
        'birth_rate_lower': {'type': 'float', 'default': 2.02192894e-02, 'user_level': 3, 'unit': '-'},
        # 1.92403581e-04
        'birth_rate_delta': {'type': 'float', 'default': 6.19058508e-04, 'user_level': 3, 'unit': '-'},
        # 4.03359157e+03
        'birth_rate_phi': {'type': 'float', 'default': 4.03360000e+03, 'user_level': 3, 'unit': '-'},
        # 3.93860555e-01
        'birth_rate_nu': {'type': 'float', 'default': 1.75808789e-01, 'user_level': 3, 'unit': '-'},
        'lower_knowledge': {'type': 'float', 'default': 10, 'user_level': 3, 'unit': '%'},
        'upper_knowledge': {'type': 'float', 'default': 100, 'user_level': 3, 'unit': '%'},
        'delta_knowledge': {'type': 'float', 'default': 0.0293357, 'user_level': 3, 'unit': '-'},
        'phi_knowledge': {'type': 'float', 'default': 149.7919, 'user_level': 3, 'unit': '-'},
        'nu_knowledge': {'type': 'float', 'default': 1.144062855, 'user_level': 3, 'unit': '-'},
        'constant_birthrate_know': {'type': 'float', 'default': 1.99999838e-02, 'user_level': 3, 'unit': '-'},
        'alpha_birthrate_know': {'type': 'float', 'default': 1.02007061e-01, 'user_level': 3, 'unit': '-'},
        'beta_birthrate_know': {'type': 'float', 'default': 8.01923418e-01, 'user_level': 3, 'unit': '-'},
        'share_know_birthrate': {'type': 'float', 'default': 7.89207064e-01, 'user_level': 3, 'unit': '-'},
    }
    # Discipline outputs.
    # NOTE(review): 'population_df' is declared in millions here while
    # run() rescales totals by model.million — confirm the intended unit.
    DESC_OUT = {
        'population_df': {'type': 'dataframe', 'unit': 'millions of people', 'visibility': 'Shared', 'namespace': 'ns_witness'},
        'working_age_population_df': {'type': 'dataframe', 'unit': 'millions of people', 'visibility': 'Shared',
                                      'namespace': 'ns_witness'},
        'population_detail_df': {'type': 'dataframe', 'unit': 'people'},
        'birth_rate_df': {'type': 'dataframe', 'unit': '-'},
        'death_rate_dict': {'type': 'dict', 'unit': '-'},
        'death_dict': {'type': 'dict', 'unit': 'people' },
        'birth_df': {'type': 'dataframe', 'unit': 'people'},
        'life_expectancy_df': {'type': 'dataframe', 'unit': 'age'}
    }
    _maturity = 'Research'
def init_execution(self):
    """Instantiate the Population model from the discipline inputs."""
    inputs = self.get_sosdisc_inputs()
    self.model = Population(inputs)
def run(self):
    """Run the population model and publish its outputs."""
    inputs = self.get_sosdisc_inputs()
    (population_detail_df, birth_rate_df, death_rate_dict, birth_df,
     death_dict, life_expectancy_df, working_age_population_df) = \
        self.model.compute(inputs)
    million = self.model.million
    # Aggregate view of the detailed frame, rescaled by model.million.
    population_df = population_detail_df[['years', 'total']].rename(
        columns={"total": "population"})
    population_df['population'] = population_df['population'] / million
    # Keep the raw 15-70 head-count in the detailed frame, then rescale
    # the working-age output itself.
    population_detail_df['population_1570'] = working_age_population_df['population_1570']
    working_age_population_df['population_1570'] = \
        working_age_population_df['population_1570'] / million
    # store output data
    self.store_sos_outputs_values({
        "population_df": population_df,
        "working_age_population_df": working_age_population_df,
        "population_detail_df": population_detail_df,
        "birth_rate_df": birth_rate_df,
        "death_rate_dict": death_rate_dict,
        "birth_df": birth_df,
        "death_dict": death_dict,
        "life_expectancy_df": life_expectancy_df,
    })
def compute_sos_jacobian(self):
    """Register analytic partial derivatives of the population outputs.

    Gradients of ``population_df.population`` and
    ``working_age_population_df.population_1570`` w.r.t. net economic
    output and atmospheric temperature, divided by ``model.million`` to
    match the unit conversion applied in ``run``.
    """
    million = self.model.million
    # Preserve the original evaluation order: output derivatives first,
    # then temperature derivatives.
    for compute_grads, input_key in (
            (self.model.compute_d_pop_d_output,
             ('economics_df', 'output_net_of_d')),
            (self.model.compute_d_pop_d_temp,
             ('temperature_df', 'temp_atmo'))):
        d_pop, d_working_pop = compute_grads()
        self.set_partial_derivative_for_other_types(
            ('population_df', 'population'), input_key, d_pop / million)
        self.set_partial_derivative_for_other_types(
            ('working_age_population_df', 'population_1570'), input_key,
            d_working_pop / million)
def get_chart_filter_list(self):
    """Build the chart-selection and year-selection post-processing filters."""
    chart_list = ['World population', 'Population detailed',
                  'Population detailed year start',
                  'Population detailed mid year',
                  '15-49 age range birth rate', 'knowledge',
                  'death rate per age range',
                  'Number of birth and death per year',
                  'Cumulative climate deaths',
                  'Number of climate death per year',
                  'Life expectancy evolution',
                  'working-age population over years']
    # All charts selected by default.
    chart_filters = [ChartFilter('Charts', chart_list, chart_list, 'charts')]
    year_start, year_end = self.get_sosdisc_inputs(
        ['year_start', 'year_end'])
    # One selectable tick every five years; defaults pick both extremes.
    selectable_years = list(np.arange(year_start, year_end + 1, 5))
    chart_filters.append(ChartFilter(
        'Years for population', selectable_years,
        [year_start, year_end], 'years'))
    return chart_filters
def get_post_processing_list(self, chart_filters=None):
# For the outputs, making a graph for tco vs year for each range and for specific
# value of ToT with a shift of five year between then
instanciated_charts = []
# Overload default value with chart filter
if chart_filters is not None:
for chart_filter in chart_filters:
if chart_filter.filter_key == 'charts':
chart_list = chart_filter.selected_values
if chart_filter.filter_key == 'years':
years_list = chart_filter.selected_values
pop_df = deepcopy(
self.get_sosdisc_outputs('population_detail_df'))
birth_rate_df = deepcopy(
self.get_sosdisc_outputs('birth_rate_df'))
birth_df = deepcopy(
self.get_sosdisc_outputs('birth_df'))
death_rate_dict = deepcopy(
self.get_sosdisc_outputs('death_rate_dict'))
death_dict = deepcopy(
self.get_sosdisc_outputs('death_dict'))
life_expectancy_df = deepcopy(
self.get_sosdisc_outputs('life_expectancy_df'))
if 'World population' in chart_list:
years = list(pop_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
min_value, max_value = self.get_greataxisrange(
pop_df['total'])
chart_name = 'World population over years'
new_chart = TwoAxesInstanciatedChart('years', 'population',
[year_start - 5, year_end + 5],
[min_value, max_value],
chart_name)
visible_line = True
ordonate_data = list(pop_df['total'])
new_series = InstanciatedSeries(
years, ordonate_data, 'population', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'working-age population over years' in chart_list:
years = list(pop_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
min_value, max_value = self.get_greataxisrange(
pop_df['population_1570'])
chart_name = 'working-age population over years'
new_chart = TwoAxesInstanciatedChart('years', '15-70 age range population',
[year_start - 5, year_end + 5],
[min_value, max_value],
chart_name)
visible_line = True
ordonate_data = list(pop_df['population_1570'])
new_series = InstanciatedSeries(
years, ordonate_data, 'population', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if '15-49 age range birth rate' in chart_list:
years = list(pop_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
min_value, max_value = self.get_greataxisrange(
birth_rate_df['birth_rate'])
chart_name = '15-49 age range birth rate'
new_chart = TwoAxesInstanciatedChart('years', ' birth rate',
[year_start - 5, year_end + 5],
[min_value, max_value],
chart_name)
visible_line = True
ordonate_data = list(birth_rate_df['birth_rate'])
new_series = InstanciatedSeries(
years, ordonate_data, '15-49 birth rate', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'knowledge' in chart_list:
years = list(pop_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
min_value, max_value = self.get_greataxisrange(
birth_rate_df['knowledge'])
chart_name = 'Knowledge yearly evolution'
new_chart = TwoAxesInstanciatedChart('years', 'knowledge',
[year_start - 5, year_end + 5],
[min_value, max_value],
chart_name)
visible_line = True
ordonate_data = list(birth_rate_df['knowledge'])
new_series = InstanciatedSeries(
years, ordonate_data, 'knowledge', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'death rate per age range' in chart_list:
years = list(pop_df.index)
headers = list(death_rate_dict['total'].columns.values)
to_plot = headers[1:]
year_start = years[0]
year_end = years[len(years) - 1]
min_values = {}
max_values = {}
for key in to_plot:
min_values[key], max_values[key] = self.get_greataxisrange(
death_rate_dict['total'][key])
min_value = min(min_values.values())
max_value = max(max_values.values())
chart_name = 'Death rate per age range'
new_chart = TwoAxesInstanciatedChart('years', ' death rate',
[year_start - 5, year_end + 5],
[min_value, max_value],
chart_name)
for key in to_plot:
visible_line = True
ordonate_data = list(death_rate_dict['total'][key])
new_series = InstanciatedSeries(
years, ordonate_data, f'death rate for age range {key}', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'Number of birth and death per year' in chart_list:
years = list(birth_df.index)
year_start = years[0]
year_end = years[len(years) - 1]
min_values = {}
max_values = {}
min_values['number_of_birth'], max_values['number_of_birth'] = self.get_greataxisrange(
birth_df['number_of_birth'])
min_values['number_of_death'], max_values['number_of_death'] = self.get_greataxisrange(
death_dict['total']['total'])
min_value = min(min_values.values())
max_value = max(max_values.values())
chart_name = 'Number of birth and death per year'
new_chart = TwoAxesInstanciatedChart('years', ' Number of birth and death',
[year_start - 5, year_end + 5],
[min_value, max_value],
chart_name)
visible_line = True
ordonate_data = list(birth_df['number_of_birth'])
new_series = InstanciatedSeries(
years, ordonate_data, 'Number of birth per year', 'lines', visible_line)
new_chart.series.append(new_series)
ordonate_data = list(death_dict['total']['total'])
new_series = InstanciatedSeries(
years, ordonate_data, 'Number of death per year', 'lines', visible_line)
new_chart.series.append(new_series)
instanciated_charts.append(new_chart)
if 'Number of climate death per year' in chart_list:
years = list(death_dict['total'].index)
year_start = years[0]
year_end = years[len(years) - 1]
min_value, max_value = | |
638,
"Xbar": -639, "nXah": -640, "Xirv": 641, "Xnor": 642, "yXec": 643,
"hU X": -644, "yIXm": 645, "UraX": -646, "eyaX": -647, "I Xb": 648,
"jlIX": -649, "Xehl": -650, "hoXs": 651, "Xkez": 652, "Xomi": 653,
"abXe": 654, "Xdig": 655, "Xiru": 656, "diXu": 657, "GdOX": 658,
"UXir": 659, "Xico": -660, "fiSX": 661, "Xyin": 662, "IXii": 663,
"crIX": -664, "uXey": -665, "OXer": -666, "Xhay": 667, "yruX": 668,
"thaX": -669, "Xitc": -670, "utiX": 671, "kahX": -672, "Xash": -673,
"Xfan": -674, "Xenu": 675, "beXd": -676, "Xenz": 677, "uyX ": 678,
"rpuX": 679, "Xug ": 680, "gXor": 681, "CkuX": 682, "Xcii": -683,
"pl X": 684, "SikX": 685, "Xuhp": 686, "riXn": 687, " pIX": -688,
"nuXe": 689, "Xasu": -690, "ayuX": -691, "uXba": 692, "ukIX": -693,
"IniX": 694, "ryuX": 695, " ilX": 696, "Xiat": -697, "Xpay": -698,
"Xkav": 699, "keXo": -700, "yyuX": -701, "osaX": -702, "Xivg": 703,
"saXs": -704, "Xulg": 705, "meXv": 706, "Xeyc": -707, "omIX": -708,
"egiX": -709, "SIXl": 710, "ISIX": 711, "ipaX": 712, "GlIX": -713,
"pIXl": -714, "aXgi": 715, "rhaX": -716, "zUXu": 717, "Ximt": 718,
"SapX": 719, "Xvar": -720, "OXek": -721, "eXum": 722, "uXuc": 723,
"daXd": 724, "gUX ": 725, "nuXi": -726, "aXce": 727, "ilXu": 728,
"Xapc": 729, "tyaX": -730, "tyuX": 731, "oXay": 732, "guaX": 733,
"eXih": -734, "lnaX": -735, "zkuX": 736, "yXab": 737, "Xemu": 738,
"Xife": -739, "Xaso": -740, "iXoz": 741, "speX": -742, "Xupa": -743,
"bliX": -744, "Xyi ": 745, "Xney": -746, "Xfek": -747, "m3 X": -748,
"eguX": -749, "hXar": -750, "vlaX": -751, "SviX": 752, "eXam": -753,
"oe X": -754, " yUX": 755, "Xni ": 756, "SuXl": 757, "vXam": -758,
"braX": -759, "tuXi": 760, " uXm": -761, "Xsay": 762, "mpuX": -763, "nnoX": 764,
"tuXk": -765, "ttuX": 766, "oXus": 767, "oXup": 768, "leaX": -769,
"Xiku": -770, "Ximp": -771, "Xihl": 772, "rraX": -773, "fuhX": 774,
"Xetb": -775, "smIX": 776, "aXpo": -777, "aXuc": 778, "ndUX": -779,
"IsiX": 780, "Xuuu": 781, "uguX": -782, "vmiX": 783, "Xtia": -784,
"Xeyn": -785, " uSX": 786, "uXuk": 787, "IbaX": 788, "SuX ": 789,
"77 X": -790, "eXge": 791, "pmiX": 792, "Xsi ": -793, "Xeda": -794,
"nmuX": 795, "kiXo": 796, "kmiX": 797, "zmiX": 798, "eXk ": -799,
"ryaX": -800, "hXat": -801, "fliX": -802, "zbaX": 803, "lmUX": 804,
"Xubi": -805, "Xset": -806, "iXab": -807, "UlaX": -808, "UnaX": -809,
"UXva": -810, "iraX": -811, "Xai ": -812, "Xohr": 813, "apuX": -814,
"oXic": -815, "eXus": -816, "Xaac": -817, "iXay": -818, "aXem": -819,
"uliX": -820, "Xeld": -821, "Xtis": -822, "graX": -823, "e Xt": -824,
"binX": -825, "Xess": -826, "Xuro": 827, "olXa": -828, "oXki": -829,
"rUXv": 830, "Xru ": 831, "uriX": -832, "rmUX": 833, "tCIX": -834,
"SmIX": 835, "eXas": -836, "hiXa": -837, "inaX": -838, "SmUX": 839,
"rgiX": -840, " riX": -841, "umXu": -842, "cUXu": -843, "akiX": -844,
"Xuri": -845, "ntIX": -846, "kaXm": 847, "kXek": -848, "tlIX": -849,
"Xizl": -850, "toXu": -851, "Xuzl": -852, "Xeci": -853, "lmaX": -854,
"OXi": -855, "aX7": 856, "bXl": 857, "uX1": 858, "mXl": -859,
"axX": 860, "Xtv": -861, "Xzm": 862, "Xny": -863, "Xlm": -864,
"Xsr": -865, "flX": -866, "fX ": -867, "Xbt": -868, "Xtj": -869,
"uXn": 870, "Xss": -871, "oIX": -872, "chX": -873, "Xty": -874,
"Xuo": -875, " kX": -876, "lXh": -877, "Xkc": -878, "IzX": -879,
"Xae": -880, "efX": -881, "utX": -882, "feX": -883, "nXu": -884,
" Xk": -885, "Xaj": -886, "t in baXinda ": -887, "sunda baXina": -888, "in aXilmasiy": -889,
"n ve baXini ": 890, "nlar yaXama ": -891, "vleri baXi": 892, "eri baXinda ": -893, "Ginde yaXama": -894,
"k onlarI iX": 895, "kaldIrIXind": 896, "slerin baXi": -897, "i yeni baXi": -898, "Unden baXin": 899,
"i de baXina": 900, "Cindeki maX": 901, "ile baXinin": 902, "dan baXini ": 903, "masInda Xen": 904,
"seri nde iX": 905, "a Cin in Xi": 906, "u ve baXind": 907, "erlerini aX": -908, "duGunu baXi": -909,
" nin baXini": 910, "e aXilmayac": -911, "iCtikleri X": -912, "Cin yaXama ": -913, "a dUnyada X": -914,
"kilinin yaX": -915, "Iyla yaXama": -916, "srIn yarIX": 917, "disini aXt": -918, "elecek yaX": -919,
"dsi nin Xu": -920, "man baXind": -921, "on yarIXin": 922, "Cin baXina": -923, " baXindak": -924,
"layI baXin": -925, "ra da baXi": -926, "n Once iXe": -927, "nlikle baX": 928, "e anlaXin ": 929,
"oyle bir X": -930, " besiktaX": -931, "n besiktaX": -932, "dar baXina": -933, "or yaXama ": -934,
" Cok yaXa ": 935, "k iCin Xok": -936, "alarI baXi": -937, "icilere Xu": -938, "Inda miloX": 939,
"i baXindak": -940, "dil yanlIX": 941, "larIyla Xu": -942, "ak iCin Xu": -943, "e olan Xu ": -944,
"alarInI Xu": -945, "steriXin ": 946, "hal gUreX": -947, "ap yanlIX": 948, "Slerin aX": -949,
"an baXkim": 950, "na baXini": -951, "buraya Xu": -952, "ta baXina": -953, "eman kurX": -954,
"bi baXind": -955, " kaldIrIX": 956, " yeni iXe": 957, "nin aXild": -958, "ama baXin": -959,
"rI da Xu ": -960, "ra aXilac": -961, "el Xoyler": 962, "yan baXin": -963, " dava Xu ": 964,
"meyi baXi": -965, "re baXind": -966, "ane baXin": 967, "stlar baX": 968, "aSInda Xu": -969,
"esiyle Xu": -970, "on yaXama": -971, "mer kaleX": 972, "li baXina": -973, "r o nun X": 974,
"in yaXini": -975, "Syerini X": -976, "dinci baX": -977, "kInda yaX": -978, "o da baXi": -979,
"a aXilmaz": 980, "zaman Xen": -981, "lan baXin": -982, "ulusu baX": 983, "lIGI baXi": -984,
"her ne iX": -985, "nlarI Xu ": -986, "Ik aXilm": 987, "en ekXi ": 988, "bile iXe": 989,
"sine Xut": 990, "man Xik ": 991, "ep gOGUX": 992, "eski taX": -993, "U baXina": -994,
"s yaXin ": -995, "S yaXama": -996, "m ilk iX": 997, "onun iXe": 998, " aXmayal": -999,
"n de iXe": 1000, "et Xansa": -1001, "dece iXe": 1002, "ade Xik ": 1003, "len Xam ": -1004,
"elim yaX": 1005, "rIn yaXi": -1006, "I Xanlit": -1007, " bu Xu ": -1008, "Siyi iXe": 1009,
"pille iX": 1010, "rdi baXi": -1011, "ganI yaX": -1012, "an Xaka ": -1013, "re aXilm": -1014,
"naya baX": -1015, "sene baX": 1016, "smini aX": -1017, "ki taXi ": -1018, "m yaXin ": -1019,
"a gene X": -1020, "ereden X": -1021, "kiden Xu": -1022, "rdeki Xu": -1023, "azin baX": -1024,
"mun Xeri": 1025, "cdet baX": -1026, "la aXilm": 1027, "man iXme": 1028, "hyol iXe": 1029,
"can Xen ": -1030, "sIr da X": -1031, "daha Xu ": -1032, "bun baXi": -1033, "I beXte ": 1034,
" an kurX": -1035, "emen iXe": 1036, "zli Xirk": 1037, "ir eXin ": 1038, "p de baX": 1039,
"Once iXe": 1040, "ak miloX": 1041, " bin yaX": 1042, "um baXin": -1043, "SCiyi iX": 1044,
"nlar kaX": -1045, "u baXina": -1046, "ller aXi": 1047, "arasI Xu": -1048, "k beXte ": 1049,
"sIz baXi": -1050, "asan meX": 1051, "yasa taX": -1052, "u daki X": -1053, "le yarIX": 1054,
"zor kaX": 1055, "blo Xu ": 1056, "yaraXan": 1057, "Gur kOX": -1058, " Su iXe": 1059,
"met akX": 1060, "U faSiX": 1061, "anI iX ": -1062, "GI baX ": -1063, "vrim Xe": 1064,
"GI Xik ": 1065, " aXimiz": 1066, " Xanda ": 1067, "yetiXin": 1068, " aXinda": -1069,
"Xarkisy": -1070, "haXani ": 1071, "on baX ": -1072, "piXtim ": 1073, " ka laX": 1074,
"a Xoke ": 1075, "aha iXe": 1076, "tanIXin": -1077, " araXiy": 1078, "gemi Xu": -1079,
"Ska iXe": 1080, "san aX ": 1081, "masabaX": 1082, "e iXimd": 1083, "kle iXe": 1084,
"er baX ": -1085, "ap baXi": -1086, " aXimi ": 1087, "nli Xu ": -1088, "ti kurX": -1089,
"Gu yaXi": -1090, "ok yaX ": -1091, "y de Xu": -1092, "na aXar": -1093, "n baXal": 1094,
"eye Xut": 1095, "sse baX": 1096, "Uk yaX ": -1097, "alI aXa": -1098, "Xevketl": 1099,
"In taXl": 1100, "pa baXi": -1101, "Xahabet": 1102, "yikli X": -1103, "I Xoke ": 1104,
"al Xen ": 1105, "en Xis ": 1106, "amIn Xu": -1107, "InI peX": 1108, "lle baX": 1109,
"han kOX": -1110, "Unya Xu": -1111, "alI Xu ": 1112, " iXmen": 1113, "ir Xokt": | |
# setting -standbyMode
standbyModeMv = ixNet.getAttribute(pccInit2, '-standbyMode')
ixNet.add(standbyModeMv, 'alternate')
# Overlays: pin standbyMode to 'false' at LSP indices 2, 4, 12 and 14,
# committing after each overlay exactly as the unrolled code did.
for overlay_index in ('2', '4', '12', '14'):
    ovrly = ixNet.add(standbyModeMv, 'overlay')
    ixNet.setMultiAttribute(ovrly,
                            '-count', '1',
                            '-index', overlay_index,
                            '-indexStep', '0',
                            '-valueStep', 'false',
                            '-value', 'false')
    ixNet.commit()
# setting -protectionLsp
protectionLspMv = ixNet.getAttribute(pccInit2, '-protectionLsp')
ixNet.add(protectionLspMv, 'singleValue')
ixNet.setMultiAttribute(protectionLspMv + '/singleValue',
                        '-value', 'false')
# Overlays: enable LSP protection ('true') at these indices, one commit
# per overlay, matching the original per-overlay sequence.
for overlay_index in ('1', '2', '3', '4', '5',
                      '11', '12', '13', '14', '15'):
    ovrly = ixNet.add(protectionLspMv, 'overlay')
    ixNet.setMultiAttribute(ovrly,
                            '-count', '1',
                            '-index', overlay_index,
                            '-indexStep', '0',
                            '-valueStep', 'true',
                            '-value', 'true')
    ixNet.commit()
# setting -associationId
associationIdMv = ixNet.getAttribute(pccInit2, '-associationId')
ixNet.add(associationIdMv, 'singleValue')
ixNet.setMultiAttribute(associationIdMv + '/singleValue',
                        '-value', '1')
# Overlays: per-index association ids (pairs of indices share an id);
# valueStep equals the value in every original overlay, so reuse it.
for overlay_index, assoc_id in (('1', '11'), ('2', '11'),
                                ('3', '12'), ('4', '12'),
                                ('5', '13'), ('6', '13'),
                                ('11', '111'), ('12', '111'),
                                ('13', '112'), ('14', '112'),
                                ('15', '113'), ('16', '113')):
    ovrly = ixNet.add(associationIdMv, 'overlay')
    ixNet.setMultiAttribute(ovrly,
                            '-count', '1',
                            '-index', overlay_index,
                            '-indexStep', '0',
                            '-valueStep', assoc_id,
                            '-value', assoc_id)
    ixNet.commit()
pccInit2 = pccGroup2 + '/pceInitiateLspParameters:10'
# setting -numberOfEroSubObjects
ixNet.setAttribute(pccInit2, '-numberOfEroSubObjects', '1')
ixNet.commit()
# setting -srcEndPointIpv4
srcEndPointIpv4Mv = ixNet.getAttribute(pccInit2, '-srcEndPointIpv4')
ixNet.add(srcEndPointIpv4Mv, 'singleValue')
ixNet.setMultiAttribute(srcEndPointIpv4Mv + '/singleValue',
                        '-value', '0.0.0.0')
# Overlays 1-7: per-index source endpoint addresses (pairs of indices
# share an address); valueStep equals the value in every original overlay.
for overlay_index, src_ip in (('1', '1.0.0.11'), ('2', '1.0.0.11'),
                              ('3', '1.0.0.12'), ('4', '1.0.0.12'),
                              ('5', '1.0.0.13'), ('6', '1.0.0.13'),
                              ('7', '1.0.0.14')):
    ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
    ixNet.setMultiAttribute(ovrly,
                            '-count', '1',
                            '-index', overlay_index,
                            '-indexStep', '0',
                            '-valueStep', src_ip,
                            '-value', src_ip)
    ixNet.commit()
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', '1.0.0.14',
'-value', '1.0.0.14')
ixNet.commit()
# Adding overlay 9 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', '1.0.0.15',
'-value', '1.0.0.15')
ixNet.commit()
# Adding overlay 10 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', '1.0.0.15',
'-value', '1.0.0.15')
ixNet.commit()
# Adding overlay 11 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 12 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 13 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 14 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 15 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 16 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 17 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '17',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 18 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '18',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 19 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '19',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# Adding overlay 20 for srcEndPointIpv4
ovrly = ixNet.add(srcEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '20',
'-indexStep', '0',
'-valueStep', '192.168.127.12',
'-value', '192.168.127.12')
ixNet.commit()
# setting -destEndPointIpv4
destEndPointIpv4Mv = ixNet.getAttribute(pccInit2, '-destEndPointIpv4')
ixNet.add(destEndPointIpv4Mv, 'singleValue')
ixNet.setMultiAttribute(destEndPointIpv4Mv + '/singleValue',
'-value', '0.0.0.0')
# Adding overlay 1 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '1',
'-indexStep', '0',
'-valueStep', '2.0.0.11',
'-value', '2.0.0.11')
ixNet.commit()
# Adding overlay 2 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '2',
'-indexStep', '0',
'-valueStep', '2.0.0.11',
'-value', '2.0.0.11')
ixNet.commit()
# Adding overlay 3 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '3',
'-indexStep', '0',
'-valueStep', '2.0.0.12',
'-value', '2.0.0.12')
ixNet.commit()
# Adding overlay 4 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '4',
'-indexStep', '0',
'-valueStep', '2.0.0.12',
'-value', '2.0.0.12')
ixNet.commit()
# Adding overlay 5 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '5',
'-indexStep', '0',
'-valueStep', '2.0.0.13',
'-value', '2.0.0.13')
ixNet.commit()
# Adding overlay 6 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '6',
'-indexStep', '0',
'-valueStep', '2.0.0.13',
'-value', '2.0.0.13')
ixNet.commit()
# Adding overlay 7 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '7',
'-indexStep', '0',
'-valueStep', '2.0.0.14',
'-value', '2.0.0.14')
ixNet.commit()
# Adding overlay 8 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '8',
'-indexStep', '0',
'-valueStep', '2.0.0.14',
'-value', '2.0.0.14')
ixNet.commit()
# Adding overlay 9 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '9',
'-indexStep', '0',
'-valueStep', '2.0.0.15',
'-value', '2.0.0.15')
ixNet.commit()
# Adding overlay 10 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '10',
'-indexStep', '0',
'-valueStep', '2.0.0.15',
'-value', '2.0.0.15')
ixNet.commit()
# Adding overlay 11 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '11',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 12 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '12',
'-indexStep', '0',
'-valueStep', '172.16.17.32',
'-value', '172.16.17.32')
ixNet.commit()
# Adding overlay 13 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '13',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 14 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '14',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 15 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '15',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 16 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '16',
'-indexStep', '0',
'-valueStep', '192.168.3.11',
'-value', '192.168.3.11')
ixNet.commit()
# Adding overlay 17 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '17',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 18 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', '18',
'-indexStep', '0',
'-valueStep', '172.16.58.3',
'-value', '172.16.58.3')
ixNet.commit()
# Adding overlay 19 for destEndPointIpv4
ovrly = ixNet.add(destEndPointIpv4Mv, 'overlay')
ixNet.setMultiAttribute(ovrly,
'-count', '1',
'-index', | |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DogmaApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_dogma_attributes(self, **kwargs): # noqa: E501
"""Get attributes # noqa: E501
Get a list of dogma attribute ids --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_attributes(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_dogma_attributes_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_dogma_attributes_with_http_info(**kwargs) # noqa: E501
return data
def get_dogma_attributes_with_http_info(self, **kwargs): # noqa: E501
"""Get attributes # noqa: E501
Get a list of dogma attribute ids --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_attributes_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dogma_attributes" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/dogma/attributes/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_dogma_attributes_attribute_id(self, attribute_id, **kwargs): # noqa: E501
"""Get attribute information # noqa: E501
Get information on a dogma attribute --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_attributes_attribute_id(attribute_id, async=True)
>>> result = thread.get()
:param async bool
:param int attribute_id: A dogma attribute ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetDogmaAttributesAttributeIdOk
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_dogma_attributes_attribute_id_with_http_info(attribute_id, **kwargs) # noqa: E501
else:
(data) = self.get_dogma_attributes_attribute_id_with_http_info(attribute_id, **kwargs) # noqa: E501
return data
def get_dogma_attributes_attribute_id_with_http_info(self, attribute_id, **kwargs): # noqa: E501
"""Get attribute information # noqa: E501
Get information on a dogma attribute --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_attributes_attribute_id_with_http_info(attribute_id, async=True)
>>> result = thread.get()
:param async bool
:param int attribute_id: A dogma attribute ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: GetDogmaAttributesAttributeIdOk
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['attribute_id', 'datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dogma_attributes_attribute_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'attribute_id' is set
if ('attribute_id' not in params or
params['attribute_id'] is None):
raise ValueError("Missing the required parameter `attribute_id` when calling `get_dogma_attributes_attribute_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'attribute_id' in params:
path_params['attribute_id'] = params['attribute_id'] # noqa: E501
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/dogma/attributes/{attribute_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetDogmaAttributesAttributeIdOk', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_dogma_effects(self, **kwargs): # noqa: E501
"""Get effects # noqa: E501
Get a list of dogma effect ids --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_effects(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_dogma_effects_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_dogma_effects_with_http_info(**kwargs) # noqa: E501
return data
def get_dogma_effects_with_http_info(self, **kwargs): # noqa: E501
"""Get effects # noqa: E501
Get a list of dogma effect ids --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_effects_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
:param str x_user_agent: Client identifier, takes precedence over User-Agent
:return: list[int]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['datasource', 'user_agent', 'x_user_agent'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_dogma_effects" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'datasource' in params:
query_params.append(('datasource', params['datasource'])) # noqa: E501
if 'user_agent' in params:
query_params.append(('user_agent', params['user_agent'])) # noqa: E501
header_params = {}
if 'x_user_agent' in params:
header_params['X-User-Agent'] = params['x_user_agent'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/v1/dogma/effects/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[int]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_dogma_effects_effect_id(self, effect_id, **kwargs): # noqa: E501
"""Get effect information # noqa: E501
Get information on a dogma effect --- This route expires daily at 11:05 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_dogma_effects_effect_id(effect_id, async=True)
>>> result = thread.get()
:param async bool
:param int effect_id: A dogma effect ID (required)
:param str datasource: The server name you would like data from
:param str user_agent: Client identifier, takes precedence over headers
| |
# Repository: empymod/frequency-design
import emg3d
import empymod
import numpy as np
import ipywidgets as widgets
import scipy.interpolate as si
import matplotlib.pyplot as plt
from IPython.display import display
from scipy.signal import find_peaks
# Define all errors we want to catch with the variable-checks and setting of
# default values. This is not perfect, but better than 'except Exception'.
# NOTE(review): no use of VariableCatch is visible in this excerpt — it is
# presumably consumed elsewhere in the module; confirm before removing.
VariableCatch = (LookupError, AttributeError, ValueError, TypeError, NameError)
# Interactive Frequency Selection
class InteractiveFrequency(emg3d.utils.Fourier):
"""App to create required frequencies for Fourier Transform."""
    def __init__(self, src_z, rec_z, depth, res, time, signal=0, ab=11,
                 aniso=None, **kwargs):
        """App to create required frequencies for Fourier Transform.

        No thorough input checks are carried out. Rubbish in, rubbish out.
        See empymod.model.dipole for details regarding the modelling.

        Parameters
        ----------
        src_z, rec_z : floats
            Source and receiver depths and offset. The source is located at
            src=(0, 0, src_z), the receiver at rec=(off, 0, rec_z).

        depth : list
            Absolute layer interfaces z (m); #depth = #res - 1
            (excluding +/- infinity).

        res : array_like
            Horizontal resistivities rho_h (Ohm.m); #res = #depth + 1.

        time : array_like
            Times t (s).

        signal : {0, 1, -1}, optional
            Source signal, default is 0:
                - -1 : Switch-off time-domain response
                - 0 : Impulse time-domain response
                - +1 : Switch-on time-domain response

        ab : int, optional
            Source-receiver configuration, defaults to 11. (See
            empymod.model.dipole for all possibilities.)

        aniso : array_like, optional
            Anisotropies lambda = sqrt(rho_v/rho_h) (-); #aniso = #res.
            Defaults to ones.

        **kwargs : Optional parameters:

            - ``fmin`` : float
              Initial minimum frequency. Default is 1e-3.
            - ``fmax`` : float
              Initial maximum frequency. Default is 1e1.
            - ``off`` : float
              Initial offset. Default is 5000 (the offset slider in
              ``create_widget`` nevertheless starts at 500).
            - ``ft`` : str {'dlf', 'fftlog'}
              Initial Fourier transform method. Default is 'dlf'.
            - ``ftarg`` : dict
              Initial Fourier transform arguments corresponding to ``ft``.
              Default is None.
            - ``pts_per_dec`` : int
              Initial points per decade. Default is 5.
            - ``linlog`` : str {'linear', 'log'}
              Initial display scaling. Default is 'linear'.
            - ``xtfact`` : float
              Factor for linear x-dimension: t_max = xtfact*offset/1000.
            - ``verb`` : int
              Verbosity. Only for debugging purposes.
        """
        # Get initial values or set to default.
        fmin = kwargs.pop('fmin', 1e-3)
        fmax = kwargs.pop('fmax', 1e1)
        off = kwargs.pop('off', 5000)
        ft = kwargs.pop('ft', 'dlf')
        ftarg = kwargs.pop('ftarg', None)
        self.pts_per_dec = kwargs.pop('pts_per_dec', 5)
        self.linlog = kwargs.pop('linlog', 'linear')
        self.xtfact = kwargs.pop('xtfact', 1)
        self.verb = kwargs.pop('verb', 1)

        # Ensure no kwargs left (every accepted kwarg was pop'ed above).
        if kwargs:
            raise TypeError('Unexpected **kwargs: %r' % kwargs)

        # Collect model from input; this dict is splatted into every
        # empymod.dipole call.
        self.model = {
            'src': [0, 0, src_z],
            'rec': [off, 0, rec_z],
            'depth': depth,
            'res': res,
            'aniso': aniso,
            'ab': ab,
            'verb': self.verb,
        }

        # Initiate a Fourier instance (parent class).
        self.model = self.model  # no-op placeholder removed; see below
        super().__init__(time, fmin, fmax, signal, ft, ftarg, verb=self.verb)

        # Create the figure.
        self.initiate_figure()
def initiate_figure(self):
"""Create the figure."""
# Create figure and all axes
fig = plt.figure("Interactive frequency selection for the Fourier "
"Transform.", figsize=(9, 4))
plt.subplots_adjust(hspace=0.03, wspace=0.04, bottom=0.15, top=0.9)
# plt.tight_layout(rect=[0, 0, 1, 0.95]) # Leave space for suptitle.
ax1 = plt.subplot2grid((3, 2), (0, 0), rowspan=2)
plt.grid('on', alpha=0.4)
ax2 = plt.subplot2grid((3, 2), (0, 1), rowspan=2)
plt.grid('on', alpha=0.4)
ax3 = plt.subplot2grid((3, 2), (2, 0))
plt.grid('on', alpha=0.4)
ax4 = plt.subplot2grid((3, 2), (2, 1))
plt.grid('on', alpha=0.4)
# Synchronize x-axis, switch upper labels off
ax1.get_shared_x_axes().join(ax1, ax3)
ax2.get_shared_x_axes().join(ax2, ax4)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
# Move labels of t-domain to the right
ax2.yaxis.set_ticks_position('right')
ax4.yaxis.set_ticks_position('right')
# Set fixed limits
ax1.set_xscale('log')
ax3.set_yscale('log')
ax3.set_yscale('log')
ax3.set_ylim([0.007, 141])
ax3.set_yticks([0.01, 0.1, 1, 10, 100])
ax3.set_yticklabels(('0.01', '0.1', '1', '10', '100'))
ax4.set_yscale('log')
ax4.set_yscale('log')
ax4.set_ylim([0.007, 141])
ax4.set_yticks([0.01, 0.1, 1, 10, 100])
ax4.set_yticklabels(('0.01', '0.1', '1', '10', '100'))
# Labels etc
ax1.set_ylabel('Amplitude (V/m)')
ax3.set_ylabel('Rel. Error (%)')
ax3.set_xlabel('Frequency (Hz)')
ax4.set_xlabel('Time (s)')
ax3.axhline(1, c='k')
ax4.axhline(1, c='k')
# Add instances
self.fig = fig
self.axs = [ax1, ax2, ax3, ax4]
# Plot initial base model
self.update_ftfilt(self.ftarg)
self.plot_base_model()
# Initiate the widgets
self.create_widget()
def reim(self, inp):
"""Return real or imaginary part as a function of signal."""
if self.signal < 0:
return inp.real
else:
return inp.imag
    def create_widget(self):
        """Create widgets and their layout.

        Each widget is wired to the corresponding ``update_*`` callback via
        ``widgets.interactive``; the callbacks redraw the plots directly.
        """
        # Offset slider (m); continuous_update=False so the (expensive)
        # recomputation only runs on slider release.
        off = widgets.interactive(
            self.update_off,
            off=widgets.IntSlider(
                min=500,
                max=10000,
                description='Offset (m)',
                value=self.model['rec'][0],
                step=250,
                continuous_update=False,
                style={'description_width': '60px'},
                layout={'width': '260px'},
            ),
        )

        # Pts/dec slider.
        pts_per_dec = widgets.interactive(
            self.update_pts_per_dec,
            pts_per_dec=widgets.IntSlider(
                min=1,
                max=10,
                description='pts/dec',
                value=self.pts_per_dec,
                step=1,
                continuous_update=False,
                style={'description_width': '60px'},
                layout={'width': '260px'},
            ),
        )

        # Linear/logarithmic display selection.
        linlog = widgets.interactive(
            self.update_linlog,
            linlog=widgets.ToggleButtons(
                value=self.linlog,
                options=['linear', 'log'],
                description='Display',
                style={'description_width': '60px', 'button_width': '100px'},
            ),
        )

        # Frequency-range slider; values are log10(f), hence the [-4, 3]
        # bounds (1e-4 to 1e3 Hz).
        freq_range = widgets.interactive(
            self.update_freq_range,
            freq_range=widgets.FloatRangeSlider(
                value=[np.log10(self.fmin), np.log10(self.fmax)],
                description='f-range',
                min=-4,
                max=3,
                step=0.1,
                continuous_update=False,
                style={'description_width': '60px'},
                layout={'width': '260px'},
            ),
        )

        # Signal selection (-1 off / 0 impulse / +1 on).
        signal = widgets.interactive(
            self.update_signal,
            signal=widgets.ToggleButtons(
                value=self.signal,
                options=[-1, 0, 1],
                description='Signal',
                style={'description_width': '60px', 'button_width': '65px'},
            ),
        )

        # Fourier transform method selection.
        def _get_init():
            """Return initial choice of Fourier Transform."""
            if self.ft == 'fftlog':
                return self.ft
            else:
                return self.ftarg['dlf'].savename

        ftfilt = widgets.interactive(
            self.update_ftfilt,
            ftfilt=widgets.Dropdown(
                options=['fftlog', 'key_81_CosSin_2009',
                         'key_241_CosSin_2009', 'key_601_CosSin_2009',
                         'key_101_CosSin_2012', 'key_201_CosSin_2012'],
                description='Fourier',
                value=_get_init(),  # Initial value
                style={'description_width': '60px'},
                layout={'width': 'max-content'},
            ),
        )

        # Group them together into three columns.
        t1col1 = widgets.VBox(children=[pts_per_dec, freq_range],
                              layout={'width': '310px'})
        t1col2 = widgets.VBox(children=[off, ftfilt],
                              layout={'width': '310px'})
        t1col3 = widgets.VBox(children=[signal, linlog],
                              layout={'width': '310px'})

        # Group them together.
        display(widgets.HBox(children=[t1col1, t1col2, t1col3]))
# Plotting and calculation routines.
def clear_handle(self, handles):
"""Clear `handles` from figure."""
for hndl in handles:
if hasattr(self, 'h_'+hndl):
getattr(self, 'h_'+hndl).remove()
    def adjust_lim(self):
        """Adjust axes limits to the current base responses.

        Relies on ``self.f_dense``/``self.t_base`` set in
        ``plot_base_model``, so it must run after that.
        """
        # Adjust y-limits f-domain (wider margins in log display).
        if self.linlog == 'linear':
            self.axs[0].set_ylim([1.1*min(self.reim(self.f_dense)),
                                  1.5*max(self.reim(self.f_dense))])
        else:
            self.axs[0].set_ylim([5*min(self.reim(self.f_dense)),
                                  5*max(self.reim(self.f_dense))])

        # Adjust x-limits f-domain
        self.axs[0].set_xlim([min(self.freq_req), max(self.freq_req)])

        # Adjust y-limits t-domain
        if self.linlog == 'linear':
            self.axs[1].set_ylim(
                    [min(-max(self.t_base)/20, 0.9*min(self.t_base)),
                     max(-min(self.t_base)/20, 1.1*max(self.t_base))])
        else:
            # Five decades below the peak amplitude.
            self.axs[1].set_ylim([10**(np.log10(max(self.t_base))-5),
                                  1.5*max(self.t_base)])

        # Adjust x-limits t-domain
        if self.linlog == 'linear':
            if self.signal == 0:
                # Impulse response: window scales with offset via xtfact.
                self.axs[1].set_xlim(
                        [0, self.xtfact*self.model['rec'][0]/1000])
            else:
                self.axs[1].set_xlim([0, max(self.time)])
        else:
            self.axs[1].set_xlim([min(self.time), max(self.time)])
def print_suptitle(self):
"""Update suptitle."""
plt.suptitle(
f"Offset = {np.squeeze(self.model['rec'][0])/1000} km; "
f"No. freq. coarse: {self.freq_calc.size}; No. freq. full: "
f"{self.freq_req.size} ({self.freq_req.min():.1e} $-$ "
f"{self.freq_req.max():.1e} Hz)")
def plot_base_model(self):
"""Update smooth, 'correct' model."""
# Calculate responses
self.f_dense = empymod.dipole(freqtime=self.freq_dense, **self.model)
self.t_base = empymod.dipole(
freqtime=self.time, signal=self.signal, **self.model)
# Clear existing handles
self.clear_handle(['f_base', 't_base'])
# Plot new result
self.h_f_base, = self.axs[0].plot(
self.freq_dense, self.reim(self.f_dense), 'C3')
self.h_t_base, = self.axs[1].plot(self.time, self.t_base, 'C3')
self.adjust_lim()
    def plot_coarse_model(self):
        """Update coarse model (computed on freq_calc, then interpolated).

        Draws the interpolated f-domain response, the transformed t-domain
        response, and their relative errors against the base model.
        """
        # Calculate the f-responses for required and the calculation range.
        f_req = empymod.dipole(freqtime=self.freq_req, **self.model)
        f_calc = empymod.dipole(freqtime=self.freq_calc, **self.model)

        # Interpolate from calculated to required frequencies and transform.
        f_int = self.interpolate(f_calc)
        t_int = self.freq2time(f_calc, self.model['rec'][0])

        # Calculate the errors, clipped to [0.01 %, 100 %] to match the
        # fixed log scale of the error axes.
        f_error = np.clip(100*abs((self.reim(f_int)-self.reim(f_req)) /
                                  self.reim(f_req)), 0.01, 100)
        t_error = np.clip(100*abs((t_int-self.t_base)/self.t_base), 0.01, 100)

        # Clear existing handles
        self.clear_handle(['f_int', 't_int', 'f_inti', 'f_inte', 't_inte'])

        # Plot frequency-domain result: interpolated (small black dots) and
        # actually-calculated (larger blue dots) values, plus the error.
        self.h_f_inti, = self.axs[0].plot(
                self.freq_req, self.reim(f_int), 'k.', ms=4)
        self.h_f_int, = self.axs[0].plot(
                self.freq_calc, self.reim(f_calc), 'C0.', ms=8)
        self.h_f_inte, = self.axs[2].plot(self.freq_req, f_error, 'k.')

        # Plot time-domain result
        self.h_t_int, = self.axs[1].plot(self.time, t_int, 'k--')
        self.h_t_inte, = self.axs[3].plot(self.time, t_error, 'k.')

        # Update suptitle
        self.print_suptitle()
# Interactive routines
def update_off(self, off):
"""Offset-slider"""
# Update model
self.model['rec'] = [off, self.model['rec'][1], self.model['rec'][2]]
# Redraw models
self.plot_base_model()
self.plot_coarse_model()
def update_pts_per_dec(self, pts_per_dec):
"""pts_per_dec-slider."""
# Store pts_per_dec.
self.pts_per_dec = pts_per_dec
# Redraw through update_ftfilt.
self.update_ftfilt(self.ftarg)
def update_freq_range(self, freq_range):
"""Freq-range slider."""
# Update values
self.fmin = 10**freq_range[0]
self.fmax = 10**freq_range[1]
# Redraw models
self.plot_coarse_model()
    def update_ftfilt(self, ftfilt):
        """Ftfilt dropdown.

        `ftfilt` is either the string 'fftlog' or a dict with a 'dlf'
        entry holding a DLF filter instance (whose `savename` is passed
        on to empymod).
        """
        # Check if FFTLog or DLF; get DLF filter.
        if isinstance(ftfilt, str):
            fftlog = ftfilt == 'fftlog'
        else:
            if 'dlf' in ftfilt:
                fftlog = False
                ftfilt = ftfilt['dlf'].savename
            else:
                fftlog = True
        # Update Fourier arguments.
        if fftlog:
            self.fourier_arguments('fftlog', {'pts_per_dec': self.pts_per_dec})
            self.freq_inp = None
        else:
            # Calculate input frequency from min to max with pts_per_dec.
            lmin = np.log10(self.freq_req.min())
            lmax = np.log10(self.freq_req.max())
            self.freq_inp = np.logspace(
                lmin, lmax, int(self.pts_per_dec*np.ceil(lmax-lmin)))
            # pts_per_dec=-1 tells empymod to use freq_inp as-is.
            self.fourier_arguments(
                'dlf', {'dlf': ftfilt, 'pts_per_dec': -1})
        # Dense frequencies for comparison reasons
        self.freq_dense = np.logspace(np.log10(self.freq_req.min()),
                                      np.log10(self.freq_req.max()), 301)
        # Redraw models
        self.plot_base_model()
        self.plot_coarse_model()
    def update_linlog(self, linlog):
        """Adjust x- and y-scaling of both frequency- and time-domain."""
        # Store linlog
        self.linlog = linlog
        # f-domain: x-axis always log; y-axis linear or symlog.
        if linlog == 'log':
            sym_dec = 10  # Number of decades to show on symlog
            # Linear threshold `sym_dec` decades below the data maximum.
            lty = int(max(np.log10(abs(self.reim(self.f_dense))))-sym_dec)
            # NOTE(review): `linscaley` was removed in Matplotlib 3.5 in
            # favor of `linscale` — confirm the supported mpl version.
            self.axs[0].set_yscale('symlog', linthresh=10**lty, linscaley=0.7)
            # Remove the zero line because of the overlapping ticklabels.
            nticks = len(self.axs[0].get_yticks())//2
            iticks = np.arange(nticks)
            iticks = np.r_[iticks, iticks+nticks+1]
            self.axs[0].set_yticks(self.axs[0].get_yticks()[iticks])
        else:
            self.axs[0].set_yscale(linlog)
        # t-domain: either linear or loglog
        self.axs[1].set_yscale(linlog)
        self.axs[1].set_xscale(linlog)
        # Adjust limits
        self.adjust_lim()
def update_signal(self, | |
value:
# Update self.charset with the charset from the header
match = charset_re_match(value)
if match:
self.charset = match.group(1)
else:
# Update the header value with self.charset
if value.startswith('text/'):
value = value + '; charset=' + self.charset
name = literal and name or key
self.headers[name] = value
def appendHeader(self, name, value, delimiter=", "):
""" Append a value to an HTTP return header.
Set an HTTP return header "name" with value "value",
appending it following a comma if there was a previous value
set for the header.
'name' is always lowercased before use.
"""
name, value = _scrubHeader(name, value)
name = name.lower()
headers = self.headers
if name in headers:
h = headers[name]
h = "%s%s%s" % (h, delimiter, value)
else:
h = value
self.setHeader(name, h, scrubbed=True)
def addHeader(self, name, value):
""" Set a new HTTP return header with the given value,
Retain any previously set headers with the same name.
Note that this API appneds to the 'accumulated_headers' attribute;
it does not update the 'headers' mapping.
"""
name, value = _scrubHeader(name, value)
self.accumulated_headers.append((name, value))
__setitem__ = setHeader
def setBase(self, base):
"""Set the base URL for the returned document.
If base is None, set to the empty string.
If base is not None, ensure that it has a trailing slach.
"""
if base is None:
base = ''
elif not base.endswith('/'):
base = base + '/'
self.base = str(base)
    def insertBase(self):
        # Insert a <base> tag into the document head, when appropriate.
        # Only insert a base tag if content appears to be html.
        content_type = self.headers.get('content-type', '').split(';')[0]
        if content_type and (content_type != 'text/html'):
            return
        if self.base and self.body:
            text = self.text
            match = start_of_header_search(text)
            if match is not None:
                # Insertion point: directly after the opening head tag.
                index = match.start(0) + len(match.group(0))
                ibase = base_re_search(text)
                # Only insert when no <base> tag is present already.
                if ibase is None:
                    text = (
                        text[:index] +
                        '\n<base href="' +
                        escape(self.base, True) +
                        '" />\n' +
                        text[index:]
                    )
                    self.text = text
                    # The body grew; keep content-length in sync.
                    self.setHeader('content-length', len(self.body))
def isHTML(self, text):
if isinstance(text, bytes):
try:
text = text.decode(self.charset)
except UnicodeDecodeError:
pass
text = text.lstrip()
# Note that the string can be big, so text.lower().startswith()
# is more expensive than s[:n].lower().
if (text[:6].lower() == '<html>' or
text[:14].lower() == '<!doctype html'):
return True
if text.find('</') > 0:
return True
return False
    def setBody(self, body, title='', is_error=False, lock=None):
        """ Set the body of the response

        Sets the return body equal to the (string) argument "body". Also
        updates the "content-length" return header.

        If the body is already locked via a previous call, do nothing and
        return None.

        You can also specify a title, in which case the title and body
        will be wrapped up in html, head, title, and body tags.

        If the body is a 2-element tuple, then it will be treated
        as (title,body)

        If body is unicode, encode it.

        If body is not a string or unicode, but has an 'asHTML' method, use
        the result of that method as the body; otherwise, use the 'str'
        of body.

        If is_error is true, format the HTML as a Zope error message instead
        of a generic HTML page.

        Return 'self' (XXX as a true value?).
        """
        # allow locking of the body in the same way as the status
        if self._locked_body:
            return
        elif lock:
            self._locked_body = 1
        if not body:
            return self
        if isinstance(body, tuple) and len(body) == 2:
            title, body = body
        if hasattr(body, 'asHTML'):
            body = body.asHTML()
        # Normalize the body to bytes, encoding text with the response
        # charset and stringifying anything else.
        if isinstance(body, text_type):
            body = self._encode_unicode(body)
        elif isinstance(body, bytes):
            pass
        else:
            try:
                body = bytes(body)
            except UnicodeError:
                body = self._encode_unicode(text_type(body))
        # At this point body is always binary
        l = len(body)
        # Historic Zope shorthand: a short body of the exact form
        # '<name>' triggers a not-found error for that name.
        if ((l < 200) and body[:1] == b'<' and body.find(b'>') == l - 1 and
                bogus_str_search(body) is not None):
            self.notFoundError(body[1:-1].decode(self.charset))
        else:
            if title:
                title = text_type(title)
                if not is_error:
                    self.body = body = self._html(
                        title, body.decode(self.charset)).encode(self.charset)
                else:
                    self.body = body = self._error_html(
                        title, body.decode(self.charset)).encode(self.charset)
            else:
                self.body = body
            # Derive or normalize the content-type; text types always
            # get an explicit charset parameter.
            content_type = self.headers.get('content-type')
            if content_type is None:
                if self.isHTML(body):
                    content_type = 'text/html; charset=%s' % self.charset
                else:
                    content_type = 'text/plain; charset=%s' % self.charset
                self.setHeader('content-type', content_type)
            else:
                if (content_type.startswith('text/') and
                        'charset=' not in content_type):
                    content_type = '%s; charset=%s' % (content_type,
                                                       self.charset)
                    self.setHeader('content-type', content_type)
            self.setHeader('content-length', len(self.body))
            self.insertBase()
            if (self.use_HTTP_content_compression and
                    self.headers.get('content-encoding', 'gzip') == 'gzip'):
                # use HTTP content encoding to compress body contents unless
                # this response already has another type of content encoding
                if content_type.split('/')[0] not in uncompressableMimeMajorTypes:
                    # only compress if not listed as uncompressable
                    body = self.body
                    startlen = len(body)
                    # Raw deflate stream (-MAX_WBITS) framed manually with
                    # a gzip header and CRC32/length trailer.
                    co = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS,
                                          zlib.DEF_MEM_LEVEL, 0)
                    chunks = [_gzip_header, co.compress(body),
                              co.flush(),
                              struct.pack("<LL",
                                          zlib.crc32(body) & 0xffffffff,
                                          startlen)]
                    z = b''.join(chunks)
                    newlen = len(z)
                    # Only ship the compressed variant if it is smaller.
                    if newlen < startlen:
                        self.body = z
                        self.setHeader('content-length', newlen)
                        self.setHeader('content-encoding', 'gzip')
                    if self.use_HTTP_content_compression == 1:
                        # use_HTTP_content_compression == 1 if force was
                        # NOT used in enableHTTPCompression().
                        # If we forced it, then Accept-Encoding
                        # was ignored anyway, so cache should not
                        # vary on it. Otherwise if not forced, cache should
                        # respect Accept-Encoding client header
                        vary = self.getHeader('Vary')
                        if vary is None or 'Accept-Encoding' not in vary:
                            self.appendHeader('Vary', 'Accept-Encoding')
        return self
def enableHTTPCompression(self, REQUEST={}, force=0, disable=0, query=0):
"""Enable HTTP Content Encoding with gzip compression if possible
REQUEST -- used to check if client can accept compression
force -- set true to ignore REQUEST headers
disable -- set true to disable compression
query -- just return if compression has been previously requested
returns -- 1 if compression will be attempted, 2 if compression
is forced, 0 if no compression
The HTTP specification allows for transfer encoding and content
encoding. Unfortunately many web browsers still do not support
transfer encoding, but they all seem to support content encoding.
This function is designed to be called on each request to specify
on a request-by-request basis that the response content should
be compressed.
The REQUEST headers are used to determine if the client accepts
gzip content encoding. The force parameter can force the use
of gzip encoding regardless of REQUEST, and the disable parameter
can be used to "turn off" previously enabled encoding (but note
that any existing content-encoding header will not be changed).
The query parameter can be used to determine the if compression
has been previously requested.
In setBody, the major mime type is used to determine if content
encoding should actually be performed.
By default, image types are not compressed.
Additional major mime types can be specified by setting the
environment variable DONT_GZIP_MAJOR_MIME_TYPES to a comma-seperated
list of major mime types that should also not be gzip compressed.
"""
if query:
return self.use_HTTP_content_compression
elif disable:
# in the future, a gzip cache manager will need to ensure that
# compression is off
self.use_HTTP_content_compression = 0
elif (force or
(REQUEST.get('HTTP_ACCEPT_ENCODING', '').find('gzip') != -1)):
if force:
self.use_HTTP_content_compression = 2
else:
self.use_HTTP_content_compression = 1
return self.use_HTTP_content_compression
def _encode_unicode(self, text):
# Fixes the encoding in the XML preamble according
# to the charset specified in the content-type header.
if text.startswith('<?xml'):
pos_right = text.find('?>') # right end of the XML preamble
text = ('<?xml version="1.0" encoding="' +
self.charset +
'" ?>' +
text[pos_right + 2:])
# Encode the text data using the response charset
text = text.encode(self.charset, 'replace')
return text
def _cookie_list(self):
cookie_list = []
for name, attrs in self.cookies.items():
# Note that as of May 98, IE4 ignores cookies with
# quoted cookie attr values, so only the value part
# of name=value pairs may be quoted.
if attrs.get('quoted', True):
cookie = '%s="%s"' % (name, quote(attrs['value']))
else:
cookie = '%s=%s' % (name, quote(attrs['value']))
for name, v in attrs.items():
name = name.lower()
if name == 'expires':
cookie = '%s; Expires=%s' % (cookie, v)
elif name == 'domain':
cookie = '%s; Domain=%s' % (cookie, v)
elif name == 'path':
cookie = '%s; Path=%s' % (cookie, v)
elif name == 'max_age':
cookie = '%s; Max-Age=%s' % (cookie, v)
elif name == 'comment':
cookie = '%s; Comment=%s' % (cookie, v)
elif name == 'secure' and v:
cookie = '%s; Secure' % cookie
# Some browsers recognize this cookie attribute
# | |
ix_, iy, iy_)
print('getCalData. gridpoint 1 position: ', xf[iy_,ix_], yf[iy_,ix_], xp1[iy_,ix_], yp1[iy_,ix_])
print('getCalData. gridpoint 2 position: ', xf[iy ,ix_], yf[iy ,ix_], xp1[iy ,ix_], yp1[iy ,ix_])
print('getCalData. gridpoint 3 position: ', xf[iy ,ix ], yf[iy ,ix ], xp1[iy ,ix ], yp1[iy ,ix ])
print('getCalData. gridpoint 4 position: ', xf[iy_,ix ], yf[iy_,ix ], xp1[iy_,ix ], yp1[iy_,ix ])
#
# exception at outer grid edges:
#
if ((ix == N1-1) ^ (iy == N1-1) ^ (ix_ == 0) ^ (iy_ == 0)):
# select only coefficient with order 4 (or 3 for wheelpos=955)
print("IMPORTANT:")
print("\nanchor point is outside the calibration array: extrapolating all data")
try:
if wheelpos == 955 :
# first order solution
q4 = np.where( c1n.flatten() == 3 )
xf = xf.flatten()[q4]
yf = yf.flatten()[q4]
xp1 = xp1.flatten()[q4]
yp1 = yp1.flatten()[q4]
th = th.flatten()[q4]
c10 = c10.flatten()[q4]
c11 = c11.flatten()[q4]
c12 = c12.flatten()[q4]
c13 = c13.flatten()[q4]
c14 = np.zeros(len(q4[0]))
c1n = c1n.flatten()[q4]
mode = 'bisplines'
# second order solution only when at lower or right boundary
if (ix == N1-1) ^ (iy == 0):
q2 = np.where( c2n.flatten() == 2 )[0]
xp2 = xp2.flatten()[q2]
yp2 = yp2.flatten()[q2]
c20 = c20.flatten()[q2]
c21 = c21.flatten()[q2]
c22 = c22.flatten()[q2]
c2n = c2n.flatten()[q2]
else:
N2 = N1/2
xp2 = np.zeros(N2)
yp2 = np.zeros(N2)
c20 = np.zeros(N2)
c21 = np.zeros(N2)
c22 = np.zeros(N2)
c2n = np.zeros(N2)
else:
q4 = np.where( c1n.flatten() == 4 )
xf = xf.flatten()[q4]
yf = yf.flatten()[q4]
xp1 = xp1.flatten()[q4]
yp1 = yp1.flatten()[q4]
th = th.flatten()[q4]
c10 = c10.flatten()[q4]
c11 = c11.flatten()[q4]
c12 = c12.flatten()[q4]
c13 = c13.flatten()[q4]
c14 = np.zeros(len(q4[0]))
c1n = c1n.flatten()[q4]
xp2 = xp2.flatten()[q4]
yp2 = yp2.flatten()[q4]
c20 = c20.flatten()[q4]
c21 = c21.flatten()[q4]
c22 = c22.flatten()[q4]
c2n = c2n.flatten()[q4]
# find the anchor positions by extrapolation
anker = np.zeros(2)
anker2 = np.zeros(2)
tck1x = interpolate.bisplrep(xf, yf, xp1, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
tck1y = interpolate.bisplrep(xf, yf, yp1, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
tck2x = interpolate.bisplrep(xf, yf, xp1, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
tck2y = interpolate.bisplrep(xf, yf, yp1, xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19,kx=3,ky=3,s=None)
anker[0] = xp1i = interpolate.bisplev(rx,ry, tck1x)
anker[1] = yp1i = interpolate.bisplev(rx,ry, tck1y)
anker2[0] = xp2i = interpolate.bisplev(rx,ry, tck2x)
anker2[1] = yp2i = interpolate.bisplev(rx,ry, tck2y)
# find the angle
tck = interpolate.bisplrep(xf, yf, th,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
thi = interpolate.bisplev(rx,ry, tck)
# find the dispersion
tck = interpolate.bisplrep(xf, yf, c10,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c10i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c11,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c11i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c12,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c12i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c13,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c13i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c14,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c14i = interpolate.bisplev(rx,ry, tck)
if ((ix == N1-1) ^ (iy == 0)):
tck = interpolate.bisplrep(xf, yf, c20,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c20i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c21,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c21i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xf, yf, c22,xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=3,ky=3,s=None)
c22i = interpolate.bisplev(rx,ry, tck)
else:
c20i = c21i = c22i = np.NaN
if chatter > 2:
print('getCalData. bicubic extrapolation ')
print('getCalData. first order anchor position = (%8.1f,%8.1f), angle theta = %7.1f ' % (xp1i,yp1i,thi ))
print('getCalData. dispersion first order = ',c10i,c11i,c12i,c13i,c14i)
if c20i == NaN:
print(" no second order extracted ")
else:
print('getCalData. second order anchor position = (%8.1f,%8.1f) ' % (xp2i,yp2i))
print('getCalData. dispersion second order = ', c20i,c21i, c22i)
except:
print("failed - ABORTING")
raise
return
else:
#
# reduce arrays to section surrounding point
# get interpolated quantities and pass them on
#
if mode == 'bisplines':
# compute the Bivariate-spline coefficients
# kx = ky = 3 # cubic splines (smoothing) and =1 is linear
task = 0 # find spline for given smoothing factor
# s = 0 # 0=spline goes through the given points
# eps = 1.0e-6 (0 < eps < 1)
m = N1*N1
if chatter > 2: print('\n getCalData. splines ')
qx = qy = np.where( (np.isfinite(xrf.reshape(m))) & (np.isfinite(yrf.reshape(m)) ) )
tck1 = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], xp1.reshape(m)[qx],xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
tck2 = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], yp1.reshape(m)[qx],xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
xp1i = interpolate.bisplev(rx,ry, tck1)
yp1i = interpolate.bisplev(rx,ry, tck2)
tck3 = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], th.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
thi = interpolate.bisplev(rx,ry, tck3)
xp2i = 0
yp2i = 0
if chatter > 2: print('getCalData. x,y,theta = ',xp1i,yp1i,thi, ' second order ', xp2i, yp2i)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c10.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c10i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c11.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c11i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c12.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c12i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c13.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c13i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c14.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c14i = interpolate.bisplev(rx,ry, tck)
if chatter > 2: print('getCalData. dispersion first order = ',c10i,c11i,c12i,c13i,c14i)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c20.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c20i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c21.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c21i = interpolate.bisplev(rx,ry, tck)
tck = interpolate.bisplrep(xrf.reshape(m)[qx], yrf.reshape(m)[qy], c22.reshape(m),xb=-0.19,xe=+0.19,yb=-0.19,ye=0.19, kx=kx,ky=ky,s=s)
c22i = interpolate.bisplev(rx,ry, tck)
if chatter > 2: print('getCalData. dispersion second order = ', c20i,c21i, c22i)
#
if mode == 'bilinear':
xp1i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), xp1 ,chatter=chatter)
yp1i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), yp1 ,chatter=chatter)
thi = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), th )# ,chatter=chatter)
c10i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c10 )#,chatter=chatter)
c11i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c11 )#,chatter=chatter)
c12i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c12 )#,chatter=chatter)
c13i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c13 )#,chatter=chatter)
c14i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c14 )#,chatter=chatter)
xp2i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), xp2 )#,chatter=chatter)
yp2i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), yp2 )#,chatter=chatter)
c20i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c20 )#,chatter=chatter)
c21i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c21 )#,chatter=chatter)
c22i = bilinear( rx, ry, xf[0,:].squeeze(), yf[:,0].squeeze(), c22 )#,chatter=chatter)
if chatter > 1:
print('getCalData. bilinear interpolation')
print('getCalData. first order anchor position = (%8.1f,%8.1f), angle theta = %7.1f ' % (xp1i,yp1i,thi ))
print('getCalData. dispersion first order = ',c10i,c11i,c12i,c13i,c14i)
print('getCalData. second order anchor position = (%8.1f,%8.1f) ' % (xp2i,yp2i))
print('getCalData. dispersion second order = ', c20i,c21i, c22i)
if mode == 'interp2d':
x1 = xf[0,:].squeeze()
x2 = yf[:,0].squeeze()
xp1i = interpolate.interp2d(x1,x2,xp1,kind='linear')
#same as bisplines with s=0 and k=1
return
C_1 = np.array([c14i,c13i,c12i,c11i,c10i])
C_2 = np.array([c22i,c21i,c20i])
#
# only theta for the first order is available
cal.close()
anker = np.array([xp1i,yp1i])
anker2 = np.array([xp2i,yp2i])
if chatter > 0:
print('getCalData. anker [DET-pix] = ', anker)
print('getCalData. anker [DET-img] = ', anker - [77+27,77+1])
print('getCalData. second order anker at = ', anker2, ' [DET-pix] ')
return anker, anker2, C_1, C_2, thi, data, msg
def bilinear(x1,x2,x1a,x2a,f,chatter=0):
'''
Given function f(i,j) given as a 2d array of function values at
points x1a[i],x2a[j], derive the function value y=f(x1,x2)
by bilinear interpolation.
requirement: x1a[i] is increasing with i
x2a[j] is increasing with j
20080303 NPMK
'''
import numpy as np
# check that the arrays are numpy arrays
x1a = np.asarray(x1a)
x2a = np.asarray(x2a)
# find the index for sorting the arrays
n1 = len(x1a)
n2 = len(x2a)
x1a_ind = x1a.argsort()
x2a_ind = x2a.argsort()
# make a sorted copy
x1as = x1a.copy()[x1a_ind]
x2as = x2a.copy()[x2a_ind]
# find indices i,j for the square containing (x1, x2)
k1s = x1as.searchsorted(x1)-1
k2s = x2as.searchsorted(x2)-1
# find the indices of the four points in the original array
ki = x1a_ind[k1s]
kip1 = x1a_ind[k1s+1]
kj = x2a_ind[k2s]
kjp1 = x2a_ind[k2s+1]
if chatter > 2:
print('FIND solution in (x,y) = (',x1,x2,')')
print('array x1a[k-5 .. k+5] ',x1a[ki-5:ki+5])
print('array x2a[k-5 .. k+5] ',x2a[kj-5:kj+5])
print('length x1a=',n1,' x2a=',n2)
print('indices in sorted arrays = (',k1s,',',k2s,')')
print('indices in array x1a: ',ki, kip1)
print('indices in array x2a: ',kj, kjp1)
# exception at border:
if ((k1s+1 >= n1) ^ (k2s+1 >= n2) ^ (k1s < 0) ^ (k2s < 0) ):
print('bilinear. point outside grid x - use nearest neighbor ')
| |
'amount': '23.00',
'mark_canceled': False,
})
assert resp.status_code == 400
assert resp.data == {'detail': 'External error: We had trouble communicating with Stripe. Please try again and contact support if the problem persists.'}
with scopes_disabled():
r = order.refunds.last()
assert r.provider == "stripe"
assert r.state == OrderRefund.REFUND_STATE_FAILED
assert r.source == OrderRefund.REFUND_SOURCE_ADMIN
@pytest.mark.django_db
def test_refund_list(token_client, organizer, event, order):
    """The refund list endpoint returns the canonical serialization."""
    url = '/api/v1/organizers/{}/events/{}/orders/{}/refunds/'.format(
        organizer.slug, event.slug, order.code)
    resp = token_client.get(url)
    assert resp.status_code == 200
    assert resp.data['results'] == TEST_REFUNDS_RES
@pytest.mark.django_db
def test_refund_detail(token_client, organizer, event, order):
    """The refund detail endpoint returns the canonical serialization."""
    url = '/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/'.format(
        organizer.slug, event.slug, order.code)
    resp = token_client.get(url)
    assert resp.status_code == 200
    assert resp.data == TEST_REFUNDS_RES[0]
@pytest.mark.django_db
def test_refund_done(token_client, organizer, event, order):
    """An in-transit refund can be marked as done exactly once."""
    url = '/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/done/'.format(
        organizer.slug, event.slug, order.code)
    with scopes_disabled():
        refund = order.refunds.get(local_id=1)
        refund.state = 'transit'
        refund.save()
    resp = token_client.post(url)
    with scopes_disabled():
        refund = order.refunds.get(local_id=1)
    assert resp.status_code == 200
    assert refund.state == OrderRefund.REFUND_STATE_DONE
    # A second attempt on an already-done refund is rejected.
    resp = token_client.post(url)
    assert resp.status_code == 400
@pytest.mark.django_db
def test_refund_process_mark_refunded(token_client, organizer, event, order):
    """Processing an external refund with mark_canceled cancels the order.

    Fix: assert on the refund that is actually processed (local_id=2,
    the external refund created by create_external_refund()), not the
    pre-existing fixture refund local_id=1, whose state is 'done'
    regardless — that made the state assertion vacuous.
    """
    with scopes_disabled():
        p = order.payments.get(local_id=1)
        p.create_external_refund()
    resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/2/process/'.format(
        organizer.slug, event.slug, order.code
    ), format='json', data={'mark_canceled': True})
    with scopes_disabled():
        r = order.refunds.get(local_id=2)
    assert resp.status_code == 200
    assert r.state == OrderRefund.REFUND_STATE_DONE
    order.refresh_from_db()
    assert order.status == Order.STATUS_CANCELED
    # Processing the same refund a second time must fail.
    resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/2/process/'.format(
        organizer.slug, event.slug, order.code
    ), format='json', data={'mark_canceled': True})
    assert resp.status_code == 400
@pytest.mark.django_db
def test_refund_process_mark_pending(token_client, organizer, event, order):
    """Processing an external refund without canceling keeps the order pending.

    Fix: assert on the refund that is actually processed (local_id=2,
    the external refund created by create_external_refund()), not the
    pre-existing fixture refund local_id=1, whose state is 'done'
    regardless — that made the state assertion vacuous.
    """
    with scopes_disabled():
        p = order.payments.get(local_id=1)
        p.create_external_refund()
    resp = token_client.post('/api/v1/organizers/{}/events/{}/orders/{}/refunds/2/process/'.format(
        organizer.slug, event.slug, order.code
    ), format='json', data={'mark_canceled': False})
    with scopes_disabled():
        r = order.refunds.get(local_id=2)
    assert resp.status_code == 200
    assert r.state == OrderRefund.REFUND_STATE_DONE
    order.refresh_from_db()
    assert order.status == Order.STATUS_PENDING
@pytest.mark.django_db
def test_refund_cancel(token_client, organizer, event, order):
    """An in-transit refund can be canceled exactly once."""
    url = '/api/v1/organizers/{}/events/{}/orders/{}/refunds/1/cancel/'.format(
        organizer.slug, event.slug, order.code)
    with scopes_disabled():
        refund = order.refunds.get(local_id=1)
        refund.state = 'transit'
        refund.save()
    resp = token_client.post(url)
    with scopes_disabled():
        refund = order.refunds.get(local_id=1)
    assert resp.status_code == 200
    assert refund.state == OrderRefund.REFUND_STATE_CANCELED
    # Canceling an already-canceled refund is rejected.
    resp = token_client.post(url)
    assert resp.status_code == 400
@pytest.mark.django_db
def test_orderposition_list(token_client, organizer, event, order, item, subevent, subevent2, question):
    """Exercise every supported filter of the order position list endpoint."""
    # Second item: only used as a negative filter target.
    i2 = copy.copy(item)
    i2.pk = None
    i2.save()
    with scopes_disabled():
        var = item.variations.create(value="Children")
        var2 = item.variations.create(value="Children")
        res = dict(TEST_ORDERPOSITION_RES)
        op = order.positions.first()
        op.variation = var
        op.save()
        # Fill in the fixture-dependent primary keys.
        res["id"] = op.pk
        res["item"] = item.pk
        res["variation"] = var.pk
        res["answers"][0]["question"] = question.pk
    # Unfiltered list.
    resp = token_client.get('/api/v1/organizers/{}/events/{}/orderpositions/'.format(organizer.slug, event.slug))
    assert resp.status_code == 200
    assert [res] == resp.data['results']
    # Filter by order status.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?order__status=n'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?order__status=p'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    # Filter by item (single and __in).
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?item={}'.format(organizer.slug, event.slug, item.pk))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?item__in={},{}'.format(
            organizer.slug, event.slug, item.pk, i2.pk
        ))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?item={}'.format(organizer.slug, event.slug, i2.pk))
    assert [] == resp.data['results']
    # Filter by variation.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?variation={}'.format(organizer.slug, event.slug, var.pk))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?variation={}'.format(organizer.slug, event.slug, var2.pk))
    assert [] == resp.data['results']
    # Filter by attendee name (case-insensitive).
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?attendee_name=Peter'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?attendee_name=peter'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?attendee_name=Mark'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    # Filter by ticket secret (exact match only).
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?secret=z3fsn8jyufm5kpk768q69gkbyr5f4h6w'.format(
            organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?secret=abc123'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    # Filter by pseudonymization ID.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?pseudonymization_id=ABCDEFGHKL'.format(
            organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?pseudonymization_id=FOO'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    # Free-text search: matches order code prefix, secret prefix, name —
    # but not a substring in the middle of the secret.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?search=FO'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?search=z3fsn8j'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?search=Peter'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?search=5f4h6w'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    # Filter by order code.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?order=FOO'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?order=BAR'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    # has_checkin filter, before and after creating a check-in.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?has_checkin=false'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?has_checkin=true'.format(organizer.slug, event.slug))
    assert [] == resp.data['results']
    with scopes_disabled():
        cl = event.checkin_lists.create(name="Default")
        op.checkins.create(datetime=datetime.datetime(2017, 12, 26, 10, 0, 0, tzinfo=UTC), list=cl)
    res['checkins'] = [{'datetime': '2017-12-26T10:00:00Z', 'list': cl.pk, 'auto_checked_in': False, 'type': 'entry'}]
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?has_checkin=true'.format(organizer.slug, event.slug))
    assert [res] == resp.data['results']
    # subevent filter (single and __in).
    op.subevent = subevent
    op.save()
    res['subevent'] = subevent.pk
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?subevent={}'.format(organizer.slug, event.slug, subevent.pk))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?subevent__in={},{}'.format(organizer.slug, event.slug,
                                                                                    subevent.pk, subevent2.pk))
    assert [res] == resp.data['results']
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?subevent={}'.format(organizer.slug, event.slug,
                                                                             subevent.pk + 1))
    assert [] == resp.data['results']
    # Canceled positions only show up when explicitly requested.
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?include_canceled_positions=false'.format(organizer.slug, event.slug))
    assert len(resp.data['results']) == 1
    resp = token_client.get(
        '/api/v1/organizers/{}/events/{}/orderpositions/?include_canceled_positions=true'.format(organizer.slug, event.slug))
    assert len(resp.data['results']) == 2
@pytest.mark.django_db
def test_orderposition_detail(token_client, organizer, event, order, item, question):
    """Position detail serialization, including downloads once paid."""
    expected = dict(TEST_ORDERPOSITION_RES)
    with scopes_disabled():
        op = order.positions.first()
    expected["id"] = op.pk
    expected["item"] = item.pk
    expected["answers"][0]["question"] = question.pk
    url = '/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(
        organizer.slug, event.slug, op.pk)
    resp = token_client.get(url)
    assert resp.status_code == 200
    assert resp.data == expected
    # Paid order + enabled PDF ticket output => exactly one download.
    order.status = 'p'
    order.save()
    event.settings.ticketoutput_pdf__enabled = True
    resp = token_client.get(url)
    assert len(resp.data['downloads']) == 1
@pytest.mark.django_db
def test_orderposition_detail_canceled(token_client, organizer, event, order, item, question):
    """Canceled positions 404 unless explicitly requested."""
    with scopes_disabled():
        op = order.all_positions.filter(canceled=True).first()
    url = '/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(
        organizer.slug, event.slug, op.pk)
    resp = token_client.get(url)
    assert resp.status_code == 404
    resp = token_client.get(url + '?include_canceled_positions=true')
    assert resp.status_code == 200
@pytest.mark.django_db
def test_orderposition_delete(token_client, organizer, event, order, item, question):
    """Deleting positions: the last remaining position may not be removed."""
    with scopes_disabled():
        op = order.positions.first()
    resp = token_client.delete('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(
        organizer.slug, event.slug, op.pk
    ))
    # Removing the only position must be rejected.
    assert resp.status_code == 400
    assert resp.data == ['This operation would leave the order empty. Please cancel the order itself instead.']
    with scopes_disabled():
        # Add a second position so one can actually be deleted.
        op2 = OrderPosition.objects.create(
            order=order,
            item=item,
            variation=None,
            price=Decimal("23"),
            attendee_name_parts={"full_name": "Peter", "_scheme": "full"},
            secret="foobar",
            pseudonymization_id="BAZ",
        )
        order.refresh_from_db()
        order.total = Decimal('46')
        order.save()
        assert order.positions.count() == 2
    resp = token_client.delete('/api/v1/organizers/{}/events/{}/orderpositions/{}/'.format(
        organizer.slug, event.slug, op2.pk
    ))
    assert resp.status_code == 204
    with scopes_disabled():
        # The position is soft-deleted: gone from positions, kept in
        # all_positions.
        assert order.positions.count() == 1
        assert order.all_positions.count() == 3
    order.refresh_from_db()
    # Total is recalculated: 23.00 ticket + 0.25 payment fee.
    assert order.total == Decimal('23.25')
@pytest.fixture
def invoice(order):
    """Invoice for ``order``, generated with a frozen timestamp for stable output."""
    frozen_now = datetime.datetime(2017, 12, 10, 10, 0, 0, tzinfo=UTC)
    with mock.patch('django.utils.timezone.now') as mock_now:
        mock_now.return_value = frozen_now
        return generate_invoice(order)
# Expected API serialization of the invoice produced by the `invoice` fixture
# above. Tests copy this dict and compare it against `/invoices/` responses.
TEST_INVOICE_RES = {
    "order": "FOO",
    "number": "DUMMY-00001",
    "is_cancellation": False,
    "invoice_from": "",
    "invoice_to": "Sample company\nNew Zealand\nVAT-ID: DE123",
    "date": "2017-12-10",
    "refers": None,
    "locale": "en",
    "introductory_text": "",
    "internal_reference": "",
    "additional_text": "",
    "payment_provider_text": "",
    "footer_text": "",
    "foreign_currency_display": None,
    "foreign_currency_rate": None,
    "foreign_currency_rate_date": None,
    # One line per order position plus one for the payment fee.
    "lines": [
        {
            "position": 1,
            "description": "Budget Ticket<br />Attendee: Peter",
            "gross_value": "23.00",
            "tax_value": "0.00",
            "tax_name": "",
            "tax_rate": "0.00"
        },
        {
            "position": 2,
            "description": "Payment fee",
            "gross_value": "0.25",
            "tax_value": "0.05",
            "tax_name": "",
            "tax_rate": "19.00"
        }
    ]
}
@pytest.mark.django_db
def test_invoice_list(token_client, organizer, event, order, invoice):
    """List endpoint supports filtering by order, number, locale and cancellation."""
    expected = dict(TEST_INVOICE_RES)
    base = f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/invoices/'

    resp = token_client.get(base)
    assert resp.status_code == 200
    assert [expected] == resp.data['results']

    # Filter by order code.
    assert [expected] == token_client.get(base + '?order=FOO').data['results']
    assert [] == token_client.get(base + '?order=BAR').data['results']

    # Filter by invoice number.
    assert [expected] == token_client.get(base + f'?number={invoice.number}').data['results']
    assert [] == token_client.get(base + '?number=XXX').data['results']

    # Filter by locale.
    assert [expected] == token_client.get(base + '?locale=en').data['results']
    assert [] == token_client.get(base + '?locale=de').data['results']

    # Cancellation invoices are separated from regular ones.
    with scopes_disabled():
        ic = generate_cancellation(invoice)
    assert [expected] == token_client.get(base + '?is_cancellation=false').data['results']

    results = token_client.get(base + '?is_cancellation=true').data['results']
    assert len(results) == 1
    assert results[0]['number'] == ic.number

    # `refers` finds the cancellation referring to the original invoice.
    results = token_client.get(base + f'?refers={invoice.number}').data['results']
    assert len(results) == 1
    assert results[0]['number'] == ic.number

    assert [] == token_client.get(base + f'?refers={ic.number}').data['results']
@pytest.mark.django_db
def test_invoice_detail(token_client, organizer, event, invoice):
    """Detail endpoint returns the full serialized invoice."""
    expected = dict(TEST_INVOICE_RES)
    resp = token_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/invoices/{invoice.number}/'
    )
    assert resp.status_code == 200
    assert expected == resp.data
@pytest.mark.django_db
def test_invoice_regenerate(token_client, organizer, event, invoice):
    """Regenerating rewrites the same invoice from the current address data."""
    with scopes_disabled():
        InvoiceAddress.objects.filter(order=invoice.order).update(company="ACME Ltd")
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/invoices/{invoice.number}/regenerate/'
    )
    assert resp.status_code == 204
    invoice.refresh_from_db()
    assert "ACME Ltd" in invoice.invoice_to
@pytest.mark.django_db
def test_invoice_reissue(token_client, organizer, event, invoice):
    """Reissuing keeps the old invoice untouched and creates a new, updated one."""
    with scopes_disabled():
        InvoiceAddress.objects.filter(order=invoice.order).update(company="ACME Ltd")
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/invoices/{invoice.number}/reissue/'
    )
    assert resp.status_code == 204
    invoice.refresh_from_db()
    # The original invoice keeps the old address ...
    assert "ACME Ltd" not in invoice.invoice_to
    with scopes_disabled():
        # ... while a cancellation + a fresh invoice are added (3 in total).
        assert invoice.order.invoices.count() == 3
        invoice = invoice.order.invoices.last()
        assert "ACME Ltd" in invoice.invoice_to
@pytest.mark.django_db
def test_order_mark_paid_pending(token_client, organizer, event, order):
    """A pending order can be marked as paid."""
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/mark_paid/'
    )
    assert resp.status_code == 200
    assert resp.data['status'] == Order.STATUS_PAID
@pytest.mark.django_db
def test_order_mark_paid_canceled(token_client, organizer, event, order):
    """A canceled order must not be markable as paid."""
    order.status = Order.STATUS_CANCELED
    order.save()
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/mark_paid/'
    )
    assert resp.status_code == 400
    order.refresh_from_db()
    assert order.status == Order.STATUS_CANCELED
@pytest.mark.django_db
def test_order_mark_paid_expired_quota_free(token_client, organizer, event, order, quota):
    """An expired order can be revived as paid while quota is still available."""
    order.status = Order.STATUS_EXPIRED
    order.save()
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/mark_paid/'
    )
    assert resp.status_code == 200
    order.refresh_from_db()
    assert order.status == Order.STATUS_PAID
@pytest.mark.django_db
def test_order_mark_paid_expired_quota_fill(token_client, organizer, event, order, quota):
    """An expired order cannot be marked paid once the quota is exhausted."""
    order.status = Order.STATUS_EXPIRED
    order.save()
    quota.size = 0
    quota.save()
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/mark_paid/'
    )
    assert resp.status_code == 400
    order.refresh_from_db()
    assert order.status == Order.STATUS_EXPIRED
@pytest.mark.django_db
def test_order_mark_paid_locked(token_client, organizer, event, order):
    """While the event is locked, marking paid fails with 409 Conflict."""
    order.status = Order.STATUS_EXPIRED
    order.save()
    with event.lock():
        resp = token_client.post(
            f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/mark_paid/'
        )
        assert resp.status_code == 409
    order.refresh_from_db()
    assert order.status == Order.STATUS_EXPIRED
@pytest.mark.django_db
def test_order_reactivate(token_client, organizer, event, order, quota):
    """A canceled order can be reactivated back to pending."""
    order.status = Order.STATUS_CANCELED
    order.save()
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/reactivate/'
    )
    assert resp.status_code == 200
    assert resp.data['status'] == Order.STATUS_PENDING
@pytest.mark.django_db
def test_order_reactivate_invalid(token_client, organizer, event, order):
    """Reactivating an order that is not canceled is rejected."""
    resp = token_client.post(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/orders/{order.code}/reactivate/'
    )
    assert resp.status_code == 400
@pytest.mark.django_db
def test_order_mark_canceled_pending(token_client, organizer, event, order):
djmail.outbox = []
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/orders/{}/mark_canceled/'.format(
organizer.slug, event.slug, order.code
)
)
assert resp.status_code | |
from enum import Enum
import os
from threading import Thread
from tkinter import (
Frame,
Scrollbar,
StringVar,
Canvas,
Event,
VERTICAL,
TRUE,
FALSE,
RIGHT,
Y,
NW,
LEFT,
BOTH,
Listbox,
SINGLE,
Widget,
END,
TclError,
)
from typing import Any, Optional, Union
import time
import numpy as np
import psutil
from tanager_tcp.tanager_client import TanagerClient
AZIMUTH_HOME = 0  # home position of the azimuth axis — presumably degrees; TODO confirm units
INTERVAL = 0.25  # polling/update interval — presumably seconds; verify against callers
BUFFER = 15  # Time added to timeouts to account for time to read/write files
PI_BUFFER = 20  # like BUFFER, but for the Raspberry Pi — TODO confirm
# These are related to the region of spectra that are sensitive to polarization artifacts. This is at high phase
# angles between 1000 and 1400 nm.
MIN_WAVELENGTH_ARTIFACT_FREE = 1000
MAX_WAVELENGTH_ARTIFACT_FREE = 1400
MIN_G_ARTIFACT_FREE = -20
MAX_G_ARTIFACT_FREE = 40

# Which spectrometer computer is in use; selects NUMLEN below. Note that this
# module also reassigns `computer` further down, *after* NUMLEN is computed.
computer = "new"
NUMLEN = None  # number of digits in the raw data filename. Could change from one version of software to next.
if computer == "old":
    # Number of digits in spectrum number for spec save config
    NUMLEN = 3
elif computer == "desktop":
    # Number of digits in spectrum number for spec save config
    NUMLEN = 5
elif computer == "new":
    # Number of digits in spectrum number for spec save config
    NUMLEN = 5
class ConnectionManager:
    """Owns the TCP clients for the spectrometer and Raspberry Pi computers.

    Tracks whether each endpoint is believed reachable and lazily reconnects
    before sending when a previous attempt found the endpoint offline.
    """

    LISTEN_FOR_PI_PORT = 12345
    LISTEN_FOR_SPEC_PORT = 54321
    REMOTE_PORT = 12345

    def __init__(self, spec_ip="192.168.86.50", pi_ip="raspberrypi"):
        # Assume both endpoints are offline until a connect attempt succeeds.
        self.spec_offline = True
        self.pi_offline = True
        self._spec_ip = spec_ip
        self._pi_ip = pi_ip
        self.spec_client = TanagerClient((spec_ip, self.REMOTE_PORT), self.LISTEN_FOR_SPEC_PORT)
        self.pi_client = TanagerClient((pi_ip, self.REMOTE_PORT), self.LISTEN_FOR_PI_PORT)

    @property
    def spec_ip(self):
        return self._spec_ip

    @spec_ip.setter
    def spec_ip(self, new_ip):
        # A new address needs a fresh client bound to the new host.
        self._spec_ip = new_ip
        self.spec_client = TanagerClient((new_ip, self.REMOTE_PORT), self.LISTEN_FOR_SPEC_PORT)

    @property
    def pi_ip(self):
        return self._pi_ip

    @pi_ip.setter
    def pi_ip(self, new_ip):
        self._pi_ip = new_ip
        self.pi_client = TanagerClient((new_ip, self.REMOTE_PORT), self.LISTEN_FOR_PI_PORT)

    def send_to_spec(self, message: str, connect_timeout=5) -> bool:
        """Send ``message`` to the spectrometer computer; return success."""
        if self.spec_offline and not self.connect_spec(connect_timeout):
            return False
        delivered = self.spec_client.send(message)
        if not delivered:
            # A failed send means the endpoint went away; remember that.
            self.spec_offline = True
        return delivered

    def send_to_pi(self, message: str, connect_timeout=5) -> bool:
        """Send ``message`` to the Raspberry Pi; return success."""
        if self.pi_offline and not self.connect_pi(connect_timeout):
            return False
        delivered = self.pi_client.send(message)
        if not delivered:
            self.pi_offline = True
        return delivered

    def connect_spec(self, timeout: float):
        """Try to (re)connect to the spectrometer; update and return online state."""
        self.spec_offline = not self.spec_client.connect(timeout)
        return not self.spec_offline

    def connect_pi(self, timeout: float):
        """Try to (re)connect to the Raspberry Pi; update and return online state."""
        self.pi_offline = not self.pi_client.connect(timeout)
        return not self.pi_offline
class ConfigInfo:
    """Simple value object bundling config-file locations and platform details."""

    def __init__(self, local_config_loc, global_config_loc, icon_loc, num_len, opsys):
        self.local_config_loc = local_config_loc
        self.global_config_loc = global_config_loc
        self.icon_loc = icon_loc
        # num_len: digits in the raw-data filename's spectrum number.
        self.num_len = num_len
        self.opsys = opsys
class ControllerType:
    """This class, which is extended by Controller, is defined so as to avoid
    circular imports when adding type hints to classes that are imported by
    Controller and also reference an instance of Controller.

    Every attribute defaults to None here and is populated by Controller.
    """

    def __init__(self, connection_tracker, config_info):
        self.connection_tracker = connection_tracker
        self.config_info = config_info
        self.tk_format = None
        self.view_notebook = None
        self.master = None
        # Fix: incidence/azimuth/emission_entries were previously initialized
        # twice in this method; the redundant second set of assignments at the
        # end has been removed.
        self.incidence_entries = None
        self.azimuth_entries = None
        self.emission_entries = None
        self.opt = None
        self.wr = None
        self.min_science_i = None
        self.max_science_i = None
        self.min_science_e = None
        self.max_science_e = None
        self.min_science_az = None
        self.max_science_az = None
        self.check_viewing_geom_for_manual_operation = None
        self.spec_config_count = None
        self.sample_label_entries = None
        self.current_sample_gui_index = None
        self.validate_sample_name = None
        self.log = None
        self.instrument_config_entry = None
        self.manual_automatic = None
        # for plot_manager
        self.plot = None
        self.plotter = None
        self.goniometer_view = None
        # for process_manager
        self.remote_directory_worker = None
        self.process_cmd = None
        self.plot_manager = None
        self.script_running = None
        self.spec_listener = None
        self.spec_commander = None
        self.text_only = None
        self.next_in_queue = None
        # for console
        self.execute_cmd = None
        self.control_frame = None
        self.view_frame = None
        # for cli_manager
        self.set_manual_automatic = None
        self.fail_script_command = None
        self.min_motor_i = None
        self.max_motor_i = None
        self.min_motor_e = None
        self.max_motor_e = None
        self.min_motor_az = None
        self.max_motor_az = None
        self.configure_pi = None
        self.take_spectrum = None
        self.acquire = None
        self.add_geometry = None
        self.set_individual_range = None
        self.individual_range = None
        self.light_start_entry = None
        self.light_end_entry = None
        self.detector_start_entry = None
        self.detector_end_entry = None
        self.azimuth_start_entry = None
        self.azimuth_end_entry = None
        self.light_increment_entry = None
        self.detector_increment_entry = None
        self.azimuth_increment_entry = None
        self.sample_frames = None
        self.available_sample_positions = None
        self.taken_sample_positions = None
        self.remove_sample = None
        self.add_sample = None
        self.set_taken_sample_positions = None
        self.unfreeze = None
        self.spec_save_dir_entry = None
        self.sample_pos_vars = None
        self.spec_basename_entry = None
        self.spec_startnum_entry = None
        self.set_save_config = None
        self.configure_instrument = None
        self.wait_dialog = None
        self.move_tray = None
        self.set_emission = None
        self.set_incidence = None
        self.set_azimuth = None
        self.get_movements = None
        self.console = None
# Which spectrometer computer are you using? This should probably be desktop, but could be 'new' for the new lappy or
# 'old' for the ancient laptop.
# NOTE(review): the "desktop" value below is dead — it is immediately
# overwritten by "new". Also, NUMLEN was already derived from `computer`
# near the top of the module, so reassigning `computer` here does NOT
# change NUMLEN — confirm this ordering is intended.
computer = "desktop"
computer = "new"
def limit_len(input_str, max_len):
    """Truncate ``input_str`` to at most ``max_len`` characters."""
    truncated = input_str[:max_len]
    return truncated
def validate_int_input(input_int: Any, min_int: int, max_int: int):
    """Return True iff ``input_int`` parses as an int within [min_int, max_int]."""
    try:
        value = int(input_int)
    except (ValueError, TypeError):
        # Non-numeric strings raise ValueError; None etc. raise TypeError.
        return False
    return min_int <= value <= max_int
def validate_float_input(input_float: Any, min_float: float, max_float: float):
    """Return True iff ``input_float`` parses as a float within [min_float, max_float].

    Fix: also catch TypeError (e.g. ``float(None)``), which previously
    propagated and crashed the caller — consistent with validate_int_input
    and the TODO noted there.
    """
    try:
        value = float(input_float)
    except (ValueError, TypeError):
        return False
    return min_float <= value <= max_float
def decrypt(encrypted):
    """Split an '&'-delimited message into (command, list-of-parameters)."""
    cmd, *params = encrypted.split("&")
    return cmd, params
def rm_reserved_chars(input_str):
    """Return ``input_str`` with protocol-reserved characters removed.

    The removed character set is identical to the original chain of
    ``.replace()`` calls (which listed "," twice); a single ``str.translate``
    pass replaces the 22 chained string copies.
    """
    reserved = "&+=$^*(,)@!#{}[]|?~\"';`"
    return input_str.translate(str.maketrans("", "", reserved))
def numbers_only(input_str):
    """Return ``input_str`` with every character except ASCII digits removed."""
    # Membership in the literal keeps the original ASCII-only behavior
    # (str.isdigit would also accept Unicode digits).
    return "".join(ch for ch in input_str if ch in "0123456789")
class PretendEvent:
    """Minimal stand-in for a tkinter event: carries widget, width and height."""

    def __init__(self, widget, width, height):
        self.widget = widget
        self.height = height
        self.width = width
class PrivateEntry:
    """Duck-types a tkinter Entry: wraps a fixed string behind ``get()``."""

    def __init__(self, text):
        self.text = text

    def get(self):
        """Return the wrapped text, mirroring ``Entry.get()``."""
        return self.text
class SampleFrame:
    """Placeholder sample frame whose position defaults to the first slot."""

    def __init__(self):
        self.position = "Sample 1"
class TkFormat:
    """Shared tkinter color/spacing settings. Might not work for Macs."""

    def __init__(self, config_info=None):
        self.bg = "#333333"
        self.textcolor = "light gray"
        self.buttontextcolor = "white"
        self.bd = 2
        self.padx = 3
        self.pady = 3
        self.border_color = "light gray"
        self.button_width = 15
        self.buttonbackgroundcolor = "#888888"
        self.highlightbackgroundcolor = "#222222"
        self.entry_background = "light gray"
        # No config yet, or Windows: use the darker highlight color.
        on_windows = config_info is None or config_info.opsys == "Windows"
        self.listboxhighlightcolor = "darkgray" if on_windows else "white"
        self.selectbackground = "#555555"
        self.selectforeground = "white"
        self.check_bg = "#444444"
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
class VerticalScrolledFrame(Frame):
    # Use the 'interior' attribute to place widgets inside the scrollable frame
    # Construct and pack/place/grid normally
    # This frame only allows vertical scrolling
    # pylint: disable = keyword-arg-before-vararg
    def __init__(self, controller, parent, min_height=600, width=468, *args, **kw):
        """Build a Canvas holding a scrollable interior Frame plus a Scrollbar
        that is shown only when the content does not fit."""
        Frame.__init__(self, parent, *args, **kw)
        self.controller = controller
        self.min_height = min_height  # Minimum height for interior frame to show all elements. Changes as new samples
        # or viewing geometries are added.
        # create a canvas object and a vertical scrollbar for scrolling it
        self.scrollbar = Scrollbar(self, orient=VERTICAL)
        self.canvas = canvas = Canvas(self, bd=0, highlightthickness=0, yscrollcommand=self.scrollbar.set)
        canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
        canvas.config(width=width)
        # canvas.config(height=height)
        self.scrollbar.config(command=canvas.yview)
        # reset the view
        canvas.xview_moveto(0)
        canvas.yview_moveto(0)
        # create a frame inside the canvas which will be scrolled with it
        # initialize height to the bigger of 1) screen height 2) 700 px
        self.interior = interior = Frame(canvas)
        interior.pack_propagate(
            0
        )  # This makes it so we can easily manually set the interior frame's size as needed. See _configure_canvas()
        # for how it's done.
        self.interior_id = canvas.create_window(0, 0, window=interior, anchor=NW)
        # Re-run the layout logic whenever the canvas is resized.
        self.canvas.bind("<Configure>", self._configure_canvas)
        self.width = width

    def _configure_canvas(self, event: Optional[Event] = None):
        """Resize the interior frame and show/hide the scrollbar depending on
        whether the canvas is taller than the content's minimum height."""
        # pylint: disable = unused-argument
        if self.canvas.winfo_height() > self.min_height:
            # Everything fits: stretch the interior and hide the scrollbar.
            self.interior.config(height=self.canvas.winfo_height())
            if self.scrollbar.winfo_ismapped():
                self.scrollbar.pack_forget()
        else:
            # Content overflows: pin the interior at min_height and scroll.
            self.interior.config(height=self.min_height)
            try:
                self.scrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
            except TclError:
                # Happens on shutdown if plots are open
                print("TclError configuring scrollbar in VerticalScrolledFrame")
                return
        self.canvas.config(scrollregion=self.canvas.bbox("all"))
        if self.interior.winfo_reqwidth() != self.canvas.winfo_width():
            # update the inner frame's width to fill the canvas
            if self.canvas.winfo_height() < self.min_height:
                # Leave ~20 px of room for the visible scrollbar.
                self.canvas.config(width=self.width - 20)
            else:
                self.canvas.config(width=self.width)
            self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())

    def update(self, controller_resize=True):
        """Re-run the canvas layout; optionally let the controller resize too."""
        self._configure_canvas(None)
        if controller_resize:
            self.controller.resize()
class StringVarWithEntry(StringVar):
    """A tkinter StringVar that also keeps a reference to an Entry widget —
    presumably the widget it is attached to; set by the creating code."""

    def __init__(self):
        super().__init__()
        # Associated Entry widget; None until assigned by the caller.
        self.entry = None
class ScrollableListbox(Listbox):
def __init__(self, frame, bg, entry_background, listboxhighlightcolor, selectmode=SINGLE):
self.scroll_frame = Frame(frame, bg=bg)
self.scroll_frame.pack(fill=BOTH, expand=True)
self.scrollbar = Scrollbar(self.scroll_frame, orient=VERTICAL)
self.scrollbar.pack(side=RIGHT, fill=Y, padx=(0, 10))
self.scrollbar.config(command=self.yview)
super().__init__(
self.scroll_frame,
yscrollcommand=self.scrollbar.set,
selectmode=selectmode,
bg=entry_background,
selectbackground=listboxhighlightcolor,
height=15,
exportselection=0,
)
self.pack(side=LEFT, expand=True, fill=BOTH, padx=(10, 0))
self.bind("<Control-c>", self.copy)
def destroy(self):
self.scrollbar.destroy()
super().destroy()
def copy(self, event=None):
self.clipboard_clear()
all_items = self.get(0, END) # tuple with text of all items in Listbox
sel_idx = self.curselection() # tuple with indexes of selected items
sel_list = [all_items[item] for item in | |
#!/usr/bin/env python3
# Standard modules
import z3 # pip install z3-solver
# Local modules
from . import alloy
from .z3_wrapper import *
class Relation(dict):
    """A Relation.

    This is a Relation within a relational model of the type that herd, Alloy,
    or isla-axiomatic would use. Keys have type `tuple` of `str`, and values
    are boolean expressions (either python `bool` or `z3.BoolRef`).

    Relations are not created directly, but instead are created via
    `RelationBuilder` instances.
    """

    def __init__(self):
        """Creates an empty relation.

        Use a `RelationBuilder` to create a non-empty relation.
        """
        # FIX: the original wrote `super().__init__` without parentheses,
        # which evaluated the bound method but never called it.
        super().__init__()
        # Key-tuple length; fixed by the first element added via `_set()`.
        self.arity = None

    def get(self, k, d=False):
        """Like `dict.get()`, except the default is `False` instead of `None`."""
        return super().get(k, d)

    def _set(self, k, v):
        """An internal API used for actually building relations.

        Not meant to be used from outside the module.
        """
        # Typechecking
        if not isinstance(k, tuple):
            raise TypeError(k)
        if not isinstance(v, z3.BoolRef) and not isinstance(v, bool):
            raise TypeError(v)
        # Make sure the arity of `k` matches the arity of other keys in self
        if self.arity is None:
            self.arity = len(k)
        elif self.arity != len(k):
            raise TypeError(
                f"`Relation` has arity {self.arity}, "
                f"but adding a key of arity {len(k)}"
            )
        # No use adding `v` if it is known to be false.
        if v is True:
            # Convert Python type to z3 type
            super().__setitem__(k, z3.BoolVal(True))
        elif v is not False:
            # v = simplify(v)  # slower, but generates simpler expressions
            if not z3.is_false(v):
                super().__setitem__(k, v)

    def __setitem__(self, k, v):
        """Use `update()` instead."""
        raise Exception("setting elements in a Relation not permitted")

    def __str__(self):
        if not self:
            return "{}"
        s = "{\n"
        for k, v in self.items():
            # Skip entries whose condition simplifies to statically-false.
            v = simplify(v)
            if not false(v):
                s += f" {k}: {' '.join([i.strip() for i in str(v).split()])}\n"
        s += "}"
        return s

    def simplify(self):
        """Return a version of the Relation with all variables simplified"""
        r = Relation()
        for k, v in self.items():
            v = simplify(v)
            r._set(k, v)
        return r

    def contains(self, other):
        """`self` contains all elements of `other`"""
        return other.in_(self)

    def cross(self, other):
        """Cross product of `self` and `other`"""
        r = Relation()
        for k1, v1 in self.items():
            for k2, v2 in other.items():
                r._set(k1 + k2, z3.And(v1, v2))
        return r

    def __eq__(self, other):
        """`self` and `other` are equal (returns a z3 expression, not a bool)"""
        return z3.And(self.contains(other), other.contains(self))

    def iden(self):
        """The identity mapping on elements of `self`"""
        r = Relation()
        for k, v in self.items():
            if len(k) != 1:
                raise TypeError(
                    "`iden()` can be called only on relations of arity 1"
                )
            r._set(k + k, v)
        return r

    def if_(self, c):
        """If `c`, then self; else, an empty relation."""
        r = Relation()
        for k, v in self.items():
            r._set(k, z3.And(c, v))
        return r

    def in_(self, other):
        """`self` is contained within `other`"""
        conjuncts = []
        for k, v in self.items():
            # `other.get(k)` defaults to False, so missing keys force Not(v).
            conjuncts.append(z3.Implies(v, other.get(k)))
        return z3.And(conjuncts)

    def intersect(self, other):
        """The set intersection of `self` and `other`"""
        r = Relation()
        for k, v in self.items():
            if k in other:
                r._set(k, z3.And(v, other.get(k)))
        return r

    def irreflexive(self):
        """`self` is irreflexive: it has no elements `(a, a)` for any `a`"""
        conjuncts = []
        for k, v in self.items():
            if len(k) != 2:
                raise Exception(
                    "`irreflexive()` requires a `Relation` of arity 2"
                )
            if k[0] == k[1]:
                conjuncts.append(z3.Not(v))
        return z3.And(conjuncts)

    def join(self, other):
        """Return the relational join of `self` with `other`.

        Example: if
        ```
        a = {('a', 'b'): v1, ('a', 'c'): v2}
        b = {('b', 'd'): v3, ('c', 'd'): v4, ('a', 'd'): v5}
        ```
        Then:
        ```
        a.join(b) == {('a', 'd'): Or(And(v1, v3), And(v2, v4))}
        ```
        """
        # Preprocess `other` by sorting its elements into a dict, where keys in
        # the dict are the first atom in each key of `other`
        other_dict = {}
        for k, v in other.items():
            other_dict.setdefault(k[0], {})[k[1:]] = v
        # Find all pairs where the last atom of a member of `self` is the same
        # as the first atom of a member of `other`.
        r = RelationBuilder()
        for k1, v1 in self.items():
            for k2, v2 in other_dict.get(k1[-1], {}).items():
                r.update(k1[:-1] + k2, z3.And(v1, v2))
        return r.relation()

    def __ne__(self, other):
        return z3.Not(self == other)

    def no(self):
        """`self` contains no members"""
        return z3.Not(z3.Or(list(self.values())))

    def some(self):
        """`self` contains at least one member"""
        return z3.Or(list(self.values()))

    def __sub__(self, other):
        """Set difference"""
        r = Relation()
        for k, v in self.items():
            v2 = other.get(k)
            # Optimization: for large expressions, the z3 simplify() function
            # can't always statically simplify And(a, Not(a)) into False
            if v == v2:
                continue
            r._set(k, z3.And(v, z3.Not(v2)))
        return r

    def transitive(self):
        """`self` is transitive"""
        return self.join(self).in_(self)

    def transpose(self):
        """The transpose of `self`"""
        r = Relation()
        for k, v in self.items():
            r._set(k[::-1], v)
        return r

    def transitive_closure(self):
        """The transitive closure of self.

        Transitive closure cannot be produced in general with first-order
        logics, but since we are working only with finite relations here, we
        can just calculate it with no issue.
        """
        # Figure out how many atoms are touched by `self`.
        nodes = set()
        for k, v in self.items():
            if len(k) != 2:
                raise Exception(
                    "transitive closure requires a relation of arity 2"
                )
            nodes.add(k[0])
            nodes.add(k[1])
        # self + self.self + self.self.self + ... + self.<n-2 times>.self
        # is sufficient to produce the transitive closure for a set of edges
        # connecting n nodes. (self + self.self)^m for 2^m >= n is a
        # conservative way to produce this with less calculation than manually
        # producing all n join patterns in the first formula.
        n = 1
        r = self
        while n < len(nodes):
            r = r.union(r.join(r))
            n <<= 1
        return r

    def union(self, other):
        """Set union"""
        r = RelationBuilder()
        for k, v in self.items():
            r.update(k, v)
        for k, v in other.items():
            r.update(k, v)
        return r.relation()
class RelationBuilder(dict):
    """A class that produces `Relation`s.

    Each instance starts empty, but elements can be added using `update()`. An
    instance can be converted into a `Relation` using `relation()`.
    """

    def __init__(self):
        """Create an empty RelationBuilder.

        No arguments are needed; members are added using `update()`.
        """
        # Values are stored as *lists* of conditions; `relation()` later folds
        # each list with one z3.Or call, which keeps the Or-nesting shallow
        # for keys that are updated many times.
        super().__init__()
        self.arity = None

    def __setitem__(self, k, v):
        """Use `update()` instead."""
        raise Exception("use `update()` instead of `__setitem__()`")

    def update(self, k, v):
        """If `k` is not already a key in the relation being built within
        `self`, add `k: v` to the relation. If `k` is already a key, then
        replace it with `k: z3.Or(previous_value, v)`.
        """
        # Key must be a tuple of plain strings.
        if not isinstance(k, tuple):
            raise TypeError(f"expected `k` to be a tuple, but got {k}")
        for atom in k:
            if type(atom) != str:
                raise TypeError(
                    "expected each element of `k` to be a str, "
                    f"but got {type(atom)} {atom}"
                )
        # Value must be a boolean expression.
        if not isinstance(v, bool) and not isinstance(v, z3.BoolRef):
            raise TypeError(f"expected `v` to be a boolean type, but got {v}")
        # Arity is fixed by the first key added.
        if self.arity is None:
            self.arity = len(k)
        elif self.arity != len(k):
            raise TypeError(
                f"`RelationBuilder` has arity {self.arity}, "
                f"but adding a key of arity {len(k)}"
            )
        # See note in __init__ about how values are stored
        super().setdefault(k, []).append(v)

    def relation(self):
        """Return the `Relation` built up through `update()` calls."""
        built = Relation()
        for key, conds in self.items():
            # Fold multiple conditions for the same key into a single Or.
            built._set(key, conds[0] if len(conds) == 1 else z3.Or(conds))
        return built
def Singleton(k, v=True):
    """Create a relation with a single element `k`, with condition `v`.

    If Not(v), then the created relation will be empty.
    This can be done with `RelationBuilder`s, but would be more verbose.
    """
    key = (k,) if isinstance(k, str) else k
    rel = Relation()
    rel._set(key, v)
    return rel
################################################################################
class Solution:
"""One valid instance of a solved `RelationalModel`.
Not meant to be created directly by users. Get one by calling `solve()` or
`solutions()` on a `RelationalModel`.
| |
import time
def print_msg_box(msg, indent=1, width=None, title=None):
    """Print message-box with optional title."""
    lines = msg.split('\n')
    pad = " " * indent
    if not width:
        # Default the box width to the longest message line.
        width = max(map(len, lines))
    inner = width + indent * 2
    rows = [f'╔{"═" * inner}╗']                                 # upper border
    if title:
        rows.append(f'║{pad}{title:<{width}}{pad}║')            # title
        rows.append(f'║{pad}{"-" * len(title):<{width}}{pad}║')  # underscore
    rows.extend(f'║{pad}{line:<{width}}{pad}║' for line in lines)
    rows.append(f'╚{"═" * inner}╝')                             # lower border
    print('\n'.join(rows))
class sort:
    """Teaching implementations of classic sorting algorithms.

    Each method sorts the given list in place (ascending), prints
    intermediate states plus the measured runtime, and can optionally print
    an explanatory hint box.
    """

    # bubble sort algorithm
    def bubble_sort(self,arr,hint=False):
        """Bubble-sort ``arr`` in place and return it; print a hint if asked."""
        start = time.time()
        for i in range(len(arr)-1):
            # After each outer pass, the largest remaining value has bubbled
            # to position len(arr)-1-i, so the inner range shrinks.
            for j in range(len(arr)-i-1):
                if arr[j] > arr[j+1] :
                    arr[j],arr[j+1] = arr[j+1],arr[j]
                    print(arr)
        end = time.time()
        print("Bubble Sort Runtime = {}".format(end-start))
        if(hint is True):
            self.bubble_sort_hint()
        return arr

    def bubble_sort_hint(self):
        """Print an explanation box for bubble sort."""
        message ="""
    Bubble Sort
    ------------------------------------
    Purpose : sorting in increasing order
    Method : Bubble Making, Swapping
    Time Complexity: Worst Case - O(n^2)
    Hint :
    Try to kick out the greater value to the rightmost position by using loops
    and value swapping.
    Pseudocode:
    --> for i in [0,length of array]
            for j in [0,length of array - 1]
                if(array[j] > array[i])
                    swap array[j] & array[i]
    Visualization:
    Given Array :
    +-----+-----+-----+
    |  5  |  4  |  3  |
    +-----+-----+-----+
    First Iteration :
    +-----+-----+-----+
    |  4  |  5  |  3  |
    +-----+-----+-----+
    Second Iteration :
    +-----+-----+-----+
    |  4  |  3  |  5  |
    +-----+-----+-----+
    Third Iteration :
    +-----+-----+-----+
    |  3  |  4  |  5  |
    +-----+-----+-----+
    Learn More Here - https://en.wikipedia.org/wiki/Bubble_sort
    """
        print_msg_box(message)

    # selection Sort Algorithm
    def selection_sort(self,arr,hint=False):
        """Selection-sort ``arr`` in place and return it; print a hint if asked."""
        start = time.time()
        for i in range(len(arr)-1):
            # Find the minimum of the unsorted suffix, then swap it into
            # position i.
            minimum = i
            for j in range(i+1,len(arr)):
                if arr[j] < arr[minimum]:
                    minimum = j
            arr[minimum],arr[i] = arr[i],arr[minimum]
            print(arr)
        end = time.time()
        print("Selection Sort Runtime = {}".format(end-start))
        if(hint is True):
            self.selection_sort_hint()
        return arr

    def selection_sort_hint(self):
        """Print an explanation box for selection sort."""
        message ="""
    selection Sort
    ------------------------------------
    Purpose : sorting in increasing order
    Method : Pick Up minimum, swap with minimum
    Time Complexity: Worst Case - O(n^2)
    Hint :
    In every iteration the minimum element from the unsorted subarray is picked and
    moved to the sorted subarray.
    Pseudocode:
    --> for i in [0,length of array]
            minimum = i
            for j in [i+1,length of array]
                if arr[j] < arr[minimum]
                    minimum = j
            swap arr[i] & arr[minimum]
    Visualization:
    Given Array :
    +-----+-----+-----+
    |  5  |  4  |  3  |
    +-----+-----+-----+
    We have two buckets,
    |            |        |            |
    |  Unsorted  |        |   sorted   |
    |            |        |            |
    |   5,4,3    |        |   empty    |
    --------------        --------------
    Select the minimum from the unsorted bucket and put that in sorted bucket
    |            |        |            |
    |  Unsorted  |        |   sorted   |
    |            |        |            |
    |    5,4     |        |     3      |
    --------------        --------------
    Again select the minimum from the unsorted bucket and put that in
    sorted bucket
    |            |        |            |
    |  Unsorted  |        |   sorted   |
    |            |        |            |
    |     5      |        |    3,4     |
    --------------        --------------
    Repeat the same till the unsorted bucket is empty
    |            |        |            |
    |  Unsorted  |        |   sorted   |
    |            |        |            |
    |            |        |   3,4,5    |
    --------------        --------------
    Finally you have the sorted array.
    Learn More Here - https://en.wikipedia.org/wiki/Selection_sort
    """
        print_msg_box(message)
class string_algorithms:
def isUnique(self,input_string,hint=False):
mapp = []
for i in input_string:
if i not in mapp:
mapp.append(i)
if(hint is True):
self.isUnique_hint()
return len(mapp) == len(input_string)
    def isUnique_hint(self):
        """Print an explanatory help box for the unique-character check."""
        # The message text is displayed verbatim by print_msg_box.
        message ="""
Unique Character Checking
------------------------------------
Purpose : checking if all the characters in a given string are unique
Method : list comprehension
Time Complexity: Worst Case - O(n), n = length of the input string
Hint :
How about using the inbuilt list data structure ?
Pseudocode:
--> create an empty list named mapp
--> for i in input string
if i not in mapp
add i to the empty list
--> The string is unique only when the
length of the map after the total
iterations is same as that of the
length of the input string
Visualization:
Given String :
"aabcc"
Empty List:
----------------
| |
----------------
after first iteration :
----------------
| a |
----------------
after second iteration :
----------------
| a |
----------------
[because a was already in the list]
after third iteration :
----------------
| a b |
----------------
Finally :
----------------
| a b c |
----------------
size = 3 which is not equal to length of "aabcc"
Learn More about Lists Below -
https://docs.python.org/3/tutorial/datastructures.html
"""
        print_msg_box(message)
def isPermutation(self,input1,input2,hint=False):
if(hint is True):
self.isPermutation_hint()
if(len(input1)!=len(input2)):
return False
mapp1 = []
mapp2 = []
for i in input1:
mapp1.append(i)
for j in input2:
mapp2.append(j)
mapp1.sort()
mapp2.sort()
return mapp1==mapp2
    def isPermutation_hint(self):
        """Print an explanatory help box for the two-string permutation check."""
        # The message text is displayed verbatim by print_msg_box.
        message = """
Two String Permutations
------------------------------------
Purpose : checking if one string is consisting of the permutation of
the characters in the other string
Method : list comprehension
Time Complexity: Worst Case - O(n), n = length of the strings
Hint :
How about using two inbuilt list data structure ?
Pseudocode:
--> check if length(string1) != len(string2)
return False
--> create two empty lists named mapp1 & mapp2
--> for i in input string 1
add i to mapp1
--> for i in input string 2
add i to mapp2
--> sort mapp1
--> sort mapp2
--> return true if mapp1 and mapp2 are equal
Visualization:
Given Two String :
"aabcc"
"abcac"
Two Empty List:
List 1 List 2
---------------- ----------------
| | | |
---------------- ----------------
After Filling Lists :
List 1 List 2
---------------- ----------------
| a a b c c | | a b c a c |
---------------- ----------------
Applying sort function :
List 1 List 2
---------------- ----------------
| a a b c c | | a a b c c |
---------------- ----------------
Final check :
------------------ +------+
| List 1 == List 2 | -------> | True |
------------------ +------+
Learn More about Lists Below -
https://docs.python.org/3/tutorial/datastructures.html
"""
        print_msg_box(message)
def URLify(self,input_str,key,hint=False):
if(hint is True):
self.URLify_hint()
input2 = ""
for i in range(len(input_str)):
if(input_str[i] != ' '):
input2+=input_str[i]
elif((input_str[i]==' ') and (input_str[i+1] == ' ')):
return input2
elif((input_str[i]==' ') and (input_str[i+1] != ' ')):
input2 += key
return input2
    def URLify_hint(self):
        """Print an explanatory help box for the URLify transformation."""
        # The message text is displayed verbatim by print_msg_box.
        message = """
Making a URL From a String
------------------------------------
Purpose : Making a URL by replacing the spaces with a key value entered
by the user
Method : string manipulation
Time Complexity : Worst Case - O(n), n = length of the string
Hint :
Take a blank string, and add data from the input string to the blank
string to prepare the final URL
Pseudocode :
--> Take a blank string s2
--> for i in [0,length of input string]
if(not a whitespace)
add to s2
elif(whitespace and next place is also whitespace)
return s2
elif(whitespace and next place not whitespace)
add the key value to the blank string
Visualization:
Given String To Make URL :
"Python is love"
Key : "%20"
Break The Given String : /*/ ----> whitespace
+--------+-------+----+-------+------+
| Python | /*/ | is | /*/ | love |
+--------+-------+----+-------+------+
^ ^ ^
^ ^ ^
^ ^ ^
1 2 3
We will take 1, 2 and 3 sucessively and in place of whitespaces we will
concatenate the key value.
Empty String Addition :
+-+ +--------+ +-------+ +----+ +-------+ +------+
| | + | Python | + | %20 | + | is | + | %20 | + | love |
+-+ +--------+ +-------+ +----+ +-------+ +------+
Learn More about String Concatenation Below -
https://en.wikipedia.org/wiki/Concatenation
"""
        print_msg_box(message)
def isPalindromicPermutation(self,input1,hint=False):
if(hint is True):
self.isPalindromicPermutation_hint()
mapp = {}
for i in range(len(input1)):
key = input1[i]
if(key in mapp.keys()):
mapp[key] += 1
else:
mapp.update({key:1})
flag = 0
for i in mapp.keys():
if(mapp[i] %2 == 1):
flag+=1
return flag<=1
def isPalindromicPermutation_hint(self):
message = """
Palindromic Permutation
------------------------------------
Purpose :To check if the permutation of the characters in a string can
make it palindromic
Method : string manipulation, palindromic behaviour
Time | |
<reponame>peterpwang/abstracttemplate
#from bs4 import BeautifulSoup
import argparse
import sys
import os
import random
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import stanza
from util import read_text
# public variables
debug = 0
skip_extraction = 1
skip_upos = 1
skip_tfidf = 1
skip_common_word = 1
skip_first_sentence = 1
sentence_max_word_count = 75
sentence_min_word_count = 6
def read_original_data(html_data_dir, text_data_dir, upos_data_dir, tfidf_data_dir, input_common_word_dir, common_word_dir, first_sentence_dir):
    """Drive the corpus-preprocessing pipeline end to end.

    Stages: HTML extraction -> NER/UPOS tagging -> (disabled TFIDF) ->
    common-word filtering -> first-sentence extraction.  Each stage can be
    skipped via the module-level skip_* flags, in which case its cached
    output file is read instead of being recomputed.
    """
    global skip_extraction
    global skip_upos
    global skip_tfidf
    global skip_common_word
    # BUGFIX: was 'global skip_first_sentense' (typo); the flag consulted
    # below is skip_first_sentence.
    global skip_first_sentence
    # -----------------------
    if skip_extraction == 0:
        # Read text from html files and save into a list.
        print("Extracting...", flush=True)
        lines = extract_text(html_data_dir, text_data_dir)
        print(str(len(lines)) + " extracted.", flush=True)
        # Split and save text into text files
        text_split(lines, text_data_dir)
        print("Datasets created.", flush=True)
    else:
        print("Reading...", flush=True)
        lines = read_text(text_data_dir + "/data.txt")
        print(str(len(lines)) + " read from dataset.", flush=True)
    # -----------------------
    if skip_upos == 0:
        print("Tagging...", flush=True)
        lines = create_upos(lines, upos_data_dir + "/data_upos.txt", upos_data_dir + "/data_upos.html")
        print("UPOS tags created.", flush=True)
    else:
        print("UPOS tags reading...", flush=True)
        lines = read_text(upos_data_dir + "/data_upos.txt")
        print(str(len(lines)) + " UPOS tagged read from dataset.", flush=True)
    # -----------------------
    # Create TFIDF text and split and save text into text files
    # (stage currently disabled)
    #if skip_tfidf == 0:
    #    print("TFIDF calculating...", flush=True)
    #    lines = create_tfidf(lines, tfidf_data_dir)
    #
    #    # Split and save text into text files
    #    text_split(lines, tfidf_data_dir)
    #    print(str(len(lines)) + " TFIDF calculated.", flush=True)
    #else:
    #    print("TFIDF reading...", flush=True)
    #    lines = read_text(tfidf_data_dir + "/data_tfidf.txt")
    #    print(str(len(lines)) + " TFIDF read from dataset.", flush=True)
    # -----------------------
    # Create text contains only common words and split and save text into text files
    if skip_common_word == 0:
        print("Common word filtering...", flush=True)
        lines = common_word_filter(lines, input_common_word_dir, common_word_dir)
        # Split and save text into text files
        text_split(lines, common_word_dir)
        print(str(len(lines)) + " Common word filtered.", flush=True)
    else:
        print("Common word result reading...", flush=True)
        lines = read_text(common_word_dir + "/data_common_word.txt")
        print(str(len(lines)) + " Common word result read from dataset.", flush=True)
    # -----------------------
    # Create first sentence text and split and save text into text files
    if skip_first_sentence == 0:
        print("Extracting first sentence...", flush=True)
        lines, lines_with_origin = create_first_sentence(lines, first_sentence_dir, common_word_dir)
        # Sort by word counts
        lines = output_sorted_sentence_by_words_count(lines, first_sentence_dir + "/data_sorted_by_len.txt")
        # Sort by word counts (version that keeps the original words)
        lines_with_origin = output_sorted_sentence_by_words_count(lines_with_origin, first_sentence_dir + "/data_sorted_by_len_origin.txt")
        # Split and save text into text files
        text_split(lines, first_sentence_dir)
        print(str(len(lines)) + " first sentence text extracted.", flush=True)
def extract_text(html_data_dir, text_data_path):
    """Convert every .html file under html_data_dir to text; save and return the lines.

    Writes one document per line to <text_data_path>/data.txt.
    FIX: the output file is now closed via a context manager even on error.
    """
    global debug
    lines = []
    number_htmls = 0
    # Read text from html files and save into a list.
    for subdir, dirs, files in os.walk(html_data_dir):
        for filename in files:
            filepath = subdir + os.sep + filename
            if filepath.endswith(".html"):
                lines.append(convert_html_to_text(filepath))
                number_htmls += 1
                # progress dots every 1000 files in debug mode
                if debug == 1 and number_htmls % 1000 == 0:
                    print(".", end='', flush=True)
    # Persist the extracted text, one document per line.
    with open(text_data_path + "/data.txt", 'w') as f:
        for line in lines:
            f.write(line)
            f.write("\n")
    return lines
def text_split(lines, text_path):
    """Shuffle lines and split them 80/20 into train.txt and test.txt under text_path.

    NOTE(review): random.shuffle mutates the caller's list in place — confirm
    callers do not rely on the original order afterwards.
    FIX: files are now closed via context managers; removed the unused
    number_test local and stray semicolons.
    """
    number_lines = len(lines)
    random.shuffle(lines)
    number_train = int(number_lines * 0.8)
    with open(text_path + "/train.txt", 'w') as f:
        for i in range(0, number_train):
            f.write(lines[i] + "\n")
    #f = open(text_path + "/validation.txt", 'w')
    #for i in range(number_train, number_train + number_validation):
    #    f.write(lines[i] + "\n")
    #f.close()
    with open(text_path + "/test.txt", 'w') as f:
        for i in range(number_train, number_lines):
            f.write(lines[i] + "\n")
def sort_by_wordscount_alphabetically(sentence):
    """Prefix sentence with its zero-padded word count for count-then-alpha sorting."""
    word_total = len(sentence.split(" "))
    return "{:05d}.{}".format(word_total, sentence)
def output_sorted_sentence_by_words_count(lines, text_path):
    """Sort lines by word count (then alphabetically), drop out-of-bounds lines,
    write the survivors to text_path and return them.

    Bounds come from the module-level sentence_min_word_count /
    sentence_max_word_count settings.
    FIX: sorted() copy instead of slice+sort, filtering via a comprehension,
    and the output file is closed via a context manager.
    """
    global sentence_max_word_count
    global sentence_min_word_count
    # Sort without mutating the caller's list
    lines_sorted = sorted(lines, key=sort_by_wordscount_alphabetically)
    # Keep only sentences within the configured word-count bounds
    lines_new = [line for line in lines_sorted
                 if sentence_min_word_count <= len(line.split(" ")) <= sentence_max_word_count]
    with open(text_path, 'w') as f:
        for line in lines_new:
            f.write(line + "\n")
    return lines_new
def create_upos(lines, upos_text_file, upos_html_file):
    """Run stanza NER over lines; write an HTML sample and a tagged text file.

    Entities are emitted as 'NNNN[[[token]]] ' in the text output; the first
    five documents are also rendered to upos_html_file with entities in green
    for eyeballing.  Returns the tagged lines as a list.
    FIX: both output files are now closed via context managers; removed the
    unused previous_ner bookkeeping.
    """
    global debug
    #stanza.download('en')
    nlp = stanza.Pipeline(lang='en', processors='tokenize,ner')
    # Tag every line — this is the slow part.
    docs = []
    for count, line in enumerate(lines, start=1):
        docs.append(nlp(line))
        # progress dots every 1000 lines in debug mode
        if debug == 1 and count % 1000 == 0:
            print(".", end='', flush=True)
    # Output 5 documents into sample HTML file
    with open(upos_html_file, 'w') as f:
        f.write("<!DOCTYPE html>\n")
        f.write("<html lang=\"en\">\n")
        f.write(" <head>\n")
        f.write(" <meta charset=\"utf-8\">\n")
        f.write(" <style>\n")
        f.write(" .entity { color: green; }\n")
        f.write(" </style>\n")
        f.write(" </head>\n")
        f.write(" <body>\n")
        for doc in docs[:5]:
            for sentence in doc.sentences:
                for token in sentence.tokens:
                    if token.ner != "O":
                        f.write('<span class="entity">' + token.text + '</span> ')
                    else:
                        f.write(token.text + ' ')
                f.write("<br>\n")
        f.write(" </body>\n")
        f.write("</html>\n")
        f.write("\n")
    # Export documents into plain text and return in list
    ner_list = []
    with open(upos_text_file, 'w') as f:
        for doc in docs:
            s = ""
            for sentence in doc.sentences:
                for token in sentence.tokens:
                    if token.ner != "O":
                        tagged = 'NNNN[[[' + token.text + ']]] '
                        f.write(tagged)
                        s += tagged
                    else:
                        f.write(token.text + ' ')
                        s += token.text + ' '
            f.write("\n")
            ner_list.append(s)
    return ner_list
def create_tfidf(lines_origin, tfidf_text_path):
    """Tag high-TFIDF words as 'RRRR[[[word]]]' and write the result to data_tfidf.txt.

    lines_origin: lines whose tokens may carry origin markup; get_symbol()
    presumably strips it for scoring and get_original() restores it for
    output — TODO confirm against util helpers.
    Returns the rewritten lines.
    """
    # Create a new text array with the original text removed
    lines = []
    for line_origin in lines_origin:
        words_origin = line_origin.split(" ")
        line = ""
        for word_with_origin in words_origin:
            word = get_symbol(word_with_origin)
            line = line + word + " "
        lines.append(line)
    # Calculate TFIDF
    vectorizer = TfidfVectorizer(stop_words='english',
                                 #min_df=5, max_df=.5,
                                 ngram_range=(1,1))
    tfidf = vectorizer.fit_transform(lines)
    #print(tfidf)
    # Get features and index
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
    # newer versions need get_feature_names_out().
    features = vectorizer.get_feature_names()
    # NOTE(review): 'indices' is only used by the commented-out top-N code below.
    indices = np.argsort(vectorizer.idf_)[::-1]
    lines_tfidf = []
    f = open(tfidf_text_path + "/data_tfidf.txt", 'w')
    # Replace words with TFIDF < 0.05 with RRRR
    # NOTE(review): the condition below actually tags words scoring > 0.05 —
    # the comment above appears inverted; confirm intent.
    doc = 0
    for line in lines:
        feature_index = tfidf[doc,:].nonzero()[1]
        tfidf_scores = dict(zip([features[x] for x in feature_index], [tfidf[doc, x] for x in feature_index]))
        line_tfidf = ""
        words = line.split()
        words_origin = lines_origin[doc].split()
        previous_tfidf = False
        i = 0
        for w in words:
            # Get the original word
            word = words_origin[i]
            if w in tfidf_scores and tfidf_scores[w] > 0.05:
                #if not previous_tfidf:
                word = get_original(words_origin[i])
                f.write("RRRR[[[" + word + "]]] ");
                line_tfidf = line_tfidf + "RRRR[[[" + word + "]]] "
                previous_tfidf = True
            else:
                f.write(word + " ");
                line_tfidf = line_tfidf + word + " "
                previous_tfidf = False
            i = i + 1
        f.write("\n");
        lines_tfidf.append(line_tfidf)
        doc = doc + 1
    f.close()
    # Get the top 20 features
    #top_n = 20
    #top_features = [features[i] for i in indices[:top_n]]
    return lines_tfidf
def common_word_filter(lines, input_common_word_dir, output_common_word_dir):
    """Replace words not in the common-word list with the CCCC placeholder.

    Writes two files under output_common_word_dir: data_common_word.txt with
    bare CCCC placeholders, and data_common_word_origin.txt keeping the
    original word as CCCC[[[word]]].  Returns the placeholder-only lines.
    FIX: output files are closed via context managers; line assembly uses
    ' '.join instead of quadratic string concatenation; removed the unused
    previous_rare_word flag and a stray semicolon.
    """
    # Read common words into a set for O(1) membership tests
    common_word_list = read_text(input_common_word_dir + "/common_word.txt")
    common_word_set = set(common_word_list)
    # Filter out words not in common word list
    lines_new = []
    lines_new_with_origin = []
    for line in lines:
        parts = []
        parts_with_origin = []
        for word in line.split(" "):
            if word.lower() in common_word_set or word in [',', '.', '?']:
                parts.append(word)
                parts_with_origin.append(word)
            elif word != "":
                # rare word: keep a placeholder (and the original, bracketed)
                parts.append("CCCC")
                parts_with_origin.append("CCCC[[[" + get_original(word) + "]]]")
        lines_new.append(" ".join(parts))
        lines_new_with_origin.append(" ".join(parts_with_origin))
    # Write to a file
    with open(output_common_word_dir + "/data_common_word.txt", 'w') as f:
        for line_new in lines_new:
            f.write(line_new + "\n")
    # Write to a file with origin
    with open(output_common_word_dir + "/data_common_word_origin.txt", 'w') as f:
        for line_with_origin in lines_new_with_origin:
            f.write(line_with_origin + "\n")
    return lines_new
def create_first_sentence(lines, first_sentence_text_path, common_word_dir):
global debug
# parse the text
nlp = stanza.Pipeline(lang='en', processors='tokenize')
docs = []
i = 0
for line in lines:
doc = nlp(line)
docs.append(doc)
i = i + 1
if debug == 1 and i%1000 == 0:
print(".", end = '', flush=True)
# Save first sentence
lines_first_sentence = []
f = open(first_sentence_text_path + "/data_first_sentence.txt", 'w')
for doc in docs:
for sentence in doc.sentences:
lines_first_sentence.append(sentence.text)
f.write(sentence.text)
f.write("\n")
break
f.close()
#lines_first_sentence = read_text(first_sentence_text_path + "/data_first_sentence.txt")
# Create first sentence with origin text
lines_with_origin = read_text(common_word_dir + "/data_common_word_origin.txt")
i = 0
| |
interpolator.y
def pointwise(x):
# If new prob is smaller than smallest prob in function
if x < xs[0]:
return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
# If new prob is larger than largest prob in function
elif x > xs[-1]:
return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
# If prob falls within set range of prob in function
else:
return interpolator(x)
def ufunclike(xs):
return np.fromiter(map(pointwise, np.array(xs)), dtype=np.float)
return ufunclike
def compute_rp_change(self, rps, ref_impact, target_impact, rp, min_rp=2, max_rp=1000):
"""
Purpose: Compute how return period protection changes from one impact
distribution to another (e.g. present to future)
Input:
rps: return periods of impacts
ref_impact: set of reference impact
target_impacts: impacts to which protection standard should be mapped
(i.e. year the flood protection should be valid in)
rp, protection standard at reference impacts
"""
#logging.debug('[CBA, compute_rp_change]: start')
# interpolate to estimate impacts at protection level 'rp'
# atleast1_1d = scalar inputs are converted to 1-D arrays. Arrays of higher dimensions are preserved
p = 1. / np.atleast_1d(rps)
# Find the target impact given user-defined protection standard (rp) by running the interp_values function
if target_impact.sum() == 0:
new_prot = np.array([rp])
else:
prot_impact = self.interp_value(rps, ref_impact, rp)
new_prot = self.interp_value(target_impact, rps, prot_impact)
# lookup what the protection standard is within the target impact list
return new_prot
def find_startrp(self, x):
#logging.debug('[CBA, find_startrp]: start')
if pd.isnull(x):
rpstart = np.nan
else:
prot_start_unit = min(self.rps, key=lambda rp: abs(rp - x))
rpstart = "startrp" + str(prot_start_unit).zfill(5)
return rpstart
def find_dimension_v2(self, m, df_lookup, df_cost, user_urb):
#logging.debug('[CBA, find_dimension_v2]: start')
uniStartRPList = [x for x in list(set(df_lookup['startrp'].values)) if pd.notnull(x)]
erp = "endrp" + str(self.prot_fut).zfill(5)
logging.info(erp)
logging.info(str(self.prot_fut))
uniSRPdfList = []
targetColNameList = []
for srp in uniStartRPList:
df_lookup_temp = df_lookup[df_lookup['startrp'] == srp]
uniSRPdfList.append(df_lookup_temp)
targetColName = "_".join([self.scenarios.get(self.scenario)[0], m,
self.scenarios.get(self.scenario)[1], str(self.ref_year), srp, erp])
targetColNameList.append(targetColName)
df = pd.DataFrame(np.transpose([uniStartRPList, targetColNameList, uniSRPdfList]),
columns=['startrp', 'tgtCol', 'df'])
costList = []
for itl in np.arange(0, len(df.index), 1):
logging.info(itl)
df_itl = df['df'].iloc[itl]
tgtCol_itl = df['tgtCol'].iloc[itl]
logging.info(tgtCol_itl)
# if '00000' in tgtCol_itl:
# continue
cost_itl = pd.read_sql_query("SELECT sum({0}) FROM {1} where id in ({2})".format(tgtCol_itl, df_cost,
", ".join(map(str, df_itl[
'FID'].values))),
self.engine).values[0]
####-------------------------
# NEW CODE
if user_urb == None:
ppp_itl, con_itl = pd.read_sql_query(
"SELECT avg(ppp_mer_rate_2005_index) mean_1, avg(construction_cost_index*7) mean_2 FROM lookup_construction_factors_geogunit_108 where fid_aque in ({0}) ".format(
', '.join(map(str, self.fids))), self.engine).values[0]
costList.append((cost_itl * con_itl)/ ppp_itl)
else:
costList.append((cost_itl)/ppp_itl)
####-------------------------
totalCost = sum(costList)
return totalCost
def find_construction(self, m, exposure, user_rur=None, user_urb=None):
"""
Purpose: Calculate the total cost to construction the desired flood protection
Inputs:
m = model
user_cost = user-defined cost per km per m
Output:
cost = total cost of dike
"""
#logging.debug('[CBA, find_construction]: start')
lookup_c = pd.read_sql_query(
"SELECT * FROM lookup_{0} where {1} = '{2}' ".format(self.geogunit, self.geogunit_type, self.geogunit_name),
self.engine, 'id')
lookup_c["FID"] = lookup_c.index
lookup_c["riverine"] = self.prot_pres
logging.info(self.prot_pres)
lookup_c["startrp"] = lookup_c["riverine"].apply(lambda x: self.find_startrp(x))
urb_dimensions = self.find_dimension_v2(m, lookup_c, self.df_urb_all, user_urb)
# Find the Purchasing Power Parity to Market value rate
# Find the local cost to construct the dike ($/km/m)
# If the user did not input a cost, use the local cost and PPP conversion to find total cost. 7 million is a standard factor cost
if user_urb == None:
cost_urb = urb_dimensions * 7e6
cost = cost_urb
else:
cost_urb = urb_dimensions * user_urb * 1e6
cost = cost_urb
return cost
    def average_prot(self, m, year, risk_data_input):
        """Back out the protection standard (return period) implied by a known risk value.

        Scans candidate return periods in ascending order and stops one step
        past the best match, i.e. when the absolute difference between the
        implied expected impact and the observed risk starts growing again.

        m: climate model name (replaced by 'wt' for the 2010 baseline)
        year: year as a string; '2010' selects the historical baseline
        risk_data_input: annual expected-risk series indexed from
            self.implementation_start
        Returns the matching return period, or NaN when there is no impact.
        Raises Error (HTTP 400) when the series is shorter than idx.
        """
        #logging.debug('[CBA, average_prot]: start')
        idx = int(year) - self.implementation_start
        #logging.debug(f'[CBA, average_prot, idx]: {idx} ==> {year} {self.implementation_start}')
        # 2010 is the historical baseline; otherwise use the configured scenario
        clm = "histor" if year == '2010' else self.clim
        sco = "base" if year == '2010' else self.socio
        mdl = "wt" if year == '2010' else m
        # dense grid of candidate return periods to test against
        test_rps = np.linspace(min(self.rps), max(self.rps), 999)
        try:
            assert (len(risk_data_input) >= idx), f"the infrastructure lifetime ({self.infrastructure_life}) MUST be between {2080 - self.implementation_start} - {2100 - self.implementation_start}"
        except AssertionError as e:
            raise Error(message='computation failed: '+ str(e), status=400)
        real_impact = risk_data_input[int(idx)]
        # READ IN REFERENCE IMPACT
        # READ IN RAW DATA
        if real_impact == 0:
            # no impact at all -> protection standard is undefined
            test = np.nan
        else:
            # PULL ONLY CURRENT DATA
            cols = [col for col in sqlalchemy.Table(self.df_urb_agg, self.metadata).columns.keys() if
                    (clm.lower() in col) and (mdl.lower() in col) and (sco.lower() in col) and (year in col)]
            # impact_present = df_urb_agg.loc[geogunit_name, [col for col in df_urb_agg.columns if (clm in col) and (mdl in col) and (sco in col) and (year in col)]]
            impact_present = pd.read_sql_query(
                "SELECT {0} FROM {1} where id ='{2}'".format(', '.join(cols), self.df_urb_agg, self.geogunit_name),
                self.engine).iloc[0]
            # sentinel larger than any plausible diff; shrink until diff grows
            check = 1e25
            for test in test_rps:
                test_impact = self.expected_value(impact_present, self.rps, test, 1e5)
                diff = abs(real_impact - test_impact)
                if diff > check:
                    break
                check = diff
        return test
    def risk_evolution(self, impact_cc, impact_urb, impact_pop, impact_gdp, prot, prot_idx):
        """
        Creates a time series of how annual expected impact evolves through time, assuming given protection standard at some moment in time.
        The protection standard is transformed into protection standards at other moments in time by lookup of the associated
        impact at that protection standard using a climate change only scenario.
        Input:
            years: list of years [N]
            rps: list of return periods [M] (in years)
            impact_cc: list of lists: N lists containing M impacts (for each return period) with only climate change
            impact_urb/impact_pop/impact_gdp: N lists of M impacts (urban
                damage, population, GDP) with climate and socio change
            prot: protection standard at given moment (in years)
            prot_idx: index of year (in array years) at which prot is valid.
        Returns:
            (annual_risk, annual_pop, annual_gdp) — arrays interpolated onto
            self.time_series.
        """
        # determine risk evolution per modelled year
        risk_prot, pop_impact, gdp_impact, prot_levels = [], [], [], []
        #logging.debug('[CBA, risk_evolution]: start')
        for year, imp_cc, imp_urb, imp_pop, imp_gdp in zip(self.years, impact_cc, impact_urb, impact_pop, impact_gdp):
            # translate the protection standard from the reference year
            # (prot_idx) onto this year's climate-change-only impact curve
            prot_trans = self.compute_rp_change(self.rps, impact_cc[prot_idx], imp_cc, prot, min_rp=2,
                                                max_rp=1000)  # i.e. RP_zero
            # compute the expected value risk with the given protection standard
            risk_prot.append(self.expected_value(imp_urb, self.rps, prot_trans, 1e5))
            pop_impact.append(self.expected_value(imp_pop, self.rps, prot_trans, 1e5))
            gdp_impact.append(self.expected_value(imp_gdp, self.rps, prot_trans, 1e5))
            # prot_levels.append(prot_trans[0])
        # Interpolate annual expected risk to get estimate for every year in time series
        # print prot_levels
        risk_func = interp1d(self.years, risk_prot, kind='linear', bounds_error=False,
                             fill_value='extrapolate')  # define interpolation function/relationship
        pop_func = interp1d(self.years, pop_impact, kind='linear', bounds_error=False,
                            fill_value='extrapolate')  # define interpolation function/relationship
        gdp_func = interp1d(self.years, gdp_impact, kind='linear', bounds_error=False,
                            fill_value='extrapolate')  # define interpolation function/relationship
        # prot_func = extrap1d(interp1d(years, prot_levels))
        annual_risk = risk_func(self.time_series)  # Run timeseries through interpolation function
        annual_pop = pop_func(self.time_series)  # Run timeseries through interpolation function
        annual_gdp = gdp_func(self.time_series)  # Run timeseries through interpolation function
        # annual_prot = prot_func(time_series)
        return annual_risk, annual_pop, annual_gdp  # , annual_prot
    def calc_impact(self, m, pt, ptid):
        """Aggregate annual expected risk, population and GDP impacts over all fids.

        m: model name; pt: protection standard; ptid: index of the year at
        which pt is valid.  Returns (annual_risk, annual_pop, annual_gdp).

        NOTE(review): original author's warning retained — this can be
        improved with threads and is where the leak happens; runtime grows
        with the number of fids.
        """
        annual_risk, annual_pop, annual_gdp = 0, 0, 0
        #logging.debug('[CBA, calc_impact]: start')
        # cba_raw = pd.read_sql_query("SELECT {0} FROM {1} where id = {2} ".format(', '.join(columns), inData, inName), self.engine)
        # impact_present = pd.read_sql_query("SELECT {0} FROM {1} where id = {2} ".format(', '.join(cols), inData, inName), self.engine).values[0]
        df_urb = pd.read_sql_query(
            "SELECT * FROM {0} where id in ({1}) ".format(self.df_urb, ', '.join(map(str, self.fids))), self.engine)
        # NOTE(review): the next two format strings start at {1}, so the joined
        # column list passed as the first argument is computed and then ignored
        # ("SELECT *" runs regardless) — confirm whether column filtering was
        # intended here.
        df_pop = pd.read_sql_query("SELECT * FROM {1} where id in ({2}) ".format(', '.join(
            [col for col in sqlalchemy.Table(self.df_pop, self.metadata).columns.keys() if
             (self.clim in col) and (self.socio in col) and (m in col)]), self.df_pop, ', '.join(map(str, self.fids))),
            self.engine)
        df_gdp = pd.read_sql_query("SELECT * FROM {1} where id in ({2}) ".format(', '.join(
            [col for col in sqlalchemy.Table(self.df_gdp, self.metadata).columns.keys() if
             (self.clim in col) and (self.socio in col) and (m in col)]), self.df_gdp, ', '.join(map(str, self.fids))),
            self.engine)
        # Present data = 2010 data
        # impact_present = pd.read_sql_query("SELECT {0} FROM {1} where id = {2} ".format(', '.join(cols), inData, inName), self.engine).values[0]
        # Sum the per-fid evolution curves into aggregate series
        for f in self.fids:
            impact_cc = self.select_impact(m, df_urb, f, "base")
            impact_urb = self.select_impact(m, df_urb, f, self.socio)
            impact_pop = self.select_impact(m, df_pop, f, self.socio)
            impact_gdp = self.select_impact(m, df_gdp, f, self.socio)
            f_risk, f_pop, f_gdp = self.risk_evolution(impact_cc, impact_urb, impact_pop, impact_gdp, pt, ptid)
            annual_risk = annual_risk + f_risk
            annual_pop = annual_pop + f_pop
            annual_gdp = annual_gdp + f_gdp
        return annual_risk, annual_pop, annual_gdp
def precalc_present_benefits(self, model):
"""
Inputs:
prot_start_unit = present_day protection standard (assumed to beong to 0th year in list of years)
Output:
time series of costs and benefits | |
(1025*mckin**16)/mbkin**16 + (144*mckin**18)/mbkin**18)) +
(4*(-49 + (417*mckin**2)/mbkin**2 + (935*mckin**4)/mbkin**4 -
(7247*mckin**6)/mbkin**6 + (2361*mckin**8)/mbkin**8 +
(13385*mckin**10)/mbkin**10 + (16711*mckin**12)/mbkin**12 +
(4863*mckin**14)/mbkin**14 - (4600*mckin**16)/mbkin**16 -
(1018*mckin**18)/mbkin**18 + (162*mckin**20)/mbkin**20)*q_cut)/
mbkin**2 - (4*(-65 + (360*mckin**2)/mbkin**2 + (625*mckin**4)/
mbkin**4 - (1220*mckin**6)/mbkin**6 + (2577*mckin**8)/mbkin**8 +
(1258*mckin**10)/mbkin**10 - (1589*mckin**12)/mbkin**12 -
(3504*mckin**14)/mbkin**14 - (812*mckin**16)/mbkin**16 +
(210*mckin**18)/mbkin**18)*q_cut**2)/mbkin**4 -
(4*(-29 + (101*mckin**2)/mbkin**2 + (1989*mckin**4)/mbkin**4 +
(173*mckin**6)/mbkin**6 - (2343*mckin**8)/mbkin**8 -
(5835*mckin**10)/mbkin**10 - (4455*mckin**12)/mbkin**12 -
(359*mckin**14)/mbkin**14 + (102*mckin**16)/mbkin**16)*q_cut**3)/
mbkin**6 + (2*(-299 + (600*mckin**2)/mbkin**2 + (5761*mckin**4)/
mbkin**4 + (944*mckin**6)/mbkin**6 - (14463*mckin**8)/mbkin**8 -
(14012*mckin**10)/mbkin**10 - (1655*mckin**12)/mbkin**12 +
(972*mckin**14)/mbkin**14)*q_cut**4)/mbkin**8 -
(4*(-113 + (249*mckin**2)/mbkin**2 + (1309*mckin**4)/mbkin**4 -
(833*mckin**6)/mbkin**6 - (3006*mckin**8)/mbkin**8 -
(728*mckin**10)/mbkin**10 + (354*mckin**12)/mbkin**12)*q_cut**5)/
mbkin**10 - (4*(-29 - (268*mckin**2)/mbkin**2 - (177*mckin**4)/
mbkin**4 + (714*mckin**6)/mbkin**6 + (802*mckin**8)/mbkin**8 +
(102*mckin**10)/mbkin**10)*q_cut**6)/mbkin**12 +
(4*(-85 - (195*mckin**2)/mbkin**2 + (167*mckin**4)/mbkin**4 +
(563*mckin**6)/mbkin**6 + (270*mckin**8)/mbkin**8)*q_cut**7)/
mbkin**14 + ((179 + (204*mckin**2)/mbkin**2 - (571*mckin**4)/
mbkin**4 - (552*mckin**6)/mbkin**6)*q_cut**8)/mbkin**16 -
(32*(mbkin**4 - 3*mckin**4)*q_cut**9)/mbkin**22)*rG +
8*mbkin*(-((-1 + mckin**2/mbkin**2)**2*(21 - (236*mckin**2)/mbkin**2 +
(280*mckin**4)/mbkin**4 + (5400*mckin**6)/mbkin**6 -
(43634*mckin**8)/mbkin**8 - (31160*mckin**10)/mbkin**10 +
(14712*mckin**12)/mbkin**12 - (4424*mckin**14)/mbkin**14 -
(1619*mckin**16)/mbkin**16 + (180*mckin**18)/mbkin**18)) +
(2*(51 - (508*mckin**2)/mbkin**2 + (671*mckin**4)/mbkin**4 +
(7066*mckin**6)/mbkin**6 - (40432*mckin**8)/mbkin**8 -
(66010*mckin**10)/mbkin**10 - (36356*mckin**12)/mbkin**12 +
(22310*mckin**14)/mbkin**14 - (4915*mckin**16)/mbkin**16 -
(3242*mckin**18)/mbkin**18 + (405*mckin**20)/mbkin**20)*q_cut)/
mbkin**2 - (2*(75 - (535*mckin**2)/mbkin**2 + (234*mckin**4)/
mbkin**4 + (3612*mckin**6)/mbkin**6 - (2692*mckin**8)/mbkin**8 +
(6882*mckin**10)/mbkin**10 + (9870*mckin**12)/mbkin**12 -
(5092*mckin**14)/mbkin**14 - (2799*mckin**16)/mbkin**16 +
(525*mckin**18)/mbkin**18)*q_cut**2)/mbkin**4 +
(2*(-21 + (124*mckin**2)/mbkin**2 + (650*mckin**4)/mbkin**4 -
(4312*mckin**6)/mbkin**6 + (1928*mckin**8)/mbkin**8 +
(728*mckin**10)/mbkin**10 + (7154*mckin**12)/mbkin**12 +
(1396*mckin**14)/mbkin**14 - (255*mckin**16)/mbkin**16)*q_cut**3)/
mbkin**6 + (2*(168 - (595*mckin**2)/mbkin**2 - (1396*mckin**4)/
mbkin**4 + (3013*mckin**6)/mbkin**6 - (1930*mckin**8)/mbkin**8 -
(13117*mckin**10)/mbkin**10 - (3590*mckin**12)/mbkin**12 +
(1215*mckin**14)/mbkin**14)*q_cut**4)/mbkin**8 +
(2*(-147 + (436*mckin**2)/mbkin**2 + (723*mckin**4)/mbkin**4 +
(726*mckin**6)/mbkin**6 + (6157*mckin**8)/mbkin**8 +
(2478*mckin**10)/mbkin**10 - (885*mckin**12)/mbkin**12)*q_cut**5)/
mbkin**10 - (2*(21 + (227*mckin**2)/mbkin**2 + (112*mckin**4)/
mbkin**4 + (1090*mckin**6)/mbkin**6 + (1463*mckin**8)/mbkin**8 +
(255*mckin**10)/mbkin**10)*q_cut**6)/mbkin**12 +
(2*(105 + (140*mckin**2)/mbkin**2 + (248*mckin**4)/mbkin**4 +
(856*mckin**6)/mbkin**6 + (675*mckin**8)/mbkin**8)*q_cut**7)/
mbkin**14 - ((123 + (88*mckin**2)/mbkin**2 + (447*mckin**4)/
mbkin**4 + (690*mckin**6)/mbkin**6)*q_cut**8)/mbkin**16 +
(24*(mbkin**4 + 5*mckin**4)*q_cut**9)/mbkin**22)*rhoD + 60*sB -
(520*mckin**2*sB)/mbkin**2 - (8228*mckin**4*sB)/mbkin**4 +
(63424*mckin**6*sB)/mbkin**6 + (35656*mckin**8*sB)/mbkin**8 -
(82832*mckin**10*sB)/mbkin**10 - (57080*mckin**12*sB)/mbkin**12 -
(98912*mckin**14*sB)/mbkin**14 + (191356*mckin**16*sB)/mbkin**16 -
(30056*mckin**18*sB)/mbkin**18 - (14308*mckin**20*sB)/mbkin**20 +
(1440*mckin**22*sB)/mbkin**22 - (240*q_cut*sB)/mbkin**2 +
(880*mckin**2*q_cut*sB)/mbkin**4 + (28672*mckin**4*q_cut*sB)/mbkin**6 -
(57136*mckin**6*q_cut*sB)/mbkin**8 - (436016*mckin**8*q_cut*sB)/mbkin**10 -
(582320*mckin**10*q_cut*sB)/mbkin**12 - (280240*mckin**12*q_cut*sB)/
mbkin**14 - (229712*mckin**14*q_cut*sB)/mbkin**16 +
(134176*mckin**16*q_cut*sB)/mbkin**18 + (46016*mckin**18*q_cut*sB)/
mbkin**20 - (6480*mckin**20*q_cut*sB)/mbkin**22 + (240*q_cut**2*sB)/
mbkin**4 + (320*mckin**2*q_cut**2*sB)/mbkin**6 - (16896*mckin**4*q_cut**2*sB)/
mbkin**8 - (22800*mckin**6*q_cut**2*sB)/mbkin**10 +
(41120*mckin**8*q_cut**2*sB)/mbkin**12 - (18960*mckin**10*q_cut**2*sB)/
mbkin**14 + (28800*mckin**12*q_cut**2*sB)/mbkin**16 -
(95344*mckin**14*q_cut**2*sB)/mbkin**18 - (40080*mckin**16*q_cut**2*sB)/
mbkin**20 + (8400*mckin**18*q_cut**2*sB)/mbkin**22 +
(240*q_cut**3*sB)/mbkin**6 + (80*mckin**2*q_cut**3*sB)/mbkin**8 -
(23840*mckin**4*q_cut**3*sB)/mbkin**10 - (80912*mckin**6*q_cut**3*sB)/
mbkin**12 - (158912*mckin**8*q_cut**3*sB)/mbkin**14 -
(228848*mckin**10*q_cut**3*sB)/mbkin**16 - (174080*mckin**12*q_cut**3*sB)/
mbkin**18 - (13648*mckin**14*q_cut**3*sB)/mbkin**20 +
(4080*mckin**16*q_cut**3*sB)/mbkin**22 - (600*q_cut**4*sB)/mbkin**8 -
(2480*mckin**2*q_cut**4*sB)/mbkin**10 + (20488*mckin**4*q_cut**4*sB)/
mbkin**12 + (113552*mckin**6*q_cut**4*sB)/mbkin**14 +
(253960*mckin**8*q_cut**4*sB)/mbkin**16 + (257200*mckin**10*q_cut**4*sB)/
mbkin**18 + (41000*mckin**12*q_cut**4*sB)/mbkin**20 -
(19440*mckin**14*q_cut**4*sB)/mbkin**22 + (240*q_cut**5*sB)/mbkin**10 +
(1040*mckin**2*q_cut**5*sB)/mbkin**12 + (1056*mckin**4*q_cut**5*sB)/
mbkin**14 - (39792*mckin**6*q_cut**5*sB)/mbkin**16 -
(110944*mckin**8*q_cut**5*sB)/mbkin**18 - (35232*mckin**10*q_cut**5*sB)/
mbkin**20 + (14160*mckin**12*q_cut**5*sB)/mbkin**22 +
(240*q_cut**6*sB)/mbkin**12 + (2240*mckin**2*q_cut**6*sB)/mbkin**14 +
(5056*mckin**4*q_cut**6*sB)/mbkin**16 + (29104*mckin**6*q_cut**6*sB)/
mbkin**18 + (31664*mckin**8*q_cut**6*sB)/mbkin**20 +
(4080*mckin**10*q_cut**6*sB)/mbkin**22 - (240*q_cut**7*sB)/mbkin**14 -
(2000*mckin**2*q_cut**7*sB)/mbkin**16 - (10496*mckin**4*q_cut**7*sB)/
mbkin**18 - (20560*mckin**6*q_cut**7*sB)/mbkin**20 -
(10800*mckin**8*q_cut**7*sB)/mbkin**22 + (60*q_cut**8*sB)/mbkin**16 +
(440*mckin**2*q_cut**8*sB)/mbkin**18 + (5148*mckin**4*q_cut**8*sB)/mbkin**20 +
(5520*mckin**6*q_cut**8*sB)/mbkin**22 - (960*mckin**4*q_cut**9*sB)/mbkin**22 -
96*sE + (928*mckin**2*sE)/mbkin**2 + (5792*mckin**4*sE)/mbkin**4 -
(54880*mckin**6*sE)/mbkin**6 + (55616*mckin**8*sE)/mbkin**8 -
(48448*mckin**10*sE)/mbkin**10 + (78656*mckin**12*sE)/mbkin**12 +
(33728*mckin**14*sE)/mbkin**14 - (99040*mckin**16*sE)/mbkin**16 +
(20384*mckin**18*sE)/mbkin**18 + (8224*mckin**20*sE)/mbkin**20 -
(864*mckin**22*sE)/mbkin**22 + (432*q_cut*sE)/mbkin**2 -
(2656*mckin**2*q_cut*sE)/mbkin**4 - (22480*mckin**4*q_cut*sE)/mbkin**6 +
(67264*mckin**6*q_cut*sE)/mbkin**8 + (180512*mckin**8*q_cut*sE)/mbkin**10 +
(197120*mckin**10*q_cut*sE)/mbkin**12 + (166816*mckin**12*q_cut*sE)/
mbkin**14 + (84800*mckin**14*q_cut*sE)/mbkin**16 -
(97744*mckin**16*q_cut*sE)/mbkin**18 - (24992*mckin**18*q_cut*sE)/
mbkin**20 + (3888*mckin**20*q_cut*sE)/mbkin**22 - (560*q_cut**2*sE)/
mbkin**4 + (2000*mckin**2*q_cut**2*sE)/mbkin**6 +
(14336*mckin**4*q_cut**2*sE)/mbkin**8 + (1792*mckin**6*q_cut**2*sE)/
mbkin**10 - (20512*mckin**8*q_cut**2*sE)/mbkin**12 -
(23968*mckin**10*q_cut**2*sE)/mbkin**14 + (98176*mckin**12*q_cut**2*sE)/
mbkin**16 + (101504*mckin**14*q_cut**2*sE)/mbkin**18 +
(16592*mckin**16*q_cut**2*sE)/mbkin**20 - (5040*mckin**18*q_cut**2*sE)/
mbkin**22 - (272*q_cut**3*sE)/mbkin**6 - (32*mckin**2*q_cut**3*sE)/mbkin**8 +
(25472*mckin**4*q_cut**3*sE)/mbkin**10 + (46368*mckin**6*q_cut**3*sE)/
mbkin**12 + (54944*mckin**8*q_cut**3*sE)/mbkin**14 -
(1120*mckin**10*q_cut**3*sE)/mbkin**16 + (51072*mckin**12*q_cut**3*sE)/
mbkin**18 + (18016*mckin**14*q_cut**3*sE)/mbkin**20 -
(2448*mckin**16*q_cut**3*sE)/mbkin**22 + (1296*q_cut**4*sE)/mbkin**8 +
(80*mckin**2*q_cut**4*sE)/mbkin**10 - (32080*mckin**4*q_cut**4*sE)/mbkin**12 -
(69200*mckin**6*q_cut**4*sE)/mbkin**14 - (89296*mckin**8*q_cut**4*sE)/
mbkin**16 - (119440*mckin**10*q_cut**4*sE)/mbkin**18 -
(28400*mckin**12*q_cut**4*sE)/mbkin**20 + (11664*mckin**14*q_cut**4*sE)/
mbkin**22 - (944*q_cut**5*sE)/mbkin**10 + (992*mckin**2*q_cut**5*sE)/
mbkin**12 + (9584*mckin**4*q_cut**5*sE)/mbkin**14 +
(19072*mckin**6*q_cut**5*sE)/mbkin**16 + (49520*mckin**8*q_cut**5*sE)/
mbkin**18 + (15776*mckin**10*q_cut**5*sE)/mbkin**20 -
(8496*mckin**12*q_cut**5*sE)/mbkin**22 - (272*q_cut**6*sE)/mbkin**12 -
(3344*mckin**2*q_cut**6*sE)/mbkin**14 - (3424*mckin**4*q_cut**6*sE)/
mbkin**16 - (9312*mckin**6*q_cut**6*sE)/mbkin**18 -
(11408*mckin**8*q_cut**6*sE)/mbkin**20 - (2448*mckin**10*q_cut**6*sE)/
mbkin**22 + (720*q_cut**7*sE)/mbkin**14 + (2720*mckin**2*q_cut**7*sE)/
mbkin**16 + (4640*mckin**4*q_cut**7*sE)/mbkin**18 +
(8608*mckin**6*q_cut**7*sE)/mbkin**20 + (6480*mckin**8*q_cut**7*sE)/
mbkin**22 - (368*q_cut**8*sE)/mbkin**16 - (688*mckin**2*q_cut**8*sE)/
mbkin**18 - (2416*mckin**4*q_cut**8*sE)/mbkin**20 -
(3312*mckin**6*q_cut**8*sE)/mbkin**22 + (64*q_cut**9*sE)/mbkin**18 +
(576*mckin**4*q_cut**9*sE)/mbkin**22 - 3*sqB + (34*mckin**2*sqB)/
mbkin**2 + (893*mckin**4*sqB)/mbkin**4 - (6652*mckin**6*sqB)/
mbkin**6 - (19834*mckin**8*sqB)/mbkin**8 + (34484*mckin**10*sqB)/
mbkin**10 + (5222*mckin**12*sqB)/mbkin**12 - (4768*mckin**14*sqB)/
mbkin**14 - (11203*mckin**16*sqB)/mbkin**16 + (1514*mckin**18*sqB)/
mbkin**18 + (349*mckin**20*sqB)/mbkin**20 - (36*mckin**22*sqB)/
mbkin**22 + (6*q_cut*sqB)/mbkin**2 + (32*mckin**2*q_cut*sqB)/mbkin**4 -
(3202*mckin**4*q_cut*sqB)/mbkin**6 + (4780*mckin**6*q_cut*sqB)/mbkin**8 +
(72536*mckin**8*q_cut*sqB)/mbkin**10 + (107300*mckin**10*q_cut*sqB)/
mbkin**12 + (57856*mckin**12*q_cut*sqB)/mbkin**14 +
(9620*mckin**14*q_cut*sqB)/mbkin**16 - (6142*mckin**16*q_cut*sqB)/
mbkin**18 - (1028*mckin**18*q_cut*sqB)/mbkin**20 +
(162*mckin**20*q_cut*sqB)/mbkin**22 + (10*q_cut**2*sqB)/mbkin**4 -
(250*mckin**2*q_cut**2*sqB)/mbkin**6 + (1892*mckin**4*q_cut**2*sqB)/mbkin**8 +
(3880*mckin**6*q_cut**2*sqB)/mbkin**10 - (7312*mckin**8*q_cut**2*sqB)/
mbkin**12 + (7772*mckin**10*q_cut**2*sqB)/mbkin**14 +
(9004*mckin**12*q_cut**2*sqB)/mbkin**16 + (4712*mckin**14*q_cut**2*sqB)/
mbkin**18 + (662*mckin**16*q_cut**2*sqB)/mbkin**20 -
(210*mckin**18*q_cut**2*sqB)/mbkin**22 - (26*q_cut**3*sqB)/mbkin**6 +
(64*mckin**2*q_cut**3*sqB)/mbkin**8 + (2588*mckin**4*q_cut**3*sqB)/mbkin**10 +
(8856*mckin**6*q_cut**3*sqB)/mbkin**12 + (3320*mckin**8*q_cut**3*sqB)/
mbkin**14 + (4664*mckin**10*q_cut**3*sqB)/mbkin**16 +
(5580*mckin**12*q_cut**3*sqB)/mbkin**18 + (592*mckin**14*q_cut**3*sqB)/
mbkin**20 - (102*mckin**16*q_cut**3*sqB)/mbkin**22 -
(12*q_cut**4*sqB)/mbkin**8 + (410*mckin**2*q_cut**4*sqB)/mbkin**10 -
(2188*mckin**4*q_cut**4*sqB)/mbkin**12 - (10862*mckin**6*q_cut**4*sqB)/
mbkin**14 - (12784*mckin**8*q_cut**4*sqB)/mbkin**16 -
(8026*mckin**10*q_cut**4*sqB)/mbkin**18 - (800*mckin**12*q_cut**4*sqB)/
mbkin**20 + (486*mckin**14*q_cut**4*sqB)/mbkin**22 +
(58*q_cut**5*sqB)/mbkin**10 - (304*mckin**2*q_cut**5*sqB)/mbkin**12 -
(202*mckin**4*q_cut**5*sqB)/mbkin**14 + (1540*mckin**6*q_cut**5*sqB)/
mbkin**16 + (2402*mckin**8*q_cut**5*sqB)/mbkin**18 +
(476*mckin**10*q_cut**5*sqB)/mbkin**20 - (354*mckin**12*q_cut**5*sqB)/
mbkin**22 - (26*q_cut**6*sqB)/mbkin**12 - (62*mckin**2*q_cut**6*sqB)/
mbkin**14 + (8*mckin**4*q_cut**6*sqB)/mbkin**16 - (372*mckin**6*q_cut**6*sqB)/
mbkin**18 - (662*mckin**8*q_cut**6*sqB)/mbkin**20 -
(102*mckin**10*q_cut**6*sqB)/mbkin**22 - (30*q_cut**7*sqB)/mbkin**14 +
(80*mckin**2*q_cut**7*sqB)/mbkin**16 + (344*mckin**4*q_cut**7*sqB)/mbkin**18 +
(568*mckin**6*q_cut**7*sqB)/mbkin**20 + (270*mckin**8*q_cut**7*sqB)/
mbkin**22 + (31*q_cut**8*sqB)/mbkin**16 - (4*mckin**2*q_cut**8*sqB)/
mbkin**18 - (157*mckin**4*q_cut**8*sqB)/mbkin**20 -
(138*mckin**6*q_cut**8*sqB)/mbkin**22 - (8*q_cut**9*sqB)/mbkin**18 +
(24*mckin**4*q_cut**9*sqB)/mbkin**22))*
np.log((mbkin**2 + mckin**2 - q_cut - mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*
mckin**2 + mckin**4 - 2*mbkin**2*q_cut - 2*mckin**2*q_cut + q_cut**2)/
mbkin**4))/(mbkin**2 + mckin**2 - q_cut +
mbkin**2*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*
q_cut - 2*mckin**2*q_cut + q_cut**2)/mbkin**4))) -
(144*mckin**4*(-16*(-((-1 + mckin**2/mbkin**2)**4*(-16 + (84*mckin**2)/
mbkin**2 + (397*mckin**4)/mbkin**4 + (7*mckin**6)/mbkin**6 -
(273*mckin**8)/mbkin**8 + (53*mckin**10)/mbkin**10 +
(108*mckin**12)/mbkin**12)) + ((-1 + mckin**2/mbkin**2)**2*
(-83 + (309*mckin**2)/mbkin**2 + (1688*mckin**4)/mbkin**4 +
(1592*mckin**6)/mbkin**6 - (423*mckin**8)/mbkin**8 -
(1055*mckin**10)/mbkin**10 + (258*mckin**12)/mbkin**12 +
(594*mckin**14)/mbkin**14)*q_cut)/mbkin**2 -
((-158 + (492*mckin**2)/mbkin**2 + (2013*mckin**4)/mbkin**4 +
(1738*mckin**6)/mbkin**6 + (2412*mckin**8)/mbkin**8 +
(120*mckin**10)/mbkin**10 - (2075*mckin**12)/mbkin**12 -
(6*mckin**14)/mbkin**14 + (1224*mckin**16)/mbkin**16)*q_cut**2)/
mbkin**4 + ((-101 + (215*mckin**2)/mbkin**2 + (929*mckin**4)/
mbkin**4 + (641*mckin**6)/mbkin**6 - (688*mckin**8)/mbkin**8 -
(784*mckin**10)/mbkin**10 + (1330*mckin**12)/mbkin**12 +
(918*mckin**14)/mbkin**14)*q_cut**3)/mbkin**6 +
((-80 - (260*mckin**2)/mbkin**2 + (156*mckin**4)/mbkin**4 +
(895*mckin**6)/mbkin**6 + (742*mckin**8)/mbkin**8 - (45*mckin**10)/
mbkin**10 + (540*mckin**12)/mbkin**12)*q_cut**4)/mbkin**8 -
((-179 - (393*mckin**2)/mbkin**2 + (414*mckin**4)/mbkin**4 +
(1540*mckin**6)/mbkin**6 + (1914*mckin**8)/mbkin**8 +
(1602*mckin**10)/mbkin**10)*q_cut**5)/mbkin**10 +
(2*(-61 - (110*mckin**2)/mbkin**2 + (307*mckin**4)/mbkin**4 +
(770*mckin**6)/mbkin**6 + (648*mckin**8)/mbkin**8)*q_cut**6)/
mbkin**12 + ((37 + (37*mckin**2)/mbkin**2 - (366*mckin**4)/mbkin**4 -
(486*mckin**6)/mbkin**6)*q_cut**7)/mbkin**14 +
((-4 + (72*mckin**4)/mbkin**4)*q_cut**8)/mbkin**16)*rE +
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)*((-60*mckin**2*muG**2)/mbkin**2 + (384*mckin**4*muG**2)/
mbkin**4 - (1704*mckin**6*muG**2)/mbkin**6 + (6120*mckin**8*muG**2)/
mbkin**8 - (8700*mckin**10*muG**2)/mbkin**10 + (4080*mckin**12*muG**2)/
mbkin**12 + (96*mckin**14*muG**2)/mbkin**14 - (216*mckin**16*muG**2)/
mbkin**16 - (60*mckin**2*muG*mupi)/mbkin**2 +
(576*mckin**4*muG*mupi)/mbkin**4 - (1296*mckin**6*muG*mupi)/
mbkin**6 - (1320*mckin**8*muG*mupi)/mbkin**8 +
(4500*mckin**10*muG*mupi)/mbkin**10 - (2160*mckin**12*muG*mupi)/
mbkin**12 - (456*mckin**14*muG*mupi)/mbkin**14 +
(216*mckin**16*muG*mupi)/mbkin**16 - (60*mckin**2*muG**2*q_cut)/
mbkin**4 + (876*mckin**4*muG**2*q_cut)/mbkin**6 -
(1128*mckin**6*muG**2*q_cut)/mbkin**8 - (2388*mckin**8*muG**2*q_cut)/
mbkin**10 - (7248*mckin**10*muG**2*q_cut)/mbkin**12 +
(552*mckin**12*muG**2*q_cut)/mbkin**14 + (756*mckin**14*muG**2*q_cut)/
mbkin**16 + (180*mckin**2*muG*mupi*q_cut)/mbkin**4 -
(1236*mckin**4*muG*mupi*q_cut)/mbkin**6 + (408*mckin**6*muG*mupi*q_cut)/
mbkin**8 + (5508*mckin**8*muG*mupi*q_cut)/mbkin**10 +
(4008*mckin**10*muG*mupi*q_cut)/mbkin**12 + (528*mckin**12*muG*mupi*q_cut)/
mbkin**14 - (756*mckin**14*muG*mupi*q_cut)/mbkin**16 +
(120*mckin**2*muG**2*q_cut**2)/mbkin**6 - (360*mckin**4*muG**2*q_cut**2)/
mbkin**8 - (1188*mckin**6*muG**2*q_cut**2)/mbkin**10 +
(2976*mckin**8*muG**2*q_cut**2)/mbkin**12 - (108*mckin**10*muG**2*q_cut**2)/
mbkin**14 - (720*mckin**12*muG**2*q_cut**2)/mbkin**16 -
(120*mckin**2*muG*mupi*q_cut**2)/mbkin**6 + (600*mckin**4*muG*mupi*q_cut**2)/
mbkin**8 - (12*mckin**6*muG*mupi*q_cut**2)/mbkin**10 -
(1296*mckin**8*muG*mupi*q_cut**2)/mbkin**12 -
(612*mckin**10*muG*mupi*q_cut**2)/mbkin**14 +
(720*mckin**12*muG*mupi*q_cut**2)/mbkin**16 + (360*mckin**2*muG**2*q_cut**3)/
mbkin**8 - (1020*mckin**4*muG**2*q_cut**3)/mbkin**10 -
(2868*mckin**6*muG**2*q_cut**3)/mbkin**12 - (3048*mckin**8*muG**2*q_cut**3)/
mbkin**14 - (360*mckin**10*muG**2*q_cut**3)/mbkin**16 -
(120*mckin**2*muG*mupi*q_cut**3)/mbkin**8 + (780*mckin**4*muG*mupi*q_cut**3)/
mbkin**10 + (1668*mckin**6*muG*mupi*q_cut**3)/mbkin**12 +
(2328*mckin**8*muG*mupi*q_cut**3)/mbkin**14 +
(360*mckin**10*muG*mupi*q_cut**3)/mbkin**16 - (540*mckin**2*muG**2*q_cut**4)/
mbkin**10 + (2160*mckin**4*muG**2*q_cut**4)/mbkin**12 +
(3732*mckin**6*muG**2*q_cut**4)/mbkin**14 + (1080*mckin**8*muG**2*q_cut**4)/
mbkin**16 + (180*mckin**2*muG*mupi*q_cut**4)/mbkin**10 -
(1440*mckin**4*muG*mupi*q_cut**4)/mbkin**12 -
(2652*mckin**6*muG*mupi*q_cut**4)/mbkin**14 -
(1080*mckin**8*muG*mupi*q_cut**4)/mbkin**16 + (180*mckin**2*muG**2*q_cut**5)/
mbkin**12 - (1224*mckin**4*muG**2*q_cut**5)/mbkin**14 -
(684*mckin**6*muG**2*q_cut**5)/mbkin**16 - (60*mckin**2*muG*mupi*q_cut**5)/
mbkin**12 + (864*mckin**4*muG*mupi*q_cut**5)/mbkin**14 +
(684*mckin**6*muG*mupi*q_cut**5)/mbkin**16 + (144*mckin**4*muG**2*q_cut**6)/
mbkin**16 - (144*mckin**4*muG*mupi*q_cut**6)/mbkin**16 | |
57:
return 'uvbdiff'
if table2Version == 200 and indicatorOfParameter == 56:
return 'mn2d24diff'
if table2Version == 200 and indicatorOfParameter == 55:
return 'mean2t24diff'
if table2Version == 200 and indicatorOfParameter == 54:
return 'presdiff'
if table2Version == 200 and indicatorOfParameter == 53:
return 'montdiff'
if table2Version == 200 and indicatorOfParameter == 52:
return 'mn2t24diff'
if table2Version == 200 and indicatorOfParameter == 51:
return 'mx2t24diff'
if table2Version == 200 and indicatorOfParameter == 50:
return 'lspfdiff'
if table2Version == 200 and indicatorOfParameter == 49:
return '10fgdiff'
if table2Version == 200 and indicatorOfParameter == 48:
return 'magssdiff'
if table2Version == 200 and indicatorOfParameter == 47:
return 'dsrpdiff'
if table2Version == 200 and indicatorOfParameter == 46:
return 'sdurdiff'
if table2Version == 200 and indicatorOfParameter == 45:
return 'smltdiff'
if table2Version == 200 and indicatorOfParameter == 44:
return 'esdiff'
if table2Version == 200 and indicatorOfParameter == 43:
return 'sltdiff'
if table2Version == 200 and indicatorOfParameter == 42:
return 'swvl4diff'
if table2Version == 200 and indicatorOfParameter == 41:
return 'swvl3diff'
if table2Version == 200 and indicatorOfParameter == 40:
return 'swvl2diff'
if table2Version == 200 and indicatorOfParameter == 39:
return 'swvl1diff'
if table2Version == 200 and indicatorOfParameter == 38:
return 'istl4diff'
if table2Version == 200 and indicatorOfParameter == 37:
return 'istl3diff'
if table2Version == 200 and indicatorOfParameter == 36:
return 'istl2diff'
if table2Version == 200 and indicatorOfParameter == 35:
return 'istl1diff'
if table2Version == 200 and indicatorOfParameter == 34:
return 'sstdiff'
if table2Version == 200 and indicatorOfParameter == 33:
return 'rsndiff'
if table2Version == 200 and indicatorOfParameter == 32:
return 'asndiff'
if table2Version == 200 and indicatorOfParameter == 31:
return 'sicdiff'
if table2Version == 200 and indicatorOfParameter == 30:
return 'tvhdiff'
if table2Version == 200 and indicatorOfParameter == 29:
return 'tvldiff'
if table2Version == 200 and indicatorOfParameter == 28:
return 'cvhdiff'
if table2Version == 200 and indicatorOfParameter == 27:
return 'cvldiff'
if table2Version == 200 and indicatorOfParameter == 26:
return 'cldiff'
if table2Version == 200 and indicatorOfParameter == 25:
return '~'
if table2Version == 200 and indicatorOfParameter == 24:
return '~'
if table2Version == 200 and indicatorOfParameter == 23:
return 'ucdvdiff'
if table2Version == 200 and indicatorOfParameter == 22:
return 'uclndiff'
if table2Version == 200 and indicatorOfParameter == 21:
return 'uctpdiff'
if table2Version == 200 and indicatorOfParameter == 14:
return 'vrtwdiff'
if table2Version == 200 and indicatorOfParameter == 13:
return 'urtwdiff'
if table2Version == 200 and indicatorOfParameter == 12:
return 'vdvwdiff'
if table2Version == 200 and indicatorOfParameter == 11:
return 'udvwdiff'
if table2Version == 200 and indicatorOfParameter == 5:
return 'septdiff'
if table2Version == 200 and indicatorOfParameter == 4:
return 'eqptdiff'
if table2Version == 200 and indicatorOfParameter == 3:
return 'ptdiff'
if table2Version == 200 and indicatorOfParameter == 2:
return 'vpotdiff'
if table2Version == 200 and indicatorOfParameter == 1:
return 'strfdiff'
if table2Version == 190 and indicatorOfParameter == 255:
return '~'
if table2Version == 180 and indicatorOfParameter == 255:
return '~'
if table2Version == 170 and indicatorOfParameter == 255:
return '~'
if table2Version == 160 and indicatorOfParameter == 255:
return '~'
if table2Version == 132 and indicatorOfParameter == 255:
return '~'
if table2Version == 130 and indicatorOfParameter == 255:
return '~'
if table2Version == 128 and indicatorOfParameter == 255:
return '~'
if table2Version == 128 and indicatorOfParameter == 254:
return 'atmw'
if table2Version == 128 and indicatorOfParameter == 253:
return 'atze'
if table2Version == 128 and indicatorOfParameter == 252:
return 'athe'
if table2Version == 128 and indicatorOfParameter == 251:
return 'atte'
if table2Version == 128 and indicatorOfParameter == 250:
return 'ice'
if table2Version == 128 and indicatorOfParameter == 249:
return 'aiw'
if table2Version == 128 and indicatorOfParameter == 248:
return 'cc'
if table2Version == 128 and indicatorOfParameter == 247:
return 'ciwc'
if table2Version == 128 and indicatorOfParameter == 246:
return 'clwc'
if table2Version == 160 and indicatorOfParameter == 245:
return 'flsr'
if table2Version == 128 and indicatorOfParameter == 245:
return 'flsr'
if table2Version == 160 and indicatorOfParameter == 244:
return 'fsr'
if table2Version == 128 and indicatorOfParameter == 244:
return 'fsr'
if table2Version == 128 and indicatorOfParameter == 243:
return 'fal'
if table2Version == 128 and indicatorOfParameter == 242:
return 'alw'
if table2Version == 128 and indicatorOfParameter == 241:
return 'acf'
if table2Version == 128 and indicatorOfParameter == 240:
return 'lsf'
if table2Version == 128 and indicatorOfParameter == 239:
return 'csf'
if table2Version == 160 and indicatorOfParameter == 238:
return 'tsn'
if table2Version == 128 and indicatorOfParameter == 238:
return 'tsn'
if table2Version == 160 and indicatorOfParameter == 237:
return 'swl4'
if table2Version == 128 and indicatorOfParameter == 237:
return 'swl4'
if table2Version == 160 and indicatorOfParameter == 236:
return 'stl4'
if table2Version == 128 and indicatorOfParameter == 236:
return 'stl4'
if table2Version == 160 and indicatorOfParameter == 235:
return 'skt'
if table2Version == 128 and indicatorOfParameter == 235:
return 'skt'
if table2Version == 160 and indicatorOfParameter == 234:
return 'lsrh'
if table2Version == 128 and indicatorOfParameter == 234:
return 'lsrh'
if table2Version == 160 and indicatorOfParameter == 233:
return 'asq'
if table2Version == 128 and indicatorOfParameter == 233:
return 'asq'
if table2Version == 160 and indicatorOfParameter == 232:
return 'ie'
if table2Version == 128 and indicatorOfParameter == 232:
return 'ie'
if table2Version == 128 and indicatorOfParameter == 231:
return 'ishf'
if table2Version == 160 and indicatorOfParameter == 230:
return 'inss'
if table2Version == 128 and indicatorOfParameter == 230:
return 'inss'
if table2Version == 160 and indicatorOfParameter == 229:
return 'iews'
if table2Version == 128 and indicatorOfParameter == 229:
return 'iews'
if table2Version == 190 and indicatorOfParameter == 228:
return 'tp'
if table2Version == 170 and indicatorOfParameter == 228:
return 'tp'
if table2Version == 160 and indicatorOfParameter == 228:
return 'tp'
if table2Version == 128 and indicatorOfParameter == 228:
return 'tp'
if table2Version == 130 and indicatorOfParameter == 227:
return 'crnh'
if table2Version == 128 and indicatorOfParameter == 227:
return 'crnh'
if table2Version == 128 and indicatorOfParameter == 226:
return 'htlc'
if table2Version == 128 and indicatorOfParameter == 225:
return 'htcc'
if table2Version == 128 and indicatorOfParameter == 224:
return 'vdh'
if table2Version == 130 and indicatorOfParameter == 223:
return 'ctmw'
if table2Version == 128 and indicatorOfParameter == 223:
return 'ctmw'
if table2Version == 130 and indicatorOfParameter == 222:
return 'ctzw'
if table2Version == 128 and indicatorOfParameter == 222:
return 'ctzw'
if table2Version == 128 and indicatorOfParameter == 221:
return 'nsgd'
if table2Version == 128 and indicatorOfParameter == 220:
return 'ewgd'
if table2Version == 128 and indicatorOfParameter == 219:
return 'vdmw'
if table2Version == 128 and indicatorOfParameter == 218:
return 'vdzw'
if table2Version == 128 and indicatorOfParameter == 217:
return 'dhlc'
if table2Version == 128 and indicatorOfParameter == 216:
return 'dhcc'
if table2Version == 128 and indicatorOfParameter == 215:
return 'dhvd'
if table2Version == 128 and indicatorOfParameter == 214:
return 'dhr'
if table2Version == 128 and indicatorOfParameter == 213:
return 'vimd'
if table2Version == 128 and indicatorOfParameter == 212:
return 'tisr'
if table2Version == 128 and indicatorOfParameter == 211:
return 'strc'
if table2Version == 128 and indicatorOfParameter == 210:
return 'ssrc'
if table2Version == 128 and indicatorOfParameter == 209:
return 'ttrc'
if table2Version == 128 and indicatorOfParameter == 208:
return 'tsrc'
if table2Version == 128 and indicatorOfParameter == 207:
return '10si'
if table2Version == 128 and indicatorOfParameter == 206:
return 'tco3'
if table2Version == 180 and indicatorOfParameter == 205:
return 'ro'
if table2Version == 128 and indicatorOfParameter == 205:
return 'ro'
if table2Version == 160 and indicatorOfParameter == 204:
return 'paw'
if table2Version == 128 and indicatorOfParameter == 204:
return 'paw'
if table2Version == 128 and indicatorOfParameter == 203:
return 'o3'
if table2Version == 190 and indicatorOfParameter == 202:
return 'mn2t'
if table2Version == 170 and indicatorOfParameter == 202:
return 'mn2t'
if table2Version == 128 and indicatorOfParameter == 202:
return 'mn2t'
if table2Version == 190 and indicatorOfParameter == 201:
return 'mx2t'
if table2Version == 170 and indicatorOfParameter == | |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# Asclepias Broker is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Relationshps ingestion functions."""
import uuid
from typing import Tuple
from invenio_db import db
from sqlalchemy.orm import aliased
from ..models import Group, GroupM2M, GroupMetadata, GroupRelationship, \
GroupRelationshipM2M, GroupRelationshipMetadata, GroupType, Identifier, \
Identifier2Group, Relation, Relationship, Relationship2GroupRelationship
def merge_group_relationships(group_a, group_b, merged_group):
    """Merge the relationships of merged groups A and B to avoid collisions.

    Groups 'group_a' and 'group_b' will be merged as 'merged_group'.
    This function takes care of moving any duplicate group relations, e.g.:

    If we have 4 relations:
        A Cites X
        B Cites X
        Y Cites A
        Y Cites B
    and we merge groups A and B, we also need to squash the first two and last
    two relations together:
        {AB} Cites X
        Y Cites {AB}

    before we can perform the actual merging of A and B. Otherwise we will
    violate the unique constraint. We do that by removing the duplicate
    relationships (only one of each duplicate pair), so that we can later
    execute an UPDATE.

    :param group_a: first :class:`Group` being merged.
    :param group_b: second :class:`Group` being merged.
    :param merged_group: the :class:`Group` that replaces A and B; all
        surviving relationships are re-pointed to its id.
    """
    # Determine if this is an Identity-type group merge; only Identity groups
    # carry GroupRelationshipMetadata and Relationship2GroupRelationship rows.
    identity_groups = group_a.type == GroupType.Identity
    # Remove all GroupRelationship objects between groups A and B
    # (an A<->B relation would become a self-relation of the merged group).
    # Corresponding GroupRelationshipM2M objects will cascade.
    (
        GroupRelationship.query.filter(
            ((GroupRelationship.source_id == group_a.id) &
             (GroupRelationship.target_id == group_b.id)) |
            ((GroupRelationship.source_id == group_b.id) &
             (GroupRelationship.target_id == group_a.id)))
        .delete(synchronize_session='fetch')
    )
    # We need to execute the same group relation merging twice, first for the
    # 'outgoing' relations ('A Cites X' + 'B Cites X' = 'AB Cites X'), and then
    # for the 'incoming' edges ('Y Cites A' + 'Y Cites B' = 'Y Cites AB').
    # Instead of repeating the code twice, we parametrize it as seen below
    merge_groups_ids = [group_a.id, group_b.id]
    for queried_fk, grouping_fk in [('source_id', 'target_id'),
                                    ('target_id', 'source_id'), ]:
        # Self-join aliases: 'left_gr' will come from group A's side and
        # 'right_gr' from group B's side (or vice versa).
        left_gr = aliased(GroupRelationship, name='left_gr')
        right_gr = aliased(GroupRelationship, name='right_gr')
        left_queried_fk = getattr(left_gr, queried_fk)
        right_queried_fk = getattr(right_gr, queried_fk)
        left_grouping_fk = getattr(left_gr, grouping_fk)
        right_grouping_fk = getattr(right_gr, grouping_fk)
        # 'duplicate_relations' holds GroupRelations, which should be
        # "squashed" after group merging. If we didn't do this, we would
        # violate the UNIQUE constraint
        # Generate 'duplicate_relations' by joining the table with itself
        # by the "grouping_fk" (target_id/source_id)
        duplicate_relations = (
            db.session.query(left_gr, right_gr)
            .filter(
                left_gr.id < right_gr.id,  # Don't repeat the same pairs
                left_queried_fk.in_(merge_groups_ids),
                right_queried_fk.in_(merge_groups_ids),
                right_queried_fk != left_queried_fk,
                right_gr.relation == left_gr.relation)
            .join(
                right_gr,
                left_grouping_fk == right_grouping_fk)
        )
        del_rel = set()
        for rel_a, rel_b in duplicate_relations:
            # Build the squashed replacement relation, already pointing at
            # the merged group on the queried side.
            kwargs = {
                queried_fk: merged_group.id,
                grouping_fk: getattr(rel_a, grouping_fk),
                'relation': rel_a.relation,
                'id': uuid.uuid4(),
                'type': rel_a.type
            }
            new_grp_rel = GroupRelationship(**kwargs)
            db.session.add(new_grp_rel)
            if identity_groups:
                group_rel_meta = GroupRelationshipMetadata(
                    group_relationship_id=new_grp_rel.id)
                db.session.add(group_rel_meta)
                # Merge the two metadata payloads; the more recently updated
                # one (json2 after the swap below) wins on conflicts.
                json1, json2 = rel_a.data.json, rel_b.data.json
                if rel_b.data.updated < rel_a.data.updated:
                    json1, json2 = json2, json1
                group_rel_meta.json = json1
                group_rel_meta.update(json2, validate=False, multi=True)
            # Delete the duplicate pairs of relationship M2Ms before updating,
            # otherwise the bulk UPDATEs below would violate uniqueness.
            delete_duplicate_relationship_m2m(rel_a, rel_b)
            rel_ids = [rel_a.id, rel_b.id]
            # Re-point M2M rows from both squashed relations to the new one,
            # on both the relationship and subrelationship sides.
            (
                GroupRelationshipM2M.query
                .filter(GroupRelationshipM2M.relationship_id.in_(rel_ids))
                .update({GroupRelationshipM2M.relationship_id: new_grp_rel.id},
                        synchronize_session='fetch')
            )
            (
                GroupRelationshipM2M.query
                .filter(GroupRelationshipM2M.subrelationship_id.in_(rel_ids))
                .update(
                    {GroupRelationshipM2M.subrelationship_id: new_grp_rel.id},
                    synchronize_session='fetch')
            )
            if identity_groups:
                cls = Relationship2GroupRelationship
                delete_duplicate_relationship_m2m(rel_a, rel_b, cls=cls)
                (
                    cls.query
                    .filter(cls.group_relationship_id.in_(rel_ids))
                    .update({cls.group_relationship_id: new_grp_rel.id},
                            synchronize_session='fetch')
                )
            del_rel.add(rel_a.id)
            del_rel.add(rel_b.id)
        # Delete the duplicate relations
        (
            GroupRelationship.query
            .filter(GroupRelationship.id.in_(del_rel))
            .delete(synchronize_session='fetch')
        )
        queried_fk_inst = getattr(GroupRelationship, queried_fk)
        # Update the other non-duplicated relations
        (
            GroupRelationship.query
            .filter(queried_fk_inst.in_(merge_groups_ids))
            .update({queried_fk_inst: merged_group.id},
                    synchronize_session='fetch')
        )
def delete_duplicate_relationship_m2m(group_a, group_b,
                                      cls=GroupRelationshipM2M):
    """Delete any duplicate relationship M2M objects.

    Deletes any duplicate (unique-constraint violating) M2M objects
    between relationships and group relationships. This step is required
    before merging of two groups.

    :param group_a: first group being merged (only its ``id`` is read).
    :param group_b: second group being merged (only its ``id`` is read).
    :param cls: M2M model to deduplicate; ``GroupRelationshipM2M`` or
        ``Relationship2GroupRelationship``.
    :raises ValueError: if ``cls`` is not one of the supported models.
    """
    if cls == GroupRelationshipM2M:
        queried_fk = 'subrelationship_id'
        grouping_fk = 'relationship_id'
    elif cls == Relationship2GroupRelationship:
        queried_fk = 'group_relationship_id'
        grouping_fk = 'relationship_id'
    else:
        raise ValueError(
            "Parameter 'cls' must be either 'GroupRelationshipM2M' or "
            "'Relationship2GroupRelationship'.")
    # Deduplicate in both FK directions by swapping the roles of the keys.
    for queried_fk, grouping_fk in [(queried_fk, grouping_fk),
                                    (grouping_fk, queried_fk), ]:
        left_gr = aliased(cls, name='left_gr')
        right_gr = aliased(cls, name='right_gr')
        left_queried_fk = getattr(left_gr, queried_fk)
        right_queried_fk = getattr(right_gr, queried_fk)
        left_grouping_fk = getattr(left_gr, grouping_fk)
        right_grouping_fk = getattr(right_gr, grouping_fk)
        merge_groups_ids = [group_a.id, group_b.id]
        duplicate_relations = (
            db.session.query(left_gr, right_gr)
            .filter(
                # Because we join in the same table by grouping_fk, we will
                # have pairs [(A,B), (B,A)] on the list. We can impose an
                # inequality condition on one FK to reduce this to just one
                # pair [(A,B)]. The strict '<' already implies inequality,
                # so no separate '!=' predicate is needed.
                left_queried_fk < right_queried_fk,
                left_queried_fk.in_(merge_groups_ids),
                right_queried_fk.in_(merge_groups_ids),
            )
            .join(
                right_gr,
                left_grouping_fk == right_grouping_fk)
        )
        # TODO: Delete in a query
        # Only one side of each duplicate pair is deleted; the other row
        # survives and will be re-pointed by the caller.
        for rel_a, _ in duplicate_relations:
            db.session.delete(rel_a)
def delete_duplicate_group_m2m(group_a: Group, group_b: Group):
    """Delete any duplicate GroupM2M objects.

    Removes one of each pair of GroupM2M objects for groups A and B, so
    that the unique constraint is not violated when the survivor rows are
    re-pointed to the merged group.

    :param group_a: first group being merged (only its ``id`` is read).
    :param group_b: second group being merged (only its ``id`` is read).
    """
    cls = GroupM2M
    queried_fk = 'group_id'
    grouping_fk = 'subgroup_id'
    # Deduplicate in both FK directions by swapping the roles of the keys.
    for queried_fk, grouping_fk in [(queried_fk, grouping_fk),
                                    (grouping_fk, queried_fk), ]:
        left_gr = aliased(cls, name='left_gr')
        right_gr = aliased(cls, name='right_gr')
        left_queried_fk = getattr(left_gr, queried_fk)
        right_queried_fk = getattr(right_gr, queried_fk)
        left_grouping_fk = getattr(left_gr, grouping_fk)
        right_grouping_fk = getattr(right_gr, grouping_fk)
        merge_groups_ids = [group_a.id, group_b.id]
        duplicate_relations = (
            db.session.query(left_gr, right_gr)
            .filter(
                # Because we join in the same table by grouping_fk, we will
                # have pairs [(A,B), (B,A)] on the list. We impose an
                # inequality condition on one FK to reduce this to just one
                # pair [(A,B)]. The strict '<' already implies inequality,
                # so no separate '!=' predicate is needed.
                left_queried_fk < right_queried_fk,
                left_queried_fk.in_(merge_groups_ids),
                right_queried_fk.in_(merge_groups_ids),
            )
            .join(
                right_gr,
                left_grouping_fk == right_grouping_fk)
        )
        # TODO: Delete in a query
        # Only one side of each duplicate pair is deleted; the other row
        # survives and will be re-pointed by the caller.
        for rel_a, _ in duplicate_relations:
            db.session.delete(rel_a)
def merge_identity_groups(group_a: Group, group_b: Group):
    """Merge two groups of type "Identity".

    Merges the groups together into one group, taking care of migrating
    all group relationships and M2M objects.

    :returns: ``(merged_group, merged_version_group)`` tuple, or
        ``(None, None)`` when both arguments are already the same group.
    :raises ValueError: if either group is not of type ``Identity``.
    """
    # Nothing to do if groups are already merged
    if group_a == group_b:
        return None, None
    if not (group_a.type == group_b.type == GroupType.Identity):
        raise ValueError("Can only merge Identity groups.")
    # Merge the parent Version groups first (each Identity group has a
    # Version group as its GroupM2M parent).
    # TODO: Should join with Group and filter by Group.type=GroupType.Version
    version_group_a = GroupM2M.query.filter_by(
        subgroup=group_a).one().group
    version_group_b = GroupM2M.query.filter_by(
        subgroup=group_b).one().group
    merged_version_group = merge_version_groups(
        version_group_a, version_group_b)
    # The id is generated eagerly because GroupMetadata references it
    # before the session is flushed.
    merged_group = Group(type=GroupType.Identity, id=uuid.uuid4())
    db.session.add(merged_group)
    merged_group_meta = GroupMetadata(group_id=merged_group.id)
    db.session.add(merged_group_meta)
    # Start from the older metadata and overlay the newer one, so that the
    # most recently updated values win.
    json1, json2 = group_a.data.json, group_b.data.json
    if group_b.data.updated < group_a.data.updated:
        json1, json2 = json2, json1
    merged_group_meta.json = json1
    merged_group_meta.update(json2)
    merge_group_relationships(group_a, group_b, merged_group)
    # Re-point identifier links from the old groups to the merged group.
    (Identifier2Group.query
     .filter(Identifier2Group.group_id.in_([group_a.id, group_b.id]))
     .update({Identifier2Group.group_id: merged_group.id},
             synchronize_session='fetch'))
    # Delete the duplicate GroupM2M entries and update the remaining with
    # the new Group
    delete_duplicate_group_m2m(group_a, group_b)
    (GroupM2M.query
     .filter(GroupM2M.subgroup_id.in_([group_a.id, group_b.id]))
     .update({GroupM2M.subgroup_id: merged_group.id},
             synchronize_session='fetch'))
    Group.query.filter(Group.id.in_([group_a.id, group_b.id])).delete(
        synchronize_session='fetch')
    # After merging identity groups, we need to merge the version groups
    return merged_group, merged_version_group
def merge_version_groups(group_a: Group, group_b: Group):
    """Merge two Version groups into one.

    :returns: the newly created merged ``Group``, or ``None`` when both
        arguments are already the same group.
    :raises ValueError: if the two groups differ in type, or are of type
        ``Identity`` (which :func:`merge_identity_groups` handles).
    """
    # Nothing to do if groups are already merged
    if group_a == group_b:
        return
    if group_a.type != group_b.type:
        raise ValueError("Cannot merge groups of different type.")
    if group_a.type == GroupType.Identity:
        # Merging Identity groups is done separately
        raise ValueError("Cannot merge groups of type 'Identity'.")
    # The id is generated eagerly so related objects can reference it
    # before the session is flushed.
    merged_group = Group(type=group_a.type, id=uuid.uuid4())
    db.session.add(merged_group)
    merge_group_relationships(group_a, group_b, merged_group)
    # Delete the duplicate GroupM2M entries and update the remaining with
    # the new Group
    delete_duplicate_group_m2m(group_a, group_b)
    # Re-point both the parent and the child side of GroupM2M links.
    (GroupM2M.query
     .filter(GroupM2M.group_id.in_([group_a.id, group_b.id]))
     .update({GroupM2M.group_id: merged_group.id},
             synchronize_session='fetch'))
    (GroupM2M.query
     .filter(GroupM2M.subgroup_id.in_([group_a.id, group_b.id]))
     .update({GroupM2M.subgroup_id: merged_group.id},
             synchronize_session='fetch'))
    Group.query.filter(Group.id.in_([group_a.id, group_b.id])).delete(
        synchronize_session='fetch')
    return merged_group
def get_or_create_groups(identifier: Identifier) -> Tuple[Group, Group]:
    """Given an Identifier, fetch or create its Identity and Version groups.

    :param identifier: the identifier whose groups are looked up.
    :returns: ``(identity_group, version_group)``. Missing groups (and
        their link objects) are created and added to the session, but the
        session is not committed here.
    """
    # Identity group: linked to the identifier via Identifier2Group.
    id2g = Identifier2Group.query.filter(
        Identifier2Group.identifier == identifier).one_or_none()
    if not id2g:
        group = Group(type=GroupType.Identity, id=uuid.uuid4())
        db.session.add(group)
        # Eagerly generated group id above allows referencing it before flush.
        gm = GroupMetadata(group_id=group.id)
        db.session.add(gm)
        id2g = Identifier2Group(identifier=identifier, group=group)
        db.session.add(id2g)
    # Version group: the GroupM2M parent (of type Version) of the identity
    # group.
    g2g = (GroupM2M.query
           .join(Group, GroupM2M.group_id == Group.id)
           .filter(GroupM2M.subgroup == id2g.group,
                   Group.type == GroupType.Version)
           .one_or_none())
    if not g2g:
        group = Group(type=GroupType.Version, id=uuid.uuid4())
        db.session.add(group)
        g2g = GroupM2M(group=group, subgroup=id2g.group)
        db.session.add(g2g)
    return id2g.group, g2g.group
def get_group_from_id(identifier_value, id_type='doi',
                      group_type=GroupType.Identity):
    """Resolve from 'A' to Identity Group of A or to a Version Group of A.

    :param identifier_value: value of the identifier to resolve.
    :param id_type: identifier scheme (default ``'doi'``).
    :param group_type: type of group to return for the identifier.
    """
    # TODO: Move this method to api.utils or to models?
    id_ = Identifier.get(identifier_value, id_type)
    # NOTE(review): assumes the identifier exists and belongs to at least
    # one group; otherwise this raises AttributeError/IndexError — confirm
    # callers guarantee this.
    id_grp = id_.id2groups[0].group
    if group_type == GroupType.Identity:
        return id_grp
    else:
        # The Version group is the GroupM2M parent of the Identity group.
        return GroupM2M.query.filter_by(subgroup=id_grp).one().group
def add_group_relationship(relationship, src_id_grp, tar_id_grp,
                           src_ver_grp, tar_ver_grp):
    """Add a group relationship between corresponding groups.

    :param relationship: relationship whose ``relation`` is mirrored onto
        the group relationships.
    :param src_id_grp: source Identity group.
    :param tar_id_grp: target Identity group.
    :param src_ver_grp: source Version group.
    :param tar_ver_grp: target Version group.
    """
    # Add GroupRelationship between Identity groups. The id is generated
    # eagerly because GroupRelationshipMetadata references it before the
    # session is flushed.
    id_grp_rel = GroupRelationship(source=src_id_grp, target=tar_id_grp,
                                   relation=relationship.relation,
                                   type=GroupType.Identity, id=uuid.uuid4())
    grm = GroupRelationshipMetadata(
        group_relationship_id=id_grp_rel.id)
    db.session.add(grm)
    db.session.add(id_grp_rel)
    rel2grp_rel = Relationship2GroupRelationship(
        relationship=relationship, group_relationship=id_grp_rel)
    db.session.add(rel2grp_rel)
    # Add GroupRelationship between Version groups
    # NOTE(review): unlike id_grp_rel above, no explicit id is passed here —
    # presumably the model provides a default; confirm.
    ver_grp_rel = GroupRelationship(source=src_ver_grp, target=tar_ver_grp,
                                    relation=relationship.relation,
                                    type=GroupType.Version)
    db.session.add(ver_grp_rel)
    g2g_rel = GroupRelationshipM2M(relationship=ver_grp_rel,
                                   subrelationship=id_grp_rel)
    db.session.add(g2g_rel)
def update_groups(relationship, delete=False):
"""Update groups and related M2M objects for given relationship."""
src_idg, src_vg = get_or_create_groups(relationship.source)
tar_idg, tar_vg = get_or_create_groups(relationship.target)
merged_group = None
merged_version_group = None
| |
1012,
"hu": 26,
"ws": 4,
"wd": 60,
"ic": "11n"
},
{
"ts": "2017-08-31T01:00:00.000Z",
"tp": 32,
"pr": 1011,
"hu": 20,
"ws": 6,
"wd": 80,
"ic": "11n"
},
{
"ts": "2017-08-31T00:00:00.000Z",
"tp": 33,
"pr": 1011,
"hu": 16,
"ws": 3,
"wd": 120,
"ic": "11n"
},
{
"ts": "2017-08-30T23:00:00.000Z",
"tp": 34,
"pr": 1005,
"hu": 38,
"ws": 2,
"ic": "01d",
"wd": null
},
{
"ts": "2017-08-30T22:00:00.000Z",
"tp": 34,
"pr": 1011,
"hu": 15,
"ws": 7,
"wd": 300,
"ic": "01d"
},
{
"ts": "2017-08-30T21:00:00.000Z",
"tp": 34,
"pr": 1007,
"hu": 38,
"ws": 1,
"ic": "01d",
"wd": null
},
{
"ts": "2017-08-30T20:00:00.000Z",
"tp": 33,
"pr": 1007,
"hu": 41,
"ws": 1,
"wd": 225,
"ic": "01d"
},
{
"ts": "2017-08-30T19:00:00.000Z",
"tp": 33,
"pr": 1008,
"hu": 43,
"ws": 2,
"ic": "01d",
"wd": null
},
{
"ts": "2017-08-30T18:00:00.000Z",
"tp": 32,
"pr": 1008,
"hu": 41,
"ws": 1,
"ic": "01d",
"wd": null
},
{
"ts": "2017-08-30T17:00:00.000Z",
"tp": 29,
"pr": 1008,
"hu": 48,
"ws": 1,
"wd": 204,
"ic": "01d"
},
{
"ts": "2017-08-30T16:00:00.000Z",
"tp": 27,
"pr": 1008,
"hu": 61,
"ws": 1,
"wd": 204,
"ic": "01d"
},
{
"ts": "2017-08-30T15:00:00.000Z",
"tp": 24,
"pr": 1010,
"hu": 42,
"ws": 0,
"wd": 64,
"ic": "50d"
},
{
"ts": "2017-08-30T14:00:00.000Z",
"tp": 22,
"pr": 1007,
"hu": 78,
"ws": 0,
"wd": 64,
"ic": "50d"
},
{
"ts": "2017-08-30T13:00:00.000Z",
"tp": 22,
"pr": 1007,
"hu": 73,
"ws": 0,
"wd": 64,
"ic": "50n"
},
{
"ts": "2017-08-30T12:00:00.000Z",
"tp": 21,
"pr": 1007,
"hu": 78,
"ws": 0,
"wd": 40,
"ic": "50n"
},
{
"ts": "2017-08-30T11:00:00.000Z",
"tp": 22,
"pr": 1008,
"hu": 78,
"ws": 0,
"wd": 40,
"ic": "50n"
},
{
"ts": "2017-08-30T10:00:00.000Z",
"tp": 22,
"pr": 1008,
"hu": 78,
"ws": 0,
"wd": 40,
"ic": "50n"
},
{
"ts": "2017-08-30T09:00:00.000Z",
"tp": 23,
"pr": 1008,
"hu": 73,
"ws": 0,
"wd": 73,
"ic": "50n"
},
{
"ts": "2017-08-30T08:00:00.000Z",
"tp": 23,
"pr": 1008,
"hu": 73,
"ws": 0,
"wd": 35,
"ic": "50n"
},
{
"ts": "2017-08-30T07:00:00.000Z",
"tp": 24,
"pr": 1008,
"hu": 69,
"ws": 0,
"wd": 35,
"ic": "50n"
},
{
"ts": "2017-08-30T06:00:00.000Z",
"tp": 25,
"pr": 1008,
"hu": 69,
"ws": 0,
"wd": 310,
"ic": "50n"
},
{
"ts": "2017-08-30T01:00:00.000Z",
"tp": 32,
"pr": 1006,
"hu": 45,
"ws": 2,
"ic": "01n",
"wd": null
},
{
"ts": "2017-08-30T00:00:00.000Z",
"tp": 33,
"pr": 1007,
"hu": 43,
"ws": 2,
"wd": 250,
"ic": "01d"
},
{
"ts": "2017-08-29T23:00:00.000Z",
"tp": 34,
"pr": 1007,
"hu": 41,
"ws": 1,
"wd": 237,
"ic": "01d"
},
{
"ts": "2017-08-29T22:00:00.000Z",
"tp": 34,
"pr": 1008,
"hu": 46,
"ws": 1,
"ic": "10d",
"wd": null
},
{
"ts": "2017-08-29T21:00:00.000Z",
"tp": 34,
"pr": 1008,
"hu": 46,
"ws": 1,
"wd": 223,
"ic": "11d"
},
{
"ts": "2017-08-29T20:00:00.000Z",
"tp": 33,
"pr": 1009,
"hu": 41,
"ws": 2,
"ic": "01d",
"wd": null
},
{
"ts": "2017-08-29T19:00:00.000Z",
"tp": 31,
"pr": 1009,
"hu": 38,
"ws": 0,
"wd": 192,
"ic": "01d"
},
{
"ts": "2017-08-29T18:00:00.000Z",
"tp": 30,
"pr": 1010,
"hu": 45,
"ws": 2,
"wd": 130,
"ic": "50d"
},
{
"ts": "2017-08-29T17:00:00.000Z",
"tp": 27,
"pr": 1010,
"hu": 51,
"ws": 0,
"wd": 192,
"ic": "50d"
},
{
"ts": "2017-08-29T16:00:00.000Z",
"tp": 26,
"pr": 1010,
"hu": 61,
"ws": 0,
"wd": 57,
"ic": "50d"
},
{
"ts": "2017-08-29T15:00:00.000Z",
"tp": 24,
"pr": 1009,
"hu": 73,
"ws": 0,
"wd": 57,
"ic": "50d"
},
{
"ts": "2017-08-29T14:00:00.000Z",
"tp": 22,
"pr": 1009,
"hu": 78,
"ws": 0,
"wd": 57,
"ic": "50d"
},
{
"ts": "2017-08-29T13:00:00.000Z",
"tp": 21,
"pr": 1009,
"hu": 78,
"ws": 0,
"wd": 94,
"ic": "50n"
},
{
"ts": "2017-08-29T12:00:00.000Z",
"tp": 22,
"pr": 1008,
"hu": 78,
"ws": 0,
"wd": 94,
"ic": "50n"
},
{
"ts": "2017-08-29T11:00:00.000Z",
"tp": 22,
"pr": 1008,
"hu": 78,
"ws": 0,
"wd": 94,
"ic": "50n"
},
{
"ts": "2017-08-29T10:00:00.000Z",
"tp": 22,
"pr": 1009,
"hu": 78,
"ws": 0,
"wd": 80,
"ic": "50n"
}
],
"pollution": [
{
"ts": "2019-08-04T19:00:00.000Z",
"aqius": 70,
"mainus": "p2",
"aqicn": 30,
"maincn": "p2",
"p2": {
"conc": 21,
"aqius": 70,
"aqicn": 30
},
"p1": {
"conc": 30,
"aqius": 27,
"aqicn": 30
},
"o3": {
"conc": 48,
"aqius": 38,
"aqicn": 30
},
"n2": {
"conc": 8,
"aqius": 2,
"aqicn": 8
},
"s2": {
"conc": 1,
"aqius": 1,
"aqicn": 3
},
"co": {
"conc": 0.2,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T18:00:00.000Z",
"aqius": 57,
"mainus": "p2",
"aqicn": 28,
"maincn": "o3",
"p2": {
"conc": 15,
"aqius": 57,
"aqicn": 21
},
"p1": {
"conc": 22,
"aqius": 20,
"aqicn": 22
},
"o3": {
"conc": 45,
"aqius": 36,
"aqicn": 28
},
"n2": {
"conc": 8,
"aqius": 2,
"aqicn": 8
},
"co": {
"conc": 0.2,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T17:00:00.000Z",
"aqius": 45,
"mainus": "p2",
"aqicn": 21,
"maincn": "o3",
"p2": {
"conc": 11,
"aqius": 45,
"aqicn": 16
},
"p1": {
"conc": 8,
"aqius": 7,
"aqicn": 8
},
"o3": {
"conc": 34,
"aqius": 27,
"aqicn": 21
},
"n2": {
"conc": 8,
"aqius": 2,
"aqicn": 8
},
"co": {
"conc": 0.2,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T16:00:00.000Z",
"aqius": 29,
"mainus": "p2",
"aqicn": 18,
"maincn": "o3",
"p2": {
"conc": 7,
"aqius": 29,
"aqicn": 10
},
"p1": {
"conc": 9,
"aqius": 8,
"aqicn": 9
},
"o3": {
"conc": 28,
"aqius": 22,
"aqicn": 18
},
"n2": {
"conc": 6,
"aqius": 2,
"aqicn": 6
},
"co": {
"conc": 0.2,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T15:00:00.000Z",
"aqius": 29,
"mainus": "p2",
"aqicn": 14,
"maincn": "p1",
"p2": {
"conc": 7,
"aqius": 29,
"aqicn": 10
},
"p1": {
"conc": 14,
"aqius": 13,
"aqicn": 14
},
"o3": {
"conc": 21,
"aqius": 17,
"aqicn": 13
},
"n2": {
"conc": 8,
"aqius": 2,
"aqicn": 8
},
"co": {
"conc": 0.1,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T14:00:00.000Z",
"aqius": 29,
"mainus": "p2",
"aqicn": 27,
"maincn": "p1",
"p2": {
"conc": 7,
"aqius": 29,
"aqicn": 10
},
"p1": {
"conc": 27,
"aqius": 25,
"aqicn": 27
},
"o3": {
"conc": 18,
"aqius": 14,
"aqicn": 11
},
"n2": {
"conc": 8,
"aqius": 2,
"aqicn": 8
},
"co": {
"conc": 0.1,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T13:00:00.000Z",
"aqius": 41,
"mainus": "p2",
"aqicn": 14,
"maincn": "p2",
"p2": {
"conc": 10,
"aqius": 41,
"aqicn": 14
},
"p1": {
"conc": 13,
"aqius": 12,
"aqicn": 13
},
"o3": {
"conc": 15,
"aqius": 12,
"aqicn": 9
},
"n2": {
"conc": 10,
"aqius": 3,
"aqicn": 9
},
"co": {
"conc": 0.1,
"aqius": 2,
"aqicn": 2
}
},
{
"ts": "2019-08-04T12:00:00.000Z",
"aqius": 45,
"mainus": "p2",
"aqicn": 16,
"maincn": "p2",
"p2": {
"conc": 11,
"aqius": 45,
"aqicn": 16
},
"p1": {
"conc": 14,
"aqius": 13,
"aqicn": 14
},
"o3": {
"conc": 18,
"aqius": 14,
"aqicn": 11
},
"n2": {
"conc": 8,
"aqius": 2,
"aqicn": 8
},
"co": {
"conc": 0.1,
"aqius": 1,
"aqicn": 1
}
},
{
"ts": "2019-08-04T11:00:00.000Z",
"aqius": 29,
"mainus": "p2",
"aqicn": 14,
"maincn": "o3",
"p2": {
"conc": 7,
"aqius": 29,
"aqicn": 10
},
"p1": {
"conc": 13,
"aqius": 12,
"aqicn": 13
},
"o3": {
"conc": 23,
"aqius": 18,
"aqicn": 14
},
"n2": {
"conc": 5,
"aqius": 2,
"aqicn": 5
},
"co": {
"conc": 0.1,
"aqius": 1,
"aqicn": 1
}
},
{
"ts": "2019-08-04T10:00:00.000Z",
"aqius": 29,
"mainus": "p2",
"aqicn": 16,
"maincn": "o3",
"p2": {
"conc": 7,
"aqius": 29,
"aqicn": 10
},
"p1": {
"conc": 12,
"aqius": 11,
"aqicn": 12
},
"o3": {
"conc": 25,
"aqius": 20,
"aqicn": 16
},
"n2": {
"conc": 5,
"aqius": 2,
"aqicn": 5
},
"co": {
"conc": 0.1,
"aqius": 1,
"aqicn": 1
}
},
{
"ts": "2019-08-04T04:00:00.000Z",
"aqius": 29,
"mainus": "p2",
"aqicn": 16,
"maincn": "o3",
"p2": {
"conc": 7,
"aqius": 29,
"aqicn": 10
},
"p1": {
"conc": 3,
"aqius": 3,
"aqicn": 3
},
"o3": {
"conc": 25,
"aqius": 20,
"aqicn": 16
},
"n2": {
"conc": 5,
"aqius": 2,
"aqicn": 5
},
"co": {
"conc": 0.1,
"aqius": 1,
"aqicn": 1
}
},
{
"ts": "2019-08-04T03:00:00.000Z",
"aqius": 21,
"mainus": "p2",
"aqicn": 16,
"maincn": "o3",
"p2": {
"conc": 5,
"aqius": 21,
"aqicn": 7
},
"p1": {
"conc": 5,
"aqius": 5,
"aqicn": 5
},
"o3": {
"conc": 25,
"aqius": 20,
"aqicn": 16
},
"n2": {
"conc": 5,
"aqius": 2,
"aqicn": 5
},
"co": {
"conc": 0.1,
"aqius": 1,
"aqicn": 1
| |
import asyncio
import time
import typing
import logging
from aioupnp.protocols.scpd import scpd_post
from aioupnp.device import Service
from aioupnp.fault import UPnPError
from aioupnp.util import is_valid_public_ipv4
log = logging.getLogger(__name__)
def soap_optional_str(x: typing.Optional[typing.Union[str, int]]) -> typing.Optional[str]:
    """Coerce a SOAP value to ``str``, mapping None/'none'/'nil' (any case) to None."""
    if x is None:
        return None
    text = str(x)
    if text.lower() in ('none', 'nil'):
        return None
    return text
def soap_bool(x: typing.Optional[typing.Union[str, int]]) -> bool:
    """Coerce a SOAP value to ``bool``.

    Falsy inputs (None, 0, '') and the string 'false' in any casing are
    False; everything else (including '0') is True.

    Note: the original comparison list contained a dead 'False' entry —
    the value is lowercased first, so only 'false' can ever match.
    """
    if not x:
        return False
    return str(x).lower() != 'false'
class GetSpecificPortMappingEntryResponse(typing.NamedTuple):
    """Typed result of the GetSpecificPortMappingEntry SOAP command.

    Fields map positionally onto the command's output arguments
    (NewInternalPort, NewInternalClient, NewEnabled,
    NewPortMappingDescription, NewLeaseDuration).
    """
    internal_port: int  # NewInternalPort
    lan_address: str  # NewInternalClient
    enabled: bool  # NewEnabled
    description: str  # NewPortMappingDescription
    lease_time: int  # NewLeaseDuration
class GetGenericPortMappingEntryResponse(typing.NamedTuple):
    """Typed result of the GetGenericPortMappingEntry SOAP command.

    Fields map positionally onto the command's output arguments
    (NewRemoteHost, NewExternalPort, NewProtocol, NewInternalPort,
    NewInternalClient, NewEnabled, NewPortMappingDescription,
    NewLeaseDuration).
    """
    gateway_address: str  # NewRemoteHost
    external_port: int  # NewExternalPort
    protocol: str  # NewProtocol
    internal_port: int  # NewInternalPort
    lan_address: str  # NewInternalClient
    enabled: bool  # NewEnabled
    description: str  # NewPortMappingDescription
    lease_time: int  # NewLeaseDuration
class SCPDRequestDebuggingInfo(typing.NamedTuple):
    """Record of one SOAP/SCPD request kept for debugging purposes."""
    method: str  # SOAP command name that was invoked
    kwargs: typing.Dict[str, typing.Union[str, int, bool]]  # arguments sent with the request
    response_xml: bytes  # raw XML bytes returned by the gateway
    result: typing.Optional[typing.Union[str, int, bool, GetSpecificPortMappingEntryResponse,
                                         GetGenericPortMappingEntryResponse]]  # recast result, or None on failure
    err: typing.Optional[Exception]  # error raised during the exchange/parsing, or None on success
    ts: float  # time.time() timestamp at which the record was created
def recast_return(return_annotation, result: typing.Union[str, int, bool, typing.Dict[str, typing.Union[int, str]]],
result_keys: typing.List[str]) -> typing.Optional[
typing.Union[str, int, bool, GetSpecificPortMappingEntryResponse, GetGenericPortMappingEntryResponse]]:
if len(result_keys) == 1:
if isinstance(result, (str, int, bool)):
single_result = result
else:
if result_keys[0] in result:
single_result = result[result_keys[0]]
else: # check for the field having incorrect capitalization
flattened = {k.lower(): v for k, v in result.items()}
if result_keys[0].lower() in flattened:
single_result = flattened[result_keys[0].lower()]
else:
raise UPnPError(f"expected response key {result_keys[0]}, got {list(result.keys())}")
if return_annotation is bool:
return soap_bool(single_result)
if return_annotation is str:
return soap_optional_str(single_result)
return None if single_result is None else int(single_result)
elif return_annotation in [GetGenericPortMappingEntryResponse, GetSpecificPortMappingEntryResponse]:
assert isinstance(result, dict)
arg_types: typing.Dict[str, typing.Type[typing.Any]] = return_annotation._field_types
assert len(arg_types) == len(result_keys)
recast_results: typing.Dict[str, typing.Optional[typing.Union[str, int, bool]]] = {}
for i, (field_name, result_key) in enumerate(zip(arg_types, result_keys)):
result_field_name = result_keys[i]
field_type = arg_types[field_name]
if field_type is bool:
recast_results[field_name] = soap_bool(result.get(result_field_name, None))
elif field_type is str:
recast_results[field_name] = soap_optional_str(result.get(result_field_name, None))
elif field_type is int:
recast_results[field_name] = int(result[result_field_name]) if result_field_name in result else None
return return_annotation(**recast_results)
return None
class SOAPCommands:
    """
    Type annotated wrappers for common UPnP SOAP functions

    A SOAPCommands object has its command attributes overridden during device discovery with SOAPCommand objects
    for the commands implemented by the gateway.

    SOAPCommand will use the typing annotations provided here to properly cast the types of arguments and results
    to their expected types.
    """

    # Names of the SOAP commands this class provides typed wrappers for.
    # The commented-out entries are known UPnP gateway commands that have
    # no wrapper implemented here.
    SOAP_COMMANDS: typing.List[str] = [
        'AddPortMapping',
        'GetGenericPortMappingEntry',
        'GetSpecificPortMappingEntry',
        'DeletePortMapping',
        'GetExternalIPAddress',
        # 'SetConnectionType',
        # 'GetNATRSIPStatus',
        # 'GetConnectionTypeInfo',
        # 'GetStatusInfo',
        # 'ForceTermination',
        # 'RequestConnection',
        # 'GetCommonLinkProperties',
        # 'GetTotalBytesSent',
        # 'GetTotalBytesReceived',
        # 'GetTotalPacketsSent',
        # 'GetTotalPacketsReceived',
        # 'X_GetICSStatistics',
        # 'GetDefaultConnectionService',
        # 'SetDefaultConnectionService',
        # 'SetEnabledForInternet',
        # 'GetEnabledForInternet',
        # 'GetMaximumActiveConnections',
        # 'GetActiveConnections'
    ]
def __init__(self, loop: asyncio.AbstractEventLoop, base_address: bytes, port: int) -> None:
self._loop = loop
self._registered: typing.Dict[Service,
typing.Dict[str, typing.Tuple[typing.List[str], typing.List[str]]]] = {}
self._wrappers_no_args: typing.Dict[str, typing.Callable[[], typing.Awaitable[typing.Any]]] = {}
self._wrappers_kwargs: typing.Dict[str, typing.Callable[..., typing.Awaitable[typing.Any]]] = {}
self._base_address = base_address
self._port = port
self._request_debug_infos: typing.List[SCPDRequestDebuggingInfo] = []
def is_registered(self, name: str) -> bool:
if name not in self.SOAP_COMMANDS:
raise ValueError("unknown command") # pragma: no cover
for service in self._registered.values():
if name in service:
return True
return False
def get_service(self, name: str) -> Service:
if name not in self.SOAP_COMMANDS:
raise ValueError("unknown command") # pragma: no cover
for service, commands in self._registered.items():
if name in commands:
return service
raise ValueError(name) # pragma: no cover
    def _register_soap_wrapper(self, name: str) -> None:
        """Build and install an async wrapper for SOAP command *name*.

        Uses the type hints of the annotated stub method (e.g.
        :meth:`AddPortMapping`) to decide how the response is recast, and
        stores the wrapper in ``_wrappers_no_args`` or ``_wrappers_kwargs``
        depending on whether the stub takes arguments.
        """
        annotations: typing.Dict[str, typing.Any] = typing.get_type_hints(getattr(self, name))
        service = self.get_service(name)
        input_names: typing.List[str] = self._registered[service][name][0]
        output_names: typing.List[str] = self._registered[service][name][1]

        async def wrapper(**kwargs: typing.Any) -> typing.Optional[
                typing.Union[str, int, bool, GetSpecificPortMappingEntryResponse, GetGenericPortMappingEntryResponse]]:
            assert service.controlURL is not None
            assert service.serviceType is not None
            # POST the SOAP request; scpd_post returns (parsed response, raw xml, error).
            response, xml_bytes, err = await scpd_post(
                service.controlURL, self._base_address.decode(), self._port, name, input_names,
                service.serviceType.encode(), self._loop, **kwargs
            )
            if err is not None:
                assert isinstance(xml_bytes, bytes)
                # Record the failed exchange before propagating the error.
                self._request_debug_infos.append(SCPDRequestDebuggingInfo(name, kwargs, xml_bytes, None, err, time.time()))
                raise err
            assert 'return' in annotations
            try:
                # Cast the raw response to the stub's annotated return type.
                result = recast_return(annotations['return'], response, output_names)
                self._request_debug_infos.append(SCPDRequestDebuggingInfo(name, kwargs, xml_bytes, result, None, time.time()))
            except Exception as err:
                # Never swallow task cancellation.
                if isinstance(err, asyncio.CancelledError):
                    raise  # pragma: no cover
                self._request_debug_infos.append(SCPDRequestDebuggingInfo(name, kwargs, xml_bytes, None, err, time.time()))
                raise UPnPError(f"Raised {str(type(err).__name__)}({str(err)}) parsing response for {name}")
            return result

        # Stubs whose only annotation is the return type take no arguments.
        if not len(list(k for k in annotations if k != 'return')):
            self._wrappers_no_args[name] = wrapper
        else:
            self._wrappers_kwargs[name] = wrapper
        return None
def register(self, name: str, service: Service, inputs: typing.List[str], outputs: typing.List[str]) -> None:
if name not in self.SOAP_COMMANDS:
raise AttributeError(name)
if self.is_registered(name):
raise AttributeError(f"{name} is already a registered SOAP command")
if service not in self._registered:
self._registered[service] = {}
self._registered[service][name] = inputs, outputs
self._register_soap_wrapper(name)
async def AddPortMapping(self, NewRemoteHost: str, NewExternalPort: int, NewProtocol: str, NewInternalPort: int,
NewInternalClient: str, NewEnabled: int, NewPortMappingDescription: str,
NewLeaseDuration: str) -> None:
"""Returns None"""
name = "AddPortMapping"
if not self.is_registered(name):
raise NotImplementedError() # pragma: no cover
assert name in self._wrappers_kwargs
await self._wrappers_kwargs[name](
NewRemoteHost=NewRemoteHost, NewExternalPort=NewExternalPort, NewProtocol=NewProtocol,
NewInternalPort=NewInternalPort, NewInternalClient=NewInternalClient, NewEnabled=NewEnabled,
NewPortMappingDescription=NewPortMappingDescription, NewLeaseDuration=NewLeaseDuration
)
return None
async def GetGenericPortMappingEntry(self, NewPortMappingIndex: int) -> GetGenericPortMappingEntryResponse:
"""
Returns (NewRemoteHost, NewExternalPort, NewProtocol, NewInternalPort, NewInternalClient, NewEnabled,
NewPortMappingDescription, NewLeaseDuration)
"""
name = "GetGenericPortMappingEntry"
if not self.is_registered(name):
raise NotImplementedError() # pragma: no cover
assert name in self._wrappers_kwargs
result: GetGenericPortMappingEntryResponse = await self._wrappers_kwargs[name](
NewPortMappingIndex=NewPortMappingIndex
)
return result
async def GetSpecificPortMappingEntry(self, NewRemoteHost: str, NewExternalPort: int,
NewProtocol: str) -> GetSpecificPortMappingEntryResponse:
"""Returns (NewInternalPort, NewInternalClient, NewEnabled, NewPortMappingDescription, NewLeaseDuration)"""
name = "GetSpecificPortMappingEntry"
if not self.is_registered(name):
raise NotImplementedError() # pragma: no cover
assert name in self._wrappers_kwargs
result: GetSpecificPortMappingEntryResponse = await self._wrappers_kwargs[name](
NewRemoteHost=NewRemoteHost, NewExternalPort=NewExternalPort, NewProtocol=NewProtocol
)
return result
async def DeletePortMapping(self, NewRemoteHost: str, NewExternalPort: int, NewProtocol: str) -> None:
"""Returns None"""
name = "DeletePortMapping"
if not self.is_registered(name):
raise NotImplementedError() # pragma: no cover
assert name in self._wrappers_kwargs
await self._wrappers_kwargs[name](
NewRemoteHost=NewRemoteHost, NewExternalPort=NewExternalPort, NewProtocol=NewProtocol
)
return None
async def GetExternalIPAddress(self) -> str:
"""Returns (NewExternalIPAddress)"""
name = "GetExternalIPAddress"
if not self.is_registered(name):
raise NotImplementedError() # pragma: no cover
assert name in self._wrappers_no_args
external_ip: str = await self._wrappers_no_args[name]()
if not is_valid_public_ipv4(external_ip):
raise UPnPError(f"Got invalid external ipv4 address: {external_ip}")
return external_ip
# async def GetNATRSIPStatus(self) -> Tuple[bool, bool]:
# """Returns (NewRSIPAvailable, NewNATEnabled)"""
# name = "GetNATRSIPStatus"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[bool, bool] = await self._wrappers_no_args[name]()
# return result[0], result[1]
#
# async def SetConnectionType(self, NewConnectionType: str) -> None:
# """Returns None"""
# name = "SetConnectionType"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_kwargs
# await self._wrappers_kwargs[name](NewConnectionType=NewConnectionType)
# return None
#
# async def GetConnectionTypeInfo(self) -> Tuple[str, str]:
# """Returns (NewConnectionType, NewPossibleConnectionTypes)"""
# name = "GetConnectionTypeInfo"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[str, str] = await self._wrappers_no_args[name]()
# return result
#
# async def GetStatusInfo(self) -> Tuple[str, str, int]:
# """Returns (NewConnectionStatus, NewLastConnectionError, NewUptime)"""
# name = "GetStatusInfo"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[str, str, int] = await self._wrappers_no_args[name]()
# return result
#
# async def ForceTermination(self) -> None:
# """Returns None"""
# name = "ForceTermination"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# await self._wrappers_no_args[name]()
# return None
#
# async def RequestConnection(self) -> None:
# """Returns None"""
# name = "RequestConnection"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# await self._wrappers_no_args[name]()
# return None
#
# async def GetCommonLinkProperties(self) -> Tuple[str, int, int, str]:
# """Returns (NewWANAccessType, NewLayer1UpstreamMaxBitRate, NewLayer1DownstreamMaxBitRate,
# NewPhysicalLinkStatus)"""
# name = "GetCommonLinkProperties"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[str, int, int, str] = await self._wrappers_no_args[name]()
# return result
#
# async def GetTotalBytesSent(self) -> int:
# """Returns (NewTotalBytesSent)"""
# name = "GetTotalBytesSent"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[int] = await self._wrappers_no_args[name]()
# return result[0]
#
# async def GetTotalBytesReceived(self) -> int:
# """Returns (NewTotalBytesReceived)"""
# name = "GetTotalBytesReceived"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[int] = await self._wrappers_no_args[name]()
# return result[0]
#
# async def GetTotalPacketsSent(self) -> int:
# """Returns (NewTotalPacketsSent)"""
# name = "GetTotalPacketsSent"
# if not self.is_registered(name):
# raise NotImplementedError() # pragma: no cover
# assert name in self._wrappers_no_args
# result: Tuple[int] = await self._wrappers_no_args[name]()
# return result[0]
#
# async def GetTotalPacketsReceived(self) -> int:
# """Returns | |
#!/usr/bin/env python
#
# $Id$
#
"""psutil is a module providing convenience functions for managing
processes in a portable way by using Python.
"""

__version__ = "0.2.1"
# Version tuple derived from the string, e.g. (0, 2, 1).
version_info = tuple([int(num) for num in __version__.split('.')])

# Public API of the package; platform modules may extend this below.
__all__ = [
    # exceptions
    "Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
    # constants
    "NUM_CPUS", "TOTAL_PHYMEM", "BOOT_TIME",
    "version_info", "__version__",
    "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
    "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
    "STATUS_WAKING", "STATUS_LOCKED",
    # classes
    "Process", "Popen",
    # functions
    "test", "pid_exists", "get_pid_list", "process_iter", "get_process_list",
    "avail_phymem", "used_phymem", "total_virtmem", "avail_virtmem",
    "used_virtmem", "cpu_times", "cpu_percent",
    ]

import sys
import os
import time
import signal
import warnings
import errno
import subprocess
# pwd exists only on POSIX systems; fall back to None elsewhere (Windows).
try:
    import pwd
except ImportError:
    pwd = None

from psutil.error import Error, NoSuchProcess, AccessDenied, TimeoutExpired
from psutil._compat import property
from psutil._common import (STATUS_RUNNING, STATUS_IDLE, STATUS_SLEEPING,
                            STATUS_DISK_SLEEP, STATUS_STOPPED,
                            STATUS_TRACING_STOP, STATUS_ZOMBIE, STATUS_DEAD,
                            STATUS_WAKING, STATUS_LOCKED)

# import the appropriate module for our platform only
if sys.platform.lower().startswith("linux"):
    from psutil._pslinux import *
    __all__.extend(["cached_phymem", "phymem_buffers"])

elif sys.platform.lower().startswith("win32"):
    from psutil._psmswindows import *
    __all__.extend(["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
                    "HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
                    "NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS"])

elif sys.platform.lower().startswith("darwin"):
    from psutil._psosx import *

elif sys.platform.lower().startswith("freebsd"):
    from psutil._psbsd import *

else:
    # Fail loudly at import time on unsupported platforms.
    raise NotImplementedError('platform %s is not supported' % sys.platform)
class Process(object):
"""Represents an OS process."""
    def __init__(self, pid):
        """Create a new Process object, raises NoSuchProcess if the PID
        does not exist, and ValueError if the parameter is not an
        integer PID."""
        if not isinstance(pid, int):
            raise ValueError("An integer is required")
        if not pid_exists(pid):
            raise NoSuchProcess(pid, None, "no process found with PID %s" % pid)
        self._pid = pid
        # platform-specific modules define an PlatformProcess
        # implementation class
        self._platform_impl = PlatformProcess(pid)
        # Previous CPU-time samples used to compute usage deltas.
        # NOTE(review): presumably consumed by a cpu_percent() method
        # defined later in this class — confirm.
        self._last_sys_cpu_times = None
        self._last_proc_cpu_times = None
    def __str__(self):
        """Return a human readable description of the process, degrading
        gracefully when info cannot be retrieved."""
        try:
            pid = self.pid
            name = repr(self.name)
            # cmdline may be an empty list; only join/repr it when non-empty.
            cmdline = self.cmdline and repr(' '.join(self.cmdline))
        except NoSuchProcess:
            # Process disappeared while we were reading its info.
            details = "(pid=%s (terminated))" % self.pid
        except AccessDenied:
            # Not enough privileges to read name/cmdline; show pid only.
            details = "(pid=%s)" % (self.pid)
        else:
            if cmdline:
                details = "(pid=%s, name=%s, cmdline=%s)" % (pid, name, cmdline)
            else:
                details = "(pid=%s, name=%s)" % (pid, name)
        return "%s.%s%s" % (self.__class__.__module__,
                            self.__class__.__name__, details)
def __repr__(self):
    """Unambiguous representation: the __str__ form plus the object id."""
    return "<%s at %s>" % (str(self), id(self))
def __eq__(self, other):
    """Test for equality with another Process object based on pid
    and creation time.
    """
    mine = (self.pid, self.create_time)
    try:
        theirs = (other.pid, other.create_time)
    except AttributeError:
        # "other" does not quack like a Process
        return False
    return mine == theirs
@property
def pid(self):
    """The process pid."""
    # immutable; validated and stored once by __init__
    return self._pid
@property
def ppid(self):
    """The process parent pid."""
    # fetched from the platform layer on every access
    return self._platform_impl.get_process_ppid()
@property
def parent(self):
    """Return the parent process as a Process object. If no parent
    pid is known return None.
    """
    ppid = self.ppid
    if ppid is None:
        return None
    try:
        return Process(ppid)
    except NoSuchProcess:
        # parent died between the ppid lookup and now
        return None
@property
def name(self):
    """The process name."""
    name = self._platform_impl.get_process_name()
    if os.name == 'posix':
        # On UNIX the name gets truncated to the first 15 characters.
        # If it matches the first part of the cmdline we return that
        # one instead because it's usually more explicative.
        # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
        cmdline = self.cmdline
        if cmdline:
            extended_name = os.path.basename(cmdline[0])
            if extended_name.startswith(name):
                name = extended_name
    # XXX - perhaps needs refactoring
    # side effect: cache the resolved name on the platform object so
    # errors raised from there can report it
    self._platform_impl._process_name = name
    return name
@property
def exe(self):
    """The process executable as an absolute path name."""
    exe = self._platform_impl.get_process_exe()
    if exe:
        return exe
    # platform layer gave us nothing: fall back on argv[0] when it points
    # at a real executable file
    cmdline = self.cmdline
    if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
        candidate = os.path.realpath(cmdline[0])
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    raise AccessDenied(self.pid, self._platform_impl._process_name)
@property
def path(self):
    """Deprecated: directory portion of the executable path."""
    warnings.warn("'path' property is deprecated; use 'os.path.dirname(exe)' instead",
                  DeprecationWarning)
    return os.path.dirname(self.exe)
@property
def cmdline(self):
    """The command line process has been called with."""
    # list of argv strings, straight from the platform layer
    return self._platform_impl.get_process_cmdline()
@property
def status(self):
    """The process current status as a STATUS_* constant."""
    # one of the STATUS_* values imported from psutil._common
    return self._platform_impl.get_process_status()
@property
def nice(self):
    """Get or set process niceness (priority)."""
    return self._platform_impl.get_process_nice()
@nice.setter
def nice(self, value):
    # invoked on "p.nice = num"; change process niceness
    # (on Windows, value is one of the *_PRIORITY_CLASS constants)
    return self._platform_impl.set_process_nice(value)
# uids/gids only make sense on POSIX, so they are defined conditionally
# at class-creation time
if os.name == 'posix':
    @property
    def uids(self):
        """Return a named tuple denoting the process real,
        effective, and saved user ids.
        """
        return self._platform_impl.get_process_uids()
    @property
    def gids(self):
        """Return a named tuple denoting the process real,
        effective, and saved group ids.
        """
        return self._platform_impl.get_process_gids()
@property
def uid(self):
    """The real user id of the current process (deprecated)."""
    warnings.warn("'uid' property is deprecated; use 'uids.real' instead",
                  DeprecationWarning)
    # -1 signals "not applicable" on non-POSIX platforms
    return self.uids.real if os.name == 'posix' else -1
@property
def gid(self):
    """The real group id of the current process (deprecated).

    Returns -1 on non-POSIX platforms, where group ids do not apply.
    """
    # BUGFIX: the warning used to recommend 'uids.real'; the correct
    # replacement for this property is 'gids.real' (cf. the uid property,
    # which this message was evidently copy-pasted from).
    warnings.warn("'gid' property is deprecated; use 'gids.real' instead",
                  DeprecationWarning)
    if os.name != 'posix':
        return -1
    return self.gids.real
@property
def username(self):
    """The name of the user that owns the process.
    On UNIX this is calculated by using *real* process uid.
    """
    if os.name == 'posix':
        if pwd is None:
            # might happen if python was installed from sources
            raise ImportError("requires pwd module shipped with standard python")
        # resolve the real uid through the system password database
        return pwd.getpwuid(self.uids.real).pw_name
    else:
        return self._platform_impl.get_process_username()
@property
def create_time(self):
    """The process creation time as a floating point number
    expressed in seconds since the epoch, in UTC.
    """
    # also used by __eq__ to disambiguate recycled PIDs
    return self._platform_impl.get_process_create_time()
# available for Windows and Linux only: defined at class-creation time
# only when the platform backend implements it
if hasattr(PlatformProcess, "get_process_cwd"):
    def getcwd(self):
        """Return a string representing the process current working
        directory.
        """
        return self._platform_impl.get_process_cwd()
# Linux, BSD and Windows only: defined only when the backend supports it
if hasattr(PlatformProcess, "get_process_io_counters"):
    def get_io_counters(self):
        """Return process I/O statistics as a namedtuple including
        the number of read/write calls performed and the amount of
        bytes read and written by the process.
        """
        return self._platform_impl.get_process_io_counters()
# available only on Linux: I/O priority accessors
if hasattr(PlatformProcess, "get_process_ionice"):
    def get_ionice(self):
        """Return process I/O niceness (priority) as a namedtuple."""
        return self._platform_impl.get_process_ionice()
    def set_ionice(self, ioclass, iodata=None):
        """Set process I/O niceness (priority).
        ioclass is one of the IOPRIO_CLASS_* constants.
        iodata is a number which goes from 0 to 7. The higher the
        value, the lower the I/O priority of the process.
        """
        return self._platform_impl.set_process_ionice(ioclass, iodata)
def get_num_threads(self):
    """Return the number of threads used by this process."""
    return self._platform_impl.get_process_num_threads()
def get_threads(self):
    """Return threads opened by process as a list of namedtuples
    including thread id and thread CPU times (user/system).
    """
    return self._platform_impl.get_process_threads()
def get_children(self):
    """Return the children of this process as a list of Process
    objects.
    """
    if not self.is_running():
        raise NoSuchProcess(self.pid, self._platform_impl._process_name)
    children = []
    for candidate in process_iter():
        try:
            if candidate.ppid == self.pid:
                children.append(candidate)
        except NoSuchProcess:
            # candidate died while we were iterating; just skip it
            pass
    return children
def get_cpu_percent(self, interval=0.1):
    """Return a float representing the current process CPU
    utilization as a percentage.
    When interval is > 0.0 compares process times to system CPU
    times elapsed before and after the interval (blocking).
    When interval is 0.0 or None compares process times to system CPU
    times elapsed since last call, returning immediately.
    In this case is recommended for accuracy that this function be
    called with at least 0.1 seconds between calls.
    """
    blocking = interval is not None and interval > 0.0
    if blocking:
        # take both snapshots ourselves, sleeping in between
        st1 = sum(cpu_times())
        pt1 = self._platform_impl.get_cpu_times()
        time.sleep(interval)
        st2 = sum(cpu_times())
        pt2 = self._platform_impl.get_cpu_times()
    else:
        # non-blocking: compare against the snapshots stored by the
        # previous call (None on the very first call)
        st1 = self._last_sys_cpu_times
        pt1 = self._last_proc_cpu_times
        st2 = sum(cpu_times())
        pt2 = self._platform_impl.get_cpu_times()
        if st1 is None or pt1 is None:
            # no baseline yet: store one and report 0.0
            self._last_sys_cpu_times = st2
            self._last_proc_cpu_times = pt2
            return 0.0
    delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
    delta_time = st2 - st1
    # reset values for next call in case of interval == None
    self._last_sys_cpu_times = st2
    self._last_proc_cpu_times = pt2
    try:
        # the utilization split between all CPUs
        overall_percent = (delta_proc / delta_time) * 100
    except ZeroDivisionError:
        # interval was too low
        return 0.0
    # the utilization of a single CPU
    single_cpu_percent = overall_percent * NUM_CPUS
    # ugly hack to avoid troubles with float precision issues
    if single_cpu_percent > 100.0:
        return 100.0
    return round(single_cpu_percent, 1)
def get_cpu_times(self):
    """Return a tuple whose values are process CPU user and system
    times. The same as os.times() but per-process.
    """
    return self._platform_impl.get_cpu_times()
def get_memory_info(self):
    """Return a tuple representing RSS (Resident Set Size) and VMS
    (Virtual Memory Size) in bytes.
    On UNIX RSS and VMS are the same values shown by ps.
    On Windows RSS and VMS refer to "Mem Usage" and "VM Size" columns
    of taskmgr.exe.
    """
    return self._platform_impl.get_memory_info()
def get_memory_percent(self):
    """Compare physical system memory to process resident memory and
    calculate process memory utilization as a percentage.
    """
    resident = self._platform_impl.get_memory_info()[0]
    total = float(TOTAL_PHYMEM)
    # guard against a bogus zero total instead of catching ZeroDivisionError
    if not total:
        return 0.0
    return (resident / total) * 100
def get_open_files(self):
"""Return files opened by process | |
"""
This module serves as a Python wrapper around the nrfjprog DLL.
Note: Please look at the nrfjprogdll.h file provided with the tools for a more elaborate description of the API functions and their side effects.
"""
from __future__ import print_function
import weakref
from builtins import int
import ctypes
import os
import sys
import datetime
import logging
from pathlib import Path
try:
from . import JLink
from .Parameters import *
from .APIError import *
except Exception:
import JLink
from Parameters import *
from APIError import *
def logger_cb(msg, logger):
    # ctypes log callback: route a message coming from the native DLL to the
    # named Python logger (both arguments arrive as C strings).
    logging.getLogger(decode_string(logger).strip()).debug(decode_string(msg).strip())
"""
Deprecated: Do not use, use log parameter in API constructor instead.
"""
DEBUG_OUTPUT = False
QSPIIniFile = Path(__file__).parent / "QspiDefault.ini"
class API(object):
"""
Main class of the module. Instance the class to get access to nrfjprog.dll functions in Python.
Note: A copy of nrfjprog.dll must be found in the working directory.
"""
_DEFAULT_JLINK_SPEED_KHZ = 2000
def __init__(self, device_family, jlink_arm_dll_path=None, log_str_cb=None, log=False, log_str=None,
             log_file_path=None, log_stringio=None):
    """
    Constructor.
    @param enum, str or int device_family: The series of device pynrfjprog will interact with.
    @param (optional) str jlink_arm_dll_path: Absolute path to the JLinkARM DLL that you want nrfjprog to use. Must be provided if your environment is not standard or your SEGGER installation path is not the default path. See JLink.py for details. Does not support unicode paths.
    @param (optional) callable object log_str_cb: If present, the log_str_cb will be called to receive log and error information. The log_str_cb object should be callable, expect to receive a string as the only parameter and does not need to return anything.
    @param (optional) bool log: If present and true, will enable logging to sys.stderr with the default log string appended to the beginning of each debug output line.
    @param (optional) str log_str: If present, will enable logging to sys.stderr with overwriten default log string appended to the beginning of each debug output line.
    @param (optional) str log_file_path: If present, will enable logging to log_file specified. This file will be opened in write mode in API.__init__() and api.open(), and closed when api.close() is called.
    @param (optional) str log_stringio: If present, will enable logging to open file-like object specified.
    """
    self._device_family = None
    self._jlink_arm_dll_path = None
    # opaque nrfjprog DLL instance handle, populated by open()
    self._handle = ctypes.c_void_p(None)
    # Make a default "dead" finalizer. We'll initialize this in self.open.
    self._finalizer = weakref.finalize(self, lambda : None)
    self._device_family = decode_enum(device_family, DeviceFamily)
    if self._device_family is None:
        raise ValueError('Parameter device_family must be of type int, str or DeviceFamily enumeration.')
    if not isinstance(jlink_arm_dll_path, str) and not jlink_arm_dll_path is None:
        raise ValueError('Parameter jlink_arm_dll_path must be a string.')
    # the native API expects an ASCII byte string (unicode paths unsupported)
    self._jlink_arm_dll_path = os.path.abspath(jlink_arm_dll_path).encode(
        'ascii') if jlink_arm_dll_path is not None else None
    # Redirect writeable log endpoints to log_stringio
    if hasattr(log_file_path, "write") and log_stringio is None:
        log_stringio = log_file_path
        log_file_path = None
    _logger = logging.getLogger(__name__)
    self._logger = Parameters.LoggerAdapter(_logger, None, log=log, log_str_cb=log_str_cb, log_str=log_str, log_file_path=log_file_path,
                                            log_stringio=log_stringio)
    # pick the shared-library name for the host OS
    os_name = sys.platform.lower()
    if os_name.startswith('win'):
        nrfjprog_dll_name = 'nrfjprog.dll'
    elif os_name.startswith('linux'):
        nrfjprog_dll_name = 'libnrfjprogdll.so'
    elif os_name.startswith('dar'):
        nrfjprog_dll_name = 'libnrfjprogdll.dylib'
    else:
        raise ValueError("Unsupported OS")
    # prefer the bundled library; fall back to the system loader's search path
    nrfjprog_dll_path = os.path.join(find_lib_dir(), nrfjprog_dll_name)
    if os.path.exists(nrfjprog_dll_path):
        try:
            self._lib = ctypes.cdll.LoadLibrary(nrfjprog_dll_path)
        except Exception as error:
            raise RuntimeError("Could not load the NRFJPROG DLL: '{}'.".format(error))
    else:
        try:
            self._lib = ctypes.cdll.LoadLibrary(nrfjprog_dll_name)
        except Exception as error:
            raise RuntimeError("Failed to load the NRFJPROG DLL by name: '{}.'".format(error))
"""
nrfjprog.DLL functions.
"""
def dll_version(self):
    """
    Returns the JLinkARM.dll version.
    @return (int, int, str): Tuple containing the major, minor and revision of the dll.
    """
    major = ctypes.c_uint32()
    minor = ctypes.c_uint32()
    revision = ctypes.c_uint8()
    err = self._lib.NRFJPROG_dll_version_inst(self._handle, ctypes.byref(major), ctypes.byref(minor), ctypes.byref(revision))
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    # revision is reported as a single character
    return major.value, minor.value, chr(revision.value)
def find_jlink_path(self):
    """
    Searches for newest installation of JLink shared library (DLL/SO/dylib). The JLink path returned by this function
    will be the same found by the internal auto-detection used when no default JLink install location is provided.
    (See parameter jlink_arm_dll_path in function DebugProbe.__init__ for an example.)
    On unix-like systems the function may also return a library name compatible with dlopen if no library file is
    found in the default search path.
    @return (str): Path to JLink shared library.
    """
    # two-call pattern: first call reports the required buffer size...
    buffer_len = ctypes.c_uint32(0)
    result = self._lib.NRFJPROG_find_jlink_path(None, ctypes.c_uint32(0), ctypes.byref(buffer_len))
    if result != NrfjprogdllErr.SUCCESS:
        raise APIError(result, error_data=self.get_errors(), log=self._logger.error)
    # ...then the second call fills the allocated buffer with the path
    buffer = (ctypes.c_char * buffer_len.value)(0)
    result = self._lib.NRFJPROG_find_jlink_path(buffer, buffer_len, ctypes.byref(buffer_len))
    if result != NrfjprogdllErr.SUCCESS:
        raise APIError(result, error_data=self.get_errors(), log=self._logger.error)
    return buffer.value.decode('utf-8')
def is_open(self):
    """
    Checks if the JLinkARM.dll is open.
    @return bool: True if open.
    """
    state = ctypes.c_bool()
    err = self._lib.NRFJPROG_is_dll_open_inst(self._handle, ctypes.byref(state))
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    return state.value
def open(self):
    """
    Opens the JLinkARM.dll and sets the log callback. Prepares the dll for work with an nRF device.
    """
    # No need to encode self._jlink_arm_dll_path since it is an ASCII string and that is what is expected by ctypes.
    # Function self._log_str_cb has already been encoded in __init__() function.
    device_family = ctypes.c_int(self._device_family.value)
    result = self._lib.NRFJPROG_open_dll_inst(ctypes.byref(self._handle), self._jlink_arm_dll_path, self._logger.log_cb, None, device_family)
    if result != NrfjprogdllErr.SUCCESS:
        raise APIError(result, error_data=self.get_errors())
    # Make sure that api is closed before api is destroyed
    self._finalizer = weakref.finalize(self, self.close)
def close(self):
    """
    Closes and frees the JLinkARM DLL.
    """
    self._lib.NRFJPROG_close_dll_inst(ctypes.byref(self._handle))
    # Disable the api finalizer, as it's no longer necessary when the api is closed.
    self._finalizer.detach()
def get_errors(self):
    """
    Gets last logged error messages from the nrfjprog dll.
    Used to fill in APIError messages.
    @Return list of error strings.
    """
    return self._logger.get_errors()
def enum_emu_com_ports(self, serial_number):
    """
    Finds all comports currently associated with the serial number.
    @param int serial_number: Serial number of the debug probe to find the com port of.
    @Return list of ComPortInfo
    """
    if not is_u32(serial_number):
        raise ValueError('The serial_number parameter must be an unsigned 32-bit value.')
    snr = ctypes.c_uint32(serial_number)
    capacity = ctypes.c_uint32(NRFJPROG_COM_PER_JLINK)
    found = ctypes.c_uint32()
    ports = (ComPortInfoStruct * NRFJPROG_COM_PER_JLINK)()
    err = self._lib.NRFJPROG_enum_emu_com_inst(self._handle, snr, ctypes.byref(ports), capacity,
                                               ctypes.byref(found))
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    # only the first `found` entries are valid
    return [ComPortInfo(p) for p in ports[:found.value]]
def enum_emu_snr(self):
    """
    Enumerates the serial numbers of connected USB J-Link emulators.
    @return [int]: A list with the serial numbers.
    """
    capacity = ctypes.c_uint32(127)
    serials = (ctypes.c_uint32 * capacity.value)()
    available = ctypes.c_uint32()
    err = self._lib.NRFJPROG_enum_emu_snr_inst(self._handle, ctypes.byref(serials), capacity,
                                               ctypes.byref(available))
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    # the DLL may report more probes than fit in the buffer; clamp
    count = min(available.value, capacity.value)
    snr = [int(s) for s in serials[:count]]
    # historical API contract: None (not []) when nothing was found
    return snr if snr else None
def is_connected_to_emu(self):
    """
    Checks if the emulator has an established connection with Segger emulator/debugger.
    @return boolean: True if connected.
    """
    is_connected_to_emu = ctypes.c_bool()
    result = self._lib.NRFJPROG_is_connected_to_emu_inst(self._handle, ctypes.byref(is_connected_to_emu))
    if result != NrfjprogdllErr.SUCCESS:
        raise APIError(result, error_data=self.get_errors())
    return is_connected_to_emu.value
def connect_to_emu_with_snr(self, serial_number, jlink_speed_khz=_DEFAULT_JLINK_SPEED_KHZ):
    """
    Connects to a given emulator/debugger.
    @param int serial_number: Serial number of the emulator to connect to.
    @param int jlink_speed_khz: SWDCLK speed [kHz].
    """
    if not is_u32(serial_number):
        raise ValueError('The serial_number parameter must be an unsigned 32-bit value.')
    if not is_u32(jlink_speed_khz):
        raise ValueError('The jlink_speed_khz parameter must be an unsigned 32-bit value.')
    # tag log output with the target probe before attempting the connection
    self._logger.set_id(serial_number)
    serial_number = ctypes.c_uint32(serial_number)
    jlink_speed_khz = ctypes.c_uint32(jlink_speed_khz)
    result = self._lib.NRFJPROG_connect_to_emu_with_snr_inst(self._handle, serial_number, jlink_speed_khz)
    if result != NrfjprogdllErr.SUCCESS:
        # connection failed: clear the probe id so later logs are not mislabeled
        self._logger.set_id(None)
        raise APIError(result, error_data=self.get_errors())
def connect_to_emu_without_snr(self, jlink_speed_khz=_DEFAULT_JLINK_SPEED_KHZ):
    """
    Connects to an emulator/debugger.
    @param int jlink_speed_khz: SWDCLK speed [kHz].
    """
    if not is_u32(jlink_speed_khz):
        raise ValueError('The jlink_speed_khz parameter must be an unsigned 32-bit value.')
    speed = ctypes.c_uint32(jlink_speed_khz)
    err = self._lib.NRFJPROG_connect_to_emu_without_snr_inst(self._handle, speed)
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    # tag subsequent log output with the probe we ended up talking to
    self._logger.set_id(self.read_connected_emu_snr())
def reset_connected_emu(self):
    """
    Resets the connected emulator.
    """
    result = self._lib.NRFJPROG_reset_connected_emu_inst(self._handle)
    if result != NrfjprogdllErr.SUCCESS:
        raise APIError(result, error_data=self.get_errors())
def replace_connected_emu_fw(self):
    """
    Replaces the firmware of the connected emulator.
    """
    result = self._lib.NRFJPROG_replace_connected_emu_fw_inst(self._handle)
    if result != NrfjprogdllErr.SUCCESS:
        raise APIError(result, error_data=self.get_errors())
def read_connected_emu_snr(self):
    """
    Reads the serial number of the emu connected to.
    @return int: emu serial number.
    """
    serial = ctypes.c_uint32()
    err = self._lib.NRFJPROG_read_connected_emu_snr_inst(self._handle, ctypes.byref(serial))
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    return serial.value
def read_connected_emu_fwstr(self):
    """
    Reads the firmware identification string of the connected emulator.
    @return str: firmware identification string.
    """
    size = ctypes.c_uint32(255)
    fwstr = ctypes.create_string_buffer(size.value)
    err = self._lib.NRFJPROG_read_connected_emu_fwstr_inst(self._handle, fwstr, size)
    if err != NrfjprogdllErr.SUCCESS:
        raise APIError(err, error_data=self.get_errors())
    # Python 2 strings are already bytes; Python 3 needs an explicit decode
    if sys.version_info[0] == 2:
        return fwstr.value
    return fwstr.value.decode('utf-8')
def disconnect_from_emu(self):
"""
Disconnects from an emulator.
"""
result = self._lib.NRFJPROG_disconnect_from_emu_inst(self._handle)
if result != NrfjprogdllErr.SUCCESS:
raise APIError(result, error_data=self.get_errors())
def select_family(self, family):
"""
Select device family
@param DeviceFamily family: Target family for further | |
= Constraint(expr= m.b296 + m.b536 <= 1)
# Auto-generated (GAMS-converted) pairwise exclusivity constraints: each
# constraint forbids the two referenced binary variables from both being 1
# (single-variable constraints are trivially satisfied for binaries).
# Do not hand-edit; regenerate from the source model instead.
m.c3945 = Constraint(expr= m.b297 + m.b537 <= 1)
m.c3946 = Constraint(expr= m.b298 + m.b538 <= 1)
m.c3947 = Constraint(expr= m.b299 + m.b539 <= 1)
m.c3948 = Constraint(expr= m.b300 + m.b540 <= 1)
m.c3949 = Constraint(expr= m.b301 + m.b541 <= 1)
m.c3950 = Constraint(expr= m.b302 + m.b542 <= 1)
m.c3951 = Constraint(expr= m.b303 + m.b543 <= 1)
m.c3952 = Constraint(expr= m.b304 + m.b544 <= 1)
m.c3953 = Constraint(expr= m.b305 + m.b545 <= 1)
m.c3954 = Constraint(expr= m.b306 + m.b546 <= 1)
m.c3955 = Constraint(expr= m.b307 + m.b547 <= 1)
m.c3956 = Constraint(expr= m.b308 + m.b548 <= 1)
m.c3957 = Constraint(expr= m.b309 + m.b549 <= 1)
m.c3958 = Constraint(expr= m.b310 + m.b550 <= 1)
m.c3959 = Constraint(expr= m.b311 + m.b551 <= 1)
m.c3960 = Constraint(expr= m.b312 + m.b552 <= 1)
m.c3961 = Constraint(expr= m.b313 + m.b553 <= 1)
m.c3962 = Constraint(expr= m.b290 + m.b530 <= 1)
m.c3963 = Constraint(expr= m.b291 + m.b531 <= 1)
m.c3964 = Constraint(expr= m.b292 + m.b532 <= 1)
m.c3965 = Constraint(expr= m.b293 + m.b533 <= 1)
m.c3966 = Constraint(expr= m.b294 + m.b534 <= 1)
m.c3967 = Constraint(expr= m.b295 + m.b535 <= 1)
m.c3968 = Constraint(expr= m.b296 + m.b536 <= 1)
m.c3969 = Constraint(expr= m.b297 + m.b537 <= 1)
m.c3970 = Constraint(expr= m.b298 + m.b538 <= 1)
m.c3971 = Constraint(expr= m.b299 + m.b539 <= 1)
m.c3972 = Constraint(expr= m.b300 + m.b540 <= 1)
m.c3973 = Constraint(expr= m.b301 + m.b541 <= 1)
m.c3974 = Constraint(expr= m.b302 + m.b542 <= 1)
m.c3975 = Constraint(expr= m.b303 + m.b543 <= 1)
m.c3976 = Constraint(expr= m.b304 + m.b544 <= 1)
m.c3977 = Constraint(expr= m.b305 + m.b545 <= 1)
m.c3978 = Constraint(expr= m.b306 + m.b546 <= 1)
m.c3979 = Constraint(expr= m.b307 + m.b547 <= 1)
m.c3980 = Constraint(expr= m.b308 + m.b548 <= 1)
m.c3981 = Constraint(expr= m.b309 + m.b549 <= 1)
m.c3982 = Constraint(expr= m.b310 + m.b550 <= 1)
m.c3983 = Constraint(expr= m.b311 + m.b551 <= 1)
m.c3984 = Constraint(expr= m.b312 + m.b552 <= 1)
m.c3985 = Constraint(expr= m.b313 + m.b553 <= 1)
m.c3986 = Constraint(expr= m.b315 + m.b554 <= 1)
m.c3987 = Constraint(expr= m.b316 + m.b555 <= 1)
m.c3988 = Constraint(expr= m.b317 + m.b556 <= 1)
m.c3989 = Constraint(expr= m.b318 + m.b557 <= 1)
m.c3990 = Constraint(expr= m.b319 + m.b558 <= 1)
m.c3991 = Constraint(expr= m.b320 + m.b559 <= 1)
m.c3992 = Constraint(expr= m.b321 + m.b560 <= 1)
m.c3993 = Constraint(expr= m.b322 + m.b561 <= 1)
m.c3994 = Constraint(expr= m.b323 + m.b562 <= 1)
m.c3995 = Constraint(expr= m.b324 + m.b563 <= 1)
m.c3996 = Constraint(expr= m.b325 + m.b564 <= 1)
m.c3997 = Constraint(expr= m.b326 + m.b565 <= 1)
m.c3998 = Constraint(expr= m.b327 + m.b566 <= 1)
m.c3999 = Constraint(expr= m.b328 + m.b567 <= 1)
m.c4000 = Constraint(expr= m.b329 + m.b568 <= 1)
m.c4001 = Constraint(expr= m.b330 + m.b569 <= 1)
m.c4002 = Constraint(expr= m.b331 + m.b570 <= 1)
m.c4003 = Constraint(expr= m.b332 + m.b571 <= 1)
m.c4004 = Constraint(expr= m.b333 + m.b572 <= 1)
m.c4005 = Constraint(expr= m.b334 + m.b573 <= 1)
m.c4006 = Constraint(expr= m.b335 + m.b574 <= 1)
m.c4007 = Constraint(expr= m.b336 + m.b575 <= 1)
m.c4008 = Constraint(expr= m.b337 + m.b576 <= 1)
m.c4009 = Constraint(expr= m.b577 <= 1)
m.c4010 = Constraint(expr= m.b316 + m.b554 <= 1)
m.c4011 = Constraint(expr= m.b317 + m.b555 <= 1)
m.c4012 = Constraint(expr= m.b318 + m.b556 <= 1)
m.c4013 = Constraint(expr= m.b319 + m.b557 <= 1)
m.c4014 = Constraint(expr= m.b320 + m.b558 <= 1)
m.c4015 = Constraint(expr= m.b321 + m.b559 <= 1)
m.c4016 = Constraint(expr= m.b322 + m.b560 <= 1)
m.c4017 = Constraint(expr= m.b323 + m.b561 <= 1)
m.c4018 = Constraint(expr= m.b324 + m.b562 <= 1)
m.c4019 = Constraint(expr= m.b325 + m.b563 <= 1)
m.c4020 = Constraint(expr= m.b326 + m.b564 <= 1)
m.c4021 = Constraint(expr= m.b327 + m.b565 <= 1)
m.c4022 = Constraint(expr= m.b328 + m.b566 <= 1)
m.c4023 = Constraint(expr= m.b329 + m.b567 <= 1)
m.c4024 = Constraint(expr= m.b330 + m.b568 <= 1)
m.c4025 = Constraint(expr= m.b331 + m.b569 <= 1)
m.c4026 = Constraint(expr= m.b332 + m.b570 <= 1)
m.c4027 = Constraint(expr= m.b333 + m.b571 <= 1)
m.c4028 = Constraint(expr= m.b334 + m.b572 <= 1)
m.c4029 = Constraint(expr= m.b335 + m.b573 <= 1)
m.c4030 = Constraint(expr= m.b336 + m.b574 <= 1)
m.c4031 = Constraint(expr= m.b337 + m.b575 <= 1)
m.c4032 = Constraint(expr= m.b576 <= 1)
m.c4033 = Constraint(expr= m.b577 <= 1)
m.c4034 = Constraint(expr= m.b317 + m.b554 <= 1)
m.c4035 = Constraint(expr= m.b318 + m.b555 <= 1)
m.c4036 = Constraint(expr= m.b319 + m.b556 <= 1)
m.c4037 = Constraint(expr= m.b320 + m.b557 <= 1)
m.c4038 = Constraint(expr= m.b321 + m.b558 <= 1)
m.c4039 = Constraint(expr= m.b322 + m.b559 <= 1)
m.c4040 = Constraint(expr= m.b323 + m.b560 <= 1)
m.c4041 = Constraint(expr= m.b324 + m.b561 <= 1)
m.c4042 = Constraint(expr= m.b325 + m.b562 <= 1)
m.c4043 = Constraint(expr= m.b326 + m.b563 <= 1)
m.c4044 = Constraint(expr= m.b327 + m.b564 <= 1)
m.c4045 = Constraint(expr= m.b328 + m.b565 <= 1)
m.c4046 = Constraint(expr= m.b329 + m.b566 <= 1)
m.c4047 = Constraint(expr= m.b330 + m.b567 <= 1)
m.c4048 = Constraint(expr= m.b331 + m.b568 <= 1)
m.c4049 = Constraint(expr= m.b332 + m.b569 <= 1)
m.c4050 = Constraint(expr= m.b333 + m.b570 <= 1)
m.c4051 = Constraint(expr= m.b334 + m.b571 <= 1)
m.c4052 = Constraint(expr= m.b335 + m.b572 <= 1)
m.c4053 = Constraint(expr= m.b336 + m.b573 <= 1)
m.c4054 = Constraint(expr= m.b337 + m.b574 <= 1)
m.c4055 = Constraint(expr= m.b575 <= 1)
m.c4056 = Constraint(expr= m.b576 <= 1)
m.c4057 = Constraint(expr= m.b577 <= 1)
m.c4058 = Constraint(expr= m.b318 + m.b554 <= 1)
m.c4059 = Constraint(expr= m.b319 + m.b555 <= 1)
m.c4060 = Constraint(expr= m.b320 + m.b556 <= 1)
m.c4061 = Constraint(expr= m.b321 + m.b557 <= 1)
m.c4062 = Constraint(expr= m.b322 + m.b558 <= 1)
m.c4063 = Constraint(expr= m.b323 + m.b559 <= 1)
m.c4064 = Constraint(expr= m.b324 + m.b560 <= 1)
m.c4065 = Constraint(expr= m.b325 + m.b561 <= 1)
m.c4066 = Constraint(expr= m.b326 + m.b562 <= 1)
m.c4067 = Constraint(expr= m.b327 + m.b563 <= 1)
m.c4068 = Constraint(expr= m.b328 + m.b564 <= 1)
m.c4069 = Constraint(expr= m.b329 + m.b565 <= 1)
m.c4070 = Constraint(expr= m.b330 + m.b566 <= 1)
m.c4071 = Constraint(expr= m.b331 + m.b567 <= 1)
m.c4072 = Constraint(expr= m.b332 + m.b568 <= 1)
m.c4073 = Constraint(expr= m.b333 + m.b569 <= 1)
m.c4074 = Constraint(expr= m.b334 + m.b570 <= 1)
m.c4075 = Constraint(expr= m.b335 + m.b571 <= 1)
m.c4076 = Constraint(expr= m.b336 + m.b572 <= 1)
m.c4077 = Constraint(expr= m.b337 + m.b573 <= 1)
m.c4078 = Constraint(expr= m.b574 <= 1)
m.c4079 = Constraint(expr= m.b575 <= 1)
m.c4080 = Constraint(expr= m.b576 <= 1)
m.c4081 = Constraint(expr= m.b577 <= 1)
m.c4082 = Constraint(expr= m.b319 + m.b554 <= 1)
m.c4083 = Constraint(expr= m.b320 + m.b555 <= 1)
m.c4084 = Constraint(expr= m.b321 + m.b556 <= 1)
m.c4085 = Constraint(expr= m.b322 + m.b557 <= 1)
m.c4086 = Constraint(expr= m.b323 + m.b558 <= 1)
m.c4087 = Constraint(expr= m.b324 + m.b559 <= 1)
m.c4088 = Constraint(expr= m.b325 + m.b560 <= 1)
m.c4089 = Constraint(expr= m.b326 + m.b561 <= 1)
m.c4090 = Constraint(expr= m.b327 + m.b562 <= 1)
m.c4091 = Constraint(expr= m.b328 + m.b563 <= 1)
m.c4092 = Constraint(expr= m.b329 + m.b564 <= 1)
m.c4093 = Constraint(expr= m.b330 + m.b565 <= 1)
m.c4094 = Constraint(expr= m.b331 + m.b566 <= 1)
m.c4095 = Constraint(expr= m.b332 + m.b567 <= 1)
m.c4096 = Constraint(expr= m.b333 + m.b568 <= 1)
m.c4097 = Constraint(expr= m.b334 + m.b569 <= 1)
m.c4098 = Constraint(expr= m.b335 + m.b570 <= 1)
m.c4099 = Constraint(expr= m.b336 + m.b571 <= 1)
m.c4100 = Constraint(expr= m.b337 + m.b572 <= 1)
m.c4101 = Constraint(expr= m.b573 <= 1)
m.c4102 = Constraint(expr= m.b574 <= 1)
m.c4103 = Constraint(expr= m.b575 <= 1)
m.c4104 = Constraint(expr= m.b576 <= 1)
m.c4105 = Constraint(expr= m.b577 <= 1)
m.c4106 = Constraint(expr= m.b314 + m.b554 <= 1)
m.c4107 = Constraint(expr= m.b315 + m.b555 <= 1)
m.c4108 = Constraint(expr= m.b316 + m.b556 <= 1)
m.c4109 = Constraint(expr= m.b317 + m.b557 <= 1)
m.c4110 = Constraint(expr= m.b318 + m.b558 <= 1)
m.c4111 = Constraint(expr= m.b319 + m.b559 <= 1)
m.c4112 = Constraint(expr= m.b320 + m.b560 <= 1)
m.c4113 = Constraint(expr= m.b321 + m.b561 <= 1)
m.c4114 = Constraint(expr= m.b322 + m.b562 <= 1)
m.c4115 = Constraint(expr= m.b323 + m.b563 <= 1)
m.c4116 = Constraint(expr= m.b324 + m.b564 <= 1)
m.c4117 = Constraint(expr= m.b325 + m.b565 <= 1)
m.c4118 = Constraint(expr= m.b326 + m.b566 <= 1)
m.c4119 = Constraint(expr= m.b327 + m.b567 <= 1)
m.c4120 = Constraint(expr= m.b328 + m.b568 <= 1)
m.c4121 = Constraint(expr= m.b329 + m.b569 <= 1)
m.c4122 = Constraint(expr= m.b330 + m.b570 <= 1)
m.c4123 = Constraint(expr= m.b331 + m.b571 <= 1)
m.c4124 = Constraint(expr= m.b332 + m.b572 <= 1)
m.c4125 = Constraint(expr= m.b333 + m.b573 <= 1)
m.c4126 = Constraint(expr= m.b334 + m.b574 <= 1)
m.c4127 = Constraint(expr= m.b335 + m.b575 <= 1)
m.c4128 = Constraint(expr= m.b336 + m.b576 <= 1)
m.c4129 = Constraint(expr= m.b337 + m.b577 <= 1)
m.c4130 = Constraint(expr= m.b314 + m.b554 <= 1)
m.c4131 = | |
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from .model_create import *
def SetUniversalParameters(device, region):
    """Register universal physical constants as parameters on *region*."""
    universal = (
        ('q', 1.6e-19),                # elementary charge ('coul')
        ('k', 1.3806503e-23),          # Boltzmann constant ('J/K')
        ('Permittivity_0', 8.85e-14),  # vacuum permittivity ('F/cm^2')
    )
    for name, value in universal:
        set_parameter(device=device, region=region, name=name, value=value)
def SetSiliconParameters(device, region):
    '''
    Sets Silicon device parameters on the specified region.
    '''
    SetUniversalParameters(device, region)
    # Reference: "Unified apparent bandgap narrowing in n- and p-type Silicon,"
    # Solid-State Electronics, vol. 35, no. 2, pp. 125-29, 1992.
    # (author names were garbled in the original comment)
    par = {
        'Permittivity' : 11.1*get_parameter(device=device, region=region, name='Permittivity_0'),
        'NC300' : 2.8e19, # '1/cm^3'
        'NV300' : 3.1e19, # '1/cm^3'
        'EG300' : 1.12, # 'eV'
        'EGALPH' : 2.73e-4, # 'eV/K'
        'EGBETA' : 0 , # 'K'
        'Affinity' : 4.05 , # 'eV' (original comment said 'K' -- electron affinity is an energy; TODO confirm)
        # Canali model
        'BETAN0' : 2.57e-2, # '1'
        'BETANE' : 0.66, # '1'
        'BETAP0' : 0.46, # '1'
        'BETAPE' : 0.17, # '1'
        'VSATN0' : 1.43e9,
        'VSATNE' : -0.87,
        'VSATP0' : 1.62e8,
        'VSATPE' : -0.52,
        # Arora model
        'MUMN' : 88,
        'MUMEN' : -0.57,
        'MU0N' : 7.4e8,
        'MU0EN' : -2.33,
        'NREFN' : 1.26e17,
        'NREFNE' : 2.4,
        'ALPHA0N' : 0.88,
        'ALPHAEN' : -0.146,
        'MUMP' : 54.3,
        'MUMEP' : -0.57,
        'MU0P' : 1.36e8,
        'MU0EP' : -2.23,
        'NREFP' : 2.35e17,
        'NREFPE' : 2.4,
        'ALPHA0P' : 0.88,
        'ALPHAEP' : -0.146,
        # SRH recombination lifetimes and reference densities
        "taun" : 1e-5,
        "taup" : 1e-5,
        "n1" : 1e10,
        "p1" : 1e10,
        # lattice temperature [K]
        "T" : 300
    }
    for k, v in par.items():
        set_parameter(device=device, region=region, name=k, value=v)
def CreateQuasiFermiLevels(device, region, electron_model, hole_model, variables):
    '''
    Creates the models for the quasi-Fermi levels. Assuming Boltzmann statistics.
    '''
    models = (
        ('EFN', 'EC + V_t * log(%s/NC)' % electron_model, ('Potential', 'Electrons')),
        ('EFP', 'EV - V_t * log(%s/NV)' % hole_model, ('Potential', 'Holes')),
    )
    for name, expression, depends in models:
        CreateNodeModel(device, region, name, expression)
        # derivatives only with respect to solution variables actually in use
        wanted = set(depends)
        for var in variables:
            if var in wanted:
                CreateNodeModelDerivative(device, region, name, expression, var)
def CreateDensityOfStates(device, region, variables):
    '''
    Set up node models for the density of states and derived band
    quantities (NC, NV, EG, NIE, EC, EV, EI).
    Bandgap narrowing is neglected (DEG is fixed at 0).

    Derivatives are created only for the solution variables listed in
    `variables` that each model actually depends on.
    '''
    eq = (
        # BUG FIX: the dependency lists below used ('T') -- which is just the
        # string 'T', not a tuple -- and only worked because set('T') == {'T'}
        # for a single character.  Use proper one-element tuples ('T',).
        ('NC', 'NC300 * (T/300)^1.5', ('T',)),
        ('NV', 'NV300 * (T/300)^1.5', ('T',)),
        ('NTOT', 'Donors + Acceptors', ()),
        # Band Gap Narrowing (disabled; DEG kept at 0)
        ('DEG', '0', ()),
        #('DEG', 'V0.BGN * (log(NTOT/N0.BGN) + ((log(NTOT/N0.BGN)^2 + CON.BGN)^(0.5)))', ()),
        ('EG', 'EG300 + EGALPH*((300^2)/(300+EGBETA) - (T^2)/(T+EGBETA)) - DEG', ('T',)),
        ('NIE', '((NC * NV)^0.5) * exp(-EG/(2*V_t))*exp(DEG)', ('T',)),
        ('EC', '-Potential - Affinity - DEG/2', ('Potential',)),
        ('EV', 'EC - EG + DEG/2', ('Potential', 'T')),
        ('EI', '0.5 * (EC + EV + V_t*log(NC/NV))', ('Potential', 'T')),
    )
    for model, equation, variable_list in eq:
        CreateNodeModel(device, region, model, equation)
        vset = set(variable_list)
        for v in variables:
            if v in vset:
                CreateNodeModelDerivative(device, region, model, equation, v)
def GetContactBiasName(contact):
    '''Return the name of the bias parameter/circuit node for `contact`.'''
    return "%s_bias" % (contact,)
def GetContactNodeModelName(contact):
    '''Return the name of the contact node model for `contact`.'''
    return "%snodemodel" % (contact,)
def CreateVT(device, region, variables):
    '''
    Calculates the thermal voltage, based on the temperature.
    V_t : node model (k*T/q)
    V_t_edge : edge model from arithmetic mean of V_t
    A derivative with respect to T is only created when 'T' is among the
    solution variables.
    '''
    CreateNodeModel(device, region, 'V_t', "k*T/q")
    CreateArithmeticMean(device, region, 'V_t', 'V_t_edge')
    if 'T' in variables:
        CreateArithmeticMeanDerivative(device, region, 'V_t', 'V_t_edge', 'T')
def CreateEField(device, region):
    '''
    Creates the EField edge model as the negative gradient of Potential,
    together with its derivative with respect to Potential.
    '''
    common = dict(device=device, region=region, node_model="Potential",
                  edge_model="EField", average_type="negative_gradient")
    edge_average_model(**common)
    edge_average_model(derivative="Potential", **common)
def CreateDField(device, region):
    '''
    Displacement field DField = Permittivity * EField, plus its
    derivatives with respect to Potential at both edge endpoints.
    '''
    for suffix in ("", ":Potential@n0", ":Potential@n1"):
        CreateEdgeModel(device, region, "DField" + suffix,
                        "Permittivity * EField" + suffix)
def CreateSiliconPotentialOnly(device, region):
    '''
    Creates the physical models for a Silicon region for the equilibrium
    (Potential-only) simulation.

    Requires a NetDoping node model to already exist on the region.
    '''
    variables = ("Potential",)
    CreateVT(device, region, variables)
    CreateDensityOfStates(device, region, variables)
    SetSiliconParameters(device, region)
    # Intrinsic carrier/charge models driven only by Potential.
    # (idiom fix: unpack the tuple directly instead of i[0]/i[1] indexing)
    for name, expression in (
        ("IntrinsicElectrons", "NIE*exp(Potential/V_t)"),
        ("IntrinsicHoles", "NIE^2/IntrinsicElectrons"),
        ("IntrinsicCharge", "kahan3(IntrinsicHoles, -IntrinsicElectrons, NetDoping)"),
        ("PotentialIntrinsicCharge", "-q * IntrinsicCharge"),
    ):
        CreateNodeModel(device, region, name, expression)
        CreateNodeModelDerivative(device, region, name, expression, 'Potential')
    CreateQuasiFermiLevels(device, region, 'IntrinsicElectrons', 'IntrinsicHoles', variables)
    CreateEField(device, region)
    CreateDField(device, region)
    equation(device=device, region=region, name="PotentialEquation", variable_name="Potential",
             node_model="PotentialIntrinsicCharge", edge_model="DField",
             variable_update="log_damp")
def CreateSiliconPotentialOnlyContact(device, region, contact, is_circuit=False):
    '''
    Creates the potential equation at the contact.
    If is_circuit is True, the bias comes from the circuit node named by
    GetContactBiasName(contact) rather than a fixed parameter.
    '''
    if not InNodeModelList(device, region, "contactcharge_node"):
        CreateNodeModel(device, region, "contactcharge_node", "q*IntrinsicCharge")
    # Equilibrium contact carrier densities, guarded against zero doping.
    celec_model = "(1e-10 + 0.5*abs(NetDoping+(NetDoping^2 + 4 * NIE^2)^(0.5)))"
    chole_model = "(1e-10 + 0.5*abs(-NetDoping+(NetDoping^2 + 4 * NIE^2)^(0.5)))"
    contact_model = "Potential -{0} + ifelse(NetDoping > 0, \
      -V_t*log({1}/NIE), \
      V_t*log({2}/NIE))".format(GetContactBiasName(contact), celec_model, chole_model)
    contact_model_name = GetContactNodeModelName(contact)
    CreateContactNodeModel(device, contact, contact_model_name, contact_model)
    # d(model)/d(Potential) is exactly 1
    CreateContactNodeModel(device, contact, "%s:%s" % (contact_model_name, "Potential"), "1")
    eq_kwargs = dict(device=device, contact=contact, name="PotentialEquation",
                     variable_name="Potential", node_model=contact_model_name,
                     edge_model="", node_charge_model="contactcharge_node",
                     edge_charge_model="DField", node_current_model="",
                     edge_current_model="")
    if is_circuit:
        # The bias is a circuit unknown: register its derivative and
        # couple the contact equation to the circuit node.
        CreateContactNodeModel(device, contact,
                               "%s:%s" % (contact_model_name, GetContactBiasName(contact)), "-1")
        eq_kwargs["circuit_node"] = GetContactBiasName(contact)
    contact_equation(**eq_kwargs)
def CreateSRH(device, region, variables):
    '''
    Shockley-Read-Hall recombination, expressed as generation terms:
    negative for electrons, positive for holes, both scaled by q.
    Derivatives are created for Electrons/Holes/T when present in
    `variables`.
    '''
    USRH = "(Electrons*Holes - NIE^2)/(taup*(Electrons + n1) + taun*(Holes + p1))"
    models = (
        ("USRH", USRH),
        ("ElectronGeneration", "-q * USRH"),
        ("HoleGeneration", "+q * USRH"),
    )
    for name, expression in models:
        CreateNodeModel(device, region, name, expression)
    for var in ("Electrons", "Holes", "T"):
        if var in variables:
            for name, expression in models:
                CreateNodeModelDerivative(device, region, name, expression, var)
def CreateECE(device, region, Jn):
    '''
    Electron continuity equation: time term q*Electrons, generation node
    model, and the supplied electron current edge model Jn.
    '''
    charge = "q * Electrons"
    CreateNodeModel(device, region, "NCharge", charge)
    CreateNodeModelDerivative(device, region, "NCharge", charge, "Electrons")
    equation(device=device, region=region, name="ElectronContinuityEquation",
             variable_name="Electrons", time_node_model="NCharge",
             edge_model=Jn, variable_update="positive",
             node_model="ElectronGeneration")
def CreateHCE(device, region, Jp):
    '''
    Hole continuity equation: time term -q*Holes, generation node model,
    and the supplied hole current edge model Jp.
    '''
    charge = "-q * Holes"
    CreateNodeModel(device, region, "PCharge", charge)
    CreateNodeModelDerivative(device, region, "PCharge", charge, "Holes")
    equation(device=device, region=region, name="HoleContinuityEquation",
             variable_name="Holes", time_node_model="PCharge",
             edge_model=Jp, variable_update="positive",
             node_model="HoleGeneration")
def CreatePE(device, region):
    '''
    Poisson equation with Electrons and Holes as solution variables.
    '''
    charge = "-q*kahan3(Holes, -Electrons, NetDoping)"
    CreateNodeModel(device, region, "PotentialNodeCharge", charge)
    for var in ("Electrons", "Holes"):
        CreateNodeModelDerivative(device, region, "PotentialNodeCharge", charge, var)
    equation(device=device, region=region, name="PotentialEquation", variable_name="Potential",
             node_model="PotentialNodeCharge", edge_model="DField",
             time_node_model="", variable_update="log_damp")
def CreateSiliconDriftDiffusion(device, region, mu_n="mu_n", mu_p="mu_p", Jn='Jn', Jp='Jp'):
    '''
    Instantiate all equations for drift diffusion simulation: density of
    states, quasi-Fermi levels, Poisson equation, SRH recombination, and
    the electron/hole continuity equations using the edge current models
    named by Jn and Jp.

    NOTE(review): mu_n and mu_p are accepted but never referenced in this
    body; presumably the mobility enters via the Jn/Jp models -- confirm.
    '''
    CreateDensityOfStates(device, region, ("Potential",))
    CreateQuasiFermiLevels(device, region, "Electrons", "Holes", ("Electrons", "Holes", "Potential"))
    CreatePE(device, region)
    CreateSRH(device, region, ("Electrons", "Holes", "Potential"))
    CreateECE(device, region, Jn)
    CreateHCE(device, region, Jp)
def CreateSiliconDriftDiffusionContact(device, region, contact, Jn, Jp, is_circuit=False):
    '''
    Pins Electrons and Holes to their equilibrium values at the contact
    and wires the continuity equations, optionally integrating the
    current into a circuit node.
    '''
    CreateSiliconPotentialOnlyContact(device, region, contact, is_circuit)
    celec_model = "(1e-10 + 0.5*abs(NetDoping+(NetDoping^2 + 4 * NIE^2)^(0.5)))"
    chole_model = "(1e-10 + 0.5*abs(-NetDoping+(NetDoping^2 + 4 * NIE^2)^(0.5)))"
    contact_electrons_model = "Electrons - ifelse(NetDoping > 0, {0}, NIE^2/{1})".format(celec_model, chole_model)
    contact_holes_model = "Holes - ifelse(NetDoping < 0, +{1}, +NIE^2/{0})".format(celec_model, chole_model)
    contact_electrons_name = "{0}nodeelectrons".format(contact)
    contact_holes_name = "{0}nodeholes".format(contact)
    # Register each contact node model with a unit derivative with respect
    # to its own solution variable.
    for name, model, variable in (
        (contact_electrons_name, contact_electrons_model, "Electrons"),
        (contact_holes_name, contact_holes_model, "Holes"),
    ):
        CreateContactNodeModel(device, contact, name, model)
        CreateContactNodeModel(device, contact, "{0}:{1}".format(name, variable), "1")
    # When driven by a circuit, couple both continuity equations to the
    # bias circuit node.
    extra = {"circuit_node": GetContactBiasName(contact)} if is_circuit else {}
    contact_equation(device=device, contact=contact, name="ElectronContinuityEquation",
                     variable_name="Electrons", node_model=contact_electrons_name,
                     edge_current_model=Jn, **extra)
    contact_equation(device=device, contact=contact, name="HoleContinuityEquation",
                     variable_name="Holes", node_model=contact_holes_name,
                     edge_current_model=Jp, **extra)
def CreateBernoulliString(Potential="Potential", scaling_variable="V_t", sign=-1):
    '''
    Builds the Bernoulli-function expression used by Scharfetter-Gummel
    discretization.

    sign -1 : the driving quantity is a potential
    sign +1 : the driving quantity is an energy
    scaling_variable should be V_t; Potential is scaled by V_t in V,
    Ec/Ev are scaled by V_t in eV.

    Returns (Bernoulli expression, its argument).  Callers may rely on
    the identity B(-x) = B(x) + x.
    '''
    if sign == -1:
        vdiff = "(%s@n0 - %s@n1)/%s" % (Potential, Potential, scaling_variable)
    elif sign == 1:
        vdiff = "(%s@n1 - %s@n0)/%s" % (Potential, Potential, scaling_variable)
    else:
        raise NameError("Invalid Sign %s" % sign)
    return ("B(%s)" % vdiff, vdiff)
def CreateElectronCurrent(device, region, mu_n, Potential="Potential", sign=-1, ElectronCurrent="ElectronCurrent", V_t="V_t_edge"):
'''
Electron current
mu_n = mobility name
Potential is the driving potential
'''
EnsureEdgeFromNodeModelExists(device, region, "Potential")
EnsureEdgeFromNodeModelExists(device, region, "Electrons")
EnsureEdgeFromNodeModelExists(device, region, "Holes")
if Potential == "Potential":
(Bern01, vdiff) = CreateBernoulliString(scaling_variable=V_t, Potential=Potential, sign=sign)
else:
raise NameError("Implement proper call")
tdict = {
'Bern01' : | |
jsonFiles = glob.glob(os.path.join(self.sampleDir, "resources", "data", "json", "voa[12].txt.json"))
# Import some documents.
w.importFiles(jsonFiles, "core", document_status = "reconciled",
strip_suffix = ".txt.json")
# Build a model.
w.runFolderOperation("core", "modelbuild")
# Import some more documents.
w.importFiles([os.path.join(self.sampleDir, "resources", "data", "raw", "voa3.txt")], "core", file_type = "raw",
strip_suffix = ".txt")
# Now, mark the voa3 document as read-only, so writing fails.
import stat
p = w.folders["core"].getFiles(["voa2"])[0]
# Cache the document contents of voa3.
voa3p = w.folders["core"].getFiles(["voa3"])[0]
fp = codecs.open(voa3p, "r", "utf-8")
voa3s = fp.read()
fp.close()
origMode = os.stat(p)[stat.ST_MODE]
# Now, make the voa2 path unwriteable. NOTE: This will generate a
# warning when trying to restore when we unwind the autotag transaction.
os.chmod(p, origMode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH)
# Now, we tag, and bad things will happen.
try:
w.runFolderOperation("core", "autotag")
self.fail("Should have hit an error")
except MAT.Workspace.WorkspaceError, err:
pass
# And the document should still be unannotated.
self.failUnless(w.getDB().basenameInfo(["voa3"])[0][2] == 'unannotated')
# And the contents of voa3 should be undisturbed.
fp = codecs.open(voa3p, "r", "utf-8")
voa3sNow = fp.read()
fp.close()
self.failUnless(voa3s == voa3sNow)
# Now, restore the permissions.
os.chmod(p, origMode)
# Can't get the permission stuff to work on
# Windows with directories.
if sys.platform != "win32":
# And set the directory to be unreadable.
origMode = os.stat(os.path.dirname(p))[stat.ST_MODE]
os.chmod(os.path.dirname(p), origMode & ~stat.S_IWUSR & ~stat.S_IWGRP & ~stat.S_IWOTH)
# Now, we tag, and bad things will happen.
try:
w.runFolderOperation("core", "autotag")
self.fail("Should have hit an error")
except MAT.Workspace.WorkspaceError, err:
# Restore it first, in case the test fails.
pass
# And the document should still be unannotated.
self.assertEqual(w.getDB().basenameInfo(["voa3"])[0][2], 'unannotated')
# And the contents of voa3 should be undisturbed.
fp = codecs.open(voa3p, "r", "utf-8")
voa3sNow = fp.read()
fp.close()
self.failUnless(voa3s == voa3sNow)
# And restore it.
os.chmod(os.path.dirname(p), origMode)
    def testAssignmentRollback(self):
        """An 'assign' operation must fail and roll back cleanly when one
        of the requested basenames is locked by another user, leaving the
        basename table intact."""
        w = self._createWorkspace(taskName = "Named Entity", create = True,
                                  initialUsers = ["user1", "user2"])
        import glob
        rawDocs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[67].txt"))
        w.importFiles(rawDocs, "core", file_type = "raw", strip_suffix = ".txt")
        # Now, lock the second one.
        d, lockId = w.openWorkspaceFile("core", "voa7", user = "user1")
        try:
            w.runOperation("assign", ('voa6', 'voa7'), user = "user1")
            self.fail("assignment should have failed")
        except MAT.Workspace.WorkspaceError, e:
            # The error must mention the lock as the reason.
            self.failUnless(str(e).find("because it's locked") > -1)
        # Now, make sure that the basename info is still intact. At the moment,
        # we can't set up the transaction so that a stray file that's created
        # is removed.
        bInfo = w.getDB().basenameInfo(["voa6", "voa7"])
        self.failUnlessEqual(set([r[0] for r in bInfo]), set(["voa6", "voa7"]))
    def testUsers(self):
        """Exercises user-based locking: an unknown user cannot open a
        file, a lock excludes other users, the lock owner may re-open,
        and a read-only open takes no lock."""
        w = self._createWorkspace(taskName = "Named Entity", create = True,
                                  initialUsers = ["user1"])
        import glob, codecs
        jsonFiles = glob.glob(os.path.join(self.sampleDir, "resources", "data", "json", "voa[12].txt.json"))
        # Import some documents.
        w.importFiles(jsonFiles, "core", document_status = "reconciled",
                      strip_suffix = ".txt.json")
        # Opening without a user must be rejected.
        try:
            d, lockId = w.openWorkspaceFile("core", "voa1")
            self.fail("shouldn't have been able to open the file")
        except MAT.Workspace.WorkspaceError, e:
            self.failUnless(str(e).find("unknown user") > -1)
        d, lockId = w.openWorkspaceFile("core", "voa1", user = "user1")
        # You should be able to open a document as yourself, not as anyone else.
        try:
            w.openWorkspaceFile("core", "voa1", user = "user2")
            self.fail("shouldn't have been able to open the file")
        except MAT.Workspace.WorkspaceError, e:
            self.failUnless(str(e).find("locked document") > -1)
        # Re-opening as the lock owner must succeed.
        try:
            d, lockId = w.openWorkspaceFile("core", "voa1", user = "user1")
        except MAT.Workspace.WorkspaceError, e:
            self.fail("should have been able to open the file")
        # A read-only open must not allocate a lock/transaction id.
        otherD, otherTxId = w.openWorkspaceFile("core", "voa1", read_only = True)
        self.failUnless(otherTxId is None)
        # Close the file.
        w.runFolderOperation("core", "save", basenames = ["voa1"],
                             lock_id = lockId, release_lock = True)
def testSimpleRemoval(self):
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1"])
import glob, codecs
rawDocs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[67].txt"))
w.importFiles(rawDocs, "core", file_type = "raw",
strip_suffix = ".txt")
w.removeAllBasenames()
# Nothing should be left.
self.failUnlessEqual(os.listdir(w.folders["core"].dir), [])
self.failUnlessEqual(w.getDB().allBasenames(), [])
def testPartialRemoval(self):
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1"])
import glob, codecs
rawDocs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[678].txt"))
w.importFiles(rawDocs, "core", file_type = "raw",
strip_suffix = ".txt")
w.removeBasenames(['voa7', 'voa6'])
# Nothing should be left.
self.failUnlessEqual(os.listdir(w.folders["core"].dir), ['voa8'])
self.failUnlessEqual(w.getDB().allBasenames(), ['voa8'])
def testSimpleAssignment(self):
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1", "user2"])
import glob, codecs
rawDocs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[67].txt"))
w.importFiles(rawDocs, "core", file_type = "raw",
strip_suffix = ".txt")
w.runOperation("assign", ('voa6',), user = "user1,user2")
bInfo = w.getDB().basenameInfo(["voa6", "voa7"])
self.failUnlessEqual(set([r[0] for r in bInfo]), set(['voa6_user1', 'voa6_user2', 'voa7']))
self.failUnlessEqual(set(os.listdir(w.getFolder("core").dir)),
set(['voa6_user1', 'voa6_user2', 'voa7']))
w.runOperation("assign", ('voa7',), user = "user1")
bInfo = w.getDB().basenameInfo(["voa6", "voa7"])
self.failUnlessEqual(set([r[0] for r in bInfo]), set(['voa6_user1', 'voa6_user2', 'voa7_user1']))
self.failUnlessEqual(set(os.listdir(w.getFolder("core").dir)),
set(['voa6_user1', 'voa6_user2', 'voa7_user1']))
def testAssignmentOnImport(self):
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1", "user2"])
import glob, codecs
rawDocs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[67].txt"))
w.importFiles(rawDocs, "core", file_type = "raw",
strip_suffix = ".txt", assign = True, users = 'user1,user2')
bInfo = w.getDB().basenameInfo(["voa6", "voa7"])
self.failUnlessEqual(set([r[0] for r in bInfo]), set(['voa6_user1', 'voa6_user2', 'voa7_user1', 'voa7_user2']))
self.failUnlessEqual(set(os.listdir(w.getFolder("core").dir)),
set(['voa6_user1', 'voa6_user2', 'voa7_user1', 'voa7_user2']))
    def testBadAssignmentOnImport(self):
        """Importing reconciled (gold) documents with assign=True must be
        rejected, since gold documents cannot be assigned to annotators."""
        w = self._createWorkspace(taskName = "Named Entity", create = True,
                                  initialUsers = ["user1", "user2"])
        import glob, codecs
        jsonFiles = glob.glob(os.path.join(self.sampleDir, "resources", "data", "json", "voa[12].txt.json"))
        # Import some gold standard documents
        try:
            w.importFiles(jsonFiles, "core", document_status = "reconciled",
                          strip_suffix = ".txt.json", assign = True, users = "user1")
            self.fail("gold standard import should have failed")
        except MAT.Workspace.WorkspaceError, e:
            self.failUnless(str(e).find("can't assign reconciled documents to users") > -1)
    def testMultipleAutotag(self):
        """Autotagging a basename assigned to several users should update
        every per-user copy, leaving each copy uncorrected and still
        assigned to its user."""
        w = self._createWorkspace(taskName = "Named Entity", create = True,
                                  initialUsers = ["user1", "user2"])
        import glob, codecs
        jsonFiles = glob.glob(os.path.join(self.sampleDir, "resources", "data", "json", "voa[12].txt.json"))
        w.importFiles(jsonFiles, "core", document_status = "reconciled",
                      strip_suffix = ".txt.json")
        # Build a model.
        w.runFolderOperation("core", "modelbuild")
        # Insert another document, assign to each user.
        w.importFiles([os.path.join(self.sampleDir, "resources", "data", "raw", "voa3.txt")], "core", file_type = "raw",
                      strip_suffix = ".txt", assign=True, users="user1,user2")
        # Autotag.
        w.runFolderOperation("core", "autotag")
        # Three docs, two reconciled, one uncorrected but
        # assigned to multiple people.
        bsDict = {("voa1", "voa1"): ["reconciled", None, None],
                  ("voa2", "voa2"): ["reconciled", None, None],
                  ("voa3_user1", "voa3"): ["uncorrected", "user1", None],
                  ("voa3_user2", "voa3"): ["uncorrected", "user2", None]}
        # bsDict is a hash from (docname, basename) to (status, assigned, locked)
        basenames = set([k[1] for k in bsDict.keys()])
        for docName, basename, status, assignedUser, lockedBy in w.getDB().basenameInfo(list(basenames)):
            try:
                bStatus, bAssigned, bLocked = bsDict[(docName, basename)]
            except KeyError:
                # Rows not covered by the expectation table are ignored.
                continue
            self.failUnless(status == bStatus and assignedUser == bAssigned and lockedBy == bLocked,
                            "%s != %s or %s != %s or %s != %s" % (status, bStatus, assignedUser, bAssigned, lockedBy, bLocked))
def testUncorrectedImport(self):
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1"])
xmlFile1 = os.path.join(self.sampleDir, "resources", "data", "xml", "voa1.xml")
w.importFiles([xmlFile1], "core", users = "MACHINE",
fileIO = MAT.DocumentIO.getDocumentIO("xml-inline", task = self.task))
self.assertEqual(["voa1.xml"], [r[1] for r in w.getDB().basenameInfo(["voa1.xml"]) if r[2] == "uncorrected"])
def testForceUnlock(self):
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1"])
import glob, codecs
rawDocs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[67].txt"))
w.importFiles(rawDocs, "core", file_type = "raw",
strip_suffix = ".txt")
doc, lockId = w.openWorkspaceFile("core", "voa6", user = "user1")
# Now, unlock it.
w.runOperation("force_unlock", ("core", "voa6"), user = "user1")
# Now, it better be unlocked.
self.assertEqual(w.getDB().coreGetLockIDInfo(lockId), (None, None, None))
# Now, let's test the experiment stuff. We have to test this both from the experiment
# engine and from the workspace. And really, what we need to do is test the document
# selection.
from MAT.CarafeTrain import TestRun, TrainingRun, ExperimentEngine, \
WorkspaceCorpusSet, WorkspaceCorpus, fromXML
class WorkspaceExperimentTestCase(WorkspaceBaseTestCase):
def testWorkspaceExperiment(self):
# I need a third user that I'm not going to test against, which
# provides a gold document, so that there's ALWAYS something in the
# remainder when I check the workspace operation experiment results.
w = self._createWorkspace(taskName = "Named Entity", create = True,
initialUsers = ["user1", "user2", "user3"])
import glob
docs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "json", "voa[1-6].txt.json"))
w.importFiles(docs, "core", strip_suffix = ".txt.json")
docs = glob.glob(os.path.join(self.sampleDir, "resources", "data", "raw", "voa[7-9].txt")) + \
[os.path.join(self.sampleDir, "resources", "data", "raw", "voa10.txt")]
# Now, these will be unannotated, and should never be grabbed.
w.importFiles(docs, "core", file_type = "raw", strip_suffix = ".txt")
w.runOperation("assign", ("voa7", "voa8"), user = "user1,user2,user3")
# Mark a couple of them gold.
w.runOperation("markgold", ("core", "voa9"), user = "user1")
w.runOperation("markgold", ("core", "voa8"), user = "user2")
w.runOperation("markgold", ("core", "voa8"), user = "user3")
w.runOperation("add_to_basename_set", ("set1", "voa1", "voa3", "voa5", | |
"""
https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/translate/beam_search.py
References:
Copyright (c) 2017 <NAME>
Licensed under The MIT License, see https://choosealicense.com/licenses/mit/
@inproceedings{klein-etal-2017-opennmt,
title = "{O}pen{NMT}: Open-Source Toolkit for Neural Machine Translation",
author = "Klein, Guillaume and
Kim, Yoon and
Deng, Yuntian and
Senellart, Jean and
Rush, Alexander M.",
booktitle = "Proceedings of {ACL} 2017, System Demonstrations",
month = jul,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P17-4012",
pages = "67--72",
}
History:
https://github.com/jayleicn/recurrent-transformer
https://github.com/OpenNMT/OpenNMT-py
Current version 2021 https://github.com/gingsi/coot-videotext
"""
from __future__ import annotations
import logging
import torch
logger = logging.getLogger(__name__)
class DecodeStrategy(object):
"""
Base class for generation strategies.
Args:
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
batch_size (int): Current batch size.
parallel_paths (int): Decoding strategies like beam search
use parallel paths. Each batch is repeated ``parallel_paths``
times in relevant state tensors.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
max_length (int): Longest acceptable sequence, not counting
begin-of-sentence (presumably there has been no EOS
yet if max_length is used as a cutoff).
block_ngram_repeat (int): Block beams where
``block_ngram_repeat``-grams repeat.
exclusion_tokens (set[int]): If a gram contains any of these
tokens, it may repeat.
use_cuda: Move tensors to GPU
Attributes:
pad (int): See above.
bos (int): See above.
eos (int): See above.
predictions (list[list[LongTensor]]): For each batch, holds a
list of beam prediction sequences.
scores (list[list[FloatTensor]]): For each batch, holds a
list of scores.
attention (list[list[FloatTensor or list[]]]): For each
batch, holds a list of attention sequence tensors
(or empty lists) having shape ``(step, inp_seq_len)`` where
``inp_seq_len`` is the length of the sample (not the max
length of all inp seqs).
alive_seq (LongTensor): Shape ``(B x parallel_paths, step)``.
This sequence grows in the ``step`` axis on each call to
:func:`advance()`.
is_finished (ByteTensor or NoneType): Shape
``(B, parallel_paths)``. Initialized to ``None``.
alive_attn (FloatTensor or NoneType): If tensor, shape is
``(step, B x parallel_paths, inp_seq_len)``, where ``inp_seq_len``
is the (max) length of the input sequence.
min_length (int): See above.
max_length (int): See above.
block_ngram_repeat (int): See above.
exclusion_tokens (set[int]): See above.
done (bool): See above.
"""
def __init__(
self,
pad,
bos,
eos,
batch_size,
parallel_paths,
min_length,
block_ngram_repeat,
exclusion_tokens,
max_length,
use_cuda: bool = True,
):
# magic indices
self.pad = pad
self.bos = bos
self.eos = eos
# result caching
self.predictions = [[] for _ in range(batch_size)]
self.scores = [[] for _ in range(batch_size)]
self.attention = [[] for _ in range(batch_size)]
# (N * B, step_size=1)
self.alive_seq = torch.full(
[batch_size * parallel_paths, 1], self.bos, dtype=torch.long
)
self.is_finished = torch.zeros([batch_size, parallel_paths], dtype=torch.uint8)
if use_cuda:
self.alive_seq = self.alive_seq.cuda()
self.is_finished = self.is_finished.cuda()
self.alive_attn = None
self.min_length = min_length
self.max_length = max_length
self.block_ngram_repeat = block_ngram_repeat
self.exclusion_tokens = exclusion_tokens
self.done = False
def __len__(self):
return self.alive_seq.shape[1] # steps length
def ensure_min_length(self, log_probs):
if len(self) <= self.min_length:
log_probs[:, self.eos] = -1e20
def ensure_max_length(self):
# add one to account for BOS. Don't account for EOS because hitting
# this implies it hasn't been found.
if len(self) == self.max_length + 1:
self.is_finished.fill_(1)
def block_ngram_repeats(self, log_probs):
# log_probs (N * B, vocab_size)
cur_len = len(self)
if self.block_ngram_repeat > 0 and cur_len > 1:
for path_idx in range(self.alive_seq.shape[0]): # N * B
# skip BOS
hyp = self.alive_seq[path_idx, 1:]
ngrams = set()
fail = False
gram = []
for i in range(cur_len - 1):
# Last n tokens, n = block_ngram_repeat
gram = (gram + [hyp[i].item()])[-self.block_ngram_repeat :]
# skip the blocking if any token in gram is excluded
if set(gram) & self.exclusion_tokens:
continue
if tuple(gram) in ngrams:
fail = True
ngrams.add(tuple(gram))
if fail:
log_probs[path_idx] = -10e20 # all the words in this path
def advance(self, log_probs):
"""
DecodeStrategy subclasses should override :func:`advance()`.
Advance is used to update ``self.alive_seq``, ``self.is_finished``,
and, when appropriate, ``self.alive_attn``.
"""
raise NotImplementedError()
def update_finished(self):
"""
DecodeStrategy subclasses should override :func:`update_finished()`.
``update_finished`` is used to update ``self.predictions``,
``self.scores``, and other "output" attributes.
"""
raise NotImplementedError()
def length_penalty_builder(length_penalty_name="none"):
    """
    Return a length-penalty function ``f(cur_len, alpha)`` used to
    re-rank beam scores.  Supported names: "none", "wu", "avg".
    """
    def length_wu(cur_len, alpha=0.0):
        """GNMT re-ranking score, see :cite:`wu2016google`."""
        return ((5 + cur_len) / 6.0) ** alpha

    def length_average(cur_len, _alpha=0.0):
        """Normalize by the current sequence length."""
        return cur_len

    def length_none(_cur_len, _alpha=0.0):
        """Leave scores unmodified."""
        return 1.0

    builders = {
        "none": length_none,
        "wu": length_wu,
        "avg": length_average,
    }
    try:
        return builders[length_penalty_name]
    except KeyError:
        raise NotImplementedError
class BeamSearch(DecodeStrategy):
"""
Generation beam search.
Note that the attributes list is not exhaustive. Rather, it highlights
tensors to document their shape. (Since the state variables' "batch"
size decreases as beams finish, we denote this axis with a B rather than
``batch_size``).
Args:
beam_size (int): Number of beams to use (see base ``parallel_paths``).
batch_size (int): See base.
pad (int): See base.
bos (int): See base.
eos (int): See base.
n_best (int): Don't stop until at least this many beams have
reached EOS.
min_length (int): See base.
max_length (int): See base.
block_ngram_repeat (int): See base.
exclusion_tokens (set[int]): See base.
use_cuda: Move tensors to GPU
Attributes:
top_beam_finished (ByteTensor): Shape ``(B,)``.
_batch_offset (LongTensor): Shape ``(B,)``.
_beam_offset (LongTensor): Shape ``(batch_size x beam_size,)``.
alive_seq (LongTensor): See base.
topk_log_probs (FloatTensor): Shape ``(B x beam_size,)``. These
are the scores used for the topk operation.
select_indices (LongTensor or NoneType): Shape
``(B x beam_size,)``. This is just a flat view of the
``_batch_index``.
topk_scores (FloatTensor): Shape
``(B, beam_size)``. These are the
scores a sequence will receive if it finishes.
topk_ids (LongTensor): Shape ``(B, beam_size)``. These are the
word indices of the topk predictions.
_batch_index (LongTensor): Shape ``(B, beam_size)``.
_prev_penalty (FloatTensor or NoneType): Shape
``(B, beam_size)``. Initialized to ``None``.
_coverage (FloatTensor or NoneType): Shape
``(1, B x beam_size, inp_seq_len)``.
hypotheses (list[list[Tuple[Tensor]]]): Contains a tuple
of score (float), sequence (long), and attention (float or None).
"""
    def __init__(
        self,
        beam_size,
        batch_size,
        pad,
        bos,
        eos,
        n_best,
        min_length,
        max_length,
        block_ngram_repeat,
        exclusion_tokens,
        length_penalty_name=None,
        length_penalty_alpha=0.0,
        use_cuda: bool = True,
    ):
        """Set up beam-search state for ``batch_size`` examples with
        ``beam_size`` parallel hypotheses each.

        NOTE(review): the default ``length_penalty_name=None`` is not a
        name handled by ``length_penalty_builder`` (it raises
        NotImplementedError); callers appear to be expected to pass
        "none"/"wu"/"avg" explicitly -- confirm.
        """
        super().__init__(
            pad,
            bos,
            eos,
            batch_size,
            beam_size,  # beam_size plays the role of parallel_paths
            min_length,
            block_ngram_repeat,
            exclusion_tokens,
            max_length,
            use_cuda=use_cuda,
        )
        # beam parameters
        self.beam_size = beam_size
        self.n_best = n_best
        self.batch_size = batch_size
        self.length_penalty_name = length_penalty_name
        self.length_penalty_func = length_penalty_builder(length_penalty_name)
        self.length_penalty_alpha = length_penalty_alpha
        # result caching: per-batch list of finished hypotheses
        self.hypotheses = [[] for _ in range(batch_size)]
        # beam state
        self.top_beam_finished = torch.zeros([batch_size], dtype=torch.uint8)
        self.best_scores = torch.full([batch_size], -1e10, dtype=torch.float)  # (N, )
        self._batch_offset = torch.arange(batch_size, dtype=torch.long)  # (N, )
        self._beam_offset = torch.arange(
            0, batch_size * beam_size, step=beam_size, dtype=torch.long
        )  # (N, )
        # (B*N): running log-probability of each live beam; only beam 0
        # starts viable so the first topk picks distinct continuations
        self.topk_log_probs = torch.tensor(
            [0.0] + [float("-inf")] * (beam_size - 1)
        ).repeat(batch_size)
        self.select_indices = None
        # buffers for the topk scores and 'backpointer'
        self.topk_scores = torch.empty(
            (batch_size, beam_size), dtype=torch.float
        )  # (N, B)
        self.topk_ids = torch.empty((batch_size, beam_size), dtype=torch.long)  # (N, B)
        self._batch_index = torch.empty(
            [batch_size, beam_size], dtype=torch.long
        )  # (N, B)
        self.done = False
        # "global state" of the old beam
        self._prev_penalty = None
        self._coverage = None
        if use_cuda:
            self.best_scores = self.best_scores.cuda()
            self._beam_offset = self._beam_offset.cuda()
            self.topk_log_probs = self.topk_log_probs.cuda()
            self.topk_scores = self.topk_scores.cuda()
            self.topk_ids = self.topk_ids.cuda()
            self._batch_index = self._batch_index.cuda()
    @property
    def current_predictions(self):
        """LongTensor ``(batch * beam,)``: last generated token per path."""
        return self.alive_seq[:, -1]
    @property
    def current_origin(self):
        """Flat indices of the parent beams chosen at the last step
        (``None`` before the first ``advance``)."""
        return self.select_indices
    @property
    def current_backptr(self):
        """Within-batch beam backpointers, shape ``(batch_size, beam_size)``."""
        # for testing
        return self.select_indices.view(self.batch_size, self.beam_size).fmod(
            self.beam_size
        )
def advance(self, log_probs):
"""
current step log_probs (N * B, vocab_size), attn (1, N * B, L)
Which attention is this??? Guess: the one with the encoder outputs
"""
vocab_size = log_probs.size(-1)
# using integer division to get an integer _B without casting
_B = log_probs.shape[0] // self.beam_size # batch_size
# force the output to be longer than self.min_length,
# by setting prob(EOS) to be a very small number when < min_length
self.ensure_min_length(log_probs)
# Multiply probs by the beam probability.
# logger.info("after log_probs {} {}".format(log_probs.shape, log_probs))
log_probs += self.topk_log_probs.view(_B * self.beam_size, 1)
# logger.info("after log_probs {} {}".format(log_probs.shape, log_probs))
self.block_ngram_repeats(log_probs)
# if the sequence ends now, then the penalty is the current
# length + 1, to include the EOS token, length_penalty is a float number
step = len(self)
length_penalty = self.length_penalty_func(step + 1, self.length_penalty_alpha)
# Flatten probs into a list of possibilities.
# | |
in infotext)
infotext = "table%s: %s" % \
("" if single_table else "s", infotext)
if not single_table:
infotext += "; %s in total" % \
util.plural("result", result_count)
final_text = "No matches found."
if self._drop_results:
result["output"] = ""
if result_count:
final_text = "Finished searching %s." % infotext
if not self._is_working:
final_text += " Stopped by user."
elif "messages" == result_type and is_html \
and count >= conf.MaxSearchMessages:
final_text += " Stopped at %s limit %s." % \
(result_type, conf.MaxSearchMessages)
elif "table row" == result_type and is_html \
and count >= conf.MaxSearchTableRows:
final_text += " Stopped at %s limit %s." % \
(result_type, conf.MaxSearchTableRows)
result["output"] += "</table><br /><br />%s</font>" % final_text
if not is_html: result["output"] = ""
result["done"] = True
result["count"] = result_count
self.postback(result)
logger.info("Search found %s results.", result["count"])
except Exception as e:
if not result:
result = {}
result["done"], result["error"] = True, traceback.format_exc()
result["error_short"] = repr(e)
self.postback(result)
finally:
self._is_working = False
class MergeThread(WorkerThread):
    """
    Merge background thread, compares conversations in two databases, yielding
    results back to main thread in chunks, or merges compared differences.

    Work items are queued as dicts whose "type" key selects the worker branch:
    "diff_left", "diff_merge_left", or "merge_left".
    """
    # Difftext to compare will be assembled from other fields for these types,
    # instead of using the message body directly.
    MESSAGE_TYPES_IGNORE_BODY = [
        skypedata.MESSAGE_TYPE_GROUP, skypedata.MESSAGE_TYPE_PARTICIPANTS,
        skypedata.MESSAGE_TYPE_REMOVE, skypedata.MESSAGE_TYPE_LEAVE,
        skypedata.MESSAGE_TYPE_SHARE_DETAIL
    ]
    # Number of iterations between allowing a UI refresh
    REFRESH_COUNT = 20000
    # Number of iterations between performing an intermediary postback
    POSTBACK_COUNT = 5000
def run(self):
self._is_running = True
while self._is_running:
params = self._queue.get()
if not params: continue # while self._is_running
self._is_working, self._drop_results = True, False
try:
if "diff_left" == params.get("type"):
self.work_diff_left(params)
elif "diff_merge_left" == params.get("type"):
self.work_diff_merge_left(params)
elif "merge_left" == params.get("type"):
self.work_merge_left(params)
finally:
self._is_working = False
    def work_diff_left(self, params):
        """
        Worker branch that compares all chats on the left side for differences,
        posting results back to application.

        @param   params  {"db1": left database, "db2": right database,
                          "chats": optional subset of left-side chats to scan}

        Posts interim result dicts with keys "output" (HTML), "chats"
        (differing chats with their diffs), "count"/"index" (message totals and
        progress), "chatindex"/"chatcount", and finally one with "done"=True.
        """
        # {"output": "html result for db1, db2",
        #  "index": currently processed chat index,
        #  "chats": [differing chats in db1]}
        result = {"output": "", "chats": [], "count": 0,
                  "chatindex": 0, "chatcount": 0,
                  "params": params, "index": 0, "type": "diff_left"}
        db1, db2 = params["db1"], params["db2"]
        chats1 = params.get("chats") or db1.get_conversations()
        chats2 = db2.get_conversations()
        # Map right-side chats both by their own identity and by the identity
        # of a linked chat, so renamed/linked conversations still pair up.
        c2map = dict((c["identity"], c) for c in chats2)
        for c in (c for c in chats2 if c.get("__link")):
            c2map[c["__link"]["identity"]] = c
        compared = []
        for c1 in chats1:
            c2 = c2map.get(c1["identity"])
            if not c2 and c1.get("__link"):
                c2 = c2map.get(c1["__link"]["identity"])
            c = c1.copy()
            c["messages1"] = c1["message_count"] or 0
            # Parses as (c2["message_count"] or 0) if c2 else 0: `or` binds
            # tighter than the conditional expression.
            c["messages2"] = c2["message_count"] or 0 if c2 else 0
            c["c1"], c["c2"] = c1, c2
            compared.append(c)
            result["count"] += c["messages1"] + c["messages2"]
        result["chatcount"] = len(chats1)
        compared.sort(key=lambda x: x["title"].lower())
        info_template = step.Template(templates.DIFF_RESULT_ITEM, escape=True)
        for index, chat in enumerate(compared):
            result["chatindex"] = index
            # Progress-only view of result, passed so get_chat_diff_left can
            # update "index" and post intermediate progress.
            postback = dict((k, v) for k, v in result.items()
                            if k not in ["output", "chats", "params"])
            diff = self.get_chat_diff_left(chat, db1, db2, postback)
            if not self._is_working:
                break # for index, chat
            if diff["messages"] \
            or (chat["message_count"] and diff["participants"]):
                new_chat = not chat["c2"]
                newstr = "" if new_chat else "new "
                info = info_template.expand(chat=chat)
                if new_chat:
                    info += " - new chat"
                if diff["messages"]:
                    info += ", %s" % util.plural("%smessage" % newstr,
                                                 diff["messages"])
                else:
                    info += ", no messages"
                if diff["participants"] and not new_chat:
                    info += ", %s" % (util.plural("%sparticipant" % newstr,
                                                  diff["participants"]))
                info += ".<br />"
                result["output"] += info
                result["chats"].append({"chat": chat, "diff": diff})
            result["index"] = postback["index"]
            if not self._drop_results:
                if index < len(compared) - 1:
                    result["status"] = ("Scanning %s." %
                                        compared[index + 1]["title_long_lc"])
                # Post accumulated chunk, then reset the accumulating fields.
                self.postback(result)
                result = dict(result, output="", chats=[])
        if not self._drop_results:
            result["done"] = True
            self.postback(result)
def work_diff_merge_left(self, params):
"""
Worker branch that compares all chats on the left side for differences,
copies them over to the right, posting progress back to application.
"""
result = {"output": "", "index": 0, "count": 0, "chatindex": 0,
"chatcount": 0, "params": params, "chats": [],
"type": "diff_merge_left"}
error, e = None, None
compared = []
db1, db2 = params["db1"], params["db2"]
try:
chats1 = params.get("chats") or db1.get_conversations()
chats2 = db2.get_conversations()
c2map = dict((c["identity"], c) for c in chats2)
for c in (c for c in chats2 if c.get("__link")):
c2map[c["__link"]["identity"]] = c
for c1 in chats1:
c2 = c2map.get(c1["identity"])
if not c2 and c1.get("__link"):
c2 = c2map.get(c1["__link"]["identity"])
c = c1.copy()
c["messages1"] = c1["message_count"] or 0
c["messages2"] = c2["message_count"] or 0 if c2 else 0
c["c1"], c["c2"] = c1, c2
compared.append(c)
result["count"] += c["messages1"] + c["messages2"]
result["chatcount"] = len(chats1)
compared.sort(key=lambda x: x["title"].lower())
count_messages = 0
count_participants = 0
for index, chat in enumerate(compared):
result["chatindex"] = index
postback = dict((k, v) for k, v in result.items()
if k not in ["output", "chats", "params"])
diff = self.get_chat_diff_left(chat, db1, db2, postback)
if not self._is_working:
break # for index, chat
if diff["messages"] \
or (chat["message_count"] and diff["participants"]):
chat1 = chat["c1"]
chat2 = chat["c2"]
new_chat = not chat2
if new_chat:
chat2 = chat1.copy()
chat["c2"] = chat2
chat2["id"] = db2.insert_conversation(chat2, db1)
if diff["participants"]:
db2.insert_participants(chat2, diff["participants"],
db1)
count_participants += len(diff["participants"])
if diff["messages"]:
db2.insert_messages(chat2, diff["messages"], db1, chat1,
self.yield_ui, self.REFRESH_COUNT)
count_messages += len(diff["messages"])
newstr = "" if new_chat else "new "
info = "Merged %s" % chat["title_long_lc"]
if new_chat:
info += " - new chat"
if diff["messages"]:
info += ", %s" % util.plural("%smessage" % newstr,
diff["messages"])
else:
info += ", no messages"
result["output"] = info + "."
result["diff"] = diff
result["index"] = postback["index"]
result["chats"].append(chat)
if not self._drop_results:
if index < len(compared) - 1:
result["status"] = ("Scanning %s." %
compared[index+1]["title_long_lc"])
self.postback(result)
result = dict(result, output="", chats=[])
except Exception as e:
error = traceback.format_exc()
finally:
if not self._drop_results:
if compared:
info = "Merged %s" % util.plural("new message",
count_messages)
if count_participants:
info += " and %s" % util.plural("new participant",
count_participants)
info += " \n\nto %s." % db2
else:
info = "Nothing new to merge from %s to %s." % (db1, db2)
result = {"type": "diff_merge_left", "done": True,
"output": info, "params": params, "chats": [] }
if error:
result["error"] = error
if e: result["error_short"] = repr(e)
self.postback(result)
def work_merge_left(self, params):
"""
Worker branch that merges differences given in params, posting progress
back to application.
"""
error, e = None, None
db1, db2 = params["db1"], params["db2"]
chats = params["chats"]
count_messages = 0
count_participants = 0
result = {"count": sum(len(x["diff"]["messages"]) for x in chats),
"index": 0, "chatindex": 0, "chatcount": len(chats),
"type": "merge_left", "output": "", "chats": [],
"params": params}
try:
for index, chat_data in enumerate(chats):
if not self._is_working:
break # for index, chat_data
chat1 = chat_data["chat"]["c1"]
chat2 = chat_data["chat"]["c2"]
messages = chat_data["diff"]["messages"]
participants = chat_data["diff"]["participants"]
html = "Merged %s" % chat1["title_long_lc"]
if not chat2:
html += " - new chat"
if messages:
newstr = "" if not chat2 else "new "
html += ", %s" % util.plural("%smessage" % newstr, messages)
else:
html += ", no messages"
html += "."
if not chat2:
chat2 = chat1.copy()
chat_data["chat"]["c2"] = chat2
chat2["id"] = db2.insert_conversation(chat2, db1)
if participants:
db2.insert_participants(chat2, participants, db1)
count_participants += len(participants)
if messages:
db2.insert_messages(chat2, messages, db1, chat1,
self.yield_ui, self.REFRESH_COUNT)
count_messages += len(messages)
if not self._drop_results:
result.update(output=html, chatindex=index,
chats=[chat_data["chat"]])
result["index"] += len(messages)
if index < len(chats) - 1:
result["status"] = ("Merging %s."
% chats[index + 1]["chat"]["title_long_lc"])
self.postback(result)
result = dict(result, output="", chats=[])
except Exception as e:
error = traceback.format_exc()
finally:
html = "Nothing to merge."
if chats:
html = "Merged %s" % util.plural("new message",
count_messages)
if count_participants:
html += " and %s" % util.plural("new participant",
count_participants)
html += " \n\nto %s." % db2
if not self._drop_results:
result = {"type": "merge_left", "done": True, "output": html,
"params": params}
if error:
result["error"] = error
if e: result["error_short"] = repr(e)
self.postback(result)
def get_chat_diff_left(self, chat, db1, db2, postback=None):
"""
Compares the chat in the two databases and returns the differences from
the left as {"messages": [message IDs different in db1],
"participants": [participants different in db1] }.
@param postback if {"count": .., "index": ..}, updates index
and posts the result at POSTBACK_COUNT intervals
"""
c = chat
participants1 = c["c1"]["participants"] if c["c1"] else []
participants2 = c["c2"]["participants"] if c["c2"] else []
c2p_map = dict((p["identity"], p) for p in participants2)
c1p_diff = [p for p in participants1 if p["identity"] not in c2p_map]
c1m_diff = | |
_get_hash(head_hash, short_hash)
txt.append(" head_hash: %s" % head_hash)
#
remh_hash = get_remote_head_hash(dir_name)
remh_hash = _get_hash(remh_hash, short_hash)
txt.append(" remh_hash: %s" % remh_hash)
#
if dir_name != ".":
subm_hash = _get_submodule_hash(dir_name)
subm_hash = _get_hash(subm_hash, short_hash)
txt.append(" subm_hash: %s" % subm_hash)
txt_as_str = "\n".join(txt)
return txt_as_str
# #############################################################################
# GitHub repository name
# #############################################################################
# All functions should take as input `repo_short_name` and have a switch `mode`
# to distinguish full vs short repo name.
# TODO(gp): Maybe rename full -> long to keep it more symmetric "short vs long".
def _parse_github_repo_name(repo_name: str) -> Tuple[str, str]:
    """
    Parse a repo name from `git remote`.

    The supported formats are both SSH and HTTPS, e.g.,
    - `git@github.com:alphamatic/amp`
    - `https://github.com/alphamatic/amp`

    For both of these strings the function returns ("github.com", "alphamatic/amp").
    """
    # Try to parse the SSH format, e.g., `git@github.com:alphamatic/amp`.
    # The dot is escaped: an unescaped `.` would match any character.
    m = re.match(r"^git@(\S+\.com):(\S+)$", repo_name)
    if not m:
        # Try to parse the HTTPS format, e.g., `https://github.com/alphamatic/amp`
        m = re.match(r"^https://(\S+\.com)/(\S+)$", repo_name)
    hdbg.dassert(m, "Can't parse '%s'", repo_name)
    m: Match[str]
    host_name = m.group(1)
    repo_name = m.group(2)
    _LOG.debug("host_name=%s repo_name=%s", host_name, repo_name)
    # We expect something like "alphamatic/amp".
    m = re.match(r"^\S+/\S+$", repo_name)
    hdbg.dassert(m, "repo_name='%s'", repo_name)
    # Strip a trailing ".git", e.g., from
    # `origin git@github.com:.../ORG_....git (fetch)`.
    suffix_to_remove = ".git"
    if repo_name.endswith(suffix_to_remove):
        repo_name = repo_name[: -len(suffix_to_remove)]
    return host_name, repo_name
def get_repo_full_name_from_dirname(
    dir_name: str, include_host_name: bool
) -> str:
    """
    Return the full name of the repo in `git_dir`, e.g., "alphamatic/amp".

    This function relies on `git remote` to gather the required information.

    :param include_host_name: prepend also the GitHub hostname, e.g., returning
        "github.com/alphamatic/amp"
    :return: the full name of the repo in `git_dir`, e.g., "alphamatic/amp".
    """
    hdbg.dassert_exists(dir_name)
    # Ask Git for the fetch URL of the `origin` remote.
    cmd = "cd %s; (git remote -v | grep origin | grep fetch)" % dir_name
    _, output = hsysinte.system_to_string(cmd)
    # The matching line looks like "origin <url> (fetch)": three
    # whitespace-separated tokens, of which the URL is the middle one.
    # TODO(gp): Make it more robust, by checking both fetch and push.
    data: List[str] = output.split()
    _LOG.debug("data=%s", data)
    hdbg.dassert_eq(len(data), 3, "data='%s'", str(data))
    # Parse the URL into host and repo name.
    host_name, repo_name = _parse_github_repo_name(data[1])
    return f"{host_name}/{repo_name}" if include_host_name else repo_name
def get_repo_full_name_from_client(super_module: bool) -> str:
    """
    Return the full name of the repo (e.g., "alphamatic/amp") from a Git
    client.

    :param super_module: like in get_client_root()
    """
    # Resolve the client root, then derive the repo name from its Git remote.
    client_dir = get_client_root(super_module)
    return get_repo_full_name_from_dirname(client_dir, include_host_name=False)
# /////////////////////////////////////////////////////////////////////////
# Execute code from the `repo_config.py` in the super module.
def _get_repo_config_code(super_module: bool = True) -> str:
    """
    Return the text of the code stored in `repo_config.py`.
    """
    # TODO(gp): We should actually ask Git where the super-module is.
    root = get_client_root(super_module)
    config_path = os.path.join(root, "repo_config.py")
    hdbg.dassert_file_exists(config_path)
    contents: str = hio.from_file(config_path)
    return contents
def execute_repo_config_code(code_to_execute: str) -> Any:
    """
    Execute code in `repo_config.py`.

    E.g.,
    ```
    hgit.execute_repo_config_code("has_dind_support()")
    ```

    :param code_to_execute: a Python expression evaluated after the config
        module has been exec'd into `globals()`
    :return: whatever the evaluated expression returns
    """
    # Read the info from the current repo.
    code = _get_repo_config_code()
    # TODO(gp): make the linter happy creating this symbol that comes from the
    # `exec()`.
    # NOTE(review): `exec` + `eval` run arbitrary code from the checked-out
    # `repo_config.py`. Acceptable only because that file is part of the repo
    # itself (already-trusted code), never external input.
    exec(code, globals())  # pylint: disable=exec-used
    ret = eval(code_to_execute)  # pylint: disable=eval-used
    return ret
# /////////////////////////////////////////////////////////////////////////
def _decorate_with_host_name(
dict_: Dict[str, str], host_name: str
) -> Dict[str, str]:
"""
Prepend the host name to all the values of the passed dictionary.
"""
res = {k: f"{host_name}/{v}" for k, v in dict_.items()}
return res
@functools.lru_cache()
def _get_repo_short_to_full_name(include_host_name: bool) -> Dict[str, str]:
    """
    Return the map from short name (e.g., "amp") to full name (e.g.,
    "alphamatic/amp") using the information in `repo_config.py`

    :param include_host_name: prepend the GitHub host name to all full names
    """
    # From short name to long name.
    repo_map = {
        "amp": "alphamatic/amp",
        "dev_tools": "alphamatic/dev_tools",
    }
    if include_host_name:
        host_name = "github.com"
        repo_map = _decorate_with_host_name(repo_map, host_name)
    _LOG.debug(
        "include_host_name=%s, repo_map=\n%s",
        include_host_name,
        pprint.pformat(repo_map),
    )
    # Read the info from the current repo.
    code = _get_repo_config_code()
    # TODO(gp): make the linter happy creating this symbol that comes from the
    # `exec()`.
    exec(code, globals())  # pylint: disable=exec-used
    current_repo_map = (
        get_repo_map()  # type: ignore[name-defined] # noqa: F821 # pylint: disable=undefined-variable
    )
    if include_host_name:
        host_name = (
            get_host_name()  # type: ignore[name-defined] # noqa: F821 # pylint: disable=undefined-variable
        )
        current_repo_map = _decorate_with_host_name(current_repo_map, host_name)
    _LOG.debug(
        "include_host_name=%s, current_repo_map=\n%s",
        include_host_name,
        pprint.pformat(current_repo_map),
    )
    # Update the map with the (possibly host-decorated) `current_repo_map`:
    # calling `get_repo_map()` again here would discard the host-name
    # decoration applied above.
    hdbg.dassert_not_intersection(repo_map.keys(), current_repo_map.keys())
    repo_map.update(current_repo_map)
    hdbg.dassert_no_duplicates(repo_map.values())
    _LOG.debug(
        "include_host_name=%s, repo_map=\n%s",
        include_host_name,
        pprint.pformat(repo_map),
    )
    return repo_map
# /////////////////////////////////////////////////////////////////////////
def get_complete_repo_map(
    in_mode: str, include_host_name: bool = False
) -> Dict[str, str]:
    """
    Return the full / short name of a Git repo based on the alternative name.

    :param in_mode: the values `full_name` or `short_name` determine how to interpret
        `name`
    """
    short_to_full = _get_repo_short_to_full_name(include_host_name)
    if in_mode == "short_name":
        # The cached map is already keyed by short name.
        repo_map = short_to_full
    elif in_mode == "full_name":
        # Invert the mapping so lookups are keyed by full name.
        repo_map = {full: short for (short, full) in short_to_full.items()}
    else:
        raise ValueError("Invalid in_mode='%s'" % in_mode)
    _LOG.debug(
        "For in_mode=%s, include_host_name=%s, repo_map=\n%s",
        in_mode,
        include_host_name,
        pprint.pformat(repo_map),
    )
    return repo_map
def get_repo_name(
    name: str, in_mode: str, include_host_name: bool = False
) -> str:
    """
    Return the full/short name of a Git repo based on the other name.

    :param in_mode: the values `full_name` or `short_name` determine how to interpret
        `name`
    """
    mapping = get_complete_repo_map(in_mode, include_host_name)
    hdbg.dassert_in(
        name, mapping, "Invalid name='%s' for in_mode='%s'", name, in_mode
    )
    return mapping[name]
def get_all_repo_names(
    in_mode: str, include_host_name: bool = False
) -> List[str]:
    """
    Return the names (full or short depending on `mode`) of all the Git repos.

    :param in_mode: if "full_name" return the full names (e.g., "alphamatic/amp")
        if "short_name" return the short names (e.g., "amp")
    """
    repo_map = get_complete_repo_map(in_mode, include_host_name)
    # Iterating a dict yields its keys: no need to materialize `keys()` first.
    return sorted(repo_map)
def get_task_prefix_from_repo_short_name(short_name: str) -> str:
    """
    Return the task prefix for a repo (e.g., "amp" -> "AmpTask").
    """
    # Known repos whose prefix is not derivable from the name alone.
    known_prefixes = {"amp": "AmpTask", "dev_tools": "DevToolsTask"}
    prefix = known_prefixes.get(short_name)
    if prefix is None:
        # We assume that we can build the prefix from the name (e.g., "lm" ->
        # "LmTask").
        # TODO(gp): A more general approach is to save this information inside
        # `repo_config.py`.
        prefix = short_name.capitalize() + "Task"
    return prefix
# #############################################################################
# Git path
# #############################################################################
@functools.lru_cache()
def find_file_in_git_tree(file_name: str, super_module: bool = True) -> str:
    """
    Find the path of a file in a Git tree.

    We get the Git root and then search for the file from there.

    :param file_name: name of the file to look for
    :param super_module: like in get_client_root()
    :return: absolute path of the found file
    """
    root_dir = get_client_root(super_module=super_module)
    # TODO(gp): Use -not -path '*/\.git/*'
    cmd = "find %s -name '%s' | grep -v .git" % (root_dir, file_name)
    # Store the result in a separate variable: overwriting `file_name` would
    # make the failure message below print the (empty) result instead of the
    # name that was searched for.
    _, found_file = hsysinte.system_to_one_line(cmd)
    _LOG.debug("file_name=%s", found_file)
    hdbg.dassert_ne(
        found_file, "", "Can't find file '%s' in dir '%s'", file_name, root_dir
    )
    found_file = os.path.abspath(found_file)
    hdbg.dassert_exists(found_file)
    return found_file
def get_path_from_git_root(
    file_name: str,
    super_module: bool,
    *,
    git_root: Optional[str] = None,
) -> str:
    """
    Get the path of `file_name` from the root of the Git client.

    E.g., in Docker:
    - `super_module=True` -> git_root=/app
    - `super_module=False` -> git_root=/app/amp

    :param super_module: like get_client_root()
    :param git_root: use this root instead of querying the Git client
    :raise ValueError: if `file_name` is not under `git_root`
    """
    # Get the root of the Git client.
    if git_root is None:
        git_root = get_client_root(super_module)
    #
    git_root = os.path.normpath(git_root)
    _LOG.debug("git_root=%s", git_root)
    file_name = os.path.normpath(file_name)
    _LOG.debug("file_name=%s", file_name)
    # Require `git_root` to be a true path prefix: a plain `startswith()`
    # would wrongly accept e.g. file_name="/app2/x" for git_root="/app".
    if file_name == git_root or file_name.startswith(git_root + os.sep):
        # Remove the `git_root` from file_name.
        ret = os.path.relpath(file_name, git_root)
    else:
        # If the file is not under the root, we can't normalize it.
        raise ValueError(
            "Can't normalize file_name='%s' for git_root='%s'"
            % (file_name, git_root)
        )
    _LOG.debug(
        "file_name=%s, git_root=%s (super_module=%s) -> ret=%s",
        file_name,
        git_root,
        super_module,
        ret,
    )
    return ret
@functools.lru_cache()
def get_amp_abs_path() -> str:
    """
    Return the absolute path of `amp` dir.

    If the current client *is* an amp repo, this is the client root itself;
    otherwise the `amp` directory is searched for inside the super-module
    Git tree.
    """
    repo_sym_name = get_repo_full_name_from_client(super_module=False)
    _LOG.debug("repo_sym_name=%s", repo_sym_name)
    # Repo names that count as "amp"; `repo_config.py` may add one more.
    repo_sym_names = ["alphamatic/amp"]
    code = "get_extra_amp_repo_sym_name()"
    try:
        repo_sym_names.append(execute_repo_config_code(code))
    except NameError:
        # The repo config does not define the hook: not an error.
        _LOG.debug("Can't execute the code '%s'", code)
    if repo_sym_name in repo_sym_names:
        # If we are in the amp repo, then the git client root is the amp
        # directory.
        git_root = get_client_root(super_module=False)
        amp_dir = git_root
    else:
        # If we are not in the amp repo, then look for the amp dir.
        amp_dir = find_file_in_git_tree("amp", super_module=True)
        git_root = get_client_root(super_module=True)
        amp_dir = os.path.join(git_root, amp_dir)
    amp_dir = os.path.abspath(amp_dir)
    # Sanity check.
    hdbg.dassert_dir_exists(amp_dir)
    return amp_dir
# TODO(gp): Is this needed?
def get_repo_dirs() -> List[str]:
    """
    Return the list of the repo repositories, e.g., `[".", "amp", "infra"]`.
    """
    # The current dir is always included; known sub-repos only if present.
    candidates = ["amp"]
    return ["."] + [d for d in candidates if os.path.exists(d)]
def find_docker_file(
file_name: str,
*,
root_dir: str = ".",
dir_depth: int = -1,
mode: str = "return_all_results",
candidate_files: Optional[List[str]] = | |
import rdkit
from rdkit import Chem
from optparse import OptionParser
from rdkit import RDLogger
lg = RDLogger.logger()
lg.setLevel(4)
'''
This script evaluates the quality of predictions from the rank_diff_wln model by applying the predicted
graph edits to the reactants, cleaning up the generated product, and comparing it to what was recorded
as the true (major) product of that reaction
'''
# Define some post-sanitization reaction cleaning scripts.
# These are to align our graph edit representation of a reaction with the data
# for improved coverage.
from rdkit.Chem import AllChem
# Applied before RDKit sanitization.
clean_rxns_presani = [
    AllChem.ReactionFromSmarts('[O:1]=[c:2][n;H0:3]>>[O:1]=[c:2][n;H1:3]'), # hydroxypyridine written with carbonyl, must invent H on nitrogen
]
# Applied after RDKit sanitization (see edit_mol).
clean_rxns_postsani = [
    AllChem.ReactionFromSmarts('[n;H1;+0:1]:[n;H0;+1:2]>>[n;H0;+0:1]:[n;H0;+0:2]'), # two adjacent aromatic nitrogens should allow for H shift
    AllChem.ReactionFromSmarts('[n;H1;+0:1]:[c:3]:[n;H0;+1:2]>>[n;H0;+0:1]:[*:3]:[n;H0;+0:2]'), # two aromatic nitrogens separated by one should allow for H shift
    AllChem.ReactionFromSmarts('[#7;H0;+:1]-[O;H1;+0:2]>>[#7;H0;+:1]-[O;H0;-:2]'), # rewrite [N+]-OH as [N+]-[O-] (deprotonate O on charged N)
    AllChem.ReactionFromSmarts('[C;H0;+0:1](=[O;H0;+0:2])[O;H0;-1:3]>>[C;H0;+0:1](=[O;H0;+0:2])[O;H1;+0:3]'), # neutralize C(=O)[O-]
    AllChem.ReactionFromSmarts('[I,Br,F;H1;D0;+0:1]>>[*;H0;-1:1]'), # turn neutral halogens into anions EXCEPT HCl
    AllChem.ReactionFromSmarts('[N;H0;-1:1]([C:2])[C:3]>>[N;H1;+0:1]([*:2])[*:3]'), # inexplicable nitrogen anion in reactants gets fixed in prods
]
# Fail fast on malformed SMARTS: Validate() returns a (warnings, errors) pair
# and anything other than (0, 0) is treated as fatal.
for clean_rxn in clean_rxns_presani + clean_rxns_postsani:
    if clean_rxn.Validate() != (0, 0):
        raise ValueError('Invalid cleaning reaction - check your SMARTS!')
# Lookup from bond-edit type t (1..4) to RDKit bond type; index 0 is a
# placeholder (t == 0 means bond deletion, see edit_mol).
BOND_TYPE = [0, Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]
def edit_mol(rmol, edits):
    """
    Apply predicted graph edits to a reactant molecule and return product SMILES.

    :param rmol: RDKit mol whose atoms carry `molAtomMapNumber` properties
    :param edits: iterable of (x, y, t) where x, y are atom map numbers and t
        indexes BOND_TYPE (t == 0 removes the bond, t > 0 sets the new bond)
    :return: list of sanitized product SMILES strings (one per fragment that
        survived parsing)
    """
    new_mol = Chem.RWMol(rmol)
    # Keep track of aromatic nitrogens, might cause explicit hydrogen issues
    aromatic_nitrogen_idx = set()
    aromatic_carbonyl_adj_to_aromatic_nH = {}
    aromatic_carbondeg3_adj_to_aromatic_nH0 = {}
    for a in new_mol.GetAtoms():
        if a.GetIsAromatic() and a.GetSymbol() == 'N':
            aromatic_nitrogen_idx.add(a.GetIdx())
            for nbr in a.GetNeighbors():
                # Aromatic C with a double bond next to an n[H]: candidate c=O carbon.
                if a.GetNumExplicitHs() == 1 and nbr.GetSymbol() == 'C' and nbr.GetIsAromatic() and any(b.GetBondTypeAsDouble() == 2 for b in nbr.GetBonds()):
                    aromatic_carbonyl_adj_to_aromatic_nH[nbr.GetIdx()] = a.GetIdx()
                # Aromatic C with 3 bonds next to an n without H.
                elif a.GetNumExplicitHs() == 0 and nbr.GetSymbol() == 'C' and nbr.GetIsAromatic() and len(nbr.GetBonds()) == 3:
                    aromatic_carbondeg3_adj_to_aromatic_nH0[nbr.GetIdx()] = a.GetIdx()
        else:
            a.SetNumExplicitHs(0)
    new_mol.UpdatePropertyCache()
    # Map atom-map number -> atom index in the editable mol.
    amap = {}
    for atom in rmol.GetAtoms():
        amap[atom.GetIntProp('molAtomMapNumber')] = atom.GetIdx()
    # Apply the edits as predicted
    for x,y,t in edits:
        bond = new_mol.GetBondBetweenAtoms(amap[x],amap[y])
        a1 = new_mol.GetAtomWithIdx(amap[x])
        a2 = new_mol.GetAtomWithIdx(amap[y])
        if bond is not None:
            new_mol.RemoveBond(amap[x],amap[y])
            # Are we losing a bond on an aromatic nitrogen?
            if bond.GetBondTypeAsDouble() == 1.0:
                if amap[x] in aromatic_nitrogen_idx:
                    if a1.GetTotalNumHs() == 0:
                        a1.SetNumExplicitHs(1)
                    elif a1.GetFormalCharge() == 1:
                        a1.SetFormalCharge(0)
                elif amap[y] in aromatic_nitrogen_idx:
                    if a2.GetTotalNumHs() == 0:
                        a2.SetNumExplicitHs(1)
                    elif a2.GetFormalCharge() == 1:
                        a2.SetFormalCharge(0)
            # Are we losing a c=O bond on an aromatic ring? If so, remove H from adjacent nH if appropriate
            if bond.GetBondTypeAsDouble() == 2.0:
                if amap[x] in aromatic_carbonyl_adj_to_aromatic_nH:
                    new_mol.GetAtomWithIdx(aromatic_carbonyl_adj_to_aromatic_nH[amap[x]]).SetNumExplicitHs(0)
                elif amap[y] in aromatic_carbonyl_adj_to_aromatic_nH:
                    new_mol.GetAtomWithIdx(aromatic_carbonyl_adj_to_aromatic_nH[amap[y]]).SetNumExplicitHs(0)
        if t > 0:
            new_mol.AddBond(amap[x],amap[y],BOND_TYPE[t])
            # Special alkylation case?
            if t == 1:
                if amap[x] in aromatic_nitrogen_idx:
                    if a1.GetTotalNumHs() == 1:
                        a1.SetNumExplicitHs(0)
                    else:
                        a1.SetFormalCharge(1)
                elif amap[y] in aromatic_nitrogen_idx:
                    if a2.GetTotalNumHs() == 1:
                        a2.SetNumExplicitHs(0)
                    else:
                        a2.SetFormalCharge(1)
            # Are we getting a c=O bond on an aromatic ring? If so, add H to adjacent nH0 if appropriate
            if t == 2:
                if amap[x] in aromatic_carbondeg3_adj_to_aromatic_nH0:
                    new_mol.GetAtomWithIdx(aromatic_carbondeg3_adj_to_aromatic_nH0[amap[x]]).SetNumExplicitHs(1)
                elif amap[y] in aromatic_carbondeg3_adj_to_aromatic_nH0:
                    new_mol.GetAtomWithIdx(aromatic_carbondeg3_adj_to_aromatic_nH0[amap[y]]).SetNumExplicitHs(1)
    pred_mol = new_mol.GetMol()
    # Clear formal charges to make molecules valid
    # Note: because S and P (among others) can change valence, be more flexible
    for atom in pred_mol.GetAtoms():
        atom.ClearProp('molAtomMapNumber')
        if atom.GetSymbol() == 'N' and atom.GetFormalCharge() == 1: # exclude negatively-charged azide
            bond_vals = sum([bond.GetBondTypeAsDouble() for bond in atom.GetBonds()])
            if bond_vals <= 3:
                atom.SetFormalCharge(0)
        elif atom.GetSymbol() == 'N' and atom.GetFormalCharge() == -1: # handle negatively-charged azide addition
            bond_vals = sum([bond.GetBondTypeAsDouble() for bond in atom.GetBonds()])
            if bond_vals == 3 and any([nbr.GetSymbol() == 'N' for nbr in atom.GetNeighbors()]):
                atom.SetFormalCharge(0)
        elif atom.GetSymbol() == 'N':
            bond_vals = sum([bond.GetBondTypeAsDouble() for bond in atom.GetBonds()])
            if bond_vals == 4 and not atom.GetIsAromatic(): # and atom.IsInRingSize(5)):
                atom.SetFormalCharge(1)
        elif atom.GetSymbol() == 'C' and atom.GetFormalCharge() != 0:
            atom.SetFormalCharge(0)
        elif atom.GetSymbol() == 'O' and atom.GetFormalCharge() != 0:
            bond_vals = sum([bond.GetBondTypeAsDouble() for bond in atom.GetBonds()]) + atom.GetNumExplicitHs()
            if bond_vals == 2:
                atom.SetFormalCharge(0)
        elif atom.GetSymbol() in ['Cl', 'Br', 'I', 'F'] and atom.GetFormalCharge() != 0:
            bond_vals = sum([bond.GetBondTypeAsDouble() for bond in atom.GetBonds()])
            if bond_vals == 1:
                atom.SetFormalCharge(0)
        elif atom.GetSymbol() == 'S' and atom.GetFormalCharge() != 0:
            bond_vals = sum([bond.GetBondTypeAsDouble() for bond in atom.GetBonds()])
            if bond_vals in [2, 4, 6]:
                atom.SetFormalCharge(0)
        elif atom.GetSymbol() == 'P': # quartenary phosphorous should be pos. charge with 0 H
            bond_vals = [bond.GetBondTypeAsDouble() for bond in atom.GetBonds()]
            if sum(bond_vals) == 4 and len(bond_vals) == 4:
                atom.SetFormalCharge(1)
                atom.SetNumExplicitHs(0)
            elif sum(bond_vals) == 3 and len(bond_vals) == 3: # make sure neutral
                atom.SetFormalCharge(0)
        elif atom.GetSymbol() == 'B': # quartenary boron should be neg. charge with 0 H
            bond_vals = [bond.GetBondTypeAsDouble() for bond in atom.GetBonds()]
            if sum(bond_vals) == 4 and len(bond_vals) == 4:
                atom.SetFormalCharge(-1)
                atom.SetNumExplicitHs(0)
        elif atom.GetSymbol() in ['Mg', 'Zn']:
            bond_vals = [bond.GetBondTypeAsDouble() for bond in atom.GetBonds()]
            if sum(bond_vals) == 1 and len(bond_vals) == 1:
                atom.SetFormalCharge(1)
        elif atom.GetSymbol() == 'Si':
            bond_vals = [bond.GetBondTypeAsDouble() for bond in atom.GetBonds()]
            if sum(bond_vals) == len(bond_vals):
                atom.SetNumExplicitHs(max(0, 4 - len(bond_vals)))
    # Bounce to/from SMILES to try to sanitize
    pred_smiles = Chem.MolToSmiles(pred_mol)
    pred_list = pred_smiles.split('.')
    pred_mols = [Chem.MolFromSmiles(pred_smiles) for pred_smiles in pred_list]
    for i, mol in enumerate(pred_mols):
        # Check if we failed/succeeded in previous step
        if mol is None:
            print('##### Unparseable mol: {}'.format(pred_list[i]))
            continue
        # Else, try post-sanitization fixes in structure
        mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))
        if mol is None:
            continue
        for rxn in clean_rxns_postsani:
            out = rxn.RunReactants((mol,))
            if out:
                try:
                    Chem.SanitizeMol(out[0][0])
                    pred_mols[i] = Chem.MolFromSmiles(Chem.MolToSmiles(out[0][0]))
                except Exception as e:
                    # Keep the un-cleaned mol if the cleaning product is invalid.
                    print(e)
                    print('Could not sanitize postsani reaction product: {}'.format(Chem.MolToSmiles(out[0][0])))
                    print('Original molecule was: {}'.format(Chem.MolToSmiles(mol)))
    pred_smiles = [Chem.MolToSmiles(pred_mol) for pred_mol in pred_mols if pred_mol is not None]
    return pred_smiles
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-t", "--pred", dest="pred_path") # file containing predicted edits
parser.add_option("-g", "--gold", dest="gold_path") # file containing true edits
parser.add_option("-s", "--singleonly", dest="singleonly", default=False) # only compare single products
parser.add_option("--bonds_as_doubles", dest="bonds_as_doubles", default=False) # bond types are doubles, not indices
opts,args = parser.parse_args()
fpred = open(opts.pred_path)
fgold = open(opts.gold_path)
feval = open(opts.pred_path + '.eval_by_smiles', 'w')
print('## Bond types in output files are doubles? {}'.format(opts.bonds_as_doubles))
idxfunc = lambda a: a.GetAtomMapNum()
bond_types = [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
bond_types_as_double = {0.0: 0, 1.0: 1, 2.0: 2, 3.0: 3, 1.5: 4}
# Define a standardization procedure so we can evaluate based on...
# a) RDKit-sanitized equivalence, and
# b) MOLVS-sanitized equivalence
from molvs import Standardizer
standardizer = Standardizer()
standardizer.prefer_organic = True
def sanitize_smiles(smi, largest_fragment=False):
    """Return a standardized canonical SMILES for comparison purposes.

    Applies MolVS standardization (functional-group normalization); when
    largest_fragment is True also strips counterions/salts and neutralizes
    charges. If the input cannot be parsed, the original string is returned
    unchanged; standardization errors are silently ignored (best effort).
    """
    mol = Chem.MolFromSmiles(smi)
    if mol is None:
        # Unparseable input: fall back to the raw string so callers can
        # still compare it verbatim.
        return smi
    try:
        mol = standardizer.standardize(mol) # standardize functional group reps
        if largest_fragment:
            mol = standardizer.largest_fragment(mol) # remove product counterions/salts/etc.
        mol = standardizer.uncharge(mol) # neutralize, e.g., carboxylic acids
    except Exception:
        # Best effort: keep whatever standardization succeeded so far.
        pass
    return Chem.MolToSmiles(mol)
try:
rank = []
n,top1,top2,top3,top5,gfound = 0,0,0,0,0,0
top1_sani, top2_sani, top3_sani, top5_sani, gfound_sani = 0, 0, 0, 0, 0
for line in fpred:
thisrow = []
line = line.strip('\r\n |')
gold = fgold.readline()
rex,gedits = gold.split()
r,_,p = rex.split('>')
if opts.singleonly and '.' in p:
continue
rmol = Chem.MolFromSmiles(r)
pmol = Chem.MolFromSmiles(p)
thisrow.append(r)
thisrow.append(p)
# Save pbond information
pbonds = {}
for bond in pmol.GetBonds():
a1 = idxfunc(bond.GetBeginAtom())
a2 = idxfunc(bond.GetEndAtom())
t = bond_types.index(bond.GetBondType())
pbonds[(a1, a2)] = pbonds[(a2, a1)] = t + 1
for atom in pmol.GetAtoms():
atom.ClearProp('molAtomMapNumber')
psmiles = Chem.MolToSmiles(pmol)
psmiles_sani = set(sanitize_smiles(psmiles, True).split('.'))
psmiles = set(psmiles.split('.'))
thisrow.append('.'.join(psmiles))
thisrow.append('.'.join(psmiles_sani))
########### Use *true* edits to try to recover product
if opts.bonds_as_doubles:
cbonds = []
for gedit in gedits.split(';'):
x,y,t = gedit.split('-')
x,y,t = int(x), int(y), float(t)
cbonds.append((x, y, bond_types_as_double[t]))
else:
# check if psmiles is recoverable
cbonds = []
for gedit in gedits.split(';'):
x,y = gedit.split('-')
x,y = int(x), int(y)
if (x,y) in pbonds:
t = pbonds[(x,y)]
else:
t = 0
cbonds.append((x, y, t))
# Generate products by modifying reactants with predicted edits.
pred_smiles = edit_mol(rmol, cbonds)
pred_smiles_sani = set(sanitize_smiles(smi) for smi in pred_smiles)
pred_smiles = set(pred_smiles)
if not psmiles <= pred_smiles:
# Try again with kekulized form
Chem.Kekulize(rmol)
pred_smiles_kek = edit_mol(rmol, cbonds)
pred_smiles_kek = set(pred_smiles_kek)
if not psmiles <= pred_smiles_kek:
if psmiles_sani <= pred_smiles_sani:
print('\nwarn: mismatch, but only due to standardization')
gfound_sani += 1
else:
print('\nwarn: could not regenerate product {}'.format(psmiles))
print('sani product: {}'.format(psmiles_sani))
print(r)
print(p)
print(gedits)
print(cbonds)
print('pred_smiles: {}'.format(pred_smiles))
print('pred_smiles_kek: {}'.format(pred_smiles_kek))
print('pred_smiles_sani: {}'.format(pred_smiles_sani))
else:
gfound += 1
gfound_sani += 1
else:
gfound += 1
gfound_sani += 1
########### Now use candidate edits to try to recover product
rk,rk_sani = 11,11
pred_smiles_list = []
pred_smiles_sani_list = []
ctr = 0
for idx,edits in enumerate(line.split('|')):
prev_len_pred_smiles = len(set(pred_smiles_list))
couldnt_find_smiles = True
cbonds = | |
# Source: leozz37/makani -- avionics/servo/firmware/generate_r22_param.py
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code generation module for Copley R22 Parameters."""
import csv
import struct
import sys
import textwrap
import gflags
import numpy as np
import yaml
FLAGS = gflags.FLAGS
gflags.DEFINE_string('ccx_file', None,
'Full path to Copley CCX configuration file.',
short_name='c')
gflags.DEFINE_string('dict_file', None,
'Full path to parameter dictionary file.',
short_name='d')
gflags.DEFINE_string('yaml_output', None,
'Full path to YAML parameter dictionary output file.',
short_name='y')
gflags.DEFINE_string('source_file', None,
'Full path to output source file.',
short_name='s')
gflags.DEFINE_string('header_file', None,
'Full path to output header file.',
short_name='h')
def _ServoConfigurationAileron1():
  """Override configuration for aileron 1 servos.

  Returns:
    Dict mapping R22 parameter names to single-element value lists.
  """
  return {
      'NegativeSoftwareLimit': [_DegreesToCounts(-45.0)],
      'PositiveSoftwareLimit': [_DegreesToCounts(45.0)],
      'DesiredState': [30],  # 30 = CANopen mode.
      # Profile velocity is in units of [0.1 output shaft counts / s].
      'ProfileVelocity': [_RadiansPerSecondToProfileVelocity(1.0)],
      # Velocity loop limit is in units of [0.1 drive shaft counts / s].
      'VelocityLoopVelocityLimit': [_RadiansPerSecondToLoopVelocity(1.15)],
      # Profile acceleration is in units of [10 output shaft counts / s^2].
      'ProfileAcceleration': [
          _RadiansPerSecondSquaredToProfileAcceleration(7.0)],
      'ProfileDeceleration': [
          _RadiansPerSecondSquaredToProfileAcceleration(7.0)],
      'PositionPp': [600],  # Proportional gain.
      'PositionVff': [1000],  # Velocity feed-forward.
      'PositionAff': [7000],  # Acceleration feed-forward.
      'PositionFollowingWarningLimit': [_DegreesToCountsRelative(5.0)],
      'UserContinuousCurrentLimit': [1400],  # [0.01 Amps/count]
      'UserPeakCurrentTimeLimit': [2000],  # [ms]
      'NodeIdConfiguration': [0x0004],  # Bit rate 1Mbps, node ID 4.
      'MotorWiring': [1],  # U/V swapped.
      'MotorHallWiring': [4],  # W/V/U hall order.
  }
def _ServoConfigurationAileron2():
  """Override configuration for aileron 2 servos (identical to aileron 1)."""
  config = _ServoConfigurationAileron1()
  return config
def _ServoConfigurationAileron4():
  """Override configuration for aileron 4 servos (identical to aileron 1)."""
  config = _ServoConfigurationAileron1()
  return config
def _ServoConfigurationElevator():
  """Override configuration for elevator servos (identical to aileron 1)."""
  config = _ServoConfigurationAileron1()
  return config
def _ServoConfigurationRudder():
  """Override configuration for rudder servos (identical to aileron 1)."""
  config = _ServoConfigurationAileron1()
  return config
def _ServoConfigurationDetwist():
  """Override configuration for the tether detwist servo (same as aileron 1)."""
  config = _ServoConfigurationAileron1()
  return config
def GetPatchConfiguration():
  """Get dictionary of custom patch configurations, keyed by servo role."""
  builders = {
      'aileron1': _ServoConfigurationAileron1,
      'aileron2': _ServoConfigurationAileron2,
      'aileron4': _ServoConfigurationAileron4,
      'detwist': _ServoConfigurationDetwist,
      'elevator': _ServoConfigurationElevator,
      'rudder': _ServoConfigurationRudder,
  }
  return {role: build() for role, build in builders.items()}
# Output shaft encoder resolution (counts per full revolution).
_OUTPUT_ENCODER_COUNTS_PER_REV = 2**14


def _RadiansToCounts(angle):
  """Convert an angle in radians to output shaft encoder counts.

  Maps [-pi, pi) onto [0, counts_per_rev), clipping to the valid count range.
  """
  fraction = (angle + np.pi) / (2.0 * np.pi)
  raw_counts = fraction * _OUTPUT_ENCODER_COUNTS_PER_REV
  return int(np.clip(raw_counts, 0, _OUTPUT_ENCODER_COUNTS_PER_REV - 1))
def _DegreesToCounts(angle):
  """Convert degrees to output shaft encoder counts."""
  radians = np.deg2rad(angle)
  return _RadiansToCounts(radians)
def _DegreesToCountsRelative(angle):
  """Convert a degree delta to an output shaft encoder count delta."""
  revolutions = angle / 360.0
  return int(revolutions * _OUTPUT_ENCODER_COUNTS_PER_REV)
def _RadiansPerSecondSquaredToProfileAcceleration(accel):
  """Convert [rad/s^2] to profile acceleration [10 output shaft counts/s^2]."""
  revs_per_sec2 = accel / (2.0 * np.pi)
  counts = revs_per_sec2 * _OUTPUT_ENCODER_COUNTS_PER_REV * 0.1
  # Clip at 10 rev/sec/sec.
  return int(np.clip(counts, 0, _OUTPUT_ENCODER_COUNTS_PER_REV))
def _RadiansPerSecondToProfileVelocity(velocity):
  """Convert [rad/s] to profile velocity [0.1 output shaft counts/s]."""
  revs_per_sec = velocity / (2.0 * np.pi)
  counts = revs_per_sec * _OUTPUT_ENCODER_COUNTS_PER_REV * 10
  # Clip at 1 rev/sec.
  return int(np.clip(counts, 0, _OUTPUT_ENCODER_COUNTS_PER_REV * 10))
def _RadiansPerSecondToLoopVelocity(velocity):
  """Convert [rad/s] to velocity loop velocity [0.1 drive shaft counts/s]."""
  # Drive shaft encoder resolution: 160 * 36 counts per revolution.
  drive_counts_per_rev = 160 * 36
  counts = velocity / (2.0 * np.pi) * drive_counts_per_rev * 10
  # Clip at 1 rev/sec.
  return int(np.clip(counts, 0, drive_counts_per_rev * 10))
def _GetName(name):
  """Translate a CCX parameter name to a CamelCase identifier.

  Strips apostrophes, periods, and slashes, then capitalizes each
  space-separated word and joins them.
  """
  for unwanted in ("'", '.', '/'):
    name = name.replace(unwanted, '')
  return ''.join(word.capitalize() for word in name.split(' '))
class CopleyConfiguration(object):
"""Handle Copley parameter configuration parameters."""
  def _AddTableEntry(self, serial_id, device_net, macro, can, bank, typ):
    """Add a new parameter to the parameter table.

    Args:
      serial_id: Serial/ASCII command id, or None.
      device_net: DeviceNet id.
      macro: Macro id.
      can: CANopen id ('index:sub' string or int), or 'None'/None if absent.
      bank: Parameter bank.
      typ: Parameter type.

    Raises:
      ValueError: If serial_id or can already exists in the table.
    """
    entry = {'serial_id': serial_id, 'device_net': device_net,
             'macro': macro, 'can': can, 'bank': bank, 'typ': typ,
             'index': len(self.table)}
    # The source data uses the literal string 'None' for "no CAN id".
    if can == 'None':
      can = None
    # Reject duplicate entries.
    if serial_id in self.by_serial_id:
      raise ValueError('Serial ID %d already exists in the table.' % serial_id)
    if can in self.by_can:
      raise ValueError('CAN ID %d already exists in the table.' % can)
    self.table.append(entry)
    if serial_id is not None:
      self.by_serial_id[serial_id] = entry
    if can is not None:
      # These parameters need to be mapped to legacy values.
      param_can_remap = {0x2380: 0x60F6, 0x2381: 0x60F9, 0x2382: 0x60FB,
                         0x2383: 0x6410, 0x2384: 0x6510}
      if isinstance(can, int):
        entry['can_index'] = can
        entry['can_sub'] = 0
      elif ':' in can:
        # Parse 'index:sub' (index is hex, sub is decimal).
        entry['can_index'] = int(can.split(':')[0], 16)
        entry['can_sub'] = int(can.split(':')[1], 10)
        if entry['can_index'] in param_can_remap:
          entry['can_index'] = param_can_remap[entry['can_index']]
          # Re-normalize the key so by_can is indexed by the remapped id.
          can = '0x%04X:%d' % (entry['can_index'], entry['can_sub'])
        self.by_can[can] = entry
    else:
      # Index 0 is reserved as not used.
      entry['can_index'] = 0
      entry['can_sub'] = 0
def _AddDesc(self, entry, desc):
# Remove entry if already indexed by name and store new name.
if 'name' in entry and entry['name'] in self.by_name:
del self.by_name[entry['name']]
entry['desc'] = desc
entry['name'] = _GetName(desc)
self.by_name[entry['name']] = entry
def __init__(self):
self.table = []
self.by_name = {}
self.by_serial_id = {}
self.by_can = {}
def _AddExtraDescriptions(self):
"""Specify R22 dictionary parameters not found in the CCX file."""
# ASCII command ID and description.
serial_params = [
(0x03, 'Winding A Current'),
(0x04, 'Winding B Current'),
(0x05, 'Current Offset A'),
(0x06, 'Current Offset B'),
(0x07, 'Stator Current X Axis'),
(0x08, 'Stator Current Y Axis'),
(0x09, 'Current Loop Output X Axis'),
(0x0A, 'Current Loop Output Y Axis'),
(0x0B, 'Actual Current D'),
(0x0C, 'Actual Current Q'),
(0x0D, 'Commanded Current D'),
(0x0E, 'Commanded Current Q'),
(0x0F, 'Current Error D'),
(0x10, 'Current Error Q'),
(0x11, 'Current Integral D'),
(0x12, 'Current Integral Q'),
(0x13, 'Current Loop Output D'),
(0x14, 'Current Loop Output Q'),
(0x15, 'Commanded Motor Current'),
(0x17, 'Actual Position'),
(0x18, 'Actual Velocity'),
(0x1B, 'Sine Feedback Voltage'),
(0x1C, 'Cosine Feedback Voltage'),
(0x1D, 'Analog Reference Voltage'),
(0x1E, 'High Voltage'),
(0x20, 'Drive Temperature'),
(0x25, 'Limited Motor Current Command'),
(0x29, 'Velocity Loop Limited Velocity'),
(0x2A, 'Velocity Loop Error'),
(0x2B, 'Velocity Loop Integral Sum'),
(0x2C, 'Commanded Velocity'),
(0x2D, 'Commanded Position'),
(0x2E, 'Velocity Loop Acceleration Feed Forward'),
(0x32, 'Actual Motor Position'),
(0x35, 'Position Loop Error'),
(0x38, 'Actual Motor Current'),
(0x3B, 'Instantaneous Commanded Velocity'),
(0x3C, 'Instantaneous Commanded Acceleration'),
(0x3D, 'Trajectory Destination Position'),
(0x47, 'Motor Temperature Sensor Type'),
(0x4E, 'Motor Wiring'),
(0x52, 'Motor Hall Wiring'),
(0x5E, 'Load Encoder Velocity'),
(0x68, 'Captured Index Position'),
(0x69, 'Unfiltered Velocity'),
(0x6D, 'Position Capture Status Register'),
(0x81, 'Drive Serial Number'),
(0x85, 'PWM Period'),
(0x8B, 'Drive Rated Minimum Voltage'),
(0x8C, 'Drive Rated Maximum Temperature'),
(0x90, 'Baud Rate'),
(0x91, 'Maximum Data Words Per Command'),
(0x94, 'Firmware Version Number'),
(0x96, 'Analog Reference Calibration Offset'),
(0x97, 'Over Temperature Cutout Hysteresis'),
(0x9C, 'Over Voltage Cutout Hysteresis'),
(0x9D, 'PWM Dead Time Continuous Current'),
(0x9E, 'PWM Off Time Minimum'),
(0x9F, 'PWM Dead Time Zero Current'),
(0xA0, 'Event Status'),
(0xA1, 'Latched Event Status'),
(0xA2, 'Hall Input State'),
(0xA4, 'Latched Fault Status'),
(0xA6, 'Input Pin States'),
(0xAA, 'Raw Input State'),
(0xAB, 'Output States'),
(0xAC, 'Sticky Event Status'),
(0xB0, 'Motor Phase Angle'),
(0xB4, 'Encoder Phase Angle'),
(0xB5, 'Homing Adjustment'),
(0xB7, 'System Time'),
(0xC0, 'Network Node ID'),
(0xC9, 'Trajectory Status'),
(0xCA, 'Target Position'),
(0xCB, 'Profile Velocity'),
(0xCC, 'Profile Acceleration'),
(0xCD, 'Profile Deceleration'),
(0xDC, 'Regen Turn On Voltage'),
(0xDD, 'Regen Turn Off Voltage'),
(0xDE, 'Regen Peak Current Rating'),
(0xDF, 'Regen Continuous Current Rating'),
(0xE0, 'Regen Time At Peak Current'),
(0xE2, 'Regen Resistor Status'),
(0x100, 'CANopen Limit Status Mask'),
(0x101, 'Network Address Switch Value'),
(0x102, 'Network Status'),
(0x108, 'Trigger CANopen PDO 254'),
(0x10A, 'Captured Home Position'),
(0x10B, 'Firmware Version Number Extended'),
(0x110, 'Position Capture Timestamp'),
(0x111, 'Position Capture Position'),
(0x112, 'Position Encoder Position'),
(0x113, 'CANopen Emergency Inhibit Time'),
(0x116, 'CANopen Quick Stop Option Code'),
(0x117, 'CANopen Shutdown Option Code'),
(0x118, 'CANopen Disable Option Code'),
(0x119, 'CANopen Halt Option Code'),
(0x120, 'Number of Axis'),
(0x122, 'Internal Maximum Regen Current'),
(0x126, 'FPGA Firmware Version'),
(0x128, 'Gain Scheduling Key Parameter'),
(0x129, 'Reserved Drive Hardware Options'),
(0x12C, 'Secondary Firmware Version'),
(0x12E, 'Motor Encoder Status'),
(0x12F, 'Load Encoder Status'),
(0x130, 'RMS Current Calculation Period'),
(0x131, 'RMS Current Measurement'),
(0x132, 'User Current Limit Running Sum'),
(0x133, 'Amp Current Limit Running Sum'),
(0x134, 'Analog Output Configuration'),
(0x135, 'Analog Output Value'),
(0x136, 'Second Analog Reference Value'),
(0x137, 'Second Analog Reference Offset'),
(0x138, 'Second Analog Reference Calibration Offset'),
(0x139, 'Drive Safety Circuit Status'),
(0x13A, 'Analog Motor Temperature Sensor Voltage'),
(0x13B, 'Analog Motor Temperature Sensor Limit'),
(0x154, 'Servo Loop Configuration'),
(0x155, 'Position Loop Integral Gain'),
(0x156, 'Position Loop Derivative Gain'),
(0x157, 'Velocity Loop Command Feed Forward'),
(0x158, | |
self.rest.send_telegram_message(chat_id, message)
else:
self.rest.send_telegram_image(chat_id, image)
def login(self):
user_id = self.configure.common_config[lybconstant.LYB_DO_BOOLEAN_SAVE_LOGIN_ACCOUNT + '_id']
user_password = li<PASSWORD>ot_license.LYBLicense().get_decrypt(
self.configure.common_config[lybconstant.LYB_DO_BOOLEAN_SAVE_LOGIN_ACCOUNT + '_passwd'])
rest = likeyoubot_rest.LYBRest(self.configure.root_url, user_id, user_password)
return rest
def save_image(self, png_name):
# window_image_org = cv2.cvtColor(np.array(self.window_image), cv2.COLOR_RGB2BGR)
# img = Image.fromarray(window_image_org, 'RGB')
try:
directory = lybconfigure.LYBConfigure.resource_path('screenshot')
if not os.path.exists(directory):
os.makedirs(directory)
now = datetime.datetime.now()
now_time = now.strftime('%y%m%d_%H%M%S')
app_player_type, resolution = self.window.get_player(self.hwnd)
png_name = directory + '\\' + png_name + '_' + str(now_time) + '_' + str(app_player_type) + '.png'
crop_area = self.window.get_player_screen_rect(self.hwnd)
self.window_image.crop(crop_area).save(png_name)
return png_name
except:
self.logger.error('스크린샷 저장 중 에러 발생')
self.logger.error(traceback.format_exc())
return None
def build_iterator_key(self, index, work_name):
return str(index) + work_name + '_config_iterator'
    def process_restart_app_player(self):
        """Restart this app player instance via its multi-instance manager.

        Clicks the restart control for this instance's row in the Nox or MoMo
        multi-manager window (coordinates are hard-coded per player type),
        then accepts the confirmation popup. Retries the click once per
        second until the popup appears.
        """
        if self.player_type == 'nox':
            if lybconstant.LYB_MULTI_APP_PLAYER_NAME_NOX in self.multi_hwnd_dic:
                mHwnd = self.multi_hwnd_dic[lybconstant.LYB_MULTI_APP_PLAYER_NAME_NOX]
                # Config row number is 1-based; convert to a 0-based index.
                app_player_index = int(
                    self.window_config[lybconstant.LYB_DO_BOOLEAN_FIX_WINDOW_LOCATION + 'number']) - 1
                self.logger.debug('app_player_index: ' + str(app_player_index))
                self.logger.debug('mHwnd: ' + str(mHwnd))
                while True:
                    # Each Nox manager row is 57 px tall.
                    self.window.mouse_click(mHwnd, 523, 116 + (57 * app_player_index))
                    time.sleep(1)
                    confirm_window_hwnd_list = self.window.getInnerWindow(mHwnd)
                    self.logger.debug(confirm_window_hwnd_list)
                    if len(confirm_window_hwnd_list) > 0:
                        # Confirmation popup appeared: accept it and stop retrying.
                        for each_hwnd in confirm_window_hwnd_list:
                            self.logger.debug(each_hwnd)
                            time.sleep(1)
                            self.window.mouse_click(each_hwnd, 120, 180)
                        break
                    else:
                        time.sleep(1)
                return
        elif self.player_type == 'momo':
            if lybconstant.LYB_MULTI_APP_PLAYER_NAME_MOMO in self.multi_hwnd_dic:
                mHwnd = self.multi_hwnd_dic[lybconstant.LYB_MULTI_APP_PLAYER_NAME_MOMO]
                app_player_index = int(
                    self.window_config[lybconstant.LYB_DO_BOOLEAN_FIX_WINDOW_LOCATION + 'number']) - 1
                self.logger.debug('app_player_index: ' + str(app_player_index))
                while True:
                    # Each MoMo manager row is 50 px tall.
                    self.window.mouse_click(mHwnd, 387, 116 + (50 * app_player_index))
                    time.sleep(1)
                    confirm_window_hwnd_list = self.window.getInnerWindow(mHwnd)
                    self.logger.debug(confirm_window_hwnd_list)
                    if len(confirm_window_hwnd_list) > 0:
                        for each_hwnd in confirm_window_hwnd_list:
                            self.logger.debug(each_hwnd)
                            time.sleep(1)
                            self.window.mouse_click(each_hwnd, 310, 210)
                        break
                    else:
                        time.sleep(1)
                return
        # Reached only when no restart was issued above.
        self.set_option('restart_app_player_checkpoint', time.time())
        self.logger.warn('앱플레이어 재시작 기능 사용 가능')
    def is_freezing(self):
        """Detect a frozen (unchanging) player screen by pixel sampling.

        Compares five sample points (center and four quadrant midpoints)
        between the previous and current screen captures. If any sampled
        pixel changed, the freeze timer resets. If nothing changed for
        longer than the configured freeze limit, a Telegram alert with a
        screenshot is sent.

        Returns:
            Always False in the current implementation -- the 'return True'
            on freeze detection is commented out, so callers are only
            notified via Telegram/logs, never via the return value.
        """
        freezing_limit = int(self.common_config[lybconstant.LYB_DO_STRING_RECOVERY_COUNT + 'freezing_limit'])
        if freezing_limit == 0:
            # Feature disabled by configuration.
            return False
        (x, y, x2, y2) = self.window.get_player_anchor_rect(self.hwnd)
        w = x2 - x
        h = y2 - y
        # Sample the center plus the midpoint of each quadrant.
        check_position_list = [
            (int(w * 0.5), int(h * 0.5)),
            (int(w * 0.25), int(h * 0.25)),
            (int(w * 0.75), int(h * 0.25)),
            (int(w * 0.25), int(h * 0.75)),
            (int(w * 0.75), int(h * 0.75))
        ]
        for each_pos in check_position_list:
            loc_x = each_pos[0]
            loc_y = each_pos[1]
            if self.last_window_pixels[loc_x, loc_y] != self.window_pixels[loc_x, loc_y]:
                # Screen changed: reset the freeze timer.
                self.count_for_freeze = time.time()
                return False
            # self.logger.debug(str((loc_x, loc_y)) + ' ' + str(self.last_window_pixels[loc_x, loc_y]) + ' ' + str(self.window_pixels[loc_x, loc_y]))
        elapsed_time = time.time() - self.count_for_freeze
        if elapsed_time >= freezing_limit:
            # Freeze confirmed: alert with a screenshot, then reset the timer.
            self.count_for_freeze = time.time()
            self.telegram_send('창 이름 [' + str(self.window_title) + ']에서 화면 프리징이 감지되어 게임을 강제 종료합니다.')
            png_name = self.save_image('freeze')
            self.telegram_send('', image=png_name)
            return False
            # return True
        else:
            if elapsed_time > 10:
                self.logger.warn('화면 프리징 감지됨...(' + str(int(elapsed_time)) + '/' + str(freezing_limit) + '초)')
            return False
    def click_back(self):
        """Click the emulator's 'back' button (Nox sidebar or MoMo toolbar).

        Only acts when no terminate sequence is in progress
        (terminate_status == 0). For Nox, a missing sidebar handle disables
        the feature and cancels any pending terminate request.
        """
        if self.player_type == 'nox':
            if self.terminate_status == 0:
                if self.side_hwnd == None:
                    # No sidebar window found: back/terminate unusable on Nox.
                    self.logger.warn('녹스 사이드바 검색 실패로 종료 기능 사용 불가')
                    self.request_terminate = False
                    return
                self.window.mouse_click(self.side_hwnd, 16, likeyoubot_win.LYBWin.HEIGHT - 115)
        elif self.player_type == 'momo':
            if self.terminate_status == 0:
                # MoMo's back button lives on the parent window, right of the screen.
                self.window.mouse_click(self.parent_hwnd,
                                        likeyoubot_win.LYBWin.WIDTH + 20,
                                        likeyoubot_win.LYBWin.HEIGHT - 80)
    def process_terminate_applications(self):
        """Advance the multi-step 'close running apps' UI sequence one step.

        Uses terminate_status as a small state machine: step 0 opens the
        recent-apps view; steps 1..max_app_close_count-1 swipe/click apps
        closed; after that the state resets and request_terminate is cleared.
        Coordinates are hard-coded per player type (Nox vs MoMo).
        """
        max_app_close_count = self.common_config[lybconstant.LYB_DO_STRING_CLOSE_APP_COUNT]
        self.logger.debug('CloseMaxCount: ' + str(max_app_close_count))
        if self.player_type == 'nox':
            if self.terminate_status == 0:
                # self.mouse_click_with_cursor(660, 350)
                if self.side_hwnd == None:
                    # Without the sidebar handle we cannot open recent apps.
                    self.logger.warn('녹스 사이드바 검색 실패로 종료 기능 사용 불가')
                    self.request_terminate = False
                    return
                self.window.mouse_click(self.side_hwnd, 16, likeyoubot_win.LYBWin.HEIGHT - 40)
                self.terminate_status += 1
            elif self.terminate_status > 0 and self.terminate_status < max_app_close_count:
                self.logger.info('녹스 앱 종료 중..')
                if self.common_config[lybconstant.LYB_DO_STRING_CLOSE_APP_NOX_NEW] == True:
                    # Newer Nox UI: swipe the app card off-screen to close it.
                    self.window.mouse_drag(self.hwnd,
                                           int(likeyoubot_win.LYBWin.WIDTH * 0.5),
                                           likeyoubot_win.LYBWin.HEIGHT - 90, 0,
                                           likeyoubot_win.LYBWin.HEIGHT - 90,
                                           delay=0,
                                           move_away=self.common_config[
                                               lybconstant.LYB_DO_BOOLEAN_MOUSE_POINTER + 'away'])
                else:
                    # Older Nox UI: open the app menu, then click 'close'.
                    # self.window.mouse_drag(self.hwnd, 630, 220, 630, 80, 0.5)
                    self.window.mouse_click(self.hwnd,
                                            likeyoubot_win.LYBWin.WIDTH - 10,
                                            likeyoubot_win.LYBWin.HEIGHT - 230,
                                            delay=2)
                    time.sleep(2)
                    self.window.mouse_click(self.hwnd,
                                            likeyoubot_win.LYBWin.WIDTH - 90,
                                            likeyoubot_win.LYBWin.HEIGHT - 335,
                                            )
                self.terminate_status += 1
            else:
                # Sequence complete: reset the state machine.
                self.terminate_status = 0
                self.request_terminate = False
        elif self.player_type == 'momo':
            if self.terminate_status == 0:
                self.window.mouse_click(self.parent_hwnd,
                                        likeyoubot_win.LYBWin.WIDTH + 20,
                                        likeyoubot_win.LYBWin.HEIGHT - 5)
                # self.move_mouse_location(660, 355)
                self.terminate_status += 1
            elif self.terminate_status > 0 and self.terminate_status < max_app_close_count:
                self.logger.info('모모 앱 종료 중...')
                # Swipe the app card off-screen to close it.
                self.window.mouse_drag(self.hwnd,
                                       int(likeyoubot_win.LYBWin.WIDTH * 0.5),
                                       likeyoubot_win.LYBWin.HEIGHT - 90, 0,
                                       likeyoubot_win.LYBWin.HEIGHT - 90,
                                       0.5,
                                       move_away=self.common_config[lybconstant.LYB_DO_BOOLEAN_MOUSE_POINTER + 'away'])
                self.terminate_status += 1
            else:
                self.terminate_status = 0
                self.request_terminate = False
    def process_event(self, window_pixels, event_name):
        """Handle a recognized on-screen event, with rate limiting.

        Gives a custom handler first shot via custom_event(); otherwise
        clicks the event's associated '<event>_button'. The same event is
        throttled: more than 10 firings within a 10-second window are
        ignored until the window elapses.

        Returns:
            True if the event was handled (custom or click), False if it
            was suppressed by the rate limiter.
        """
        if not event_name in self.event_limit:
            # First occurrence of this event: initialize its throttle state.
            self.event_limit[event_name] = time.time()
            self.event_limit[event_name + '_count'] = 0
        else:
            # Throttle: the same event may fire at most ~every 10 seconds.
            if time.time() - self.event_limit[event_name] < 10:
                if self.event_limit[event_name + '_count'] > 10:
                    return False
            else:
                # Window elapsed: reset the repeat counter.
                self.event_limit[event_name + '_count'] = 0
        if self.custom_event(event_name) == True:
            # A custom handler consumed the event; skip the default click.
            return True
        # Recognition can misfire, so one idea was to require the event to
        # repeat within 10 seconds before clicking (kept for reference):
        # if self.event_limit[event_name + '_count'] % 2 == 0:
        #     match_rate = self.rateMatchedPixelBox(window_pixels, event_name + '_button')
        #     # self.loggingToGUI("%-47s %-2s 클릭위치매칭률: %10s%%" \
        #     #     % (self.get_adjusted_name(self.current_matched_scene['name']), '', \
        #     #     str(int(match_rate*self.weight_threshold*100))))
        #     if match_rate > 0.1 * self.weight_threshold:
        #         self.logger.debug('click success event: ' + str(event_name) + '_button ' + str(match_rate))
        #         self.mouse_click(event_name + '_button')
        #     else:
        #         self.logger.warn('click fail event: ' + str(event_name) + '_button ' + str(match_rate))
        self.mouse_click(event_name + '_button')
        # Record this firing for the rate limiter and for diagnostics.
        self.event_limit[event_name] = time.time()
        self.event_limit[event_name + '_count'] += 1
        self.last_event['name'] = event_name
        self.last_event['rate'] = self.current_matched_event['rate']
        return True
def get_screen_by_location(self, window_image):
return ''
def get_scene(self, scene_name):
if not scene_name in self.scene_dic:
self.add_scene(scene_name)
return self.scene_dic[scene_name]
def clear_scene(self):
pass
def setAppPlayer(self, player_type):
self.player_type = player_type
    def add_scene(self, scene_name):
        """Create a new LYBScene, wire it up, and register it in scene_dic."""
        self.scene_dic[scene_name] = lybscene.LYBScene(scene_name)
        # The scene logs through the shared queue and can call back into us.
        self.scene_dic[scene_name].setLoggingQueue(self.logging_queue)
        self.scene_dic[scene_name].setGameObject(self)
def setGameTab(self, game_tab_object):
self.game_tab = game_tab_object
def setLoggingQueue(self, logging_queue):
self.logging_queue = logging_queue
    def setWindowHandle(self, hwnd, side_hwnd, parent_hwnd, multi_hwnd_dic):
        """Store the window handles for this player instance.

        The window title is read from the parent window when one exists,
        otherwise from the player window itself. For parent-less (Nox-style)
        players, the sidebar handle enables the auto-restart feature; a
        missing sidebar is logged as a warning.
        """
        self.hwnd = hwnd
        if parent_hwnd == 0 or parent_hwnd == None:
            self.window_title = self.window.get_title(self.hwnd)
        else:
            self.window_title = self.window.get_title(parent_hwnd)
        if parent_hwnd != None and parent_hwnd != 0:
            self.parent_hwnd = parent_hwnd
        else:
            self.side_hwnd = side_hwnd
            if self.side_hwnd == None:
                self.logger.warn('녹스 사이드바가 검색 실패. 자동 재시작 기능 사용 불가')
            else:
                self.logger.critical('녹스 사이드바가 검색 성공. 자동 재시작 기능 사용 가능)')
        self.multi_hwnd_dic = multi_hwnd_dic
def setStartFlag(self, flag):
self.start_status = flag
def setCommonConfig(self, config):
self.configure = config
self.common_config = self.configure.common_config
def setWindowConfig(self, config):
self.window_config = config
def get_adjusted_name(self, object_name):
return object_name.replace('lin2rev', '', 1).replace('clans', '', 1).replace('tera', '', 1).replace('scene', '',
1).replace(
'event', '', 1).replace('_', '', 10).upper()
    def loggingElapsedTime(self, title, elapsed_time, limit_time, period=10):
        """Log elapsed (and optional limit) time as HH:MM:SS, rate-limited.

        At most one line is emitted per `period` seconds. When limit_time is
        0 only the elapsed time is printed; otherwise 'elapsed / limit'.
        """
        if time.time() - self.last_logging < period:
            # Too soon since the last log line; skip to avoid spamming.
            return
        self.last_logging = time.time()
        if limit_time == 0:
            self.logger.info("%s:%9s" % (
                self.preformat_cjk(title, 1),
                str(time.strftime('%H:%M:%S', time.gmtime(int(elapsed_time))))))
        else:
            self.logger.info("%s:%9s / %s" % (
                self.preformat_cjk(title, 1),
                str(time.strftime('%H:%M:%S', time.gmtime(int(elapsed_time)))),
                str(time.strftime('%H:%M:%S', time.gmtime(int(limit_time))))))
def get_work_status(self, work_name):
return -1
def get_option(self, option_name):
if not option_name in self.options:
self.options[option_name] = None
return self.options[option_name]
def set_option(self, option_name, value):
self.options[option_name] = value
def preformat_cjk(self, string, width, align='<', fill=' '):
count = (width - sum(1 + (unicodedata.east_asian_width(c) in "WF")
for c in string))
return {
'>': lambda s: fill * count + s,
'<': lambda s: s + fill * count,
'^': lambda s: fill * (count / 2)
+ s
+ fill * (count / 2 + count % 2)
}[align](string)
    def loggingToGUI(self, log_message, log_type='log'):
        """Log a message prefixed with this window's title.

        log_type 'log' goes to debug level; anything else to info level.
        (The original GUI queue forwarding is kept below, commented out.)
        """
        message = '%s %s' % (self.preformat_cjk(self.window_title, 1), log_message)
        if log_type == 'log':
            self.logger.debug(message)
        else:
            self.logger.info(message)
        # if self.get_window_config('debug_booleanvar') == True or log_type != 'log':
        # 	message = '%s%s'%(self.preformat_cjk(self.window_title, 20), log_message)
        # 	self.logging_queue.put_nowait(likeyoubot_message.LYBMessage(log_type, message))
    def locationOnWindowPart(self, parent, child,
                             custom_threshold=-1,
                             custom_below_level=-1,
                             custom_top_level=-1,
                             source_custom_below_level=-1,
                             source_custom_top_level=-1,
                             custom_flag=-1,
                             custom_rect=(-1, -1, -1, -1)  # x, y, width, height
                             ):
        """Locate `child` within `parent`, adjusting the search rect per player.

        When custom_flag is set (!= -1), the given custom_rect is shifted by
        the player-specific pixel offset before delegating to
        locationOnWindowWithRate2; -1 values mean "use defaults".
        """
        if custom_flag != -1:
            # Shift the caller's rect by this player's screen offset.
            adj_x, adj_y = self.get_player_adjust()
            custom_rect2 = (
                custom_rect[0] + adj_x, custom_rect[1] + adj_y, custom_rect[2] + adj_x, custom_rect[3] + adj_y)
        else:
            custom_rect2 = (-1, -1, -1, -1)
        return LYBGame.locationOnWindowWithRate2(parent, child,
                                                 custom_threshold=custom_threshold,
                                                 custom_below_level=custom_below_level,
                                                 custom_top_level=custom_top_level,
                                                 source_custom_below_level=source_custom_below_level,
                                                 source_custom_top_level=source_custom_top_level,
                                                 custom_flag=custom_flag,
                                                 custom_rect=custom_rect2
                                                 )
    @classmethod
    def locationOnWindow(self, parent, child, custom_threshold=-1, custom_below_level=-1, custom_top_level=-1):
        """Locate `child` within `parent`, discarding the match rate.

        NOTE(review): declared @classmethod but the first parameter is named
        `self` (it receives the class); kept as-is for compatibility.
        """
        (loc_x, loc_y), rate = LYBGame.locationOnWindowWithRate2(parent, child,
                                                                 custom_threshold=custom_threshold,
                                                                 custom_below_level=custom_below_level,
                                                                 custom_top_level=custom_top_level)
        return (loc_x, loc_y)
def locationResourceOnWindowPart2(self, parent, child_resource,
custom_threshold=-1,
custom_below_level=-1,
custom_top_level=-1,
source_custom_below_level=-1,
source_custom_top_level=-1,
custom_flag=-1,
near=32,
average=False,
debug=False
):
if not child_resource in self.resource_manager.resource_dic:
return ((-1, -1), 0)
left = likeyoubot_win.LYBWin.WIDTH
top = likeyoubot_win.LYBWin.HEIGHT
right = -1
bottom = -1
pb_width = 0
pb_height = 0
resource = self.resource_manager.resource_dic[child_resource]
for each_pixel_box_name in resource:
pixel_box = self.resource_manager.pixel_box_dic[each_pixel_box_name]
if pb_width < pixel_box.width:
pb_width = pixel_box.width
if pb_height < pixel_box.height:
pb_height = pixel_box.height
(loc_x, loc_y) = self.get_location(each_pixel_box_name)
if loc_x < left:
left = loc_x
if loc_x > right:
right = loc_x
if loc_y < top:
top = loc_y
if loc_y > bottom:
bottom = loc_y
adj_x, adj_y = self.get_player_adjust()
left = left - pb_width - near + adj_x
top = top - pb_height - near + adj_y
right = right + pb_width + near - adj_x
bottom = bottom + pb_height + near - adj_y
if left < 0:
left = 0
if top < 1:
top | |
# gh_stars: 0
import sys
import pickle
import json
import os
import math
import networkx as nx
from collections import defaultdict
from net_init import load_network
from net_init import generate_random_outs_conns_with_oracle as gen_rand_outs_with_oracle
from network.sparse_table import SparseTable
from network.communicator import Communicator
from network import comm_network
from network.oracle import SimpleOracle
from sec_hop.selector import Selector
from mat_complete.mat_comp_solver import construct_table
import random
import numpy as np
class Experiment:
    def __init__(self, topo, in_lim, out_lim, name, num_keep, num_2hop, num_rand, num_epoch, adapts, num_msg, churn_rate):
        """Set up an experiment over the given network topology.

        Args:
            topo: Topology file/spec passed to load_network.
            in_lim, out_lim: Per-node incoming/outgoing connection limits.
            name: Output path for the distance history (its dirname becomes
                the experiment output directory).
            num_keep, num_2hop, num_rand: Out-connection budget split; must
                sum to out_lim.
            num_epoch: Number of epochs to run.
            adapts: Ids of adaptive ("star") nodes.
            num_msg: Messages per epoch.
            churn_rate: Node churn rate.
        """
        self.in_lim = in_lim
        self.out_lim = out_lim
        self.num_out = out_lim
        # All experiment outputs live next to the distance file.
        self.outdir = os.path.dirname(name)
        self.loc, self.ld, self.roles, self.proc_delay, self.pub_prob = load_network(topo)
        self.num_node = len(self.loc)
        self.num_epoch = num_epoch
        self.num_msg = num_msg
        self.churn_rate = churn_rate
        self.selectors = {i: Selector(i, num_keep, num_rand, num_msg, self.num_node)
                          for i in range(self.num_node)}
        # elf.num_cand = num_node # could be less if num_node is large
        self.snapshots = []
        # self.pools = Pool(processes=num_thread)
        self.directions = ['incoming', 'outgoing', 'bidirect']
        self.nodes = {i: Communicator(i, self.proc_delay[i], in_lim, out_lim, [])
                      for i in range(self.num_node)}
        self.oracle = SimpleOracle(in_lim, out_lim, self.num_node)
        self.out_hist = []
        self.sparse_tables = {i: SparseTable(i) for i in range(self.num_node)}
        # self.conns_snapshot = []
        # self.broad_nodes = [] # hist of broadcasting node
        # self.timer = time.time()
        # self.pubs = [k for k,v in self.roles.items() if v=='PUB']
        self.adapts = adapts
        self.pub_hist = []
        # Per-star history of (epoch, distances) tuples; see get_truth_distance.
        self.dists_hist = defaultdict(list)
        self.dist_file = name
        # log setting
        # self.use_logger = use_logger
        # self.logdir = self.outdir + '/' + 'logs'
        # if not os.path.exists(self.logdir):
        # os.makedirs(self.logdir)
        # self.loggers = {}
        # self.init_logger()
        self.init_graph_conn = os.path.join(self.outdir, 'init.json')
        self.snapshot_dir = os.path.join(self.outdir, 'snapshots')
        self.snapshot_exploit_dir = os.path.join(self.outdir, 'snapshots-exploit')
        self.write_adapts_node(os.path.join(self.outdir, 'adapts'))
        if not os.path.exists(self.snapshot_dir):
            os.makedirs(self.snapshot_dir)
        if not os.path.exists(self.snapshot_dir):
            os.makedirs(self.snapshot_dir)
        self.num_keep = num_keep
        self.num_2hop = num_2hop
        self.num_rand = num_rand
        assert(num_keep + num_2hop + num_rand == self.out_lim)
    def construct_graph(self):
        """Build the current connection graph with link-delay edge weights.

        Edge weight = link delay + half of each endpoint's processing delay,
        so a shortest path accumulates full intermediate processing delays.
        Exits the process on a self-loop (treated as a fatal config error).
        """
        G = nx.Graph()
        for i, node in self.nodes.items():
            for u in node.outs:
                delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
                if i == u:
                    print('self loop', i)
                    sys.exit(1)
                G.add_edge(i, u, weight=delay)
        return G
    def construct_exploit_graph(self, curr_outs):
        """Build the connection graph excluding adaptive nodes' random slots.

        Like construct_graph, but for adaptive nodes only the first
        (num_out - num_rand) out-connections are included, i.e. the
        exploitation connections without the exploratory random ones.
        """
        G = nx.Graph()
        for i, node in self.nodes.items():
            out_peers = []
            if i in self.adapts:
                # Adaptive node: drop the trailing random (exploration) slots.
                out_peers = curr_outs[i][:self.num_out-self.num_rand]
            else:
                out_peers = curr_outs[i]
            for u in out_peers:
                delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
                if i == u:
                    print('self loop', i)
                    sys.exit(1)
                G.add_edge(i, u, weight=delay)
        return G
    def write_cost(self, outpath):
        """Write the all-pairs path-cost matrix (row-major) to outpath.

        Cost i->j is the Dijkstra shortest-path length adjusted so the
        source's processing delay is excluded and the destination's full
        processing delay is included.
        """
        G = self.construct_graph()
        with open(outpath, 'w') as w:
            length = dict(nx.all_pairs_dijkstra_path_length(G))
            for i in range(self.num_node):
                for j in range(self.num_node):
                    cost = length[i][j] - self.proc_delay[i]/2.0 + self.proc_delay[j]/2.0
                    w.write(str(cost) + ' ')
                w.write('\n')
    def write_exploit_cost(self, outpath, curr_outs):
        """Write the all-pairs cost matrix over the exploitation-only graph.

        Same adjustment as write_cost, but computed on the graph from
        construct_exploit_graph (adaptive nodes' random slots excluded).
        """
        G = self.construct_exploit_graph(curr_outs)
        with open(outpath, 'w') as w:
            length = dict(nx.all_pairs_dijkstra_path_length(G))
            for i in range(self.num_node):
                for j in range(self.num_node):
                    cost = length[i][j] - self.proc_delay[i]/2.0 + self.proc_delay[j]/2.0
                    w.write(str(cost) + ' ')
                w.write('\n')
def write_adapts_node(self, filename):
with open(filename, 'w') as w:
sorted_stars = sorted(self.adapts)
for star in sorted_stars:
w.write(str(star) + '\n')
    def get_truth_distance(self, star_i, interested_peers, epoch):
        """Record ground-truth distances from star_i to every publisher.

        Builds a graph in which star_i only connects out to interested_peers
        and no other node connects in to star_i, runs Dijkstra from star_i to
        each node whose role is 'PUB', and appends (epoch, dists) to
        self.dists_hist[star_i].  For each publisher m, dists[m] is the tuple
        (first-hop peer, topological path length, direct line length),
        lengths rounded to 3 decimals.
        """
        # construct graph
        G = nx.Graph()
        for i, node in self.nodes.items():
            if i == star_i:
                for u in interested_peers:
                    # only connect interested edge from the interested node
                    delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
                    if i == u:
                        print('self loop', i)
                        sys.exit(1)
                    G.add_edge(i, u, weight=delay)
            else:
                for u in node.outs:
                    # not connecting incoming edge to the interested node
                    if u != star_i:
                        delay = self.ld[i][u] + node.node_delay/2 + self.nodes[u].node_delay/2
                        if i == u:
                            print('self loop', i)
                            sys.exit(1)
                        G.add_edge(i, u, weight=delay)
        dists = {} # key is the target pub, value is the best peer and length
        pubs = [k for k,v in self.roles.items() if v=='PUB']
        for m in pubs:
            # the closest distance
            length, path = nx.single_source_dijkstra(G, source=star_i, target=m, weight='weight')
            assert(len(path)>=0)
            topo_length = None
            line_len = None
            j = None
            if len(path) == 1:
                # itself
                assert(star_i == m)
                topo_length = 0
                line_len = 0
                j = star_i
            else:
                # j is the first hop on the shortest path to m
                j = path[1]
                # drop half the first hop's processing delay, add half of the target's
                topo_length = length - self.proc_delay[j]/2.0 + self.proc_delay[m]/2.0
                # hypothetical direct link: raw link delay plus target's full delay
                line_len = self.ld[star_i][m] + self.proc_delay[m]
                # line_len = (math.sqrt(
                #    (self.loc[star_i][0]-self.loc[m][0])**2+
                #    (self.loc[star_i][1]-self.loc[m][1])**2 ) +self.proc_delay[m])
            dists[m] = (j, round(topo_length, 3), round(line_len, 3))
        self.dists_hist[star_i].append((epoch, dists))
def save_dists_hist(self):
if self.dist_file == 'None':
return
with open(self.dist_file, 'wb') as w:
pickle.dump(self.dists_hist, w)
def write_init_graph(self):
with open(self.init_graph_conn, 'w') as w:
graph_json = []
for u in range(self.num_node):
node = self.nodes[u]
outs = sorted([int(i) for i in node.outs])
ins = sorted([int(i) for i in node.ins])
peer = {
'node': int(u),
'outs': outs,
'ins': ins
}
graph_json.append(peer)
json.dump(graph_json, w, indent=4)
def take_snapshot(self, epoch, curr_outs):
name = "epoch"+str(epoch)+".txt"
outpath = os.path.join(self.snapshot_dir, name)
self.write_cost(outpath)
outpath_exploit = os.path.join(self.snapshot_exploit_dir, name)
self.write_exploit_cost(outpath_exploit, curr_outs)
# def init_selectors(self, out_conns, in_conns):
# for u in range(self.num_node):
# # if smaller then it is adv
# if u in self.adversary.sybils:
# self.selectors[u] = Selector(u, True, out_conns[u], in_conns[u], None)
# else:
# self.selectors[u] = Selector(u, False, out_conns[u], in_conns[u], None)
def broadcast_msgs(self, num_msg):
time_tables = {i:defaultdict(list) for i in range(self.num_node)}
abs_time_tables = {i:defaultdict(list) for i in range(self.num_node)}
broads = []
pubs = []
probs = []
for k, v in self.pub_prob.items():
pubs.append(k)
probs.append(v)
for _ in range(num_msg):
# p = random.choice(self.pubs)
p = np.random.choice(pubs, size=1, replace=False, p=probs)[0]
self.pub_hist.append(p)
broads.append(p)
comm_network.broadcast_msg(
p,
self.nodes,
self.ld,
time_tables,
abs_time_tables
)
for i in range(self.num_node):
self.sparse_tables[i].append_time(abs_time_tables[i], num_msg, 'abs_time')
self.sparse_tables[i].append_time(time_tables[i], num_msg, 'rel_time')
return broads
def update_selectors(self, outs_conns, ins_conn):
for i in range(self.num_node):
self.selectors[i].update(outs_conns[i], ins_conn[i])
def get_curr_ins(self, curr_outs):
curr_ins = defaultdict(list)
for u in range(self.num_node):
for o in curr_outs[u]:
curr_ins[o].append(u)
return curr_ins
def setup_conn_graph(self, curr_outs):
curr_ins = self.get_curr_ins(curr_outs)
for u in range(self.num_node):
self.nodes[u].update_conns(curr_outs[u], curr_ins[u])
def run_2hop(self, adapt_i, curr_out, e):
slots = self.sparse_tables[adapt_i].table[-self.num_msg:]
incomplete_table,M,nM,max_time,ids,ids_direct = construct_table(slots, adapt_i, self.directions)
selected, rands = self.selectors[adapt_i].run(self.oracle, curr_out, ids, slots)
return selected + rands
def run(self):
curr_outs = gen_rand_outs_with_oracle(self.num_out, self.num_node, self.oracle)
self.oracle.check(curr_outs)
self.setup_conn_graph(curr_outs)
self.write_init_graph()
for e in range(self.num_epoch):
self.take_snapshot(e, curr_outs)
self.oracle.check(curr_outs)
ps = self.broadcast_msgs(self.num_msg)
churn_adapts = comm_network.get_network_churning_nodes(self.churn_rate, self.adapts)
for adapt_i in np.random.permutation(churn_adapts):
curr_outs[adapt_i] = self.run_2hop(adapt_i, curr_outs[adapt_i], e)
self.setup_conn_graph(curr_outs)
for adapt_i in self.adapts:
self.get_truth_distance(adapt_i, curr_outs[adapt_i][:self.num_keep], e)
self.save_dists_hist()
# while True:
# network_state.reset(self.num_node, self.in_lim)
# if num_snapshot == len(record_epochs):
# break
# if self.method == 'mc':
# outs_conns, start_mc = self.run_mc(max_epoch,record_epochs, num_msg, epoch, network_state)
# self.conns_snapshot.append(outs_conns)
# if epoch in record_epochs:
# self.take_snapshot(epoch)
# num_snapshot += 1
# elif self.method == '2hop':
# outs_conns = self.run_2hop(num_msg, epoch, network_state)
# self.conns_snapshot.append(outs_conns)
# if epoch in record_epochs:
# self.take_snapshot(epoch)
# num_snapshot += 1
# epoch += 1
# def select_nodes(nodes, ld, num_msg, selectors, oracle, update_nodes, time_tables, in_lim, out_lim, network_state, num_keep, num_2hop, num_random):
# outs_neighbors = {} # output container
# num_invalid_compose = 0
# # direct peers
# num_rand_1hop = 0
# for i in update_nodes:
# keep_candidates = list(nodes[i].outs | nodes[i].ins )
# composes = comb_subset.get_config(
# num_keep,
# keep_candidates,
# len(keep_candidates),
# network_state,
# i)
# num_invalid_compose += math.comb(len(keep_candidates), num_keep) - len(composes)
# if len(composes) == 0:
# peers = selectors[i].select_random_peers(nodes, num_keep, network_state)
# num_rand_1hop += 1
# # oracle needs to know the connection
# oracle.update_1_hop_peers(i, peers)
# outs_neighbors[i] = peers
# else:
# for compose in composes:
# if len(compose) != len(set(compose)):
# print('repeat in compose')
# print(i)
# print('composes', compose)
# print(keep_candidates)
# print('in', list(nodes[i].outs))
# print('out', list(nodes[i].ins))
# sys.exit(1)
# peers = selectors[i].select_1hops(time_tables[i], composes, num_msg, network_state)
# # oracle needs to know the connection
# oracle.update_1_hop_peers(i, peers)
# outs_neighbors[i] = peers
# num_added_2hop = 0
# num_added_3hop = 0
# num_added_random = 0
# tot_not_seen = 0
# random.shuffle(update_nodes)
# # two hop peers
# if num_2hop > 0:
# for u in update_nodes:
# peers_info = oracle.get_multi_hop_info(u)
# peers, num_not_seen = selectors[u].select_peers(
# config.num_2_hop, nodes, peers_info.two_hops, network_state)
# oracle.update_2_hop_peers(u, peers)
# outs_neighbors[u] += peers
# num_added_2hop += len(peers)
# tot_not_seen += num_not_seen
# # add 3hops
# if out_lim - len(outs_neighbors[u]) > num_random:
# num_3_hop = out_lim - len(outs_neighbors[u]) - num_random
# peers_info = oracle.get_multi_hop_info(u)
# peers, num_not_seen = selectors[u].select_peers(num_3_hop, nodes, peers_info.three_hops, network_state)
# oracle.update_3_hop_peers(u, peers)
# outs_neighbors[u] += peers
# num_added_3hop += len(peers)
# tot_not_seen += num_not_seen
# # add random
# for u in update_nodes:
# num_random = out_lim - len(outs_neighbors[u])
# num_added_random += num_random
# peers = selectors[u].select_random_peers(nodes, num_random, network_state)
# for p in | |
not silent:
# Ask which preset to use
print(f'Available ddrescue presets: {" / ".join(SETTING_PRESETS)}')
preset = std.choice(SETTING_PRESETS, 'Please select a preset:')
# Fix selection
for _p in SETTING_PRESETS:
if _p.startswith(preset):
preset = _p
# Add default settings
menu.add_action('Load Preset')
menu.add_action('Main Menu')
for name, details in DDRESCUE_SETTINGS['Default'].items():
menu.add_option(name, details.copy())
# Update settings using preset
if preset != 'Default':
for name, details in DDRESCUE_SETTINGS[preset].items():
menu.options[name].update(details.copy())
# Done
return menu
def build_sfdisk_partition_line(table_type, dev_path, size, details):
  """Build sfdisk partition line using passed details, returns str.

  Keeps the source partition type when source/dest table types match,
  otherwise derives a type from the source filesystem.  Label and UUID
  are appended when available.
  """
  source_fs = str(details.get('fstype', '')).upper()
  source_type = details.get('parttype', '')
  source_table_type = ''
  dest_type = ''
  # Determine destination partition type from the source type
  if re.match(r'^0x\w+$', source_type):
    # Source is an MBR hex id (e.g. 0x83)
    source_table_type = 'MBR'
    if table_type == 'MBR':
      dest_type = source_type.replace('0x', '').lower()
  elif re.match(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', source_type):
    # Source is a GPT type GUID
    source_table_type = 'GPT'
    if table_type == 'GPT':
      dest_type = source_type.upper()
  if not dest_type:
    # Table types differ (or source type unknown): map from the filesystem
    dest_type = cfg.ddrescue.PARTITION_TYPES.get(table_type, {}).get(source_fs, '')
  line = f'{dev_path} : size={size}, type={dest_type}'
  # Safety Check
  if not dest_type:
    std.print_error(f'Failed to determine partition type for: {dev_path}')
    raise std.GenericAbort()
  # Add extra details
  if details.get('partlabel', ''):
    line += f', name="{details["partlabel"]}"'
  if details.get('partuuid', '') and source_table_type == table_type:
    # Only add UUID if source/dest table types match
    line += f', uuid={details["partuuid"].upper()}'
  # Done
  return line
def check_destination_health(destination):
  """Check destination health, returns str (empty when healthy or N/A)."""
  # Non-disk destinations (e.g. image directories) have nothing to check
  if not isinstance(destination, hw_obj.Disk):
    return ''
  # Run safety checks, translating known failures into messages
  try:
    destination.safety_checks()
  except hw_obj.CriticalHardwareError:
    return 'Critical hardware error detected on destination'
  except hw_obj.SMARTSelfTestInProgressError:
    return 'SMART self-test in progress on destination'
  except hw_obj.SMARTNotSupportedError:
    # No SMART support is not considered a failure
    pass
  # Done
  return ''
def clean_working_dir(working_dir):
  """Clean working directory to ensure a fresh recovery session.

  NOTE: Data from previous sessions will be preserved
        in a backup directory.
  """
  backup_dir = io.non_clobber_path(pathlib.Path(f'{working_dir}/prev'))
  backup_dir.mkdir()
  # Relocate settings, maps, etc from any previous session
  for entry in os.scandir(working_dir):
    if not entry.name.endswith(('.dd', '.json', '.map')):
      continue
    dest_path = io.non_clobber_path(f'{backup_dir}/{entry.name}')
    shutil.move(entry.path, dest_path)
def format_status_string(status, width):
  """Format colored status string, returns str."""
  status_str = str(status)
  # Numeric strings are treated as percentages, anything else as text
  try:
    percent = float(status_str)
  except ValueError:
    percent = -1
  if percent >= 0:
    # Percentage value
    color = get_percent_color(percent)
    status_str = f'{percent:{width-2}.2f} %'
    if '100.00' in status_str and percent < 100:
      # Always round down to 99.99%
      LOG.warning('Rounding down to 99.99 from %s', percent)
      status_str = f'{"99.99 %":>{width}}'
  else:
    # Plain text status
    color = STATUS_COLORS.get(status_str, None)
    status_str = f'{status_str:>{width}}'
  # Add color if necessary
  if color:
    status_str = std.color_string(status_str, color)
  # Done
  return status_str
def fstype_is_ok(path, map_dir=False):
  """Check if filesystem type is acceptable, returns truthy match or falsy.

  Args:
    path: file or directory whose backing filesystem is checked.
    map_dir: when True, validate against the recommended map-file
      filesystems instead of the general recovery list.
  """
  is_ok = False
  fstype = None
  # Get fstype
  if PLATFORM == 'Darwin':
    # Check all parent dirs until a mountpoint is found
    test_path = pathlib.Path(path)
    while True:
      fstype = get_fstype_macos(test_path)
      if fstype != 'UNKNOWN':
        break
      fstype = None
      if test_path == test_path.parent:
        # Reached the filesystem root: Path('/').parent is Path('/'),
        # so looping further would never terminate (bug fix - the old
        # `while test_path:` condition was always true).
        break
      test_path = test_path.parent
  elif PLATFORM == 'Linux':
    cmd = [
      'findmnt',
      '--noheadings',
      '--output', 'FSTYPE',
      '--target', path,
      ]
    proc = exe.run_program(cmd, check=False)
    fstype = proc.stdout
  # fstype may still be None (no mountpoint found / unknown platform);
  # guard before strip to avoid AttributeError (bug fix).
  fstype = (fstype or '').strip().lower()
  # Check fstype
  if map_dir:
    is_ok = RECOMMENDED_MAP_FSTYPES.match(fstype)
  else:
    is_ok = RECOMMENDED_FSTYPES.match(fstype)
  # Done
  return is_ok
def get_ddrescue_settings(settings_menu):
  """Get ddrescue settings from menu selections, returns list."""
  chosen = []
  # Collect every selected option, rendering valued options as name=value
  for name, details in settings_menu.options.items():
    if not details['Selected']:
      continue
    if 'Value' in details:
      chosen.append(f'{name}={details["Value"]}')
    else:
      chosen.append(name)
  # Done
  return chosen
def get_etoc():
  """Get EToC from ddrescue output, returns str."""
  etoc = 'Unknown'
  now = datetime.datetime.now(tz=TIMEZONE)
  pane_text = tmux.capture_pane()
  # Find the most recent "remaining time" line in the pane
  remaining_lines = re.findall(r'remaining time:.*$', pane_text, re.MULTILINE)
  delta = None
  if remaining_lines:
    match = REGEX_REMAINING_TIME.search(remaining_lines[-1])
    if match.group('na'):
      etoc = 'N/A'
    else:
      parts = {
          key: int(match.group(key) or 0)
          for key in ('days', 'hours', 'minutes', 'seconds')
      }
      delta = datetime.timedelta(**parts)
  # Calc EToC if a non-zero delta was found
  if delta:
    etoc = (now + delta).strftime('%Y-%m-%d %H:%M %Z')
  # Done
  return etoc
def get_fstype_macos(path):
  """Get fstype for path under macOS, returns str.

  Parses `mount` output for a line of the form
  "<dev> on <path> (<fstype>, ...)".  Returns 'UNKNOWN' when the path is
  not a mountpoint or `mount` fails.
  """
  fstype = 'UNKNOWN'
  proc = exe.run_program(['mount'], check=False)
  # Bail early
  if proc.returncode:
    return fstype
  # Parse output
  # re.escape: paths may contain regex metacharacters (spaces with '+',
  # parentheses, dots) which previously corrupted the pattern (bug fix).
  match = re.search(rf'{re.escape(str(path))} \((\w+)', proc.stdout)
  if match:
    fstype = match.group(1)
  # Done
  return fstype
def get_object(path):
  """Get object based on path, returns obj."""
  # Bail early
  if not path:
    return None
  obj = None
  resolved = pathlib.Path(path).resolve()
  if resolved.is_block_device() or resolved.is_char_device():
    obj = hw_obj.Disk(resolved)
    # Offer the parent device when a partition/child device was given
    parent = obj.details['parent']
    if parent:
      std.print_warning(f'"{obj.path}" is a child device')
      if std.ask(f'Use parent device "{parent}" instead?'):
        obj = hw_obj.Disk(parent)
  elif resolved.is_dir():
    obj = resolved
  elif resolved.is_file():
    # Assuming file is a raw image, mounting
    loop_path = mount_raw_image(resolved)
    obj = hw_obj.Disk(loop_path)
  # Abort if obj not set
  if not obj:
    std.print_error(f'Invalid source/dest path: {resolved}')
    raise std.GenericAbort()
  # Done
  return obj
def get_partition_separator(name):
  """Get partition separator based on device name, returns str.

  Devices such as loop0, mmcblk0 and nvme0n1 name their partitions with
  a 'p' infix (e.g. nvme0n1p1); plain disks (sda) use none.
  """
  return 'p' if re.search(r'(loop|mmc|nvme)', name, re.IGNORECASE) else ''
def get_percent_color(percent):
  """Get color based on percentage, returns str (or None for <= 0)."""
  if percent > 100:
    return 'PURPLE'
  if percent >= 99:
    return 'GREEN'
  if percent >= 90:
    return 'YELLOW'
  if percent > 0:
    return 'RED'
  # Zero or negative: no color
  return None
def get_table_type(disk):
  """Get disk partition table type, returns str.

  NOTE: If resulting table type is not GPT or MBR
        then an exception is raised.
  """
  # lsblk reports MBR as 'dos'
  table_type = str(disk.details.get('pttype', '')).upper().replace('DOS', 'MBR')
  if table_type in ('GPT', 'MBR'):
    return table_type
  std.print_error(f'Unsupported partition table type: {table_type}')
  raise std.GenericAbort()
def get_working_dir(mode, destination, force_local=False):
  """Get working directory using mode and destination, returns path.

  Prompts for a ticket ID (must start with digits), then picks a
  directory in order of preference: the destination itself (Image mode),
  a mounted backup share (Clone mode, unless force_local), else the
  current directory.  Creates the directory and chdirs into it.
  """
  ticket_id = None
  working_dir = None
  # Set ticket ID
  while ticket_id is None:
    ticket_id = std.input_text(
      prompt='Please enter ticket ID:',
      allow_empty_response=False,
      )
    ticket_id = ticket_id.replace(' ', '_')
    # Ticket IDs must start with at least one digit
    if not re.match(r'^\d+', ticket_id):
      ticket_id = None
  # Use preferred path if possible
  if mode == 'Image':
    try:
      path = pathlib.Path(destination).resolve()
    except TypeError as err:
      std.print_error(f'Invalid destination: {destination}')
      raise std.GenericAbort() from err
    if path.exists() and fstype_is_ok(path, map_dir=False):
      working_dir = path
  elif mode == 'Clone' and not force_local:
    std.print_info('Mounting backup shares...')
    net.mount_backup_shares(read_write=True)
    for server in cfg.net.BACKUP_SERVERS:
      path = pathlib.Path(
        f'/{"Volumes" if PLATFORM == "Darwin" else "Backups"}/{server}',
        )
      if path.exists() and fstype_is_ok(path, map_dir=True):
        # Acceptable path found
        working_dir = path
        break
  # Default to current dir if necessary
  if not working_dir:
    LOG.error('Failed to set preferred working directory')
    working_dir = pathlib.Path(os.getcwd())
  # Set subdir using ticket ID
  if mode == 'Clone':
    working_dir = working_dir.joinpath(ticket_id)
  # Create directory
  working_dir.mkdir(parents=True, exist_ok=True)
  os.chdir(working_dir)
  # Done
  LOG.info('Set working directory to: %s', working_dir)
  return working_dir
def is_missing_source_or_destination(state):
  """Check if source or destination disappeared, returns bool."""
  missing = False
  # Examine both endpoints, skipping any that were never set
  for name, item in (('Source', state.source), ('Destination', state.destination)):
    if not item:
      continue
    if hasattr(item, 'path'):
      # Disk-like object: check its device path
      if not item.path.exists():
        missing = True
        std.print_error(f'{name} disappeared')
    elif hasattr(item, 'exists'):
      # Plain path-like object
      if not item.exists():
        missing = True
        std.print_error(f'{name} disappeared')
    else:
      LOG.error('Unknown %s type: %s', name, item)
  # Update top panes
  state.update_top_panes()
  # Done
  return missing
def source_or_destination_changed(state):
  """Verify the source and destination objects are still valid."""
  changed = False
  for obj in (state.source, state.destination):
    if not obj:
      # Object missing entirely
      changed = True
    elif hasattr(obj, 'exists'):
      # Assuming dest path
      if not obj.exists():
        changed = True
    elif isinstance(obj, hw_obj.Disk):
      # Re-read the device and compare identifying details
      fresh = hw_obj.Disk(obj.path)
      for key in ('model', 'serial'):
        if obj.details[key] != fresh.details[key]:
          changed = True
  # Update top panes
  state.update_top_panes()
  # Done
  if changed:
    std.print_error('Source and/or Destination changed')
  return changed
def main():
# pylint: disable=too-many-branches
"""Main function for ddrescue TUI."""
args = docopt(DOCSTRING)
log.update_log_path(dest_name='ddrescue-TUI', timestamp=True)
# Check if running inside tmux
if 'TMUX' not in os.environ:
LOG.error('tmux session not found')
raise RuntimeError('tmux session not found')
# Init
atexit.register(tmux.kill_all_panes)
main_menu = build_main_menu()
settings_menu = build_settings_menu()
state = State()
try:
state.init_recovery(args)
except (FileNotFoundError, std.GenericAbort):
is_missing_source_or_destination(state)
std.abort()
# Show menu
while True:
selection = main_menu.advanced_select()
# Change settings
if 'Change settings' in selection[0]:
while True:
selection = settings_menu.settings_select()
| |
einsum('ldpru,ypP->lydPru',bra[1][0],mpo[2]) # Bottom right site
Hbra[1][0].merge_inds([0,1])
Hbra[1][1] = bra[1][1].copy()
# Calculate Operator -------------------------------------
# Compute bottom environment as a boundary mpo
Hbot = update_bot_env2(0,
Hbra,
ket,
left[0],
left[1],
right[0],
right[1],
bot,
truncate=True,
chi=chi,
contracted_env=contracted_env)
# Compute top environment as a boundary mpo
Htop = update_top_env2(1,
Hbra,
ket,
left[2],
left[3],
right[2],
right[3],
top,
truncate=True,
chi=chi,
contracted_env=contracted_env)
# Contract top and bottom boundary mpos to get result
if contracted_env:
E = einsum('lkbKBr,lkbKBr->',Hbot,Htop)
else:
E = Hbot.contract(Htop)
# Calculate Norm -------------------------------------
if normalize:
# Compute bottom environment as a boundary mpo
Nbot = update_bot_env2(0,
bra,
ket,
left[0],
left[1],
right[0],
right[1],
bot,
truncate=True,
chi=chi,
contracted_env=contracted_env)
# Compute top environment as a boundary mpo
Ntop = update_top_env2(1,
bra,
ket,
left[2],
left[3],
right[2],
right[3],
top,
truncate=True,
chi=chi,
contracted_env=contracted_env)
# Contract top and bottom boundary mpos to get result
if contracted_env:
norm = einsum('lkbKBr,lkbKBr->',Nbot,Ntop)
else:
norm = Nbot.contract(Ntop)
if not isinstance(E,float): E = E.to_val()
if not isinstance(norm,float): norm = norm.to_val()
E /= norm
# Return result
return E
def calc_local_nn_op_ru(mpo,bra,ket,top,bot,left,right,normalize=True,contracted_env=False,chi=10):
    """
    Calculate the value of an operator as an mpo acting on the right
    and top bonds of a 2x2 peps grid

    Args:
        mpo: list of three mpo tensors acting on the top-left, top-right
            and bottom-right sites of the plaquette
        bra: 2x2 nested list of bra peps tensors
        ket: 2x2 nested list of ket peps tensors
        top/bot: boundary mpo (or contracted env) above/below the plaquette
        left/right: the four left/right boundary-mpo tensors

    Kwargs:
        normalize: bool
            Whether to divide the operator value by the norm
        contracted_env: bool
            Whether top/bot environments are fully contracted tensors
        chi: int
            Max bond dimension for the boundary mpos

    Returns:
        E: float
            The (normalized) operator value
    """
    # Check if it is a thermal state:
    thermal = len(bra[0][1].legs[2]) == 2
    # Absorb MPO into bra
    Hbra = [[None,None],[None,None]]
    if thermal:
        # Thermal states carry an extra (ancilla) physical leg; unmerge it,
        # apply the mpo to the physical index, then re-merge
        bra[0][1].unmerge_ind(2)
        Hbra[0][1] = einsum('ldparu,pPx->ldParxu',bra[0][1],mpo[0]) # Top Left Site
        Hbra[0][1].merge_inds([2,3])
        Hbra[0][1].merge_inds([3,4])
        bra[0][1].merge_inds([2,3])
        bra[1][1].unmerge_ind(2)
        Hbra[1][1] = einsum('ldparu,xpPy->lxdyParu',bra[1][1],mpo[1]) # Top Right Site
        Hbra[1][1].merge_inds([0,1])
        Hbra[1][1].merge_inds([1,2])
        Hbra[1][1].merge_inds([2,3])
        bra[1][1].merge_inds([2,3])
        bra[1][0].unmerge_ind(2)
        Hbra[1][0] = einsum('ldparu,ypP->ldParuy',bra[1][0],mpo[2]) # Bottom right site
        Hbra[1][0].merge_inds([2,3])
        Hbra[1][0].merge_inds([4,5])
        bra[1][0].merge_inds([2,3])
        Hbra[0][0] = bra[0][0].copy()
    else:
        Hbra[0][1] = einsum('ldpru,pPx->ldPrxu',bra[0][1],mpo[0]) # Top Left Site
        Hbra[0][1].merge_inds([3,4])
        Hbra[1][1] = einsum('ldpru,xpPy->lxdyPru',bra[1][1],mpo[1]) # Top Right Site
        Hbra[1][1].merge_inds([0,1])
        Hbra[1][1].merge_inds([1,2])
        Hbra[1][0] = einsum('ldpru,ypP->ldPruy',bra[1][0],mpo[2]) # Bottom right site
        Hbra[1][0].merge_inds([4,5])
        Hbra[0][0] = bra[0][0].copy()
    # Calculate Operator -------------------------------------
    # Compute bottom environment as a boundary mpo
    Hbot = update_bot_env2(0,
                           Hbra,
                           ket,
                           left[0],
                           left[1],
                           right[0],
                           right[1],
                           bot,
                           truncate=True,
                           chi=chi,
                           contracted_env=contracted_env)
    # Compute top environment as a boundary mpo
    Htop = update_top_env2(1,
                           Hbra,
                           ket,
                           left[2],
                           left[3],
                           right[2],
                           right[3],
                           top,
                           truncate=True,
                           chi=chi,
                           contracted_env=contracted_env)
    # Contract top and bottom boundary mpos to get result
    if contracted_env:
        E = einsum('lkbKBr,lkbKBr->',Hbot,Htop)
    else:
        E = Hbot.contract(Htop)
    # Calculate Norm -------------------------------------
    if normalize:
        # Compute bottom environment as a boundary mpo
        Nbot = update_bot_env2(0,
                               bra,
                               ket,
                               left[0],
                               left[1],
                               right[0],
                               right[1],
                               bot,
                               truncate=True,
                               chi=chi,
                               contracted_env=contracted_env)
        # Compute top environment as a boundary mpo
        Ntop = update_top_env2(1,
                               bra,
                               ket,
                               left[2],
                               left[3],
                               right[2],
                               right[3],
                               top,
                               truncate=True,
                               chi=chi,
                               contracted_env=contracted_env)
        # Contract top and bottom boundary mpos to get result
        if contracted_env:
            norm = einsum('lkbKBr,lkbKBr->',Nbot,Ntop)
        else:
            norm = Nbot.contract(Ntop)
        # Convert lazy/tensor results to plain scalars before dividing
        # (consistency fix: calc_local_nn_op_lb does this conversion, this
        # routine previously did not)
        if not isinstance(E,float): E = E.to_val()
        if not isinstance(norm,float): norm = norm.to_val()
        E /= norm
    # Return result
    return E
def calc_local_nn_op(row,bra,ops_col,left_bmpo,right_bmpo,bot_envs,top_envs,ket=None,normalize=True,contracted_env=False,chi=10):
    """
    Calculate the value of an operator on a 2x2 square
    Args:
        row: int
            The row of the ops_col to be evaluated
        bra: list of list of ndarrays
            The needed columns of the peps
        left_bmpo:
            The boundary mpo to the left of the two peps columns
        right_bmpo:
            The boundary mpo to the right of the two peps columns
        bot_envs:
            The boundary mpo version of the bottom environment
        top_envs:
            The boundary mpo version of the top environment
        ops_col: list of list of ndarrays
            The operators acting on next nearest neighboring sites
            within the two columns
    Kwargs:
        normalize: bool
            Whether to normalize the operator evaluations
        ket: List of list of ndarrays
            The needed columns of the ket
        contracted_env: bool
            Whether to contract the upper and lower environment
            or leave it as a boundary mps
        chi: int
            Max bond dimension for the boundary mps on the top
            and bottom
    Returns:
        E: float
            The operator value for the given 2x2 plaquette
    """
    # Copy bra if needed ----------------------------------
    # When no ket is supplied (or it is a placeholder of Nones),
    # use a copy of the bra as the ket
    copy_ket = False
    if ket is None: copy_ket = True
    elif hasattr(ket,'__len__'):
        if ket[0] is None: copy_ket = True
    if copy_ket:
        ket = [None]*len(bra)
        for i in range(len(bra)):
            ketcol = [None]*len(bra[i])
            for j in range(len(bra[i])):
                ketcol[j] = bra[i][j].copy()
            # TODO - Conjugate this ket col?
            ket[i] = ketcol
    # Extract needed tensors -------------------------------
    # Upper and lower environments =====
    if row == 0:
        if len(bra[0]) == 2:
            # Only two sites in column, use identity at both ends
            top,bot = None,None
        else:
            # At bottom unit cell, use identity on bottom
            top=top_envs[row+2]
            bot=None
    elif row == len(bra[0])-2:
        # At top unit cell, use identity on top
        top = None
        bot = bot_envs[row-1]
    else:
        # In the bulk, no identity needed
        top = top_envs[row+2]
        bot = bot_envs[row-1]
    # PEPS tensors =====================
    # 2x2 plaquette spanning rows (row, row+1) of both columns
    cell_bra = [[bra[0][row],bra[0][row+1]],
                [bra[1][row],bra[1][row+1]]]
    cell_ket = [[ket[0][row],ket[0][row+1]],
                [ket[1][row],ket[1][row+1]]]
    # NOTE(review): tuple-indexing — assumes the bmpo type supports
    # multi-index __getitem__ returning the four listed tensors; confirm
    cell_lbmpo = left_bmpo[row*2,row*2+1,row*2+2,row*2+3]
    cell_rbmpo = right_bmpo[row*2,row*2+1,row*2+2,row*2+3]
    # Flip tensors where needed ========
    # Flip bra and ket tensors (swap columns and transpose left/right legs
    # so the right-bond operator can reuse the left-bond routine)
    flip_bra = [[bra[1][row].copy().transpose([3,1,2,0,4]),bra[1][row+1].copy().transpose([3,1,2,0,4])],
                [bra[0][row].copy().transpose([3,1,2,0,4]),bra[0][row+1].copy().transpose([3,1,2,0,4])]]
    flip_ket = [[ket[1][row].copy().transpose([3,1,2,0,4]),ket[1][row+1].copy().transpose([3,1,2,0,4])],
                [ket[0][row].copy().transpose([3,1,2,0,4]),ket[0][row+1].copy().transpose([3,1,2,0,4])]]
    # Flip (contracted) top/bot environments
    # Always contract bot/top env to make transpose easier
    if not contracted_env:
        if top is not None:
            flip_top = einsum('ijk,klm->ijlm',top[0],top[1]).remove_empty_ind(0)
            flip_top = einsum('jlm,mno->jlno',flip_top,top[2])
            flip_top = einsum('jlno,opq->jlnpq',flip_top,top[3])
            flip_top = einsum('jlnpq,qrs->jlnprs',flip_top,top[4])
            flip_top = einsum('jlnprs,stu->jlnprtu',flip_top,top[5]).remove_empty_ind(6)
        if bot is not None:
            flip_bot = einsum('ijk,klm->ijlm',bot[0],bot[1]).remove_empty_ind(0)
            flip_bot = einsum('jlm,mno->jlno',flip_bot,bot[2])
            flip_bot = einsum('jlno,opq->jlnpq',flip_bot,bot[3])
            flip_bot = einsum('jlnpq,qrs->jlnprs',flip_bot,bot[4])
            flip_bot = einsum('jlnprs,stu->jlnprtu',flip_bot,bot[5]).remove_empty_ind(6)
    # Reverse the horizontal ordering of the contracted env legs
    if top is not None:
        flip_top = flip_top.transpose([5,3,4,1,2,0])
    else: flip_top = None
    if bot is not None:
        flip_bot = flip_bot.transpose([5,3,4,1,2,0])
    else: flip_bot = None
    # Calculation energy contribution from first MPO -------
    E1 = calc_local_nn_op_lb(ops_col[row][0],
                             cell_bra,
                             cell_ket,
                             top,
                             bot,
                             cell_lbmpo,
                             cell_rbmpo,
                             normalize=normalize,
                             chi=chi,
                             contracted_env=contracted_env)
    # Calculate energy contribution from third MPO ---------
    # (must flip horizontally so we can use the lb procedure
    E2 = calc_local_nn_op_lb(ops_col[row][1],
                             flip_bra,
                             flip_ket,
                             flip_top,
                             flip_bot,
                             cell_rbmpo,
                             cell_lbmpo,
                             normalize=normalize,
                             chi=chi,
                             contracted_env=True)
    # Calculate energy contribution from third MPO -----------
    E3 = calc_local_nn_op_ru(ops_col[row][2],
                             cell_bra,
                             cell_ket,
                             top,
                             bot,
                             cell_lbmpo,
                             cell_rbmpo,
                             normalize=normalize,
                             chi=chi,
                             contracted_env=contracted_env)
    # Return resulting energy --------------------------------
    E = E1+E2+E3
    return E
def calc_single_column_nn_op(peps,left_bmpo,right_bmpo,ops_col,normalize=True,ket=None,chi=10,contracted_env=False):
    """
    Calculate contribution to an operator with next nearest (nn) neighbr interactions
    from two neighboring columns of a peps
    Args:
        peps: List of list of ndarrays
            The needed columns of the peps
        left_bmpo:
            The boundary mpo to the left of the two peps columns
        right_bmpo:
            The boundary mpo to the right of the two peps columns
        ops_col: list of list of ndarrays
            The operators acting on next nearest neighboring sites
            within the two columns
    Kwargs:
        normalize: bool
            Whether to normalize the operator evaluations
        ket: List of list of ndarrays
            The needed columns of the ket
        contracted_env: bool
            Whether to contract the upper and lower environment
            or leave it as a boundary mps
    Returns:
        E: float
            The operator value for interactions between the two columns
    """
    # Calculate top and bottom environments
    top_envs = calc_top_envs2(peps,left_bmpo,right_bmpo,ket=ket,chi=chi,contracted_env=contracted_env)
    bot_envs = calc_bot_envs2(peps,left_bmpo,right_bmpo,ket=ket,chi=chi,contracted_env=contracted_env)
    # Calculate Energy
    # One entry per operator row; allocated via the peps backend so the
    # result lives on the same backend as the tensors
    E = peps[0][0].backend.zeros(len(ops_col))
    for row in range(len(ops_col)):
        E[row] = calc_local_nn_op(row,
                                  peps,
                                  ops_col,
                                  left_bmpo,
                                  right_bmpo,
                                  bot_envs,
                                  top_envs,
                                  ket=ket,
                                  chi=chi,
                                  normalize=normalize,
                                  contracted_env=contracted_env)
    return E
def calc_single_column_op(peps_col,left_bmpo,right_bmpo,ops_col,
                          normalize=True,ket_col=None,in_mem=True):
    """
    Calculate contribution to operator from interactions within
    a single column.
    Args:
        peps_col:
            A single column of the peps
        left_bmpo:
            The boundary mpo to the left of the peps column
        right_bmpo:
            The boundary mpo to the right of the peps column
        ops:
            The operators acting on nearest neighboring sites
            within the column
    Kwargs:
        normalize: bool
            Whether to normalize each local operator evaluation
        ket_col:
            A single ket column; the bra column is reused when omitted
        in_mem: bool
            if True, then the peps tensors will all be loaded into memory
            and all calculations will be done with them in memory
    Returns:
        A backend array with one (possibly normalized) local operator
        value per row of ops_col
    """
    # Calculate top and bottom environments
    top_envs = calc_top_envs(peps_col,left_bmpo,right_bmpo,ket_col=ket_col,in_mem=in_mem)
    bot_envs = calc_bot_envs(peps_col,left_bmpo,right_bmpo,ket_col=ket_col,in_mem=in_mem)
    # Set up array to hold resulting energies
    E = peps_col[0].backend.zeros(len(ops_col))
    # Loop through rows calculating local energies
    for row in range(len(ops_col)):
        # Calculate environment
        res = calc_N(row,peps_col,left_bmpo,right_bmpo,top_envs,bot_envs,
                     hermitian=False,
                     positive=False,
                     ket_col=ket_col,
                     in_mem=in_mem)
        # Only the physical legs and the environment tensor N are needed here
        _,phys_b,phys_t,_,_,phys_bk,phys_tk,_,N = res
        # Calc the local operator
        E[row] = calc_local_op(phys_b,phys_t,N,ops_col[row],normalize=normalize,phys_b_ket=phys_bk,phys_t_ket=phys_tk)
    # Return the energy
    return E
def calc_all_column_op(peps,ops,chi=10,return_sum=True,normalize=True,ket=None,allow_normalize=False,in_mem=True):
"""
Calculate contribution to operator from interactions within all columns,
ignoring interactions between columns
Args:
peps : A list of lists of peps tensors
The PEPS to be normalized
ops :
The operator to be contracted with the peps
Kwargs:
chi : int
The maximum bond dimension for the boundary mpo
return_sum : bool
Whether to return the summation of all energies or
a | |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 11:49:07 2020
@author: rubenandrebarreiro
"""
# Definition of the necessary Python Libraries
# a) General Libraries:
# Import NumPy Python's Library as np
import numpy as np
# Import Math Python's Library as mathematics
import math as mathematics
# Import SciKit-Learn as skl
import sklearn as skl
# Import Train/Test Split,
# from the SciKit-Learn's Model Selection Module,
# as split_train_test_sets
from sklearn.model_selection import train_test_split as split_train_test_sets
# Import Model Selection Sub-Module, from SciKit-Learn Python's Library,
# as skl_model_selection
from sklearn import model_selection as skl_model_selection
# Import Support Vector Classifier,
# from the SciKit-Learn's Support Vector Machine Module,
# as support_vector_machine
from sklearn.svm import SVC as support_vector_machine
# Import PyPlot Sub-Module, from Matplotlib Python's Library as plt
import matplotlib.pyplot as plt
# Import System Python's Library
import sys
# Append the Path "../" to the System's Path
sys.path.append('../')
# Import the Support Vector Machine Plot Functions,
# from the Customised T4_Aux Python's Library
from files.T4aux import plot_svm_mark_wrong_x as plot_support_vector_machine_corrects_o_wrongs_x
# Import Warnings
import warnings
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Ignore Warnings
warnings.filterwarnings("ignore")
# The file of the Dataset
dataset_file = "../files/data/T4data.txt"
# Load the Data for Dataset with NumPy function loadtxt (tab-separated)
dataset_not_random = np.loadtxt(dataset_file, delimiter="\t")
# Shuffle the Dataset rows (no fixed seed, so each run differs)
dataset_random = skl.utils.shuffle(dataset_not_random)
# Select the Classes of the Dataset (last column), randomized
ys_dataset_classes = dataset_random[:,-1]
# Select the Features of the Dataset (all but last column), randomized
xs_dataset_features = dataset_random[:,0:-1]
# The size of the Data for Dataset, randomized
dataset_size = len(xs_dataset_features)
# Computing the Means of the Dataset, randomized
dataset_means = np.mean(xs_dataset_features, axis=0)
# Computing the Standard Deviations of the Dataset, randomized
dataset_stdevs = np.std(xs_dataset_features, axis=0)
# Standardize the Dataset, randomized
# NOTE(review): standardizing with full-dataset statistics BEFORE the
# train/test split leaks test-set information into training; normally the
# means/stdevs should be computed from the training split only.
xs_dataset_features_std = ( ( xs_dataset_features - dataset_means ) / dataset_stdevs )
# Split the Dataset Standardized, into Training and Testing Sets,
# by a ratio of 50% for each one
xs_train_features_std, xs_test_features_std, ys_train_classes, ys_test_classes = split_train_test_sets(xs_dataset_features_std, ys_dataset_classes, test_size=0.5)
# The number of Samples of the Testing Set
num_samples_test_set = len(xs_test_features_std)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Constants #1
# The Number of Features (i.e., 2 Features)
NUM_FEATURES = xs_train_features_std.shape[1]
# The Number of Folds, for Stratified K Folds, in Cross-Validation
NUM_FOLDS = 10
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The Function to Plot the Training and Validation, for the Support Vector Machine
def plot_train_valid_error_support_vector_machine(support_vector_machine_kernel, train_error_values, valid_error_values):
    """Plot training vs. cross-validation error curves of an SVM against log(C).

    support_vector_machine_kernel: sequence whose first entry is the sklearn
        kernel identifier ('poly', 'sigmoid' or 'rbf').
    train_error_values / valid_error_values: 2-column arrays of
        (log(C), error) points.

    The figure is saved under imgs/ and also shown on screen.
    """
    plt.figure(figsize=(8, 8), frameon=True)
    # Continuous lines: training errors in blue, validation errors in orange.
    plt.plot(train_error_values[:,0], train_error_values[:,1],'-', color="dodgerblue")
    plt.plot(valid_error_values[:,0], valid_error_values[:,1],'-', color="orange")
    # Clip the axes to the observed ranges of the two curves.
    plt.axis([min(valid_error_values[:,0]), max(valid_error_values[:,0]), min(train_error_values[:,1]), max(valid_error_values[:,1])])
    plt.xlabel("log(C)")
    plt.ylabel("Training/Validation Errors")
    # Map the sklearn kernel identifier to a human-readable name;
    # unknown identifiers fall back to an empty title component.
    kernel_display_names = {"poly": "Polynomial", "sigmoid": "Sigmoid", "rbf": "Gaussian RBF"}
    support_vector_machine_kernel_name = kernel_display_names.get(support_vector_machine_kernel[0], "")
    plt.title('Support Vector Machine, with Kernel=\"{}\", varying the C parameter\n\nTraining Error (Blue) / Cross-Validation Error (Orange)'.format(support_vector_machine_kernel_name))
    plt.savefig('imgs/{}-training-validation-errors.png'.format(support_vector_machine_kernel[0]), dpi=600)
    plt.show()
    plt.close()
# The Function to Compute and Return the Errors for Training and Validation Sets,
# for the Support Vector Machines Classifier
def compute_svm_kernel_errors(xs, ys, train_idx, valid_idx, current_support_vector_machine_kernel, c_param_value, num_features):
    """Fit one SVM cross-validation fold and return its errors.

    current_support_vector_machine_kernel: sequence of
        [kernel name, degree, gamma, coef0 (the r value)].
    c_param_value: the regularization parameter C for this fold.
    num_features: accepted for interface symmetry; currently unused.

    Returns (training_error, validation_error), each computed as 1 - accuracy.
    Side effect: saves a decision-boundary plot of the training fold to imgs/.
    """
    support_vector_machine_classifier = support_vector_machine(kernel=current_support_vector_machine_kernel[0], degree=current_support_vector_machine_kernel[1], gamma=current_support_vector_machine_kernel[2], coef0=current_support_vector_machine_kernel[3], C=c_param_value)
    # Fit on the first two feature columns of the training fold.
    # NOTE(review): fit() uses xs[train_idx,:2] while score() below uses all
    # columns of xs - these only agree when xs has exactly 2 features; confirm.
    support_vector_machine_classifier.fit(xs[train_idx,:2], ys[train_idx])
    # BUG FIX: use the c_param_value parameter here; the original referenced
    # the undefined (module-global) name c_param_current_value.
    plot_support_vector_machine_image_filename = "imgs/support-vector-machine-plot-c-{}-{}-kernel.png".format(c_param_value, current_support_vector_machine_kernel[0])
    # Accuracy (score) on the training and validation folds.
    support_vector_machine_accuracy_train = support_vector_machine_classifier.score(xs[train_idx], ys[train_idx])
    support_vector_machine_accuracy_valid = support_vector_machine_classifier.score(xs[valid_idx], ys[valid_idx])
    # Errors are the complements of the accuracies.
    support_vector_machine_train_error = ( 1 - support_vector_machine_accuracy_train )
    support_vector_machine_valid_error = ( 1 - support_vector_machine_accuracy_valid )
    # Assemble an (n_train_samples, 3) matrix (2 features + 1 class) for plotting.
    dataset_train = np.zeros((len(xs[train_idx,:2]), 3))
    dataset_train[:,0:2] = xs[train_idx,:2][:,:]
    dataset_train[:,2] = ys[train_idx]
    # BUG FIX (same as above): pass c_param_value, not c_param_current_value.
    plot_support_vector_machine_corrects_o_wrongs_x(False, dataset_train, support_vector_machine_classifier, current_support_vector_machine_kernel[0], plot_support_vector_machine_image_filename, c_param_value)
    return support_vector_machine_train_error, support_vector_machine_valid_error
# The Function to Refit the Support Vector Machine Classifier on the whole
# Training Set, using the best C value found, and Plot its Decision Boundary
def plot_svm_kernel_best_c(current_support_vector_machine_kernel, best_c_param_value, num_features):
    """Refit an SVM on the full standardized training set with the best C and plot it.

    current_support_vector_machine_kernel: [kernel name, degree, gamma, coef0].
    best_c_param_value: the C value selected by cross-validation.
    num_features: accepted for interface symmetry; currently unused.

    Reads the module-level training data (xs_train_features_std,
    ys_train_classes) and saves the plot to imgs/.
    """
    kernel_name = current_support_vector_machine_kernel[0]
    support_vector_machine_classifier = support_vector_machine(
        kernel=kernel_name,
        degree=current_support_vector_machine_kernel[1],
        gamma=current_support_vector_machine_kernel[2],
        coef0=current_support_vector_machine_kernel[3],
        C=best_c_param_value)
    # Fit on the whole (standardized) training set.
    support_vector_machine_classifier.fit(xs_train_features_std, ys_train_classes)
    plot_support_vector_machine_image_filename = "imgs/support-vector-machine-plot-best-c-{}-{}-kernel.png".format(best_c_param_value, kernel_name)
    # (n_train_samples, 3) matrix holding 2 features + 1 class, for the plot helper.
    num_train_samples = len(xs_train_features_std)
    dataset_train = np.zeros((num_train_samples, 3))
    dataset_train[:,0:2] = xs_train_features_std[:,:]
    dataset_train[:,2] = ys_train_classes
    plot_support_vector_machine_corrects_o_wrongs_x(True, dataset_train, support_vector_machine_classifier, kernel_name, plot_support_vector_machine_image_filename, best_c_param_value)
# The Function to Estimate the True/Test Error of the Testing Set,
# for the Support Vector Machine Classifier
def estimate_svm_kernel_true_test_error(xs_train, ys_train, xs_test, ys_test, num_features, current_support_vector_machine_kernel, best_c_param_value=1e12):
    """Fit on the whole training set and estimate the true/test error on the test set.

    current_support_vector_machine_kernel: [kernel name, degree, gamma, coef0].
    num_features: accepted for interface symmetry; currently unused.

    Returns a 3-tuple:
        (predicted test classes, number of misclassified test samples,
         estimated true/test error = 1 - test accuracy).
    """
    support_vector_machine_classifier = support_vector_machine(
        kernel=current_support_vector_machine_kernel[0],
        degree=current_support_vector_machine_kernel[1],
        gamma=current_support_vector_machine_kernel[2],
        coef0=current_support_vector_machine_kernel[3],
        C=best_c_param_value)
    # Train on the full training set (training + validation folds together).
    support_vector_machine_classifier.fit(xs_train, ys_train)
    # Estimated error is the complement of the test-set accuracy.
    support_vector_machine_estimated_true_test_error = ( 1 - support_vector_machine_classifier.score(xs_test, ys_test) )
    # Predicted class labels for every test sample.
    ys_classes_support_vector_machine_prediction_xs_test = support_vector_machine_classifier.predict(xs_test)
    # Count the actual misclassifications by comparing prediction vs. truth.
    support_vector_machine_num_incorrect_predictions = 0
    for sample_index in range(len(xs_test)):
        if(ys_classes_support_vector_machine_prediction_xs_test[sample_index] != ys_test[sample_index]):
            support_vector_machine_num_incorrect_predictions += 1
    return ys_classes_support_vector_machine_prediction_xs_test, support_vector_machine_num_incorrect_predictions, support_vector_machine_estimated_true_test_error
def aproximate_normal_test(num_real_errors, prob_making_error, num_samples_test_set):
    """Approximate-normal (binomial) test: 95% deviation interval for the error count.

    Returns (lower, upper) = -/+ 1.96 * sqrt(N * p * (1 - p)), where
    p = num_real_errors / N and N = num_samples_test_set.

    NOTE(review): prob_making_error is unused - the error rate is recomputed
    from the counts; confirm against the caller.
    """
    error_rate = num_real_errors / num_samples_test_set
    # Standard deviation of the binomial error count, 1.96 sigma for 95%.
    sigma = mathematics.sqrt(num_samples_test_set * error_rate * (1 - error_rate))
    deviation = 1.96 * sigma
    return -deviation, deviation
def mc_nemar_test(predict_classes_xs_test_1, predict_classes_xs_test_2):
    """McNemar's test statistic (with continuity correction) for two classifiers.

    Compares both prediction vectors against the module-level true labels
    ys_test_classes over the whole test set.

    Raises ZeroDivisionError when there are no discordant pairs (same as the
    original behavior).
    """
    total_samples = len(xs_test_features_std)
    # Discordant pairs: exactly one of the two classifiers is right.
    only_second_right = 0
    only_first_right = 0
    for idx in range(total_samples):
        truth = ys_test_classes[idx]
        first_correct = (predict_classes_xs_test_1[idx] == truth)
        second_correct = (predict_classes_xs_test_2[idx] == truth)
        if second_correct and not first_correct:
            only_second_right += 1
        elif first_correct and not second_correct:
            only_first_right += 1
    # McNemar statistic: ((|b - c| - 1)^2) / (b + c).
    numerator = ( ( abs(only_second_right - only_first_right) - 1 ) ** 2 )
    denominator = ( only_second_right + only_first_right )
    return numerator / denominator
# Setting the Kernels and their Parameters,
# for the Support Vector Machines Classifiers
# 1. Polynomial,
#    K(x, y) = (gamma * x^T y + r)^d,
#    with degree d = 3, gamma of 0.5 and r of 0
#    (the r value is set in the coef0 parameter and is 0 by default).
# 2. Sigmoid,
#    K(x, y) = tanh(gamma * x^T y + r),
#    with gamma of 0.5 and r of -2.
# 3. Gaussian RBF,
#    K(x, y) = exp(-gamma * ||x - y||^2), with gamma of 0.5.
# Parameters of the Kernels of the Support Vector Machine Classifiers
# - 1) Name of the Kernel;
# - 2) Degree of the Kernel;
# - 3) Gamma of the Kernel;
# - 4) r value (coef_0) of the Kernel;
support_vector_machine_kernels | |
# sfepy/solvers/ts_solvers.py
"""
Time stepping solvers.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import (get_default, output, assert_,
Struct, IndexedStruct)
from sfepy.base.timing import Timer
from sfepy.linalg.utils import output_array_stats
from sfepy.solvers.solvers import TimeSteppingSolver
from sfepy.solvers.ts import TimeStepper, VariableTimeStepper
def standard_ts_call(call):
    """
    Decorator handling argument preparation and timing for time-stepping
    solvers.

    Fills in the default nonlinear solver and no-op step callbacks, times the
    wrapped call and, when a `status` mapping is given, stores the elapsed
    time and the number of steps in it.
    """
    def _standard_ts_call(self, vec0=None, nls=None,
                          init_fun=None, prestep_fun=None, poststep_fun=None,
                          status=None, **kwargs):
        stopwatch = Timer(start=True)

        # Fall back to the solver's own nonlinear solver and to identity /
        # no-op callbacks when none are supplied.
        nls = get_default(nls, self.nls,
                          'nonlinear solver has to be specified!')
        init_fun = get_default(init_fun, lambda ts, vec0: vec0)
        prestep_fun = get_default(prestep_fun, lambda ts, vec: None)
        poststep_fun = get_default(poststep_fun, lambda ts, vec: None)

        retval = call(self, vec0=vec0, nls=nls, init_fun=init_fun,
                      prestep_fun=prestep_fun, poststep_fun=poststep_fun,
                      status=status, **kwargs)

        seconds = stopwatch.stop()
        if status is not None:
            status['time'] = seconds
            status['n_step'] = self.ts.n_step

        return retval

    return _standard_ts_call
#
# General solvers.
#
class StationarySolver(TimeSteppingSolver):
    """
    Solver for stationary problems without time stepping.

    Wraps a single nonlinear solve in the time-stepping solver interface so
    that stationary problems can be driven by the same code paths as
    transient ones.
    """
    name = 'ts.stationary'

    def __init__(self, conf, nls=None, context=None, **kwargs):
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)
        # A degenerate, single-step, quasistatic "time" axis.
        self.ts = TimeStepper(0.0, 1.0, n_step=1, is_quasistatic=True)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None, **kwargs):
        """Run one init / pre-step / solve / post-step cycle and return the solution."""
        solver = get_default(nls, self.nls)

        vec0 = init_fun(self.ts, vec0)
        prestep_fun(self.ts, vec0)

        solution = solver(vec0)

        poststep_fun(self.ts, solution)
        return solution
class SimpleTimeSteppingSolver(TimeSteppingSolver):
    """
    Implicit time stepping solver with a fixed time step.
    """
    name = 'ts.simple'

    _parameters = [
        ('t0', 'float', 0.0, False,
         'The initial time.'),
        ('t1', 'float', 1.0, False,
         'The final time.'),
        ('dt', 'float', None, False,
         'The time step. Used if `n_step` is not given.'),
        ('n_step', 'int', 10, False,
         'The number of time steps. Has precedence over `dt`.'),
        ('quasistatic', 'bool', False, False,
         """If True, assume a quasistatic time-stepping. Then the non-linear
         solver is invoked also for the initial time."""),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """Create the fixed-step time stepper and the step-info banner format."""
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)
        self.ts = TimeStepper.from_conf(self.conf)

        # Pad the step counters in the progress banner to n_digit digits.
        nd = self.ts.n_digit
        format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)

        self.format = format
        self.verbose = self.conf.verbose

    def solve_step0(self, nls, vec0):
        """Handle the initial time: solve when quasistatic, otherwise just
        report the initial residual and keep the initial state."""
        if self.conf.quasistatic:
            vec = nls(vec0)
        else:
            # Dynamic run: the initial state is taken as-is; only report how
            # far it is from equilibrium.
            res = nls.fun(vec0)
            err = nm.linalg.norm(res)
            output('initial residual: %e' % err, verbose=self.verbose)
            vec = vec0.copy()
        return vec

    def solve_step(self, ts, nls, vec, prestep_fun=None):
        """Solve a single time step (one nonlinear solve)."""
        return nls(vec)

    def output_step_info(self, ts):
        # Progress banner for the current step.
        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)

    @standard_ts_call
    def __call__(self, vec0=None, nls=None, init_fun=None, prestep_fun=None,
                 poststep_fun=None, status=None, **kwargs):
        """
        Solve the time-dependent problem.
        """
        ts = self.ts
        nls = get_default(nls, self.nls)

        vec0 = init_fun(ts, vec0)

        self.output_step_info(ts)
        if ts.step == 0:
            # Fresh run (not a restart): handle the initial time separately.
            prestep_fun(ts, vec0)

            vec = self.solve_step0(nls, vec0)

            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        # Remaining steps: pre-step hook, solve, post-step hook.
        for step, time in ts.iter_from(ts.step):
            self.output_step_info(ts)

            prestep_fun(ts, vec)

            vect = self.solve_step(ts, nls, vec, prestep_fun)

            poststep_fun(ts, vect)

            vec = vect

        return vec
def get_min_dt(adt):
    """Return the smallest time step reachable by repeated reduction.

    Starting from the current cumulative reduction `adt.red`, apply
    `adt.red_factor` until the reduction drops below `adt.red_max`, then
    scale the reference step `adt.dt0` by the final reduction.
    """
    reduction = adt.red
    # Keep shrinking until we fall below the maximum-reduction floor.
    while reduction >= adt.red_max:
        reduction *= adt.red_factor
    return adt.dt0 * reduction
def adapt_time_step(ts, status, adt, context=None, verbose=False):
    """
    Adapt the time step of `ts` according to the exit status of the
    nonlinear solver.

    The time step dt is reduced, if the nonlinear solver did not converge. If it
    converged in less then a specified number of iterations for several time
    steps, the time step is increased. This is governed by the following
    parameters:

    - `red_factor` : time step reduction factor
    - `red_max` : maximum time step reduction factor
    - `inc_factor` : time step increase factor
    - `inc_on_iter` : increase time step if the nonlinear solver converged in
      less than this amount of iterations...
    - `inc_wait` : ...for this number of consecutive time steps

    Parameters
    ----------
    ts : VariableTimeStepper instance
        The time stepper.
    status : IndexedStruct instance
        The nonlinear solver exit status.
    adt : Struct instance
        The object with the adaptivity parameters of the time-stepping solver
        such as `red_factor` (see above) as attributes.
    context : object, optional
        The context can be used in user-defined adaptivity functions. Not used
        here.

    Returns
    -------
    is_break : bool
        If True, the adaptivity loop should stop.
    """
    if status.condition != 0:
        # The solver failed to converge: shrink the step and retry, unless
        # the accumulated reduction already fell below `red_max` (give up).
        adt.red = adt.red * adt.red_factor
        if adt.red < adt.red_max:
            return True

        ts.set_time_step(adt.dt0 * adt.red, update_time=True)
        output('----- new time step: %e -----' % ts.dt,
               verbose=verbose)
        adt.wait = 0
        return False

    # Converged: the step is accepted, so the adaptivity loop always stops.
    # BUG FIX: the original set is_break = True only in the slow-convergence
    # branch (mis-indented under the inner else), which made the caller's
    # `while 1` retry loop never terminate on fast convergence.
    if status.n_iter <= adt.inc_on_iter:
        # Fast convergence: after `inc_wait` such consecutive steps, grow
        # the step back (never beyond the reference step, i.e. red < 1).
        adt.wait += 1

        if adt.wait > adt.inc_wait:
            if adt.red < 1.0:
                adt.red = adt.red * adt.inc_factor
                ts.set_time_step(adt.dt0 * adt.red)
                output('+++++ new time step: %e +++++' % ts.dt,
                       verbose=verbose)
            adt.wait = 0

    else:
        # Converged, but slowly - reset the fast-convergence streak.
        adt.wait = 0

    return True
class AdaptiveTimeSteppingSolver(SimpleTimeSteppingSolver):
    """
    Implicit time stepping solver with an adaptive time step.

    Either the built-in or user supplied function can be used to adapt the time
    step.
    """
    name = 'ts.adaptive'

    _parameters = SimpleTimeSteppingSolver._parameters + [
        ('adapt_fun', 'callable(ts, status, adt, context, verbose)',
         None, False,
         """If given, use this function to set the time step in `ts`. The
         function return value is a bool - if True, the adaptivity loop
         should stop. The other parameters below are collected in `adt`,
         `status` is the nonlinear solver status, `context` is
         a user-defined context and `verbose` is a verbosity flag.
         Solvers created by
         :class:`Problem <sfepy.discrete.problem.Problem>` use the
         Problem instance as the context."""),
        ('dt_red_factor', 'float', 0.2, False,
         'The time step reduction factor.'),
        ('dt_red_max', 'float', 1e-3, False,
         'The maximum time step reduction factor.'),
        ('dt_inc_factor', 'float', 1.25, False,
         'The time step increase factor.'),
        ('dt_inc_on_iter', 'int', 4, False,
         """Increase the time step if the nonlinear solver converged in less
         than this amount of iterations for `dt_inc_wait` consecutive time
         steps."""),
        ('dt_inc_wait', 'int', 5, False,
         'The number of consecutive time steps, see `dt_inc_on_iter`.'),
    ]

    def __init__(self, conf, nls=None, context=None, **kwargs):
        """Create the variable stepper and collect the adaptivity parameters."""
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)

        self.ts = VariableTimeStepper.from_conf(self.conf)

        # Adaptivity state: `red` is the current cumulative reduction factor,
        # `wait` counts consecutive fast-converged steps, `dt0` is the
        # reference (default) time step.
        get = self.conf.get
        adt = Struct(red_factor=get('dt_red_factor', 0.2),
                     red_max=get('dt_red_max', 1e-3),
                     inc_factor=get('dt_inc_factor', 1.25),
                     inc_on_iter=get('dt_inc_on_iter', 4),
                     inc_wait=get('dt_inc_wait', 5),
                     red=1.0, wait=0, dt0=0.0)
        self.adt = adt

        adt.dt0 = self.ts.get_default_time_step()
        # Make the printed counters wide enough for the smallest possible dt.
        self.ts.set_n_digit_from_min_dt(get_min_dt(adt))

        self.format = '====== time %e (dt %e, wait %d, step %d of %d) ====='
        self.verbose = self.conf.verbose

        # Use the user-supplied adaptivity function when given, the
        # module-level adapt_time_step() otherwise.
        self.adapt_time_step = self.conf.adapt_fun
        if self.adapt_time_step is None:
            self.adapt_time_step = adapt_time_step

    def solve_step(self, ts, nls, vec, prestep_fun):
        """
        Solve a single time step.
        """
        status = IndexedStruct(n_iter=0, condition=0)
        while 1:
            vect = nls(vec, status=status)

            # The adaptivity function may change ts.dt; it returns True when
            # the loop should stop (step accepted, or reduction exhausted).
            is_break = self.adapt_time_step(ts, status, self.adt, self.context,
                                            verbose=self.verbose)
            if is_break:
                break

            # dt changed - rerun the pre-step hook before retrying the step.
            prestep_fun(ts, vec)

        return vect

    def output_step_info(self, ts):
        # Progress banner including the current dt and the increase-wait counter.
        output(self.format % (ts.time, ts.dt, self.adt.wait,
                              ts.step + 1, ts.n_step),
               verbose=self.verbose)
#
# Elastodynamics solvers.
#
def gen_multi_vec_packing(size, num):
    """Make (unpack, pack) helpers for a vector made of `num` equal blocks.

    unpack(vec) returns the list of the `num` consecutive sub-vectors;
    pack(*blocks) concatenates them back into one vector.
    `size` must be an exact multiple of `num`.
    """
    assert_((size % num) == 0)
    block_len = size // num

    def unpack(vec):
        return [vec[start:start + block_len]
                for start in range(0, size, block_len)]

    def pack(*args):
        return nm.concatenate(args)

    return unpack, pack
def _cache(obj, attr, dep):
def decorate(fun):
def new_fun(*args, **kwargs):
if dep:
val = getattr(obj, attr)
if val is None:
val = fun(*args, **kwargs)
setattr(obj, attr, val)
else:
val = fun(*args, **kwargs)
return val
return new_fun
return decorate
class ElastodynamicsBaseTS(TimeSteppingSolver):
"""
Base class for elastodynamics solvers.
Assumes block-diagonal matrix in `u`, `v`, `a`.
"""
    def __init__(self, conf, nls=None, context=None, **kwargs):
        """Set up the fixed-step stepper and the (M, C, K) caches."""
        TimeSteppingSolver.__init__(self, conf, nls=nls, context=context,
                                    **kwargs)
        # Elastodynamics is inherently dynamic - force a non-quasistatic run.
        self.conf.quasistatic = False
        self.ts = TimeStepper.from_conf(self.conf)

        # Step-info banner padded to the number of digits of n_step.
        nd = self.ts.n_digit
        format = '====== time %%e (step %%%dd of %%%dd) =====' % (nd, nd)

        self.format = format
        self.verbose = self.conf.verbose
        # Caches used when conf.is_linear: the constant (M, C, K) matrices
        # and the assembled system matrix.
        self.constant_matrices = None
        self.matrix = None
    def get_matrices(self, nls, vec):
        """Return the (M, C, K) matrices extracted from the tangent matrix.

        Assumes a block-diagonal tangent in the packed state [u, v, a] (see
        the class docstring): K from the u-block, C from the v-block, M from
        the a-block. For linear problems the matrices are assembled once and
        cached in `self.constant_matrices`.
        """
        if self.conf.is_linear and self.constant_matrices is not None:
            # Linear problem: reuse the matrices assembled earlier.
            out = self.constant_matrices

        else:
            aux = nls.fun_grad(vec)

            # The packed state consists of three equal-size blocks.
            assert_((len(vec) % 3) == 0)
            i3 = len(vec) // 3

            # Diagonal blocks: stiffness K (u), damping C (v), mass M (a).
            K = aux[:i3, :i3]
            C = aux[i3:2*i3, i3:2*i3]
            M = aux[2*i3:, 2*i3:]

            out = (M, C, K)
            if self.conf.is_linear:
                # Drop explicitly stored zeros and cache for later steps.
                M.eliminate_zeros()
                C.eliminate_zeros()
                K.eliminate_zeros()

                self.constant_matrices = (M, C, K)

        return out
    def get_a0(self, nls, u0, v0):
        """Compute the initial acceleration by solving M a0 = -r(u0, v0, 0)."""
        vec = nm.r_[u0, v0, nm.zeros_like(u0)]

        aux = nls.fun(vec)
        i3 = len(u0)
        # Total residual = sum of the three block residuals.
        r = aux[:i3] + aux[i3:2*i3] + aux[2*i3:]
        M = self.get_matrices(nls, vec)[0]
        a0 = nls.lin_solver(-r, mtx=M)
        output_array_stats(a0, 'initial acceleration', verbose=self.verbose)
        return a0
    def get_initial_vec(self, nls, vec0, init_fun, prestep_fun, poststep_fun):
        """Prepare the packed initial state [u, v, a] plus (un)packing helpers.

        On a fresh run (step 0), the initial acceleration is computed from
        the equation of motion and the post-step hook is called before
        advancing; on a restart, `vec0` is returned as-is.
        """
        ts = self.ts

        vec0 = init_fun(ts, vec0)
        unpack, pack = gen_multi_vec_packing(len(vec0), 3)

        output(self.format % (ts.time, ts.step + 1, ts.n_step),
               verbose=self.verbose)
        if ts.step == 0:
            prestep_fun(ts, vec0)

            u0, v0, _ = unpack(vec0)

            ut = u0
            vt = v0
            # Consistent initial acceleration from M a0 = -r(u0, v0).
            at = self.get_a0(nls, u0, v0)

            vec = pack(ut, vt, at)
            poststep_fun(ts, vec)
            ts.advance()

        else:
            vec = vec0

        return vec, unpack, pack
def _create_nlst_a(self, nls, dt, ufun, vfun, cc, | |
if min(self.input_resolution) <= self.win_size:
self.shift_size = 0
self.win_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.win_size, "shift_size must in 0-win_size"
self.norm1 = norm_layer(dim)
self.norm_kv = norm_layer(dim)
self.cross_attn = WindowAttention(
dim, win_size=to_2tuple(self.win_size), num_heads=num_heads,qkv_bias=qkv_bias,
qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,token_projection='linear_concat')
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,act_layer=act_layer, drop=drop) if token_mlp=='ffn' else LeFF(dim,mlp_hidden_dim,act_layer=act_layer, drop=drop)
    def extra_repr(self) -> str:
        # Summary of the block's hyper-parameters shown by repr()/print().
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"win_size={self.win_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def forward(self, x, attn_kv=None, mask=None):
B, L, C = x.shape
H = int(math.sqrt(L))
W = int(math.sqrt(L))
## input mask
if mask != None:
input_mask = F.interpolate(mask, size=(H,W)).permute(0,2,3,1)
input_mask_windows = window_partition(input_mask, self.win_size) # nW, win_size, win_size, 1
attn_mask = input_mask_windows.view(-1, self.win_size * self.win_size) # nW, win_size*win_size
attn_mask = attn_mask.unsqueeze(2)*attn_mask.unsqueeze(1) # nW, win_size*win_size, win_size*win_size
attn_mask = attn_mask.masked_fill(attn_mask!=0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
## shift mask
if self.shift_size > 0:
# calculate attention mask for SW-MSA
shift_mask = torch.zeros((1, H, W, 1)).type_as(x)
h_slices = (slice(0, -self.win_size),
slice(-self.win_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.win_size),
slice(-self.win_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
shift_mask[:, h, w, :] = cnt
cnt += 1
shift_mask_windows = window_partition(shift_mask, self.win_size) # nW, win_size, win_size, 1
shift_mask_windows = shift_mask_windows.view(-1, self.win_size * self.win_size) # nW, win_size*win_size
shift_attn_mask = shift_mask_windows.unsqueeze(1) - shift_mask_windows.unsqueeze(2) # nW, win_size*win_size, win_size*win_size
shift_attn_mask = shift_attn_mask.masked_fill(shift_attn_mask != 0, float(-100.0)).masked_fill(shift_attn_mask == 0, float(0.0))
attn_mask = attn_mask + shift_attn_mask if attn_mask is not None else shift_attn_mask
attn_kv = attn_kv.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_kv = torch.roll(attn_kv, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_kv = attn_kv
# partition windows
attn_kv_windows = window_partition(shifted_kv, self.win_size) # nW*B, win_size, win_size, C
attn_kv_windows = attn_kv_windows.view(-1, self.win_size * self.win_size, C) # nW*B, win_size*win_size, C
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.win_size) # nW*B, win_size, win_size, C
x_windows = x_windows.view(-1, self.win_size * self.win_size, C) # nW*B, win_size*win_size, C
### multi-head cross-attention
shortcut1 = x_windows
# prenorm
x_windows = self.norm1(x_windows)
attn_kv_windows = self.norm_kv(attn_kv_windows)
# W-MCA/SW-MCA
attn_windows = self.cross_attn(x_windows, attn_kv=attn_kv_windows,mask=attn_mask) # nW*B, win_size*win_size, C
attn_windows = shortcut1 + self.drop_path(attn_windows)
# merge windows
attn_windows = attn_windows.view(-1, self.win_size, self.win_size, C)
shifted_x = window_reverse(attn_windows, self.win_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = x + self.drop_path(self.mlp(self.norm2(x)))
del attn_mask
return x
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
flops += self.cross_attn.flops(H, W)
# norm2
flops += self.dim * H * W
# mlp
flops += self.mlp.flops(H,W)
print("LeWin:{%.2f}"%(flops/1e9))
return flops
#########################################
########### Basic layer of Uformer ################
class BasicUformerLayer(nn.Module):
    """A stack of `depth` LeWin transformer blocks forming one Uformer stage.

    Even-indexed blocks use non-shifted windows and odd-indexed blocks use
    windows shifted by win_size // 2 (Swin-style W-MSA / SW-MSA alternation).
    """
    def __init__(self, dim, output_dim, input_resolution, depth, num_heads, win_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, use_checkpoint=False,
                 token_projection='linear',token_mlp='ffn',se_layer=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block stochastic-depth rate: either a per-depth schedule (list)
        # or one shared scalar rate.
        rates = drop_path if isinstance(drop_path, list) else [drop_path] * depth
        self.blocks = nn.ModuleList([
            LeWinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                  num_heads=num_heads, win_size=win_size,
                                  shift_size=0 if (idx % 2 == 0) else win_size // 2,
                                  mlp_ratio=mlp_ratio,
                                  qkv_bias=qkv_bias, qk_scale=qk_scale,
                                  drop=drop, attn_drop=attn_drop,
                                  drop_path=rates[idx],
                                  norm_layer=norm_layer,token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
            for idx in range(depth)])

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def forward(self, x, mask=None):
        for block in self.blocks:
            if self.use_checkpoint:
                # NOTE(review): the checkpointed path drops `mask` - confirm
                # masking is not needed together with use_checkpoint=True.
                x = checkpoint.checkpoint(block, x)
            else:
                x = block(x, mask)
        return x

    def flops(self):
        # Total cost is simply the sum over the stacked blocks.
        return sum(block.flops() for block in self.blocks)
########### Basic decoderlayer of Uformer_Cross ################
class CrossUformerLayer(nn.Module):
    """A stack of `depth` cross-attention LeWin blocks (Uformer_Cross decoder stage).

    Even-indexed blocks use plain windows, odd-indexed blocks use windows
    shifted by win_size // 2.
    """
    def __init__(self, dim, output_dim, input_resolution, depth, num_heads, win_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, use_checkpoint=False,
                 token_projection='linear',token_mlp='ffn'):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block stochastic-depth rate: schedule (list) or shared scalar.
        rates = drop_path if isinstance(drop_path, list) else [drop_path] * depth
        self.blocks = nn.ModuleList([
            LeWinTransformer_Cross(dim=dim, input_resolution=input_resolution,
                                   num_heads=num_heads, win_size=win_size,
                                   shift_size=0 if (idx % 2 == 0) else win_size // 2,
                                   mlp_ratio=mlp_ratio,
                                   qkv_bias=qkv_bias, qk_scale=qk_scale,
                                   drop=drop, attn_drop=attn_drop,
                                   drop_path=rates[idx],
                                   norm_layer=norm_layer,token_projection=token_projection,token_mlp=token_mlp)
            for idx in range(depth)])

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def forward(self, x, attn_kv=None, mask=None):
        for block in self.blocks:
            if self.use_checkpoint:
                # NOTE(review): the checkpointed path drops both attn_kv and
                # mask - confirm this is intentional.
                x = checkpoint.checkpoint(block, x)
            else:
                x = block(x, attn_kv, mask)
        return x

    def flops(self):
        # Total cost is simply the sum over the stacked blocks.
        return sum(block.flops() for block in self.blocks)
########### Basic decoderlayer of Uformer_CatCross ################
class CatCrossUformerLayer(nn.Module):
    """A stack of `depth` concat-cross LeWin blocks (Uformer_CatCross decoder stage).

    Even-indexed blocks use plain windows, odd-indexed blocks use windows
    shifted by win_size // 2.
    """
    def __init__(self, dim, output_dim, input_resolution, depth, num_heads, win_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, use_checkpoint=False,
                 token_projection='linear',token_mlp='ffn'):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # Per-block stochastic-depth rate: schedule (list) or shared scalar.
        rates = drop_path if isinstance(drop_path, list) else [drop_path] * depth
        self.blocks = nn.ModuleList([
            LeWinTransformer_CatCross(dim=dim, input_resolution=input_resolution,
                                      num_heads=num_heads, win_size=win_size,
                                      shift_size=0 if (idx % 2 == 0) else win_size // 2,
                                      mlp_ratio=mlp_ratio,
                                      qkv_bias=qkv_bias, qk_scale=qk_scale,
                                      drop=drop, attn_drop=attn_drop,
                                      drop_path=rates[idx],
                                      norm_layer=norm_layer,token_projection=token_projection,token_mlp=token_mlp)
            for idx in range(depth)])

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def forward(self, x, attn_kv=None, mask=None):
        for block in self.blocks:
            if self.use_checkpoint:
                # NOTE(review): the checkpointed path drops both attn_kv and
                # mask - confirm this is intentional.
                x = checkpoint.checkpoint(block, x)
            else:
                x = block(x, attn_kv, mask)
        return x

    def flops(self):
        # Total cost is simply the sum over the stacked blocks.
        return sum(block.flops() for block in self.blocks)
class Uformer(nn.Module):
    def __init__(self, img_size=128, in_chans=3,
                 embed_dim=32, depths=[2, 2, 2, 2, 2, 2, 2, 2, 2], num_heads=[1, 2, 4, 8, 16, 16, 8, 4, 2],
                 win_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, patch_norm=True,
                 use_checkpoint=False, token_projection='linear', token_mlp='ffn', se_layer=False,
                 dowsample=Downsample, upsample=NearestUpsample, **kwargs):
        """Build the U-shaped transformer: 4 encoder stages, a bottleneck and
        4 decoder stages.

        `depths` and `num_heads` hold 9 entries: indices 0-3 drive the
        encoder stages, index 4 the bottleneck, indices 5-8 the decoder
        stages. Decoder stages operate on channel-concatenated skip
        connections, hence their doubled `dim` values.

        NOTE(review): the mutable default lists for `depths`/`num_heads` are
        shared across instances - harmless as long as they are only read.
        """
        super().__init__()

        self.num_enc_layers = len(depths)//2
        self.num_dec_layers = len(depths)//2
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm
        self.mlp_ratio = mlp_ratio
        self.token_projection = token_projection
        self.mlp = token_mlp
        self.win_size =win_size
        self.reso = img_size
        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        # Encoder rates ramp linearly; the bottleneck uses the final rate;
        # decoder rates mirror the encoder schedule in reverse.
        # NOTE(review): the decoder slices below index dec_dpr with
        # sum(depths[5:...]) while dec_dpr has length sum(depths[:4]) - this
        # assumes a depth schedule symmetric around the bottleneck; confirm.
        enc_dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths[:self.num_enc_layers]))]
        conv_dpr = [drop_path_rate]*depths[4]
        dec_dpr = enc_dpr[::-1]

        # build layers

        # Input/Output
        self.input_proj = InputProj(in_channel=in_chans, out_channel=embed_dim, kernel_size=3, stride=1, act_layer=nn.LeakyReLU)
        # Output projection takes 2*embed_dim: the last decoder stage works
        # on a skip-concatenated feature map.
        self.output_proj = OutputProj(in_channel=2*embed_dim, out_channel=in_chans, kernel_size=3, stride=1)

        # Encoder (channels: 1x, 2x, 4x, 8x embed_dim; resolution halves per stage)
        self.encoderlayer_0 = BasicUformerLayer(dim=embed_dim,
                            output_dim=embed_dim,
                            input_resolution=(img_size,
                                                img_size),
                            depth=depths[0],
                            num_heads=num_heads[0],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:0]):sum(depths[:1])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_0 = dowsample(embed_dim, embed_dim*2)
        self.encoderlayer_1 = BasicUformerLayer(dim=embed_dim*2,
                            output_dim=embed_dim*2,
                            input_resolution=(img_size // 2,
                                                img_size // 2),
                            depth=depths[1],
                            num_heads=num_heads[1],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:1]):sum(depths[:2])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_1 = dowsample(embed_dim*2, embed_dim*4)
        self.encoderlayer_2 = BasicUformerLayer(dim=embed_dim*4,
                            output_dim=embed_dim*4,
                            input_resolution=(img_size // (2 ** 2),
                                                img_size // (2 ** 2)),
                            depth=depths[2],
                            num_heads=num_heads[2],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:2]):sum(depths[:3])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_2 = dowsample(embed_dim*4, embed_dim*8)
        self.encoderlayer_3 = BasicUformerLayer(dim=embed_dim*8,
                            output_dim=embed_dim*8,
                            input_resolution=(img_size // (2 ** 3),
                                                img_size // (2 ** 3)),
                            depth=depths[3],
                            num_heads=num_heads[3],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=enc_dpr[sum(depths[:3]):sum(depths[:4])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.dowsample_3 = dowsample(embed_dim*8, embed_dim*16)

        # Bottleneck (16x embed_dim at 1/16 resolution)
        self.conv = BasicUformerLayer(dim=embed_dim*16,
                            output_dim=embed_dim*16,
                            input_resolution=(img_size // (2 ** 4),
                                                img_size // (2 ** 4)),
                            depth=depths[4],
                            num_heads=num_heads[4],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=conv_dpr,
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)

        # Decoder: upsample, refine with a conv block, then run a transformer
        # stage on the skip-concatenated features (hence dim = 2x the
        # upsampled channels).
        self.upsample_0 = upsample(embed_dim*16, embed_dim*8)
        self.decoderblock_0 = ConvBlock_1(embed_dim*8,embed_dim*8)
        self.decoderlayer_0 = BasicUformerLayer(dim=embed_dim*16,
                            output_dim=embed_dim*16,
                            input_resolution=(img_size // (2 ** 3),
                                                img_size // (2 ** 3)),
                            depth=depths[5],
                            num_heads=num_heads[5],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[:depths[5]],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.upsample_1 = upsample(embed_dim*16, embed_dim*4)
        self.decoderblock_1 = ConvBlock_1(embed_dim*4,embed_dim*4)
        self.decoderlayer_1 = BasicUformerLayer(dim=embed_dim*8,
                            output_dim=embed_dim*8,
                            input_resolution=(img_size // (2 ** 2),
                                                img_size // (2 ** 2)),
                            depth=depths[6],
                            num_heads=num_heads[6],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[sum(depths[5:6]):sum(depths[5:7])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.upsample_2 = upsample(embed_dim*8, embed_dim*2)
        self.decoderblock_2 = ConvBlock_1(embed_dim*2,embed_dim*2)
        self.decoderlayer_2 = BasicUformerLayer(dim=embed_dim*4,
                            output_dim=embed_dim*4,
                            input_resolution=(img_size // 2,
                                                img_size // 2),
                            depth=depths[7],
                            num_heads=num_heads[7],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[sum(depths[5:7]):sum(depths[5:8])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.upsample_3 = upsample(embed_dim*4, embed_dim)
        self.decoderblock_3 = ConvBlock_1(embed_dim,embed_dim)
        self.decoderlayer_3 = BasicUformerLayer(dim=embed_dim*2,
                            output_dim=embed_dim*2,
                            input_resolution=(img_size,
                                                img_size),
                            depth=depths[8],
                            num_heads=num_heads[8],
                            win_size=win_size,
                            mlp_ratio=self.mlp_ratio,
                            qkv_bias=qkv_bias, qk_scale=qk_scale,
                            drop=drop_rate, attn_drop=attn_drop_rate,
                            drop_path=dec_dpr[sum(depths[5:8]):sum(depths[5:9])],
                            norm_layer=norm_layer,
                            use_checkpoint=use_checkpoint,
                            token_projection=token_projection,token_mlp=token_mlp,se_layer=se_layer)
        self.decoderblock_4 = ConvBlock_1(embed_dim*2,embed_dim*2)

        # Recursively initialize Linear/LayerNorm weights.
        self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def extra_repr(self) -> str:
return f"embed_dim={self.embed_dim}, token_projection={self.token_projection}, token_mlp={self.mlp},win_size={self.win_size}"
def forward(self, x, mask=None):
# Input Projection
y = self.input_proj(x)
y = self.pos_drop(y)
#Encoder
conv0 = self.encoderlayer_0(y,mask=mask)
pool0 = self.dowsample_0(conv0)
conv1 = self.encoderlayer_1(pool0,mask=mask)
pool1 = self.dowsample_1(conv1)
conv2 = self.encoderlayer_2(pool1,mask=mask)
pool2 = self.dowsample_2(conv2)
conv3 = self.encoderlayer_3(pool2,mask=mask)
pool3 = self.dowsample_3(conv3)
# Bottleneck
conv4 = self.conv(pool3, mask=mask)
#Decoder
| |
"""
Module containing routines used by 3D datacubes.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import inspect
from astropy import wcs, units
from astropy.coordinates import AltAz, SkyCoord
from astropy.io import fits
import scipy.optimize as opt
from scipy.interpolate import interp1d
import numpy as np
from pypeit import msgs
from pypeit.core.procimg import grow_masked
from pypeit.core import coadd
from pypeit.spectrographs.util import load_spectrograph
from pypeit import datamodel
from pypeit import io
class DataCube(datamodel.DataContainer):
    """
    DataContainer to hold the products of a datacube.
    See the datamodel for argument descriptions.
    Args:
        flux (`numpy.ndarray`_):
            The science datacube (nwave, nspaxel_y, nspaxel_x)
        variance (`numpy.ndarray`_):
            The variance datacube (nwave, nspaxel_y, nspaxel_x)
        refscale (`numpy.ndarray`_):
            An image containing the relative scale of each pixel on the detector (nspec, nspat)
        PYP_SPEC (str):
            Name of the PypeIt Spectrograph
        fluxed (bool):
            If the cube has been flux calibrated, this will be set to "True"
    Attributes:
        head0 (`astropy.io.fits.Header`):
            Primary header
        filename (str):
            Filename to use when loading from file
        spect_meta (:obj:`dict`):
            Parsed meta from the header
        spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
            Build from PYP_SPEC
    """
    # Datamodel version written to / checked against the FITS file.
    version = '1.0.0'
    # Declares the type and description of every persisted attribute.
    datamodel = {'flux': dict(otype=np.ndarray, atype=np.floating, descr='Flux array in units of counts/s or 10^-17 erg/s/cm^2/Ang'),
                 'variance': dict(otype=np.ndarray, atype=np.floating, descr='Variance array (matches units of flux)'),
                 'refscale': dict(otype=np.ndarray, atype=np.floating, descr='Reference scaling used for each slit'),
                 'PYP_SPEC': dict(otype=str, descr='PypeIt: Spectrograph name'),
                 'fluxed': dict(otype=bool, descr='Boolean indicating if the datacube is fluxed.')}
    @classmethod
    def from_file(cls, ifile):
        """
        Over-load :func:`pypeit.datamodel.DataContainer.from_file`
        to deal with the header
        Args:
            ifile (str): Filename holding the object
        Returns:
            :class:`DataCube`: The loaded datacube, with internals
            (filename, header, spectrograph metadata) populated.
        """
        hdul = fits.open(ifile)
        slf = super(DataCube, cls).from_hdu(hdul)
        # Internals not handled by the datamodel machinery
        slf.filename = ifile
        slf.head0 = hdul[0].header
        # Meta: rebuild the spectrograph object and parse its header keys
        slf.spectrograph = load_spectrograph(slf.PYP_SPEC)
        slf.spect_meta = slf.spectrograph.parse_spec_header(slf.head0)
        return slf
    def __init__(self, flux, variance, PYP_SPEC, refscale=None, fluxed=None):
        # Capture the constructor arguments by introspection so that the
        # datamodel dict is built from exactly the named parameters (skips self).
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        _d = dict([(k,values[k]) for k in args[1:]])
        # Setup the DataContainer
        datamodel.DataContainer.__init__(self, d=_d)
    def _init_internals(self):
        # Attributes held in memory but not written to the datamodel HDUs.
        self.head0 = None
        self.filename = None
        self.spectrograph = None
        self.spect_meta = None
    def _bundle(self):
        """
        Over-write default _bundle() method to separate the DetectorContainer
        into its own HDU
        Returns:
            :obj:`list`: A list of dictionaries, each list element is
            written to its own fits extension. See the description
            above.
        """
        d = []
        # Rest of the datamodel
        for key in self.keys():
            # Skip Nones
            if self[key] is None:
                continue
            # Array?
            if self.datamodel[key]['otype'] == np.ndarray:
                tmp = {}
                # Downcast float arrays to float32 to halve the file size.
                if self.datamodel[key]['atype'] == np.floating:
                    tmp[key] = self[key].astype(np.float32)
                else:
                    tmp[key] = self[key]
                d.append(tmp)
            else:
                # Add to header of the primary image
                # NOTE(review): assumes at least one array key was appended
                # before any scalar key is reached — d[0] would raise otherwise.
                d[0][key] = self[key]
        # Return
        return d
    def to_file(self, ofile, primary_hdr=None, hdr=None, **kwargs):
        """
        Over-load :func:`pypeit.datamodel.DataContainer.to_file`
        to deal with the header
        Args:
            ofile (:obj:`str`): Filename
            primary_hdr (`astropy.io.fits.Header`_, optional):
                Primary header; a fresh one is initialized if not provided
            hdr (`astropy.io.fits.Header`_, optional):
                Header (e.g. a WCS) passed through to the extension HDUs
            **kwargs: Passed to super.to_file()
        """
        if primary_hdr is None:
            primary_hdr = io.initialize_header(primary=True)
        # Build the header
        if self.head0 is not None and self.PYP_SPEC is not None:
            spectrograph = load_spectrograph(self.PYP_SPEC)
            # NOTE(review): head0 is passed twice here — confirm the second
            # argument of subheader_for_spec is meant to be the same header.
            subheader = spectrograph.subheader_for_spec(self.head0, self.head0)
        else:
            subheader = {}
        # Add em in
        for key in subheader:
            primary_hdr[key] = subheader[key]
        # Do it
        super(DataCube, self).to_file(ofile, primary_hdr=primary_hdr, hdr=hdr, **kwargs)
def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pressure, temperature, rel_humidity):
    """ Residual function used when fitting the offset caused by differential
    atmospheric refraction (DAR).

    Args:
        radec (tuple):
            Two floats: the trial (ra, dec) shift due to DAR.
        coord_ra (float):
            RA in degrees
        coord_dec (float):
            Dec in degrees
        datfit (`numpy.ndarray`_):
            The (alt, az) values that the model needs to match
        wave (float):
            Wavelength at which to evaluate the DAR
        obstime (`astropy.time.Time`_):
            Time of the observation
        location (`astropy.coordinates.EarthLocation`_):
            observatory location
        pressure (float):
            Outside pressure at `location`
        temperature (float):
            Outside ambient air temperature at `location`
        rel_humidity (float):
            Outside relative humidity at `location`, between 0 and 1.

    Returns:
        chisq (float):
            chi-squared difference between datfit and the model prediction
    """
    shift_ra, shift_dec = radec
    # Shifted sky coordinate, then transformed to alt/az under the given
    # atmospheric conditions at the requested wavelength.
    trial_coord = SkyCoord(coord_ra + shift_ra, coord_dec + shift_dec, unit=(units.deg, units.deg))
    trial_altaz = trial_coord.transform_to(AltAz(obstime=obstime, location=location, obswl=wave,
                                                pressure=pressure, temperature=temperature,
                                                relative_humidity=rel_humidity))
    model = np.array([trial_altaz.alt.value, trial_altaz.az.value])
    # Chi-squared against the target alt/az
    return np.sum((model - datfit)**2)
def dar_correction(wave_arr, coord, obstime, location, pressure, temperature, rel_humidity,
                   wave_ref=None, numgrid=10):
    """
    Compute the differential atmospheric refraction (DAR) shift in RA/Dec
    for every input wavelength, relative to a reference wavelength.

    This implementation is based on ERFA, which is called through astropy.

    .. todo::
        There's probably going to be issues when the RA angle is
        either side of RA=0.

    Parameters
    ----------
    wave_arr : `numpy.ndarray`_
        wavelengths to obtain ra and dec offsets
    coord : `astropy.coordinates.SkyCoord`_
        ra, dec positions at the centre of the field
    obstime : `astropy.time.Time`_
        time at the midpoint of observation
    location : `astropy.coordinates.EarthLocation`_
        observatory location
    pressure : :obj:`float`
        Outside pressure at `location`
    temperature : :obj:`float`
        Outside ambient air temperature at `location`
    rel_humidity : :obj:`float`
        Outside relative humidity at `location`, between 0 and 1.
    wave_ref : :obj:`float`
        Reference wavelength (the DAR correction is relative to this wavelength);
        defaults to the midpoint of ``wave_arr``.
    numgrid : :obj:`int`
        Number of grid points at which the DAR fit is evaluated.

    Returns
    -------
    ra_diff : `numpy.ndarray`_
        Relative RA shift at each wavelength given by `wave_arr`
    dec_diff : `numpy.ndarray`_
        Relative DEC shift at each wavelength given by `wave_arr`
    """
    msgs.info("Performing differential atmospheric refraction correction")
    if wave_ref is None:
        wave_ref = 0.5*(wave_arr.min() + wave_arr.max())
    # Reference alt/az frame and a coarse wavelength grid spanning the data
    coord_altaz = coord.transform_to(AltAz(obstime=obstime, location=location))
    wave_grid = np.linspace(wave_arr.min(), wave_arr.max(), numgrid) * units.AA
    datfit = np.array([coord_altaz.alt.value, coord_altaz.az.value])
    ra_grid = np.zeros(numgrid)
    dec_grid = np.zeros(numgrid)
    # Solve for the (ra, dec) shift at each grid wavelength
    for ii, wav in enumerate(wave_grid):
        fitargs = (coord.ra.value, coord.dec.value, datfit, wav, obstime, location,
                   pressure, temperature, rel_humidity)
        sol = opt.least_squares(dar_fitfunc, [0.0, 0.0], args=fitargs,
                                xtol=1.0e-6, ftol=None, gtol=None)
        ra_grid[ii], dec_grid[ii] = sol.x
    # Cubic splines of the grid solution, evaluated at the requested
    # wavelengths and referenced to wave_ref
    spl_ra = interp1d(wave_grid, ra_grid, kind='cubic')
    spl_dec = interp1d(wave_grid, dec_grid, kind='cubic')
    ra_diff = spl_ra(wave_arr) - spl_ra(wave_ref)
    dec_diff = spl_dec(wave_arr) - spl_dec(wave_ref)
    return ra_diff, dec_diff
def make_whitelight_fromref(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat, ref_filename):
    """ Generate a whitelight image of every input frame, using a reference
    image to define the spatial grid. The reference image must have a
    well-defined 3D WCS.

    Args:
        all_ra (`numpy.ndarray`_):
            1D flattened array containing the RA values of each pixel from all spec2d files
        all_dec (`numpy.ndarray`_):
            1D flattened array containing the DEC values of each pixel from all spec2d files
        all_wave (`numpy.ndarray`_):
            1D flattened array containing the wavelength values of each pixel from all spec2d files
        all_sci (`numpy.ndarray`_):
            1D flattened array containing the counts of each pixel from all spec2d files
        all_wghts (`numpy.ndarray`_):
            1D flattened array containing the weights attributed to each pixel from all spec2d files
        all_idx (`numpy.ndarray`_):
            1D flattened array of integer identifiers indicating which spec2d file
            each pixel originates from (0 = first file, 1 = second, ...).
        dspat (float):
            The size of each spaxel on the sky (in degrees)
        ref_filename (str):
            A fits filename of a reference image with a valid 3D WCS.

    Returns:
        tuple : (reference_image, whitelight_imgs, wlwcs). The first is the 2D
        reference image loaded from ref_filename; the second a 3D array of shape
        [N, M, numfiles] with the per-file white light images; the third the WCS
        of the white light image.
    """
    hdul = fits.open(ref_filename)
    ref_img = hdul[0].data.T[:, :, 0]
    ref_wcs = wcs.WCS(hdul[0].header)
    nra, ndec = ref_img.shape
    # Copy the reference spatial WCS, but widen the wavelength axis so a
    # single bin covers the full wavelength range (i.e. white light).
    cmin = ref_wcs.wcs.crval
    cdelt = ref_wcs.wcs.cdelt
    cmin[2] = np.min(all_wave)
    cdelt[2] = np.max(all_wave) - np.min(all_wave)
    wlwcs = generate_masterWCS(cmin, cdelt)
    # Bin every frame onto the reference spatial grid
    wl_imgs, _, _ = make_whitelight(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, dspat,
                                    whitelightWCS=wlwcs, numra=nra, numdec=ndec)
    return ref_img, wl_imgs, wlwcs
def make_whitelight(all_ra, all_dec, all_wave, all_sci, all_wghts, all_idx, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""
File: data_provider.py
"""
import re
import os
import types
import csv
import random
import numpy as np
# Module-level vocabulary: token -> integer id. Populated by load_dict().
VOC_DICT = {}
def load_dict(vocab_dict):
    """
    Load a newline-delimited vocabulary file into the module-level VOC_DICT.

    Each stripped line becomes a key whose value is its 0-based line number.
    The module-level VOC_DICT is mutated in place (callers such as
    convert_tokens_to_ids rely on this side effect) and also returned.

    Args:
        vocab_dict (str): Path to the vocabulary file, one token per line.

    Returns:
        dict: The populated VOC_DICT mapping token -> id.
    """
    # Use a context manager so the file handle is always closed
    # (the original iterated a bare open() and leaked the handle).
    with open(vocab_dict) as fin:
        for idx, line in enumerate(fin):
            VOC_DICT[line.strip()] = idx
    return VOC_DICT
def prepare_batch_data(insts,
                       task_name,
                       max_len=128,
                       return_attn_bias=True,
                       return_max_len=True,
                       return_num_token=False):
    """
    Pad a batch of instances and assemble the per-instance model inputs,
    including the self-attention bias of shape batch_size * max_len * max_len.

    Each instance is [context_ids, pos_ids, segment_ids, label, (kn_ids, goal_ids)];
    the knowledge/goal fields are used only when 'kn' is in task_name.
    Note: the return_* keyword flags are accepted for API compatibility but
    the pad_batch_data calls below use fixed settings.
    """
    ctx_ids = [item[0] for item in insts]
    ctx_pos = [item[1] for item in insts]
    seg_ids = [item[2] for item in insts]
    labels_list = [[item[3]] for item in insts]
    # Context gets the full treatment: padding, first-token index, attention bias.
    context_id, next_sent_context_index, context_attn_bias = pad_batch_data(
        ctx_ids, pad_idx=0, max_len=max_len,
        return_next_sent_pos=True, return_attn_bias=True)
    context_pos_id = pad_batch_data(
        ctx_pos, pad_idx=0, max_len=max_len, return_pos=False, return_attn_bias=False)
    context_segment_id = pad_batch_data(
        seg_ids, pad_idx=0, max_len=max_len, return_pos=False, return_attn_bias=False)
    if 'kn' in task_name:
        # Knowledge and goal sequences are only truncated, never padded.
        kn_id = pad_bath_kn_data([item[4] for item in insts], pad_idx=0, max_len=max_len)
        goal_id = pad_bath_kn_data([item[5] for item in insts], pad_idx=0, max_len=max_len)
    out_list = []
    for i in range(len(insts)):
        row = [context_id[i], context_pos_id[i], context_segment_id[i], context_attn_bias[i]]
        if 'kn' in task_name:
            row += [kn_id[i], goal_id[i]]
        row += [labels_list[i], next_sent_context_index[i]]
        out_list.append(row)
    return out_list
def pad_bath_kn_data(insts,
                     pad_idx=0,
                     max_len=128):
    """Truncate each knowledge sequence to at most max_len tokens.

    Despite the name, no padding is performed; pad_idx is accepted only
    for signature compatibility with pad_batch_data.
    """
    return [seq[:max_len] for seq in insts]
def pad_batch_data(insts,
                   pad_idx=0,
                   max_len=128,
                   return_pos=False,
                   return_next_sent_pos=False,
                   return_attn_bias=False,
                   return_max_len=False,
                   return_num_token=False):
    """
    Pad the instances to max_len and optionally build position data,
    first-token indices and the self-attention bias.

    Returns a single array when only the padded ids are requested,
    otherwise a list in the order: ids, next_sent_pos, pos, attn_bias,
    max_len, num_token (including only the requested items).
    """
    outputs = []
    # Right-pad every instance with pad_idx up to max_len.
    padded = np.array([seq + [pad_idx] * (max_len - len(seq)) for seq in insts])
    outputs.append(padded.astype("int64").reshape([-1, max_len, 1]))
    if return_next_sent_pos:
        # Flat index of each instance's first token in the flattened batch.
        num_insts, seq_len = padded.shape[0], padded.shape[1]
        first_tok = np.arange(0, num_insts * seq_len, seq_len).astype("int64").reshape(-1, 1)
        outputs.append(first_tok)
    if return_pos:
        # Position ids, padded with pad_idx past each instance's length.
        positions = np.array([list(range(len(seq))) + [pad_idx] * (max_len - len(seq))
                              for seq in insts])
        outputs.append(positions.astype("int64").reshape([-1, max_len, 1]))
    if return_attn_bias:
        # 0 over real tokens, -1e9 over padding; tiled so every query row
        # shares the same key mask -> shape [batch, max_len, max_len].
        rows = np.array([[0] * len(seq) + [-1e9] * (max_len - len(seq)) for seq in insts])
        bias = np.tile(rows.reshape([-1, 1, max_len]), [1, max_len, 1])
        outputs.append(bias.astype("float32"))
    if return_max_len:
        outputs.append(max_len)
    if return_num_token:
        outputs.append(sum(len(seq) for seq in insts))
    return outputs if len(outputs) > 1 else outputs[0]
def preprocessing_for_one_line(line, labels, task_name, max_seq_len=256):
    """
    Convert one raw tab-separated line into a ready-to-feed model batch.

    The line layout is: label, context, response, and (for 'kn' tasks)
    goal and knowledge fields, which are joined with a [SEP] marker.
    """
    fields = line.rstrip('\n').split('\t')
    label_text, context_text, response_text = fields[0], fields[1], fields[2]
    kn_text = "%s [SEP] %s" % (fields[3], fields[4]) if 'kn' in task_name else None
    example = InputExample(guid=0,
                           context_text=context_text,
                           response_text=response_text,
                           kn_text=kn_text,
                           goal_text=kn_text,
                           label_text=label_text)
    feature = convert_single_example(0, example, labels, max_seq_len)
    instance = [feature.context_ids, feature.context_pos_ids,
                feature.segment_ids, feature.label_ids, feature.kn_ids]
    return prepare_batch_data([instance],
                              task_name,
                              max_len=max_seq_len,
                              return_attn_bias=True,
                              return_max_len=False,
                              return_num_token=False)
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""
    def __init__(self, data_dir, task_name, vocab_path, max_seq_len, do_lower_case):
        # Directory containing train.txt / dev.txt / test.txt.
        self.data_dir = data_dir
        self.max_seq_len = max_seq_len
        # Task name; if it contains 'kn', knowledge/goal fields are emitted.
        self.task_name = task_name
        # Progress trackers updated while iterating the train split.
        self.current_train_example = -1
        # Example counts per phase; -1 until that phase has been generated.
        self.num_examples = {'train': -1, 'dev': -1, 'test': -1}
        self.current_train_epoch = -1
        # NOTE(review): this binds a throwaway local; it works only because
        # load_dict() mutates the module-level VOC_DICT as a side effect.
        # do_lower_case is accepted but not used here.
        VOC_DICT = load_dict(vocab_path)
    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()
    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()
    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()
    # NOTE(review): declared @classmethod but the first parameter is named
    # self; it actually receives the class object.
    @classmethod
    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()
    def convert_example(self, index, example, labels, max_seq_len):
        """Converts a single `InputExample` into a single `InputFeatures`."""
        feature = convert_single_example(index, example, labels, max_seq_len)
        return feature
    def generate_batch_data(self,
                            batch_data,
                            voc_size=-1,
                            mask_id=-1,
                            return_attn_bias=True,
                            return_max_len=False,
                            return_num_token=False):
        """Pad and assemble one batch via prepare_batch_data.

        voc_size, mask_id and the return_* flags are accepted for API
        compatibility only; the delegate call below uses fixed settings.
        """
        return prepare_batch_data(
            batch_data,
            self.task_name,
            self.max_seq_len,
            return_attn_bias=True,
            return_max_len=False,
            return_num_token=False)
    @classmethod
    def _read_data(cls, input_file):
        """Reads a tab separated value file."""
        with open(input_file, "r") as f:
            lines = []
            for line in f:
                # Each record becomes a list of tab-separated fields.
                line = line.rstrip('\n').split('\t')
                lines.append(line)
            return lines
    def get_num_examples(self, phase):
        """Get number of examples for train, dev or test."""
        if phase not in ['train', 'dev', 'test']:
            raise ValueError("Unknown phase, which should be in ['train', 'dev', 'test'].")
        return self.num_examples[phase]
    def get_train_progress(self):
        """Gets progress for training phase."""
        return self.current_train_example, self.current_train_epoch
    def data_generator(self,
                       batch_size,
                       phase='train',
                       epoch=1,
                       shuffle=False):
        """
        Generate data for train, dev or test.

        Returns a zero-argument generator function that yields padded
        batches (lists produced by generate_batch_data).
        """
        if phase == 'train':
            examples = self.get_train_examples(self.data_dir)
            self.num_examples['train'] = len(examples)
        elif phase == 'dev':
            examples = self.get_dev_examples(self.data_dir)
            self.num_examples['dev'] = len(examples)
        elif phase == 'test':
            examples = self.get_test_examples(self.data_dir)
            self.num_examples['test'] = len(examples)
        else:
            raise ValueError("Unknown phase, which should be in ['train', 'dev', 'test'].")
        def instance_reader():
            """Yield one feature-instance list per example, for `epoch` passes."""
            for epoch_index in range(epoch):
                if shuffle:
                    # In-place shuffle of the captured examples list each epoch.
                    random.shuffle(examples)
                if phase == 'train':
                    self.current_train_epoch = epoch_index
                for (index, example) in enumerate(examples):
                    if phase == 'train':
                        self.current_train_example = index + 1
                    feature = self.convert_example(
                        index, example, self.get_labels(), self.max_seq_len)
                    # Knowledge tasks carry two extra fields per instance.
                    if 'kn' in self.task_name:
                        instance = [feature.context_ids, feature.context_pos_ids, \
                            feature.segment_ids, feature.label_ids, feature.kn_ids, feature.goal_ids]
                    else:
                        instance = [feature.context_ids, feature.context_pos_ids, \
                            feature.segment_ids, feature.label_ids]
                    yield instance
        def batch_reader(reader, batch_size):
            """Group instances into batches of exactly batch_size;
            the final (possibly smaller) batch is also yielded."""
            batch = []
            for instance in reader():
                if len(batch) < batch_size:
                    batch.append(instance)
                else:
                    # Batch is full: emit it and start a new one with
                    # the current instance.
                    yield batch
                    batch = [instance]
            if len(batch) > 0:
                yield batch
        def wrapper():
            """Pad each raw batch through generate_batch_data before yielding."""
            for batch_data in batch_reader(instance_reader, batch_size):
                batch_data = self.generate_batch_data(
                    batch_data,
                    voc_size=-1,
                    mask_id=-1,
                    return_attn_bias=True,
                    return_max_len=False,
                    return_num_token=False)
                yield batch_data
        return wrapper
class InputExample(object):
    """Container for one raw training/test example.

    Holds the unparsed text fields as read from the data file; tokenization
    happens later in convert_single_example.
    """

    def __init__(self, guid, context_text, response_text, kn_text, goal_text, label_text):
        # Unique identifier, e.g. "train-3".
        self.guid = guid
        # Gold label as a string (e.g. "0"/"1").
        self.label_text = label_text
        # Dialogue context and candidate response.
        self.context_text = context_text
        self.response_text = response_text
        # Optional goal/knowledge strings (None for non-'kn' tasks).
        self.goal_text = goal_text
        self.kn_text = kn_text
class InputFeatures(object):
    """Numeric features for one example, ready for batching.

    All fields are id lists except label_ids, which is a single int.
    """

    def __init__(self, context_ids, context_pos_ids, segment_ids, kn_ids, goal_ids, label_ids):
        # Token / position / segment ids for the [CLS] context [SEP] response [SEP] sequence.
        self.context_ids = context_ids
        self.context_pos_ids = context_pos_ids
        self.segment_ids = segment_ids
        # Label index (int), mapped from the label string.
        self.label_ids = label_ids
        # Knowledge and goal token ids (empty lists for non-'kn' tasks).
        self.kn_ids = kn_ids
        self.goal_ids = goal_ids
class MatchProcessor(DataProcessor):
    """Processor for the Match data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """Read train.txt and build its InputExamples."""
        lines = self._read_data(os.path.join(data_dir, "train.txt"))
        return self._create_examples(lines, "train")

    def get_dev_examples(self, data_dir):
        """Read dev.txt and build its InputExamples."""
        lines = self._read_data(os.path.join(data_dir, "dev.txt"))
        return self._create_examples(lines, "dev")

    def get_test_examples(self, data_dir):
        """Read test.txt and build its InputExamples."""
        lines = self._read_data(os.path.join(data_dir, "test.txt"))
        return self._create_examples(lines, "test")

    @classmethod
    def get_labels(cls):
        """Binary match labels."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw tab-split records into InputExample objects.

        Field layout: label, context, response, and for 'kn' tasks also
        goal (index 3) and knowledge (index 4).
        """
        examples = []
        for idx, fields in enumerate(lines):
            guid = "%s-%s" % (set_type, idx)
            if 'kn' in self.task_name:
                kn_text, goal_text = fields[4], fields[3]
            else:
                kn_text, goal_text = None, None
            examples.append(InputExample(guid=guid,
                                         context_text=fields[1],
                                         response_text=fields[2],
                                         kn_text=kn_text,
                                         goal_text=goal_text,
                                         label_text=fields[0]))
        return examples
def convert_tokens_to_ids(tokens):
    """
    Map each token to its id in the module-level VOC_DICT,
    falling back to the [UNK] id for out-of-vocabulary tokens.
    """
    # The [UNK] id is looked up lazily so an all-in-vocab input never
    # requires '[UNK]' to be present (matches the original behaviour).
    return [VOC_DICT[tok] if tok in VOC_DICT else VOC_DICT['[UNK]'] for tok in tokens]
def convert_single_example(ex_index, example, label_list, max_seq_length):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
if example.context_text:
tokens_context = example.context_text
tokens_context = tokens_context.split()
else:
tokens_context = []
if example.response_text:
tokens_response = example.response_text
tokens_response = tokens_response.split()
else:
tokens_response = []
if example.kn_text:
tokens_kn = example.kn_text
tokens_kn = tokens_kn.split()
tokens_kn = tokens_kn[0: min(len(tokens_kn), max_seq_length)]
else:
tokens_kn = []
if example.goal_text:
tokens_goal = example.goal_text
tokens_goal = tokens_goal.split()
tokens_goal = tokens_goal[0: min(len(tokens_goal), max_seq_length)]
else:
tokens_goal = []
tokens_response = tokens_response[0: min(50, len(tokens_response))]
if len(tokens_context) > max_seq_length - len(tokens_response) - 3:
tokens_context = tokens_context[len(tokens_context) \
+ len(tokens_response) - max_seq_length + 3:]
context_tokens = []
segment_ids = []
context_tokens.append("[CLS]")
segment_ids.append(0)
context_tokens.extend(tokens_context)
segment_ids.extend([0] * len(tokens_context))
context_tokens.append("[SEP]")
segment_ids.append(0)
context_tokens.extend(tokens_response)
segment_ids.extend([1] * len(tokens_response))
context_tokens.append("[SEP]")
segment_ids.append(1)
context_ids = convert_tokens_to_ids(context_tokens)
context_pos_ids = list(range(len(context_ids)))
label_ids = label_map[example.label_text]
if tokens_kn:
kn_ids = convert_tokens_to_ids(tokens_kn)
else:
kn_ids = []
if tokens_goal:
goal_ids = convert_tokens_to_ids(tokens_goal)
else:
goal_ids = []
feature = InputFeatures(
context_ids=context_ids,
context_pos_ids=context_pos_ids,
segment_ids=segment_ids,
kn_ids = kn_ids,
goal_ids = goal_ids,
label_ids=label_ids)
#if ex_index < |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.