input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
> 0:
c += self._layer_costs[i]
self._budget.consumeBudget(self._layer_costs[i])
try:
neighbors = set(list(self._graph[i].neighbors(u)))
edges = [(u,v) for v in neighbors]
self._sample[i].add_edges_from(edges)
except:
neighbors = []
self._queried[i].update([u])
self._unqueried[i].update(neighbors)
self._unqueried[i].difference_update(self._queried[i])
# If no unqueried node, stop
if len(self._unqueried[i]) == 0:
break
candidates = set(neighbors).difference(self._queried[i])
if np.random.random_sample() > self._alpha and len(candidates) > 0:
u = np.random.choice(list(candidates))
elif len(self._unqueried[i]) > 0:
u = np.random.choice(list(self._unqueried[i]))
else:
break
# Update layer importance
freshness = 0
if self._sample[i].number_of_edges() > 0:
edges1 = set([frozenset(e) for e in self._sample[i].edges()])
freshness = len(edges1.difference(edges0)) / len(edges1)
self._layer_importance.updateLayerFreshness(i, freshness)
class CommunityManager(object):
    """Tracks the pair of 'active' communities per layer for the community
    bandit and decides when to move up or down the community hierarchy."""

    def __init__(self, hcommunity):
        super(CommunityManager, self).__init__()
        self._hcommunity = hcommunity
        self._initalCommunities()
        self._generateMapping()

    def _generateMapping(self):
        """
        Map int indices to community ids (and back) for each layer.
        """
        # BUG FIX: the maps were built but never stored, so _getComName
        # failed with AttributeError on self._map.
        self._map = []
        self._rmap = []
        for l in range(0, self._hcommunity.getLayerCount()):
            c = self._hcommunity.getCommunityIds(l)
            self._map.append({i: c[i] for i in range(0, len(c))})
            self._rmap.append({c[i]: i for i in range(0, len(c))})

    def _getComName(self, layer, i):
        """
        Return com name given layer and id
        """
        return self._map[layer][i]

    def _initalCommunities(self):
        """
        The two initial communities for all layers
        """
        roots = self._hcommunity.getRootCommunity()
        self._active_communities = []
        self._rewards = []
        self._crewards = []
        for l in range(0, self._hcommunity.getLayerCount()):
            coms = self._hcommunity.getChildren(l, roots[l])
            self._active_communities.append(coms)
            self._crewards.append({c: [] for c in coms})

    def getActiveCommunities(self, layer):
        """Return the communities currently being explored in `layer`."""
        return self._active_communities[layer]

    def updateCReward(self, layer, cid, value):
        """Record reward `value` for community `cid` of `layer`."""
        self._rewards.append(value)
        self._crewards[layer][cid].append(value)

    def switchArm(self, layer):
        """
        Check rewards to decide whether the active communities of `layer`
        should be changed. Returns True if a switch happened, else False.
        """
        # BUG FIX: `[...] < 5` compared a list to an int (TypeError in
        # Python 3); the per-element comparison was intended.
        if np.any([len(self._crewards[layer][c]) < 5 for c in self._crewards[layer]]):
            return False
        rewards = self._crewards[layer]
        cid = self._active_communities[layer]
        aval = np.mean(self._rewards)
        mval = {c: np.mean(rewards[c]) for c in cid}
        sval = {c: np.std(rewards[c]) for c in cid}
        new_coms = None
        # If both arms have very low rewards, switch up
        if np.all([mval[c] + sval[c] < aval for c in cid]):
            new_coms = self.switchArmUp(layer, cid[0])
        elif mval[cid[0]] < mval[cid[1]] - sval[cid[1]]:
            # If arm 0 is very much lower than 1, switch down to 1
            new_coms = self.switchArmDown(layer, cid[1])
        elif mval[cid[1]] < mval[cid[0]] - sval[cid[0]]:
            # BUG FIX: was `cdi[0]` (NameError).
            new_coms = self.switchArmDown(layer, cid[0])
        changed = new_coms is not None
        if changed:
            # BUG FIX: the switch result was previously discarded (and the
            # `changed` flag never set), so the active communities and
            # their reward buffers were never actually updated.
            self._active_communities[layer] = list(new_coms)
            self._crewards[layer] = {c: [] for c in new_coms}
        return changed

    def switchArmDown(self, layer, cid):
        """
        Switch to a lower level of community from cid.
        Returns the new community pair, or None if cid is not active.
        """
        active = self.getActiveCommunities(layer)
        # BUG FIX: was `comid` (NameError).
        if cid not in active:
            return None
        if self._hcommunity.checkLeaf(layer, cid):
            # If leaf, return cid and sibling
            return (cid, self._hcommunity.getSibling(layer, cid)[0])
        return self._hcommunity.getChildren(layer, cid)

    def switchArmUp(self, layer, cid):
        """
        Switch to the parent level of community from cid.
        Returns the new community pair, or None if cid is not active.
        """
        active = self.getActiveCommunities(layer)
        # BUG FIX: was `comid` (NameError).
        if cid not in active:
            return None
        parent = self._hcommunity.getParent(layer, cid)
        if self._hcommunity.checkLeaf(layer, parent):
            # if parent is root, return self and sibling
            return (cid, self._hcommunity.getSibling(layer, cid)[0])
        return (parent, self._hcommunity.getSibling(layer, parent)[0])
class BanditManager(object):
    """Coordinates the layer-, community- and role-level (node-selection)
    bandits used during adaptive sampling."""

    def __init__(self, graph, sample, queried, commode=0, bmode=1):
        super(BanditManager, self).__init__()
        self._bmode = bmode
        self._epsilon = 0.2
        self._graph = graph
        self._sample = sample
        self._queried = queried
        self._layers = range(0, len(graph))
        self._lbandit = bandit.MultiArmBandit(self._layers,
                                              mode=self._bmode, epsilon=self._epsilon)
        self._cbandit = [None for _ in self._layers]
        self._rbandit = [NodeSelection(s) for s in self._sample]
        self._commode = commode

    def getLayerEstimates(self):
        """Return the estimated rewards for all the layers."""
        return self._lbandit.getEst()

    def initializeCBandits(self):
        """Build the community hierarchy and one community bandit per layer."""
        self._hcommunity = CommunitHeirarchy(self._sample, mode=self._commode)
        if self._commode == 0:
            self._commanager = CommunityManager(self._hcommunity)
            self._cbandit = [
                bandit.MultiArmBandit(self._hcommunity.getCommunityIds(l).values(),
                                      mode=self._bmode, epsilon=self._epsilon)
                for l in self._layers]
        else:
            self._commanager = None
            self._cbandit = [
                bandit.MultiArmBandit(self._hcommunity.getCommunityIds(l),
                                      mode=self._bmode, epsilon=self._epsilon)
                for l in self._layers]

    def _nextArms(self):
        """Draw the next (layer, community) arm pair and store it in self._arm."""
        layer_arm, _ = self._lbandit.nextArm()
        if self._commode == 0:
            com_arm, _ = self._cbandit[layer_arm].nextArm(
                arms=self._commanager.getActiveCommunities(layer_arm))
        else:
            com_arm, _ = self._cbandit[layer_arm].nextArm()
        self._arm = [layer_arm, com_arm, -1]

    def getArms(self):
        """Return the currently selected [layer, community, role] arms."""
        return self._arm

    def updateReward(self, reward, sample):
        """Propagate a (layer, community, node) reward triple to the bandits."""
        self._lbandit.updateEst(reward[0], self._arm[0])
        self._rbandit[self._arm[0]].update(reward[2], sample[self._arm[0]])
        if self._commode == 0:
            self._updateRewardCBandit(reward[1], True)
            self._commanager.updateCReward(self._arm[0], self._arm[1], reward[1])
            self._commanager.switchArm(self._arm[0])
        else:
            self._cbandit[self._arm[0]].updateEst(reward[1], self._arm[1])

    def _updateRewardCBandit(self, reward, updateparent=False):
        """Update the community bandit for the current arm; when
        `updateparent` is True, also update every ancestor community."""
        targets = [self._arm[1]]
        if updateparent:
            targets += self._hcommunity.getAncestors(self._arm[0], self._arm[1])
        layer_bandit = self._cbandit[self._arm[0]]
        for cid in targets:
            layer_bandit.updateEst(reward, cid)

    def getNode(self, count=1):
        """Pick the next node to query from the selected layer/community."""
        self._nextArms()
        while self._arm is None:
            self._nextArms()
        pool = set(self._hcommunity.getNodes(self._arm[0], self._arm[1]))
        pool.difference_update(self._queried[0])
        return self._rbandit[self._arm[0]].nextNode(pool)
class RoleManager(object):
    """Selects sample nodes that play a given structural role
    (e.g. highest degree, lowest core number) within a layer."""

    def __init__(self, sample, queried):
        super(RoleManager, self).__init__()
        self._sample = sample
        self._queried = queried
        # (metric, extreme) pairs, addressed by index in getNode().
        self._roles = [('degree', 'highest'), ('degree', 'lowest'),
                       ('betweeness', 'highest'), ('betweeness', 'lowest'),
                       ('core', 'highest'), ('core', 'lowest'),
                       ('random', '')]
        self._cache = [{} for _ in self._sample]

    def getRoles(self):
        """Return the list of available (metric, extreme) roles."""
        return self._roles

    def clearCache(self):
        """Drop all cached per-layer values."""
        self._cache = [{} for _ in self._sample]

    def getNode(self, layer, role, nodes=None, count=1):
        """
        Get up to `count` nodes satisfying `role` from `layer`.
        If `nodes` is given, select only from that list.
        Returns False when no unqueried candidate exists.
        """
        s = self._sample[layer]
        r = self._roles[role]
        candidates = set(list(s.nodes()))
        if nodes is not None:
            # BUG FIX: set.intersection() returns a new set; the result was
            # previously discarded, so the `nodes` filter had no effect.
            candidates.intersection_update(nodes)
            # NOTE(review): original restricted the subgraph on layer 0
            # rather than `layer` — kept as-is; confirm intent.
            s = self._sample[0].subgraph(candidates)
        # Restrict candidates to only unqueried nodes
        candidates.difference_update(self._queried[0])
        if len(candidates) == 0:
            return False
        # Get nodes and values
        if r[0] == 'degree':
            vals = nx.degree_centrality(s)
        elif r[0] == 'betweeness':
            vals = nx.betweenness_centrality(s, k=min(10, s.number_of_nodes()))
        elif r[0] == 'closeness':
            vals = nx.closeness_centrality(s)
        elif r[0] == 'clustering':
            vals = nx.clustering(s)
        elif r[0] == 'eig':
            vals = nx.eigenvector_centrality_numpy(s, max_iter=100, tol=1e-2)
        elif r[0] == 'core':
            vals = nx.core_number(s)
        elif r[0] == 'random':
            candidates = list(candidates)
            np.random.shuffle(candidates)
            return candidates[:count]
        # Filter to only nodes in candidate list
        candidates = {u: vals[u] for u in candidates}
        # Sort by the value
        candidates = sorted(candidates, key=candidates.get)
        # Return highest or lowest depending on role
        if r[1] == 'lowest':
            return candidates[:count]
        else:
            return candidates[-count:]
class CommunitHeirarchy(object):
    """Per-layer community structure.

    mode 0: hierarchical (dendrogram-based) communities via mycommunity.
    mode 1: flat partitions; hierarchy queries return None/True trivially.
    """

    def __init__(self, sample, mode=0):
        super(CommunitHeirarchy, self).__init__()
        self._sample = sample
        self._mode = mode
        self._initializeCommunities()

    def getLayerCount(self):
        """Return the number of layers in the sample."""
        return len(self._sample)

    def _initializeCommunities(self):
        """
        Initialize communities for each of the layers.
        """
        self._ocom = {}        # layer -> Community object (mode 0) or {cid: [nodes]} (mode 1)
        self._dendrogram = []  # layer-indexed flattened dendrograms (mode 0 only)
        self._com_ids = {}     # layer -> community ids
        for i in range(0, len(self._sample)):
            partition, _ = getApproxPartition(self._sample[i], single=False)
            if self._mode == 0:
                com = mycommunity.Community(self._sample[i], partition)
                com.generateDendrogram(stop_max=False)
                tree = com.communityTree()
                ids = com.communityIds()
                # BUG FIX: _ocom and _com_ids are dicts; calling .append()
                # on them raised AttributeError. Index by layer instead.
                self._ocom[i] = com
                self._dendrogram.append(com.flattenDendrogram(tree=tree))
                self._com_ids[i] = {j: ids[j] for j in range(0, len(ids))}
            else:
                self._com_ids[i] = list(set(partition.values()))
                self._ocom[i] = {c: [] for c in self._com_ids[i]}
                for u in partition:
                    self._ocom[i][partition[u]].append(u)

    def getCommunityIds(self, layer):
        """
        Get the community ids of `layer`.
        """
        return self._com_ids[layer]

    def getNodes(self, layer, cid):
        """
        Get the nodes in community `cid` of `layer`.
        """
        if self._mode == 0:
            return self._ocom[layer].nodesInCommunity(cid)
        else:
            return self._ocom[layer][cid]

    def getRootCommunity(self):
        """
        Get the root community of each layer.
        """
        if self._mode == 0:
            # BUG FIX: _ocom is a dict keyed by layer; iterating it directly
            # yielded integer keys, not Community objects.
            return [self._ocom[l].getRoot() for l in range(0, len(self._sample))]
        else:
            return self._com_ids

    def getChildren(self, layer, cid):
        """
        Get the children of cid in layer (mode 0 only).
        """
        if self._mode == 0:
            return self._dendrogram[layer][cid]['children']
        else:
            return None

    def getParent(self, layer, cid):
        """
        Get the parent of cid in layer (mode 0 only).
        """
        if self._mode == 0:
            return self._dendrogram[layer][cid]['parent']
        else:
            return None

    def getSibling(self, layer, cid):
        """
        Get the siblings of cid in layer (mode 0 only).
        """
        if self._mode == 0:
            return self._dendrogram[layer][cid]['siblings']
        else:
            return None

    def checkRoot(self, layer, cid):
        """
        Check if cid is root in the dendrogram.
        Returns True if root, otherwise False.
        """
        if self._mode == 0:
            # NOTE(review): this tests that the dendrogram entry exists,
            # not that cid has no parent — looks suspicious; confirm.
            return self._dendrogram[layer][cid] is not None
        else:
            return True

    def checkLeaf(self, layer, cid):
        """
        Check if cid is a leaf in the dendrogram.
        """
        if self._mode == 0:
            # NOTE(review): presumably this should test the 'children'
            # entry rather than the entry dict itself — confirm.
            return len(self._dendrogram[layer][cid]) == 0
        else:
            return True

    def getAncestors(self, layer, cid):
        """
        Get all the ancestors of cid in layer, nearest first.
        """
        if self._mode == 0:
            parent = self.getParent(layer, cid)
            if parent is None:
                return []
            return [parent] + self.getAncestors(layer, parent)
        else:
            return None

    def getDecendents(self, layer, cid):
        """
        Get all descendants of cid in layer (mode 0 only).
        """
        if self._mode == 0:
            children = self.getChildren(layer, cid)
            if len(children) == 0:
                return []
            return ([children[0], children[1]]
                    + self.getDecendents(layer, children[0])
                    + self.getDecendents(layer, children[1]))
        else:
            return None
class MABSample(object):
"""docstring for MABSample"""
def __init__(self, graph, sample, lweight, lcosts, queried, budget, results_continuous=True, partition=None, bmode=1):
    """Set up the MAB sampling state: bandits, evaluation, bookkeeping.

    graph/sample/queried are layer-indexed; budget tracks query spend.
    """
    super(MABSample, self).__init__()
    self._sample = sample
    self._graph = graph
    self._lweight = lweight
    self._lcosts = lcosts
    self._queried = queried
    self._budget = budget
    self._results_continuous = results_continuous
    self._bandit = BanditManager(self._graph, self._sample, self._queried, commode=1, bmode=bmode)
    self._evaluation = Evaluation(self._graph, partition=partition)
    # Record results roughly every 1% of nodes, at least every 5 queries.
    self._step = max(5, int(self._graph[0].number_of_nodes() / 100))
    self._window_size = 10
    self._importance_threshold = 2  # Minimum importance of cheap layers to aggregate
    self._scores = []
    self._rtime = []
    self._ppart = [None, None]  # The previous best partition used for community update distance computation
    self._psim = 0  # Similarity between previous 2 partitions
    # BUG FIX: _rewards()/_checkTerminate() append to and read
    # self._past_distances, but it was never initialized here.
    self._past_distances = []
    self._node_sequence = []  # Sequence of nodes queried
    self._nodes_count = self._graph[0].number_of_nodes()
    self._batch_size = 1
    self._prevsim = [0.0, 0.0, 0.0]  # For blackbox reward
def _initializeSample(self):
    """
    Add nodes and edges from 'valid' layers to the sample of interest
    (layer 0).

    Edges from layers whose overlap importance exceeds
    self._importance_threshold are merged into layer 0's sample; edges
    from low-importance layers are removed from it. Finally the previous
    partition/modularity state used by streaming updates is seeded.
    """
    importances = self._lweight.getLayerOverlap()
    edges_add = set([])  # edges to add
    edges_sub = set([])  # edges to remove
    for i in range(1, len(self._sample)):
        # Nodes of layer i that have not been queried in layer 0 yet.
        nodes = set(list(self._sample[i].nodes()))
        nodes.difference_update(self._queried[0])  # nodes that have not been queried in layer 0
        sg = self._sample[i].subgraph(nodes)
        # frozenset makes the edge direction-insensitive; skip self-loops.
        edges = [frozenset(e) for e in sg.edges() if e[0] != e[1]]
        if importances[i] > self._importance_threshold:
            edges_add.update(edges)
        else:
            edges_sub.update(edges)
    # Edges to remove cannot be in edges to add
    edges_sub.difference_update(edges_add)
    # nodes always exist in 0 since its multiplex
    # NOTE(review): only the last loop iteration's `nodes` is added here —
    # looks like this line was meant to run inside the loop; confirm.
    self._sample[0].add_nodes_from(nodes)
    # Update sample 0
    self._sample[0].add_edges_from([list(e) for e in edges_add])
    self._sample[0].remove_edges_from([list(e) for e in edges_sub])
    # Seed previous-partition state for the streaming partition updates.
    self._ppart[1], self._pmod = getApproxPartition(self._sample[0])
def _getStreamingPartition(self, neighbors_all, subgraph):
    """
    Compute the community partition after new nodes (with their neighbor
    sets given in `neighbors_all`) are added to the current sample.

    Returns (partition, modularity). Each new node is greedily assigned
    to the community most common among its already-queried neighbors;
    if that degrades modularity a full detection is re-run.
    (The original carried a duplicated docstring/comment block; removed.)
    """
    # Degenerate sample: every node is its own community.
    if subgraph.number_of_edges() == 0:
        cpart = {i: i for i in subgraph.nodes()}
        cmod = 1  # So that next iteration there is proper community detection
        return cpart, cmod
    # Early on, or periodically, run full (non-incremental) detection.
    if len(self._queried[0]) < 10 or len(self._queried[0]) % 50 == 0:
        cpart = community.best_partition(subgraph, randomize=False)
        cmod = community.modularity(cpart, subgraph)
        return cpart, cmod
    tpart = dict(self._ppart[1])
    # Hoisted out of the loop: the community id set is loop-invariant.
    known_coms = set(self._ppart[1].values())
    for u in neighbors_all:
        n = neighbors_all[u]
        candidates = self._queried[0].intersection(n)
        counts = {c: 0 for c in known_coms}
        for v in candidates:
            if v in self._ppart[1]:
                counts[self._ppart[1][v]] += 1
        # Assign u to the partition with most common neighbors
        # (maximizes modularity greedily).
        c = max(counts.items(), key=operator.itemgetter(1))[0]
        tpart[u] = c
    tmod = community.modularity(tpart, subgraph)
    if tmod < self._pmod:
        # Greedy assignment hurt modularity: recompute from scratch.
        cpart, cmod = getApproxPartition(subgraph)
        return cpart, cmod
    else:
        return tpart, tmod
def _communityUpdateDistance(self, cpart, u):
    """
    Compute the change in community structure between the communities of
    the current sample and the previous one.

    Returns a signed NMI-based distance: magnitude is
    1 - NMI(previous partition, cpart); the sign encodes direction
    relative to the partition from two updates ago (negative when
    converging back towards it).
    """
    # Only nodes present in both partitions (and already queried) are comparable.
    nodes = self._queried[0].intersection(self._ppart[1].keys()).intersection(cpart.keys())
    part1 = [self._ppart[1][u] for u in nodes]  # NOTE: loop var shadows parameter u
    part2 = [cpart[u] for u in nodes]
    nmi = 1 - normalized_mutual_info_score(part1, part2)
    if self._ppart[0] is None:
        # No second-previous partition yet: no direction information.
        return nmi
    # Find direction: compare against the partition from two updates ago.
    part1 = [self._ppart[0][u] for u in nodes if u in self._ppart[0]]
    part2 = [cpart[u] for u in nodes if u in self._ppart[0]]
    dnmi = 1 - normalized_mutual_info_score(part1, part2)
    direction = 1.0
    if dnmi < self._psim:
        # Distance to the older partition shrank: moving back towards it,
        # so report the change as negative.
        direction = -1.0
    self._psim = dnmi
    return direction * dnmi
def _rewards(self, cpart, u):
    """Return the (layer, community, role) reward triple derived from the
    community-update distance after querying node u."""
    delta = self._communityUpdateDistance(cpart, u)
    # Magnitude history drives the termination check; the sign carries direction.
    self._past_distances.append(np.abs(delta))
    return (delta, delta, delta)
def _checkTerminate(self):
    """
    Check whether the current MAB iteration should end.

    Returns 0 to continue, 1 to stop (the recent community-change
    distances have flattened out below 0.10).
    """
    # Need a window of at least 5 observations before deciding.
    if len(self._past_distances) < 5:
        return 0
    if np.mean(self._past_distances[-5:]) < 0.10:
        return 1
    # BUG FIX: the original repeated the identical `< 0.10` test and
    # returned 2 from it — unreachable dead code, removed.
    return 0
def getScores(self):
    """Return the list of evaluation scores recorded so far."""
    return self._scores
def getRunningTime(self):
    """Return the list of recorded running times."""
    return self._rtime
def getNodeSequence(self):
    """Return the sequence of nodes queried so far."""
    return self._node_sequence
def _resultsStep(self):
    """Decide whether evaluation results should be recorded now."""
    # Small graphs: record at every step.
    if self._graph[0].number_of_nodes() < 1000:
        return True
    # Larger graphs: record roughly once every self._step units of budget.
    return self._budget.getBudgetConsumed() % self._step < 1
def _blackBoxReward(self, cpart, u):
sim | |
colisão - ocupante não especificado de um ônibus traumatizado em um acidente não-de-trânsito'),
('V78.4', 'Ocupante de um ônibus traumatizado em um acidente de transporte sem colisão - pessoa traumatizado ao subir ou descer do veículo'),
('V78.5', 'Ocupante de um ônibus traumatizado em um acidente de transporte sem colisão - condutor [motorista] traumatizado em um acidente de trânsito'),
('V78.6', 'Ocupante de um ônibus traumatizado em um acidente de transporte sem colisão - passageiro traumatizado em um acidente de trânsito'),
('V78.7', 'Ocupante de um ônibus traumatizado em um acidente de transporte sem colisão - pessoa viajando no exterior do veículo traumatizada em um acidente de trânsito'),
('V78.9', 'Ocupante de um ônibus traumatizado em um acidente de transporte sem colisão - ocupante não especificado de um ônibus traumatizado em um acidente de trânsito'),
('V79.0', 'Condutor [motorista] traumatizado em colisão com outros veículos e com veículos não especificados, a motor, em um acidente não-de-trânsito'),
('V79.1', 'Passageiro traumatizado em colisão com outros veículos e com veículos não especificados, a motor, em um acidente não-de-trânsito'),
('V79.2', 'Ocupante não especificado de um ônibus traumatizado em colisão com outros veículos e com um veículos não especificados, a motor, em um acidente não-de-trânsito'),
('V79.3', 'Ocupante [qualquer] de um ônibus traumatizado em um acidente não-de-trânsito'),
('V79.4', 'Condutor [motorista] traumatizado em colisão com outros veículos e com veículos não especificados, a motor, em um acidente de trânsito'),
('V79.5', 'Passageiro traumatizado em colisão com outros veículos e com veículos não especificados, a motor, em um acidente de trânsito'),
('V79.6', 'Ocupante não especificado de um ônibus traumatizado em colisão com um outros veículos e com um veículos não especificados, a motor, em um acidente de trânsito'),
('V79.8', 'Ocupante [ qualquer] de um ônibus traumatizado em outros acidentes de transporte especificados'),
('V79.9', 'Ocupante [qualquer] de um ônibus traumatizado em um acidente de trânsito não especificado'),
('V80.0', 'Queda ou ejeção de uma pessoa montada em animal ou ocupante de um veículo a tração animal em um acidente sem colisão'),
('V80.1', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um pedestre ou um animal'),
('V80.2', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um veículo a pedal'),
('V80.3', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um veículo a motor de duas ou três rodas'),
('V80.4', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um automóvel [carro], uma caminhonete, um veículo de transporte pesado ou um ônibus'),
('V80.5', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um outro veículo a motor especificado'),
('V80.6', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um trem [comboio] ou um veículo ferroviário'),
('V80.7', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um outro veículo não-motorizado'),
('V80.8', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em colisão com um objeto fixo ou parado'),
('V80.9', 'Pessoa montada em animal ou ocupante de um veículo a tração animal traumatizado em outros acidentes de transporte e em acidentes de transporte não especificados'),
('V81.0', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em colisão com um veículo a motor em um acidente não-de-trânsito'),
('V81.1', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em colisão com um veículo a motor em um acidente de trânsito'),
('V81.2', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em colisão com ou atingido por material rodante'),
('V81.3', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em colisão com outro objeto'),
('V81.4', 'Pessoa traumatizada ao subir ou descer de um trem [comboio] ou de um veículo ferroviário'),
('V81.5', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado devido a uma queda no interior dos mesmos'),
('V81.6', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado devido a uma queda do trem ou do veículo ferroviário'),
('V81.7', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em razão de um descarrilhamento sem colisão antecedente'),
('V81.8', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em outro acidente ferroviário especificado'),
('V81.9', 'Ocupante de um trem [comboio] ou de um veículo ferroviário traumatizado em um acidente ferroviário não especificado'),
('V82.0', 'Ocupante de um bonde [carro elétrico] traumatizado em uma colisão com um veículo a motor em um acidente não-de-trânsito'),
('V82.1', 'Ocupante de um bonde [carro elétrico] traumatizado em uma colisão com um veículo a motor em um acidente de trânsito'),
('V82.2', 'Ocupante de um bonde [carro elétrico] traumatizado em uma colisão com ou atingido por material rodante'),
('V82.3', 'Ocupante de um bonde [carro elétrico] traumatizado em uma colisão com outro objeto'),
('V82.4', 'Pessoa traumatizada ao subir ou descer de um bonde [carro elétrico]'),
('V82.5', 'Ocupante de um bonde [carro elétrico] traumatizado devido a uma queda no interior do mesmo'),
('V82.6', 'Ocupante de um bonde [carro elétrico] traumatizado devido a uma queda do mesmo');
INSERT INTO servicos_cid (cid_id, descricao) VALUES
('V82.7', 'Ocupante de um bonde [carro elétrico] traumatizado devido a um descarrilhamento sem colisão antecedente'),
('V82.8', 'Ocupante de um bonde [carro elétrico] traumatizado em outros acidentes de transporte especificados'),
('V82.9', 'Ocupante de um bonde [carro elétrico] traumatizado em um acidente de trânsito não especificado'),
('V83.0', 'Condutor [motorista] de um veículo especial a motor usado principalmente em áreas industrias traumatizado em um acidente de trânsito'),
('V83.1', 'Passageiro de um veículo especial a motor usado principalmente em áreas industriais traumatizado em um acidente de trânsito'),
('V83.2', 'Pessoa viajando no exterior de um veículo especial a motor usado principalmente em áreas industriais traumatizado em um acidente de trânsito'),
('V83.3', 'Ocupante não especificado de um veículo especial a motor usado principalmente em áreas industriais traumatizado em um acidente de trânsito'),
('V83.4', 'Pessoa traumatizada ao subir ou descer de um veículo especial a motor usado principalmente em áreas industriais'),
('V83.5', 'Condutor [motorista] de um veículo especial a motor usado principalmente em áreas industrias traumatizado em um acidente não-de-trânsito'),
('V83.6', 'Passageiro de um veículo especial a motor usado principalmente em áreas industriais traumatizado em um acidente não-de-trânsito'),
('V83.7', 'Pessoa viajando no exterior de um veículo especial a motor usado principalmente em áreas industriais traumatizado em um acidente não-de-trânsito'),
('V83.9', 'Ocupante não especificado de um veículo especial a motor usado principalmente em áreas industriais traumatizado em um acidente não-de-trânsito'),
('V84.0', 'Condutor [motorista] de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente de trânsito'),
('V84.1', 'Passageiro de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente de trânsito'),
('V84.2', 'Pessoa viajando no exterior de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente de trânsito'),
('V84.3', 'Ocupante não especificado de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente de trânsito'),
('V84.4', 'Pessoa traumatizado ao subir ou descer de um veículo especial a motor de uso essencialmente agrícola'),
('V84.5', 'Condutor [motorista] de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente não-de-trânsito'),
('V84.6', 'Passageiro de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente não-de-trânsito'),
('V84.7', 'Pessoa viajando no exterior de um veículo especial a motor de uso essencialmente agrícola traumatizado em um acidente não-de-trânsito'),
('V84.9', 'Ocupante não especificado de um veículo especial a motor de uso essencialmente agrícola traumatizada em um acidente não-de-trânsito'),
('V85.0', 'Condutor [motorista] de um veículo a motor especial de construções traumatizado em um acidente de trânsito'),
('V85.1', 'Passageiro de um veículo a motor especial de construções traumatizado em um acidente de trânsito'),
('V85.2', 'Pessoa viajando no exterior de um veículo a motor especial de construções traumatizado em um acidente de trânsito'),
('V85.3', 'Ocupante não especificado de um veículo a motor especial de construções traumatizado em um acidente de trânsito'),
('V85.4', 'Pessoa traumatizada ao subir ou descer de um veículo a motor especial de construções'),
('V85.5', 'Condutor [motorista] de um veículo a motor especial de construções traumatizado em um acidente não-de-trânsito'),
('V85.6', 'Passageiro | |
while self.g_chkreplexit:
try:
if len(grtsps)>0:
for key in grtsps:
if grtsps[key].child.terminated:
pass
del grtsps[key]
# else:
# grtsps[key].write_contents()
finally:
pass
if len(grtsps)>0:
for key in grtsps:
if grtsps[key].child.terminated:
pass
del grtsps[key]
else:
grtsps[key].child.terminate(force=True)
del grtsps[key]
def cleanup_files(self):
    """Delete any temporary files created so far that still exist.

    The list itself is kept (not cleared) in case an exception interrupted
    the kernel before the usual deletion could run.
    """
    # Truthiness check replaces `!= None and len(...) > 0`; also avoids
    # shadowing with the name `file`.
    if self.files:
        for path in self.files:
            if os.path.exists(path):
                os.remove(path)
def new_temp_file(self, **kwargs):
    """Create a named temporary file that survives close().

    The path is registered in self.files so it is removed only when the
    kernel stops (see cleanup_files), not when the handle is closed.
    """
    kwargs['delete'] = False
    kwargs['mode'] = 'w'
    handle = tempfile.NamedTemporaryFile(**kwargs)
    self.files.append(handle.name)
    return handle
def create_codetemp_file(self, magics, code, suffix):
    """Write `code` to a fresh temp file with the given suffix, record its
    path in magics['codefilename'], and return the (closed) file object."""
    # Windows batch/powershell scripts are written as GBK, everything else UTF-8.
    if suffix.strip().lower().endswith((".bat", ".ps1")):
        encodestr = "GBK"
    else:
        encodestr = "UTF-8"
    source_file = self.new_temp_file(suffix=suffix, dir=os.path.abspath(''), encoding=encodestr)
    magics['codefilename'] = source_file.name
    with source_file:
        source_file.write(code)
        source_file.flush()
    return source_file
def _log(self, output, level=1, outputtype='text/plain'):
    """Send a log message to the frontend as a stream response.

    level: 1 = info (stdout), 2 = warning (stderr), 3 = error (stderr).
    Suppressed entirely when self._loglevel is '0' or the kernel is silent.
    """
    if self._loglevel == '0':
        return
    if self.silent:
        return
    if level == 2:
        prestr, streamname = self.kernelinfo + ' Warning:', 'stderr'
    elif level == 3:
        prestr, streamname = self.kernelinfo + ' Error:', 'stderr'
    else:
        prestr, streamname = self.kernelinfo + ' Info:', 'stdout'
    # Send standard output
    self.send_response(self.iopub_socket, 'stream',
                       {'name': streamname, 'text': prestr + output})
def _logln(self, output, level=1, outputtype='text/plain'):
    """Log `output` with a trailing newline.

    BUG FIX: `level` and `outputtype` were previously hard-coded to 1 and
    'text/plain' in the delegated call, so e.g. _logln(..., level=3) was
    silently logged as info on stdout instead of error on stderr.
    """
    self._log(output + "\n", level=level, outputtype=outputtype)
def _write_display_data(self,mimetype='text/html',contents=""):
try:
if mimetype.startswith('image'):
metadata ={mimetype:{}}
# contents=contents
# self._logln(base64.encodebytes(contents))
# contents=base64.encodebytes(contents)
# contents=urllib.parse.quote(base64.b64encode(contents))
header="<div><img alt=\"Output\" src=\"data:"+mimetype+";base64,"
end="\"></div>"
contents=header+base64.encodebytes(contents).decode( errors='ignore')+end
mimetype='text/html'
metadata = {mimetype:{}}
# 'text/html' : {
# 'width': 640,
# 'height': 480
# }
# }
except Exception as e:
self._logln("_write_display_data err "+str(e),3)
return
self.send_response(self.iopub_socket, 'display_data', {'data': {mimetype:contents}, 'metadata': {mimetype:{}}})
def _write_to_stdout(self, contents, magics=None):
    """Write to the frontend: as display data when the cell's magics request
    a specific output type, otherwise as a plain stdout stream message."""
    wants_display = magics is not None and len(magics['_st']['outputtype']) > 0
    if wants_display:
        self._write_display_data(mimetype=magics['_st']['outputtype'], contents=contents)
    else:
        self.send_response(self.iopub_socket, 'stream', {'name': 'stdout', 'text': contents})
def _write_to_stderr(self, contents):
    """Send `contents` to the frontend's stderr stream."""
    payload = {'name': 'stderr', 'text': contents}
    self.send_response(self.iopub_socket, 'stream', payload)
def _read_from_stdin(self):
    """Request and return a line of input from the frontend."""
    return self.raw_input()
def readcodefile(self, filename, spacecount=0):
    """Read a source file and return its contents with every line indented
    by `spacecount` extra spaces; return '' if the file does not exist."""
    if not os.path.exists(filename):
        return ''
    pad = ' ' * spacecount
    with open(os.path.join(os.path.abspath(''), filename), 'r', encoding="UTF-8") as handle:
        lines = handle.readlines()
    # ''.join over an empty list still yields '' (same as the original).
    return ''.join(pad + line for line in lines)
def loadurl(self, url):
    """Fetch `url` and return its body as text, one decoded line per '\\n'.

    Each raw line is also echoed via print(). Returns '' (or a partial
    body) on failure, after logging the error.
    """
    content = ''
    try:
        request = urllib.request.Request(url)
        response = urlopen(request)
        for raw_line in response.readlines():
            print(raw_line)
            content += raw_line.decode() + "\n"
    except Exception as e:
        self._logln("loadurl error! " + str(e), 3)
    return content
#####################################################################
def _start_replprg(self, command, args, magics):
    """Spawn `command args` under pexpect and register the resulting
    IREPLWrapper in self.g_rtsps keyed by the child's pid.

    Failures are reported to the frontend's stderr rather than raised.
    """
    # Signal handlers are inherited by forked processes, and we can't easily
    # reset it from the subprocess. Since kernelapp ignores SIGINT except in
    # message handlers, we need to temporarily reset the SIGINT handler here
    # so that bash and its children are interruptible.
    sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
    self.silent = None
    try:
        child = pexpect.spawn(command, args, timeout=60, echo=False,
                              encoding='utf-8')
        self._write_to_stdout("replchild pid:" + str(child.pid) + "\n")
        self._write_to_stdout("--------process info---------\n")
        self.replwrapper = IREPLWrapper(
            self._write_to_stdout,
            self._write_to_stderr,
            self._read_from_stdin,
            child,
            replsetip=magics['_st']['replsetip'],
            orig_prompt='\r\n',
            prompt_change=None,
            extra_init_cmd=None,
            line_output_callback=self.process_output)
        # Keep the wrapper addressable by pid for later send_replcmd calls.
        self.g_rtsps[str(self.replwrapper.child.pid)] = self.replwrapper
    except Exception as e:
        self._write_to_stderr("[MyPythonkernel] Error:Executable _start_replprg error! " + str(e) + "\n")
    finally:
        # Always restore the original SIGINT handler.
        signal.signal(signal.SIGINT, sig)
def process_output(self, output, magics=None):
    """Forward subprocess output to the frontend (no-op when silent)."""
    if self.silent:
        return
    if magics is not None and len(magics['_st']['outputtype']) > 0:
        self._write_display_data(mimetype=magics['_st']['outputtype'], contents=output)
        return
    # Default: plain stdout stream message.
    self.send_response(self.iopub_socket, 'stream', {'name': 'stdout', 'text': output})
def send_replcmd(self, code, silent, store_history=True,
                 user_expressions=None, allow_stdin=False, magics=None):
    """Send `code` to a running REPL child and return a Jupyter
    execute-reply dict.

    If magics names a live child pid present in self.g_rtsps, the command
    goes to that wrapper; otherwise it goes to self.replwrapper. Output is
    streamed incrementally by IREPLWrapper, not returned here.
    """
    self.silent = silent
    if not code.strip():
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
    interrupted = False
    try:
        # Note: timeout=None tells IREPLWrapper to do incremental
        # output. Also note that the return value from
        # run_command is not needed, because the output was
        # already sent by IREPLWrapper.
        self._write_to_stdout("send replcmd:" + code.rstrip() + "\n")
        self._write_to_stdout("---Received information after send repl cmd---\n")
        if magics and len(magics['_st']['replchildpid']) > 0:
            if self.g_rtsps[magics['_st']['replchildpid']] and \
               self.g_rtsps[magics['_st']['replchildpid']].child and \
               not self.g_rtsps[magics['_st']['replchildpid']].child.terminated:
                self.g_rtsps[magics['_st']['replchildpid']].run_command(code.rstrip(), timeout=None)
        else:
            if self.replwrapper and \
               self.replwrapper.child and \
               not self.replwrapper.child.terminated:
                self.replwrapper.run_command(code.rstrip(), timeout=None)
            pass
    except KeyboardInterrupt:
        # NOTE(review): the interrupt path references self.gdbwrapper, not
        # the repl wrapper used above — looks copy-pasted from a GDB
        # kernel; confirm which wrapper should receive the interrupt.
        self.gdbwrapper.child.sendintr()
        interrupted = True
        self.gdbwrapper._expect_prompt()
        output = self.gdbwrapper.child.before
        self.process_output(output)
    except EOF:
        pass
    if interrupted:
        return {'status': 'abort', 'execution_count': self.execution_count}
    # Exit-code querying is disabled; the command is treated as successful.
    exitcode = 0
    if exitcode:
        error_content = {'execution_count': self.execution_count,
                         'ename': '', 'evalue': str(exitcode), 'traceback': []}
        self.send_response(self.iopub_socket, 'error', error_content)
        error_content['status'] = 'error'
        return error_content
    else:
        return {'status': 'ok', 'execution_count': self.execution_count,
                'payload': [], 'user_expressions': {}}
#####################################################################
    def do_shell_command(self,commands,cwd=None,shell=True,env=True,magics=None):
        """Execute a shell command as a monitored subprocess.

        In REPL-command mode (magics ``_bt.replcmdmode`` non-empty) the first
        entry of *commands* is split into program + arguments and handed to
        ``_start_replprg`` instead of spawning a one-shot subprocess.
        Otherwise *commands* is tokenized (double-quoted arguments are kept
        intact) and run via ``create_jupyter_subprocess``; the child is
        registered in ``self.g_rtsps`` by PID for the duration of the run.

        NOTE(review): in REPL mode *commands* is indexed like a list
        (``commands[0]``) but in the normal path treated as a string
        (``commands.strip()``) — confirm callers pass the matching type.
        Any failure (including ``magics`` being ``None``) is caught and only
        logged.
        """
        try:
            if len(magics['_bt']['replcmdmode'])>0:
                # REPL mode: split "prog arg1 arg2 ..." once on the first space.
                findObj= commands[0].split(" ",1)
                if findObj and len(findObj)>1:
                    cmd=findObj[0]
                    arguments=findObj[1]
                    cmdargs=[]
                    # Tokenize while preserving double-quoted arguments.
                    for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', arguments):
                        cmdargs += [argument.strip('"')]
                    self._write_to_stdout(cmd)
                    self._write_to_stdout(''.join((' '+ str(s) for s in cmdargs))+"\n")
                    self._start_replprg(cmd,cmdargs,magics)
                    return
            # Normal path: tokenize the whole command string.
            cmds=[]
            for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', commands.strip()):
                cmds += [argument.strip('"')]
            p = self.create_jupyter_subprocess(cmds,cwd,shell,env=env,magics=magics)
            if magics!=None and len(self.get_magicsbykey(magics,'showpid'))>0:
                self._write_to_stdout("The process PID:"+str(p.pid)+"\n")
            # Register the child so other cells can send it input by PID.
            self.g_rtsps[str(p.pid)]=p
            returncode=p.wait_end(magics)
            del self.g_rtsps[str(p.pid)]
            if returncode != 0:
                self._logln("Executable command exited with code {}\n".format(returncode),3)
            else:
                self._logln("command success.\n")
            return
        except Exception as e:
            self._logln("Executable command error! "+str(e)+"\n",3)
def do_Py_command(self,commands,cwd=None,shell=False,env=True,magics=None):
try:
cmds=[]
for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', commands.strip()):
cmds += [argument.strip('"')]
p = self.create_jupyter_subprocess(['python']+cmds,cwd,shell,env=env,magics=magics)
if magics!=None and len(self.get_magicsbykey(magics,'showpid'))>0:
self._write_to_stdout("The process PID:"+str(p.pid)+"\n")
self.g_rtsps[str(p.pid)]=p
returncode=p.wait_end(magics)
del self.g_rtsps[str(p.pid)]
if returncode != 0:
self._logln("Executable python exited with code {}".format(returncode))
else:
self._logln("command python success.")
except Exception as e:
self._logln("Executable python command error! "+str(e)+"\n",3)
return
def send_cmd(self,pid,cmd):
try:
# self._write_to_stdout("send cmd PID:"+pid+"\n cmd:"+cmd)
# if self.g_rtsps.has_key(pid):
# self._write_to_stderr("[MyPythonkernel] Info:exist! "+pid+"\n")
# self.g_rtsps[pid].stdin.write(cmd.encode())
self.g_rtsps[pid]._write_to_stdout(cmd)
except Exception as e:
self._log("Executable send_cmd error! "+str(e)+"\n")
return
    def create_jupyter_subprocess(self, cmd,cwd=None,shell=False,env=None,magics=None,outencode='UTF-8'):
        """Spawn *cmd* as a RealTimeSubprocess wired to the kernel's streams.

        When the ``runinterm`` magic is set, the command is first wrapped in a
        terminal-launcher script (``create_termrunsh``) and *cmd* is rewritten
        to open it in the configured terminal emulator.

        NOTE(review): callers such as do_shell_command pass ``env=True``; here
        ``len(env)`` on a bool raises TypeError, which is caught below and
        re-raised — confirm whether env is meant to be a mapping or a flag.
        Raises: re-raises any failure after echoing it to stdout.
        """
        try:
            # Fall back to the kernel's own environment when none is supplied.
            if env==None or len(env)<1:
                env=os.environ
            if magics!=None and len(self.addmagicsBkey(magics,'runinterm'))>0:
                # Terminal mode: build a launcher script and rewrite cmd to
                # start it inside the configured terminal emulator.
                self.inittermcmd(magics)
                if len(magics['_st']['term'])<1:
                    self._logln("no term!",2)
                execfile=''
                for x in cmd:
                    execfile+=x+" "
                cmdshstr=self.create_termrunsh(execfile,magics)
                if self.sys=='Windows':
                    cmd=magics['_st']['term']+[cmdshstr]
                elif self.sys=='Linux':
                    cmd=magics['_st']['term']+['--',cmdshstr]
                else:
                    # Unknown platforms use the Linux-style invocation.
                    cmd=magics['_st']['term']+['--',cmdshstr]
                cstr=''
                for x in cmd: cstr+=x+" "
                self._logln(cstr)
            return RealTimeSubprocess(cmd,
                                      self._write_to_stdout,
                                      self._write_to_stderr,
                                      self._read_from_stdin,cwd,shell,env,self,outencode=outencode)
        except Exception as e:
            self._write_to_stdout("RealTimeSubprocess err:"+str(e))
            raise
def getossubsys(self):
uname=''
try:
u=os.popen('bash -c "uname"')
uname=u.read()
except Exception as e:
self._logln(""+str(e),3)
return uname
    def inittermcmd(self,magics):
        """Choose a terminal-emulator launch command and cache it in magics.

        Returns '' immediately when ``magics['_st']['term']`` is already
        populated; otherwise picks a platform default (mintty on
        MSYS/Cygwin, gnome-terminal on Linux, cmd.exe on Windows), tokenizes
        it into ``magics['_st']['term']`` and returns the raw command string.

        NOTE(review): the MINGW64/CYGWIN check is a plain ``if`` followed by
        a separate if/elif on self.sys — on Windows the cmd.exe branch
        overwrites the mintty choice made just above; confirm that is
        intended.
        """
        if len(magics['_st']['term'])>0:return ''
        termcmd=''
        try:
            if self.subsys.startswith('MINGW64') or self.subsys.startswith('CYGWIN'):
                termcmd='mintty "/usr/bin/bash" --login'
            if self.sys=='Linux':
                termcmd='gnome-terminal'
            elif self.sys=='Windows':
                termcmd='c:\\Windows\\System32\\cmd.exe /c start'
        except Exception as e:
            self._logln(""+str(e),3)
        if len(termcmd)>1:
            magics['_st']['term']=[]
            # Tokenize while preserving double-quoted arguments.
            for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', termcmd):
                magics['_st']['term'] += [argument.strip('"')]
        return termcmd
##//%overwritefile
##//%file:../src/create_termrunsh.py
##//%noruncode
def create_termrunsh(self,execfile,magics):
fil_ename=execfile
uname=''
try:
u=os.popen('bash -c "uname"')
uname=u.read()
except Exception as e:
self._logln(""+str(e),3)
if self.subsys.startswith('MINGW64') or self.subsys.startswith('CYGWIN'):
pausestr=self.pausestr
termrunsh="\n"+execfile+"\n"+pausestr+"\n"
termrunsh_file=self.create_codetemp_file(magics,termrunsh,suffix='.sh')
newsrcfilename=termrunsh_file.name
fil_ename=newsrcfilename
elif self.sys=='Windows' :
termrunsh="echo off\r\ncls\r\n"+execfile+"\r\npause\r\nexit\r\n"
if execfile.strip().lower().endswith(".bat"):
termrunsh="echo off\r\ncls\r\ncall "+execfile+"\r\npause\r\nexit\r\n"
termrunsh_file=self.create_codetemp_file(magics,termrunsh,suffix='.bat')
newsrcfilename=termrunsh_file.name
fil_ename=newsrcfilename
elif self.sys=='Linux':
pausestr=self.pausestr
termrunsh="\n"+execfile+"\n"+pausestr+"\n"
termrunsh_file=self.create_codetemp_file(magics,termrunsh,suffix='.sh')
newsrcfilename=termrunsh_file.name
fil_ename=newsrcfilename
else:
self._logln("Cannot create terminal!",3)
self._logln(fil_ename)
os.chmod(newsrcfilename,stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IXOTH)
return fil_ename
def generate_Pythonfile(self, source_filename, binary_filename, cflags=None, ldflags=None):
return
#####################################################################
#####################################################################
def _add_main(self, magics, code):
# remove comments
tmpCode = re.sub(r"//.*", "", code)
tmpCode = re.sub(r"/\*.*?\*/", "", tmpCode, flags=re.M|re.S)
x = re.search(r".*\s+main\s*\(", tmpCode)
if not x:
code = self.main_head + code + self.main_foot
# magics['_st']['cflags'] += ['-lm']
return magics, code
def raise_plugin(self,code,magics,returncode=None,filename='',ifunc=1,ieven=1)->Tuple[bool,str]:
bcancel_exec=False
bretcancel_exec=False
retstr=''
for pluginlist in self.plugins:
for pkey,pvalue in pluginlist.items():
# print( pkey +":"+str(len(pvalue))+"\n")
for pobj in pvalue:
newline=''
try:
# if key in pobj.getIDSptag(pobj):
if ifunc==1 and ieven==1:
bretcancel_exec,retstr=pobj.on_before_buildfile(pobj,code,magics)
elif ifunc==2 and ieven==1:
bretcancel_exec,retstr=pobj.on_before_compile(pobj,code,magics)
elif ifunc==3 and ieven==1:
bretcancel_exec,retstr=pobj.on_before_exec(pobj,code,magics)
elif ifunc==1 and ieven==2:
bretcancel_exec=pobj.on_after_buildfile(pobj,returncode,filename,magics)
elif ifunc==2 and ieven==2:
bretcancel_exec=pobj.on_after_compile(pobj,returncode,filename,magics)
elif ifunc==3 and ieven==2:
bretcancel_exec=pobj.on_after_exec(pobj,returncode,filename,magics)
elif ifunc==3 and ieven==3:
bretcancel_exec=pobj.on_after_completion(pobj,returncode,filename,magics)
bcancel_exec=bretcancel_exec & bcancel_exec
if bcancel_exec:
return bcancel_exec,""
except Exception as e:
self._log(pobj.getName(pobj)+"---"+str(e)+"\n")
finally:pass
return bcancel_exec,retstr
def do_execute_script(self, code, magics,silent, store_history=True,
user_expressions=None, allow_stdin=True):
try:
bcancel_exec,retinfo,magics, code=self.do_preexecute(
code,magics,
silent, store_history,user_expressions, allow_stdin)
if bcancel_exec:return retinfo
return_code=0
fil_ename=''
retstr=''
bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,1,1)
if bcancel_exec:return self.get_retinfo()
bcancel_exec,retinfo,magics, code,fil_ename,retstr=self.do_create_codefile(
magics,code,
silent, store_history,user_expressions, allow_stdin)
if bcancel_exec:return retinfo
code,magics,return_code,fil_ename
bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,1,2)
if bcancel_exec:return self.get_retinfo()
fil_ename=magics['codefilename']
if len(self.get_magicsbykey(magics,'noruncode'))>0:
bcancel_exec=True
return self.get_retinfo()
bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,2,1)
if bcancel_exec:return self.get_retinfo()
bcancel_exec,retinfo,magics, code,fil_ename,retstr=self.do_compile_code(
return_code,fil_ename,magics,code,
silent, store_history,user_expressions, allow_stdin)
if bcancel_exec:return retinfo
bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,2,2)
if bcancel_exec:return self.get_retinfo()
if len(self.get_magicsbykey(magics,'onlycompile'))>0:
self._log("only run compile \n")
bcancel_exec=True
return retinfo
bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,3,1)
if bcancel_exec:return self.get_retinfo()
self._logln("The process :"+fil_ename)
bcancel_exec,retinfo,magics, code,fil_ename,retstr=self.do_runcode(
return_code,fil_ename,magics,code,
silent, store_history,user_expressions, allow_stdin)
if bcancel_exec:return retinfo
bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,3,3)
if bcancel_exec:return self.get_retinfo()
except Exception as e:
self._log(""+str(e),3)
return self.get_retinfo()
    def do_execute_class(self, code, magics,silent, store_history=True,
            user_expressions=None, allow_stdin=True):
        """Full execute pipeline for class/compiled languages (e.g. Java).

        Mirrors do_execute_script but the codefile/compile stages return the
        wider 8-tuple including ``class_filename`` and ``outpath``, and the
        run stage is launched on the compiled class file rather than the
        source file. Plugin hooks fire before/after every stage and may
        cancel the pipeline. Honors ``noruncode`` and ``onlycompile`` magics.
        Exceptions are logged and a default reply is returned.
        """
        try:
            return_code=0
            fil_ename=''
            outpath=''
            bcancel_exec,retinfo,magics, code=self.do_preexecute(
                code, magics,
                silent, store_history,user_expressions, allow_stdin)
            if bcancel_exec:return retinfo
            return_code=0
            fil_ename=''
            # Hook: before buildfile.
            bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,1,1)
            if bcancel_exec:return self.get_retinfo()
            # Class pipeline: note the 8-tuple (adds class_filename, outpath).
            bcancel_exec,retinfo,magics, code,fil_ename,class_filename,outpath,retstr=self.do_create_codefile(
                magics,code,
                silent, store_history,user_expressions, allow_stdin)
            if bcancel_exec:return retinfo
            # Hook: after buildfile.
            bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,1,2)
            if bcancel_exec:return self.get_retinfo()
            fil_ename=magics['codefilename']
            if len(self.get_magicsbykey(magics,'noruncode'))>0:
                bcancel_exec=True
                return self.get_retinfo()
            # Hook: before compile.
            bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,2,1)
            if bcancel_exec:return self.get_retinfo()
            bcancel_exec,retinfo,magics, code,fil_ename,class_filename,outpath,retstr=self.do_compile_code(
                return_code,fil_ename,magics,code,
                silent, store_history,user_expressions, allow_stdin)
            if bcancel_exec:return self.get_retinfo()
            # Hook: after compile.
            bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,2,2)
            if bcancel_exec:return self.get_retinfo()
            if len(self.get_magicsbykey(magics,'onlycompile'))>0:
                self._log("only run compile \n")
                bcancel_exec=True
                return retinfo
            # Hook: before exec.
            bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,3,1)
            if bcancel_exec:return self.get_retinfo()
            self._logln("The process :"+class_filename)
            # Run the compiled class, not the source file.
            bcancel_exec,retinfo,magics, code,fil_ename,retstr=self.do_runcode(
                return_code,fil_ename,class_filename,outpath,magics,code,
                silent, store_history,user_expressions, allow_stdin)
            if bcancel_exec:return retinfo
            # Hook: after completion.
            bcancel_exec,retstr=self.raise_plugin(code,magics,return_code,fil_ename,3,3)
            if bcancel_exec:return self.get_retinfo()
        except Exception as e:
            self._log("???"+str(e),3)
        return self.get_retinfo()
def do_execute_runprg(self, code, magics,silent, store_history=True,
user_expressions=None, allow_stdin=True):
try:
bcancel_exec,retinfo,magics, code=self.dor_preexecute(
code,magics,
silent, store_history,user_expressions, allow_stdin)
if bcancel_exec:return retinfo
| |
<filename>tests/myapp/modelsfinaldb.py
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class LabifyVillageCensusFieldsFinal(models.Model):
state_code = models.TextField(blank=True, null=True) # This field type is a guess.
state = models.TextField(blank=True, null=True) # This field type is a guess.
dist_code = models.TextField(blank=True, null=True) # This field type is a guess.
dist_name = models.TextField(blank=True, null=True) # This field type is a guess.
sub_dist_name = models.TextField(blank=True, null=True) # This field type is a guess.
sub_dist_code = models.TextField(blank=True, null=True) # This field type is a guess.
vil_code = models.TextField(blank=True, null=True) # This field type is a guess.
vil_name = models.TextField(blank=True, null=True) # This field type is a guess.
cdb_code = models.TextField(blank=True, null=True) # This field type is a guess.
cdb_name = models.TextField(blank=True, null=True) # This field type is a guess.
gram_pan_code = models.TextField(blank=True, null=True) # This field type is a guess.
gram_pan_name = models.TextField(blank=True, null=True) # This field type is a guess.
ref_year = models.TextField(blank=True, null=True) # This field type is a guess.
sub_dist_hquarter_name = models.TextField(blank=True, null=True) # This field type is a guess.
sub_dist_hquarter_distance = models.TextField(blank=True, null=True) # This field type is a guess.
dist_hquarter_name = models.TextField(blank=True, null=True) # This field type is a guess.
dist_hquarter_dist = models.TextField(blank=True, null=True) # This field type is a guess.
nearest_stat_town_name = models.TextField(blank=True, null=True) # This field type is a guess.
nearest_stat_town_dist = models.TextField(blank=True, null=True) # This field type is a guess.
within_state_ut_name = models.TextField(blank=True, null=True) # This field type is a guess.
within_state_ut_dist = models.TextField(blank=True, null=True) # This field type is a guess.
outside_state_ut_name = models.TextField(blank=True, null=True) # This field type is a guess.
outside_state_ut_dist = models.TextField(blank=True, null=True) # This field type is a guess.
tot_geograph_area = models.TextField(blank=True, null=True) # This field type is a guess.
tot_households = models.TextField(blank=True, null=True) # This field type is a guess.
tot_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_male_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_female_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_sc_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_scm_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_scf_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_st_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_stm_population = models.TextField(blank=True, null=True) # This field type is a guess.
tot_stf_population = models.TextField(blank=True, null=True) # This field type is a guess.
gov_preprim_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_preprim_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_preprim_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_preprim_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_preprim = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_preprim = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_preprim = models.TextField(blank=True, null=True) # This field type is a guess.
gov_prim_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_prim_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_prim_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_prim_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_prim = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_prim = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_prim = models.TextField(blank=True, null=True) # This field type is a guess.
gov_mid_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_mid_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_mid_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_mid_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_mid = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_mid = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_mid = models.TextField(blank=True, null=True) # This field type is a guess.
gov_sec_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_sec_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_sec_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_sec_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_sec = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_sec = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_sec = models.TextField(blank=True, null=True) # This field type is a guess.
gov_sen_sec_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_sen_sec_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_sen_sec_sch_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_sen_sec_sch_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_sen_sec = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_sen_sec = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_sen_sec = models.TextField(blank=True, null=True) # This field type is a guess.
gov_deg_col_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_deg_col_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_deg_col_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_deg_col_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_deg_col = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_deg_col = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_deg_col = models.TextField(blank=True, null=True) # This field type is a guess.
gov_eng_col_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_eng_col_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_eng_col_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_eng_col_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_eng_col = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_eng_col = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_eng_col = models.TextField(blank=True, null=True) # This field type is a guess.
gov_med_col_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_med_col_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_med_col_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_med_col_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_med_col = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_med_col = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_med_col = models.TextField(blank=True, null=True) # This field type is a guess.
gov_mgmt_inst_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_mgmt_inst_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_mgmt_inst_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_mgmt_inst_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_mgmt_inst = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_mgmt_inst = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_mgmt_inst = models.TextField(blank=True, null=True) # This field type is a guess.
gov_polytech_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_polytech_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_polytech_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_polytech_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_polytech = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_village_town_polytech = models.TextField(blank=True, null=True) # This field type is a guess.
ina_distcode_polytech = models.TextField(blank=True, null=True) # This field type is a guess.
gov_voc_training_status = models.TextField(blank=True, null=True) # This field type is a guess.
gov_voc_training_nos = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_voc_training_status = models.TextField(blank=True, null=True) # This field type is a guess.
pvt_voc_training_nos = models.TextField(blank=True, null=True) # This field type is a guess.
nfaci_status_voc_training = models.TextField(blank=True, null=True) # This field | |
import copy
from sqlalchemy import exc as sa_exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import foreign
from sqlalchemy.orm import mapper
from sqlalchemy.orm import object_mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import util as orm_util
from sqlalchemy.orm.attributes import instance_state
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import not_in_
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from test.orm import _fixtures
class CascadeArgTest(fixtures.MappedTest):
    """Tests for validation and semantics of the relationship() `cascade`
    argument itself: bad option strings, option interactions, repr,
    immutability and reassignment after mapper configuration."""

    # No fixture data or per-test table setup needed: these tests only
    # exercise mapper/relationship configuration, not persistence.
    run_inserts = None
    run_create_tables = None
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(30), nullable=False),
        )
        Table(
            "addresses",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", Integer, ForeignKey("users.id")),
            Column("email_address", String(50), nullable=False),
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Basic):
            pass
        class Address(cls.Basic):
            pass
    def test_delete_with_passive_deletes_all(self):
        # passive_deletes="all" conflicts with delete/delete-orphan cascade;
        # the error is raised at mapper-configuration time.
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address,
                    passive_deletes="all",
                    cascade="all, delete-orphan",
                )
            },
        )
        mapper(Address, addresses)
        assert_raises_message(
            sa_exc.ArgumentError,
            "On User.addresses, can't set passive_deletes='all' "
            "in conjunction with 'delete' or 'delete-orphan' cascade",
            configure_mappers,
        )
    def test_delete_orphan_without_delete(self):
        # delete-orphan without delete only warns (does not raise).
        Address = self.classes.Address
        assert_raises_message(
            sa_exc.SAWarning,
            "The 'delete-orphan' cascade option requires 'delete'.",
            relationship,
            Address,
            cascade="save-update, delete-orphan",
        )
    def test_bad_cascade(self):
        # Unknown cascade tokens are reported, valid ones are ignored
        # in the error message.
        addresses, Address = self.tables.addresses, self.classes.Address
        mapper(Address, addresses)
        assert_raises_message(
            sa_exc.ArgumentError,
            r"Invalid cascade option\(s\): 'fake', 'fake2'",
            relationship,
            Address,
            cascade="fake, all, delete-orphan, fake2",
        )
    def test_cascade_repr(self):
        # "all" expands to the full option set in sorted order.
        eq_(
            repr(orm_util.CascadeOptions("all, delete-orphan")),
            "CascadeOptions('delete,delete-orphan,expunge,"
            "merge,refresh-expire,save-update')",
        )
    def test_cascade_immutable(self):
        # CascadeOptions is a frozenset subclass.
        assert isinstance(
            orm_util.CascadeOptions("all, delete-orphan"), frozenset
        )
    def test_cascade_deepcopy(self):
        old = orm_util.CascadeOptions("all, delete-orphan")
        new = copy.deepcopy(old)
        eq_(old, new)
    def test_cascade_assignable(self):
        # The cascade attribute may be reassigned both before and after
        # mapper configuration; delete-orphan bookkeeping follows along.
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        rel = relationship(Address)
        eq_(rel.cascade, set(["save-update", "merge"]))
        rel.cascade = "save-update, merge, expunge"
        eq_(rel.cascade, set(["save-update", "merge", "expunge"]))
        mapper(User, users, properties={"addresses": rel})
        am = mapper(Address, addresses)
        configure_mappers()
        eq_(rel.cascade, set(["save-update", "merge", "expunge"]))
        assert ("addresses", User) not in am._delete_orphans
        rel.cascade = "all, delete, delete-orphan"
        assert ("addresses", User) in am._delete_orphans
        eq_(
            rel.cascade,
            set(
                [
                    "delete",
                    "delete-orphan",
                    "expunge",
                    "merge",
                    "refresh-expire",
                    "save-update",
                ]
            ),
        )
    def test_cascade_unicode(self):
        # Unicode cascade strings are accepted (py2/py3 compatibility).
        Address = self.classes.Address
        rel = relationship(Address)
        rel.cascade = util.u("save-update, merge, expunge")
        eq_(rel.cascade, set(["save-update", "merge", "expunge"]))
class O2MCascadeDeleteOrphanTest(fixtures.MappedTest):
    """Runtime behavior of one-to-many "all, delete-orphan" cascade:
    orphan detection on collection mutation, delete cascades (including
    unloaded collections), and that cascades don't touch removed items."""

    run_inserts = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(30), nullable=False),
        )
        Table(
            "addresses",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", Integer, ForeignKey("users.id")),
            Column("email_address", String(50), nullable=False),
        )
        Table(
            "orders",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", Integer, ForeignKey("users.id"), nullable=False),
            Column("description", String(30)),
        )
        Table(
            "dingalings",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("address_id", Integer, ForeignKey("addresses.id")),
            Column("data", String(30)),
        )
    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass
        class Address(cls.Comparable):
            pass
        class Order(cls.Comparable):
            pass
        class Dingaling(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        # User cascades "all, delete-orphan" onto both addresses and orders;
        # Dingaling->Address has only the default cascade.
        (
            users,
            Dingaling,
            Order,
            User,
            dingalings,
            Address,
            orders,
            addresses,
        ) = (
            cls.tables.users,
            cls.classes.Dingaling,
            cls.classes.Order,
            cls.classes.User,
            cls.tables.dingalings,
            cls.classes.Address,
            cls.tables.orders,
            cls.tables.addresses,
        )
        mapper(Address, addresses)
        mapper(Order, orders)
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(
                    Address, cascade="all, delete-orphan", backref="user"
                ),
                "orders": relationship(
                    Order, cascade="all, delete-orphan", order_by=orders.c.id
                ),
            },
        )
        mapper(
            Dingaling,
            dingalings,
            properties={"address": relationship(Address)},
        )
    def test_list_assignment_new(self):
        # save-update cascade persists a freshly assigned collection.
        User, Order = self.classes.User, self.classes.Order
        with Session() as sess:
            u = User(
                name="jack",
                orders=[
                    Order(description="order 1"),
                    Order(description="order 2"),
                ],
            )
            sess.add(u)
            sess.commit()
            eq_(
                u,
                User(
                    name="jack",
                    orders=[
                        Order(description="order 1"),
                        Order(description="order 2"),
                    ],
                ),
            )
    def test_list_assignment_replace(self):
        # Replacing the whole collection orphans (and deletes) the old rows.
        User, Order = self.classes.User, self.classes.Order
        with Session() as sess:
            u = User(
                name="jack",
                orders=[
                    Order(description="someorder"),
                    Order(description="someotherorder"),
                ],
            )
            sess.add(u)
            u.orders = [
                Order(description="order 3"),
                Order(description="order 4"),
            ]
            sess.commit()
            eq_(
                u,
                User(
                    name="jack",
                    orders=[
                        Order(description="order 3"),
                        Order(description="order 4"),
                    ],
                ),
            )
            # order 1, order 2 have been deleted
            eq_(
                sess.query(Order).order_by(Order.id).all(),
                [Order(description="order 3"), Order(description="order 4")],
            )
    def test_standalone_orphan(self):
        # An Order with no parent violates the NOT NULL FK at flush time.
        Order = self.classes.Order
        with Session() as sess:
            o5 = Order(description="order 5")
            sess.add(o5)
            assert_raises(sa_exc.DBAPIError, sess.flush)
    def test_save_update_sends_pending(self):
        """test that newly added and deleted collection items are
        cascaded on save-update"""
        Order, User = self.classes.Order, self.classes.User
        # expire_on_commit=False so detached state survives commit/close.
        sess = sessionmaker(expire_on_commit=False)()
        o1, o2, o3 = (
            Order(description="o1"),
            Order(description="o2"),
            Order(description="o3"),
        )
        u = User(name="jack", orders=[o1, o2])
        sess.add(u)
        sess.commit()
        sess.close()
        u.orders.append(o3)
        u.orders.remove(o1)
        sess.add(u)
        assert o1 in sess
        assert o2 in sess
        assert o3 in sess
        sess.commit()
    def test_remove_pending_from_collection(self):
        # A pending orphan removed from its collection is expunged.
        User, Order = self.classes.User, self.classes.Order
        with Session() as sess:
            u = User(name="jack")
            sess.add(u)
            sess.commit()
            o1 = Order()
            u.orders.append(o1)
            assert o1 in sess
            u.orders.remove(o1)
            assert o1 not in sess
    def test_remove_pending_from_pending_parent(self):
        # test issue #4040
        User, Order = self.classes.User, self.classes.Order
        with Session() as sess:
            u = User(name="jack")
            o1 = Order()
            sess.add(o1)
            # object becomes an orphan, but parent is not in session
            u.orders.append(o1)
            u.orders.remove(o1)
            sess.add(u)
            assert o1 in sess
            sess.flush()
            assert o1 not in sess
    def test_delete(self):
        # Deleting the parent cascades the delete to loaded children.
        User, users, orders, Order = (
            self.classes.User,
            self.tables.users,
            self.tables.orders,
            self.classes.Order,
        )
        with Session() as sess:
            u = User(
                name="jack",
                orders=[
                    Order(description="someorder"),
                    Order(description="someotherorder"),
                ],
            )
            sess.add(u)
            sess.flush()
            sess.delete(u)
            sess.flush()
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(users)
                ).scalar(),
                0,
            )
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(orders)
                ).scalar(),
                0,
            )
    def test_delete_unloaded_collections(self):
        """Unloaded collections are still included in a delete-cascade
        by default."""
        User, addresses, users, Address = (
            self.classes.User,
            self.tables.addresses,
            self.tables.users,
            self.classes.Address,
        )
        with Session() as sess:
            u = User(
                name="jack",
                addresses=[
                    Address(email_address="address1"),
                    Address(email_address="address2"),
                ],
            )
            sess.add(u)
            sess.flush()
            sess.expunge_all()
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(addresses)
                ).scalar(),
                2,
            )
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(users)
                ).scalar(),
                1,
            )
            u = sess.get(User, u.id)
            assert "addresses" not in u.__dict__
            sess.delete(u)
            sess.flush()
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(addresses)
                ).scalar(),
                0,
            )
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(users)
                ).scalar(),
                0,
            )
    def test_cascades_onlycollection(self):
        """Cascade only reaches instances that are still part of the
        collection, not those that have been removed"""
        User, Order, users, orders = (
            self.classes.User,
            self.classes.Order,
            self.tables.users,
            self.tables.orders,
        )
        # autoflush=False so the removal/delete sequence is under test control.
        with Session(autoflush=False) as sess:
            u = User(
                name="jack",
                orders=[
                    Order(description="someorder"),
                    Order(description="someotherorder"),
                ],
            )
            sess.add(u)
            sess.flush()
            o = u.orders[0]
            del u.orders[0]
            sess.delete(u)
            assert u in sess.deleted
            assert o not in sess.deleted
            assert o in sess
            u2 = User(name="newuser", orders=[o])
            sess.add(u2)
            sess.flush()
            sess.expunge_all()
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(users)
                ).scalar(),
                1,
            )
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(orders)
                ).scalar(),
                1,
            )
            eq_(
                sess.query(User).all(),
                [
                    User(
                        name="newuser", orders=[Order(description="someorder")]
                    )
                ],
            )
    def test_cascade_nosideeffects(self):
        """test that cascade leaves the state of unloaded
        scalars/collections unchanged."""
        Dingaling, User, Address = (
            self.classes.Dingaling,
            self.classes.User,
            self.classes.Address,
        )
        sess = Session()
        u = User(name="jack")
        sess.add(u)
        assert "orders" not in u.__dict__
        sess.flush()
        assert "orders" not in u.__dict__
        a = Address(email_address="<EMAIL>")
        sess.add(a)
        assert "user" not in a.__dict__
        a.user = u
        sess.flush()
        d = Dingaling(data="d1")
        d.address_id = a.id
        sess.add(d)
        assert "address" not in d.__dict__
        sess.flush()
        assert d.address is a
    def test_cascade_delete_plusorphans(self):
        # Parent delete removes both the remaining children (delete cascade)
        # and the one detached beforehand (orphan).
        User, users, orders, Order = (
            self.classes.User,
            self.tables.users,
            self.tables.orders,
            self.classes.Order,
        )
        sess = Session()
        u = User(
            name="jack",
            orders=[
                Order(description="someorder"),
                Order(description="someotherorder"),
            ],
        )
        sess.add(u)
        sess.flush()
        eq_(
            sess.execute(select(func.count("*")).select_from(users)).scalar(),
            1,
        )
        eq_(
            sess.execute(select(func.count("*")).select_from(orders)).scalar(),
            2,
        )
        del u.orders[0]
        sess.delete(u)
        sess.flush()
        eq_(
            sess.execute(select(func.count("*")).select_from(users)).scalar(),
            0,
        )
        eq_(
            sess.execute(select(func.count("*")).select_from(orders)).scalar(),
            0,
        )
    def test_collection_orphans(self):
        # Emptying the collection deletes all children; the parent survives.
        User, users, orders, Order = (
            self.classes.User,
            self.tables.users,
            self.tables.orders,
            self.classes.Order,
        )
        with Session() as sess:
            u = User(
                name="jack",
                orders=[
                    Order(description="someorder"),
                    Order(description="someotherorder"),
                ],
            )
            sess.add(u)
            sess.flush()
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(users)
                ).scalar(),
                1,
            )
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(orders)
                ).scalar(),
                2,
            )
            u.orders[:] = []
            sess.flush()
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(users)
                ).scalar(),
                1,
            )
            eq_(
                sess.execute(
                    select(func.count("*")).select_from(orders)
                ).scalar(),
                0,
            )
class O2MCascadeTest(fixtures.MappedTest):
    """Tests for one-to-many collections containing ``None`` values."""

    run_inserts = None

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(30), nullable=False),
        )
        Table(
            "addresses",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", Integer, ForeignKey("users.id")),
            Column("email_address", String(50), nullable=False),
        )

    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass

        class Address(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        users, User, Address, addresses = (
            cls.tables.users,
            cls.classes.User,
            cls.classes.Address,
            cls.tables.addresses,
        )

        mapper(Address, addresses)
        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )

    def test_none_o2m_collection_assignment(self):
        """A collection assigned with a None member fails to flush and
        is left unchanged."""
        User = self.classes.User
        # sessions are context-managed for consistency with the rest of
        # the suite (the original leaked an open session)
        with Session() as s:
            u1 = User(name="u", addresses=[None])
            s.add(u1)
            eq_(u1.addresses, [None])
            assert_raises_message(
                orm_exc.FlushError,
                "Can't flush None value found in collection User.addresses",
                s.commit,
            )
            eq_(u1.addresses, [None])

    def test_none_o2m_collection_append(self):
        """Appending None to a collection fails to flush and the
        collection is left unchanged."""
        User = self.classes.User
        with Session() as s:
            u1 = User(name="u")
            s.add(u1)
            u1.addresses.append(None)
            eq_(u1.addresses, [None])
            assert_raises_message(
                orm_exc.FlushError,
                "Can't flush None value found in collection User.addresses",
                s.commit,
            )
            eq_(u1.addresses, [None])
class O2MCascadeDeleteNoOrphanTest(fixtures.MappedTest):
run_inserts = None
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30)),
)
Table(
"orders",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", Integer, ForeignKey("users.id")),
Column("description", String(30)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Order(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
User, | |
<reponame>JunhoPark0314/FSCE
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from collections import defaultdict
from fsdet.data.catalog import MetadataCatalog
import logging
from typing import Dict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from fsdet.layers import ShapeSpec
from fsdet.structures import Boxes, Instances, pairwise_iou
from fsdet.utils.events import get_event_storage
from fsdet.utils.registry import Registry
import fvcore.nn.weight_init as weight_init
from ..backbone import build_backbone
from ..backbone.resnet import BottleneckBlock, make_stage
from ..box_regression import Box2BoxTransform
from ..matcher import Matcher
from ..poolers import ROIPooler
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from ..sampling import subsample_labels
from .box_head import build_box_head
from .fast_rcnn import (
FastRCNNOutputLayers,
FastRCNNOutputs,
FastRCNNContrastOutputs,
FastRCNNMoCoOutputs,
ContrastWithPrototypeOutputs,
ContrastOutputsWithStorage,
ROI_HEADS_OUTPUT_REGISTRY,
)
from ..utils import concat_all_gathered, select_all_gather
from ..contrastive_loss import (
SupConLoss,
SupConLossV2,
ContrastiveHead,
SupConLossWithPrototype,
SupConLossWithStorage
)
from fsdet.layers import cat
# Global registry consulted by `build_roi_heads` to look up a head class
# by the name configured in cfg.MODEL.ROI_HEADS.NAME.
ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
ROI_HEADS_REGISTRY.__doc__ = """
Registry for ROI heads in a generalized R-CNN model.
ROIHeads take feature maps and region proposals, and
perform per-region computation.
The registered object will be called with `obj(cfg, input_shape)`.
The call is expected to return an :class:`ROIHeads`.
"""

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def build_roi_heads(cfg, input_shape):
    """
    Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.

    Looks the configured name up in ``ROI_HEADS_REGISTRY`` and
    instantiates the registered class with ``(cfg, input_shape)``.
    """
    head_name = cfg.MODEL.ROI_HEADS.NAME
    head_cls = ROI_HEADS_REGISTRY.get(head_name)
    return head_cls(cfg, input_shape)
def select_foreground_proposals(proposals, bg_label):
    """
    Given a list of N Instances (for N images), each containing a `gt_classes` field,
    return a list of Instances that contain only instances with `gt_classes != -1 &&
    gt_classes != bg_label`.

    Args:
        proposals (list[Instances]): A list of N Instances, where N is the number of
            images in the batch.
        bg_label: label index of background class.

    Returns:
        list[Instances]: N Instances, each contains only the selected foreground instances.
        list[Tensor]: N boolean vector, correspond to the selection mask of
            each Instances object. True for selected instances.
    """
    assert isinstance(proposals, (list, tuple))
    assert isinstance(proposals[0], Instances)
    assert proposals[0].has("gt_classes")

    fg_proposals = []
    fg_selection_masks = []
    for inst in proposals:
        classes = inst.gt_classes
        # keep everything that is neither "ignore" (-1) nor background
        keep_mask = (classes != -1) & (classes != bg_label)
        keep_idxs = keep_mask.nonzero().squeeze(1)
        fg_proposals.append(inst[keep_idxs])
        fg_selection_masks.append(keep_mask)
    return fg_proposals, fg_selection_masks
class ROIHeads(torch.nn.Module):
    """
    ROIHeads perform all per-region computation in an R-CNN.
    It contains logic of cropping the regions, extract per-region features,
    and make per-region predictions.
    It can have many variants, implemented as subclasses of this class.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super(ROIHeads, self).__init__()

        # fmt: off
        self.batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
        self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
        self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
        self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        self.proposal_append_gt = cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT
        self.feature_strides = {k: v.stride for k, v in input_shape.items()}
        self.feature_channels = {k: v.channels for k, v in input_shape.items()}
        self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
        self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        # fmt: on

        # Matcher to assign box proposals to gt boxes
        self.proposal_matcher = Matcher(
            cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
            cfg.MODEL.ROI_HEADS.IOU_LABELS,
            allow_low_quality_matches=False,
        )

        # Box2BoxTransform for bounding box regression
        self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)

        self.meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
        if len(self.meta.thing_classes) > len(self.meta.base_classes):
            # Fine-tuning setting: some thing classes are novel.  Build 0/1
            # masks over (num thing classes + 1) slots; base_mask zeroes the
            # extra last slot (presumably the background slot -- TODO confirm).
            novel_cls_list = []
            for i, cls_name in enumerate(self.meta.thing_classes):
                if cls_name in self.meta.novel_classes:
                    novel_cls_list.append(i)
            self.novel_mask = torch.zeros(len(self.meta.thing_classes)+1)
            self.novel_mask[novel_cls_list] = 1
            self.base_mask = 1 - self.novel_mask
            self.base_mask[-1] = 0
        else:
            self.novel_mask = None
            # Fix: also define base_mask on this path; the original left it
            # unset, so any later read of self.base_mask (e.g. the currently
            # commented-out logging below) would raise AttributeError.
            self.base_mask = None

    def _sample_proposals(self, matched_idxs, matched_labels, gt_classes):
        """
        Based on the matching between N proposals and M groundtruth,
        sample the proposals and set their classification labels.

        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched
                gt index in [0, M) for each proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label
                (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
            gt_classes (Tensor): a vector of length M.

        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for
                each sampled proposal. Each sample is labeled as either a category in
                [0, num_classes) or the background (num_classes).
        """
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            # proposals without a real match pick up an arbitrary gt class
            # here; the two relabeling lines below overwrite those entries
            gt_classes = gt_classes[matched_idxs]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.num_classes
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            # no ground truth in the image: everything is background
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes

        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes
        )

        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs]

    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
        with a fraction of positives that is no larger than `self.positive_sample_fraction.

        Args:
            See :meth:`ROIHeads.forward`

        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:
                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                   then the ground-truth box is random)
                Other fields such as "gt_classes" that's included in `targets`.
        """
        gt_boxes = [x.gt_boxes for x in targets]
        if self.proposal_append_gt:
            # use ground truth bboxes as super-high quality proposals for training
            # with logits = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)

        proposals_with_gt = []

        num_fg_samples = []
        num_bg_samples = []
        # Per-size-bucket bookkeeping; only consumed by the logging code
        # that is currently commented out below.
        num_base_fg_samples = defaultdict(list)
        num_novel_fg_samples = defaultdict(list)
        base_fg_iou = defaultdict(list)
        novel_fg_iou = defaultdict(list)

        # COCO-style small/medium/large area ranges (in squared pixels)
        areaRng = {
            "s":[0 ** 2, 32 ** 2],
            "m":[32 ** 2, 96 ** 2],
            "l":[96 ** 2, 1e5 ** 2],
        }

        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            # matched_idxs in [0, M)
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            # best IoU against any gt, per proposal (stored on the output below)
            iou, _ = match_quality_matrix.max(dim=0)

            # random sample batche_size_per_image proposals with positive fraction
            # NOTE: only matched proposals will be returned
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )

            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.iou = iou[sampled_idxs]

            # We index all the attributes of targets that start with "gt_"
            # and have not been added to proposals yet (="gt_classes").
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing waste some compute, because heads
                # will filter the proposals again (by foreground/background,
                # etc), so we essentially index the data twice.
                for (trg_name, trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
            else:
                # no gt: attach all-zero boxes so downstream code can rely
                # on the field being present
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
                )
                proposals_per_image.gt_boxes = gt_boxes

            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)
            """
            novel_mask = self.novel_mask[gt_classes].cuda()
            base_mask = self.base_mask[gt_classes].cuda()
            gt_area = proposals_per_image.gt_boxes.area()
            gt_iou = pairwise_iou(proposals_per_image.proposal_boxes, proposals_per_image.gt_boxes).diag()
            for arg_key, arg_val in areaRng.items():
                arg_mask = (gt_area >= arg_val[0]) * (gt_area < arg_val[1])
                if (arg_mask * novel_mask).sum() > 0:
                    num_novel_fg_samples[arg_key].append((arg_mask * novel_mask).sum())
                    novel_fg_iou[arg_key].append(gt_iou[(arg_mask * novel_mask).bool()])
                if (arg_mask * base_mask).sum() > 0:
                    num_base_fg_samples[arg_key].append((arg_mask * base_mask).sum())
                    base_fg_iou[arg_key].append(gt_iou[(arg_mask * base_mask).bool()])
            """

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
        """
        for k in list(areaRng.keys()):
            if k in num_novel_fg_samples:
                storage.put_scalar("roi_head/novel_{}_fg".format(k), torch.stack(num_novel_fg_samples[k]).mean().item())
                storage.put_histogram("roi_head/novel_{}_iou".format(k), torch.cat(novel_fg_iou[k]))
            if k in num_base_fg_samples:
                storage.put_scalar("roi_head/base_{}_fg".format(k), torch.stack(num_base_fg_samples[k]).mean().item())
                storage.put_histogram("roi_head/base_{}_iou".format(k), torch.cat(base_fg_iou[k]))
        """
        return proposals_with_gt
        # proposals_with_gt, List[Instances], fields = ['gt_boxes', 'gt_classes', 'proposal_boxes', 'objectness_logits']

    def forward(self, images, features, proposals, targets=None):
        """
        Args:
            images (ImageList):
            features (dict[str: Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            proposals (list[Instances]): length `N` list of `Instances`s. The i-th
                `Instances` contains object proposals for the i-th input image,
                with fields "proposal_boxes" and "objectness_logits".
            targets (list[Instances], optional): length `N` list of `Instances`s. The i-th
                `Instances` contains the ground-truth per-instance annotations
                for the i-th input image. Specify `targets` during training only.
                It may have the following fields:
                - gt_boxes: the bounding box of each instance.
                - gt_classes: the label for each instance with a category ranging in [0, #class].

        Returns:
            results (list[Instances]): length `N` list of `Instances`s containing the
                detected instances. Returned during inference only; may be []
                during training.
            losses (dict[str: Tensor]): mapping from a named loss to a tensor
                storing the loss. Used during training only.
        """
        raise NotImplementedError()
@ROI_HEADS_REGISTRY.register()
class Res5ROIHeads(ROIHeads):
"""
The ROIHeads in a typical "C4" R-CNN | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0622844,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.95773,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.222955,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.377807,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.2034,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.311228,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.501999,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.253392,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.06662,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.171456,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.2792,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.227348,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0130543,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.177895,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0965445,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.405243,
'Execution Unit/Register Files/Runtime Dynamic': 0.109599,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.430526,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.841634,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.80104,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000732837,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000732837,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000637026,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000245906,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00138687,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00348957,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0070719,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0928107,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.90356,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.229625,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.315227,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.40858,
'Instruction Fetch Unit/Runtime Dynamic': 0.648225,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0626646,
'L2/Runtime Dynamic': 0.00572501,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.84618,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.778221,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0520569,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0520569,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.092,
'Load Store Unit/Runtime Dynamic': 1.087,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.128363,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.256727,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0455566,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0464908,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.367061,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.037664,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.601429,
'Memory Management Unit/Runtime Dynamic': 0.0841549,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.0334,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.598048,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0213199,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.143953,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
None
""" The API OpenID """
openid_structure = None
""" The OpenID structure """
diffie_hellman = None
""" the Diffie Hellman management structure """
def __init__(
self,
api_openid_plugin = None,
diffie_hellman_plugin = None,
random_plugin = None,
api_openid = None,
openid_structure = None,
diffie_hellman = None
):
"""
Constructor of the class.
:type api_openid_plugin: APIOpenIDPlugin
:param api_openid_plugin: The API OpenID plugin.
:type diffie_hellman_plugin: DiffieHellmanPlugin
:param diffie_hellman_plugin: The Diffie Hellman plugin.
:type random_plugin: RandomPlugin
:param random_plugin: The random plugin.
:type api_openid: ServiceOpenID
:param api_openid: The API OpenID.
:type openid_structure: OpenIDStructure
:param openid_structure: The OpenID structure.
:type diffie_hellman: DiffieHellman
:param diffie_hellman: The Diffie Hellman management structure.
"""
self.api_openid_plugin = api_openid_plugin
self.diffie_hellman_plugin = diffie_hellman_plugin
self.random_plugin = random_plugin
self.api_openid = api_openid
self.openid_structure = openid_structure
self.diffie_hellman = diffie_hellman
    def open(self):
        """
        Opens the OpenID server.

        Currently a no-op: no startup work is required.
        """

        pass
    def close(self):
        """
        Closes the OpenID server.

        Currently a no-op: no teardown work is required.
        """

        pass
def generate_openid_structure(
self,
provider_url = None,
association_type = HMAC_SHA256_VALUE,
session_type = NO_ENCRYPTION_VALUE,
prime_value = None,
base_value = None,
consumer_public = None,
set_structure = True
):
# creates a new OpenID structure
openid_structure = OpenIDStructure(
provider_url,
association_type = association_type,
session_type = session_type
)
# in case the structure is meant to be set
# sets the OpenID structure
if set_structure: self.set_openid_structure(openid_structure)
# decodes the Diffie Hellman values in case they exist
prime_value = prime_value and self.api_openid._mklong(base64.b64decode(prime_value)) or None
base_value = base_value and self.api_openid._mklong(base64.b64decode(base_value)) or None
consumer_public = consumer_public and self.api_openid._mklong(base64.b64decode(consumer_public)) or None
# sets the default Diffie Hellman values
prime_value = prime_value or DEFAULT_PRIME_VALUE
base_value = base_value or DEFAULT_BASE_VALUE
# creates the parameters to send to be used in the Diffie Hellman
# structure creation
parameters = dict(
prime_value = prime_value,
base_value = base_value
)
# creates the Diffie Hellman management structure with the prime
# and base values given
self.diffie_hellman = self.diffie_hellman_plugin.create_structure(parameters)
# sets the a value in the Diffie Hellman structure
self.diffie_hellman.set_A_value(consumer_public)
# returns the OpenID structure
return openid_structure
def openid_associate(self):
"""
Requests an association (associate mode) according to the
OpenID specification.
:rtype: OpenIDStructure
:return: The current OpenID structure.
"""
# generates an association handle
association_handle = self._generate_handle()
# retrieves the MAC key type to be used
mac_key_type = self._get_mac_key_type()
# generates the MAC key
mac_key = self._generate_mac_key(mac_key_type)
# sets the association handle in the OpenID structure
self.openid_structure.association_handle = association_handle
# sets the expires in in the OpenID structure
self.openid_structure.expires_in = DEFAULT_EXPIRES_IN
# sets the MAC key in the OpenID structure
self.openid_structure.mac_key = mac_key
# in case the current session type is of type diffie hellman
if self.openid_structure.session_type in DIFFIE_HELLMAN_ASSOCIATION_TYPES:
# generates a private key for the diffie hellman "b" value
private_key = self._generate_private_key()
# sets the "b" value in the diffie hellman management structure
self.diffie_hellman.set_b_value(private_key)
# returns the OpenID structure
return self.openid_structure
    def openid_request(self):
        """
        Processes an OpenID authentication request, populating the
        current OpenID structure with the response values: mode,
        invalidate handle, response nonce, signed names and signature.
        """
        # generates an invalidate handle if necessary
        invalidate_handle = self.openid_structure.invalidate_handle or self._generate_handle()
        # retrieves the current date time
        # NOTE(review): utcnow() returns a naive UTC datetime (deprecated
        # in py3.12); assumed fine as the nonce only needs a UTC timestamp
        current_date_time = datetime.datetime.utcnow()
        # converts the current date time to string
        current_date_time_string = current_date_time.strftime(NONCE_TIME_FORMAT)
        # generates a random handle
        random_handle = self._generate_handle()
        # creates the response nonce appending the current date time string
        # to the random handle
        response_nonce = current_date_time_string + random_handle
        # sets the mode in the OpenID structure
        self.openid_structure.mode = ID_RES_VALUE
        # sets the invalidate handle in the OpenID structure
        self.openid_structure.invalidate_handle = invalidate_handle
        # sets the response nonce in the OpenID structure
        self.openid_structure.response_nonce = response_nonce
        # sets the signed in the OpenID structure
        self.openid_structure.signed = ",".join(DEFAULT_SIGNED_NAMES)
        # generates the signature
        signature = self._generate_signature()
        # sets the signature in the OpenID structure
        self.openid_structure.signature = signature
def openid_check_authentication(self, return_openid_structure, strict = True):
"""
Verifies the given return OpenID structure (verification)
according to the OpenID specification.
:type return_openid_structure: OpenIDStructure
:param return_openid_structure: The return OpenID structure
to be verified.
:type strict: bool
:param strict: Flag to control if the verification should be strict.
:rtype: OpenIDStructure
:return: The current OpenID structure.
"""
# in case the verification is strict and any of the base information items mismatches
if strict and not (self.openid_structure.return_to == return_openid_structure.return_to and\
self.openid_structure.claimed_id == return_openid_structure.claimed_id and\
self.openid_structure.identity == return_openid_structure.identity and\
self.openid_structure.provider_url == return_openid_structure.provider_url and\
self.openid_structure.response_nonce == return_openid_structure.response_nonce and\
self.openid_structure.signed == return_openid_structure.signed and\
self.openid_structure.signature == return_openid_structure.signature and\
return_openid_structure.ns == OPENID_NAMESPACE_VALUE):
# raises a verification failed exception
raise exceptions.VerificationFailed("invalid discovered information")
# returns the OpenID structure
return self.openid_structure
    def get_response_parameters(self):
        """
        Builds the map of parameters for an association response,
        according to the OpenID specification.
        For Diffie Hellman based session types the MAC key is sent
        encrypted (xor with the hashed shared secret), otherwise the
        plain (Base64) MAC key is sent.
        :rtype: Dictionary
        :return: The map of response parameters.
        """
        # start the parameters map
        parameters = {}
        # sets the namespace
        parameters["ns"] = self.openid_structure.ns
        # sets the association handle
        parameters["assoc_handle"] = self.openid_structure.association_handle
        # sets the session type
        parameters["session_type"] = self.openid_structure.session_type
        # sets the association type
        parameters["assoc_type"] = self.openid_structure.association_type
        # sets the expires in
        parameters["expires_in"] = self.openid_structure.expires_in
        # in case the current session type is of type diffie hellman
        # the MAC key must be encrypted before being sent
        if self.openid_structure.session_type in DIFFIE_HELLMAN_ASSOCIATION_TYPES:
            # retrieves the MAC key type to be used
            mac_key_type = self._get_mac_key_type()
            # generates the "B" value (server public key)
            B_value = self.diffie_hellman.generate_B()
            # calculates the shared key value
            key_value = self.diffie_hellman.calculate_Kb()
            # decodes the MAC key using Base64
            decoded_mac_key = base64.b64decode(self.openid_structure.mac_key)
            # retrieves the hash module from the HMAC hash modules map
            # NOTE(review): hash_module is None for an unknown MAC key
            # type, which would raise TypeError below -- assumed that the
            # session/association types are validated upstream, confirm
            hash_module = HMAC_HASH_MODULES_MAP.get(mac_key_type, None)
            # encodes the key value in order to be used in the xor operation
            encoded_key_value = hash_module(self.api_openid._btwoc(key_value)).digest()
            # calculates the encoded MAC key value and retrieves the digest
            encoded_mac_key = colony.xor_string_value(decoded_mac_key, encoded_key_value)
            # encodes the encoded MAC key into Base64
            encoded_mac_key = base64.b64encode(encoded_mac_key)
            # sets the DH server public
            parameters["dh_server_public"] = base64.b64encode(self.api_openid._btwoc(B_value))
            # sets the encoded MAC key
            parameters["enc_mac_key"] = encoded_mac_key
        else:
            # sets the MAC key (plain, no encryption session)
            parameters["mac_key"] = self.openid_structure.mac_key
        # returns the parameters
        return parameters
def get_encoded_response_parameters(self):
# retrieves the response parameters
response_parameters = self.get_response_parameters()
# encodes the response parameters
encoded_response_parameters = self._encode_key_value(response_parameters)
# returns the encoded response parameters
return encoded_response_parameters
def get_check_authentication_parameters(self):
# start the parameters map
parameters = {}
# sets the namespace
parameters["ns"] = self.openid_structure.ns
# sets the is valid
parameters["is_valid"] = TRUE_VALUE
# sets the invalidate handle
parameters["invalidate_handle"] = self.openid_structure.association_handle
# returns the parameters
return parameters
def get_encoded_check_authentication_parameters(self):
# retrieves the check authentication parameters
check_authentication_parameters = self.get_check_authentication_parameters()
# encodes the check authentication parameters
encoded_check_authentication_parameters = self._encode_key_value(check_authentication_parameters)
# returns the encoded check authentication parameters
return encoded_check_authentication_parameters
def get_return_url(self):
# sets the retrieval URL
retrieval_url = self.openid_structure.return_to
# start the parameters map and sets the complete set of
# parameters associated with the return URL, these values
# are taken from the current structure instance
parameters = {}
parameters["openid.ns"] = self.openid_structure.ns
parameters["openid.mode"] = self.openid_structure.mode
parameters["openid.op_endpoint"] = self.openid_structure.provider_url
parameters["openid.claimed_id"] = self.openid_structure.claimed_id
parameters["openid.identity"] = self.openid_structure.identity
parameters["openid.return_to"] = self.openid_structure.return_to
parameters["openid.response_nonce"] = self.openid_structure.response_nonce
parameters["openid.invalidate_handle"] = self.openid_structure.invalidate_handle
parameters["openid.assoc_handle"] = self.openid_structure.association_handle
parameters["openid.signed"] = self.openid_structure.signed
parameters["openid.sig"] = self.openid_structure.signature
# creates the request (get) URL from the parameters and returns
# the value to the caller method
request_url = self._build_get_url(retrieval_url, parameters)
return request_url
def get_openid_structure(self):
"""
Retrieves the OpenID structure.
:rtype: OpenIDStructure
:return: The OpenID structure.
"""
return self.openid_structure
def set_openid_structure(self, openid_structure):
"""
Sets the OpenID structure.
:type openid_structure: OpenIDStructure
:param openid_structure: The OpenID structure.
"""
self.openid_structure = openid_structure
def _get_mac_key_type(self):
"""
Retrieves the type of hashing to be used in the
MAC key.
:rtype: String
:return: The type of hashing to be used in the MAC key.
"""
# in case the current session is of type no encryption
if self.openid_structure.session_type == NO_ENCRYPTION_VALUE:
# returns the current association type
return self.openid_structure.association_type
# in case the current session is of type DH SHA1
elif self.openid_structure.session_type == DH_SHA1_VALUE:
# returns the HMAC SHA1 value
return HMAC_SHA1_VALUE
# in case the current session is of type DH SHA256
elif self.openid_structure.session_type == DH_SHA256_VALUE:
# returns the HMAC sha256 value
return HMAC_SHA256_VALUE
def _generate_signature(self):
# sets the signature items list
signed_items_list = DEFAULT_SIGNED_ITEMS
# sets the signature names list
signed_names_list = DEFAULT_SIGNED_NAMES
# creates the string buffer for the message
message_string_buffer = colony.StringBuffer()
# starts the index counter
index = 0
# iterates over all the signed items
for signed_item_name in signed_items_list:
# retrieves the signed item value from the return OpenID structure
signed_item_value = getattr(self.openid_structure, signed_item_name)
# retrieves | |
<filename>flash/video/classification/data.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union
import numpy as np
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import Sampler
from flash.core.data.data_module import DataModule
from flash.core.data.data_pipeline import DataPipelineState
from flash.core.data.io.classification_input import ClassificationState
from flash.core.data.io.input import DataKeys, InputFormat
from flash.core.data.io.input_base import Input, IterableInput
from flash.core.data.io.input_transform import InputTransform
from flash.core.data.utilities.paths import list_valid_files
from flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities
from flash.core.integrations.labelstudio.input import _parse_labelstudio_arguments, LabelStudioVideoClassificationInput
from flash.core.utilities.imports import (
_FIFTYONE_AVAILABLE,
_KORNIA_AVAILABLE,
_PYTORCHVIDEO_AVAILABLE,
lazy_import,
requires,
)
from flash.core.utilities.stages import RunningStage
SampleCollection = None
if _FIFTYONE_AVAILABLE:
fol = lazy_import("fiftyone.core.labels")
if TYPE_CHECKING:
from fiftyone.core.collections import SampleCollection
else:
fol = None
if _KORNIA_AVAILABLE:
import kornia.augmentation as K
if _PYTORCHVIDEO_AVAILABLE:
from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler
from pytorchvideo.data.encoded_video import EncodedVideo
from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset
from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample
from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip
else:
ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
_PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]
Label = Union[int, List[int]]
def _make_clip_sampler(
    clip_sampler: Union[str, "ClipSampler"] = "random",
    clip_duration: float = 2,
    clip_sampler_kwargs: Dict[str, Any] = None,
) -> "ClipSampler":
    """Build a pytorchvideo ``ClipSampler`` from a name (or sampler) and duration."""
    kwargs = clip_sampler_kwargs or {}
    return make_clip_sampler(clip_sampler, clip_duration, **kwargs)
class VideoClassificationInput(IterableInput):
    """Iterable input wrapping a pytorchvideo ``LabeledVideoDataset``."""

    def load_data(self, dataset: "LabeledVideoDataset") -> "LabeledVideoDataset":
        """Register label-mapping state (in training) and return the dataset unchanged."""
        if self.training:
            # NOTE(review): relies on pytorchvideo private attributes
            # (``_labeled_videos._paths_and_labels``); the class name is taken
            # from the parent folder of each video path -- may break across
            # pytorchvideo versions, confirm on upgrade
            label_to_class_mapping = {p[1]: p[0].split("/")[-2] for p in dataset._labeled_videos._paths_and_labels}
            self.set_state(ClassificationState(label_to_class_mapping))
            self.num_classes = len(np.unique([s[1]["label"] for s in dataset._labeled_videos]))
        return dataset

    def load_sample(self, sample):
        """Identity: samples are already dicts produced by the dataset."""
        return sample
class VideoClassificationPathsPredictInput(Input):
    """Input that loads single clips from video file paths for prediction."""

    def predict_load_data(
        self,
        paths: List[str],
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        decode_audio: bool = False,
        decoder: str = "pyav",
        **_: Any,
    ) -> Iterable[Tuple[str, Any]]:
        """Filter *paths* to valid video files and store clip/decode settings.

        Only ``mp4`` and ``avi`` extensions are accepted; the clip sampler,
        audio flag and decoder are kept on the instance for
        ``predict_load_sample``.
        """
        paths = list_valid_files(paths, valid_extensions=("mp4", "avi"))
        self._clip_sampler = _make_clip_sampler(clip_sampler, clip_duration, clip_sampler_kwargs)
        self._decode_audio = decode_audio
        self._decoder = decoder
        return paths

    def predict_load_sample(self, sample: str) -> Dict[str, Any]:
        """Decode one clip from the video at path *sample* into a sample dict.

        Raises ``MisconfigurationException`` when the sampled clip cannot be
        decoded (e.g. the video is shorter than the clip duration).
        """
        video = EncodedVideo.from_path(sample, decode_audio=self._decode_audio, decoder=self._decoder)
        # sample a single clip over the full duration, starting at t=0
        (
            clip_start,
            clip_end,
            clip_index,
            aug_index,
            is_last_clip,
        ) = self._clip_sampler(0.0, video.duration, None)
        loaded_clip = video.get_clip(clip_start, clip_end)
        # the clip is unusable when frames are missing, or when audio was
        # requested but could not be decoded
        clip_is_null = (
            loaded_clip is None or loaded_clip["video"] is None or (loaded_clip["audio"] is None and self._decode_audio)
        )
        if clip_is_null:
            raise MisconfigurationException(
                f"The provided video is too short {video.duration} to be clipped at {self._clip_sampler._clip_duration}"
            )
        frames = loaded_clip["video"]
        audio_samples = loaded_clip["audio"]
        # "audio" is only present in the sample when decoded audio exists
        return {
            "video": frames,
            "video_name": video.name,
            "video_index": 0,
            "clip_index": clip_index,
            "aug_index": aug_index,
            **({"audio": audio_samples} if audio_samples is not None else {}),
            DataKeys.METADATA: {"filepath": sample},
        }
class VideoClassificationFoldersInput(VideoClassificationInput):
    """Loads a labeled video dataset from a folder tree (one sub-folder per class)."""

    def load_data(
        self,
        path: str,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
    ) -> "LabeledVideoDataset":
        """Build a ``LabeledVideoDataset`` from *path* and register it with the parent."""
        sampler = _make_clip_sampler(clip_sampler, clip_duration, clip_sampler_kwargs)
        dataset = labeled_video_dataset(
            pathlib.Path(path),
            sampler,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
        )
        return super().load_data(dataset)
class VideoClassificationFilesInput(VideoClassificationInput):
    """Loads a labeled video dataset from explicit file paths and targets.

    Supports both single-label targets (one label per path) and multi-label
    targets (a list of labels per path, encoded as multi-hot vectors).
    """

    def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:
        """Encode a list of label ids as a multi-hot vector over ``labels_set``."""
        v = torch.zeros(len(self.labels_set))
        for label in label_list:
            v[label] = 1
        return v

    def load_data(
        self,
        paths: List[str],
        labels: List[Union[str, List]],
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
    ) -> "LabeledVideoDataset":
        """Build a ``LabeledVideoDataset`` from parallel ``paths``/``labels``.

        The label vocabulary and id mappings are computed once for both the
        single-label and the multi-label case (previously duplicated).
        """
        # multi-label mode is detected from the presence of list targets
        self.is_multilabel = any(isinstance(label, list) for label in labels)
        # collect the label vocabulary (flattened in the multi-label case)
        if self.is_multilabel:
            self.labels_set = {label for label_list in labels for label in label_list}
        else:
            self.labels_set = set(labels)
        # id mappings are identical in both modes; sorting makes ids deterministic
        self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}
        self.id_to_label = {i: label for label, i in self.label_to_id.items()}
        # encode the targets: multi-hot vectors or plain integer ids
        if self.is_multilabel:
            encoded_labels = [
                self._to_multi_hot([self.label_to_id[classname] for classname in label_list])
                for label_list in labels
            ]
            data = list(zip(paths, encoded_labels))
        else:
            data = list(zip(paths, [self.label_to_id[classname] for classname in labels]))
        dataset = LabeledVideoDataset(
            LabeledVideoPaths(data),
            _make_clip_sampler(clip_sampler, clip_duration, clip_sampler_kwargs),
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
        )
        if self.training:
            # expose the id -> label mapping and the class count for the task
            self.set_state(ClassificationState(self.id_to_label))
            self.num_classes = len(self.labels_set)
        return dataset
class VideoClassificationFiftyOneInput(VideoClassificationInput):
    """Loads a labeled video dataset from a FiftyOne ``SampleCollection``."""

    def load_data(
        self,
        sample_collection: SampleCollection,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        label_field: str = "ground_truth",
    ) -> "LabeledVideoDataset":
        """Build a ``LabeledVideoDataset`` from the collection's filepaths and labels.

        Labels are read from ``label_field`` (``fol.Classification`` values) and
        mapped to integer targets via a class list derived from the collection.
        """
        label_utilities = FiftyOneLabelUtilities(label_field, fol.Classification)
        label_utilities.validate(sample_collection)
        classes = label_utilities.get_classes(sample_collection)
        # integer target <-> class name mappings
        label_to_class_mapping = dict(enumerate(classes))
        class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}
        filepaths = sample_collection.values("filepath")
        labels = sample_collection.values(label_field + ".label")
        targets = [class_to_label_mapping[lab] for lab in labels]
        dataset = LabeledVideoDataset(
            LabeledVideoPaths(list(zip(filepaths, targets))),
            _make_clip_sampler(clip_sampler, clip_duration, clip_sampler_kwargs),
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
        )
        return super().load_data(dataset)
class VideoClassificationInputTransform(InputTransform):
    """Input transform for video classification: clip sampling + frame transforms."""

    def __init__(
        self,
        train_transform: Optional[Dict[str, Callable]] = None,
        val_transform: Optional[Dict[str, Callable]] = None,
        test_transform: Optional[Dict[str, Callable]] = None,
        predict_transform: Optional[Dict[str, Callable]] = None,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        **_kwargs: Any,
    ):
        """Store the clip/decoding configuration and register the stage inputs.

        Raises ``ModuleNotFoundError`` when pytorchvideo is unavailable and
        ``MisconfigurationException`` when ``clip_sampler`` is falsy.
        """
        self.clip_sampler = clip_sampler
        self.clip_duration = clip_duration
        self.clip_sampler_kwargs = clip_sampler_kwargs
        self.video_sampler = video_sampler
        self.decode_audio = decode_audio
        self.decoder = decoder
        if not _PYTORCHVIDEO_AVAILABLE:
            raise ModuleNotFoundError("Please, run `pip install pytorchvideo`.")
        if not clip_sampler_kwargs:
            clip_sampler_kwargs = {}
        if not clip_sampler:
            raise MisconfigurationException(
                "clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``"
            )
        # NOTE(review): the sampler built here is assigned to a local and
        # never used afterwards -- apparently only the validation side
        # effect of make_clip_sampler is wanted; confirm before removing
        clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)
        super().__init__(
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            predict_transform=predict_transform,
            inputs={
                InputFormat.FILES: VideoClassificationPathsPredictInput,
                InputFormat.FOLDERS: VideoClassificationPathsPredictInput,
                InputFormat.FIFTYONE: VideoClassificationFiftyOneInput,
            },
            default_input=InputFormat.FILES,
        )

    def get_state_dict(self) -> Dict[str, Any]:
        """Serialize the transforms plus the clip/decoding configuration."""
        return {
            **self.transforms,
            "clip_sampler": self.clip_sampler,
            "clip_duration": self.clip_duration,
            "clip_sampler_kwargs": self.clip_sampler_kwargs,
            "video_sampler": self.video_sampler,
            "decode_audio": self.decode_audio,
            "decoder": self.decoder,
        }

    @classmethod
    def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> "VideoClassificationInputTransform":
        """Rebuild the transform from a state dict produced by ``get_state_dict``."""
        return cls(**state_dict)

    def default_transforms(self) -> Dict[str, Callable]:
        """Default per-sample (CPU) and per-batch (device) video transforms.

        Training uses a random crop + horizontal flip; evaluation uses a
        center crop; both subsample 8 frames uniformly, then normalize on
        device.
        """
        if self.training:
            # NOTE(review): crop size is 244 (not the usual 224) --
            # preserved as-is, confirm whether intentional
            per_sample_transform = [
                RandomCrop(244, pad_if_needed=True),
                RandomHorizontalFlip(p=0.5),
            ]
        else:
            per_sample_transform = [
                CenterCrop(244),
            ]
        return {
            "per_sample_transform": Compose(
                [
                    ApplyTransformToKey(
                        key="video",
                        transform=Compose([UniformTemporalSubsample(8)] + per_sample_transform),
                    ),
                ]
            ),
            "per_batch_transform_on_device": Compose(
                [
                    ApplyTransformToKey(
                        key="video",
                        # per-channel normalization applied on batched clips
                        transform=K.VideoSequential(
                            K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),
                            data_format="BCTHW",
                            same_on_frame=False,
                        ),
                    ),
                ]
            ),
        }
class VideoClassificationData(DataModule):
"""Data module for Video classification tasks."""
input_transform_cls = VideoClassificationInputTransform
@classmethod
def from_files(
cls,
train_files: Optional[Sequence[str]] = None,
train_targets: Optional[Sequence[Any]] = None,
val_files: Optional[Sequence[str]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_files: Optional[Sequence[str]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_files: Optional[Sequence[str]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
clip_sampler: Union[str, "ClipSampler"] = "random",
clip_duration: float = 2,
clip_sampler_kwargs: Dict[str, Any] = None,
video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
decode_audio: bool = False,
decoder: str = "pyav",
**data_module_kwargs,
) -> "VideoClassificationData":
dataset_kwargs = dict(
clip_sampler=clip_sampler,
clip_duration=clip_duration,
clip_sampler_kwargs=clip_sampler_kwargs,
video_sampler=video_sampler,
decode_audio=decode_audio,
decoder=decoder,
data_pipeline_state=DataPipelineState(),
)
return cls(
VideoClassificationFilesInput(RunningStage.TRAINING, train_files, train_targets, **dataset_kwargs),
VideoClassificationFilesInput(RunningStage.VALIDATING, val_files, val_targets, **dataset_kwargs),
VideoClassificationFilesInput(RunningStage.TESTING, test_files, test_targets, **dataset_kwargs),
VideoClassificationPathsPredictInput(RunningStage.PREDICTING, predict_files, **dataset_kwargs),
input_transform=cls.input_transform_cls(
train_transform,
val_transform,
test_transform,
predict_transform,
),
**data_module_kwargs,
)
@classmethod
def from_folders(
cls,
train_folder: Optional[str] = None,
val_folder: Optional[str] = None,
test_folder: Optional[str] = None,
predict_folder: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
clip_sampler: Union[str, "ClipSampler"] = "random",
clip_duration: float = 2,
clip_sampler_kwargs: Dict[str, Any] = None,
video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
decode_audio: bool = False,
decoder: str = "pyav",
**data_module_kwargs,
) -> "VideoClassificationData":
dataset_kwargs = dict(
clip_sampler=clip_sampler,
clip_duration=clip_duration,
clip_sampler_kwargs=clip_sampler_kwargs,
video_sampler=video_sampler,
decode_audio=decode_audio,
decoder=decoder,
data_pipeline_state=DataPipelineState(),
)
return cls(
VideoClassificationFoldersInput(RunningStage.TRAINING, train_folder, **dataset_kwargs),
VideoClassificationFoldersInput(RunningStage.VALIDATING, val_folder, **dataset_kwargs),
VideoClassificationFoldersInput(RunningStage.TESTING, test_folder, **dataset_kwargs),
VideoClassificationPathsPredictInput(RunningStage.PREDICTING, predict_folder, **dataset_kwargs),
input_transform=cls.input_transform_cls(
train_transform,
val_transform,
test_transform,
predict_transform,
),
**data_module_kwargs,
)
    @classmethod
    @requires("fiftyone")
    def from_fiftyone(
        cls,
        train_dataset: Optional[SampleCollection] = None,
        val_dataset: Optional[SampleCollection] = None,
        test_dataset: Optional[SampleCollection] = None,
        predict_dataset: Optional[SampleCollection] = None,
        train_transform: Optional[Dict[str, Callable]] = None,
        val_transform: Optional[Dict[str, Callable]] = None,
        test_transform: Optional[Dict[str, Callable]] = None,
        predict_transform: Optional[Dict[str, Callable]] = None,
        clip_sampler: Union[str, "ClipSampler"] = "random",
        clip_duration: float = 2,
        clip_sampler_kwargs: Dict[str, Any] = None,
        video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
        decode_audio: bool = False,
        decoder: str = "pyav",
        label_field: str = "ground_truth",
        **data_module_kwargs,
    ) -> "VideoClassificationData":
        """Create a :class:`VideoClassificationData` from FiftyOne sample collections.

        Labels are read from ``label_field`` of each collection.
        NOTE(review): unlike ``from_files``/``from_folders``, the predict stage
        also uses the labeled FiftyOne input, so ``predict_dataset`` appears to
        require labels as well -- confirm intended.
        """
        dataset_kwargs = dict(
            clip_sampler=clip_sampler,
            clip_duration=clip_duration,
            clip_sampler_kwargs=clip_sampler_kwargs,
            video_sampler=video_sampler,
            decode_audio=decode_audio,
            decoder=decoder,
            label_field=label_field,
            data_pipeline_state=DataPipelineState(),
        )
        return cls(
            VideoClassificationFiftyOneInput(RunningStage.TRAINING, train_dataset, **dataset_kwargs),
            VideoClassificationFiftyOneInput(RunningStage.VALIDATING, val_dataset, **dataset_kwargs),
            VideoClassificationFiftyOneInput(RunningStage.TESTING, test_dataset, **dataset_kwargs),
            VideoClassificationFiftyOneInput(RunningStage.PREDICTING, predict_dataset, **dataset_kwargs),
            input_transform=cls.input_transform_cls(
                train_transform,
                val_transform,
                test_transform,
                predict_transform,
            ),
            **data_module_kwargs,
        )
@classmethod
def from_labelstudio(
cls,
export_json: str = None,
train_export_json: str | |
" Conditions: %s"%str(lc)
raise Exception(msg)
# Synthdiag-specific
if synthdiag is True:
ids = self.get_inputs_for_synthsignal(ids=ids, verb=False,
returnas=list)
lc[1] = True
# Prepare dids[name] = {'ids':None/ids, 'needidd':bool}
dids = {}
if lc[0] or lc[1]:
if lc[0]:
ids = [ids]
for ids_ in ids:
if not ids_ in self._lidsnames:
msg = "ids %s matched no known imas ids !"%ids_
msg += " => Available ids are:\n"
msg += repr(self._lidsnames)
raise Exception(msg)
for k in ids:
dids[k] = {'ids':None, 'needidd':True, 'idd':idd}
lids = ids
elif lc[2]:
dids[ids.__class__.__name__] = {'ids':ids,
'needidd':False, 'idd':idd}
lids = [ids.__class__.__name__]
nids = len(lids)
# Check / format occ and deduce nocc
if occ is None:
occ = 0
lc = [type(occ) in [int,np.int], hasattr(occ,'__iter__')]
assert any(lc)
if lc[0]:
occ = [np.r_[occ].astype(int) for _ in range(0,nids)]
else:
if len(occ) == nids:
occ = [np.r_[oc].astype(int) for oc in occ]
else:
occ = [np.r_[occ].astype(int) for _ in range(0,nids)]
for ii in range(0,nids):
nocc = occ[ii].size
dids[lids[ii]]['occ'] = occ[ii]
dids[lids[ii]]['nocc'] = nocc
if dids[lids[ii]]['ids'] is not None:
dids[lids[ii]]['ids'] = [dids[lids[ii]]['ids']]*nocc
# Format isget / get
for ii in range(0,nids):
nocc = dids[lids[ii]]['nocc']
if dids[lids[ii]]['ids'] is None:
isgeti = np.zeros((nocc,), dtype=bool)
if dids[lids[ii]]['ids'] is not None:
if isget is None:
isgeti = np.r_[False]
elif type(isget) is bool:
isgeti = np.r_[bool(isget)]
elif hasattr(isget,'__iter__'):
if len(isget) == nids:
isgeti = np.r_[isget[ii]]
else:
isgeti = np.r_[isget]
assert isgeti.size in [1,nocc]
if isgeti.size < nocc:
isgeti = np.repeat(isgeti,nocc)
dids[lids[ii]]['isget'] = isgeti
return dids
    def add_ids(self, ids=None, occ=None, idd=None, preset=None,
                shot=None, run=None, refshot=None, refrun=None,
                user=None, database=None, version=None,
                ref=None, isget=None, get=None):
        """ Add an ids (or a list of ids)
        Optionally specify also a specific idd to which the ids will be linked
        The ids can be provided as such, or by name (str)
        If preset is provided, the ids list is taken from self._dpreset and
        get defaults to True; otherwise get defaults to False.
        An idd is created on the fly when an idd object or shot is given;
        otherwise the reference idd (self._refidd) is used.
        """
        # get defaults to True only when a preset is requested
        if get is None:
            get = False if preset is None else True
        # preset: resolve the ids list from the tabulated presets
        if preset is not None:
            if preset not in self._dpreset.keys():
                msg = "Available preset values are:\n"
                msg += "    - %s\n"%str(sorted(self._dpreset.keys()))
                msg += "    - Provided: %s"%str(preset)
                raise Exception(msg)
            ids = sorted(self._dpreset[preset].keys())
            self._preset = preset
        # Add idd if relevant
        # NOTE(review): hasattr(idd, 'close') duck-checks for an open
        # idd-like object; shot alone also triggers idd creation
        if hasattr(idd, 'close') or shot is not None:
            name = self.add_idd(idd=idd,
                                shot=shot, run=run,
                                refshot=refshot, refrun=refrun,
                                user=user, database=database,
                                version=version, ref=ref, return_name=True)
            idd = name
        # fall back on the reference idd when none was provided
        if idd is None and ids is not None:
            if self._refidd is None:
                msg = "No idd was provided (and ref idd is not clear) !\n"
                msg += "Please provide an idd either directly or via \n"
                msg += "args (shot, user, database...)!\n"
                msg += "  - %s"%str([(k,v.get('ref',None))
                                     for k,v in self._didd.items()])
                raise Exception(msg)
            idd = self._refidd
        elif idd is not None:
            assert idd in self._didd.keys()
        # Add ids: check/format and merge into the stored dict
        if ids is not None:
            dids = self._checkformat_ids(ids, occ=occ, idd=idd, isget=isget)
            self._dids.update(dids)
        # optionally fetch the data immediately
        if get:
            self.open_get_close()
def add_ids_base(self, occ=None, idd=None,
shot=None, run=None, refshot=None, refrun=None,
user=None, database=None, version=None,
ref=None, isget=None, get=None):
""" Add th list of ids stored in self._IDS_BASE
Typically used to add a list of common ids without having to re-type
them every time
"""
self.add_ids(ids=self._IDS_BASE, occ=occ, idd=idd,
shot=shot, run=run, refshot=refshot, refrun=refrun,
user=user, database=database, version=version,
ref=ref, isget=isget, get=get)
def add_ids_synthdiag(self, ids=None, occ=None, idd=None,
shot=None, run=None, refshot=None, refrun=None,
user=None, database=None, version=None,
ref=None, isget=None, get=None):
""" Add pre-tabulated input ids necessary for calculating synth. signal
The necessary input ids are given by self.get_inputs_for_synthsignal()
"""
if get is None:
get = True
ids = self.get_inputs_for_synthsignal(ids=ids, verb=False,
returnas=list)
self.add_ids(ids=ids, occ=occ, idd=idd, preset=None,
shot=shot, run=run, refshot=refshot, refrun=refrun,
user=user, database=database, version=version,
ref=ref, isget=isget, get=get)
    def remove_ids(self, ids=None, occ=None):
        """ Remove an ids (optionally remove only an occurence)
        If all the ids linked to an idd are removed, said idd is removed too
        Silently does nothing when ids is None.
        """
        if ids is not None:
            if not ids in self._dids.keys():
                msg = "Please provide the name (str) of a valid ids\n"
                msg += "Currently available ids are:\n"
                msg += "    - %s"%str(sorted(self._dids.keys()))
                raise Exception(msg)
            # restrict the requested occurrences to the stored ones
            occref = self._dids[ids]['occ']
            if occ is None:
                occ = occref
            else:
                occ = np.unique(np.r_[occ].astype(int))
                occ = np.intersect1d(occ, occref, assume_unique=True)
            # NOTE(review): the idd is deleted whenever this ids is its
            # only ids, even if only some occurrences are removed below,
            # which leaves the remaining ids entry pointing to a deleted
            # idd -- confirm intended
            idd = self._dids[ids]['idd']
            lids = [k for k,v in self._dids.items() if v['idd']==idd]
            if lids == [ids]:
                del self._didd[idd]
            if np.all(occ == occref):
                # all occurrences removed => drop the ids entirely
                del self._dids[ids]
            else:
                # partial removal => filter the per-occurrence arrays
                isgetref = self._dids[ids]['isget']
                indok = np.array([ii for ii in range(0,occref.size)
                                  if occref[ii] not in occ])
                self._dids[ids]['ids'] = [self._dids[ids]['ids'][ii]
                                          for ii in indok]
                self._dids[ids]['occ'] = occref[indok]
                self._dids[ids]['isget'] = isgetref[indok]
                self._dids[ids]['nocc'] = self._dids[ids]['occ'].size
def get_ids(self, ids=None, occ=None):
if ids is None and len(self._dids.keys()) == 1:
ids = list(self._dids.keys())[0]
assert ids in self._dids.keys()
if occ is None:
occ = self._dids[ids]['occ'][0]
else:
assert occ in self._dids[ids]['occ']
indoc = np.where(self._dids[ids]['occ'] == occ)[0][0]
return self._dids[ids]['ids'][indoc]
#---------------------
# Methods for showing content
#---------------------
    def get_summary(self, sep=' ', line='-', just='l',
                    table_sep=None, verb=True, return_=False):
        """ Summary description of the object content as a np.array of str

        Builds two character tables: one for the idd entries (data files /
        connections) and one for the ids entries (IMAS structures).
        Depending on verb / return_, prints and / or returns them.

        Parameters
        ----------
        sep, line, just: str
            Formatting options forwarded to self._getcharray
        table_sep: None / str
            Separator between the two tables (default: blank line)
        verb: bool
            Whether to print the assembled message
        return_: bool / str
            False (nothing), True (arrays + msgs), 'array' or 'msg'
        """
        if table_sep is None:
            table_sep = '\n\n'
        # -----------------------
        # idd
        a0 = []
        if len(self._didd) > 0:
            # Column headers; last entry is the '(ref)' marker column
            c0 = ['idd', 'user', 'database', 'version',
                  'shot', 'run', 'refshot', 'refrun', 'isopen', '']
            for k0,v0 in self._didd.items():
                # c0[1:-2] selects the param columns (user ... refrun)
                lu = ([k0] + [str(v0['params'][k]) for k in c0[1:-2]]
                      + [str(v0['isopen'])])
                # Mark the reference idd
                ref = '(ref)' if k0==self._refidd else ''
                lu += [ref]
                a0.append(lu)
            a0 = np.array(a0, dtype='U')
        # -----------------------
        # ids
        if len(self._dids) > 0:
            c1 = ['ids', 'idd', 'occ', 'isget']
            llids = self._checkformat_get_ids()
            a1 = []
            for (k0, lk1) in llids:
                for ii in range(0,len(lk1)):
                    # Only print the idd name once per group; ditto-mark after
                    idd = k0 if ii == 0 else '"'
                    a1.append([lk1[ii], idd,
                               str(self._dids[lk1[ii]]['occ']),
                               str(self._dids[lk1[ii]]['isget'])])
            a1 = np.array(a1, dtype='U')
        else:
            a1 = []
        # Out
        if verb or return_ in [True,'msg']:
            # Render each non-empty table to a string
            if len(self._didd) > 0:
                msg0 = self._getcharray(a0, c0,
                                        sep=sep, line=line, just=just)
            else:
                msg0 = ''
            if len(self._dids) > 0:
                msg1 = self._getcharray(a1, c1,
                                        sep=sep, line=line, just=just)
            else:
                msg1 = ''
            if verb:
                msg = table_sep.join([msg0,msg1])
                print(msg)
        if return_ != False:
            if return_ == True:
                out = (a0, a1, msg0, msg1)
            elif return_ == 'array':
                out = (a0, a1)
            elif return_ == 'msg':
                out = table_sep.join([msg0,msg1])
            else:
                lok = [False, True, 'msg', 'array']
                raise Exception("Valid return_ values are: %s"%str(lok))
            return out
def __repr__(self):
if hasattr(self, 'get_summary'):
return self.get_summary(return_='msg', verb=False)
else:
return object.__repr__(self)
#---------------------
# Methods for returning data
#---------------------
# DEPRECATED ?
def _checkformat_getdata_indt(self, indt):
msg = "Arg indt must be a either:\n"
msg += " - None: all channels used\n"
msg += " - int: times to use (index)\n"
msg += " - array of int: times to use (indices)"
lc = [type(indt) is None, type(indt) is int, hasattr(indt,'__iter__')]
if not any(lc):
raise Exception(msg)
if lc[1] or lc[2]:
indt = np.r_[indt].rave()
lc = [indt.dtype == np.int]
if not any(lc):
raise Exception(msg)
assert np.all(indt>=0)
return indt
def _set_fsig(self):
for ids in self._dshort.keys():
for k, v in self._dshort[ids].items():
self._dshort[ids][k]['fsig'] = _comp.get_fsig(v['str'])
@classmethod
def get_units(cls, ids, sig, force=None):
return _comp.get_units(ids, sig,
dshort=cls._dshort, dcomp=cls._dcomp,
force=force)
def get_data(self, dsig=None, occ=None,
data=None, units=None,
indch=None, indt=None, stack=None,
isclose=None, flatocc=True,
nan=None, pos=None, empty=None, strict=None,
return_all=None, warn=None):
""" Return a dict of the desired signals extracted from specified ids
For each signal, loads the data and / or units
If the ids has a field 'channel', indch is used to specify from which
channel data shall be loaded (all by default)
Parameters
----------
ids: None / str
ids from which the data should be loaded
ids should be available (check self.get_summary())
ids should be loaded if not available, using:
- self.add_ids() to add the ids
- self.open_get_close() to force loading if necessary
sig: None / str / list
shortcuts of signals to be loaded from the ids
Check available shortcuts using self.get_shortcuts(ids)
You can add custom shortcuts if needed (cf. self.add_shortcuts())
sig can be a single str (shortcut) or a list of such
occ: None / int
occurence from which to load the data
data: None / bool
Flag indicating whether to load the data
units: None / bool
Flag indicating whether to load the units
indch: None / list / np.ndarray
If the data has channels, this lists / array of int indices can be
used | |
we get the best set of parameters out, even if this error
# comes up
#def iterCallback(params, iter, resid, *args, **kwargs):
# if iter >= maxEvalsList[i]:
# Just kidding, I'll just use a try and ignore the error, since I
# am pretty sure that the params variable is constantly updated, not just
# at the end.
try:
# Now do the optimization
# The fatol kwarg is the minimum change between iterations before the
# fit is considered to have converged. I just choose this based on an educated guess,
# and it seems to work (worst case, you can just it really small and run the optimization
# for the max number of evals every time).
result = minimize(objectiveFunction, params,
args=(maskedImage, z, scaledRadius, scaledCenter, localizedMask),
method=methodList[i], max_nfev=maxEvalsList[i], nan_policy='omit', options={"fatol": 1e-2})
# Copy over the new values of the forces, alphas, and betas
for j in range(z):
forceArr[j] = result.params[f"f{j}"]
betaArr[j] = result.params[f"b{j}"]
alphaArr[j] = result.params[f"a{j}"]
# If we run out of function evaluations, that not too huge a deal,
# so we just grab the best value and move on. If we run into an actual error,
# that shouldn't be caught here, since that might be serious, so we re-trhow it.
except Exception as e:
if not isinstance(e, AbortFitException):
raise e
if debug:
print(e)
# Otherwise, we take the last good value (since we kept track outside of
# the optimization function)
if bestParams is not None:
for j in range(z):
forceArr[j] = bestParams[f"f{j}"]
betaArr[j] = bestParams[f"b{j}"]
alphaArr[j] = bestParams[f"a{j}"]
else:
# If we don't have any good parameter values, just return the initial
# guess. The arrays are initialized as this value, so we don't need
# to do anything in this case.
pass
# ---------------------
# Detect missing forces
# ---------------------
# If the code detects there is a missing force (no idea how yet)
if result is not None and result.chisqr > missingForceChiSqrThreshold and allowAddForces:
# We sweep around the edge of the particle to see if there
# are any regions that look like they could have a force
# (denoted by a particularly high g2 value, or rather a peak)
testBetaCount = 30
avgG2Arr = np.zeros(testBetaCount)
newBetaArr = np.linspace(-np.pi, np.pi, testBetaCount)
# Calculate all of the g2s around the edge of the particle
gSqr = gSquared(scaledImage)
for j in range(testBetaCount):
contactPoint = scaledCenter + scaledRadius * np.array([np.cos(newBetaArr[j]), np.sin(newBetaArr[j])])
# Create a mask just over the small area inside of the particle
contactMask = circularMask(scaledImage.shape, contactPoint, scaledContactMaskRadius)[:,:,0]
contactMask = (contactMask + particleMask) == 2
avgG2Arr[j] = np.sum(contactMask * gSqr) / np.sum(contactMask)
# Identify any peaks in the average g2s
peakIndArr = find_peaks(avgG2Arr, height=newBetaG2Height)[0]
peakIndArr = np.sort(peakIndArr)
# Make sure that there aren't any artifacts of periodicity
# Usually this isn't actually a problem, because the peak
# finding algorithm requires a proper peak, which can only
# be on one side (but we'll leave it here just in case)
if np.arange(3).any() in peakIndArr and np.arange(len(avgG2Arr)-3, len(avgG2Arr)).any() in peakIndArr:
# Remove last entry
peakIndArr = peakIndArr[:-1]
peakBetaArr = newBetaArr[peakIndArr]
# Now we have a list of likely points, we need to see if our original
# list is missing any of these.
differenceArr = np.abs(np.subtract.outer(peakBetaArr, betaArr))
# Check to see if there is a new peak that doesn't have
# a previous force close to it
for j in range(len(peakBetaArr)):
if np.min(differenceArr[j]) > newBetaMinSeparation and np.max(differenceArr[j]) < 2*np.pi - newBetaMinSeparation:
# Add the new force
betaArr = np.append(betaArr, peakBetaArr[j])
forceArr = np.append(forceArr, .1) # Value isn't too imporant here
alphaArr = np.append(alphaArr, 0.)
# If we have added a force, we should run the optimization again, and see if it improves
if len(forceArr) > z:
if debug:
print(f'Added {len(forceArr) - z} force(s).')
# We also want to make sure we're allowed to vary beta on the next iteration
tempVaryBeta = True
# This skips the i += 1 at the end of the loop, and makes the optimization run again
continue
# ------------------------------------
# Remove forces that don't do anything
# ------------------------------------
if len(forceArr[forceArr < minForceThreshold]) > 0 and allowRemoveForces:
# Remove forces that aren't actually doing anything
betaArr = betaArr[forceArr > minForceThreshold]
alphaArr = alphaArr[forceArr > minForceThreshold]
# This one has to be done last for the other indexing to work
forceArr = forceArr[forceArr > minForceThreshold]
if debug:
print(f'Removed {z - len(forceArr)} force(s).')
# Iterate (since we have a while not a for)
i += 1
if returnOptResult:
return result
else:
return forceArr, betaArr, alphaArr, residuals
@numba.jit(nopython=True)
def g2ForceCalibration(fSigma, radius, pxPerMeter=1., g2Padding=1, alphaArr=np.array([0., 0.]), betaArr=np.array([0., -np.pi]), forceSteps=100, forceBounds=np.array([.01, 1.]), brightfield=True):
"""
Use synthetic photoelastic response to fit the conversion constant between
gradient squared value and force (in Newtons), assuming a linear relationship.
Note that this computes the least squares of force (N) vs **average** gradient
squared. The Matlab just uses the sum of gradient squared, but this is not invariant
under changes of resolution, so I have opted to use the average. Because of this, a
calibration value calculated in Matlab will **not** work here unless you divide out
the number of points it is summed over first.
Parameters
----------
fSigma : float
Stress optic coefficient, relating to material thickness, wavelength of light
and other material property (C).
radius : float
Radius of the particle that is being simulated in pixels. If pxPerMeter is
not provided (or set to 1), this value will be assumed to already have been converted to meters.
contactMaskRadius : float
Radius of the circular mask that is applied over each contact to find the average gradient
squared.
pxPerMeter : float
The number of pixels per meter for the simulated image. If not provided, or set to 1, the radius
value will be assumed to already have been converted to meters.
g2Padding : int
Number of pixels to ignore at the edge of the particle. We don't expect any boundary
artifacts in our synthetic data, but we will eventually do this for the real data,
so it is important to keep the size of the particles the same throughout.
alphaArr : np.ndarray[Z]
Array of angles representing force contact angles.
betaArr : np.ndarray[Z]
Array of angles representing force contact positions.
forceSteps : int
The number of points to use for fitting our line of g^2 vs. force.
forceBounds : [float, float]
The minimum and maximum value of force applied to calculate the calibration value.
brightfield : bool
Whether the intensity should be simulated as seen through a brightfield (True)
polariscope or darkfield (False) polariscope.
Returns
-------
g2ForceSlope : float
Slope found via linear regression to convert average g^2 to force.
"""
# The magnitude of the forces that will be acting at each step
forceValues = np.linspace(forceBounds[0], forceBounds[1], forceSteps)
gSqrAvgArr = np.zeros(forceSteps)
imageSize = (np.int16(radius*2)+11, np.int16(radius*2)+11)
center = np.array([imageSize[0]/2, imageSize[1]/2], dtype=np.int64)
particleMask = circularMask(imageSize, center, radius - g2Padding)[:,:,0]
# The contact mask is a circle placed over the edge of the particle where the force is applied
#contactMask1 = circularMask(imageSize,
# np.array([imageSize[0]/2 + radius*np.cos(betaArr[0]), imageSize[1]/2 + radius*np.sin(betaArr[0])]),
# contactMaskRadius)[:,:,0]
#contactMask2 = circularMask(imageSize,
# np.array([imageSize[0]/2 + radius*np.cos(betaArr[1]), imageSize[1]/2 + radius*np.sin(betaArr[1])]),
# contactMaskRadius)[:,:,0]
# Get rid of the parts outside of the circle
#contactMask1 = contactMask1 * particleMask
#contactMask2 = contactMask2 * particleMask
# Add them together
#contactMask = contactMask1 + contactMask2
# To divide out the number of points
#numPoints = np.sum(contactMask)
numPoints = np.sum(particleMask)
for i in range(forceSteps):
# Assume two forces acting on the particle with equal magnitude
forceArr = np.array([forceValues[i], forceValues[i]])
# Create a synthetic photoelastic response
particleImg = genSyntheticResponse(forceArr, alphaArr, betaArr, fSigma, radius, pxPerMeter, brightfield, imageSize, center)
# Calculate the gradient
gSqr = gSquared(particleImg)
# Multiply by the mask to avoid weird edge effects
gSqrAvgArr[i] = np.sum(gSqr * particleMask) | |
sees just theirs but we see both our old and new ones plus those we
# have group access to
self._asJoeBloggs()
response = self._client.get('/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
wflows = response.data
self.assertEqual(len(wflows["results"]), 2)
self._asJaneDoe()
response = self._client.get('/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
wflows = response.data
self.assertEqual(len(wflows["results"]), 4)
def test_user_edit_own(self):
self._asJoeBloggs()
updated_task = {"description": "Update"}
response = self._client.patch("/tasks/%d/" % self._task1.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIs(TaskTemplate.objects.filter(description="First").exists(), False)
self.assertIs(TaskTemplate.objects.filter(description="Update").exists(), True)
def test_user_edit_other_nonread(self):
# Joe cannot see Jane's item 4
self._asJoeBloggs()
updated_task = {"description": "Update"}
response = self._client.patch("/tasks/%d/" % self._task3.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIs(TaskTemplate.objects.filter(description="Third").exists(), True)
self.assertIs(TaskTemplate.objects.filter(description="Update").exists(), False)
def test_user_edit_other_readonly(self):
# Joe can see but not edit Jane's item 3
self._asJoeBloggs()
updated_task = {"description": "Update"}
response = self._client.patch("/tasks/%d/" % self._task2.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIs(TaskTemplate.objects.filter(description="Second").exists(), True)
self.assertIs(TaskTemplate.objects.filter(description="Update").exists(), False)
def test_user_edit_other_readwrite(self):
# Give Jane write permission to Joe's item 1 first so she can edit it
ViewPermissionsMixin().assign_permissions(instance=self._task1,
permissions={"jane_group": "rw"})
self._asJaneDoe()
updated_task = {"description": "Update"}
response = self._client.patch("/tasks/%d/" % self._task1.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIs(TaskTemplate.objects.filter(description="First").exists(), False)
self.assertIs(TaskTemplate.objects.filter(description="Update").exists(), True)
def test_admin_edit_any(self):
self._asAdmin()
updated_task = {"description": "Update"}
response = self._client.patch("/tasks/%d/" % self._task1.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIs(TaskTemplate.objects.filter(description="First").exists(), False)
self.assertIs(TaskTemplate.objects.filter(description="Update").exists(), True)
def test_user_delete_own(self):
self._asJaneDoe()
response = self._client.delete("/tasks/%d/" % self._task3.id)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIs(TaskTemplate.objects.filter(name="TaskTempl3").exists(), False)
def test_user_delete_other_noread(self):
# Joe can only see/edit his
self._asJoeBloggs()
response = self._client.delete("/tasks/%d/" % self._task3.id)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIs(TaskTemplate.objects.filter(name="TaskTempl3").exists(), True)
def test_user_delete_other_readonly(self):
# Jane can edit hers and see both
self._asJaneDoe()
response = self._client.delete("/tasks/%d/" % self._task1.id)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertIs(TaskTemplate.objects.filter(name="TaskTempl1").exists(), True)
def test_user_delete_other_readwrite(self):
# Give Jane write permission to Joe's group first so she can delete it
ViewPermissionsMixin().assign_permissions(instance=self._task1,
permissions={"jane_group": "rw"})
self._asJaneDoe()
response = self._client.delete("/tasks/%d/" % self._task1.id)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIs(TaskTemplate.objects.filter(name="TaskTempl1").exists(), False)
def test_admin_delete_any(self):
self._asAdmin()
response = self._client.delete("/tasks/%d/" % self._task1.id)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIs(TaskTemplate.objects.filter(name="TaskTempl1").exists(), False)
def test_user_set_permissions_own(self):
# Any user should be able to set permissions on own sets
self._asJoeBloggs()
permissions = {"joe_group": "rw", "jane_group": "rw"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task1.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), "rw")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "rw")
def test_user_set_permissions_nonread(self):
# Joe is not in the right group to see Jane's project
self._asJoeBloggs()
permissions = {"jane_group": "r"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task3.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
t = TaskTemplate.objects.get(name="TaskTempl3")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "rw")
def test_user_set_permissions_readonly(self):
# Jane can see but not edit Joe's project
self._asJaneDoe()
permissions = {"jane_group": "rw"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task1.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "r")
def test_user_set_permissions_readwrite(self):
# Jane can see and edit Joe's project if we change her permissions first
ViewPermissionsMixin().assign_permissions(instance=self._task1,
permissions={"jane_group": "rw"})
self._asJaneDoe()
permissions = {"joe_group": "r", "jane_group": "r"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task1.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), "r")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "r")
def test_admin_set_permissions(self):
# Admin can do what they like
self._asAdmin()
permissions = {"joe_group": "r", "jane_group": "r"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task1.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), "r")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "r")
def test_set_permissions_invalid_group(self):
# An invalid group should throw a 400 data error
self._asAdmin()
permissions = {"jim_group": "r"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task1.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Check the group wasn't created accidentally in the process
self.assertIs(Group.objects.filter(name="jim_group").exists(), False)
def test_set_permissions_invalid_permission(self):
# An invalid permission should throw a 400 data error
self._asAdmin()
permissions = {"joe_group": "flibble"}
response = self._client.patch(
"/tasks/%d/set_permissions/" % self._task1.id,
permissions, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Check the permission wasn't changed accidentally in the process
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), "rw")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "r")
def test_user_remove_permissions_own(self):
# Any user should be able to remove permissions on own projects
self._asJoeBloggs()
response = self._client.delete(
"/tasks/%d/remove_permissions/?groups=joe_group" % self._task1.id,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), None)
def test_user_remove_permissions_nonread(self):
# Joe is not in the right group to see Jane's item 4
self._asJoeBloggs()
response = self._client.delete(
"/tasks/%d/remove_permissions/?groups=jane_group" % self._task3.id,
format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
t = TaskTemplate.objects.get(name="TaskTempl3")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), "rw")
def test_user_remove_permissions_readonly(self):
# Jane can see but not edit Joe's item 1
self._asJaneDoe()
response = self._client.delete(
"/tasks/%d/remove_permissions/?groups=joe_group" % self._task1.id,
format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), "rw")
def test_user_remove_permissions_readwrite(self):
# Jane can see and edit Joe's project if we change her permissions first
ViewPermissionsMixin().assign_permissions(instance=self._task1,
permissions={"jane_group": "rw"})
self._asJaneDoe()
response = self._client.delete(
"/tasks/%d/remove_permissions/?groups=joe_group" % self._task1.id,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), None)
def test_admin_remove_permissions(self):
# Admin can do what they like
self._asAdmin()
response = self._client.delete(
"/tasks/%d/remove_permissions/?groups=jane_group&groups=joe_group" %
self._task1.id,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = TaskTemplate.objects.get(name="TaskTempl1")
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="jane_group")), None)
self.assertEqual(
ViewPermissionsMixin().current_permissions(instance=t,
group=Group.objects.get(
name="joe_group")), None)
def test_remove_permissions_invalid_group(self):
# An invalid group name should fail quietly - we don't care if permissions can't be
# removed as the end result is the same, i.e. that group can't access anything
self._asAdmin()
response = self._client.delete(
"/tasks/%d/remove_permissions/?groups=jim_group" %
self._task1.id,
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Test that the group wasn't created accidentally
self.assertIs(Group.objects.filter(name="jim_group").exists(), False)
def test_task_store_labware_as(self):
self.assertEqual(self._task1.store_labware_as(), "labware_identifier")
    def _setup_test_task_fields(self):
        """Create a full set of field templates on self._task3.

        Adds one calculation, two input, one variable, one output and one
        step field (with one property) so recalculation / listing tests
        have realistic data to work with.
        """
        # Set up some test fields on the task
        # NOTE(review): "{output1]}" below contains a mismatched bracket
        # ("]" instead of "}") - confirm whether this is deliberate
        # malformed-input coverage or a typo.
        calc = "{input1}+{input2}*{output1]}/{variable1}*{prop1}+{product_input_amount}"
        self._calcField = CalculationFieldTemplate.objects.create(template=self._task3,
                                                                  label='calc1',
                                                                  description="Calculation field 1",
                                                                  calculation=calc)
        self._task3.calculation_fields.add(self._calcField)
        # Two numeric inputs referenced by the calculation above
        self._inputField1 = InputFieldTemplate.objects.create(template=self._task3,
                                                              label='input1',
                                                              description="Input field 1",
                                                              amount=1.0,
                                                              measure=self._millilitre,
                                                              lookup_type=self._prodinput,
                                                              from_input_file=False)
        self._inputField2 = InputFieldTemplate.objects.create(template=self._task3,
                                                              label='input2',
                                                              description="Input field 2",
                                                              amount=4.4,
                                                              measure=self._millilitre,
                                                              lookup_type=self._prodinput,
                                                              from_input_file=False)
        self._task3.input_fields.add(self._inputField1)
        self._task3.input_fields.add(self._inputField2)
        # NOTE(review): the variable field is created with template=self._task1
        # but added to self._task3 - confirm this mismatch is intended.
        self._variableField = VariableFieldTemplate.objects.create(
            template=self._task1,
            label="variable1",
            description="Variable field 1",
            amount=3.3,
            measure=self._millilitre,
            measure_not_required=False)
        self._task3.variable_fields.add(self._variableField)
        self._outputField = OutputFieldTemplate.objects.create(template=self._task3,
                                                               label='output1',
                                                               description="Output field 1",
                                                               amount=9.6,
                                                               measure=self._millilitre,
                                                               lookup_type=self._prodinput)
        self._task3.output_fields.add(self._outputField)
        # Step field with one attached property ('prop1')
        self._stepField = StepFieldTemplate.objects.create(template=self._task3,
                                                           label='step1',
                                                           description="Step field 1")
        self._stepFieldProperty = StepFieldProperty.objects.create(step=self._stepField,
                                                                   label='prop1',
                                                                   amount=9.6,
                                                                   measure=self._millilitre)
        self._stepField.properties.add(self._stepFieldProperty)
        self._task3.step_fields.add(self._stepField)
        self._task3.save()
    def _setup_test_task_recalculation(self):
        """Return the JSON payload used by the /recalculate/ endpoint tests.

        Mirrors the fields created by _setup_test_task_fields, with updated
        amounts so a recalculation produces a new result.
        """
        return {"id": self._task1.id,
                "product_input_measure": self._millilitre.symbol,
                "product_input_amount": 5,
                "product_input": self._prodinput.name,
                "input_files": [self._inputTempl.name],
                "output_files": [self._outputTempl.name],
                "equipment_files": [self._equipTempl.name],
                "labware": self._labware.name,
                "store_labware_as": "labware_identifier",
                "capable_equipment": [self._equipmentSequencer.name],
                "name": "NewTask",
                "assign_groups": {"jane_group": "rw"},
                # Input amounts changed from the fixture (1.0 / 4.4)
                "input_fields": [{"measure": self._millilitre.symbol,
                                  "lookup_type": self._prodinput.name, "label": "input1",
                                  "amount": 6.2, "template": self._task3.id,
                                  "calculation_used": self._calcField.id,
                                  "from_calculation": False},
                                 {"measure": self._millilitre.symbol,
                                  "lookup_type": self._prodinput.name, "label": "input2",
                                  "amount": 4.3, "template": self._task3.id,
                                  "calculation_used": self._calcField.id,
                                  "from_calculation": False}],
                "step_fields": [
                    {"label": "step1", "template": self._task3.id,
                     "properties": [{"id": self._stepFieldProperty.id,
                                     "measure": self._millilitre.symbol,
                                     "label": "prop1", "amount": 9.9,
                                     "calculation_used": self._calcField.id,
                                     "from_calculation": False}]}],
                "variable_fields": [
                    {"measure": self._millilitre.symbol, "label": "variable1",
                     "amount": 8.4, "template": self._task3.id}],
                "output_fields": [{"measure": self._millilitre.symbol,
                                   "lookup_type": self._prodinput.name, "label": "output1",
                                   "amount": 9.6, "template": self._task3.id,
                                   "calculation_used": self._calcField.id,
                                   "from_calculation": False}],
                # Calculation omits output1 (unlike the fixture's string)
                "calculation_fields": [{"id": self._calcField.id,
                                        "label": "calc1",
                                        "calculation": ("{input1}"
                                                        "+{input2}"
                                                        "/{variable1}"
                                                        "*{prop1}"
                                                        "+{product_input_amount}"),
                                        "template": self._task3.id}],
                "created_by": self._janeDoe.username}
def test_user_recalculate_nonread_task(self):
self._setup_test_task_fields()
updated_task = self._setup_test_task_recalculation()
self._asJoeBloggs()
response = self._client.post("/tasks/%d/recalculate/" % self._task3.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_user_recalculate_readonly_task(self):
self._setup_test_task_fields()
updated_task = self._setup_test_task_recalculation()
self._asJaneDoe()
response = self._client.post("/tasks/%d/recalculate/" % self._task1.id,
# ID mismatch doesn't matter as won't get that far
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
"""
def test_user_recalculate_readwrite_task(self):
self._setup_test_task_fields()
updated_task = self._setup_test_task_recalculation()
self._asJaneDoe()
response = self._client.post("/tasks/%d/recalculate/" % self._task3.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["calculation_fields"][0]["result"], 16.267857142857142)
def test_admin_recalculate_task(self):
self._setup_test_task_fields()
updated_task = self._setup_test_task_recalculation()
self._asAdmin()
response = self._client.post("/tasks/%d/recalculate/" % self._task3.id,
updated_task, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["calculation_fields"][0]["result"], 16.267857142857142)
"""
def test_user_listall_taskfield_readonly(self):
self._setup_test_task_fields()
# Make Jane temporarily readonly on her task
ViewPermissionsMixin().assign_permissions(instance=self._task3,
permissions={"jane_group": "r"})
self._asJaneDoe()
response = self._client.get('/taskfields/?type=%s' % "Step")
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = response.data["results"]
# TODO This test fails because Jane - who is r-only for TaskTemplate 'task3' - should
# be able to see the StepFieldTemplate associated with that task, but the
# system is not returning it because it is checking Field permissions not Template ones.
self.assertEqual(len(t), 1)
self.assertEqual(t[0]["label"], self._stepField.label)
def test_user_listall_taskfield_nonread(self):
self._setup_test_task_fields()
self._asJoeBloggs()
# Joe should see none as all on Jane's
response = self._client.get('/taskfields/?type=%s' % "Step")
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = response.data["results"]
self.assertEqual(len(t), 0)
def test_user_listall_taskfield_readwrite(self):
self._setup_test_task_fields()
self._asJaneDoe()
ViewPermissionsMixin().assign_permissions(self._stepField, {'jane_group': 'rw'})
response = self._client.get('/taskfields/?type=%s' % "Step")
self.assertEqual(response.status_code, status.HTTP_200_OK)
# TODO This test fails because Jane - who is r/w for TaskTemplate 'task3' - should
# be able to see the StepFieldTemplate associated with that task, but the
# system is not returning it because it is checking Field permissions not Template ones.
t = response.data["results"]
self.assertEqual(len(t), 1)
self.assertEqual(t[0]["label"], self._stepField.label)
def test_admin_listall_taskfield_any(self):
self._setup_test_task_fields()
self._asAdmin()
response = self._client.get('/taskfields/?type=%s' % "Step")
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = response.data["results"]
self.assertEqual(len(t), 1)
self.assertEqual(t[0]["label"], self._stepField.label)
response = self._client.get('/taskfields/?type=%s' % "Input")
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = response.data["results"]
self.assertEqual(len(t), 2)
self.assertEqual(t[0]["label"], self._inputField2.label)
self.assertEqual(t[1]["label"], self._inputField1.label)
response = self._client.get('/taskfields/?type=%s' % "Output")
self.assertEqual(response.status_code, status.HTTP_200_OK)
t = response.data["results"]
self.assertEqual(len(t), 1)
self.assertEqual(t[0]["label"], self._outputField.label)
response = self._client.get('/taskfields/?type=%s' % "Variable")
self.assertEqual(response.status_code, | |
ground_truth[i, :]))
fdr_den = np.sum(importance_score[i, :])
fdr[i] = 100 * float(fdr_nom) / float(fdr_den + 1e-8)
mean_tpr = np.mean(tpr)
std_tpr = np.std(tpr)
mean_fdr = np.mean(fdr)
std_fdr = np.std(fdr)
return mean_tpr, std_tpr, mean_fdr, std_fdr
def quantile_discretizer(df, num, cat_cols):
    """Binarize each non-categorical column of ``df`` into ``num - 1`` quantile bands.

    Categorical columns (``cat_cols``) pass through unchanged.  Returns the
    discretized frame plus the per-column band boundaries so the identical
    binning can be replayed on new data (see ``quantile_discretizer_byq``).
    """
    quantiles = np.round(np.linspace(0, 1, num=num), 2)
    q_cols = {}
    q_values = {'{}'.format(name): [] for name in list(df.columns) if name not in cat_cols}
    for name in list(df.columns):
        if name in cat_cols:
            # Categorical columns are copied verbatim.
            q_cols[name] = df[name]
            continue
        col_values = df[name].values
        for position, (q_low, q_high) in enumerate(zip(quantiles[:-1], quantiles[1:]), start=1):
            low = np.quantile(col_values, q_low)
            high = np.quantile(col_values, q_high)
            q_values['{}'.format(name)].append([low, high])
            # Last band is closed on the right; all others are half-open so
            # each value lands in exactly one band.
            if position == num - 1:
                indicator = 1 * (low <= col_values) * (col_values <= high)
            else:
                indicator = 1 * (low <= col_values) * (col_values < high)
            q_cols['{}: q{}-q{}'.format(name, q_low, q_high)] = indicator
    return pd.DataFrame.from_dict(q_cols), q_values
def quantile_discretizer_byq(df, cat_cols, q_values):
    """Re-apply a previously computed quantile binning (``q_values``) to ``df``.

    ``q_values`` maps each non-categorical column name to its list of
    ``[low, high]`` band boundaries, as produced by ``quantile_discretizer``.
    """
    # Number of quantile cut points implied by the first column's band list.
    num = len(list(q_values.values())[0]) + 1
    encoded = {}
    for name in list(df.columns):
        if name in cat_cols:
            encoded[name] = df[name]
            continue
        col_values = df[name].values
        for position, bounds in enumerate(q_values[name], start=1):
            low = bounds[0]
            high = bounds[1]
            # Same open/closed convention as quantile_discretizer.
            if position == num - 1:
                mask = 1 * (low <= col_values) * (col_values <= high)
            else:
                mask = 1 * (low <= col_values) * (col_values < high)
            encoded['{}: I{}'.format(name, position)] = mask
    return pd.DataFrame.from_dict(encoded)
def cond_exp_tree(x, tree, S, mean, cov, N=10000):
    """Monte-Carlo estimate of E[tree(X) | X_S = x_S] under a Gaussian model.

    Coordinates in ``S`` are clamped to the values in ``x``; the remaining
    coordinates are drawn from the conditional multivariate normal
    (via ``sampleMVN``) using ``N`` samples.
    """
    d = x.shape[0]
    if len(S) == d:
        # Fully conditioned: the expectation reduces to the prediction at x.
        preds = tree.predict(np.expand_dims(np.array(x, dtype=np.float32), axis=0))
    else:
        S_bar = [j for j in range(d) if j not in S]
        samples = np.zeros(shape=(N, d))
        samples[:, S] = x[S]
        samples[:, S_bar] = sampleMVN(N, mean, cov, S_bar, S, x[S])
        preds = tree.predict(samples)
    return np.mean(preds, axis=0)
def cond_exp_tree_true(x, yx, tree, S, mean, cov, N=10000):
    """Monte-Carlo estimate of E[Y | X_S = x_S] for the known synthetic DGP.

    The label model is a pair of exponential-interaction logits switched on
    the sign of feature 4.  When every feature is conditioned on, the
    observed label ``yx`` is returned directly.  ``tree`` is unused but kept
    for signature compatibility with ``cond_exp_tree``.
    """
    d = x.shape[0]
    if len(S) == d:
        return np.mean(np.expand_dims(yx, axis=0), axis=0)
    S_bar = [j for j in range(d) if j not in S]
    samples = np.zeros(shape=(N, d))
    samples[:, S] = x[S]
    samples[:, S_bar] = sampleMVN(N, mean, cov, S_bar, S, x[S])
    # Ground-truth logits: feature 4's sign selects which interaction fires.
    logit1 = np.exp(samples[:, 0] * samples[:, 1])
    logit2 = np.exp(samples[:, 2] * samples[:, 3])
    logit = logit1 * (samples[:, 4] < 0) + logit2 * (samples[:, 4] >= 0)
    # Compute P(Y=0|X)
    prob_0 = np.reshape(logit / (1 + logit), [N, 1])
    # Sampling process: draw labels and average the class-1 indicator.
    y = np.zeros([N, 2])
    y[:, 0] = np.reshape(np.random.binomial(1, prob_0), [N, ])
    y[:, 1] = 1 - y[:, 0]
    return np.mean(y[:, 1], axis=0)
def single_sdp_true_v(x, yx, tree, S, mean, cov, N=10000):
    """Monte-Carlo same-decision probability P(Y == yx | X_S = x_S) under the true DGP.

    Uses the same synthetic label model as ``cond_exp_tree_true``; ``tree``
    is unused but kept for signature compatibility.
    """
    d = x.shape[0]
    if len(S) == d:
        y_pred = np.expand_dims(yx, axis=0)
    else:
        S_bar = [j for j in range(d) if j not in S]
        samples = np.zeros(shape=(N, d))
        samples[:, S] = x[S]
        samples[:, S_bar] = sampleMVN(N, mean, cov, S_bar, S, x[S])
        logit1 = np.exp(samples[:, 0] * samples[:, 1])
        logit2 = np.exp(samples[:, 2] * samples[:, 3])
        logit = logit1 * (samples[:, 4] < 0) + logit2 * (samples[:, 4] >= 0)
        # Compute P(Y=0|X), then draw labels.
        prob_0 = np.reshape(logit / (1 + logit), [N, 1])
        y = np.zeros([N, 2])
        y[:, 0] = np.reshape(np.random.binomial(1, prob_0), [N, ])
        y[:, 1] = 1 - y[:, 0]
        y_pred = y[:, 1]
    return np.mean(y_pred == yx, axis=0)
def shap_exp(tree, S, x):
    """Tree-traversal conditional expectation E[tree(x) | x_S], summed over trees.

    Splits on features in ``S`` are followed deterministically; splits on
    unknown features average both children weighted by training sample
    counts (``node_sample_weight``).
    """
    def expectation(tree_ind, node_ind):
        left = tree.children_left[tree_ind, node_ind]
        if left < 0:
            # Leaf: return its value.
            return tree.values[tree_ind, node_ind]
        right = tree.children_right[tree_ind, node_ind]
        feature = tree.features[tree_ind, node_ind]
        if feature in S:
            # Known feature: follow the split.
            if x[feature] <= tree.thresholds[tree_ind, node_ind]:
                return expectation(tree_ind, left)
            return expectation(tree_ind, right)
        # Unknown feature: sample-weighted average of both subtrees.
        w_left = tree.node_sample_weight[tree_ind, left]
        w_right = tree.node_sample_weight[tree_ind, right]
        return (expectation(tree_ind, left) * w_left
                + expectation(tree_ind, right) * w_right) / (w_left + w_right)

    n_trees = tree.values.shape[0] if tree.tree_limit is None else tree.tree_limit
    out = 0.0
    for tree_ind in range(n_trees):
        out += expectation(tree_ind, 0)
    return out
def shap_cond_exp(X, S, tree):
    """Row-wise ``shap_exp`` conditional expectations for every sample in ``X``."""
    cond = np.zeros((X.shape[0], tree.values.shape[2]))
    for row, sample in enumerate(X):
        cond[row] = shap_exp(x=sample, S=S, tree=tree)
    return cond
def mc_cond_exp(X, S, tree, mean, cov, N):
    """Row-wise Monte-Carlo conditional expectations (see ``cond_exp_tree``)."""
    cond = np.zeros((X.shape[0], tree.values.shape[2]))
    for row, sample in enumerate(X):
        cond[row] = cond_exp_tree(x=sample, S=S, tree=tree, mean=mean, cov=cov, N=N)
    return cond
def mc_cond_exp_true(X, yX, S, tree, mean, cov, N):
    """Row-wise conditional expectations under the true data-generating process."""
    cond = np.zeros((X.shape[0], tree.values.shape[2]))
    for row, sample in enumerate(X):
        cond[row] = cond_exp_tree_true(x=sample, yx=yX[row], S=S, tree=tree, mean=mean, cov=cov, N=N)
    return cond
def sdp_true_v(X, yX, S, tree, mean, cov, N):
    """Row-wise same-decision probabilities (see ``single_sdp_true_v``)."""
    cond = np.zeros((X.shape[0]))
    for row, sample in enumerate(X):
        cond[row] = single_sdp_true_v(x=sample, yx=yX[row], S=S, tree=tree, mean=mean, cov=cov, N=N)
    return cond
def tree_sv_exact(X, C, tree, mean, cov, N):
    """Exact Shapley values for ``tree`` by full subset enumeration.

    X : samples to explain.  C : list of coalitions (feature-index groups
    treated as a single player); ``C == [[]]`` means no coalitions.
    Conditional expectations are Monte-Carlo estimates under a Gaussian
    feature model (``mean``, ``cov``) with ``N`` draws.
    NOTE(review): relies on helpers ``powerset``, ``chain_l``,
    ``convert_list``, ``comb`` and ``tqdm`` defined/imported elsewhere
    in this module.
    """
    m = X.shape[1]
    va_id = list(range(m))
    va_buffer = va_id.copy()
    if C[0] != []:
        # Collapse each coalition into a single "player": remove its members
        # from the player list, then add the coalition itself as one player.
        for c in C:
            m -= len(c)
            va_id = list(set(va_id) - set(c))
        m += len(C)
        for c in C:
            va_id += [c]
    phi = np.zeros(shape=(X.shape[0], X.shape[1], tree.values.shape[2]))
    for i in tqdm(va_id):
        # Sm: all other players, with complete coalitions kept intact.
        Sm = list(set(va_buffer) - set(convert_list(i)))
        if C[0] != []:
            buffer_Sm = Sm.copy()
            for c in C:
                if set(c).issubset(buffer_Sm):
                    Sm = list(set(Sm) - set(c))
            for c in C:
                if set(c).issubset(buffer_Sm):
                    Sm += [c]
        for S in powerset(Sm):
            # Shapley weight 1 / C(m-1, |S|); the final 1/m factor is applied
            # on return.
            weight = comb(m - 1, len(S)) ** (-1)
            v_plus = mc_cond_exp(X=X, S=np.array(chain_l(S) + convert_list(i)).astype(int), tree=tree, mean=mean, cov=cov, N=N)
            v_minus = mc_cond_exp(X=X, S=np.array(chain_l(S)).astype(int), tree=tree, mean=mean, cov=cov, N=N)
            # Every member of a coalition receives the coalition's credit.
            for j in convert_list(i):
                phi[:, j] += weight * (v_plus - v_minus)
    return phi / m
def tree_sv_exact_true(X, yX, C, tree, mean, cov, N):
    """Exact Shapley values against the *true* label model instead of ``tree``.

    Identical subset-enumeration scheme to ``tree_sv_exact`` but the value
    function is ``mc_cond_exp_true`` (observed labels ``yX`` and the known
    synthetic data-generating process).
    NOTE(review): relies on helpers ``powerset``, ``chain_l``,
    ``convert_list``, ``comb`` and ``tqdm`` defined/imported elsewhere
    in this module.
    """
    m = X.shape[1]
    va_id = list(range(m))
    va_buffer = va_id.copy()
    if C[0] != []:
        # Collapse each coalition into a single "player".
        for c in C:
            m -= len(c)
            va_id = list(set(va_id) - set(c))
        m += len(C)
        for c in C:
            va_id += [c]
    phi = np.zeros(shape=(X.shape[0], X.shape[1], tree.values.shape[2]))
    for i in tqdm(va_id):
        # Sm: all other players, with complete coalitions kept intact.
        Sm = list(set(va_buffer) - set(convert_list(i)))
        if C[0] != []:
            buffer_Sm = Sm.copy()
            for c in C:
                if set(c).issubset(buffer_Sm):
                    Sm = list(set(Sm) - set(c))
            for c in C:
                if set(c).issubset(buffer_Sm):
                    Sm += [c]
        for S in powerset(Sm):
            # Shapley weight 1 / C(m-1, |S|); final 1/m applied on return.
            weight = comb(m - 1, len(S)) ** (-1)
            v_plus = mc_cond_exp_true(X=X, yX=yX, S=np.array(chain_l(S) + convert_list(i)).astype(int), tree=tree, mean=mean, cov=cov, N=N)
            v_minus = mc_cond_exp_true(X=X, yX=yX, S=np.array(chain_l(S)).astype(int), tree=tree, mean=mean, cov=cov, N=N)
            for j in convert_list(i):
                phi[:, j] += weight * (v_plus - v_minus)
    return phi / m
def pytree_shap_plugin(X, data, C, tree):
    """Exact Shapley values using the tree's own plug-in conditional expectation.

    Same enumeration scheme as ``tree_sv_exact`` but the value function is
    ``tree.compute_exp_normalized`` evaluated on the empirical ``data``
    instead of a Monte-Carlo Gaussian estimate.
    NOTE(review): relies on helpers ``powerset``, ``chain_l``,
    ``convert_list``, ``comb`` and ``tqdm`` defined/imported elsewhere.
    """
    N = X.shape[0]  # NOTE(review): assigned but unused below
    m = X.shape[1]
    va_id = list(range(m))
    va_buffer = va_id.copy()
    if C[0] != []:
        # Collapse each coalition into a single "player".
        for c in C:
            m -= len(c)
            va_id = list(set(va_id) - set(c))
        m += len(C)
        for c in C:
            va_id += [c]
    phi = np.zeros(shape=(X.shape[0], X.shape[1], tree.values.shape[2]))
    for i in tqdm(va_id):
        # Sm: all other players, with complete coalitions kept intact.
        Sm = list(set(va_buffer) - set(convert_list(i)))
        if C[0] != []:
            buffer_Sm = Sm.copy()
            for c in C:
                if set(c).issubset(buffer_Sm):
                    Sm = list(set(Sm) - set(c))
            for c in C:
                if set(c).issubset(buffer_Sm):
                    Sm += [c]
        for S in powerset(Sm):
            # Shapley weight 1 / C(m-1, |S|); final 1/m applied on return.
            weight = comb(m - 1, len(S)) ** (-1)
            v_plus = tree.compute_exp_normalized(X=X, S=np.array(chain_l(S) + convert_list(i)).astype(int), data=data)
            v_minus = tree.compute_exp_normalized(X=X, S=np.array(chain_l(S)).astype(int), data=data)
            for a in convert_list(i):
                phi[:, a] += weight * (v_plus - v_minus)
    return phi / m
def marMVNDiscrete(mean, cov, set):
    """Marginal mean/covariance of an MVN restricted to the indices in ``set``.

    An empty index list returns the full distribution unchanged.
    (The parameter name shadows the ``set`` builtin; kept for interface
    compatibility with existing callers.)
    """
    if set == []:
        return mean, cov
    return mean[set], cov[set][:, set]
def sampleMarMVNDiscrete(n, mean, cov, set):
    """Draw ``n`` samples from the marginal MVN over the indices in ``set``."""
    marg_mean, marg_cov = marMVNDiscrete(mean, cov, set)
    draws = st.multivariate_normal(marg_mean, marg_cov).rvs(n)
    if len(set) != 0:
        # rvs squeezes singleton dimensions; restore the (n, |set|) shape.
        draws = np.reshape(draws, (draws.shape[0], len(set)))
    return draws
def mc_exp_tree_discretized(X, tree, S, q_arr, q_values, C, mean, cov, N):
d = len(C)
if len(S) != X.shape[1]:
part = np.zeros((len(S), X.shape[0], 2))
for i in range(len(S)):
q_idx = np.argmax(X[:, C[S[i]]], axis=1)
for j in range(X.shape[0]):
part[i, j, | |
<filename>pesummary/tests/conversion_test.py<gh_stars>1-10
# Licensed under an MIT style license -- see LICENSE.md
import os
import socket
import shutil
import numpy as np
import h5py
import deepdish
from pesummary.gw.conversions import *
from pesummary.gw.conversions.nrutils import *
from pycbc import conversions
import pytest
__author__ = ["<NAME> <<EMAIL>>"]
def conversion_check(
    pesummary_function, pesummary_args, other_function, other_args,
    dp=8
):
    """Assert that a PESummary conversion and a reference implementation
    agree to ``dp`` decimal places.
    """
    ours = pesummary_function(*pesummary_args)
    reference = other_function(*other_args)
    # assert_almost_equal returns None on success and raises on mismatch
    assert np.testing.assert_almost_equal(ours, reference, dp) is None
class TestConversions(object):
@classmethod
def setup_class(cls):
    """Build a fixture object of fiducial source parameters shared by the tests."""
    class Arguments(object):
        # Masses (detector frame, solar masses) and mass ratios.
        # NOTE(review): mtotal/mchirp appear chosen so that redshifting the
        # source-frame values 20 and 20/3 by redshift=0.5 reproduces them
        # (see test_m_total_from_mtotal_source_z) -- confirm
        mass1 = 10.
        mass2 = 5.
        mtotal = 30.
        mchirp = 10.
        q = 1. / 4.
        eta = 0.214
        # Orientation and component spins.
        iota = 0.5
        spin1x = 0.75
        spin1y = 0.
        spin1z = 0.5
        spin2x = 0.
        spin2y = 0.
        spin2z = 0.5
        # Tidal deformabilities.
        lambda1 = 500.
        lambda2 = 500.
        lambda_tilde = 1000.
        # Two-sample arrays for vectorised spin-angle conversions.
        theta_jn = [0.5, 0.5]
        phi_jl = [0.3, 0.3]
        tilt_1 = [0.5, 0.]
        tilt_2 = [0., 0.]
        phi_12 = [0., 0.]
        a_1 = [0.5, 0.5]
        a_2 = [0., 0.]
        f_ref = [20., 20.]
        phase = [0., 0.]
        # Cosmology fixtures.
        redshift = 0.5
        l_distance = 500.
    cls.opts = Arguments()
def test_z_from_dL(self):
    """Redshift from luminosity distance matches bilby (exact and approximate)."""
    from bilby.gw.conversion import luminosity_distance_to_redshift
    l_distance = np.random.randint(100, 5000, 20)
    conversion_check(
        z_from_dL_exact, [l_distance],
        luminosity_distance_to_redshift, [l_distance]
    )
    # The approximate conversion is only good to ~4 decimal places.
    conversion_check(
        z_from_dL_approx, [l_distance],
        luminosity_distance_to_redshift, [l_distance],
        dp=4
    )
def test_change_of_cosmology_for_z_from_dL(self):
    """z_from_dL_exact with the lal cosmology reproduces lalinference."""
    from lalinference.bayespputils import calculate_redshift
    l_distance = np.random.randint(100, 5000, 20)
    lal_redshift = calculate_redshift(np.atleast_2d(l_distance).T).T[0]
    pesummary_redshift = z_from_dL_exact(
        l_distance, cosmology="Planck15_lal"
    )
    np.testing.assert_almost_equal(lal_redshift, pesummary_redshift, 8)
def test_dL_from_z(self):
    """Luminosity distance from redshift matches bilby."""
    from bilby.gw.conversion import redshift_to_luminosity_distance
    redshift = np.random.randint(1, 5, 100)
    conversion_check(
        dL_from_z, [redshift],
        redshift_to_luminosity_distance, [redshift]
    )
def test_comoving_distance_from_z(self):
    """Comoving distance from redshift matches bilby."""
    from bilby.gw.conversion import redshift_to_comoving_distance
    redshift = np.random.randint(1, 5, 100)
    conversion_check(
        comoving_distance_from_z, [redshift],
        redshift_to_comoving_distance, [redshift]
    )
def test_m1_source_from_m1_z(self):
    """Source-frame primary mass matches bilby's source-frame conversion."""
    from bilby.gw.conversion import generate_source_frame_parameters
    mass_1 = np.random.randint(5, 100, 100)
    mass_2 = np.random.randint(2, mass_1, 100)
    luminosity_distance = np.random.randint(100, 500, 100)
    sample = generate_source_frame_parameters(
        {"mass_1": mass_1, "mass_2": mass_2,
         "luminosity_distance": luminosity_distance}
    )
    # fixed: removed a redundant second call to
    # generate_source_frame_parameters whose result was never used
    assert np.testing.assert_almost_equal(
        m1_source_from_m1_z(mass_1, sample["redshift"]), sample["mass_1_source"],
        8
    ) is None
def test_m1_m2_from_m1_source_m2_source_z(self):
    """Detector-frame masses from source-frame masses round-trip through bilby."""
    from bilby.gw.conversion import generate_source_frame_parameters
    mass_1_source = np.random.randint(5, 100, 100)
    mass_2_source = np.random.randint(2, mass_1_source, 100)
    redshift = np.random.randint(1, 10, 100)
    luminosity_distance = dL_from_z(redshift)
    # calculate mass_1 and mass_2 using pesummary
    mass_1 = m1_from_m1_source_z(mass_1_source, redshift)
    mass_2 = m2_from_m2_source_z(mass_2_source, redshift)
    # use calculated mass_1/mass_2 to calculate mass_1_source/mass_2_source using
    # bilby
    sample = generate_source_frame_parameters(
        {"mass_1": mass_1, "mass_2": mass_2,
         "luminosity_distance": luminosity_distance}
    )
    # fixed: removed a redundant second call to
    # generate_source_frame_parameters whose result was never used
    # confirm that bilby's mass_1_source/mass_2_source is the same as
    # mass_1_source/mass_2_source that pesummary used
    np.testing.assert_almost_equal(sample["mass_1_source"], mass_1_source, 6)
    np.testing.assert_almost_equal(sample["mass_2_source"], mass_2_source, 6)
def test_m2_source_from_m2_z(self):
    """Source-frame secondary mass matches bilby's source-frame conversion."""
    from bilby.gw.conversion import generate_source_frame_parameters
    mass_1 = np.random.randint(5, 100, 100)
    mass_2 = np.random.randint(2, mass_1, 100)
    luminosity_distance = np.random.randint(100, 500, 100)
    sample = generate_source_frame_parameters(
        {"mass_1": mass_1, "mass_2": mass_2,
         "luminosity_distance": luminosity_distance}
    )
    # fixed: this previously compared m2_source_from_m2_z(mass_1, ...)
    # against mass_1_source, so the secondary-mass conversion was never
    # actually exercised; also removed a redundant second call to
    # generate_source_frame_parameters
    assert np.testing.assert_almost_equal(
        m2_source_from_m2_z(mass_2, sample["redshift"]), sample["mass_2_source"],
        8
    ) is None
def test_m_total_source_from_mtotal_z(self):
    """Source-frame total mass matches bilby's source-frame conversion."""
    from bilby.gw.conversion import generate_source_frame_parameters
    total_mass = np.random.randint(5, 100, 100)
    luminosity_distance = np.random.randint(100, 500, 100)
    sample = generate_source_frame_parameters(
        {"total_mass": total_mass, "luminosity_distance": luminosity_distance}
    )
    # fixed: removed a redundant second call to
    # generate_source_frame_parameters whose result was never used
    assert np.testing.assert_almost_equal(
        m_total_source_from_mtotal_z(total_mass, sample["redshift"]),
        sample["total_mass_source"], 8
    ) is None
def test_m_total_from_mtotal_source_z(self):
    """Detector-frame total mass from source-frame mass and redshift."""
    detector_frame = mtotal_from_mtotal_source_z(20., self.opts.redshift)
    assert np.round(detector_frame, 4) == self.opts.mtotal
def test_mchirp_source_from_mchirp_z(self):
    """Source-frame chirp mass matches bilby's source-frame conversion."""
    from bilby.gw.conversion import generate_source_frame_parameters
    chirp_mass = np.random.randint(5, 100, 100)
    luminosity_distance = np.random.randint(100, 500, 100)
    sample = generate_source_frame_parameters(
        {"chirp_mass": chirp_mass,
         "luminosity_distance": luminosity_distance}
    )
    # fixed: removed a redundant second call to
    # generate_source_frame_parameters whose result was never used
    assert np.testing.assert_almost_equal(
        mchirp_source_from_mchirp_z(chirp_mass, sample["redshift"]),
        sample["chirp_mass_source"],
        8
    ) is None
def test_mchirp_from_mchirp_source_z(self):
    """Detector-frame chirp mass from source-frame chirp mass and redshift."""
    detector_frame = mchirp_from_mchirp_source_z(20. / 3., self.opts.redshift)
    assert np.round(detector_frame, 4) == self.opts.mchirp
def test_mchirp_from_m1_m2(self):
    """Chirp mass from component masses matches pycbc."""
    mass1 = np.random.randint(5, 100, 100)
    mass2 = np.random.randint(2, mass1, 100)
    conversion_check(
        mchirp_from_m1_m2, [mass1, mass2],
        conversions.mchirp_from_mass1_mass2, [mass1, mass2]
    )
def test_m_total_from_m1_m2(self):
    """Total mass from component masses matches pycbc."""
    mass1 = np.random.randint(5, 100, 100)
    mass2 = np.random.randint(2, mass1, 100)
    conversion_check(
        m_total_from_m1_m2, [mass1, mass2],
        conversions.mtotal_from_mass1_mass2, [mass1, mass2]
    )
def test_m1_from_mchirp_q(self):
    """Primary mass from chirp mass and mass ratio matches pycbc.

    pesummary defines q = m2/m1 while pycbc uses q = m1/m2, hence 1/q.
    """
    mchirp = np.random.randint(5, 100, 100)
    q = np.random.random(100)
    # fixed: the random arrays above were previously clobbered with the
    # scalar fixture values, so only a single point was tested; the sibling
    # test_m2_from_mchirp_q already used the full arrays
    conversion_check(
        m1_from_mchirp_q, [mchirp, q],
        conversions.mass1_from_mchirp_q, [mchirp, 1./q]
    )
def test_m2_from_mchirp_q(self):
    """Secondary mass from chirp mass and mass ratio matches pycbc (q inverted)."""
    mchirp = np.random.randint(5, 100, 100)
    q = np.random.random(100)
    conversion_check(
        m2_from_mchirp_q, [mchirp, q],
        conversions.mass2_from_mchirp_q, [mchirp, 1./q]
    )
def test_m1_from_mtotal_q(self):
    """Primary mass from total mass and mass ratio matches pycbc (q inverted)."""
    mtotal = np.random.randint(5, 100, 100)
    q = np.random.random(100)
    conversion_check(
        m1_from_mtotal_q, [mtotal, q],
        conversions.mass1_from_mtotal_q, [mtotal, 1./q]
    )
def test_m2_from_mtotal_q(self):
    """Secondary mass from total mass and mass ratio matches pycbc (q inverted)."""
    mtotal = np.random.randint(5, 100, 100)
    q = np.random.random(100)
    conversion_check(
        m2_from_mtotal_q, [mtotal, q],
        conversions.mass2_from_mtotal_q, [mtotal, 1./q]
    )
def test_eta_from_m1_m2(self):
    """Symmetric mass ratio from component masses matches pycbc."""
    mass1 = np.random.randint(5, 100, 100)
    mass2 = np.random.randint(2, mass1, 100)
    conversion_check(
        eta_from_m1_m2, [mass1, mass2],
        conversions.eta_from_mass1_mass2, [mass1, mass2]
    )
def test_q_from_m1_m2(self):
    """Mass ratio from component masses matches pycbc's inverse-q convention."""
    mass1 = np.random.randint(5, 100, 100)
    mass2 = np.random.randint(2, mass1, 100)
    conversion_check(
        q_from_m1_m2, [mass1, mass2],
        conversions.invq_from_mass1_mass2, [mass1, mass2]
    )
def test_q_from_eta(self):
    """Mass ratio from symmetric mass ratio matches bilby."""
    from bilby.gw.conversion import symmetric_mass_ratio_to_mass_ratio
    eta = np.random.uniform(0, 0.25, 100)
    conversion_check(
        q_from_eta, [eta],
        symmetric_mass_ratio_to_mass_ratio, [eta]
    )
def test_mchirp_from_mtotal_q(self):
    """Chirp mass from total mass and mass ratio against a precomputed value."""
    chirp = mchirp_from_mtotal_q(self.opts.mtotal, self.opts.q)
    assert np.round(chirp, 4) == 9.9906
def test_chi_p(self):
    """Precessing spin parameter matches pycbc and lalsimulation."""
    mass1 = np.random.randint(5, 100, 100)
    mass2 = np.random.randint(2, mass1, 100)
    spin1_mag = np.random.random(100)
    spin1_ang = np.random.random(100)
    spin2_mag = np.random.random(100)
    spin2_ang = np.random.random(100)
    spin1x = spin1_mag * np.cos(spin1_ang)
    spin1y = spin1_mag * np.sin(spin1_ang)
    spin2x = spin2_mag * np.cos(spin2_ang)
    spin2y = spin2_mag * np.sin(spin2_ang)
    conversion_check(
        chi_p, [mass1, mass2, spin1x, spin1y, spin2x, spin2y],
        conversions.chi_p, [mass1, mass2, spin1x, spin1y, spin2x, spin2y]
    )
    from lalsimulation import SimPhenomUtilsChiP
    mass1, mass2 = self.opts.mass1, self.opts.mass2
    spin1x, spin1y = self.opts.spin1x, self.opts.spin1y
    spin1z, spin2x = self.opts.spin1z, self.opts.spin2x
    spin2y, spin2z = self.opts.spin2y, self.opts.spin2z
    # fixed: spin2y was previously passed twice in place of spin2x; the
    # expected value of 0.75 is unchanged because both in-plane secondary
    # spin components are zero in the fixture
    chi_p_value = chi_p(mass1, mass2, spin1x, spin1y, spin2x, spin2y)
    assert chi_p_value == 0.75
    for _ in range(100):
        mass_1 = np.random.randint(10, 100)
        mass_2 = np.random.randint(5, mass_1)
        # random unit spin vectors
        spin1 = np.random.random(3)
        spin1 /= np.sqrt(np.sum(np.square(spin1)))
        spin2 = np.random.random(3)
        spin2 /= np.sqrt(np.sum(np.square(spin2)))
        chi_p_value = chi_p(
            mass_1, mass_2, spin1[0], spin1[1], spin2[0], spin2[1]
        )
        lal_value = SimPhenomUtilsChiP(
            mass_1, mass_2, spin1[0], spin1[1], spin2[0], spin2[1]
        )
        assert np.testing.assert_almost_equal(chi_p_value, lal_value, 9) is None
def test_chi_eff(self):
    """Effective aligned spin matches pycbc."""
    mass1 = np.random.randint(5, 100, 100)
    mass2 = np.random.randint(2, mass1, 100)
    spin1z = np.random.uniform(-1, 1, 100)
    spin2z = np.random.uniform(-1, 1, 100)
    conversion_check(
        chi_eff, [mass1, mass2, spin1z, spin2z],
        conversions.chi_eff, [mass1, mass2, spin1z, spin2z]
    )
def test_phi_12_from_phi1_phi2(self):
    """Azimuthal spin-angle difference wraps into [0, 2*pi)."""
    assert phi_12_from_phi1_phi2(0.2, 0.5) == 0.3
    assert np.round(phi_12_from_phi1_phi2(0.5, 0.2), 2) == 5.98
    # vectorised input
    data = phi_12_from_phi1_phi2(np.array([0.5, 0.2]), np.array([0.3, 0.7]))
    assert all(i == j for i, j in zip(np.round(data, 2), [6.08, 0.5]))
def test_phi_from_spins(self):
    """Azimuthal spin angles agree with a direct cartesian-to-spherical map."""
    def reference(x, y):
        return np.fmod(2 * np.pi + np.arctan2(y, x), 2 * np.pi)
    assert phi1_from_spins(0.5, 0.2) == reference(0.5, 0.2)
    assert phi2_from_spins(0.1, 0.5) == reference(0.1, 0.5)
def test_spin_angles(self):
    """Spin-angle decomposition matches lalsimulation for random systems."""
    from lalsimulation import SimInspiralTransformPrecessingWvf2PE
    mass1 = np.random.uniform(5., 100., 100)
    mass2 = np.random.uniform(2., mass1, 100)
    inc = np.random.uniform(0, np.pi, 100)
    spin1_mag = np.random.random(100)
    spin1_ang = np.random.random(100)
    spin2_mag = np.random.random(100)
    spin2_ang = np.random.random(100)
    spin1x = spin1_mag * np.cos(spin1_ang)
    spin1y = spin1_mag * np.sin(spin1_ang)
    spin2x = spin2_mag * np.cos(spin2_ang)
    spin2y = spin2_mag * np.sin(spin2_ang)
    spin1z = np.random.random(100) - (spin1x**2 + spin1y**2)**0.5
    # fixed: spin2z was previously derived from the *primary* in-plane spin
    # components (copy-paste error)
    spin2z = np.random.random(100) - (spin2x**2 + spin2y**2)**0.5
    f_ref = [20.0] * len(mass1)
    phase = [0.4] * len(mass1)
    for ii in np.arange(len(mass1)):
        conversion_check(
            spin_angles,
            [mass1[ii], mass2[ii], inc[ii], spin1x[ii], spin1y[ii],
             spin1z[ii], spin2x[ii], spin2y[ii], spin2z[ii], f_ref[ii],
             phase[ii]],
            SimInspiralTransformPrecessingWvf2PE,
            [inc[ii], spin1x[ii], spin1y[ii], spin1z[ii], spin2x[ii],
             spin2y[ii], spin2z[ii], mass1[ii], mass2[ii], f_ref[ii],
             phase[ii]]
        )
def test_component_spins(self):
    """Component-spin reconstruction matches bilby for random systems."""
    from bilby.gw.conversion import bilby_to_lalsimulation_spins
    from lal import MSUN_SI
    mass1 = np.random.uniform(5., 100., 100)
    mass2 = np.random.uniform(2., mass1, 100)
    theta_jn = np.random.uniform(0, np.pi, 100)
    phi_jl = np.random.uniform(0, np.pi, 100)
    phi_12 = np.random.uniform(0, np.pi, 100)
    a_1 = np.random.uniform(0, 1, 100)
    a_2 = np.random.uniform(0, 1, 100)
    tilt_1 = np.random.uniform(0, np.pi, 100)
    tilt_2 = np.random.uniform(0, np.pi, 100)
    f_ref = [20.] * len(mass1)
    phase = [0.5] * len(mass2)
    for ii in np.arange(len(mass1)):
        # fixed: tilt_1 was previously passed twice (to both functions), so
        # the tilt_2 samples were never exercised by this test
        conversion_check(
            component_spins,
            [theta_jn[ii], phi_jl[ii], tilt_1[ii], tilt_2[ii], phi_12[ii],
             a_1[ii], a_2[ii], mass1[ii], mass2[ii], f_ref[ii], phase[ii]],
            bilby_to_lalsimulation_spins,
            [theta_jn[ii], phi_jl[ii], tilt_1[ii], tilt_2[ii], phi_12[ii],
             a_1[ii], a_2[ii], mass1[ii]*MSUN_SI, mass2[ii]*MSUN_SI,
             f_ref[ii], phase[ii]]
        )
def test_time_in_each_ifo(self):
from pycbc.detector import Detector
from lal import TimeDelayFromEarthCenter
from lalsimulation import DetectorPrefixToLALDetector
| |
<gh_stars>0
# coding: utf-8
# In[ ]:
"""
Microkinetic model for ammonia oxidation
Inspired by Grabow, <NAME>.
"Computational catalyst screening."
Computational Catalysis. RSC Publishing, 2013. 1-58.
"""
# In[ ]:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pickle
import pandas as pd
from copy import deepcopy
# In[ ]:
# Ammonia oxidation on Pt(211) is used as an example here
# Reaction conditions
P0 = 101325 # Unit: Pa (standard pressure)
P = P0      # operating pressure, Pa
PNH3 = 1000 * 10 **(-6) * P  # 1000 ppm NH3
PO2 = 0.02 * P   # 2% O2
PH2O = 0.05 * P  # 5% H2O
PNO = 0 * P      # product partial pressures start at zero
PN2 = 0 * P
PN2O = 0 * P
# Physical constants
kb = 8.617*10**(-5) # eV K-1
kbj = 1.38064852*10**(-23) # J K-1
R = 8.314 # J mol-1 K-1
h = 4.135667*10**(-15) # eV*s
CT = 1.467 * 10**19 # m-2 sites per unit area on the Pt 211/111 surface
NA = 6.02214086*10**23 # mol-1
evtoj= 96.485 * 1000 # J/eV
# Entropy of gases are from NIST-JANAF. Adsorbate entropies are estimated with harmonic oscillator approximation.
gas_entropy = pd.read_csv("gas_entropy.csv")
adsorbate_entropy = pd.read_csv("adsorbate_entropy_Pt.csv")
# In[ ]:
# Calculate the entropy for gas adsorption
# (entropy change on adsorption, eV K-1; O2 and N2 dissociate into two
# adsorbed atoms, hence the factor of 2 on the adsorbate term)
deltaS_O2 = adsorbate_entropy['PtO']*2 - gas_entropy['S_O2']/evtoj
deltaS_NH3 = adsorbate_entropy['PtNH3'] - gas_entropy['S_NH3']/evtoj
deltaS_NO = adsorbate_entropy['PtNO'] - gas_entropy['S_NO']/evtoj
deltaS_N2 = adsorbate_entropy['PtN']*2 - gas_entropy['S_N2']/evtoj
deltaS_H2O = adsorbate_entropy['PtH2O'] - gas_entropy['S_H2O']/evtoj
deltaS_N2O = adsorbate_entropy['PtN2O'] - gas_entropy['S_N2O']/evtoj
# In[ ]:
def get_rate_constants(T):
    """Return the 40 rate constants (20 forward, 20 backward) at temperature T (K).

    Rate constants are computed with the Eyring equation from DFT activation
    energies; adsorption/desorption prefactors use the Hertz-Knudsen
    equation, and the reverse prefactors are fixed by the corresponding
    equilibrium constants (thermodynamic consistency).
    """
    # Activation energy and prefactors for 40 reactions (20 forward and 20 backward)
    # i is used as an index to get gas entropies
    # NOTE(review): assumes the entropy tables are tabulated from 300 K in
    # 100 K steps, so T should be in that grid -- confirm against the CSVs
    i = int((T-300)/100)
    # DFT computed activation energy for the reactions
    # (forward/backward pairs, eV; reaction labels in the inline comments)
    Ea_eV = np.array([0.0, #0 O2 + 2* = 2O*
                      2.993,
                      0.0, #1 NH3 + * = NH3*
                      0.773,
                      0.580, #2 NH3* + O* = NH2* + OH*
                      1.276,
                      1.449, #3 NH2* + O* = NH* + OH*
                      1.203,
                      0.470, #4 NH* + O* = N* + OH*
                      0.692,
                      0.833, #5 NH3* + OH* = NH2* + H2O*
                      0.995,
                      0.793, #6 NH2* + OH* = NH* + H2O*
                      0.013,
                      0.838, #7 NH* + OH* = N* + H2O*
                      0.525,
                      0.842, #8 OH* + OH* = O* + H2O*
                      0.308,
                      0.0, #9 H2O + * = H2O*
                      0.252,
                      1.182, #10 N* + N* = N2 + *
                      1.813,
                      1.458, #11 N* + O* = NO* + *
                      1.657,
                      2.329, #12 NO* = NO + *
                      0.0,
                      1.625, #13 N* + NO* =N2O*
                      0.444,
                      0.000, #14 N2O* = N2O + *
                      0.095,
                      1.15, #15 NH3* + * = NH2* + H*
                      1.37,
                      1.61, #16 NH2* + * = NH* + H*
                      0.88,
                      1.30, #17 NH* + * = N* + H*
                      0.66,
                      0.50, #18 O* + H* = OH*
                      1.03,
                      0.96, #19 OH* + H* = H2O*
                      0.64])
    # Gibbs free energy for O2 adsorption
    deltaG_O2 = Ea_eV[0] - Ea_eV[1] - T*deltaS_O2[i]
    # Equilibrium constant for O2 adsorption
    K_O2 = np.exp(-deltaG_O2/kb/T)
    # Forward reaction prefactor estimated with Hertz-Knudsen equation
    A_O2_f = 1/CT/(2*3.1415*32/NA/1000*kbj*T)**0.5
    # Gibbs free energy, equilibrium constant and forward reaction prefactor for NH3 adsorption
    deltaG_NH3 = Ea_eV[2] - Ea_eV[3] - T*deltaS_NH3[i]
    K_NH3 = np.exp(-deltaG_NH3/kb/T)
    A_NH3_f = 1/CT/(2*3.1415*17/NA/1000*kbj*T)**0.5
    # Gibbs free energy and equilibrium constant for N* combination
    deltaG_NN = Ea_eV[20] - Ea_eV[21] - T*(-deltaS_N2[i])
    K_NN = np.exp(-deltaG_NN/kb/T)
    # Gibbs free energy, equilibrium constant and backward reaction prefactor for NO* desorption
    deltaG_NO = Ea_eV[24] - Ea_eV[25] - T*(-deltaS_NO[i])
    K_NO = np.exp(-deltaG_NO/kb/T)
    A_NO_b = 1/CT/(2*3.1415*30/NA/1000*kbj*T)**0.5
    # Gibbs free energy, equilibrium constant and forward reaction prefactor for H2O adsorption
    deltaG_H2O = Ea_eV[18] - Ea_eV[19] - T*deltaS_H2O[i]
    K_H2O = np.exp(-deltaG_H2O/kb/T)
    A_H2O_f = 1/CT/(2*3.1415*18/NA/1000*kbj*T)**0.5
    # Gibbs free energy, equilibrium constant and forward reaction prefactor for N2O* desorption
    deltaG_N2O = Ea_eV[28] - Ea_eV[29] - T*(-deltaS_N2O[i])
    K_N2O = np.exp(-deltaG_N2O/kb/T)
    A_N2O_b = 1/CT/(2*3.1415*44/NA/1000*kbj*T)**0.5
    # Prefactors of the reactions
    # (surface reactions use the standard kb*T/h transition-state prefactor;
    # reverse adsorption prefactors are derived from the equilibrium
    # constants so forward/backward pairs stay thermodynamically consistent)
    A = np.array([A_O2_f,
                  A_O2_f/K_O2*np.exp(Ea_eV[1]/kb/T)*P0,
                  A_NH3_f,
                  A_NH3_f/K_NH3*np.exp(Ea_eV[3]/kb/T)*P0,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  A_H2O_f,
                  A_H2O_f/K_H2O*np.exp(Ea_eV[19]/kb/T)*P0,
                  kb*T/h,
                  kb*T/h/K_NN*np.exp((Ea_eV[21]-Ea_eV[20])/kb/T)/P0,
                  kb*T/h,
                  kb*T/h,
                  K_NO*A_NO_b*np.exp((Ea_eV[24]-Ea_eV[25])/kb/T)*P0,
                  A_NO_b,
                  kb*T/h,
                  kb*T/h,
                  K_N2O*A_N2O_b*np.exp((Ea_eV[28]-Ea_eV[29])/kb/T)*P0,
                  A_N2O_b,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h,
                  kb*T/h])
    # Calculate rate constants with Eyring Equation
    k = np.zeros(40)
    for i in range(0, 40):
        k[i] = A[i]*np.exp(-Ea_eV[i]/kb/T)
    return (k)
# In[ ]:
def get_rates(theta, k):
    # returns the rates depending on the current coverages theta
    """Elementary-step rates (mass-action kinetics) for the current coverages.

    theta : coverages for O*, NH3*, NH2*, OH*, NH*, N*, NO*, H2O*, N2O*, H*.
    k : 40 rate constants from ``get_rate_constants`` (even indices forward,
    odd indices backward, same ordering as the Ea_eV table).
    """
    global PO2, PNH3, PH2O, PNO, PN2O, PN2
    # theta for O*, NH3*, NH2*, OH*, NH*, N*, NO*, H2O*, N2O*, H* and *
    tO = theta[0]
    tNH3 = theta[1]
    tNH2 = theta[2]
    tOH = theta[3]
    tNH = theta[4]
    tN = theta[5]
    tNO = theta[6]
    tH2O = theta[7]
    tN2O = theta[8]
    tH = theta[9]
    # free-site coverage from the site balance
    tstar = 1.0 - tO - tNH3 - tNH2 - tOH - tNH - tN - tNO - tH2O - tN2O - tH
    # Caluclate the rates
    rate = np.zeros(40)
    rate[0] = k[0] * PO2 * tstar**2
    rate[1] = k[1] * tO**2
    rate[2] = k[2] * PNH3 * tstar
    rate[3] = k[3] * tNH3
    rate[4] = k[4] * tNH3 * tO
    rate[5] = k[5] * tNH2 * tOH
    rate[6] = k[6] * tNH2 * tO
    rate[7] = k[7] * tNH * tOH
    rate[8] = k[8] * tNH * tO
    rate[9] = k[9] * tN * tOH
    rate[10] = k[10] * tNH3 * tOH
    rate[11] = k[11] * tNH2 * tH2O
    rate[12] = k[12] * tNH2 * tOH
    rate[13] = k[13] * tNH * tH2O
    rate[14] = k[14] * tNH * tOH
    rate[15] = k[15] * tN * tH2O
    rate[16] = k[16] * tOH**2
    rate[17] = k[17] * tH2O * tO
    rate[18] = k[18] * PH2O * tstar
    rate[19] = k[19] * tH2O
    rate[20] = k[20] * tN**2
    rate[21] = k[21] * PN2 * tstar**2
    rate[22] = k[22] * tN * tO
    rate[23] = k[23] * tNO * tstar
    rate[24] = k[24] * tNO
    rate[25] = k[25] * PNO * tstar
    rate[26] = k[26] * tN * tNO
    rate[27] = k[27] * tN2O * tstar
    rate[28] = k[28] * tN2O
    rate[29] = k[29] * PN2O * tstar
    rate[30] = k[30] * tNH3 * tstar
    rate[31] = k[31] * tNH2 * tH
    rate[32] = k[32] * tNH2 * tstar
    rate[33] = k[33] * tNH * tH
    rate[34] = k[34] * tNH * tstar
    rate[35] = k[35] * tN * tH
    rate[36] = k[36] * tO * tH
    rate[37] = k[37] * tOH * tstar
    rate[38] = k[38] * tOH * tH
    rate[39] = k[39] * tH2O
    return rate
# In[ ]:
def get_odes(theta, time, k):
# returns the system of ODEs d(theta)/dt, calculated at the current value of theta.
rate = get_rates(theta,k) # calculate the current rates
# Time derivatives of theta for O*, NH3*, NH2*, OH*, NH*, N*, NO*, H2O*, N2O* and H*
dt = np.zeros(10)
dt[0] = 2*rate[0] - 2*rate[1] - rate[4] + rate[5] - rate[6] + rate[7] - rate[8] + rate[9] + rate[16] - rate[17] - rate[22] + rate[23] - rate[36] + rate[37]
dt[1] = rate[2] - rate[3] - rate[4] + rate[5] - rate[10] + rate[11] - rate[30] + rate[31]
dt[2] = rate[4] - rate[5] - rate[6] + rate[7] + rate[10] - rate[11] - rate[12] + rate[13] + rate[30] - rate[31] - rate[32] + rate[33]
dt[3] = rate[4] - rate[5] + rate[6] - rate[7] + rate[8] - rate[9] - rate[10] + rate[11] - rate[12] + rate[13] - rate[14] + rate[15] -2*rate[16] + 2*rate[17] + rate[36] - rate[37] - rate[38] + rate[39]
dt[4] = rate[6] - rate[7] - rate[8] + rate[9] + rate[12] - rate[13] - rate[14] + rate[15] + rate[32] -rate[33] - rate[34] + rate[35]
dt[5] = rate[8] - rate[9] + rate[14] - rate[15] - 2*rate[20] + 2*rate[21] - rate[22] + rate[23] - rate[26] + rate[27] + rate[34] - rate[35]
dt[6] = rate[22] - rate[23] - rate[24] + rate[25] - rate[26] + rate[27]
dt[7] = rate[10] - rate[11] + rate[12] - rate[13] + rate[14] - rate[15] + rate[16] - rate[17] + rate[18] - rate[19] + rate[38] - rate[39]
dt[8] = rate[26] | |
in time.")
if all_equal is None:
warnings.warn(f"Rate function {rate.__name__} couldn't be evaluated at more than one point or no point in time {t}")
has_time_dependence = False
else:
has_time_dependence = not all_equal
self.rates_have_explicit_time_dependence = \
self.rates_have_explicit_time_dependence or has_time_dependence
self.rates_have_functional_dependence = True
return True
else:
return False
def set_linear_events(self,
                      event_list,
                      allow_nonzero_column_sums=False,
                      reset_events=True):
    r"""
    Define the linear transition events between compartments.

    Parameters
    ==========
    event_list : :obj:`list` of :obj:`tuple`
        A list of tuples that contains transition events in the
        following format:

        .. code:: python

            [
                (
                    ("affected_compartment_0",),
                    rate,
                    [
                        ("affected_compartment_0", dN0),
                        ("affected_compartment_1", dN1),
                        ...
                    ],
                ),
                ...
            ]

    allow_nonzero_column_sums : :obj:`bool`, default : False
        Traditionally, epidemiological models preserve the
        total population size. If that's not the case,
        switch off testing for this.
    reset_events : bool, default : True
        Whether to reset all linear events to zero before
        converting those.

    Example
    -------
    For an SEIR model with infectious period ``tau``
    and incubation period ``theta``.

    .. code:: python

        epi.set_linear_events([
            ( ("E",),
              1/theta,
              [ ("E", -1), ("I", +1) ]
            ),
            ( ("I",),
              1/tau,
              [ ("I", -1), ("R", +1) ]
            ),
        ])

    Read as "compartment E reacts with rate :math:`1/\theta`
    which leads to the decay of one E particle to one I particle."
    """
    if reset_events:
        birth_rate_functions = []
        birth_event_updates = []
        linear_rate_functions = []
        linear_event_updates = []
        birth_events = []
        linear_events = []
    else:
        # extend the already-registered events instead of replacing them
        linear_event_updates = list(self.linear_event_updates)
        birth_event_updates = list(self.birth_event_updates)
        linear_rate_functions = list(self.linear_rate_functions)
        birth_rate_functions = list(self.birth_rate_functions)
        birth_events = list(self.birth_events)
        linear_events = list(self.linear_events)
    for acting_compartments, rate, affected_compartments in event_list:
        # accumulate the state-change vector dy for this event
        dy = np.zeros(self.N_comp)
        for trg, change in affected_compartments:
            _t = self.get_compartment_id(trg)
            dy[_t] += change
        if acting_compartments[0] is None:
            # no acting compartment: this is a birth/influx event
            if self._rate_has_functional_dependency(rate):
                this_rate = DynamicBirthRate(rate)
            else:
                this_rate = ConstantBirthRate(rate)
            birth_event_updates.append( dy )
            birth_rate_functions.append( this_rate )
            birth_events.append((acting_compartments, rate, affected_compartments))
        else:
            _s = self.get_compartment_id(acting_compartments[0])
            if self._rate_has_functional_dependency(rate):
                this_rate = DynamicLinearRate(rate, _s)
            else:
                this_rate = ConstantLinearRate(rate, _s)
            linear_event_updates.append( dy )
            linear_rate_functions.append( this_rate )
            linear_events.append((acting_compartments, rate, affected_compartments))
        if dy.sum() != 0 and not self.correct_for_dynamical_population_size:
            warnings.warn("This model has processes with a fluctuating "+\
                    "number of agents. Consider correcting the rates dynamically with "+\
                    "the attribute correct_for_dynamical_population_size = True")
    if not allow_nonzero_column_sums and len(linear_rate_functions) > 0:
        # sanity check: every event should conserve the population
        _y = np.ones(self.N_comp)
        if not self.rates_have_explicit_time_dependence:
            if self.t0 is None:
                t0 = 0
            else:
                t0 = self.t0
        else:
            if self.t0 is None:
                t0 = None
                warnings.warn('Rates are time-dependent, but no initial time was set yet, so I cannot check the column sums.')
            else:
                t0 = self.t0
        if t0 is not None:
            try:
                test = sum([r(t0, _y) * dy for dy, r in zip(linear_event_updates, linear_rate_functions)])
                test += sum([r(t0, _y) * dy for dy, r in zip(birth_event_updates, birth_rate_functions)])
                test_sum = test.sum()
                if np.abs(test_sum) > 1e-15:
                    warnings.warn("events do not sum to zero for each column:" + str(test_sum))
            except ValueError as e:
                # BUGFIX: str.join takes a single iterable; the message parts
                # were previously passed as separate arguments, which raised
                # a TypeError here and masked the intended warning.
                warnings.warn(' '.join([
                    f"Some rate functions couldn't be evaluated at {t0=}. This can happen when",
                    f"explicit time-dependence couldn't be inferred from any of your rates but they're time-dependent nevertheless.",
                    f"You can get rid of this warning by setting",
                    '``model.set_initial_conditions(...,initial_time=actual_initial_time)`` before setting processes.',
                    'You should also make sure to tell the model.simulate() function that it should assume explicit time',
                    'dependence by calling it as',
                    '``model.simulate(...,rates_have_explicit_time_dependence=True)``',
                ]))
    self.linear_event_updates = linear_event_updates
    self.linear_rate_functions = linear_rate_functions
    self.birth_event_updates = birth_event_updates
    self.birth_rate_functions = birth_rate_functions
    self.linear_events = linear_events
    self.birth_events = birth_events
    return self
def add_transition_processes(self,process_list):
"""
Define the linear transition processes between compartments.
Parameters
==========
process_list : :obj:`list` of :obj:`tuple`
A list of tuples that contains transitions events in the following format:
.. code:: python
[
( source_compartment, rate, target_compartment ),
...
]
Example
=======
For an SEIR model.
.. code:: python
epi.add_transition_processes([
("E", symptomatic_rate, "I" ),
("I", recovery_rate, "R" ),
])
"""
linear_events = transition_processes_to_events(process_list)
return self.set_linear_events(linear_events,
reset_events=False,
allow_nonzero_column_sums=True)
def add_fission_processes(self,process_list):
"""
Define linear fission processes between compartments.
Parameters
==========
process_list : :obj:`list` of :obj:`tuple`
A list of tuples that contains fission rates in the following format:
.. code:: python
[
("source_compartment", rate, "target_compartment_0", "target_compartment_1" ),
...
]
Example
-------
For pure exponential growth of compartment `B`.
.. code:: python
epi.add_fission_processes([
("B", growth_event, "B", "B" ),
])
"""
linear_events = fission_processes_to_events(process_list)
return self.set_linear_events(linear_events,
reset_events=False,
allow_nonzero_column_sums=True)
def add_fusion_processes(self,process_list):
"""
Define fusion processes between compartments.
Parameters
==========
process_list : :obj:`list` of :obj:`tuple`
A list of tuples that contains fission rates in the following format:
.. code:: python
[
("coupling_compartment_0", "coupling_compartment_1", rate, "target_compartment_0" ),
...
]
Example
-------
Fusion of reactants "A", and "B" to form "C".
.. code:: python
epi.add_fusion_processes([
("A", "B", reaction_rate, "C" ),
])
"""
quadratic_events = fusion_processes_to_events(process_list)
return self.set_quadratic_events(quadratic_events,
reset_events=False,
allow_nonzero_column_sums=True)
def add_transmission_processes(self,process_list):
r"""
A wrapper to define quadratic process rates
through transmission reaction equations.
Note that in stochastic network/agent simulations, the transmission
rate is equal to a rate per link. For the mean-field ODEs,
the rates provided to this function will just be equal
to the prefactor of the respective quadratic terms.
on a network of mean degree :math:`k_0`,
a basic reproduction number :math:`R_0`, and a
recovery rate :math:`\mu`, you would define the single
link transmission process as
.. code:: python
("I", "S", R_0/k_0 * mu, "I", "I")
For the mean-field system here, the corresponding reaction equation would read
.. code:: python
("I", "S", R_0 * mu, "I", "I")
Parameters
----------
process_list : :obj:`list` of :obj:`tuple`
A list of tuples that contains transitions rates in the following format:
.. code:: python
[
("source_compartment",
"target_compartment_initial",
rate,
"source_compartment",
"target_compartment_final",
),
...
]
Example
-------
For an SEIR model.
.. code:: python
epi.add_transmission_processes([
("I", "S", +1, "I", "E" ),
])
"""
quadratic_events = transmission_processes_to_events(process_list)
return self.set_quadratic_events(quadratic_events,
reset_events=False,
allow_nonzero_column_sums=True)
def add_quadratic_events(self,
event_list,
allow_nonzero_column_sums=False):
"""
Add quadratic events without resetting the existing event terms.
See :func:`epipack.numeric_epi_models.EpiModel.set_quadratic_events` for docstring.
"""
return self.set_quadratic_events(event_list,
reset_events=False,
allow_nonzero_column_sums=allow_nonzero_column_sums,
)
def add_linear_events(self,
event_list,
allow_nonzero_column_sums=False):
"""
Add linear events without resetting the existing event terms.
See :func:`epipack.numeric_epi_models.EpiModel.set_linear_events` for docstring.
"""
return self.set_linear_events(event_list,
reset_events=False,
allow_nonzero_column_sums=allow_nonzero_column_sums
)
def set_quadratic_events(self,
event_list,
allow_nonzero_column_sums=False,
reset_events=True,
initial_time_for_column_sum_test=0,
):
r"""
Define quadratic transition events between compartments.
Parameters
----------
event_list : :obj:`list` of :obj:`tuple`
A list of tuples that contains transmission events in the following format:
.. code:: python
[
(
("coupling_compartment_0", "coupling_compartment_1"),
rate,
[
("affected_compartment_0", dN0),
("affected_compartment_1", dN1),
...
],
),
...
]
allow_nonzero_column_sums : :obj:`bool`, default : False
Traditionally, epidemiological models preserve the
total population size. If that's not the case,
switch off testing for this.
reset_events : bool, default : True
Whether to reset all linear events to zero before
converting those.
Example
-------
For an SEIR model with infection rate ``eta``.
.. code:: python
epi.set_quadratic_events([
( ("S", "I"),
eta,
[ ("S", -1), ("E", +1) ]
),
])
Read as
"Coupling of *S* and *I* leads to
the decay of one *S* particle to one *E* particle with
rate :math:`\eta`.".
"""
if reset_events:
quadratic_event_updates = []
quadratic_rate_functions = []
quadratic_events = []
else:
quadratic_event_updates = list(self.quadratic_event_updates)
quadratic_rate_functions = list(self.quadratic_rate_functions)
quadratic_events = list(self.quadratic_events)
for coupling_compartments, rate, affected_compartments in event_list:
_s0 = self.get_compartment_id(coupling_compartments[0])
_s1 = self.get_compartment_id(coupling_compartments[1])
dy = np.zeros(self.N_comp)
for trg, change in affected_compartments:
_t = self.get_compartment_id(trg)
dy[_t] += change
if self._rate_has_functional_dependency(rate):
this_rate = DynamicQuadraticRate(rate, _s0, _s1)
else:
this_rate = ConstantQuadraticRate(rate, _s0, _s1)
quadratic_event_updates.append( dy )
quadratic_rate_functions.append( this_rate )
quadratic_events.append( (coupling_compartments, rate, affected_compartments) )
if not allow_nonzero_column_sums and len(quadratic_rate_functions)>0:
_y = np.ones(self.N_comp)
if not self.rates_have_explicit_time_dependence:
if self.t0 is None:
t0 = 0
else:
t0 = self.t0
else:
if self.t0 is None:
t0 = None
warnings.warn('Rates are time-dependent, but no initial time was set yet, so I cannot check the column sums.')
else:
t0 = self.t0
if t0 is not None:
try:
test = sum([r(t0,_y) * dy for dy, r in zip (quadratic_event_updates, quadratic_rate_functions)])
test_sum = test.sum()
if np.abs(test_sum) > 1e-15:
warnings.warn("events do not sum to zero for each column:" + str(test_sum))
except ValueError as e:
warnings.warn(' '.join([
f"Some rate functions couldn't be evaluated at {t0=}. This can happen when",
f"explicit time-dependence couldn't be inferred from any of your rates but they're time-dependent nevertheless.",
f"You can get rid of this warning by setting",
'``model.set_initial_conditions(...,initial_time=actual_initial_time)`` before setting processes.',
'You should also make sure | |
# Repository: mgiangreco/apartments-scraper
import boto3
import csv
import datetime
import json
import re
import sys
import datetime
import requests
import os
from bs4 import BeautifulSoup
# Config parser was renamed in Python 3
try:
import configparser
except ImportError:
import ConfigParser as configparser
def create_csv(search_urls, fname):
    """Scrape every search URL and write one CSV row per apartment.

    Parameters
    ----------
    search_urls : iterable of str
        apartments.com search-result URLs; pagination is followed per URL.
    fname : str
        Path of the CSV file to (over)write.
    """
    # avoid the issue on Windows where there's an extra space every other
    # line: Python 2 wants binary mode, Python 3 wants newline=''
    if sys.version_info[0] == 2:  # Not named on 2.6
        access = 'wb'
        kwargs = {}
    else:
        access = 'wt'
        kwargs = {'newline': ''}
    # 'with' guarantees the file is closed even if scraping raises,
    # replacing the previous manual open()/try/finally/close() dance
    with open(fname, access, **kwargs) as csv_file:
        writer = csv.writer(csv_file)
        # this is the header (make sure it matches with the fields in
        # write_parsed_to_csv)
        header = ['Option Name', 'Contact', 'Address', 'Size',
                  'Rent', 'Monthly Fees', 'One Time Fees',
                  'Pet Policy',
                  'Parking', 'Gym', 'Kitchen',
                  'Amenities', 'Features', 'Living Space',
                  'Lease Info', 'Services',
                  'Property Info', 'Indoor Info', 'Outdoor Info',
                  'Images', 'Description', 'ds']
        writer.writerow(header)
        # parse the entire apartment list, including pagination,
        # for every search URL
        for url in search_urls:
            print("Now getting apartments from: %s" % url)
            write_parsed_to_csv(url, writer)
def write_parsed_to_csv(page_url, writer):
    """Write one CSV row per apartment found at `page_url`.

    Follows the 'next' pagination link iteratively until the last page.
    (The original implementation recursed once per page, which could hit
    Python's recursion limit on long result lists, and crashed with
    AttributeError when the listings container was missing.)

    Parameters
    ----------
    page_url : str
        URL of an apartments.com search-results page.
    writer : csv.writer
        Open CSV writer; row order must match the header in create_csv.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    while page_url:
        # read and soupify the current page
        page = requests.get(page_url, headers=headers)
        soup = BeautifulSoup(page.content, 'html.parser')
        soup.prettify()
        # only look in this region
        soup = soup.find('div', class_='placardContainer')
        if soup is None:
            # no listings region on this page (layout change or empty
            # result) -- previously this raised AttributeError
            return
        # append the current apartments to the list
        for item in soup.find_all('article', class_='placard'):
            url = ''
            rent = ''
            contact = ''
            title = item.find('a', class_='placardTitle')
            if title is None:
                continue
            url = title.get('href')
            # get the rent and parse it to unicode
            obj = item.find('span', class_='altRentDisplay')
            if obj is not None:
                rent = obj.getText().strip()
            # get the phone number and parse it to unicode
            obj = item.find('div', class_='phone')
            if obj is not None:
                contact = obj.getText().strip()
            # get the other fields to write to the CSV
            fields = parse_apartment_information(url)
            # make this wiki markup
            fields['name'] = '[' + str(fields['name']) + '](' + url + ')'
            fields['address'] = '[' + fields['address'] + '](' + ')'
            # stamp the scrape date
            fields['ds'] = str(datetime.datetime.utcnow().date())
            # fill out the CSV row (order must match the header)
            row = [fields['name'], contact,
                   fields['address'], fields['size'],
                   rent, fields['monthFees'], fields['onceFees'],
                   fields['petPolicy'],
                   fields['parking'], fields['gym'], fields['kitchen'],
                   fields['amenities'], fields['features'], fields['space'],
                   fields['lease'], fields['services'],
                   fields['info'], fields['indoor'], fields['outdoor'],
                   fields['img'], fields['description'], fields['ds']]
            writer.writerow(row)
        # get the next page URL for pagination; stop on the last page
        next_url = soup.find('a', class_='next')
        if next_url is None:
            return
        next_url = next_url.get('href')
        if next_url is None or next_url == '' or next_url == 'javascript:void(0)':
            return
        page_url = next_url
def parse_apartment_information(url):
    """Fetch one apartment page and collect all CSV fields into a dict."""
    # fetch and soupify the apartment's detail page
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
    page = requests.get(url, headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    soup.prettify()
    # the information we need to return as a dict
    fields = {}
    # sections that live on the whole page
    get_property_name(soup, fields)
    get_property_address(soup, fields)
    get_property_size(soup, fields)
    get_fees(soup, fields)
    get_images(soup, fields)
    get_description(soup, fields)
    # the remaining details live inside the spec section only
    # (other copies of them exist e.g. for printing)
    soup = soup.find('section', class_='specGroup js-specGroup')
    get_pet_policy(soup, fields)
    get_parking_info(soup, fields)
    # icon-keyed sections all share the same extraction logic
    for field_name, icon_class in (('amenities', 'featuresIcon'),
                                   ('indoor', 'interiorIcon'),
                                   ('outdoor', 'parksIcon'),
                                   ('gym', 'fitnessIcon'),
                                   ('kitchen', 'kitchenIcon'),
                                   ('services', 'servicesIcon'),
                                   ('space', 'sofaIcon'),
                                   ('lease', 'leaseIcon')):
        get_field_based_on_class(soup, field_name, icon_class, fields)
    # 'Features' and 'Property Information'
    get_features_and_info(soup, fields)
    return fields
def prettify_text(data):
    """Return *data* cleaned up for CSV/markdown output.

    Collapses runs of spaces and of (possibly indented) newlines, turns
    bullet characters into markdown '*' bullets, spells out the registered
    trademark symbol as '(R)' and strips surrounding whitespace.

    Bug fix: the original ended with ``data.encode('utf8', 'ignore')``
    followed by ``return str(data).encode('utf-8')``.  Under Python 3 that
    returns ``bytes`` containing the literal ``b'...'`` repr, and every
    caller that does ``'* ' + prettify_text(...)`` would raise TypeError.
    Returning the cleaned ``str`` keeps all call sites working.
    """
    # format it nicely: replace multiple spaces with just one
    data = re.sub(' +', ' ', data)
    # format it nicely: replace multiple new lines with just one
    data = re.sub('(\r?\n *)+', '\n', data)
    # format it nicely: replace bullet with *
    data = re.sub(u'\u2022', '* ', data)
    # format it nicely: replace registered symbol with (R)
    data = re.sub(u'\xae', ' (R) ', data)
    # format it nicely: remove leading/trailing whitespace
    return data.strip()
def get_images(soup, fields):
    """Store the carousel images as a string of markdown image links."""
    fields['img'] = ''
    if soup is None:
        return
    # the image carousel lives in the ul with id fullCarouselCollection
    carousel = soup.find('ul', {'id': 'fullCarouselCollection'})
    if carousel is None:
        return
    links = ['![' + img['alt'] + '](' + img['src'] + ') '
             for img in carousel.find_all('img')]
    fields['img'] = ''.join(links)
def get_description(soup, fields):
    """Store the listing's free-text description (prettified), or ''."""
    fields['description'] = ''
    if soup is None:
        return
    # the description is the p tagged with itemprop="description"
    node = soup.find('p', {'itemprop': 'description'})
    if node is None:
        return
    fields['description'] = prettify_text(node.getText())
def get_property_size(soup, fields):
    """Store the square footage of the first one-bedroom floor plan.

    Note: this might be wrong if there are multiple matching rows.
    """
    fields['size'] = ''
    if soup is None:
        return
    row = soup.find('tr', {'data-beds': '1'})
    if row is not None:
        fields['size'] = prettify_text(row.find('td', class_='sqft').getText())
def get_features_and_info(soup, fields):
    """Store the 'Features' and 'Property Information' section texts.

    Bug fix: the original compared the heading against 'Features' after
    .strip(), but against 'Property Information' WITHOUT .strip(), so a
    heading with surrounding whitespace silently left fields['info'] empty.
    Both comparisons now strip the heading consistently.
    """
    fields['features'] = ''
    fields['info'] = ''
    if soup is None:
        return
    for icon in soup.find_all('i', class_='propertyIcon'):
        # the list that belongs to this icon's section
        data = prettify_text(icon.parent.findNext('ul').getText())
        # the section heading decides which field the text belongs to
        heading = icon.parent.findNext('h3').getText().strip()
        if heading == 'Features':
            fields['features'] = data
        elif heading == 'Property Information':
            fields['info'] = data
def get_field_based_on_class(soup, field, icon, fields):
    """Store under `field` the <ul> text following the first `icon` element."""
    fields[field] = ''
    if soup is None:
        return
    marker = soup.find('i', class_=icon)
    if marker is None:
        return
    fields[field] = prettify_text(marker.parent.findNext('ul').getText())
def get_parking_info(soup, fields):
    """Store the parking details text, or '' when unavailable."""
    fields['parking'] = ''
    if soup is None:
        return
    details = soup.find('div', class_='parkingDetails')
    if details is not None:
        fields['parking'] = prettify_text(details.getText())
def get_pet_policy(soup, fields):
    """Store the pet policy text, or '' when unavailable."""
    fields['petPolicy'] = ''
    if soup is None:
        return
    node = soup.find('div', class_='petPolicyDetails')
    if node is not None:
        fields['petPolicy'] = prettify_text(node.getText())
def get_fees(soup, fields):
    """Store monthly and one-time fees as markdown bullet lists."""
    fields['monthFees'] = ''
    fields['onceFees'] = ''
    if soup is None:
        return
    # both fee sections have identical structure; only the wrapper
    # div class and the destination field differ
    for key, css_class in (('monthFees', 'monthlyFees'),
                           ('onceFees', 'oneTimeFees')):
        section = soup.find('div', class_=css_class)
        if section is None:
            continue
        bullets = []
        for expense in section.find_all('div', class_='fee'):
            description = prettify_text(
                expense.find('div', class_='descriptionWrapper').getText())
            price = prettify_text(
                expense.find('div', class_='priceWrapper').getText())
            bullets.append('* ' + description + ': ' + price)
        # joining produces the same text as the original append-then-strip
        fields[key] = '\n'.join(bullets)
def average_field(obj1, obj2, field):
"""Take the average given two objects that have field values followed | |
#############################################################################
# Analysis codes for mBLA_UF code #
# #
# Used in the paper: #
# [1] Park and N{\"a}gele, JCP, 2020 #
# doi: 10.1063/5.0020986 #
# #
# [2] Park and N{\"a}gele, Membranes, 2021 #
# doi: https://doi.org/10.3390/membranes11120960 #
# #
# #
# Code Developer: Park, <NAME> (<EMAIL>) #
# MIT Open License (see LICENSE file in the main directory) #
# #
# Update (June 2021): #
# The original code only applicable for the hollow fiber #
# New version support for the channel between two flat sheets: #
# 1. FMM: channel flow between flat membrane (top) / membrane (bottom) #
# 2. FMS: channel flow between flat membrane (top) / substrate (bottom) #
# For this reason, the hollow fiber expression will be renamed as HF #
# #
# Important note: #
# 1. The front factor for Phi_z, Phi_ex_z, and Phi_b_z come from the CM #
# For FMM/FMS geometries, it need to convert the proper front factors #
# as described in the paper [2]. #
#############################################################################
from aux_functions import *
from membrane_geometry_functions import *
import sol_solvent as PS
import sol_CT as CT
import sol_GT as GT
import osmotic_pressure_CS as CS
import transport_properties_SPHS as PHS
import sys
from numpy import *
from scipy.stats import linregress
from scipy.interpolate import interp1d
from scipy.linalg import norm
from copy import deepcopy
from datetime import datetime
def print_preface(fn_inp, fn_out, fn_out_log, f_log):
    """Print the start-up banner to stdout and mirror it into the log stream.

    Parameters
    ----------
    fn_inp : str
        Input (parameter) file name, echoed in the banner.
    fn_out : str
        Output file name, echoed in the banner.
    fn_out_log : str
        Log file name, echoed in the banner.
    f_log : file-like
        Open, writable log stream that receives a copy of the banner.

    Returns
    -------
    str
        The execution timestamp, formatted as "%H:%M (%d/%m/%Y)".
    """
    # timestamp used both in the banner and as the return value
    now_str = datetime.now().strftime("%H:%M (%d/%m/%Y)")
    print(fn_out_log)
    print ('##############################################################')
    print ('# Semi-analytic solution of mBLA UF using CP layer model    #')
    print ('# git-repository: https://github.com/gwpark-git/mBLA_UF.git #')
    print ('# Developer: <NAME> (IBI-4, Forschungszentrum Juelich)     #')
    print ('# Reference [1]: Park and Naegele, JCP (2020)               #')
    print ('# Reference [2]: Park and Naegele, Membranes (2021)         #')
    print ('##############################################################')
    print ('')
    print ('WARNING: Phi_ast definition has not been updated properly.')
    print ('')
    print ('Executed time (date): %s'%(now_str))
    print ('with Arguments: ', fn_inp, fn_out)
    print (' Log will be stored in ', fn_out_log)
    # mirror the same banner into the log file
    f_log.write('\n##############################################################\n')
    f_log.write('# Semi-analytic solution of mBLA UF using CP layer model    #\n')
    f_log.write('# git-repository: https://github.com/gwpark-git/mBLA_UF.git #\n')
    f_log.write('# Developer: <NAME> (IBI-4, Forschungszentrum Juelich)     #\n')
    f_log.write('# Reference [1]: Park and Naegele, JCP (2020)               #\n')
    f_log.write('# Reference [2]: Park and Naegele, Membranes (2021)         #\n')
    f_log.write('##############################################################\n\n')
    f_log.write('WARNING: Phi_ast definition has not been updated properly.\n\n')
    f_log.write('Executed time (date): %s\n'%(now_str))
    f_log.write('with Arguments: %s, %s\n'%(fn_inp, fn_out))
    f_log.write(' Log will be stored in %s\n\n'%(fn_out_log))
    return now_str
def print_summary(cond_GT, f_log=None):
    """Pretty-print the dimensional and dimensionless system parameters.

    Parameters
    ----------
    cond_GT : dict
        Condition dictionary. Keys read here include pressures
        ('Pin_ast', 'Pper', 'Pout', 'DLP', 'DTP_PS', 'DTP_HP'), geometry
        and material data ('Lp', 'eta0', 'R', 'L', 'a', 'a_H', 'D0',
        'Phi_ast', 'h'), permeability selection ('define_permeability',
        'kappa_Darcy') and dimensionless groups ('lam1', 'lam2', 'k',
        'alpha_ast', 'beta_ast', 'gamma', 'epsilon_d').
    f_log : file-like or None
        Log stream; a condensed copy of the summary is written to it when
        the stream is open.
        NOTE(review): the default is None, yet ``f_log.closed`` is accessed
        unconditionally below, so calling with the default raises
        AttributeError -- confirm all callers pass an open stream.

    Returns
    -------
    int
        Always 0.
    """
    # --- dimensional quantities (SI units) ---
    print ('\nSystem and operating conditions ( geometry = ', cond_GT['membrane_geometry'], '/ BC_inlet = ', cond_GT['BC_inlet'], ' ):')
    print (' - Summary of dimensional quantities (in SI units):')
    print ('\tPin_ast=%9d, Pper=%9d, ref_Pout=%9d '%(int(cond_GT['Pin_ast']), int(cond_GT['Pper']), int(cond_GT['Pout'])))
    print ('\tu_ast=%lf'%(cond_GT['u_ast']))
    print ('\tDLP=%9d, DTP_PS=%9d, DTP_HP=%9d '%(int(cond_GT['DLP']), int(cond_GT['DTP_PS']), int(cond_GT['DTP_HP'])))
    print ('\tLp =%4.3e, eta0=%4.3e, R=%4.3e, L=%4.3e'%(cond_GT['Lp'], cond_GT['eta0'], cond_GT['R'], cond_GT['L']))
    print ('\ta =%4.3e, a_H=%4.3e, D0=%4.3e, Phi_ast=%4.3e'%(cond_GT['a'], cond_GT['a_H'], cond_GT['D0'], cond_GT['Phi_ast']))
    # --- permeability: either Darcy permeability or Lp was given as input ---
    print ('\n - Permeability of system (kappa-Darcy and Lp):')
    if (cond_GT['define_permeability'].lower()=='darcy'):
        print('\tDarcy-permeability (kappa-Darcy = %.6e) is used instead of membrane permeability (Lp)'%(cond_GT['kappa_Darcy']))
        print('\th/R=%.4f, which leads Lp=%4.3e'%(cond_GT['h']/cond_GT['R'], cond_GT['Lp']))
        # print('\tCalculating Lp requires h, R, and eta0: Lp = %4.3e'%cond_GT['Lp'])
    else:
        print('\tMembrane-permeability (Lp = %4.3e) is used directly'%(cond_GT['Lp']))
        print('\tIt does not require h for all the calculation. AS AN EXAMPLE, if h=R/2, the re-calculated kappa-Darcy value will be %.6e'%(get_kappa_Darcy_from_Lp(cond_GT['membrane_geometry'], cond_GT['Lp'], cond_GT['R']/2., cond_GT['R'], cond_GT['eta0'])))
    # --- dimensionless groups ---
    print ('\n - Corresponding dimensionless quantities: (WARNING: lam1 and lam2 have different definitions from [2])')
    print ('\tlam1=%.4f, lam2=%.4f'%(cond_GT['lam1'], cond_GT['lam2']))
    print ('\tk=%.4f, alpha_ast=%.4f, beta_ast=%.4f, gamma=a_H/a=%4.3e'%(cond_GT['k'], cond_GT['alpha_ast'], cond_GT['beta_ast'], cond_GT['gamma']))
    print ('\tepsilon=%4.3e, epsilon_d=%4.3e (Pe_R=%.1f)'%(cond_GT['R']/cond_GT['L'], cond_GT['epsilon_d'], 1./cond_GT['epsilon_d']))
    # write a shorter copy of the summary into the log file
    if not f_log.closed:
        f_log.write('\nSystem and operating conditions:\n' )
        f_log.write(' - Summary of dimensional quantities (in SI units):\n')
        f_log.write('\tPin_ast=%9d, Pper=%9d, ref_Pout=%9d \n'%(int(cond_GT['Pin_ast']), int(cond_GT['Pper']), int(cond_GT['Pout'])))
        f_log.write('\tDLP=%9d, DTP_PS=%9d, DTP_HP=%9d \n'%(int(cond_GT['DLP']), int(cond_GT['DTP_PS']), int(cond_GT['DTP_HP'])))
        f_log.write('\tLp =%4.3e, eta0=%4.3e, R=%4.3e, L=%4.3e\n'%(cond_GT['Lp'], cond_GT['eta0'], cond_GT['R'], cond_GT['L']))
        f_log.write('\ta =%4.3e, a_H=%4.3e, D0=%4.3e\n'%(cond_GT['a'], cond_GT['a_H'], cond_GT['D0']))
        f_log.write(' - Corresponding dimensionless quantities:\n')
        f_log.write('\tk=%.4f, alpha_ast=%.4f, beta_ast=%.4f\n'%(cond_GT['k'], cond_GT['alpha_ast'], cond_GT['beta_ast']))
        f_log.write('\tepsilon=%4.3e, epsilon_d=%4.3e (Pe_R=%.1f)\n\n'%(cond_GT['R']/cond_GT['L'], cond_GT['epsilon_d'], 1./cond_GT['epsilon_d']))
    return 0
def print_iteration_info(n, z_div_L_arr, phiw_set_1, phiw_set_2, cond_GT, Pi_div_DLP_arr, gp_arr, gm_arr, f_log=None):
    """Report progress of one fixed-point iteration and return its deviation.

    Parameters
    ----------
    n : int
        Current iteration index (0-based; column headers are written at n==0).
    z_div_L_arr : array
        Dimensionless axial coordinates z/L.
    phiw_set_1, phiw_set_2 : array
        Wall volume fraction profiles (normalized by phi_bulk) of the
        previous and current iterate, respectively.
    cond_GT : dict
        Condition dictionary ('L', 'phi_bulk', 'dz', 'DLP', 'DTP_HP',
        'Pper_div_DLP', 'Pin_ast', 'weight', 'u_ast' are read here).
    Pi_div_DLP_arr : array
        Osmotic pressure profile normalized by DLP.
    gp_arr, gm_arr : array
        g+(z) and g-(z) auxiliary function values per grid point.
    f_log : file-like or None
        Log stream; one tab-separated row per iteration is appended when open.
        NOTE(review): default None would raise on ``f_log.closed`` -- confirm
        callers always pass a stream.

    Returns
    -------
    float
        chi_A, the mean absolute deviation norm(phiw_1 - phiw_2)/Nz between
        consecutive iterates (used as convergence measure).
    """
    # this part is for recording the analysis part
    Nz = size(z_div_L_arr)
    # report_step collects the quantities logged below
    # NOTE(review): allocated with 12 slots but only indices 0..5 are used
    report_step = zeros(12)
    report_step[0] = n
    ind_max_z = argmax(phiw_set_2)          # grid index of the phi_w peak
    chi_A = norm(phiw_set_1 - phiw_set_2)/float(Nz) # estimated deviations
    report_step[1] = z_div_L_arr[ind_max_z]*cond_GT['L']      # peak position (m)
    report_step[2] = phiw_set_2[ind_max_z]*cond_GT['phi_bulk'] # peak phi_w value
    report_step[3] = phiw_set_2[-1]*cond_GT['phi_bulk']        # phi_w at outlet
    r0_div_R = 0.; L_div_L = 1.
    dz_div_L = cond_GT['dz']/cond_GT['L']
    # length-averaged osmotic pressure, normalized by DTP_HP
    report_step[4] = length_average_f(z_div_L_arr, Pi_div_DLP_arr, L_div_L, dz_div_L)*cond_GT['DLP']/cond_GT['DTP_HP']
    # pressure profile along the centerline, normalized by DLP
    report_P_div_DLP_arr = zeros(Nz)
    for i in range(Nz):
        report_P_div_DLP_arr[i] = GT.get_P_conv(r0_div_R, z_div_L_arr[i], cond_GT, gp_arr[i], gm_arr[i])
    # length-averaged transmembrane pressure, normalized by DTP_HP
    report_step[5] = length_average_f(z_div_L_arr, report_P_div_DLP_arr - cond_GT['Pper_div_DLP'], L_div_L, dz_div_L)*cond_GT['DLP']/cond_GT['DTP_HP']
    print('iter=%d, chi_A=%4.3e (weight = %4.3e)'%(report_step[0], chi_A, cond_GT['weight']))
    print('\tz_max=%4.3f, phiw(z_max)=%.4f, phiw(L)=%.4f\n\t<Pi>/DTP_HP=%4.4f, DTP/DTP_HP=%4.4f'%(report_step[1], report_step[2], report_step[3], report_step[4], report_step[5]))
    print('\tP(0)/Pin_ast=%4.3f, u(0,0)/u_ast=%4.3f\n'%(report_P_div_DLP_arr[0]*cond_GT['DLP']/cond_GT['Pin_ast'], (report_P_div_DLP_arr[0] - report_P_div_DLP_arr[1])/(z_div_L_arr[1] - z_div_L_arr[0])))
    print()
    if not f_log.closed:
        if(n==0):
            # write the column legend once, before the first data row
            f_log.write('Columns of output files are \n')
            f_log.write('\t[0] n : number of iteration\n')
            f_log.write('\t[1] z_max : peak-position (z-coordinate) of phi_w\n')
            f_log.write('\t[2] phi_w(z_max) : peak-value of phi_w\n')
            f_log.write('\t[3] phi_w(L) : phi_w at the outlet z=L\n')
            f_log.write('\t[4] <Pi>/DTP_HP : (dimensionless) length-averaged osmotic pressure\n')
            f_log.write('\t[5] DTP/DTP_HP : (dimensionless) length-averaged transmembrane pressure\n')
            f_log.write('\t[6] chi_A : Absolute deviation using norm(phiw_1/phi_b - phiw_2/phi_b)/Nz\n\n')
            f_log.write('Calculating... \n')
        f_log.write('%d\t%e\t%e\t%e\t%e\t%e\t%e\n'%(report_step[0], report_step[1], report_step[2], report_step[3], report_step[4], report_step[5], chi_A))
    return chi_A
def aux_gen_analysis(z_arr, y_div_R_arr, phiw_arr, cond_GT, fcn_Pi_given, fcn_Dc_given, fcn_eta_given):
""" OUTPUT DATA FORMAT of mBLA-UF
Save analysis data into file with name of fn_out
Data first stored in array "re":
re[ 0] = z in the unit of m
re[ 1] = phi_w(z) in the dimensionless unit
re[ 2] = P(z) in the unit of Pa
re[ 3] = v_w(z) in the unit of m/sec
re[ 4] = u(r=0, z) in the unit of m/sec
re[ 5] = Pi(phi_w(z)) in the unit of Pa
re[ 6] = P(z) - P_perm in the unit of Pa
re[ 7] = v_w(z)/v^ast in the dimensionless unit
re[ 8] = u(r=0, z)/u^ast in the dimensionless unit
re[ 9] = Phi(z)/Phi(0) dimensionless
re[10] = j_mean(z)/j0_mean dimensionless (the same as before: re[9])
re[11] = excess part of re[10] dimensionless
re[12] = bulk part of re[12] dimensionless
Parameters:
z_arr = arrays for discretized z (m)
y_div_R_arr = arrays for discretized y (dimensionless).
works as auxiliary function to calculate some functions <- check
phiw_arr(z) = arrays for particle volume fraction at the wall
cond_GT = conditions for general transport properties
fcn_Pi(phi) = given function for the osmotic pressure
fcn_Dc_given(phi) = given function for gradient diffusion coefficient
fcn_eta_given(phi) = given function for suspension viscosity
fn_out = filename for data output
"""
Nz = size(z_arr); Ny = size(y_div_R_arr)
dz = z_arr[1] - z_arr[0]
dz_div_L = dz/cond_GT['L']
z_div_L_arr = z_arr/cond_GT['L']
sign_plus = +1.
sign_minus = -1.
re = zeros([Nz, 15])
re[:, 0] = z_arr
re[:, 1] = phiw_arr
Pi_arr = fcn_Pi_given(phiw_arr, cond_GT)
Pi_div_DLP_arr = deepcopy(Pi_arr)/cond_GT['DLP']
gp_arr = zeros(Nz) # constructing array for g+(z) function
gm_arr = zeros(Nz) # constructing array for g-(z) function
CT.gen_gpm_arr(sign_plus, z_div_L_arr, Pi_div_DLP_arr, cond_GT['k'], gp_arr)
CT.gen_gpm_arr(sign_minus, z_div_L_arr, Pi_div_DLP_arr, cond_GT['k'], gm_arr)
cond_GT['Gk'] = CT.get_Gk_boost(cond_GT['k'], dz_div_L, gp_arr[-1], gm_arr[-1], cond_GT['denom_Gk_BC_specific'])
cond_GT['Bp'] = CT.get_Bpm_conv(sign_plus, cond_GT)
cond_GT['Bm'] = CT.get_Bpm_conv(sign_minus, cond_GT)
ind_z0 = 0 #z-index at inlet
z0_div_L = 0. #z-coord at inlet
r0_div_R = 0. #r-coord at the centerline of pipe
rw_div_R = 1. #r-coord at the membrane wall
u_inlet = cond_GT['u_ast']*GT.get_u_conv(r0_div_R, 0., cond_GT, gp_arr[0], gm_arr[0], 1) # Ieta_arr_zi[-1]=1. because inlet flow field is assumed to be hydrodynamically fully developed flow, but the concentration is not yet build up (see the development length mentioned in [1])
u_inlet_mean = get_Uout_mean(cond_GT['membrane_geometry'])*u_inlet
phi_b = cond_GT['phi_bulk']
j0_mean = phi_b*u_inlet_mean # This is the base unit used for the cross-sectional averaged particle flux j. Note that j_mean(z)=j_mean(0) which is j0_mean
for i in range(Nz):
# when iteration is done
phi_arr_zi = zeros(Ny)
Ieta_arr_zi = zeros(Ny)
ID_arr_zi = zeros(Ny)
zi_div_L = z_div_L_arr[i]
re[i, 2] = cond_GT['DLP']*GT.get_P_conv(r0_div_R, zi_div_L, cond_GT, gp_arr[i], gm_arr[i])
vw_div_vw0_zi = GT.get_v_conv(rw_div_R, zi_div_L, Pi_div_DLP_arr[i], cond_GT, gp_arr[i], gm_arr[i])
re[i, 3] = cond_GT['vw0']*vw_div_vw0_zi
GT.gen_phi_wrt_yt(z_div_L_arr[i], phiw_arr[i], fcn_Dc_given, vw_div_vw0_zi, y_div_R_arr, phi_arr_zi, cond_GT)
GT.gen_INT_inv_f_wrt_yt(y_div_R_arr, phi_arr_zi, Ieta_arr_zi, fcn_eta_given, cond_GT)
Ieta_arr_zi /= Ieta_arr_zi[-1]
GT.gen_INT_inv_f_wrt_yt(y_div_R_arr, phi_arr_zi, ID_arr_zi, fcn_Dc_given, cond_GT)
re[i, 4] = cond_GT['u_ast']*GT.get_u_conv(r0_div_R, zi_div_L, cond_GT, gp_arr[i], gm_arr[i], Ieta_arr_zi[-1])
re[i, 5] = Pi_arr[i]
re[i, 6] = re[i, 2] - cond_GT['Pper']
re[i, 7] = re[i, 3]/cond_GT['vw0']
re[i, 8] = re[i, 4]/cond_GT['u_ast']
# Phi, Phi_ex, Phi_b in the original code uses the Eqs. (50), (60) and (60) respectively in reference [1].
# This is based | |
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2014
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** See LICENSE.TXT for license details
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
import time
import numpy.ma as ma
import math
from PySide import QtGui as gui
from PySide import QtCore as core
from numpy import *
from pylab import *
import matplotlib as mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib import pyplot
from matplotlib.colorbar import ColorbarBase
from matplotlib.pyplot import figure, show
from matplotlib.lines import Line2D
# RadialDisplay Widget
import math  # NOTE(review): duplicate -- 'math' is already imported near the top of this file
RENDER_PIXELS=300  # logical width/height (pixels) of the square data window (see setWindow call in MplCanvas.__init__)
MIN_DECIMATE = 3.0  # not referenced in this portion of the file -- presumably a minimum decimation factor; confirm
SCALE_RATE = 1.1  # not referenced here -- presumably the per-step zoom/scale multiplier; confirm
SCALE = 300.0  # not referenced here; confirm intended use
ZOOM_WINDOW_WIDTH_LIMIT = 1.0  # not referenced here -- presumably the smallest allowed zoom-window width; confirm
ZOOM_WINDOW_PIXEL_LIMIT = 20.0  # minimum rubberband drag (pixels) before a zoom is applied, per MplCanvas docstring
FIGURE_CANCAS_RATIO = 0.78  # not referenced here; note 'CANCAS' (sic, for 'CANVAS') is the established name
R = 150.0  # not referenced here; confirm intended use (possibly a default radius)
class MyMplCanvas(FigureCanvas):
    """Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
    def __init__(self, parent=None, width=3, height=3, dpi=100):
        """Create the figure/axes, draw the initial content, set Qt sizing.

        Parameters
        ----------
        parent : QWidget or None
            Parent widget (unused here; subclasses may re-parent).
        width, height : float
            Figure size in inches.
        dpi : int
            Figure resolution in dots per inch.
        """
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = self.fig.add_subplot(111)
        # We want the axes cleared every time plot() is called
        self.axes.hold(False)  # NOTE(review): Axes.hold exists only in old matplotlib (removed in 3.x)
        self.compute_initial_figure()  # presumably implemented by subclasses -- not defined in this class; confirm
        FigureCanvas.__init__(self, self.fig)
        # let the canvas grow/shrink with its layout
        FigureCanvas.setSizePolicy(self,
                                   gui.QSizePolicy.Expanding,
                                   gui.QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # display-option flags: azimuth lines, range rings, colorbar, picker label
        self.AZIMUTH = False
        self.RANGE_RING = False
        self.COLORBAR = True
        self.PICKER_LABEL = False
    def sizeHint(self):
        """Preferred Qt size: the canvas' current width/height in pixels."""
        w, h = self.get_width_height()
        return core.QSize(w, h)
    def minimumSizeHint(self):
        """Smallest size Qt may shrink this widget to."""
        return core.QSize(10, 10)
class MplCanvas(MyMplCanvas):#,gui.QWidget):#(MyMplCanvas):
"""
A class for displaying radar data in basic mode. In this mode, the width and height of plot are equal.
Parameters
----------
title : string
Plotting header label.
colormap : ColorMap
ColorMap object.
Attributes
----------
figurecanvas : FigureCanvas
The canvas for display.
zoomer : list
Storing zoom windows.
_zoomWindow : QRectF
Storing current zoom window.
origin : list
Storing the coordinates for onPress event.
var_ : dict
Storing variables for display.
AZIMUTH : boolean
Flag for azimuth display.
RANGE_RING : boolean
Flag for RANGE_RING display.
COLORBAR : boolean
Flag for colorbar display.
PICKER_LABEL : boolean
Flag for picker label display.
cb : ColorbarBase
Colorbar object.
cMap : ColorMap
ColorMap object.
pressEvent : event
Press event.
pressed : boolean
Flag for press event.
deltaX : float
X change of rubberband. Zoom window only when the change is greater than ZOOM_WINDOW_PIXEL_LIMIT.
deltaY : float
Y change of rubberband.
startX : float
Rubberband start x value.
startY : float
Rubberband start y value.
moveLabel : QLabel
Picker label
sweep : Sweep
Sweep object.
ranges : list
Sweep ranges
varName : string
Storing current display variable name.
x : list
Storing sweep x values.
y : list
Storing sweep y values.
label : string
Storing header label and sweep time stamp
"""
def __init__(self, title, colormap, parent=None, width=3, height=3, dpi=100):
    """Build the canvas: figure/axes, Qt plumbing, and zoom/picker state.

    Parameters
    ----------
    title : str
        Plotting header label; prepended to the sweep time stamp.
    colormap : ColorMap
        Per-variable colormap source used by update_figure().
    parent : QWidget, optional
    width, height : figure size hints (currently unused here).
    dpi : int
        Figure resolution.
    """
    self.fig = Figure()#plt.figure()#figsize=(width, height), dpi=dpi)
    plt.axis('off')
    self.axes = self.fig.add_subplot(111,aspect='equal')
    self.fig.set_dpi( dpi )
    self.headerLabel = title
    #self.axes.hold(False)
    #self.fig.canvas.mpl_connect('pick_event', self.onpick)
    # NOTE(review): FigureCanvas.__init__ returns None, so figurecanvas is
    # always None — presumably vestigial; confirm before relying on it.
    self.figurecanvas = FigureCanvas.__init__(self, self.fig)
    self.setParent(parent)
    FigureCanvas.setSizePolicy(self,
                               gui.QSizePolicy.Expanding,
                               gui.QSizePolicy.Expanding)
    FigureCanvas.updateGeometry(self)
    # Full data window: RENDER_PIXELS wide/high, centered on the origin.
    self.setWindow(core.QRectF(-1. * RENDER_PIXELS/2., 1. * RENDER_PIXELS/2., 1. * RENDER_PIXELS, -1. * RENDER_PIXELS))
    # self.origins = core.QPoint()
    self.ignorePaint = False
    #self.bottomRight = core.QPoint()
    self.rubberBand = gui.QRubberBand(gui.QRubberBand.Rectangle, self)
    self.zoomer = []  # stack of previous zoom windows (for zoom-out)
    # self.picker = []
    self.origin = [RENDER_PIXELS,RENDER_PIXELS]
    self.scaleFactor = 1.0
    # self.offsetX = 0.0
    # self.offsetY = 0.0
    self.var_ = {}  # current display variable values
    # Display-option flags.
    self.AZIMUTH = False
    self.RANGE_RING = False
    self.COLORBAR = True
    self.PICKER_LABEL = False
    self.cb = None  # colorbar, created on demand
    self.cMap = colormap
    self.pressEvent = None  # last mouse-press event
    self.pressed = False    # True while the left button is held
    self.deltaX = 0.        # rubberband extent in pixels
    self.deltaY = 0.
    self.startX = None      # data coordinates of the press point
    self.startY = None
    # Floating label showing x/y/z values under the cursor (picker mode).
    self.moveLabel = gui.QLabel("",self)
    self.moveLabel.setText("")
    self.moveLabel.hide()
    self.moveLabel.setStyleSheet("font-size:12px; margin:3px; padding:4px; background:#FFFFFF; border:2px solid #000;")
    self.mpl_connect('button_press_event', self.onPress)
    self.mpl_connect('button_release_event', self.onRelease)
    self.mpl_connect('motion_notify_event', self.onMove)
def onPress(self,event):
    """Mouse-press handler: start a zoom rubberband on left click."""
    if event.button == 1: ## left button
        xdata = event.xdata
        ydata = event.ydata
        # check if mouse is outside the figure
        if xdata is None or ydata is None:
            return
        self.pressed = True
        self.pressEvent = event
        # Widget coordinates: matplotlib's y runs bottom-up, Qt's top-down.
        self.origin = core.QPoint(event.x, self.height() - event.y)
        self.rubberBand.setGeometry(core.QRect(self.origin, core.QSize()))
        self.rubberBand.show()
        # start point of the zoom rectangle, in data coordinates
        self.startX = xdata
        self.startY = ydata
    if event.button == 2: ## middle button - zoom in the center (not implemented)
        pass
    if event.button == 3: ## right button (not implemented)
        pass
def onMove(self,event):
    """Mouse-move handler: grow the rubberband while dragging, or show
    the picker label with the data value under the cursor otherwise."""
    xdata = event.xdata
    ydata = event.ydata
    if xdata is None or ydata is None:
        # cursor left the axes: hide any picker label
        self.moveLabel.hide()
        return
    if self.pressed: ## display rubberband
        if self.PICKER_LABEL:
            self.moveLabel.hide()
        deltaX = event.x - self.pressEvent.x ## moved distance
        deltaY = event.y - self.pressEvent.y ## for rubberband
        # Force a square rubberband: use the smaller |delta| on both axes.
        dx = dy = min(fabs(deltaX),fabs(deltaY))
        if deltaX<0:
            dx = -dx
        if deltaY<0:
            dy = -dy
        # -dy because Qt's y axis points down.
        newRect = core.QRect(self.origin.x(), self.origin.y(), int(dx), -int(dy))
        newRect = newRect.normalized()
        self.rubberBand.setGeometry(newRect)
        self.deltaX = dx
        self.deltaY = dy
    else: ## display label
        if self.PICKER_LABEL:
            i,j = self.retrieve_z_value(xdata,ydata)
            self.moveLabel.show()
            if i is not None and j is not None:
                # self.moveLabel.setText(core.QString(r"x=%g, y=%g, z=%g" % (xdata,ydata,self.var_[i][j]))) ## TODO: should use xdata or self.x[i][j]
                self.moveLabel.setText(r"x=%g, y=%g, z=%g" % (xdata,ydata,self.var_[i][j])) ## TODO: should use xdata or self.x[i][j]
            else:
                # no gate close enough to the cursor
                # self.moveLabel.setText(core.QString(r"x=%g, y=%g, z=n/a" % (xdata,ydata)))
                self.moveLabel.setText(r"x=%g, y=%g, z=n/a" % (xdata,ydata))
            self.moveLabel.adjustSize()
            # Flip the label to the left of the cursor near the right edge.
            offset = 10
            if self.width()-event.x < self.moveLabel.width():
                offset = -10 - self.moveLabel.width()
            self.moveLabel.move(event.x+offset,self.height()-event.y)
def retrieve_z_value(self, xdata, ydata):
    """Return (i, j) indices of the gate nearest to (xdata, ydata).

    Scans every beam i, asks findNearest() for the closest gate j along
    x, and keeps the (i, j) with the smallest Euclidean distance to the
    cursor.  Returns (None, None) when no gate lies within the
    findNearest threshold.
    """
    # float('inf') replaces the old MIN = 99999 magic sentinel.
    best_dist = float('inf')
    iv = None
    jv = None
    for i, beam_x in enumerate(self.x):
        # np.copy() was dropped: findNearest only reads its input
        # (abs / masked ops allocate new arrays), so the copy was waste.
        j = self.findNearest(beam_x, xdata)
        if j is None:
            continue
        d = self.distance(xdata, ydata, beam_x[j], self.y[i][j])
        if d < best_dist:
            iv, jv = i, j
            best_dist = d
    return iv, jv
def onRelease(self,event):
    """Mouse-release handler: turn the dragged rubberband into a zoom."""
    if event.button == 1:
        self.pressed = False
        self.rubberBand.hide()
        xdata = event.xdata ## mouse real position
        ydata = event.ydata
        # Ignore releases outside the axes or without a recorded press.
        if xdata is None or ydata is None or self.startX is None or self.startY is None:
            return
        # Convert the rubberband pixel extent into data units.
        d0 = self.width() * FIGURE_CANCAS_RATIO
        x_range = self.axes.get_xlim()[1]-self.axes.get_xlim()[0]
        y_range = self.axes.get_ylim()[1]-self.axes.get_ylim()[0]
        (x1,y1) = self.startX, self.startY
        (x2,y2) = x1 + self.deltaX/d0 * x_range, y1+self.deltaY/d0 * y_range
        oldRect = core.QRectF() # current axes limits, before zooming
        oldRect.setLeft(self.axes.get_xlim()[0])
        oldRect.setRight(self.axes.get_xlim()[1])
        oldRect.setBottom(self.axes.get_ylim()[0])
        oldRect.setTop(self.axes.get_ylim()[1])
        rect = core.QRectF() # rubberband rect, in data coordinates
        rect.setLeft(min(x1,x2))
        rect.setRight(max(x1,x2))
        rect.setBottom(min(y1,y2))
        rect.setTop(max(y1,y2))
        ## react only when dragged region is greater than 0.01 times of old rect
        if fabs(self.deltaX)>ZOOM_WINDOW_PIXEL_LIMIT and \
            fabs(rect.width())>ZOOM_WINDOW_WIDTH_LIMIT and \
            fabs(rect.width()) >= 0.01*fabs(oldRect.width()):
            # Push the previous window so a later zoom-out can restore it.
            self.zoomer.append(oldRect)
            self.zoomTo(rect)
            self._zoomWindow = rect
def zoomTo(self, rect):
    """Adjust the axes so they show exactly *rect*, then redraw."""
    x_limits = (rect.left(), rect.right())
    y_limits = (rect.bottom(), rect.top())
    self.axes.set_xlim(*x_limits)
    self.axes.set_ylim(*y_limits)
    self.draw()
def findNearest(self, array, target, threshold=0.151):
    """Return the index of the value in *array* nearest to *target*.

    Candidates farther than *threshold* from target are ignored;
    returns None when nothing is within the threshold.

    Parameters
    ----------
    array : numpy.ndarray
        Candidate values (e.g. gate x positions along one beam).
    target : float
        Value to match.
    threshold : float, optional
        Maximum accepted |value - target|.  The default 0.151 is the
        previously hard-coded constant (just over the 150 m gate
        spacing); pass a different value for other gate spacings.
    """
    diff = abs(array - target)
    mask = np.ma.greater(diff, threshold)
    if np.all(mask):
        # every candidate exceeds the threshold
        return None
    masked_diff = np.ma.masked_array(diff, mask)
    return masked_diff.argmin()
def distance(self, x1, y1, x2, y2):
    """Euclidean distance between the points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return sqrt(dx * dx + dy * dy)
def sizeHint(self):
    """Preferred widget size: the canvas's current width/height in pixels."""
    return core.QSize(*self.get_width_height())
def minimumSizeHint(self):
    """Allow Qt layouts to shrink this widget down to 10x10 pixels."""
    return core.QSize(10, 10)
def setWindow(self, window):
    """Record *window* as the full/current zoom window for this widget
    and cache its aspect ratio."""
    self._aspectRatio = window.width() / window.height()
    self._zoomWindow = window
def resizeEvent(self, event):
    """Resize handler: keep the figure the same size as the widget."""
    sz = event.size()
    width = sz.width()
    height = sz.height()
    dpival = self.fig.dpi
    # Convert the new pixel size to inches so the figure fills the widget.
    winch = float(width)/dpival
    hinch = float(height)/dpival
    self.fig.set_size_inches( winch, hinch )
    #self.draw()
    #self.update()
    self.fig.canvas.draw()
    # Remember the current widget size.
    self.origin = [width,height]
def drawSweep(self, sweep, varName, beamWidth):
    """Install *sweep* as the current data set and redraw the figure.

    Stores the sweep geometry and the selected variable, builds the
    header label from the sweep's time stamp, then renders via
    update_figure().
    """
    self.sweep = sweep
    self.beamWidth = beamWidth
    self.ranges = sweep.ranges
    self.varName = varName.lower()
    self.var_ = sweep.vars_[varName]  # per-beam value lists
    self.x = sweep.x
    self.y = sweep.y
    self.label = self.headerLabel + sweep.timeLabel
    self.update_figure()  # render with the freshly installed data
def update_figure(self):
    """Re-render the current variable; must be called explicitly."""
    if len(self.var_) > 0:
        self.axes.clear()
        # avoid missing values of -32768 (sentinel for "no data")
        self.var_ = np.ma.array(self.var_, mask=(self.var_ < -32000))
        # Color-scale limits taken from the (masked) data itself.
        vmin = min(min(x) for x in self.var_)
        vmax = max(max(x) for x in self.var_)
        im = self.axes.pcolormesh(self.x,self.y,self.var_, vmin=vmin, vmax=vmax, cmap=self.cMap(self.varName))
        ## optional overlays: range rings, azimuth lines, colorbar
        if self.RANGE_RING:
            self.draw_range_ring()
        if self.AZIMUTH:
            self.draw_azimuth_line()
        if self.COLORBAR:
            self.draw_colorbar(im,vmin,vmax)
        #self.x[0:359]/1e3,self.y[0:359]/1e3,self.var_,vmin=vmin, vmax=vmax)
        #plt.axis('off') ## show x, y axes or not
        # Keep the current zoom window when switching variables.
        #self.adjustZoomWindow() ## zoomWindow will not change for different variable - keep using the current zoom window
        self.zoomTo(self._zoomWindow)
        self.axes.set_title(self.label, size=9) ## TODO: change size to be adaptive
        self.fig.canvas.draw()
        ## draw contour - a new feature - grayscale, no zoom in/out support
        ## self.axes.contour(self.x,self.y,self.var_,[0.5], linewidths=2., colors='k')
        #self.fig.canvas.blit(self.axes.bbox)
def draw_azimuth_line(self):
""" draw azimuths with 30-degree intervals """
angles = np.arange(0, 360, 30)
labels = [90,60,30,0,330,300,270,240,210,180,150,120]
x = R * np.cos(np.pi*angles/180)
y = R * np.sin(np.pi*angles/180)
for xi,yi,ang,lb | |
import calendar
from collections import defaultdict
from datetime import datetime, timedelta
from math import ceil
import numpy
from django.conf import settings
from django.db.models import Count, Sum, F, Avg, FloatField, Case, When, IntegerField, Q, DateTimeField, ExpressionWrapper
from django.db.models.functions import Coalesce
from django.template.defaultfilters import linebreaksbr
from django.utils import timezone
from django.utils.timesince import timesince
from memoize import memoize
from mxlive.lims.models import Data, Sample, Session, Project, AnalysisReport, Container, Shipment, ProjectType, SupportArea, UserFeedback, UserAreaFeedback, SupportRecord, FeedbackScale, DataType
from mxlive.utils.functions import ShiftEnd, ShiftStart, ShiftIndex
from mxlive.utils.misc import humanize_duration, natural_duration
HOUR_SECONDS = 3600
SHIFT = getattr(settings, "HOURS_PER_SHIFT", 8)
SHIFT_SECONDS = SHIFT * HOUR_SECONDS
MAX_COLUMN_USERS = 30
class ColorScheme(object):
    """Fixed categorical color palettes (hex RGB) for report charts."""
    # 4-, 8- and 16-color palettes; pick the smallest that fits the
    # number of series being plotted.
    Live4 = ["#8f9f9a", "#c56052", "#9f6dbf", "#a0b552"]
    Live8 = ["#073B4C", "#06D6A0", "#FFD166", "#EF476F", "#118AB2", "#7F7EFF", "#afc765", "#78C5E7"]
    Live16 = [
        "#67aec1", "#c45a81", "#cdc339", "#ae8e6b", "#6dc758", "#a084b6", "#667ccd", "#cd4f55",
        "#805cd6", "#cf622d", "#a69e4c", "#9b9795", "#6db586", "#c255b6", "#073B4C", "#FFD166",
    ]
def js_epoch(dt):
    """Convert *dt* to a JavaScript epoch (whole milliseconds).

    Falls back to the current local time when *dt* is falsy.  The
    timestamp is rounded to whole seconds, then scaled to ms.
    """
    timestamp = dt.timestamp() if dt else datetime.now().timestamp()
    return int(round(timestamp)) * 1000
@memoize(timeout=HOUR_SECONDS)
def get_data_periods(period='year'):
    """Distinct values of Data.created broken down by *period*
    (e.g. 'year'), sorted ascending.  Cached for one hour."""
    field = f'created__{period}'
    values = Data.objects.values_list(field, flat=True).order_by(field).distinct()
    return sorted(values)
def make_table(data, columns, rows, total_col=True, total_row=True):
    '''Convert a list of dictionaries into a list of lists ready for
    displaying as a table.

    data: list of dictionaries (one dictionary per column header),
        each keyed by row header.
    columns: list of column headers to display, ordered the same as data.
    rows: list of row headers to display.
    total_col: append an 'All' column with per-row totals.
    total_row: append a 'Total' row with per-column totals.

    Bug fix: previously the footer row was referenced unconditionally,
    so calling with total_row=False raised NameError; the footer is now
    genuinely optional.
    '''
    header_row = [''] + columns
    if total_col:
        header_row += ['All']
    # One table row per requested header, pre-filled with zeroes.
    table_data = [[str(r)] + [0] * (len(header_row) - 1) for r in rows]
    for row in table_data:
        for i, col_data in enumerate(data):
            row[i + 1] = col_data.get(row[0], 0)
        if total_col:
            # Row total over the data cells (excludes header and total).
            row[-1] = sum(row[1:-1])
    table = [header_row] + table_data
    if total_row:
        footer_row = ['Total'] + [0] * (len(header_row) - 1)
        for i in range(len(footer_row) - 1):
            footer_row[i + 1] = sum(d[i + 1] for d in table_data)
        table.append(footer_row)
    return table
def get_time_scale(filters):
    """Resolve the requested time scale from *filters*.

    Returns (period, periods, period_names) where period is one of
    'month', 'quarter', 'cycle' or 'year' (the default), periods are
    the numeric bucket keys, and period_names their display labels.
    """
    period = filters.get('time_scale', 'year')
    if period == 'quarter':
        periods = [1, 2, 3, 4]
        period_names = [f'Q{q}' for q in periods]
    elif period == 'cycle':
        periods = [1, 2]
        period_names = ['Jan-June', 'July-Dec']
    elif period == 'month':
        periods = list(range(1, 13))
        period_names = [calendar.month_abbr[m].title() for m in periods]
    else:
        # Fall back to one bucket per calendar year present in the data.
        periods = get_data_periods('year')
        period_names = periods
    return (period, periods, period_names)
def usage_summary(period='year', **all_filters):
period, periods, period_names = get_time_scale(all_filters)
filters = {f: val for f, val in all_filters.items() if f != 'time_scale'}
field = 'created__{}'.format(period)
created_filters = {f.replace('modified', 'created'): val for f, val in filters.items()}
### Sample Stats
sample_filters = {f.replace('beamline', 'datasets__beamline'): val for f, val in created_filters.items()}
samples = Sample.objects.filter(**sample_filters)
sample_counts_info = samples.values(field).order_by(field).annotate(count=Count('id'))
### Session Stats
sessions = Session.objects.filter(**created_filters)
session_counts_info = sessions.values(field).order_by(field).annotate(count=Count('id'))
throughput_info = sessions.values(field).order_by(field).annotate(
num_datasets=Count(Case(When(datasets__kind__name="MX Dataset", then=1), output_field=IntegerField())),
num_samples=Count('datasets__sample', distinct=True),
time=Sum(Coalesce('stretches__end', timezone.now()) - F('stretches__start'), distinct=True)
)
throughput_types_info = sessions.values(field, 'project__kind__name').order_by(field).annotate(
num_datasets=Count(Case(When(datasets__kind__name="MX Dataset", then=1), output_field=IntegerField())),
num_samples=Count('datasets__sample', distinct=True),
time=Sum(Coalesce('stretches__end', timezone.now()) - F('stretches__start'), distinct=True)
)
session_params = sessions.values(field).order_by(field).annotate(
hours=Sum(Coalesce('stretches__end', timezone.now()) - F('stretches__start')),
shifts=Sum(ShiftEnd(Coalesce('stretches__end', timezone.now())) - ShiftStart('stretches__start')),
)
### Project Stats
project_filters = {f.replace('beamline', 'sessions__beamline'): val for f, val in created_filters.items()}
project_info = sessions.values(field, 'project__name').distinct().order_by(
field, 'project__name').annotate(count=Count('project__name'))
new_project_info = Project.objects.filter(**project_filters).values(field, 'name').order_by(
field, 'name').annotate(count=Count('name'))
project_type_colors = {
kind: ColorScheme.Live8[i]
for i, kind in enumerate(ProjectType.objects.values_list('name', flat=True).order_by('-name'))
}
### Data Stats
datasets = Data.objects.filter(**filters)
dataset_info = datasets.values(field).order_by(field).annotate(
count=Count('id'), exposure=Avg('exposure_time'),
duration=Sum(F('end_time') - F('start_time'))
)
dataset_durations = {entry[field]: entry['duration'].total_seconds() / HOUR_SECONDS for entry in dataset_info}
data_time_info = datasets.annotate(shift=ShiftIndex('end_time')).values('shift', 'end_time__week_day').order_by(
'end_time__week_day', 'shift').annotate(count=Count('id'))
data_project_kind_info = datasets.values('project__kind__name').order_by('project__kind__name').annotate(
count=Count('id'))
data_types_info = datasets.values(field, 'kind__name').order_by(field).annotate(count=Count('id'))
data_types_names = list(DataType.objects.values_list('name', flat=True))
### Metrics Overview
# Distinct Users
distinct_users = {key: len([entry for entry in project_info if entry[field] == key]) for key in periods}
# New Users
new_users = {key: len([e for e in new_project_info if e[field] == key and e['count']]) for key in periods}
# Samples Measured
samples_measured = {entry[field]: entry['count'] for entry in sample_counts_info}
# Sessions
session_counts = {entry[field]: entry['count'] for entry in session_counts_info}
# Shifts Used
shifts_used = {entry[field]: ceil(entry['shifts'].total_seconds() / SHIFT_SECONDS) for entry in session_params}
# Time Used (hr)
time_used = {entry[field]: entry['hours'].total_seconds() / HOUR_SECONDS for entry in session_params}
# Usage Efficiency (%)
usage_efficiency = {key: time_used.get(key, 0) / (SHIFT * shifts_used.get(key, 1)) for key in periods}
# Datasets Collected
dataset_counts = {entry[field]: entry['count'] for entry in dataset_info}
# Minutes/Dataset
minutes_per_dataset = {key: dataset_durations.get(key, 0) * 60 / dataset_counts.get(key, 1) for key in periods}
# Datasets/Hour
dataset_per_hour = {key: dataset_counts.get(key, 0) / dataset_durations.get(key, 1) for key in periods}
# Average Exposure (sec)
dataset_exposure = {entry[field]: round(entry['exposure'], 3) for entry in dataset_info}
# Samples/Dataset
samples_per_dataset = {key: samples_measured.get(key, 0) / dataset_counts.get(key, 1) for key in periods}
# Sample Throughput (/h)
sample_throughput = {
entry[field]: 3600. * entry['num_samples'] / entry['time'].total_seconds()
for entry in throughput_info if entry['time'] and entry['num_samples']
}
# MX Dataset Throughput (/h)
data_throughput = {
entry[field]: 3600. * entry['num_datasets'] / entry['time'].total_seconds()
for entry in throughput_info if entry['time'] and entry['num_datasets']
}
### Plots
# Throughput Plot
throughput_data = [
{
period.title(): period_names[i],
"Samples": sample_throughput.get(per, 0),
"MX Datasets": data_throughput.get(per, 0)
} for i, per in enumerate(periods)
]
# Sample Throughput Plot by Project Kind
sample_throughput_types = [
{
**{period.title(): period_names[i]},
**{entry['project__kind__name']: entry['time'] and 3600. * entry['num_samples'] / entry['time'].total_seconds() or 0
for entry in throughput_types_info if entry[field] == per}
} for i, per in enumerate(periods)
]
# MX Dataset Throughput Plot by Project Kind
data_throughput_types = [
{
**{period.title(): period_names[i]},
**{entry['project__kind__name']: entry['time'] and 3600. * entry['num_datasets'] / entry['time'].total_seconds() or 0
for entry in throughput_types_info if entry[field] == per}
} for i, per in enumerate(periods)
]
# Productivity Plot
dataset_per_shift = {key: dataset_counts.get(key, 0) / shifts_used.get(key, 1) for key in periods}
# Datasets by time of week Plot
day_names = list(calendar.day_abbr)
dataset_per_day = [
{
'Day': day,
**{
'{:02d}:00 Shift'.format(entry['shift'] * SHIFT): entry['count']
for entry in data_time_info if (entry['end_time__week_day'] - 2) % 7 == i
}
} for i, day in enumerate(day_names)
]
# Datasets by Project Type Chart
category_counts = {entry['project__kind__name']: entry['count'] for entry in data_project_kind_info}
# Data Summary Table and Plot
data_counts_by_type = {k: {e['kind__name']: e['count'] for e in data_types_info if e[field] == k} for k in periods}
data_types_data = [
{
period.title(): period_names[i],
**{
kind: data_counts_by_type.get(key, {}).get(kind, 0) for kind in data_types_names
}
} for i, key in enumerate(periods)
]
data_type_table = make_table(data_types_data, period_names, data_types_names)
# Dataset Type Chart
data_type_chart = [
{'label': kind, 'value': sum([e[kind] for e in data_types_data]) } for kind in data_types_names
]
### Formatting
period_xvalues = periods
x_scale = 'linear'
time_format = ''
if period == 'month':
yr = timezone.now().year
period_names = [calendar.month_abbr[per].title() for per in periods]
period_xvalues = [datetime.strftime(datetime(yr, per, 1, 0, 0), '%c') for per in periods]
time_format = '%b'
x_scale = 'time'
elif period == 'year':
period_xvalues = [datetime.strftime(datetime(per, 1, 1, 0, 0), '%c') for per in periods]
time_format = '%Y'
x_scale = 'time'
# Dataset Summary Plot
period_data = defaultdict(lambda: defaultdict(int))
for summary in datasets.values(field, 'kind__name').order_by(field).annotate(count=Count('pk')):
period_data[summary[field]][summary['kind__name']] = summary['count']
### User Statistics
user_session_info = sessions.values(user=F('project__name'), kind=F('project__kind__name')).order_by('user').annotate(
duration=Sum(Coalesce('stretches__end', timezone.now()) - F('stretches__start'),),
shift_duration=Sum(ShiftEnd(Coalesce('stretches__end', timezone.now())) - ShiftStart('stretches__start')),
)
user_data_info = datasets.values(user=F('project__name')).order_by('user').annotate(count=Count('id'),
shutters=Sum(F('end_time') - F('start_time'))
)
user_sample_info = samples.values(user=F('project__name')).order_by('user').annotate(count=Count('id'))
user_types = {info['user']: info["kind"] for info in user_session_info}
user_stats = {}
# Datasets
user_stats['datasets'] = [
{'User': info['user'], 'Datasets': info['count'], 'Type': user_types.get(info['user'], 'Unknown')}
for info in sorted(user_data_info, key=lambda v: v['count'], reverse=True)[:MAX_COLUMN_USERS]
]
# Samples
user_stats['samples'] = [
{'User': info['user'], 'Samples': info['count'], 'Type': user_types.get(info['user'], 'Unknown')}
for info in sorted(user_sample_info, key=lambda v: v['count'], reverse=True)[:MAX_COLUMN_USERS]
]
# Time Used
user_stats['time_used'] = [
{'User': info['user'], 'Hours': round(info["duration"].total_seconds() / HOUR_SECONDS, 1), 'Type': user_types.get(info['user'], 'Unknown')}
for info in sorted(user_session_info, key=lambda v: v['duration'], reverse=True)[:MAX_COLUMN_USERS]
]
# Efficiency
user_shutters = {
info['user']: info["shutters"].total_seconds()
for info in user_data_info
}
user_stats['efficiency'] = [
{'User': info['user'],
'Percent': min(100, 100 * user_shutters.get(info['user'], 0) / info["duration"].total_seconds()),
'Type': user_types.get(info['user'], 'Unknown')}
for info in sorted(user_session_info,
key=lambda v: v['duration'] and user_shutters.get(v['user'], 0) / v['duration'].total_seconds() or 0,
reverse=True)[:MAX_COLUMN_USERS]
]
# Schedule Efficiency
user_stats['schedule_efficiency'] = [
{'User': info['user'], 'Percent': round(100*info["duration"] / info["shift_duration"], 1),
'Type': user_types.get(info['user'], 'Unknown')}
for info in sorted(user_session_info, key=lambda v: v['duration']/v['shift_duration'], reverse=True)[:MAX_COLUMN_USERS]
]
for key, data in user_stats.items():
user_stats[key] = {
'x-label': 'User',
'aspect-ratio': .7,
'color-by': 'Type',
'colors': project_type_colors,
'data': data
}
beamtime = {}
if settings.LIMS_USE_SCHEDULE:
from mxlive.schedule.stats import beamtime_summary
beamtime = beamtime_summary(**{f.replace('modified', 'start'): val for f, val in all_filters.items()})
stats = {'details': [
{
'title': 'Metrics Overview',
'style': 'row',
'content': [
{
'title': 'Usage Statistics',
'kind': | |
"""
This is adapt from evennia/evennia/commands/default/unloggedin.py.
The licence of Evennia can be found in evennia/LICENSE.txt.
"""
import re
import traceback
import time
import hashlib
from collections import defaultdict
from random import getrandbits
from django.conf import settings
from evennia.accounts.models import AccountDB
from evennia.objects.models import ObjectDB
from evennia.server.models import ServerConfig
from evennia.utils import logger, utils
from evennia.commands.command import Command
from evennia.commands.cmdhandler import CMD_LOGINSTART
from muddery.utils.builder import create_player, create_character
from muddery.utils.localized_strings_handler import _
from muddery.utils.game_settings import GAME_SETTINGS
from muddery.utils.utils import search_obj_data_key
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate", "CmdUnconnectedCreateConnect",
"CmdUnconnectedQuit", "CmdUnconnectedLook")
MULTISESSION_MODE = settings.MULTISESSION_MODE
# Helper function to throttle failed connection attempts.
# This can easily be used to limit player creation too,
# (just supply a different storage dictionary), but this
# would also block dummyrunner, so it's not added as default.
_LATEST_FAILED_LOGINS = defaultdict(list)
def _throttle(session, maxlim=None, timeout=None, storage=_LATEST_FAILED_LOGINS):
"""
This will check the session's address against the
_LATEST_LOGINS dictionary to check they haven't
spammed too many fails recently.
Args:
session (Session): Session failing
maxlim (int): max number of attempts to allow
timeout (int): number of timeout seconds after
max number of tries has been reached.
Returns:
throttles (bool): True if throttling is active,
False otherwise.
Notes:
If maxlim and/or timeout are set, the function will
just do the comparison, not append a new datapoint.
"""
address = session.address
if isinstance(address, tuple):
address = address[0]
now = time.time()
if maxlim and timeout:
# checking mode
latest_fails = storage[address]
if latest_fails and len(latest_fails) >= maxlim:
# too many fails recently
if now - latest_fails[-1] < timeout:
# too soon - timeout in play
return True
else:
# timeout has passed. Reset faillist
storage[address] = []
return False
else:
# store the time of the latest fail
storage[address].append(time.time())
return False
def create_guest_player(session):
    """
    Creates a guest player/character for this session, if one is available.

    Args:
        session (Session): the session which will use the guest player/character.

    Returns:
        GUEST_ENABLED (boolean), player (Player):
            the boolean is whether guest accounts are enabled at all.
            the Player which was created from an available guest name,
            or None when creation was refused or failed.
    """
    # check if guests are enabled.
    if not settings.GUEST_ENABLED:
        return False, None
    # Check IP bans.
    bans = ServerConfig.objects.conf("server_bans")
    if bans and any(tup[2].match(session.address) for tup in bans if tup[2]):
        # this is a banned IP!
        string = "{rYou have been banned and cannot continue from here." \
                 "\nIf you feel this ban is in error, please email an admin.{x"
        session.msg(string)
        session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
        return True, None
    # Initialize before the try block: the `finally` below returns
    # new_player, which previously was unbound on early-return paths
    # (e.g. when all guest slots were taken) causing UnboundLocalError.
    new_player = None
    try:
        # Find an available guest name.
        playername = None
        for playername in settings.GUEST_LIST:
            if not AccountDB.objects.filter(username__iexact=playername).count():
                break
            playername = None
        if playername is None:
            session.msg("All guest accounts are in use. Please try again later.")
            return True, None
        # Guests get a random throwaway password (fixes the corrupted
        # `password = <PASSWORD>)` line that was a syntax error).
        password = "%016x" % getrandbits(64)
        permissions = settings.PERMISSION_GUEST_DEFAULT
        new_player = create_player(playername, password, permissions=permissions)
        if new_player:
            create_character(new_player, playername, permissions=permissions)
    except Exception as e:
        # We are in the middle between logged in and -not, so we have
        # to handle tracebacks ourselves at this point. If we don't,
        # we won't see any errors at all.
        session.msg({"alert":_("There was an error creating the Player: %s" % e)})
        logger.log_trace()
    finally:
        # NOTE: returning from `finally` deliberately swallows any
        # re-raised exception and reports (True, None) instead.
        return True, new_player
def create_normal_player(session, playername, password):
    """
    Create a new player account.

    Args:
        session (Session): the session requesting the account.
        playername (str): the desired account name.
        password (str): the desired password.

    Returns:
        Player or None: the newly created player, or None when
        validation failed (the session is messaged with the reason).
    """
    # sanity checks
    if not re.findall(r'^[\w. @+-]+$', playername) or not (0 < len(playername) <= 32):
        # this echoes the restrictions made by django's auth
        # module (except not allowing spaces, for convenience of
        # logging in).
        string = "\n\r Playername can max be 32 characters or fewer. Letters, spaces, digits and @/./+/-/_ only."
        session.msg({"alert":string})
        return
    # strip excessive spaces in playername
    playername = re.sub(r"\s+", " ", playername).strip()
    if AccountDB.objects.filter(username__iexact=playername):
        # player already exists (we also ignore capitalization here)
        session.msg({"alert":_("Sorry, there is already a player with the name '%s'.") % playername})
        return
    # Reserve playernames found in GUEST_LIST
    if settings.GUEST_LIST and playername.lower() in (guest.lower() for guest in settings.GUEST_LIST):
        string = "\n\r That name is reserved. Please choose another Playername."
        session.msg({"alert":string})
        return
    if not re.findall(r'^[\w. @+-]+$', password) or not (3 < len(password)):
        # "characters" typo fixed in the user-facing message.
        string = "\n\r Password should be longer than 3 characters. Letters, spaces, digits and @\.\+\-\_ only." \
                 "\nFor best security, make it longer than 8 characters. You can also use a phrase of" \
                 "\nmany words if you enclose the password in quotes."
        session.msg({"alert":string})
        return
    # Check IP and/or name bans
    bans = ServerConfig.objects.conf("server_bans")
    if bans and (any(tup[0]==playername.lower() for tup in bans)
                 or
                 any(tup[2].match(session.address) for tup in bans if tup[2])):
        # this is a banned IP or name!
        string = "{rYou have been banned and cannot continue from here." \
                 "\nIf you feel this ban is in error, please email an admin.{x"
        session.msg({"alert":string})
        session.execute_cmd('{"cmd":"quit","args":""}')
        return
    # everything's ok. Create the new player account.
    new_player = None
    try:
        new_player = create_player(playername, password)
    except Exception as e:
        # We are in the middle between logged in and -not, so we have
        # to handle tracebacks ourselves at this point. If we don't,
        # we won't see any errors at all.
        session.msg({"alert":_("There was an error creating the Player: %s" % e)})
        # NOTE(review): `log_tracemsg` looks like a typo for
        # `logger.log_trace` (used elsewhere in this file) — confirm.
        logger.log_tracemsg()
    return new_player
def connect_normal_player(session, name, password):
    """
    Authenticate *name*/*password* and return the matching player.

    Args:
        session (Session): the session requesting the login.
        name (str): the account name to log in with.
        password (str): the password for that account.

    Returns:
        player (Player) or None: None after messaging the session on
        throttling, bad credentials, or an active ban.
    """
    # Refuse quickly when this address has failed too often recently.
    if _throttle(session, maxlim=5, timeout=5*60):
        # timeout is 5 minutes.
        session.msg("{RYou made too many connection attempts. Try again in a few minutes.{n")
        return None
    # Look the account up and verify its password.
    player = AccountDB.objects.get_account_from_name(name)
    password_ok = player.check_password(password) if player else None
    if not (player and password_ok):
        # No playername or password match.
        session.msg({"alert":_("Incorrect username or password.")})
        # Record this failure for the throttle above.
        _throttle(session)
        # Give the account a chance to react to the failed login.
        if player:
            player.at_failed_login(session)
        return None
    # Enforce name and IP bans.
    bans = ServerConfig.objects.conf("server_bans")
    name_banned = bool(bans) and any(tup[0] == player.name.lower() for tup in bans)
    ip_banned = bool(bans) and any(tup[2].match(session.address) for tup in bans if tup[2])
    if name_banned or ip_banned:
        string = "{rYou have been banned and cannot continue from here." \
                 "\nIf you feel this ban is in error, please email an admin.{x"
        session.msg(string)
        session.sessionhandler.disconnect(session, "Good bye! Disconnecting.")
        return None
    return player
class CmdUnconnectedConnect(Command):
    """
    connect to the game

    Usage:
        {"cmd":"connect",
         "args":{
            "playername":<playername>,
            "password":<password>
            }
        }
    """
    key = "connect"
    locks = "cmd:all()"

    def func(self):
        """
        Uses the Django admin api. Note that unlogged-in commands
        have a unique position in that their func() receives
        a session object instead of a source_object like all
        other types of logged-in commands (this is because
        there is no object yet before the player has logged in)
        """
        session = self.caller
        args = self.args
        try:
            playername = args["playername"]
            password = args["password"]
        except Exception:
            # Malformed args dict: report and bail out.
            string = 'Can not log in.'
            logger.log_errmsg(string)
            session.msg({"alert":string})
            return
        # check for too many login errors too quick.
        if _throttle(session, maxlim=5, timeout=5*60, storage=_LATEST_FAILED_LOGINS):
            # timeout is 5 minutes.
            session.msg({"alert":_("{RYou made too many connection attempts. Try again in a few minutes.{n")})
            return
        # Guest login
        if playername.lower() == "guest":
            enabled, new_player = create_guest_player(session)
            if new_player:
                session.msg({"login":{"name": playername, "dbref": new_player.dbref}})
                session.sessionhandler.login(session, new_player)
            if enabled:
                # Guests enabled: stop here whether or not a guest was
                # created.  When guests are disabled we fall through and
                # try a normal login with the literal name "guest".
                return
        if not password:
            session.msg({"alert":_("Please input password.")})
            return
        player = connect_normal_player(session, playername, password)
        if player:
            # actually do the login. This will call all other hooks:
            # session.at_login()
            # player.at_init() # always called when object is loaded from disk
            # player.at_first_login() # only once, for player-centric setup
            # player.at_pre_login()
            # player.at_post_login(session=session)
            session.msg({"login":{"name": playername, "dbref": player.dbref}})
            session.sessionhandler.login(session, player)
class CmdUnconnectedCreate(Command):
"""
create a new player account and login
Usage:
{"cmd":"create",
"args":{
"playername":<playername>,
"password":<password>,
"connect":<connect>
}
}
args:
connect: (boolean)connect after created
"""
key = "create"
locks = "cmd:all()"
def func(self):
"Do checks, create account and login."
session = self.caller
args = self.args
try:
playername = args["playername"]
password = args["password"]
connect = args["connect"]
except Exception:
string = 'Syntax error!'
string += '\nUsage:'
string += '\n {"cmd":"create_connect",'
string += '\n "args":{'
| |
traced_fn1(x):
return torch.neg(x)
@_trace(torch.rand(3, 4))
def traced_fn(x):
return traced_fn1(x) + 1
FileCheck().check("traced_fn").check("prim::CallFunction").check("aten::add") \
.run(str(traced_fn.graph))
@unittest.skip("error in first class mode")
def test_call_traced_mod_from_tracing_fn(self):
    """Calling a traced module from inside a traced function must raise:
    modules used under tracing have to be registered as submodules."""
    class TracedModule(torch.nn.Module):
        def __init__(self):
            super(TracedModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False)

        def forward(self, x):
            return torch.mm(x, self.param)

    tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))

    # The free-floating module `tm` is not a submodule of anything, so
    # tracing a function that closes over it should be rejected.
    with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"):
        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return tm(x) + 1.0
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_tracing_fn(self):
    """A scripted function invoked from a traced function should appear
    in the traced graph as a prim::CallFunction node (not inlined),
    followed by the aten::add from the `+ 1`."""
    @torch.jit.script
    def script_fn(x):
        return torch.neg(x)

    @_trace(torch.rand(3, 4))
    def traced_fn(x):
        return script_fn(x) + 1

    FileCheck().check("prim::CallFunction").check("aten::add").run(str(traced_fn.graph))
@unittest.skip("error in first class mode")
def test_call_script_mod_from_tracing_fn(self):
    """Tracing a function that closes over an unregistered ScriptModule must
    raise a RuntimeError."""
    with self.assertRaisesRegex(RuntimeError, "must be registered as submodules"):
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4), requires_grad=False)

            @torch.jit.script_method
            def forward(self, x):
                # Loop so the scripted body is non-trivial.
                for _i in range(4):
                    x += self.param
                return x

        sm = ScriptMod()

        @_trace(torch.rand(3, 4))
        def traced_fn(x):
            return sm(x) + 1.0
def test_call_python_fn_from_traced_module(self):
    """A plain Python function called inside a traced module's forward is
    inlined directly into the traced graph."""
    def python_fn(x):
        return torch.neg(x)

    class TracedModule(torch.nn.Module):
        def __init__(self):
            super(TracedModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))

        def forward(self, x):
            return torch.mm(python_fn(x), self.param)

    tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))

    # Note: parameter self.param from the traced module should appear as
    # an input to the graph and the neg op from the Python function should
    # be properly inlined
    self.assertTrue(len(list(tm.graph.inputs())) == 2)
    FileCheck().check("aten::neg").check("aten::mm").run(str(tm.graph))
def test_call_python_mod_from_traced_module(self):
    """A Python nn.Module submodule called from a traced module stays a
    prim::CallMethod node; its parameters are not baked in as constants."""
    class PythonModule(torch.nn.Module):
        def __init__(self):
            super(PythonModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(5, 7))

        def forward(self, x):
            return torch.mm(x, self.param)

    class TracedModule(torch.nn.Module):
        def __init__(self):
            super(TracedModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 5))
            self.mod = PythonModule()

        def forward(self, x):
            return self.mod(torch.mm(x, self.param)) + 1.0

    tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))

    # check_not("value=<Tensor>"): no parameter was inlined as a constant.
    FileCheck().check_not("value=<Tensor>").check("aten::mm")\
        .check("prim::CallMethod[name=\"forward\"]").check("aten::add") \
        .run(str(tm.graph))
    # The submodule has its own graph containing the mm.
    FileCheck().check("aten::mm").run(str(tm.mod.graph))
def test_op_dtype(self):
    """torch.arange must yield identical values AND dtypes when run eagerly
    versus compiled with TorchScript."""
    def assert_same_value_and_dtype(lhs, rhs):
        self.assertEqual(lhs, rhs)
        self.assertEqual(lhs.dtype, rhs.dtype)

    def fn():
        a = torch.arange(10)
        b = torch.arange(10, dtype=torch.float)
        c = torch.arange(1, 10, 2)
        d = torch.arange(1, 10, 2, dtype=torch.float)
        e = torch.arange(1, 10., 2)
        f = torch.arange(1, 10., 2, dtype=torch.float)
        return a, b, c, d, e, f

    compiled = torch.jit.script(fn)
    # Compare every eager result against its scripted counterpart.
    for eager_tensor, script_tensor in zip(fn(), compiled()):
        assert_same_value_and_dtype(eager_tensor, script_tensor)
def test_floordiv(self):
    """`//` and torch.floor_divide agree between eager Python and TorchScript
    for tensor/tensor and tensor/scalar operand combinations (both warn)."""
    funcs_template = dedent('''
    def fn():
        ten = {a_construct}
        ten_or_scalar = {b_construct}
        return ten // ten_or_scalar, torch.floor_divide(ten, ten_or_scalar)
    ''')
    # Left operands are always tensors; right operands mix scalars and tensors.
    lhs = ["torch.tensor([5.5, 3.2])", "torch.tensor([2, 2])", "torch.tensor([3, 2])"]
    rhs = ["1.5", "2", "4", "1.1"] + lhs
    for tensor in lhs:
        for tensor_or_scalar in rhs:
            funcs_str = funcs_template.format(a_construct=tensor, b_construct=tensor_or_scalar)
            scope = {}
            # Eager version via exec, scripted version via CompilationUnit.
            execWrapper(funcs_str, globals(), scope)
            cu = torch.jit.CompilationUnit(funcs_str)
            f_script = cu.fn
            f = scope['fn']
            # floor division on tensors emits a deprecation-style warning.
            with self.assertWarnsOnceRegex(UserWarning, "floor_divide"):
                self.assertEqual(f_script(), f())
def test_call_python_fn_from_script_fn(self):
    """An @torch.jit.ignore'd Python function called from a script function
    is kept as a PythonOp and still executes correctly."""
    @torch.jit.ignore
    def python_fn(x):
        return torch.neg(x)

    @torch.jit.script
    def script_fn(x):
        return python_fn(x) + 1

    # Note: the call to python_fn appears as `^python_fn()` and is called
    # as a PythonOp in the interpreter
    a = torch.tensor(1)
    self.assertEqual(script_fn(a), torch.tensor(0))
    FileCheck().check("python_fn").run(str(script_fn.graph))
def test_call_python_mod_from_script_fn(self):
    """A Python nn.Module captured by a script function is called opaquely
    as a python_value; its parameters are not inlined."""
    class PythonModule(torch.nn.Module):
        def __init__(self):
            super(PythonModule, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(5, 7))

        def forward(self, x):
            return torch.mm(x, self.param)

    pm = PythonModule()

    @torch.jit.script
    def script_fn(x):
        return pm(x) + 1

    # Note: call to pm(x) appears as ^<python_value>() in the trace.
    # Parameters are NOT inlined.
    FileCheck().check("python_value").check("aten::add").run(str(script_fn.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_script_fn(self):
    """A script function called from another script function appears as a
    prim::CallFunction node."""
    @torch.jit.script
    def script_fn1(x):
        return torch.neg(x)

    @torch.jit.script
    def script_fn(x):
        return script_fn1(x) + 1

    FileCheck().check("prim::CallFunction").run(str(script_fn.graph))
def test_call_script_mod_from_script_fn(self):
    """Calling a free-standing ScriptModule (not a submodule of the caller)
    from a script function must raise a RuntimeError."""
    with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()

            @torch.jit.script_method
            def forward(self, x):
                return torch.mm(x, torch.zeros([4, 3]))

        sm = ScriptMod()

        @torch.jit.script
        def script_fn(x):
            return sm(x) + 1
def test_call_python_fn_from_script_module(self):
    """An @torch.jit.ignore'd Python function called from a script_method is
    left as an opaque python call in the method's graph."""
    @torch.jit.ignore
    def python_fn(x):
        return torch.neg(x)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))

        @torch.jit.script_method
        def forward(self, x):
            return python_fn(torch.mm(x, self.param))

    sm = ScriptMod()
    FileCheck().check("aten::mm").check("python_fn") \
        .run(str(sm.forward.graph))
def test_call_python_mod_from_script_module(self):
    """A Python submodule with an @ignore'd forward is called opaquely from a
    script module; its parameters are not inlined into the caller's graph."""
    class PythonMod(torch.nn.Module):
        def __init__(self):
            super(PythonMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 5))

        @torch.jit.ignore
        def forward(self, x):
            return torch.mm(x, self.param)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))
            self.pm = PythonMod()

        @torch.jit.script_method
        def forward(self, x):
            return self.pm(torch.mm(x, self.param))

    sm = ScriptMod()
    # Note: the call into PythonMod appears as ^forward(). Parameters
    # are NOT inlined
    FileCheck().check("aten::mm").check("forward").run(str(sm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_script_module(self):
    """A script function called from a script_method appears as a
    prim::CallFunction node after the mm."""
    @torch.jit.script
    def script_fn(x):
        return torch.neg(x)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))

        @torch.jit.script_method
        def forward(self, x):
            return script_fn(torch.mm(x, self.param))

    sm = ScriptMod()
    graph = (sm.forward.graph)
    FileCheck().check("aten::mm").check("prim::CallFunction").run(str(graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_mod_from_script_module(self):
    """A ScriptModule submodule called from a script_method shows up as a
    prim::CallMethod; only the caller's own mm stays in the outer graph."""
    class ScriptMod1(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod1, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(3, 5))

        @torch.jit.script_method
        def forward(self, x):
            return torch.mm(x, self.param)

    class ScriptMod(torch.jit.ScriptModule):
        def __init__(self):
            super(ScriptMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3))
            self.tm = ScriptMod1()

        @torch.jit.script_method
        def forward(self, x):
            return self.tm(torch.mm(x, self.param))

    sm = ScriptMod()
    # Note: the parameters from both modules should appear in the flattened
    # input list to the graph. The mm op from ScriptMod1 should be properly
    # inlined
    # 3 % values in graph input lists, two mms in body
    FileCheck().check_count('%', 3).check(":").check_count("mm", 1).check("prim::CallMethod").run(str(sm.graph))
def test_module_with_params_called_fails(self):
    """Calling an unowned, parameterized ScriptModule from a script function
    must raise (it is not a submodule of the caller)."""
    with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
        class ScriptMod(torch.jit.ScriptModule):
            def __init__(self):
                super(ScriptMod, self).__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 3))

            @torch.jit.script_method
            def forward(self, x):
                return torch.mm(x, self.param)

        sm = ScriptMod()

        @torch.jit.script
        def some_func(x):
            return sm(x)
def test_tuple_index_to_list(self):
    """Indexing a tuple with a runtime (non-constant) index works when all
    elements share one type; mixed element types cannot resolve an output
    type and must be rejected."""
    def test_non_constant_input(a):
        # type: (bool) -> int
        if a:
            b = 1
        else:
            b = 0
        c = (0, 1)
        return c[b]

    self.checkScript(test_non_constant_input, (True,))
    self.checkScript(test_non_constant_input, (False,))

    with self.assertRaisesRegex(RuntimeError, "because we cannot resolve the output type"):
        @torch.jit.script
        def test_non_constant_input(a):
            # type: (bool) -> None
            if a:
                b = 1
            else:
                b = 0
            # Mixed int/float tuple: no single element type to index into.
            c = (0, 1.1)
            print(c[b])
def test_tuple_indexing(self):
    """Tuple indexing and slicing in TorchScript: constant and negative
    indices, out-of-range errors, and slices clamped to the tuple bounds.

    Bug fix: the negative out-of-bounds case previously re-ran
    ``test_indexing_out_of_bounds_pos`` (copy/paste), so
    ``test_indexing_out_of_bounds_neg`` was defined but never exercised.
    """
    def tuple_index(a):
        if bool(a):
            b = (1, 2)
        else:
            b = (0, 2)
        return b[-2], b[1]

    self.checkScript(tuple_index, (torch.tensor([0]),))
    self.checkScript(tuple_index, (torch.tensor([1]),))
    self.checkScript(tuple_index, (torch.tensor([1]),), optimize=True)
    tuple_comp = torch.jit.script(tuple_index)
    # Exactly two TupleIndex nodes: one per returned element.
    FileCheck().check_count("TupleIndex", 2, exactly=True).run(str(tuple_comp.graph))

    with self.assertRaisesRegex(RuntimeError, "index must be an integer"):
        @torch.jit.script
        def test_indexing_float():
            c = (1, 2)
            return c[0.1]

    def test_indexing_out_of_bounds_pos():
        c = (1, 2)
        return c[2]

    self.checkScriptRaisesRegex(test_indexing_out_of_bounds_pos, (), Exception,
                                "out of range")

    def test_indexing_out_of_bounds_neg():
        c = (1, 2)
        return c[-3]

    # Fixed: previously checked test_indexing_out_of_bounds_pos again.
    self.checkScriptRaisesRegex(test_indexing_out_of_bounds_neg, (), Exception,
                                "out of range")

    def negative_index():
        tup = (1, 2, 3, 4)
        return tup[-1]

    self.checkScript(negative_index, [])

    def really_negative_index():
        tup = (1, 2, 3, 4)
        return tup[-100]

    self.checkScriptRaisesRegex(really_negative_index, [], Exception, "index out of range")

    def negative_slice():
        tup = (1, 2, 3, 4)
        return tup[-3:4]

    self.checkScript(negative_slice, [])

    def really_slice_out_of_bounds():
        tup = (1, 2, 3, 4)
        return tup[-300:4000]

    # Slices are clamped, not errors, even far past the bounds.
    self.checkScript(really_slice_out_of_bounds, [])
def test_namedtuple_attr(self):
    """Namedtuple field access (max(...).indices) works in script; unknown
    fields and attribute access on anonymous tuples are rejected."""
    def f(x):
        return x.max(dim=1).indices + torch.max(x, dim=1).indices

    self.checkScript(f, (torch.rand(20, 20, 20),), optimize=True)

    with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
        @torch.jit.script
        def g1(x):
            return x.max(dim=1).unknown_symbol

    with self.assertRaisesRegex(RuntimeError, "object has no attribute or method"):
        @torch.jit.script
        def g2(x):
            print((x, x, x).__doc__)
            return x
def test_tuple_len(self):
    """len() of a heterogeneous tuple is a compile-time constant in script;
    slicing past the end of a tuple yields an empty tuple."""
    @torch.jit.script
    def foo():
        return len((1, "str", None))

    self.assertEqual(foo(), 3)

    @torch.jit.script
    def test_indexing_end_out_of_bounds():
        c = (1, 2)
        return c[2:10]

    self.assertEqual(test_indexing_end_out_of_bounds(), ())
def test_lower_nested_tuples(self):
    """Nested constant tuples constant-propagate into a single prim::Constant
    and can then be fully lowered."""
    @torch.jit.script
    def test():
        return ((1, 2), 3)

    self.run_pass('constant_propagation', test.graph)
    FileCheck().check("prim::Constant").check_not("TupleConstruct").run(test.graph)
    # fails if a tuple can't be lowered
    self.run_pass('lower_all_tuples', test.graph)
def test_unwrap_optional_builtin(self):
    """torch.jit._unwrap_optional unwraps Optional values, raises on None at
    runtime (both eager and scripted), and cannot infer a type from a
    literal None argument at compile time."""
    def test(x):
        # type: (Optional[int]) -> int
        x = torch.jit._unwrap_optional(x)
        x = x + x  # noqa: T484
        return x

    self.checkScript(test, (3,))

    # Eager path raises AssertionError on None ...
    with self.assertRaisesRegex(AssertionError, "Unwrapping null optional"):
        test(None)

    # ... while the scripted path raises RuntimeError.
    test_script = torch.jit.script(test)
    with self.assertRaisesRegex(RuntimeError, "Unwrapping null optional"):
        test_script(None)

    @torch.jit.script
    def test_test():
        return torch.jit._unwrap_optional(1)

    with self.assertRaisesRegex(RuntimeError, r"could not be inferred from actual type None"):
        @torch.jit.script
        def test_no_type():
            # type: () -> int
            return torch.jit._unwrap_optional(None)
def test_indexing_error(self):
    """Subscripting a non-subscriptable type (int) is a compile-time error."""
    with self.assertRaisesRegex(RuntimeError, "'int' object is not subscriptable"):
        @torch.jit.script
        def test_wrong_type():
            a = 8
            return a[0]
def test_unsupported_builtin_error(self):
    """An unsupported Python builtin (math.hypot) produces a clear error."""
    with self.assertRaisesRegex(RuntimeError,
                                "Python builtin <built-in function hypot> is currently"):
        @torch.jit.script
        def test_unsupported(a):
            return math.hypot(a, 2.0)
def test_annotated_script_fn(self):
    """Nested Tuple annotations are reflected verbatim in the fn's schema."""
    @torch.jit.script
    def foo(x, y, z):
        # type: (Tensor, Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tuple[Tensor, Tensor]]) -> Tensor
        return x

    self.assertExpected(str(foo.schema))
def test_annotated_script_method(self):
    """Tuple annotations on a script_method are reflected in its schema."""
    class SM(torch.jit.ScriptModule):
        @torch.jit.script_method
        def forward(self, x, y):
            # type: (Tuple[Tensor, Tensor], Tensor) -> Tuple[Tensor, Tensor, Tensor]
            return y, y, y

    sm = SM()
    self.assertExpectedStripMangled(str(sm.forward.schema))
def test_annotated_script_fn_return_mismatch(self):
    """Returning a value that contradicts the annotated return type fails."""
    with self.assertRaisesRegex(RuntimeError, "but is actually of type"):
        @torch.jit.script
        def return_tup(x):
            # type: (Tensor) -> Tuple[Tuple[Tensor, Tensor], Tensor]
            return x, x  # noqa: T484
def test_annotated_script_fn_arg_mismatch(self):
    """Using a Tuple-annotated argument as a Tensor fails the call check."""
    with self.assertRaisesRegex(RuntimeError, r"Arguments for call are not valid"):
        @torch.jit.script
        def tuple_arg(x):
            # type: (Tuple[Tensor, Tensor]) -> Tensor
            return x + 1  # noqa: T484
def test_script_non_tensor_args_outputs(self):
    """Script functions accept and return non-tensor types (float here)."""
    @torch.jit.script
    def fn(x, y):
        # type: (Tensor, float) -> float
        return float((x + y).sum())

    x = torch.ones(2, 2)
    z = fn(x, 1)
    # (1+1) summed over 4 elements = 8.0, returned as a Python float.
    self.assertIsInstance(z, float)
    self.assertEqual(z, 8.)
@unittest.skip('https://github.com/pytorch/pytorch/issues/9595')
def test_inline_and_run_annotated_script_fn(self):
    """Inlining one annotated script fn into another (skipped: known issue)."""
    @torch.jit.script
    def to_inline(x, y):
        # type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
        return y

    @torch.jit.script
    def some_func(x):
        return to_inline((x, x), x)

    x = torch.rand(3, 4)
    self.assertEqual(some_func(x), x)
def | |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Ospfv3(Base):
"""Ospfv3 Interface level Configuration
The Ospfv3 class encapsulates a list of ospfv3 resources that are managed by the user.
A list of resources can be retrieved from the server using the Ospfv3.find() method.
The list can be managed by using the Ospfv3.add() and Ospfv3.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'ospfv3'
_SDM_ATT_MAP = {
'Active': 'active',
'AdjSID': 'adjSID',
'AreaId': 'areaId',
'AreaIdIp': 'areaIdIp',
'AuthAlgo': 'authAlgo',
'BFlag': 'bFlag',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DeadInterval': 'deadInterval',
'DemandCircuit': 'demandCircuit',
'DescriptiveName': 'descriptiveName',
'EnableAdjSID': 'enableAdjSID',
'EnableAuthentication': 'enableAuthentication',
'EnableBfdRegistration': 'enableBfdRegistration',
'EnableFastHello': 'enableFastHello',
'EnableIgnoreDbDescMtu': 'enableIgnoreDbDescMtu',
'Errors': 'errors',
'ExternalCapability': 'externalCapability',
'GFlag': 'gFlag',
'HelloInterval': 'helloInterval',
'HelloMultiplier': 'helloMultiplier',
'InstanceId': 'instanceId',
'Key': 'key',
'LFlag': 'lFlag',
'LinkMetric': 'linkMetric',
'LocalRouterID': 'localRouterID',
'Multiplier': 'multiplier',
'Name': 'name',
'NetworkType': 'networkType',
'NssaCapability': 'nssaCapability',
'Ospfv3IfaceState': 'ospfv3IfaceState',
'Ospfv3NeighborState': 'ospfv3NeighborState',
'PFlag': 'pFlag',
'Priority': 'priority',
'Router': 'router',
'SaId': 'saId',
'SessionInfo': 'sessionInfo',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TypeAreaId': 'typeAreaId',
'V6': 'v6',
'VFlag': 'vFlag',
'Weight': 'weight',
}
def __init__(self, parent):
    """Attach this Ospfv3 node under *parent* in the REST resource hierarchy."""
    super(Ospfv3, self).__init__(parent)
@property
def Connector(self):
    """Child Connector resources of this ospfv3 node.

    Returns
    -------
    - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector as _Connector
    return _Connector(self)
@property
def LearnedInfo(self):
    """Child LearnedInfo resources of this ospfv3 node.

    Returns
    -------
    - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100.LearnedInfo): An instance of the LearnedInfo class
    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100 import LearnedInfo as _LearnedInfo
    return _LearnedInfo(self)
@property
def Active(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['Active']
    return Multivalue(self, self._get_attribute(key))
@property
def AdjSID(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): An Adjacency Segment Identifier (Adj-SID) represents a router adjacency in Segment Routing."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['AdjSID']
    return Multivalue(self, self._get_attribute(key))
@property
def AreaId(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): OSPFv3 Area ID for a non-connected interface, displayed in Integer format."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['AreaId']
    return Multivalue(self, self._get_attribute(key))
@property
def AreaIdIp(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): OSPFv3 Area ID for a non-connected interface, displayed in IP Address format."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['AreaIdIp']
    return Multivalue(self, self._get_attribute(key))
@property
def AuthAlgo(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Authentication Algorithms."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['AuthAlgo']
    return Multivalue(self, self._get_attribute(key))
@property
def BFlag(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): B Flag: Backup Flag: If set, the Adj-SID refers to an adjacency that is eligible for protection."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['BFlag']
    return Multivalue(self, self._get_attribute(key))
@property
def ConnectedVia(self):
    """DEPRECATED list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire."""
    key = self._SDM_ATT_MAP['ConnectedVia']
    return self._get_attribute(key)

@ConnectedVia.setter
def ConnectedVia(self, value):
    key = self._SDM_ATT_MAP['ConnectedVia']
    self._set_attribute(key, value)
@property
def Count(self):
    """number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group."""
    key = self._SDM_ATT_MAP['Count']
    return self._get_attribute(key)
@property
def DeadInterval(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Dead Interval."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['DeadInterval']
    return Multivalue(self, self._get_attribute(key))
@property
def DemandCircuit(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Option bit 5."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['DemandCircuit']
    return Multivalue(self, self._get_attribute(key))
@property
def DescriptiveName(self):
    """str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context."""
    key = self._SDM_ATT_MAP['DescriptiveName']
    return self._get_attribute(key)
@property
def EnableAdjSID(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Makes the Adjacency Segment Identifier (Adj-SID) available."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['EnableAdjSID']
    return Multivalue(self, self._get_attribute(key))
@property
def EnableAuthentication(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Enable Authentication."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['EnableAuthentication']
    return Multivalue(self, self._get_attribute(key))
@property
def EnableBfdRegistration(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Enable BFD Registration."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['EnableBfdRegistration']
    return Multivalue(self, self._get_attribute(key))
@property
def EnableFastHello(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Enable Fast Hello."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['EnableFastHello']
    return Multivalue(self, self._get_attribute(key))
@property
def EnableIgnoreDbDescMtu(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Ignore DB-Desc MTU."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['EnableIgnoreDbDescMtu']
    return Multivalue(self, self._get_attribute(key))
@property
def Errors(self):
    """list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred."""
    key = self._SDM_ATT_MAP['Errors']
    return self._get_attribute(key)
@property
def ExternalCapability(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Option bit 1."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['ExternalCapability']
    return Multivalue(self, self._get_attribute(key))
@property
def GFlag(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): G-Flag: Group Flag: If set, the G-Flag indicates that the Adj-SID refers to a group of adjacencies where it may be assigned."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['GFlag']
    return Multivalue(self, self._get_attribute(key))
@property
def HelloInterval(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Hello Interval."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['HelloInterval']
    return Multivalue(self, self._get_attribute(key))
@property
def HelloMultiplier(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Hello Multiplier."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['HelloMultiplier']
    return Multivalue(self, self._get_attribute(key))
@property
def InstanceId(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Instance ID."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['InstanceId']
    return Multivalue(self, self._get_attribute(key))
@property
def Key(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Key."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['Key']
    return Multivalue(self, self._get_attribute(key))
@property
def LFlag(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): L-Flag: Local Flag. If set, then the value/index carried by the SID has local significance."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['LFlag']
    return Multivalue(self, self._get_attribute(key))
@property
def LinkMetric(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Link Metric."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['LinkMetric']
    return Multivalue(self, self._get_attribute(key))
@property
def LocalRouterID(self):
    """list(str): Router ID."""
    key = self._SDM_ATT_MAP['LocalRouterID']
    return self._get_attribute(key)
@property
def Multiplier(self):
    """number: Number of layer instances per parent instance (multiplier)."""
    key = self._SDM_ATT_MAP['Multiplier']
    return self._get_attribute(key)

@Multiplier.setter
def Multiplier(self, value):
    key = self._SDM_ATT_MAP['Multiplier']
    self._set_attribute(key, value)
@property
def Name(self):
    """str: Name of NGPF element, guaranteed to be unique in Scenario."""
    key = self._SDM_ATT_MAP['Name']
    return self._get_attribute(key)

@Name.setter
def Name(self, value):
    key = self._SDM_ATT_MAP['Name']
    self._set_attribute(key, value)
@property
def NetworkType(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Network Type."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['NetworkType']
    return Multivalue(self, self._get_attribute(key))
@property
def NssaCapability(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Option bit 3."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['NssaCapability']
    return Multivalue(self, self._get_attribute(key))
@property
def Ospfv3IfaceState(self):
    """list(str[backup | down | dr | drOther | pointToPoint | unrecognized | waiting]): Logs additional information about the Interface State."""
    key = self._SDM_ATT_MAP['Ospfv3IfaceState']
    return self._get_attribute(key)
@property
def Ospfv3NeighborState(self):
    """list(str[attempt | down | exchange | exStart | full | init | loading | multiNeighbor | none | twoWay]): Logs additional information about the Neighbor State."""
    key = self._SDM_ATT_MAP['Ospfv3NeighborState']
    return self._get_attribute(key)
@property
def PFlag(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): P-Flag: Persistent Flag: If set, the SID is persistently allocated. The SID value remains consistent across router restart and session/interface flap."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['PFlag']
    return Multivalue(self, self._get_attribute(key))
@property
def Priority(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Priority (when DR/BDR)."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['Priority']
    return Multivalue(self, self._get_attribute(key))
@property
def Router(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Option bit 4."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['Router']
    return Multivalue(self, self._get_attribute(key))
@property
def SaId(self):
    """Multivalue (uhd_restpy.multivalue.Multivalue): Security Association ID."""
    from uhd_restpy.multivalue import Multivalue
    key = self._SDM_ATT_MAP['SaId']
    return Multivalue(self, self._get_attribute(key))
@property
def SessionInfo(self):
    """list(str[ifaceSessInfoAllNbrIn2Way | ifaceSessInfoAllNbrInattempt | ifaceSessInfoAllNbrInDown | ifaceSessInfoAllNbrInExchange | ifaceSessInfoAllNbrInExStart | ifaceSessInfoAllNbrInInit | ifaceSessInfoAllNbrInLoading | ifaceSessInfoFsmNotStarted | ifaceSessInfoSameNbrId | iPAddressNotRcvd | none]): Logs additional information about the session state."""
    key = self._SDM_ATT_MAP['SessionInfo']
    return self._get_attribute(key)
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | |
<reponame>jaipradeesh/PyRFC
# -*- coding: utf-8 -*-
# Copyright 2014 SAP AG.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: //www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import datetime
import unittest
from decimal import Decimal
import locale
import pytest
from pyrfc import *
from tests.config import (
CONNECTION_INFO,
RFC_MATH,
ABAP_to_python_date,
ABAP_to_python_time,
python_to_ABAP_date,
python_to_ABAP_time,
UNICODETEST,
BYTEARRAY_TEST,
BYTES_TEST,
)
client = Connection(**CONNECTION_INFO)
def test_structure_rejects_non_dict():
    """Passing a list where a structure (dict) parameter is expected raises TypeError.

    Bug fix: the original try/except passed silently when NO exception was
    raised at all; pytest.raises now fails the test in that case.
    """
    IMPORTSTRUCT = {"RFCINT1": "1"}
    with pytest.raises(TypeError) as excinfo:
        client.call("STFC_STRUCTURE", IMPORTSTRUCT=[IMPORTSTRUCT])
    ex = excinfo.value
    # Keep the original's exact-type check (no TypeError subclasses).
    assert type(ex) is TypeError
    assert ex.args[0] == 'dictionary required for structure parameter, received'
    if sys.version > "3.0":
        assert ex.args[1] == "<class 'list'>"
    else:
        assert ex.args[1] == "<type 'list'>"
    assert ex.args[2] == 'IMPORTSTRUCT'
def test_table_rejects_non_list():
    """Passing a dict where a table (list) parameter is expected raises TypeError.

    Bug fix: the original try/except passed silently when NO exception was
    raised at all; pytest.raises now fails the test in that case.
    """
    IMPORTSTRUCT = {"RFCINT1": "1"}
    with pytest.raises(TypeError) as excinfo:
        client.call("STFC_STRUCTURE", RFCTABLE=IMPORTSTRUCT)
    ex = excinfo.value
    # Keep the original's exact-type check (no TypeError subclasses).
    assert type(ex) is TypeError
    assert ex.args[0] == 'list required for table parameter, received'
    if sys.version > "3.0":
        assert ex.args[1] == "<class 'dict'>"
    else:
        assert ex.args[1] == "<type 'dict'>"
    assert ex.args[2] == 'RFCTABLE'
def test_basic_datatypes():
    """Floats, Decimals and numeric strings all round-trip through
    /COE/RBP_FE_DATATYPES: FLTP comes back as float, BCD fields as Decimal."""
    candidate_rows = [
        # Native Python floats
        dict(
            ZFLTP=0.123456789,
            ZDEC=12345.67,
            ZCURR=1234.56,
            ZQUAN=12.3456,
            ZQUAN_SIGN=-12.345,
        ),
        # Decimals
        dict(
            ZFLTP=Decimal("0.123456789"),
            ZDEC=Decimal("12345.67"),
            ZCURR=Decimal("1234.56"),
            ZQUAN=Decimal("12.3456"),
            ZQUAN_SIGN=Decimal("-12.345"),
        ),
        # Numeric strings
        dict(
            ZFLTP="0.123456789",
            ZDEC="12345.67",
            ZCURR="1234.56",
            ZQUAN="12.3456",
            ZQUAN_SIGN="-12.345",
        ),
    ]
    for row in candidate_rows:
        echoed = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=row)["ES_OUTPUT"]
        for field, sent in row.items():
            received = echoed[field]
            # ZFLTP maps to ABAP FLTP (float); every other field is Decimal.
            if field == "ZFLTP":
                assert type(received) is float
            else:
                assert type(received) is Decimal
            if type(sent) != type(received):
                # Mixed in/out types: compare the textual representation.
                assert str(sent) == str(received)
            else:
                assert sent == received
def test_string_unicode():
    """STFC_CONNECTION echoes unicode text back unchanged, short and long."""
    for probe in (u"Hällo SAP!", u"01234" * 51):
        echoed = client.call("STFC_CONNECTION", REQUTEXT=probe)["ECHOTEXT"]
        assert echoed == probe
def test_date_output():
    """BAPI_USER_GET_DETAIL returns non-empty last-modified date and time."""
    last_modified = client.call("BAPI_USER_GET_DETAIL", USERNAME="demo")["LASTMODIFIED"]
    assert len(last_modified["MODDATE"]) > 0
    assert len(last_modified["MODTIME"]) > 0
def test_min_max_positive():
    """Positive FLOAT/DECF16/DECF34 min/max constants survive the round trip.

    Bug fix: the original checked ZDECF16_MIN twice (type and value) and
    never verified ZDECF34_MIN at all — a copy/paste slip.
    """
    IS_INPUT = {
        # Float
        "ZFLTP_MIN": RFC_MATH["FLOAT"]["POS"]["MIN"],
        "ZFLTP_MAX": RFC_MATH["FLOAT"]["POS"]["MAX"],
        # Decimal
        "ZDECF16_MIN": RFC_MATH["DECF16"]["POS"]["MIN"],
        "ZDECF16_MAX": RFC_MATH["DECF16"]["POS"]["MAX"],
        "ZDECF34_MIN": RFC_MATH["DECF34"]["POS"]["MIN"],
        "ZDECF34_MAX": RFC_MATH["DECF34"]["POS"]["MAX"],
    }
    output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
        "ES_OUTPUT"
    ]
    # Floats come back as float, decimals as Decimal.
    assert type(output["ZFLTP_MIN"]) is float
    assert type(output["ZFLTP_MAX"]) is float
    assert type(output["ZDECF16_MIN"]) is Decimal
    assert type(output["ZDECF16_MAX"]) is Decimal
    assert type(output["ZDECF34_MIN"]) is Decimal
    assert type(output["ZDECF34_MAX"]) is Decimal
    assert float(IS_INPUT["ZFLTP_MIN"]) == output["ZFLTP_MIN"]
    assert float(IS_INPUT["ZFLTP_MAX"]) == output["ZFLTP_MAX"]
    assert Decimal(IS_INPUT["ZDECF16_MIN"]) == output["ZDECF16_MIN"]
    assert Decimal(IS_INPUT["ZDECF16_MAX"]) == output["ZDECF16_MAX"]
    assert Decimal(IS_INPUT["ZDECF34_MIN"]) == output["ZDECF34_MIN"]
    assert Decimal(IS_INPUT["ZDECF34_MAX"]) == output["ZDECF34_MAX"]
def test_min_max_negative():
    """Negative FLOAT/DECF16/DECF34 min/max constants survive the round trip.

    Bug fix: the original checked ZDECF16_MIN twice (type and value) and
    never verified ZDECF34_MIN at all — a copy/paste slip.
    """
    IS_INPUT = {
        # Float
        "ZFLTP_MIN": RFC_MATH["FLOAT"]["NEG"]["MIN"],
        "ZFLTP_MAX": RFC_MATH["FLOAT"]["NEG"]["MAX"],
        # Decimal
        "ZDECF16_MIN": RFC_MATH["DECF16"]["NEG"]["MIN"],
        "ZDECF16_MAX": RFC_MATH["DECF16"]["NEG"]["MAX"],
        "ZDECF34_MIN": RFC_MATH["DECF34"]["NEG"]["MIN"],
        "ZDECF34_MAX": RFC_MATH["DECF34"]["NEG"]["MAX"],
    }
    output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
        "ES_OUTPUT"
    ]
    # Floats come back as float, decimals as Decimal.
    assert type(output["ZFLTP_MIN"]) is float
    assert type(output["ZFLTP_MAX"]) is float
    assert type(output["ZDECF16_MIN"]) is Decimal
    assert type(output["ZDECF16_MAX"]) is Decimal
    assert type(output["ZDECF34_MIN"]) is Decimal
    assert type(output["ZDECF34_MAX"]) is Decimal
    assert float(IS_INPUT["ZFLTP_MIN"]) == output["ZFLTP_MIN"]
    assert float(IS_INPUT["ZFLTP_MAX"]) == output["ZFLTP_MAX"]
    assert Decimal(IS_INPUT["ZDECF16_MIN"]) == output["ZDECF16_MIN"]
    assert Decimal(IS_INPUT["ZDECF16_MAX"]) == output["ZDECF16_MAX"]
    assert Decimal(IS_INPUT["ZDECF34_MIN"]) == output["ZDECF34_MIN"]
    assert Decimal(IS_INPUT["ZDECF34_MAX"]) == output["ZDECF34_MAX"]
def test_bcd_floats_accept_floats():
    """Float inputs for BCD/float fields round-trip through the RFC call.

    ZFLTP echoes as ``float``; all decimal-backed fields echo as ``Decimal``
    with matching string and float representations.
    """
    IS_INPUT = {
        # Float
        "ZFLTP": 0.123456789,
        # Decimal
        "ZDEC": 12345.67,
        "ZDECF16_MIN": 12345.67,
        "ZDECF34_MIN": 12345.67,
        # Currency, Quantity
        "ZCURR": 1234.56,
        "ZQUAN": 12.3456,
        "ZQUAN_SIGN": -12.345,
    }
    output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
        "ES_OUTPUT"
    ]
    assert type(output["ZFLTP"]) is float
    assert IS_INPUT["ZFLTP"] == output["ZFLTP"]
    # every decimal-typed field gets the same three checks
    for field in ("ZDEC", "ZDECF16_MIN", "ZDECF34_MIN", "ZCURR", "ZQUAN", "ZQUAN_SIGN"):
        assert type(output[field]) is Decimal
        assert str(IS_INPUT[field]) == str(output[field])
        assert IS_INPUT[field] == float(output[field])
def test_bcd_floats_accept_strings():
    """String inputs for BCD/float fields round-trip through the RFC call."""
    IS_INPUT = {
        # Float
        "ZFLTP": "0.123456789",
        # Decimal
        "ZDEC": "12345.67",
        "ZDECF16_MIN": "12345.67",
        "ZDECF34_MIN": "12345.67",
        # Currency, Quantity
        "ZCURR": "1234.56",
        "ZQUAN": "12.3456",
        "ZQUAN_SIGN": "-12.345",
    }
    output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
        "ES_OUTPUT"
    ]
    assert type(output["ZFLTP"]) is float
    assert float(IS_INPUT["ZFLTP"]) == output["ZFLTP"]
    # decimal-typed fields echo as Decimal with the original string value
    for field in ("ZDEC", "ZDECF16_MIN", "ZDECF34_MIN", "ZCURR", "ZQUAN", "ZQUAN_SIGN"):
        assert type(output[field]) is Decimal
        assert IS_INPUT[field] == str(output[field])
# NOTE(review): disabled test, kept as a module-level string literal so pytest
# never collects it. It requires the "de_DE" locale to be installed on the
# test host -- presumably disabled because CI lacks that locale; confirm
# before re-enabling or deleting.
'''def test_bcd_floats_accept_strings_radix_comma():
    locale.setlocale(locale.LC_ALL, "de_DE")
    IS_INPUT = {
        # Float
        "ZFLTP": "0.123456789",
        # Decimal
        "ZDEC": "12345.67",
        "ZDECF16_MIN": "12345.67",
        "ZDECF34_MIN": "12345.67",
        # Currency, Quantity
        "ZCURR": "1234.56",
        "ZQUAN": "12.3456",
        "ZQUAN_SIGN": "-12.345",
    }
    output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
        "ES_OUTPUT"
    ]
    assert type(output["ZFLTP"]) is float
    assert float(IS_INPUT["ZFLTP"]) == output["ZFLTP"]
    assert type(output["ZDEC"]) is Decimal
    assert IS_INPUT["ZDEC"] == str(output["ZDEC"])
    assert type(output["ZDECF16_MIN"]) is Decimal
    assert IS_INPUT["ZDECF16_MIN"] == str(output["ZDECF16_MIN"])
    assert type(output["ZDECF34_MIN"]) is Decimal
    assert IS_INPUT["ZDECF34_MIN"] == str(output["ZDECF34_MIN"])
    assert type(output["ZCURR"]) is Decimal
    assert IS_INPUT["ZCURR"] == str(output["ZCURR"])
    assert type(output["ZQUAN"]) is Decimal
    assert IS_INPUT["ZQUAN"] == str(output["ZQUAN"])
    assert type(output["ZQUAN_SIGN"]) is Decimal
    assert IS_INPUT["ZQUAN_SIGN"] == str(output["ZQUAN_SIGN"])
'''
def test_bcd_floats_accept_decimals():
    """``decimal.Decimal`` inputs for BCD/float fields round-trip intact."""
    IS_INPUT = {
        # Float
        "ZFLTP": Decimal("0.123456789"),
        # Decimal
        "ZDEC": Decimal("12345.67"),
        "ZDECF16_MIN": Decimal("12345.67"),
        "ZDECF34_MIN": Decimal("12345.67"),
        # Currency, Quantity
        "ZCURR": Decimal("1234.56"),
        "ZQUAN": Decimal("12.3456"),
        "ZQUAN_SIGN": Decimal("-12.345"),
    }
    output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
        "ES_OUTPUT"
    ]
    assert type(output["ZFLTP"]) is float
    assert IS_INPUT["ZFLTP"] == Decimal(str(output["ZFLTP"]))
    # decimal-typed fields echo as Decimal with an equal value
    for field in ("ZDEC", "ZDECF16_MIN", "ZDECF34_MIN", "ZCURR", "ZQUAN", "ZQUAN_SIGN"):
        assert type(output[field]) is Decimal
        assert IS_INPUT[field] == Decimal(str(output[field]))
def test_raw_types_accept_bytes():
    """RAW fields accept ``bytes``: fixed-length ZRAW echoes zero-padded,
    variable-length ZRAWSTRING echoes unchanged; both return ``bytes``."""
    payload = BYTES_TEST
    padding = b"\x00\x00\x00\x00"
    output = client.call(
        "/COE/RBP_FE_DATATYPES",
        IS_INPUT={"ZRAW": payload, "ZRAWSTRING": payload},
        IV_COUNT=0,
    )["ES_OUTPUT"]
    assert output["ZRAW"] == payload + padding
    assert output["ZRAWSTRING"] == payload
    assert type(output["ZRAW"]) is bytes
    assert type(output["ZRAWSTRING"]) is bytes
def test_raw_types_accept_bytearray():
    """RAW fields accept ``bytearray`` input; output is always ``bytes``."""
    payload = BYTEARRAY_TEST
    padding = b"\x00\x00\x00\x00"
    output = client.call(
        "/COE/RBP_FE_DATATYPES",
        IS_INPUT={"ZRAW": payload, "ZRAWSTRING": payload},
        IV_COUNT=0,
    )["ES_OUTPUT"]
    assert output["ZRAW"] == payload + padding
    assert output["ZRAWSTRING"] == payload
    assert type(output["ZRAW"]) is bytes
    assert type(output["ZRAWSTRING"]) is bytes
def test_date_time():
    """STFC_STRUCTURE echoes valid dates/times and raises TypeError otherwise.

    The list is ordered: entries 1-6 are valid, entries 7-12 carry a bad
    RFCDATE, entries 13-18 a bad RFCTIME; the expected TypeError payload is
    selected by the 1-based position (``counter < 13`` -> date error).
    """
    DATETIME_TEST = [
        {"RFCDATE": "20161231", "RFCTIME": "123456"},  # good, correct date
        {"RFCDATE": "", "RFCTIME": "123456"},  # good, empty date
        {"RFCDATE": "        ", "RFCTIME": "123456"},  # good, space date
        {"RFCDATE": "20161231", "RFCTIME": ""},  # good, empty time
        {"RFCDATE": "20161231", "RFCTIME": "      "},  # good, space time
        {"RFCDATE": "20161231", "RFCTIME": "000000"},  # good, zero time
        {"RFCDATE": "2016123", "RFCTIME": "123456"},  # shorter date
        {"RFCDATE": "       ", "RFCTIME": "123456"},  # shorter empty date
        {"RFCDATE": "201612311", "RFCTIME": "123456"},  # longer date
        {"RFCDATE": "         ", "RFCTIME": "123456"},  # longer empty date
        {"RFCDATE": "20161232", "RFCTIME": "123456"},  # out of range date
        {"RFCDATE": 20161231, "RFCTIME": "123456"},  # wrong date type
        {"RFCDATE": "20161231", "RFCTIME": "12345"},  # shorter time
        {"RFCDATE": "20161231", "RFCTIME": "     "},  # shorter empty time
        {"RFCDATE": "20161231", "RFCTIME": "1234566"},  # longer time
        {"RFCDATE": "20161231", "RFCTIME": "       "},  # longer empty time
        {"RFCDATE": "20161231", "RFCTIME": "123466"},  # out of range time
        {"RFCDATE": "20161231", "RFCTIME": 123456},  # wrong time type
    ]
    # enumerate replaces the manual counter (was: counter = 0; counter += 1)
    for counter, dt in enumerate(DATETIME_TEST, start=1):
        try:
            result = client.call("STFC_STRUCTURE", IMPORTSTRUCT=dt)["ECHOSTRUCT"]
            assert dt["RFCDATE"] == result["RFCDATE"]
            if dt["RFCTIME"] == "":
                # an empty time is normalized to midnight
                assert "000000" == result["RFCTIME"]
            else:
                assert dt["RFCTIME"] == result["RFCTIME"]
        except Exception as e:
            assert type(e) is TypeError
            if counter < 13:
                assert e.args[0] == "date value required, received"
                assert e.args[1] == dt["RFCDATE"]
                assert e.args[3] == type(dt["RFCDATE"])
                assert e.args[4] == "RFCDATE"
            else:
                assert e.args[0] == "time value required, received"
                assert e.args[1] == dt["RFCTIME"]
                assert e.args[3] == type(dt["RFCTIME"])
                assert e.args[4] == "RFCTIME"
                assert e.args[5] == "IMPORTSTRUCT"
def test_date_accepts_string():
    """A date passed as a string is echoed back unchanged, as text."""
    TEST_DATE = u"20180625"
    struct_in = {"RFCDATE": TEST_DATE}
    output = client.call(
        "STFC_STRUCTURE", IMPORTSTRUCT=struct_in, RFCTABLE=[struct_in]
    )
    # py3 returns str; py2 returns unicode (lazily referenced, py2-only branch)
    expected_type = str if sys.version > "3.0" else unicode
    assert type(output["ECHOSTRUCT"]["RFCDATE"]) is expected_type
    assert type(output["RFCTABLE"][0]["RFCDATE"]) is expected_type
    assert output["ECHOSTRUCT"]["RFCDATE"] == TEST_DATE
    assert output["RFCTABLE"][0]["RFCDATE"] == TEST_DATE
def test_date_accepts_date():
    """A ``datetime.date`` input is serialized to an ABAP date string."""
    TEST_DATE = ABAP_to_python_date("20180625")
    struct_in = {"RFCDATE": TEST_DATE}
    output = client.call(
        "STFC_STRUCTURE", IMPORTSTRUCT=struct_in, RFCTABLE=[struct_in]
    )
    # py3 returns str; py2 returns unicode (lazily referenced, py2-only branch)
    expected_type = str if sys.version > "3.0" else unicode
    assert type(output["ECHOSTRUCT"]["RFCDATE"]) is expected_type
    assert type(output["RFCTABLE"][0]["RFCDATE"]) is expected_type
    expected = python_to_ABAP_date(TEST_DATE)
    assert output["ECHOSTRUCT"]["RFCDATE"] == expected
    assert output["RFCTABLE"][0]["RFCDATE"] == expected
def | |
from torch.nn import LSTM, Linear, BatchNorm1d, Parameter
import torch
import torch.nn as nn
import torch.nn.functional as F
class NoOp(nn.Module):
    """Identity module: forwards its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Return *x* as-is."""
        return x
class STFT(nn.Module):
    """Multichannel STFT front-end with a Hann window.

    Args:
        n_fft: FFT size (number of bins is ``n_fft // 2 + 1``).
        n_hop: hop length between frames.
        center: whether frames are centered (passed through to ``torch.stft``).
    """
    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        center=False
    ):
        super(STFT, self).__init__()
        # frozen parameter: moves with the module across devices, never trained
        self.window = nn.Parameter(
            torch.hann_window(n_fft),
            requires_grad=False
        )
        self.n_fft = n_fft
        self.n_hop = n_hop
        self.center = center

    def forward(self, x):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
        Output:(nb_samples, nb_channels, nb_bins, nb_frames, 2)
        """
        nb_samples, nb_channels, nb_timesteps = x.size()
        # merge nb_samples and nb_channels for multichannel stft
        x = x.reshape(nb_samples*nb_channels, -1)
        # compute stft with parameters as close as possible scipy settings.
        # Fix: torch.stft requires return_complex=True on modern PyTorch;
        # view_as_real restores the trailing (real, imag) axis of size 2 so
        # the output layout matches the original implementation.
        stft_f = torch.view_as_real(
            torch.stft(
                x,
                n_fft=self.n_fft, hop_length=self.n_hop,
                window=self.window, center=self.center,
                normalized=False, onesided=True,
                pad_mode='reflect',
                return_complex=True
            )
        )
        # reshape back to channel dimension
        stft_f = stft_f.contiguous().view(
            nb_samples, nb_channels, self.n_fft // 2 + 1, -1, 2
        )
        return stft_f
class Spectrogram(nn.Module):
    """Convert a real/imag STFT tensor into a (power) magnitude spectrogram."""

    def __init__(
        self,
        power=1,
        mono=False
    ):
        super(Spectrogram, self).__init__()
        self.power = power
        self.mono = mono

    def forward(self, stft_f):
        """
        Input: complex STFT
            (nb_samples, nb_bins, nb_frames, 2)
        Output: Power/Mag Spectrogram
            (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        spec = stft_f.transpose(2, 3)
        # |z|**power from the (real, imag) pair in the last axis
        spec = spec.pow(2).sum(-1).pow(self.power / 2.0)
        if self.mono:
            # downmix channels in the magnitude domain
            spec = torch.mean(spec, 1, keepdim=True)
        # permute output for LSTM convenience: frames first
        return spec.permute(2, 0, 1, 3)
class OpenUnmix_mtl(nn.Module):
    """Multi-task OpenUnmix: spectrogram masking plus a per-frame branch.

    The trunk is the OpenUnmix separator (fc1 -> LSTM with skip connection ->
    fc2/fc3), producing a non-negative mask that multiplies the input mix
    spectrogram. A second head (fc4/fc5) taps the LSTM skip connection and
    emits one sigmoid value per frame -- presumably an onset/activity
    probability; confirm against the training targets.
    """
    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2, #changed from stereo to mono
        sample_rate=44100, #changed sampling rate
        nb_layers=3,
        input_mean=None,
        input_scale=None,
        max_bin=None,
        unidirectional=False,
        power=1,
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
            (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl, self).__init__()
        # rFFT bin count; the output mask always covers all bins
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            # optionally bandwidth-limit the *input* bins
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # buffer, not a parameter: saved with the state dict, never trained
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # encoder: compress (channels x input bins) to hidden_size
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            # bidirectional halves per-direction size so concat == hidden_size
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # mask decoder: input is LSTM skip concat, i.e. 2*hidden_size features
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        # branched head: 2*hidden -> hidden//2 -> 1 sigmoid unit per frame
        self.fc4 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size//2,
            bias=False
        )
        self.bn4 = BatchNorm1d(hidden_size//2)
        self.fc5 = Linear(
            in_features=hidden_size//2,
            out_features=1,
            bias=False
        )
        self.bn5 = BatchNorm1d(1)
        # learnable input normalization, optionally seeded from dataset stats
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # learnable output de-normalization over the full bin range
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
    def forward(self, x):
        # check for waveform or spectrogram
        # transform to spectrogram if (nb_samples, nb_channels, nb_timesteps)
        # and reduce feature dimensions, therefore we reshape
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # keep the unnormalized mix; the network output acts as a mask on it
        mix = x.detach().clone()
        # crop
        x = x[..., :self.nb_bins]
        # shift and scale input to mean=0 std=1 (across all bins)
        x += self.input_mean
        x *= self.input_scale
        # to (nb_frames*nb_samples, nb_channels*nb_bins)
        # and encode to (nb_frames*nb_samples, hidden_size)
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        # normalize every instance in a batch
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # squash range ot [-1, 1]
        x = torch.tanh(x)
        # apply 3-layers of stacked LSTM
        lstm_out = self.lstm(x)
        # lstm skip connection
        x = torch.cat([x, lstm_out[0]], -1)
        #######Branched onset detection layers#############
        #first dense layer + batch norm
        y = self.fc4(x.reshape(-1, x.shape[-1]))
        y = self.bn4(y)
        y = F.relu(y)
        #second dense layer + batch norm
        y = self.fc5(y)
        y = self.bn5(y)
        y = torch.sigmoid(y)
        #Re-shape back to dims corresponding to num_frames and batch_size
        # NOTE(review): the flattened axis is ordered (nb_frames, nb_samples);
        # reshaping to (nb_samples, nb_frames, 1) reorders rather than
        # transposing -- confirm this matches the layout of the targets.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # first dense stage + batch norm
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        # second dense stage + layer norm
        x = self.fc3(x)
        x = self.bn3(x)
        # reshape back to original dim
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # apply output scaling
        x *= self.output_scale
        x += self.output_mean
        # since our output is non-negative, we can apply RELU
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_short(nn.Module):
    """Variant of OpenUnmix_mtl with a single-layer branched head.

    Identical trunk to ``OpenUnmix_mtl``; the second head is reduced to one
    linear layer (fc4: 2*hidden -> 1) followed by batch norm and sigmoid.
    """
    def __init__(
        self,
        n_fft=4096,
        n_hop=1024,
        input_is_spectrogram=False,
        hidden_size=512,
        nb_channels=2, #changed from stereo to mono
        sample_rate=44100, #changed sampling rate
        nb_layers=3,
        input_mean=None,
        input_scale=None,
        max_bin=None,
        unidirectional=False,
        power=1,
    ):
        """
        Input: (nb_samples, nb_channels, nb_timesteps)
            or (nb_frames, nb_samples, nb_channels, nb_bins)
        Output: Power/Mag Spectrogram
            (nb_frames, nb_samples, nb_channels, nb_bins)
        """
        super(OpenUnmix_mtl_short, self).__init__()
        # rFFT bin count; the output mask always covers all bins
        self.nb_output_bins = n_fft // 2 + 1
        if max_bin:
            # optionally bandwidth-limit the *input* bins
            self.nb_bins = max_bin
        else:
            self.nb_bins = self.nb_output_bins
        self.hidden_size = hidden_size
        self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
        self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
        # buffer, not a parameter: saved with the state dict, never trained
        self.register_buffer('sample_rate', torch.tensor(sample_rate))
        if input_is_spectrogram:
            self.transform = NoOp()
        else:
            self.transform = nn.Sequential(self.stft, self.spec)
        # encoder: compress (channels x input bins) to hidden_size
        self.fc1 = Linear(
            self.nb_bins*nb_channels, hidden_size,
            bias=False
        )
        self.bn1 = BatchNorm1d(hidden_size)
        if unidirectional:
            lstm_hidden_size = hidden_size
        else:
            # bidirectional halves per-direction size so concat == hidden_size
            lstm_hidden_size = hidden_size // 2
        self.lstm = LSTM(
            input_size=hidden_size,
            hidden_size=lstm_hidden_size,
            num_layers=nb_layers,
            bidirectional=not unidirectional,
            batch_first=False,
            dropout=0.4,
        )
        # mask decoder: input is LSTM skip concat, i.e. 2*hidden_size features
        self.fc2 = Linear(
            in_features=hidden_size*2,
            out_features=hidden_size,
            bias=False
        )
        self.bn2 = BatchNorm1d(hidden_size)
        self.fc3 = Linear(
            in_features=hidden_size,
            out_features=self.nb_output_bins*nb_channels,
            bias=False
        )
        self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
        # branched head: a single linear layer straight to one unit
        self.fc4 = Linear(
            in_features=hidden_size*2,
            out_features=1,
            bias=False
        )
        # self.bn4 = BatchNorm1d(hidden_size//2)
        # self.fc5 = Linear(
        #     in_features=hidden_size//2,
        #     out_features=1,
        #     bias=False
        # )
        self.bn4 = BatchNorm1d(1)
        # learnable input normalization, optionally seeded from dataset stats
        if input_mean is not None:
            input_mean = torch.from_numpy(
                -input_mean[:self.nb_bins]
            ).float()
        else:
            input_mean = torch.zeros(self.nb_bins)
        if input_scale is not None:
            input_scale = torch.from_numpy(
                1.0/input_scale[:self.nb_bins]
            ).float()
        else:
            input_scale = torch.ones(self.nb_bins)
        self.input_mean = Parameter(input_mean)
        self.input_scale = Parameter(input_scale)
        # learnable output de-normalization over the full bin range
        self.output_scale = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
        self.output_mean = Parameter(
            torch.ones(self.nb_output_bins).float()
        )
    def forward(self, x):
        # check for waveform or spectrogram
        # transform to spectrogram if (nb_samples, nb_channels, nb_timesteps)
        # and reduce feature dimensions, therefore we reshape
        x = self.transform(x)
        nb_frames, nb_samples, nb_channels, nb_bins = x.data.shape
        # keep the unnormalized mix; the network output acts as a mask on it
        mix = x.detach().clone()
        # crop
        x = x[..., :self.nb_bins]
        # shift and scale input to mean=0 std=1 (across all bins)
        x += self.input_mean
        x *= self.input_scale
        # to (nb_frames*nb_samples, nb_channels*nb_bins)
        # and encode to (nb_frames*nb_samples, hidden_size)
        x = self.fc1(x.reshape(-1, nb_channels*self.nb_bins))
        #x = self.fc1(x.reshape(-1, self.nb_bins))
        # normalize every instance in a batch
        x = self.bn1(x)
        x = x.reshape(nb_frames, nb_samples, self.hidden_size)
        # squash range ot [-1, 1]
        x = torch.tanh(x)
        # apply 3-layers of stacked LSTM
        lstm_out = self.lstm(x)
        # lstm skip connection
        x = torch.cat([x, lstm_out[0]], -1)
        #######Branched onset detection layers#############
        #first dense layer + batch norm
        y = self.fc4(x.reshape(-1, x.shape[-1]))
        y = self.bn4(y)
        # y = F.relu(y)
        # #second dense layer + batch norm
        # y = self.fc5(y)
        # y = self.bn5(y)
        y = torch.sigmoid(y)
        #Re-shape back to dims corresponding to num_frames and batch_size
        # NOTE(review): the flattened axis is ordered (nb_frames, nb_samples);
        # reshaping to (nb_samples, nb_frames, 1) reorders rather than
        # transposing -- confirm this matches the layout of the targets.
        y = y.reshape(nb_samples, nb_frames, 1)
        ###################################################
        # first dense stage + batch norm
        x = self.fc2(x.reshape(-1, x.shape[-1]))
        x = self.bn2(x)
        x = F.relu(x)
        # second dense stage + layer norm
        x = self.fc3(x)
        x = self.bn3(x)
        # reshape back to original dim
        x = x.reshape(nb_frames, nb_samples, nb_channels, self.nb_output_bins)
        # apply output scaling
        x *= self.output_scale
        x += self.output_mean
        # since our output is non-negative, we can apply RELU
        x = F.relu(x) * mix
        return x, y
class OpenUnmix_mtl_conv(nn.Module):
def __init__(
self,
n_fft=4096,
n_hop=1024,
input_is_spectrogram=False,
hidden_size=512,
nb_channels=2, #changed from stereo to mono
sample_rate=44100, #changed sampling rate
nb_layers=3,
input_mean=None,
input_scale=None,
max_bin=None,
unidirectional=False,
power=1,
):
"""
Input: (nb_samples, nb_channels, nb_timesteps)
or (nb_frames, nb_samples, nb_channels, nb_bins)
Output: Power/Mag Spectrogram
(nb_frames, nb_samples, nb_channels, nb_bins)
"""
super(OpenUnmix_mtl_conv, self).__init__()
self.nb_output_bins = n_fft // 2 + 1
if max_bin:
self.nb_bins = max_bin
else:
self.nb_bins = self.nb_output_bins
self.hidden_size = hidden_size
self.stft = STFT(n_fft=n_fft, n_hop=n_hop)
self.spec = Spectrogram(power=power, mono=(nb_channels == 1))
self.register_buffer('sample_rate', torch.tensor(sample_rate))
if input_is_spectrogram:
self.transform = NoOp()
else:
self.transform = nn.Sequential(self.stft, self.spec)
self.fc1 = Linear(
self.nb_bins*nb_channels, hidden_size,
bias=False
)
self.bn1 = BatchNorm1d(hidden_size)
if unidirectional:
lstm_hidden_size = hidden_size
else:
lstm_hidden_size = hidden_size // 2
self.lstm = LSTM(
input_size=hidden_size,
hidden_size=lstm_hidden_size,
num_layers=nb_layers,
bidirectional=not unidirectional,
batch_first=False,
dropout=0.4,
)
self.fc2 = Linear(
in_features=hidden_size*2,
out_features=hidden_size,
bias=False
)
self.bn2 = BatchNorm1d(hidden_size)
self.fc3 = Linear(
in_features=hidden_size,
out_features=self.nb_output_bins*nb_channels,
bias=False
)
self.bn3 = BatchNorm1d(self.nb_output_bins*nb_channels)
#New layers##
self.conv = torch.nn.Conv2d(1, 32 , kernel_size=(1,7), stride=1, padding=(0,3))
self.pool = torch.nn.AvgPool2d(kernel_size=(hidden_size*2,1))
self.fc4 = Linear(in_features=32, out_features=1, bias=False)
self.bn4 = BatchNorm1d(1)
###################
# self.fc4 = Linear(
# in_features=hidden_size*2,
# out_features=hidden_size//2,
# bias=False
# )
# self.bn4 = BatchNorm1d(hidden_size//2)
# self.fc5 = Linear(
# in_features=hidden_size//2,
# out_features=1,
# bias=False
# )
# | |
# WAE model
recon_error = Lambda(mean_reconstruction_l2sq_loss, name='mean_recon_error')([real_image, recon_image])
penalty_e = Lambda(get_qz_trick_loss, name='penalty_e')(q_e)
penalty_b = Lambda(get_b_penalty_loss, name='penalty_b',
arguments={'sigma':self.b_sd, 'zdim':self.b_z_dim, 'kernel':'RBF', 'p_z':'normal'})([prior_b_noise, b_given_x])
penalty_hsic = Lambda(get_hsic, name="penalty_hsic")([e_given_x_b, sample_b])
if self.feature_b: self.main_model = Model(inputs=[real_image, feature_for_b, cls_info, prior_b_noise],
outputs=[recon_error, penalty_e, penalty_b, penalty_hsic], name='main_model')
else: self.main_model = Model(inputs=[real_image, cls_info, prior_b_noise],
outputs=[recon_error, penalty_e, penalty_b, penalty_hsic], name='main_model')
# Blur information
prior_latent = Input(shape=(self.b_z_dim + self.e_z_dim,), name='prior_z_input', dtype='float32')
self.blurr_model = get_compute_blurriness_model(self.image_shape)
gen_image = self.decoder_model(prior_latent)
gen_sharpness = self.blurr_model(gen_image)
self.gen_blurr_model = Model(inputs=[prior_latent], outputs=[gen_sharpness], name='gen_blurr_model')
# cluster information
ssw, ssb, n_points_mean, n_l = Lambda(self._get_cluster_information_by_class_index,
name='get_cluster_information_by_class_index')([b_j_given_x_j, cls_info])
if self.feature_b:
self.cluster_info_model = Model(inputs=[feature_for_b, cls_info],
outputs=[ssw, ssb, n_points_mean, n_l], name='get_cluster_info')
else:
self.cluster_info_model = Model(inputs=[real_image, cls_info], outputs=[ssw, ssb, n_points_mean, n_l], name='get_cluster_info')
self.model_compile()
self.log.info('Loaded WAE model')
self.main_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
self.log.info('Loaded Discriminator model: GAN')
self.gan_model.summary(line_length=200, print_fn=self.log.info)
sys.stdout.flush()
def discriminator_sampler(self, x, y):
e_noise = self.noise_sampler(y.shape[0], self.e_z_dim, self.e_sd)
if self.feature_b:
return [x[0], x[1], y[:, np.newaxis], e_noise], [np.zeros([x[0].shape[0],2], dtype='float32')]
else:
return [x, y[:, np.newaxis], e_noise], [np.zeros([x.shape[0],2], dtype='float32')]
def main_sampler(self, x, y):
b_noise = self.noise_sampler(y.shape[0], self.b_z_dim, self.b_sd) #, dist='spherical_uniform')
if self.feature_b:
return [x[0], x[1], y[:,np.newaxis], b_noise], [np.zeros(x[0].shape[0], dtype='float32')]*4
else:
return [x, y[:,np.newaxis], b_noise], [np.zeros(x.shape[0], dtype='float32')]*4
def train_on_batch(self, x, y, sample_weight=None, class_weight=None, reset_metrics=True):
wx, wy = self.main_sampler(x, y)
main_outs = self.parallel_main_model.train_on_batch(wx, wy,
sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics)
dx, dy = self.discriminator_sampler(x, y)
if self.e_train: d_outs = self.parallel_gan_model.train_on_batch(dx, dy,
sample_weight=sample_weight, class_weight=class_weight, reset_metrics=reset_metrics)
else: d_outs = 0
return (main_outs +
[d_outs]
)
def test_on_batch(self, x, y, sample_weight=None, reset_metrics=True):
wx, wy = self.main_sampler(x, y)
main_outs = self.parallel_main_model.test_on_batch(wx, wy,
sample_weight=sample_weight, reset_metrics = reset_metrics)
dx, dy = self.discriminator_sampler(x, y)
if self.e_train: d_outs = self.parallel_gan_model.test_on_batch(dx, dy,
sample_weight=sample_weight, reset_metrics = reset_metrics)
else: d_outs = 0
return (main_outs +
[d_outs]
)
def get_reference_images(self, generator):
batches = [generator[i] for i in range(4)]
self.fixed_classes = np.concatenate([batch[1] for batch in batches])
if self.feature_b:
self.fixed_feature = np.concatenate([batch[0][1] for batch in batches])
return np.concatenate([batch[0][0] for batch in batches])
else:
return np.concatenate([batch[0] for batch in batches])
def on_train_begin(self, x):
self.fixed_images = x[self.fixed_classes == self.fixed_classes[0]]
if self.feature_b: self.fixed_feature = self.fixed_feature[self.fixed_classes == self.fixed_classes[0]]
self.fixed_classes = self.fixed_classes[self.fixed_classes == self.fixed_classes[0]]
real_image_blurriness = self.blurr_model.predict_on_batch(x)
self.fixed_noise = self.noise_sampler(x.shape[0], self.e_z_dim, self.e_sd)
self.log.info("Real image's sharpness = %.5f" % np.min(real_image_blurriness))
def on_epoch_end(self, epoch):
for name in self.train_models_lr.keys():
if self.train_models_lr[name]['decay'] > 0.:
self.train_models_lr[name]['lr'] = self._update_lr(epoch, lr=self.train_models_lr[name]['lr'],
decay=self.train_models_lr[name]['decay'])
k.set_value(self.parallel_train_models[name].optimizer.lr, self.train_models_lr[name]['lr'])
#####################################################################################################################
# ProductSpaceOAE using fixed b and HSIC GAN Network
#####################################################################################################################
class ProductSpaceOAEFixedBHSIC_GAN(WAE_GAN):
def __init__(self, log, path_info, network_info, n_label, is_profiling=False):
super(ProductSpaceOAEFixedBHSIC_GAN, self).__init__(log, path_info, network_info, n_label, is_profiling=is_profiling)
self.metrics_names = ['main_loss', 'reconstruction', 'penalty_e', 'penalty_hsic',
'discriminator_loss',
]
self.TB = ProductSpaceOAEFixedBTensorBoardWrapper_GAN
self.b_sd = float(network_info['model_info']['b_sd'])
self.lambda_hsic = float(network_info['model_info']['lambda_hsic'])
try: self.e_weight = float(network_info['model_info']['e_weight'])
except: self.e_weight = 1.
try: self.e_train = not ('false' == network_info['model_info']['e_train'].strip().lower())
except: self.e_train = True
try: self.reset_e = 'true' == network_info['model_info']['reset_e'].strip().lower()
except: self.reset_e = False
try: self.feature_b = 'true' == network_info['model_info']['feature_b'].strip().lower()
except: self.feature_b = False
try: self.fixed_b_path = network_info['training_info']['fixed_b_path'].strip()
except: raise ValueError("Need to set fixed_b_path")
    def build_model(self, model_yaml_dir=None, verbose=0):
        """
        Load the encoder/decoder/discriminator sub-models from YAML and wire
        the AE, GAN, main (WAE) and blurriness Keras models.

        verbose
            0: Not show any model
            1: Show AE, Discriminator model
            2: Show all models
        """
        # Load Models : encoder, decoder, discriminator
        if model_yaml_dir == None: model_yaml_dir = os.path.join(self.model_save_dir, self.path_info['model_info']['model_architecture'])
        self._encoder_base_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_base'], verbose=verbose==2)
        self._encoder_b_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_b'], verbose=verbose==2)
        self._encoder_e_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_encoder_e'], verbose=verbose==2)
        self.decoder_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_decoder'], verbose=verbose==2)
        self._discriminator_e_model = self._load_model(model_yaml_dir+self.path_info['model_info']['model_discriminator'], verbose=verbose==2)
        # sub-models that get checkpointed individually
        self.save_models = {"encoder_base":self._encoder_base_model,
                            "encoder_b":self._encoder_b_model,
                            "encoder_e":self._encoder_e_model,
                            "decoder":self.decoder_model,
                            "discriminator":self._discriminator_e_model
                           }
        self._discriminator_e_model.name = 'discriminator_e'
        # build blocks
        self.image_shape = self._encoder_base_model.input_shape[1:]
        if self.feature_b: self.feature_shape = self._encoder_b_model.input_shape[1:]
        self.b_z_dim = self._encoder_b_model.output_shape[-1]
        self.e_z_dim = self._encoder_e_model.output_shape[-1]
        # symbolic inputs shared by all assembled models
        real_image = Input(shape=self.image_shape, name='real_image_input', dtype='float32')
        cls_info = Input(shape=(1,), name='class_info_input', dtype='int32')
        prior_e_noise = Input(shape=(self.e_z_dim,), name='prior_e_input', dtype='float32')
        prior_b_noise = Input(shape=(self.b_z_dim,), name='prior_b_input', dtype='float32')
        b_input = Input(shape=(self.b_z_dim,), name='b_input', dtype='float32')
        if self.feature_b: feature_for_b = Input(shape=self.feature_shape, name='feature_b_input', dtype='float32')
        # Encoder base
        last_h = self._encoder_base_model([real_image])
        # B_i ~ Q_B|X=x^i
        if self.feature_b: b_j_given_x_j = self._encoder_b_model([feature_for_b])
        else: b_j_given_x_j = self._encoder_b_model([last_h])
        sample_b, b_given_x = Lambda(get_b, name='get_b_given_x')([b_j_given_x_j, cls_info])
        if self.feature_b: self.encoder_b_model = Model([feature_for_b, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
        else: self.encoder_b_model = Model([real_image, cls_info], [sample_b, b_given_x, b_j_given_x_j], name='encoder_b_model')
        # E^i_j ~Q_E_0|X_0,B=X^i_j,B_i
        e_given_x_b = self._encoder_e_model([last_h, b_input])
        self.encoder_e_model = Model([real_image, b_input], [e_given_x_b], name='encoder_e_model')
        # Z^i_j = (B_i, E^i_j)
        noise_input = Input(shape=(self.e_z_dim,), name='noise_input', dtype='float32')
        if self.e_weight != 1.: noise_weighted = Lambda(lambda x : self.e_weight*x, name='noise_weighted')(noise_input)
        else: noise_weighted = noise_input
        latent = Concatenate(axis=1, name='concat_latent')([b_input, noise_weighted])
        self.z_encoder_model = Model([b_input, noise_input], [latent], name='encoder_z_model')
        # Build connections
        e_given_x_b = self.encoder_e_model([real_image, b_input])
        fake_latent = self.z_encoder_model([b_input, e_given_x_b])
        recon_image = self.decoder_model(fake_latent)
        self.ae_model = Model(inputs=[real_image, b_input], outputs=[recon_image], name='ae_model')
        if verbose==2:
            self.log.info('Auto-Encoder model')
            self.ae_model.summary(line_length=200, print_fn=self.log.info)
            sys.stdout.flush()
        # GAN model
        p_e = self._discriminator_e_model(prior_e_noise)
        q_e = self._discriminator_e_model(e_given_x_b)
        output = Concatenate(name='mlp_concat')([p_e, q_e]) ## TODO : fix..
        self.gan_model = Model(inputs=[real_image, b_input, prior_e_noise], outputs=[output], name='GAN_model')
        # WAE model
        recon_error = Lambda(mean_reconstruction_l2sq_loss, name='mean_recon_error')([real_image, recon_image])
        penalty_e = Lambda(get_qz_trick_loss, name='penalty_e')(q_e)
        penalty_hsic = Lambda(get_hsic, name="penalty_hsic")([e_given_x_b, b_input])
        self.main_model = Model(inputs=[real_image, b_input, cls_info],
                                outputs=[recon_error, penalty_e, penalty_hsic], name='main_model')
        # Blur information
        prior_latent = Input(shape=(self.b_z_dim + self.e_z_dim,), name='prior_z_input', dtype='float32')
        self.blurr_model = get_compute_blurriness_model(self.image_shape)
        gen_image = self.decoder_model(prior_latent)
        gen_sharpness = self.blurr_model(gen_image)
        self.gen_blurr_model = Model(inputs=[prior_latent], outputs=[gen_sharpness], name='gen_blurr_model')
        if verbose==2:
            self.log.info('Generative sample blurr model')
            self.gen_blurr_model.summary(line_length=200, print_fn=self.log.info)
            sys.stdout.flush()
        # fall back to single-device models when multi_gpu_model is unavailable
        try:
            self.parallel_main_model = multi_gpu_model(self.main_model, gpus=self.number_of_gpu)
            self.parallel_gan_model = multi_gpu_model(self.gan_model, gpus=self.number_of_gpu)
            self.log.info("Training using multiple GPUs")
        except ValueError:
            self.parallel_main_model = self.main_model
            self.parallel_gan_model = self.gan_model
            self.log.info("Training using single GPU or CPU")
        self.train_models = {'discriminator':self.gan_model, 'main':self.main_model}
        self.parallel_train_models = {'discriminator':self.parallel_gan_model, 'main':self.parallel_main_model}
        self.train_models_lr = {'discriminator':{'lr':float(self.network_info['model_info']['lr_e_adv']),
                                                 'decay':float(self.network_info['model_info']['lr_e_adv_decay'])},
                                'main':{'lr':float(self.network_info['model_info']['lr_e']),
                                        'decay':float(self.network_info['model_info']['lr_e_decay'])}}
        if verbose:
            self.log.info('Main model')
            self.main_model.summary(line_length=200, print_fn=self.log.info)
            sys.stdout.flush()
            self.log.info('Discriminator model')
            self.gan_model.summary(line_length=200, print_fn=self.log.info)
            sys.stdout.flush()
    def model_compile(self, verbose=0):
        """Compile the discriminator (GAN) and main (WAE) training models.

        Builds one optimizer per model from ``network_info['model_info']``
        (Adam gets its ``beta_1`` read from config; any other optimizer is
        constructed with the learning rate only), optionally re-initializes
        the encoder/discriminator weights, then compiles the two parallel
        models with complementary ``trainable`` flags.

        Parameters
        ----------
        verbose : int
            If truthy, log a summary of each compiled parallel model.

        Notes
        -----
        Keras freezes the trainable status of each layer at compile time, so
        the order below matters: the flags set before
        ``parallel_gan_model.compile`` apply only to the GAN training step,
        and the flags set before ``parallel_main_model.compile`` apply only
        to the WAE training step.  Toggling the same sub-models twice is
        therefore intentional, not redundant.
        """
        self.log.info('Start models compile.')
        if self.network_info['model_info']['optimizer'] =='adam':
            # Adam: honor the configured beta_1 for each optimizer.
            optimizer_e = getattr(keras.optimizers,
                self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['main']['lr'],
                    beta_1=float(self.network_info['model_info']['lr_e_beta1']))
            optimizer_e_adv = getattr(keras.optimizers,
                self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['discriminator']['lr'],
                    beta_1=float(self.network_info['model_info']['lr_e_adv_beta1']))
        else:
            # Any other optimizer: construct with the learning rate only.
            optimizer_e = getattr(keras.optimizers,
                self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['main']['lr'])
            optimizer_e_adv = getattr(keras.optimizers,
                self.network_info['model_info']['optimizer'])(lr=self.train_models_lr['discriminator']['lr'])
        if self.reset_e:
            # Re-initialize encoder/discriminator weights before compiling.
            self.reset_weights(self._encoder_base_model)
            self.reset_weights(self._encoder_e_model)
            self.reset_weights(self._discriminator_e_model)
        # GAN model compile: only the discriminator may train (if e_train).
        self._encoder_b_model.trainable = False
        self._encoder_e_model.trainable = False
        self._encoder_base_model.trainable = False
        self.decoder_model.trainable = False
        self._discriminator_e_model.trainable = self.e_train
        self.parallel_gan_model.compile(loss=getattr(loss_and_metric, self.network_info['model_info']['discriminator_loss']),
            optimizer=optimizer_e_adv, options=self.run_options, run_metadata=self.run_metadata)
        # WAE model compile: decoder + encoders train, discriminator frozen.
        self.decoder_model.trainable = True
        self._encoder_b_model.trainable = False
        self._encoder_e_model.trainable = self.e_train
        self._encoder_base_model.trainable = self.e_train
        self._discriminator_e_model.trainable = False
        self.parallel_main_model.compile(loss={'mean_recon_error':getattr(loss_and_metric, self.network_info['model_info']['main_loss']),
            'penalty_e':getattr(loss_and_metric, self.network_info['model_info']['penalty_e']),
            'penalty_hsic':getattr(loss_and_metric, self.network_info['model_info']['penalty_b']),
            },
            # Weights: reconstruction 1.0, latent penalty lambda_e, HSIC lambda_hsic.
            loss_weights=[1., self.lambda_e, self.lambda_hsic],
            optimizer=optimizer_e, options=self.run_options, run_metadata=self.run_metadata)
        if verbose:
            for name, model in self.parallel_train_models.items():
                self.log.info('%s model' % name)
                model.summary(line_length=200, print_fn=self.log.info)
                sys.stdout.flush()
        self.log.info('Model compile done.')
def save(self, filepath, is_compile=True, overwrite=True, include_optimizer=True):
model_path = self.path_info['model_info']['weight']
for name, model in self.save_models.items():
model.save("%s/%s_%s" % (filepath, name, model_path), overwrite=overwrite, include_optimizer=include_optimizer)
self.log.debug('Save model at %s' % filepath)
def load(self, filepath, verbose=0):
model_path = self.path_info['model_info']['weight']
loss_list = [self.network_info['model_info']['main_loss'],
self.network_info['model_info']['penalty_e'],
self.network_info['model_info']['discriminator_loss']]
load_dict = dict([(loss_name, getattr(loss_and_metric, loss_name)) for loss_name in loss_list])
load_dict['SelfAttention2D'] = SelfAttention2D
load_dict['get_qz_trick_loss'] = get_qz_trick_loss
load_dict['get_qz_trick_with_weight_loss'] = get_qz_trick_with_weight_loss
load_dict['get_entropy_loss_with_logits'] = get_entropy_loss_with_logits
load_dict['mmd_penalty'] = mmd_penalty
load_dict['get_b'] = get_b
load_dict['get_b_estimation_var'] = get_b_estimation_var
load_dict['get_b_penalty_loss'] = get_b_penalty_loss
load_dict['mean_reconstruction_l2sq_loss'] = mean_reconstruction_l2sq_loss
load_dict['get_class_mean_by_class_index'] = get_class_mean_by_class_index
load_dict['concat_with_uniform_sample'] = concat_with_uniform_sample
load_dict['get_batch_covariance'] = get_batch_covariance
load_dict['get_mutual_information_from_gaussian_sample'] = get_mutual_information_from_gaussian_sample
# TODO : fix save & load
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_base", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path), overwrite=False)
self._encoder_base_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_base", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_b", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path), overwrite=False)
self._encoder_b_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_b", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "encoder_e", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path), overwrite=False)
self._encoder_e_model.load_weights("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "encoder_e", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "decoder", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "decoder", model_path), overwrite=False)
self.decoder_model.load_weights("%s/tmp_%s_%s" % (filepath, "decoder", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "decoder", model_path))
tmp_model = load_model("%s/%s_%s" % (filepath, "discriminator", model_path), custom_objects=load_dict)
tmp_model.save_weights("%s/tmp_%s_%s" % (filepath, "discriminator", model_path), overwrite=False)
self._discriminator_e_model.load_weights("%s/tmp_%s_%s" % (filepath, "discriminator", model_path), by_name=True)
os.remove("%s/tmp_%s_%s" % (filepath, "discriminator", model_path))
self._discriminator_e_model.name = 'discriminator_e'
# build blocks
self.image_shape = self._encoder_base_model.input_shape[1:]
if self.feature_b: self.feature_shape = self._encoder_b_model.input_shape[1:]
self.b_z_dim = self._encoder_b_model.output_shape[-1]
self.e_z_dim = self._encoder_e_model.output_shape[-1]
real_image = Input(shape=self.image_shape, name='real_image_input', dtype='float32')
cls_info = Input(shape=(1,), name='class_info_input', dtype='int32')
prior_e_noise = Input(shape=(self.e_z_dim,), name='prior_e_input', dtype='float32')
prior_b_noise = Input(shape=(self.b_z_dim,), name='prior_b_input', dtype='float32')
b_input = Input(shape=(self.b_z_dim,), name='b_input', dtype='float32')
if self.feature_b: feature_for_b = Input(shape=self.feature_shape, name='feature_b_input', dtype='float32')
# Encoder base
last_h = self._encoder_base_model([real_image])
# B_i ~ Q_B|X=x^i
if self.feature_b: b_j_given_x_j = self._encoder_b_model([feature_for_b])
else: b_j_given_x_j = self._encoder_b_model([last_h])
sample_b, b_given_x = Lambda(get_b, name='get_b_given_x')([b_j_given_x_j, cls_info])
if self.feature_b: | |
<reponame>egor5q/OlgaDmitrievna
# -*- coding: utf-8 -*-
import os
import telebot
import time
import telebot
import random
import threading
from emoji import emojize
from telebot import types
from pymongo import MongoClient
import traceback
import re
import apiai
import json
import config
from requests.exceptions import ReadTimeout
from requests.exceptions import ConnectionError
# --- Bot instances -----------------------------------------------------------
# One telebot.TeleBot per camp character; every token is read from the
# environment.  A missing variable raises KeyError at startup (fail fast).
token = os.environ['TELEGRAM_TOKEN']
world = os.environ['worldtoken']
bot = telebot.TeleBot(token)
# NOTE: `world` is rebound here from the raw token string to the bot object.
world = telebot.TeleBot(world)
alisa = telebot.TeleBot(os.environ['alisa'])
miku = telebot.TeleBot(os.environ['miku'])
lena = telebot.TeleBot(os.environ['lena'])
slavya = telebot.TeleBot(os.environ['slavya'])
uliana = telebot.TeleBot(os.environ['uliana'])
electronic = telebot.TeleBot(os.environ['electronic'])
zhenya = telebot.TeleBot(os.environ['zhenya'])
tolik = telebot.TeleBot(os.environ['tolik'])
shurik = telebot.TeleBot(os.environ['shurik'])
semen = telebot.TeleBot(os.environ['semen'])
pioneer = telebot.TeleBot(os.environ['pioneer'])
yuriy = telebot.TeleBot(os.environ['yuriy'])
alexandr = telebot.TeleBot(os.environ['alexandr'])
vladislav = telebot.TeleBot(os.environ['vladislav'])
samanta = telebot.TeleBot(os.environ['samanta'])
vasiliyhait = telebot.TeleBot(os.environ['vasiliyhait'])
viola=telebot.TeleBot(os.environ['viola'])
yuliya=telebot.TeleBot(os.environ['yuliya'])
evillena = telebot.TeleBot(os.environ['evillena'])
monster = telebot.TeleBot(os.environ['monster'])
sayori = telebot.TeleBot(os.environ['sayori'])
yuri = telebot.TeleBot(os.environ['yuri'])
monika = telebot.TeleBot(os.environ['monika'])
natsuki = telebot.TeleBot(os.environ['natsuki'])
liza = telebot.TeleBot(os.environ['liza'])
# Startup ping to the owner account (441399484).
bot.send_message(441399484, 'Work')
# NOTE: this int is shadowed below by the `cday` Mongo collection.
cday=1
# Time-of-day progression for the role-play cycle, in order.
times=['Время до линейки', 'Линейка', 'Завтрак', 'Время после завтрака', 'Обед', 'Время после обеда', 'Ужин', 'Время после ужина (вечер)', 'Ночь']
# User ids allowed to vote on /change_time.
rp_players=[441399484, 652585389, 737959649, 638721729, 438090820]
# chat_id -> running /combinations state (see createcombo / cycle).
counts = {}
# SECURITY NOTE(review): this dumps the entire environment — including all
# bot tokens and the database URI — into a chat message and stdout.
bot.send_message(441399484, str(os.environ))
print(os.environ)
# --- MongoDB -----------------------------------------------------------------
client1 = os.environ['database']
client = MongoClient(client1)
db = client.everlastingsummer
users = db.users
thunder = db.thunder
thunder_variables = db.thunder_variables
ban = db.ban
cday=db.cday
ctime_rp=db.ctime
nowrp=False
# Seed singleton documents on first run.
if ctime_rp.find_one({})==None:
    ctime_rp.insert_one({'ctime_rp':times[0]})
if cday.find_one({})==None:
    cday.insert_one({'cday':1})
mainchat = -1001351496983
rpchats=[]
# In-memory vote tallies for the /change_time poll.
accept=[]
decline=[]
def neiro(m, pioner):
    """Route a chat message to the DialogFlow (api.ai) agent of the Alisa bot.

    The message is forwarded only when one of these holds: it replies to the
    Alisa account (id 634115873), it arrives in a private chat
    (``from_user.id == chat.id``), or it mentions the name.  The reply is
    currently fetched but NOT sent back — both send calls are commented out.
    """
    # Only the 'alisa' bot has a DialogFlow agent wired up.
    if pioner != alisa:
        return
    allow = False
    if m.reply_to_message != None:
        if m.reply_to_message.from_user.id == 634115873:
            allow = True
    if m.from_user.id == m.chat.id:
        allow = True
    if 'алиса' in m.text.lower():
        allow = True
    if allow == False:
        return
    req = apiai.ApiAI(os.environ['apiai_alisa']).text_request()
    req.lang = 'ru'
    req.session_id = 'Alisa_id'
    req.query = m.text
    responseJson = json.loads(req.getresponse().read().decode('utf-8'))
    response = responseJson['result']['fulfillment']['speech']
    print(responseJson)
    # NOTE(review): dead code — `answ` is always None, so the branch below
    # never fires; kept as-is because this function's output is disabled.
    answ = None
    if answ != None:
        response = answ
    if response:
        pass
        # pioner.send_message(m.chat.id, response)
    else:
        not_understand = ['Я тебя не понимаю! Говори понятнее!', 'Прости, не понимаю тебя.', 'Я тебя не поняла!']
        txt = random.choice(not_understand)
        #pioner.send_message(m.chat.id, txt, reply_to_message_id = m.message_id)
@monika.message_handler(commands=['stopcombo'])
def comboss(m):
    """/stopcombo — cancel the running permutation generator for this chat."""
    try:
        # KeyError here means no generator is active for this chat.
        del counts[m.chat.id]
        monika.send_message(m.chat.id, 'Рассчёт окончен!')
    except:
        monika.send_message(m.chat.id, 'Пересчёт не запущен!')
@monika.message_handler(commands=['combinations'])
def comboss(m):
    """/combinations <word> — start emitting permutations of *word*.

    NOTE(review): this function name shadows the /stopcombo handler above;
    harmless only because telebot stores the function at decoration time.
    """
    if m.chat.id in counts:
        monika.send_message(m.chat.id, 'В этом чате уже идёт пересчёт комбинаций!')
        return
    try:
        # IndexError when no argument was supplied after the command.
        word = m.text.split('/combinations ')[1]
    except:
        monika.send_message(m.chat.id, 'Ошибка!')
        return
    counts.update(createcombo(word.lower(), m.chat.id))
def factorial(x):
    """Return x! computed iteratively; any x <= 1 (including negatives) yields 1."""
    result = 1
    for k in range(2, x + 1):
        result *= k
    return result
def createcombo(word, id):
    """Build the per-chat permutation-generator state, keyed by chat id.

    'limit' is the total number of distinct orderings (len(word)! — letters
    are treated as positional, so repeated letters still count separately).
    """
    state = {
        'id': id,
        'already': [],
        'word': word,
        'limit': factorial(len(word)),
    }
    return {id: state}
def cycle():
    """Background worker: every 2.5 s emit one fresh permutation per chat.

    Re-schedules itself via threading.Timer, then for each active
    /combinations session picks a random ordering not yet sent, sends it,
    and tears the session down once all `limit` orderings are exhausted.
    Failures to send are retried on the next tick after a sleep.
    """
    # Schedule the next tick first so one bad iteration can't stop the loop.
    nxt = False
    while not nxt:
        try:
            threading.Timer(2.5, cycle).start()
            nxt = True
        except:
            pass
    try:
        dellist = []
        for ids in counts:
            letters = []
            c = counts[ids]
            for idss in c['word']:
                letters.append(idss)
            # Draw a random permutation by removing random letters one by one.
            curlet = letters.copy()
            neww = ''
            while len(curlet) > 0:
                x = random.choice(curlet)
                neww += x
                curlet.remove(x)
            if len(c['already']) != c['limit']:
                # Re-draw until we hit a permutation not sent before.
                while neww in c['already']:
                    curlet = letters.copy()
                    neww = ''
                    while len(curlet) > 0:
                        x = random.choice(curlet)
                        neww += x
                        curlet.remove(x)
                try:
                    monika.send_message(c['id'], neww.title())
                    c['already'].append(neww)
                except:
                    # Send failed (rate limit / network): back off, retry next tick.
                    time.sleep(10)
            else:
                # Every ordering was produced — announce and mark for removal.
                try:
                    monika.send_message(c['id'], 'Всё!')
                    dellist.append(c['id'])
                except:
                    time.sleep(5)
        # Delete finished sessions outside the iteration over `counts`.
        for ids in dellist:
            del counts[ids]
    except:
        # Last-resort: report any unexpected error to the owner account.
        try:
            world.send_message(441399484, traceback.format_exc())
        except:
            pass
cycle()
@bot.message_handler(commands=['id'])
def iddd(m):
    """/id — reply with the user id of the message being replied to."""
    config.about(m, bot)
    try:
        if m.reply_to_message!=None:
            user=m.reply_to_message.from_user
            # Backticks + markdown make the id tap-to-copy in Telegram.
            bot.send_message(m.chat.id, 'id выбранного пользователя:\n'+'`'+str(user.id)+'`',reply_to_message_id=m.message_id,parse_mode='markdown')
        else:
            bot.send_message(m.chat.id, 'Чтобы узнать id пользователя, введите эту команду, ответив на его сообщение.')
    except:
        pass
@bot.message_handler(commands=['change_time'])
def change_time(m):
    """/change_time — open a vote on advancing the RP time of day.

    Restricted to one hard-coded chat and to members of `rp_players`;
    the vote itself is counted in the `inline` callback handler.
    """
    config.about(m, bot)
    if m.chat.id==-1001425303036:
        if m.from_user.id in rp_players:
            kb=types.InlineKeyboardMarkup()
            kb.add(types.InlineKeyboardButton(text='Я за!', callback_data='accept'))
            kb.add(types.InlineKeyboardButton(text='Я против!', callback_data='decline'))
            bot.send_message(m.chat.id, m.from_user.first_name+' считает, что пора менять время суток!', reply_markup=kb)
@bot.message_handler(commands=['currenttime'])
def currenttime(m):
    """/currenttime — report the current RP day and time of day from Mongo."""
    config.about(m, bot)
    ct=ctime_rp.find_one({})
    cd=str(cday.find_one({})['cday'])
    bot.send_message(m.chat.id, 'Текущий день: *'+cd+'*.\n'+'Текущее время: *'+ct['ctime_rp']+'*.', parse_mode='markdown')
@bot.callback_query_handler(func=lambda call:True)
def inline(call):
    """Count /change_time votes; 3 votes either way decides the poll.

    'accept' with 3 votes advances `ctime_rp` to the next entry of `times`
    (rolling over from 'Ночь' to the first entry and incrementing the day);
    'decline' with 3 votes cancels.  Each rp_player may vote once per poll.
    """
    if call.from_user.id in rp_players:
        if call.data=='accept':
            if call.from_user.id not in accept:
                accept.append(call.from_user.id)
                bot.answer_callback_query(call.id, 'Ваш голос учтён!')
                if len(accept)>=3:
                    ct=ctime_rp.find_one({})
                    # Find the index of the current time-of-day in `times`.
                    i=0
                    while ct['ctime_rp']!=times[i]:
                        i+=1
                    if ct['ctime_rp']=='Ночь':
                        # Last slot: wrap to morning and start a new day.
                        cday.update_one({},{'$inc':{'cday':1}})
                        ctime_rp.update_one({},{'$set':{'ctime_rp':times[0]}})
                    else:
                        ctime_rp.update_one({},{'$set':{'ctime_rp':times[i+1]}})
                    medit('Время суток изменено!', call.message.chat.id, call.message.message_id)
                    accept.clear()
                    decline.clear()
            else:
                bot.answer_callback_query(call.id, 'Вы уже голосовали!')
        else:
            if call.from_user.id not in decline:
                decline.append(call.from_user.id)
                bot.answer_callback_query(call.id, 'Ваш голос учтён!')
                if len(decline)>=3:
                    medit('3 человека проголосовало против смены времени!', call.message.chat.id, call.message.message_id)
                    accept.clear()
                    decline.clear()
            else:
                bot.answer_callback_query(call.id, 'Вы уже голосовали!')
# Canned affirmative/negative replies used elsewhere in the script.
yestexts = ['хорошо, ольга дмитриевна!', 'хорошо!', 'я этим займусь!', 'я готов!', 'я готова!']
notexts = ['простите, но у меня уже появились дела.']
botadmins = [441399484]
# Per-character admin id lists; all disabled (the live values now come from
# the `admins` Mongo collection seeded below).
el_admins = []#[574865060, 524034660]
al_admins = []#[512006137, 737959649]
ul_admins = []#[851513241]
mi_admins = []#[268486177]
le_admins = []#[60727377, 851513241]
sl_admins = []#[851513241]
od_admins = []#[629070350, 512006137, 850666493]
zh_admins = []#[390362465]
to_admins = []#[414374606]
sh_admins = []#[574865060]
se_admins = []#[851513241, 737959649]
pi_admins = []#[512006137]
def createadmin(pioner, id=441399484):
    """Build a fresh admins-collection document for record name *pioner*.

    The record carries its own name both as the 'name' field and as a key
    holding the list of admin user ids; 'controller' starts unset.
    """
    return {pioner: [id], 'name': pioner, 'controller': None}
def medit(message_text, chat_id, message_id, reply_markup=None, parse_mode=None):
    """Shorthand wrapper around bot.edit_message_text with keyword plumbing."""
    return bot.edit_message_text(chat_id=chat_id, message_id=message_id, text=message_text, reply_markup=reply_markup,
                                 parse_mode=parse_mode)
admins = db.admins

# Seed a default admin record for every character bot the first time the
# database is used.  The 26 copy-pasted if-blocks this replaces all did the
# same thing; insertion order is preserved.  All records default to the
# owner id except the two "evil" variants.
_DEFAULT_ADMINS = [
    ('evl_admins', 496583701),
    ('mns_admins', 496583701),
    ('sayori_admins', 441399484),
    ('yuri_admins', 441399484),
    ('natsuki_admins', 441399484),
    ('monika_admins', 441399484),
    ('ul_admins', 441399484),
    ('le_admins', 441399484),
    ('to_admins', 441399484),
    ('al_admins', 441399484),
    ('od_admins', 441399484),
    ('zh_admins', 441399484),
    ('sh_admins', 441399484),
    ('el_admins', 441399484),
    ('sl_admins', 441399484),
    ('mi_admins', 441399484),
    ('pi_admins', 441399484),
    ('se_admins', 441399484),
    ('yu_admins', 441399484),
    ('ale_admins', 441399484),
    ('vl_admins', 441399484),
    ('sa_admins', 441399484),
    ('va_admins', 441399484),
    ('vi_admins', 441399484),
    ('yul_admins', 441399484),
    ('li_admins', 441399484),
]
for _admin_name, _admin_owner in _DEFAULT_ADMINS:
    if admins.find_one({'name': _admin_name}) is None:
        admins.insert_one(createadmin(_admin_name, _admin_owner))
# User ids whose messages are ignored (managed at runtime via /ignore).
ignorelist = []
# Global toggle flipped by /switch.
rds = True
# Camp chores: 'value' tracks assignment state (0 = free), 'lvl' is difficulty;
# see lvlsort() and worktoquest() for how these are consumed.
works = [
    {'name': 'concertready',
     'value': 0,
     'lvl': 1
     },
    {'name': 'sortmedicaments',
     'value': 0,
     'lvl': 2
     },
    {'name': 'checkpionerssleeping',
     'value': 0,
     'lvl': 1
     },
    {'name': 'pickberrys',
     'value': 0,
     'lvl': 2
     },
    {'name': 'bringfoodtokitchen',
     'value': 0,
     'lvl': 2
     },
    {'name': 'helpinmedpunkt',
     'value': 0,
     'lvl': 1
     },
    {'name': 'helpinkitchen',
     'value': 0,
     'lvl': 2
     },
    {'name': 'cleanterritory',
     'value': 0,
     'lvl': 3
     },
    {'name': 'washgenda',
     'value': 0,
     'lvl': 3
     }
]
def createban(id):
    """Build a minimal ban-collection document for user *id*."""
    return dict(id=id)
# Seed a permanent ban record for one known id on first start.
if ban.find_one({'id': 617640951}) == None:
    ban.insert_one(createban(617640951))
@world.message_handler(commands=['do'])
def do(m):
    """/do <expr> — owner-only remote evaluation hook.

    SECURITY: eval() of arbitrary chat text.  Tolerable only because the
    sender id is checked against the owner account first; never widen the
    id check without removing the eval.
    """
    config.about(m, world)
    try:
        if m.from_user.id == 441399484:
            cmd = m.text.split('/do ')[1]
            try:
                eval(cmd)
                world.send_message(m.chat.id, 'Success')
            except:
                # Report the traceback privately to the owner.
                world.send_message(441399484, traceback.format_exc())
    except:
        pass
@world.message_handler(commands=['rp'])
def rp(m):
    """/rp — owner-only toggle of the global `nowrp` flag."""
    config.about(m, world)
    if m.from_user.id==441399484:
        global nowrp
        if nowrp==True:
            nowrp=False
        else:
            nowrp=True
        world.send_message(m.chat.id, 'now '+str(nowrp))
@bot.message_handler(commands=['see'])
def see(m):
    """/see — owner-only: dump the raw replied-to message object for debugging."""
    config.about(m, bot)
    if m.from_user.id==441399484:
        try:
            bot.send_message(m.chat.id, str(m.reply_to_message))
        except:
            bot.send_message(441399484, traceback.format_exc())
@bot.message_handler(commands=['ignore'])
def ignore(m):
    """/ignore <id> — owner-only: add a positive user id to `ignorelist`."""
    config.about(m, bot)
    if m.from_user.id == 441399484:
        try:
            # ValueError/IndexError (bad or missing argument) is silently dropped.
            x = int(m.text.split(' ')[1])
            if x > 0:
                ignorelist.append(x)
                bot.send_message(m.chat.id, 'Теперь айди ' + str(x) + ' игнорится!')
        except:
            pass
@world.message_handler(commands=['switch'])
def do(m):
    """/switch — owner-only toggle of the global `rds` flag.

    NOTE: the function name shadows the /do handler; harmless because
    telebot captures the function object at decoration time.
    """
    config.about(m, world)
    if m.from_user.id != 441399484:
        return
    global rds
    rds = not rds
    world.send_message(m.chat.id, 'now True' if rds else 'now False')
def worktoquest(work):
    """Translate an internal work id into its human-readable quest text.

    Returns None for an unknown id (same as the original if-chain falling
    through without a match).
    """
    quests = {
        'concertready': 'Подготовиться к вечернему концерту',
        'sortmedicaments': 'Отсортировать лекарства в медпункте',
        'checkpionerssleeping': 'На вечер - проследить за тем, чтобы в 10 часов все были в домиках',
        'pickberrys': 'Собрать ягоды для торта',
        'bringfoodtokitchen': 'Принести на кухню нужные ингридиенты',
        'helpinmedpunkt': 'Последить за медпунктом, пока медсестры не будет',
        'helpinkitchen': 'Помочь с приготовлением еды на кухне',
        'cleanterritory': 'Подмести территорию лагеря',
        'washgenda': 'Помыть памятник на главной площади',
    }
    return quests.get(work)
def lvlsort(x):
    """Return the names of all unassigned works (value == 0) of level *x*,
    in their order of appearance in the module-level `works` list."""
    return [entry['name'] for entry in works
            if entry['lvl'] == x and entry['value'] == 0]
def statfind(pioner):
    """Map a character bot instance to its admins-collection record name.

    Returns None when *pioner* is not a known bot (matching the original
    if-chain, which left `stats` unset in that case).
    """
    # telebot.TeleBot does not define __eq__, so == compares identity here —
    # exactly what the original chain of `if pioner == <bot>` did.
    bot_to_stats = (
        (uliana, 'ul_admins'),
        (lena, 'le_admins'),
        (tolik, 'to_admins'),
        (alisa, 'al_admins'),
        (bot, 'od_admins'),
        (zhenya, 'zh_admins'),
        (shurik, 'sh_admins'),
        (electronic, 'el_admins'),
        (slavya, 'sl_admins'),
        (miku, 'mi_admins'),
        (pioneer, 'pi_admins'),
        (semen, 'se_admins'),
        (yuriy, 'yu_admins'),
        (alexandr, 'ale_admins'),
        (vladislav, 'vl_admins'),
        (samanta, 'sa_admins'),
        (vasiliyhait, 'va_admins'),
        (viola, 'vi_admins'),
        (yuliya, 'yul_admins'),
        (monster, 'mns_admins'),
        (evillena, 'evl_admins'),
        (sayori, 'sayori_admins'),
        (monika, 'monika_admins'),
        (natsuki, 'natsuki_admins'),
        (yuri, 'yuri_admins'),
        (liza, 'li_admins'),
    )
    for candidate, stats in bot_to_stats:
        if pioner == candidate:
            return stats
    return None
def stickhandler(m, pioner):
    """Relay a sticker from a character's current controller as that bot.

    If the sender is the registered controller of *pioner* (per the admins
    collection) and not banned, the original message is deleted and the
    sticker is re-sent by the character bot, preserving the reply target.
    """
    config.about(m, pioner)
    if ban.find_one({'id': m.from_user.id}) == None:
        stats=statfind(pioner)
        adm=admins.find_one({'name':stats})
        if adm == None:
            # Unknown record name — report it to the owner for debugging.
            bot.send_message(441399484, stats)
            return
        if adm['controller'] != None:
            controller = adm['controller']
            if m.from_user.id == controller['id']:
                if m.reply_to_message == None:
                    # Delete the controller's own message; fall back to the
                    # monika bot if the main bot lacks delete rights here.
                    try:
                        bot.delete_message(m.chat.id, m.message_id)
                    except:
                        try:
                            monika.delete_message(m.chat.id, m.message_id)
                        except:
                            pass
                    pioner.send_sticker(m.chat.id, m.sticker.file_id)
                else:
                    try:
                        bot.delete_message(m.chat.id, m.message_id)
                    except:
                        try:
                            monika.delete_message(m.chat.id, m.message_id)
                        except:
                            pass
                    pioner.send_sticker(m.chat.id, m.sticker.file_id, reply_to_message_id=m.reply_to_message.message_id)
def dochandler(m, pioner):
    """Relay a document from a character's current controller as that bot.

    Same flow as stickhandler(): verify the sender is the registered
    controller and not banned, delete the original, re-send the document
    from the character bot, preserving the reply target.
    """
    config.about(m, pioner)
    if ban.find_one({'id': m.from_user.id}) == None:
        stats=statfind(pioner)
        adm=admins.find_one({'name':stats})
        if adm == None:
            # Unknown record name — report it to the owner for debugging.
            bot.send_message(441399484, stats)
            return
        if adm['controller'] != None:
            controller = adm['controller']
            if m.from_user.id == controller['id']:
                if m.reply_to_message == None:
                    try:
                        bot.delete_message(m.chat.id, m.message_id)
                    except:
                        try:
                            monika.delete_message(m.chat.id, m.message_id)
                        except:
                            pass
                    pioner.send_document(m.chat.id, m.document.file_id)
                else:
                    try:
                        bot.delete_message(m.chat.id, m.message_id)
                    except:
                        try:
                            monika.delete_message(m.chat.id, m.message_id)
                        except:
                            pass
                    pioner.send_document(m.chat.id, m.document.file_id, reply_to_message_id=m.reply_to_message.message_id)
def pichandler(m, pioner):
config.about(m, pioner)
if ban.find_one({'id': m.from_user.id}) == | |
<filename>_base/_imf_decomposition/_emd.py
import numpy as np
import scipy
import scipy.interpolate #import Akima1DInterpolator, Rbf, InterpolatedUnivariateSpline, BSpline
import scipy.signal  # needed: `import scipy` alone does not load scipy.signal (hilbert, argrelmax)
def emd(x, order, method='cubic', max_itter=100, tol=0.1):
    '''
    Empirical Mode Decomposition (EMD).

    The empirical mode decomposition is a nonlinear time-domain
    decomposition into so-called intrinsic mode functions (IMF), based on
    the idea that each component can be reconstructed via its envelope.

    Parameters
    ----------------
    * x: 1d ndarray.
    * order: int,
        number of IMFs (without remainder).
    * method: string,
        method of spline approximation:
        method = {cubic, akim, rbf, linear, thin_plate}.
    * max_itter: int,
        maximum number of iterations to search for each IMF.
    * tol: float,
        tolerance on the variance of the IMF change between iterations.

    Returns
    ---------------
    * imfs: 2d ndarray,
        intrinsic mode functions, shape = (order, x.shape[0]).

    References
    -----------------
    [1] Huang et al., "The empirical mode decomposition and the Hilbert
        spectrum for nonlinear and non-stationary time series analysis",
        Proc. R. Soc. Lond. A, vol. 454, no. 1971, 903-995, (1998).
    [2] "Hilbert-Huang transform and its applications",
        vol. 16. World Scientific, 2014.
    [3] "Ensemble empirical mode decomposition: A noise-assisted data
        analysis method", Adv. Adapt. Data Anal., vol. 1, no. 1, 1-41 (2008).
    [4] "Partly ensemble empirical mode decomposition: An improved
        noise-assisted method for eliminating mode mixing",
        Signal Process., vol. 96, 362-374, (2014).

    See also
    -----------------------
    vmd; hvd; ewt; hht (operators)
    '''
    x = np.array(x)
    N = x.shape[0]
    imf = np.zeros((order, N), dtype=x.dtype)
    # FIX: the original tested `x.dtype in [complex, np.complex, ...]`;
    # np.complex was removed in NumPy 1.24, which makes that line raise
    # AttributeError.  np.iscomplexobj covers all complex dtypes.
    iscomplex = np.iscomplexobj(x)
    for ord_cnt in range(order):
        h = x
        # Sifting loop: subtract the mean envelope until h looks like an IMF.
        for cnt in range(max_itter):
            s1 = get_envelope(h, method=method)
            s2 = -get_envelope(-h, method=method)
            mean_env = (s1 + s2) / 2
            # For RBF interpolation the envelope of complex input may come
            # back real; re-complexify it with the analytic signal.
            if iscomplex and not np.iscomplexobj(mean_env):
                h = h - scipy.signal.hilbert(mean_env)
            else:
                h = h - mean_env
            # Cauchy-type stopping criterion on the relative envelope energy.
            sd = np.sum(np.square(mean_env)) / np.sum(np.square(h))
            if (np.abs(sd) < tol) or isimf(h):
                break
        imf[ord_cnt, :] = h
        x = x - h
        # Nothing left to sift once the residual is monotonic.
        if ismonotonic(x):
            break
    return imf
#--------------------------------------------------------
def ismonotonic(x):
    '''
    Return True unless the signal has both local maxima and local minima
    (i.e. False as soon as extrema of both kinds exist).
    '''
    has_max = findpeaks(x).size > 0
    has_min = findpeaks(-x).size > 0
    return not (has_max and has_min)
#--------------------------------------------------------
def isimf(x):
    '''
    Return True when x qualifies as an intrinsic mode function: the number
    of zero crossings and the number of extrema differ by at most one.
    '''
    # Adjacent-sample products are negative exactly at sign changes.
    df = x[1:] * x[:-1]
    # FIX: the original computed `np.sum(df[df < 0])` — the SUM of the
    # negative products (a negative float) — instead of COUNTING the zero
    # crossings, which made the IMF criterion meaningless.
    zc = int(np.count_nonzero(df < 0))
    pmax = findpeaks(x)
    pmin = findpeaks(-x)
    extremums = pmax.size + pmin.size
    return abs(zc - extremums) <= 1
#--------------------------------------------------------
def get_envelope(x, method = 'cubic'):
    '''
    Estimate the upper envelope of *x* by spline interpolation through its
    local maxima.  Returns an all-zero array when fewer than two peaks
    exist (no envelope can be fitted).
    '''
    N = x.shape[0];
    p = findpeaks(x)
    if(p.size<2):
        return np.zeros(N)
    # Anchor the spline with zero-valued endpoints at positions 0 and N.
    points = np.concatenate([[0], p, [N]])
    values = np.concatenate([[0], x[p], [0]])
    #TODO check for mirror extention in my experiments it was worse
    # values, points = x[p],p
    # values,points =_extension(values, points, n_points=2)
    new_points = np.arange(points[0],points[-1])
    # fp locates position 0 in new_points, so the slice below realigns the
    # interpolated curve with the original sample grid.
    fp = np.flatnonzero(new_points == 0)[0]
    s=_spline(values, points, new_points, method = method)[fp:fp+N]
    return s
#--------------------------------------------------------
def _spline(values, points, new_points, method = 'cubic'):
'''
scipy.interpolate methods.
'''
if(method=='cubic'):
cofs = scipy.interpolate.splrep(points, values)
return scipy.interpolate.splev(new_points, cofs)
elif(method=='akim'):
return scipy.interpolate.Akima1DInterpolator(points,values)(new_points)
elif(method=='rbf'):
return scipy.interpolate.Rbf(points,values, function='gaussian')(new_points)
elif(method=='thin_plate'):
return scipy.interpolate.Rbf(points,values, function='thin_plate')(new_points)
elif(method=='linear'):
return scipy.interpolate.Rbf(points,values, function='linear')(new_points)
#--------------------------------------------------------
def findpeaks(x):
    ''' Return the indices of strict local maxima of the real part of *x*. '''
    # FIX: `import scipy` at module level does NOT load the `signal`
    # submodule, so scipy.signal.argrelmax could raise AttributeError
    # depending on import order; import it explicitly here.
    import scipy.signal
    return scipy.signal.argrelmax(np.real(x))[0]
#--------------------------------------------------------
def _extension(values, points, n_points=2,mirror = True ):
    '''
    Extend (values, points) by n_points on each side, either mirroring the
    endpoint samples (mirror=True, endpoints included) or reflecting the
    interior neighbours (mirror=False, endpoints excluded).  The extended
    points are reflected about the first/last point so spacing is preserved.
    FOR TEST
    '''
    N = values.shape[0]
    if mirror:
        # e.g. n_points=2: [v1,v0] + values + [v_{N-1},v_{N-2}]
        values = np.concatenate(( values[n_points-1::-1],
                                 values,
                                 values[N-1:N-n_points-1:-1] ))
    else:
        # e.g. n_points=2: [v2,v1] + values + [v_{N-2},v_{N-3}]
        values = np.concatenate(( values[n_points:0:-1],
                                 values,
                                 values[N-2:N-n_points-2:-1] ))
    # Reflect the sample positions about both ends of the original grid.
    points = np.concatenate((2*points[0] - points[n_points:0:-1],
                             points,
                             2*points[-1] - points[N-2:N-n_points-2:-1]))
    return values, points
# __all__ = ['emd_filter','emd']
# #--------------------------------------------------------------------
# _MIN_EXTREMUMS = 4 #Requirement of scipy
# TOL = 0.00005 # determined emperically
# #--------------------------------------------------------------------
# def emd_filter(x, method = 'cubic', max_itter=1):
# '''
# Emperical Mode Decomposition (EMD) filter.
# The filter based on the serching for first
# intrinsic mode function and subtract it.
# Parameters:
# --------------------------------------------
# * x: input 1d ndarray.
# * order: number of IMFs (with out remainder).
# * method: method of spline approximation: {cubic, akim, rbf, linear, thin_plate}.
# * max_itter: maximum number of itteration to search imf.
# Returns:
# -------------------------------------------
# * filtered signal.
# '''
# out = np.array(x)
# for _ in np.arange(max_itter):
# envdw, envup, _ = _envelops(out, method = method)
# out -= 0.5*(envdw+envup)
# return out
# #--------------------------------------------------------------------
# def emd(x, order=None, method = 'cubic', max_itter=100):
# '''
# Emperical Mode Decomposition (EMD).
# The emperical mode deocomposition method is the nonlinear time
# domain decomposition on the so-called intrinsic mode functions (IMF),
# based on the idea, that ech component can be reconstructed by searching it envelope.
# Parameters:
# ---------------------------------------------------------
# * x: input 1d ndarray.
# * order: number of IMFs (with out remainder).
# * method: method of spline approximation: {cubic, akim, rbf, linear, thin_plate}.
# * max_itter: maximum number of itteration to search imf.
# Returns:
# ----------------------------------------------------------
# * imfs: intrinsic mode functions and remainder, shape = (order+1,x.shape).
# References:
# --------------------------------------------------
# [1] <NAME> et al., "The empirical mode decomposition and the Hilbert
# spectrum for nonlinear and non-stationary time series analysis",
# Proc. R. Soc. L<NAME>, Math. Phys. Sci., vol. 454, no. 1971, 903–995, (1998).
# [2] <NAME>, "Hilbert-Huang transform and its applications", vol. 16. World Scientific, 2014.
# [3] <NAME>, <NAME>, "Ensemble empirical mode decomposition:
# A noise-assisted data analysis method", Adv. Adapt. Data Anal., vol. 1, no. 1, 1–41 (2008).
# [4] <NAME>, <NAME>, <NAME>, "Partly ensemble empirical mode decomposition:
# An improved noise-assisted method for eliminating mode mixing",
# Signal Process., vol. 96, 362–374, (2014).
# '''
# x = np.asarray(x)
# if order is None: order = x.shape[0]
# imf = np.zeros((order+1, x.shape[0]), dtype = x.dtype)
# out = np.zeros(x.shape[0], dtype = x.dtype)
# for i in np.arange(order):
# out = np.array(x - np.sum(imf,axis=0))
# for _ in np.arange(max_itter):
# envdw, envup, points = _envelops(out, method = method)
# if stop_criteria(out, envdw, envup, points): break
# else: out -= 0.5*(envdw+envup)
# imf[i,:] = out
# (pmax,pmin,pzeros) = points
# if(pmax.size < 2 or pmax.size < 2):
# break
# imf[i+1,:] = np.array(x - np.sum(imf,axis=0))
# return imf
# #--------------------------------------------------------------------
# def _spline(values, points, new_points, method = 'cubic'):
# if(method=='cubic'):
# cofs = scipy.interpolate.splrep(points, values)
# return scipy.interpolate.splev(new_points, cofs)
# elif(method=='akim'):
# return scipy.interpolate.Akima1DInterpolator(points,values)(new_points)
# elif(method=='rbf'):
# return scipy.interpolate.Rbf(points,values, function='gaussian')(new_points)
# elif(method=='thin_plate'):
# return scipy.interpolate.Rbf(points,values, function='thin_plate')(new_points)
# elif(method=='linear'):
# return scipy.interpolate.Rbf(points,values, function='linear')(new_points)
# #--------------------------------------------------------------------
# def _extension(values, points, n_points=2):
# '''
# Mirror extention
# '''
# N = values.shape[0]
# values = np.concatenate(( values[n_points-1::-1], values, values[N-1:N-n_points-1:-1] ))
# points = np.concatenate(( 2*points[0] - points[n_points:0:-1], points, 2*points[-1] - points[N-2:N-n_points-2:-1] ))
# return values, points
# #--------------------------------------------------------------------
# def _specialpoints(x,order=2, boundaires = False):
# '''
# Find special points (zeros, maximums and minimums) of the inpute
# sequence.
# Parameters:
# ----------------------------------
# * x: input sequence.
# * order: number of points before and after point to determine the class.
# * boundaires: if True, boun points (zero and last will also be concidered).
# Returns:
# -------------------------------
# * pmax: point of maximums (peaks).
# * pmin: point of minimums (peaks).
# * pzero: point of zeros (minimums of |x|).
# Notes:
# ---------------------------------
# * It is recommended to use _add_boundaries for bound points.
# '''
# x = np.asarray(x)
# N = x.shape[0]
# if(order<1 or order>N//2):
# raise ValueError('order should be between 1 and much less than samples size')
# pmax = np.array([], dtype = np.int)
# pmin = np.array([], dtype = np.int)
# pzero = np.array([], dtype = np.int)
# x_extend = np.concatenate((np.zeros(order), x, np.zeros(order) ))
# #TODO: replace x on x_extend
# for p in np.arange(order,N+order): #same as while(p<N+order)
# if(p-order>0 and p-order<N-1) or (boundaires):
# prange = x_extend[p-order:p+order]
# #max
# if(x[p-order] == np.max(prange)):
# pmax = np.append(pmax, p-order)
# #min
# if(x[p-order] == np.min(prange)):
# pmin = np.append(pmin, p-order)
# #zero
# if(np.abs(np.real(x[p-order]))) == np.min(np.abs(np.real(prange))):
# pzero = np.append(pzero, p-order)
# return pmax, pmin, pzero
# #--------------------------------------------------------------------
# def _envelops(x, method = 'cubic'):
# x = np.asarray(x)
# N = x.shape[0]
# pmax, pmin, pzeros = _specialpoints(x,order=2)
# x_max, x_min = x[pmax],x[pmin]
# envdw = np.zeros(N)
# envup = np.zeros(N)
# if(np.min([pmax.size, pmin.size])>=_MIN_EXTREMUMS):
# x_max,pmax = _extension(x_max,pmax)
# x_min,pmin = _extension(x_min,pmin)
# fp = np.min([pmax[0], pmin[0], 0])
# | |
<reponame>mtik00/invoicer<filename>invoicer/_invoice/__init__.py
import os
import re
from io import BytesIO
import json
import requests
import arrow
import pdfkit
import htmlmin
from pdfkit.configuration import Configuration
from flask import (
Blueprint, request, redirect, url_for, render_template, flash, current_app,
Response, session)
from sqlalchemy.orm import joinedload
from flask_login import login_required, current_user
from ..forms import EmptyForm
from ..submitter import sendmail
from ..database import db
from ..models import (
Item, Invoice, Customer, UnitPrice, InvoicePaidDate, User, InvoiceTheme)
from ..cache import app_cache
from .forms import InvoiceForm, ItemForm
from ..logger import LOGGER
invoice_page = Blueprint('invoice_page', __name__, template_folder='templates')
@app_cache.cached(key_prefix='invoice_themes')
def get_color_theme_data():
    '''
    Convert the invoice theme data into something a bit more usable:
    a mapping of theme name -> its four color settings.
    '''
    return {
        theme.name: {
            'banner_color': theme.banner_color,
            'banner_background_color': theme.banner_background_color,
            'table_header_color': theme.table_header_color,
            'table_header_background_color': theme.table_header_background_color,
        }
        for theme in InvoiceTheme.query.all()
    }
@app_cache.memoize(30)
def user_invoices(user_id, order_by='desc'):
    '''
    Return a list of all invoices owned by ``user_id``.

    ``paid_date`` and ``customer`` are joined-loaded so the returned
    objects stay usable outside the originating session.  Results are
    ordered by invoice id, descending unless ``order_by`` is not 'desc'.
    '''
    query = (
        Invoice.query
        .options(joinedload(Invoice.paid_date))
        .options(joinedload(Invoice.customer))
        .filter_by(user_id=user_id)
    )
    ordering = Invoice.id.desc() if order_by == 'desc' else Invoice.id.asc()
    return query.order_by(ordering).all()
def pdf_ok():
    '''
    Return a truthy value when PDF generation is available: either a
    remote wkhtmltopdf service URI is configured, or a readable local
    wkhtmltopdf binary exists.
    '''
    remote_uri = current_app.config.get('WKHTMLTOPDF_URI')
    if remote_uri:
        return remote_uri
    local_path = current_app.config.get('WKHTMLTOPDF') or ''
    # os.path.exists('') is False, so the config lookup below only runs
    # when the WKHTMLTOPDF option is actually set.
    return os.path.exists(local_path) and os.access(current_app.config['WKHTMLTOPDF'], os.R_OK)
def can_submit(customer_id):
    """
    Returns `True` if we can submit an invoice by email to a customer, `False`
    otherwise.
    """
    required_options = ('EMAIL_PASSWORD', 'EMAIL_USERNAME', 'EMAIL_SERVER')
    if not all(current_app.config.get(option) for option in required_options):
        LOGGER.warning("Can't submit due to one or more missing `EMAIL` config options")
        return False

    customer = Customer.query.filter_by(id=customer_id).first()
    if customer and customer.email:
        return True

    LOGGER.debug("Can't submit due to either no customer or no customer email")
    return False
def format_my_address(html=True):
    '''
    Return the current user's mailing address, upper-cased, joined with
    ``<br>`` (HTML mode) or newlines (plain text).  The profile email,
    when set, is appended; in HTML mode it is wrapped in a no-op anchor
    so Gmail does not auto-link it.  Returns '' when no full name is set.
    '''
    address = User.query.get(current_user.id).profile
    if not address.full_name:
        return ''

    separator = '<br>' if html else '\n'
    lines = [
        address.full_name,
        address.street,
        '%s %s, %s' % (address.city, address.state, address.zip),
    ]
    result = separator.join(lines).upper()

    if address.email:
        if html:
            # Prevent gmail from making this a link
            email_part = (
                "<a rel='nofollow' style='text-decoration:none; color:#fff' "
                "href='#'>" + address.email + "</a>"
            )
        else:
            email_part = address.email
        result += separator + email_part
    return result
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>/items/delete', methods=["GET", "POST"])
@login_required
def delete_items(invoice_number):
    '''
    Display and process the "delete items from an invoice" form.

    GET renders a form listing the invoice's items; POST (confirmed by
    typing "delete") removes the selected items and recalculates the
    invoice total.
    '''
    invoices = user_invoices(current_user.id)
    invoice = next((x for x in invoices if x.number == invoice_number), None)
    if not invoice:
        # BUGFIX: `invoice` is None here, so the old redirect built with
        # `invoice.number` raised AttributeError.  Send the user to the
        # dashboard instead (same behavior as `invoice_by_number`).
        flash('Unknown invoice', 'error')
        return redirect(url_for('index_page.dashboard'))

    items = Item.query.filter(Item.invoice_id == invoice.id)
    form = EmptyForm()

    if form.validate_on_submit():
        if request.form.get('validate_delete', '').lower() != 'delete':
            flash('Invalid delete request', 'error')
            return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))

        # Checkbox inputs are named `item_<n>`; their values are item ids.
        item_ids_to_delete = [y for x, y in request.form.items() if x.startswith('item_')]
        items = Item.query.filter(Item.id.in_(item_ids_to_delete)).all()
        if not items:
            return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))

        for item in items:
            db.session.delete(item)

        # Recalculate the invoice total
        items = Item.query.filter(Item.invoice_id == invoice.id).all()
        item_total = sum([x.quantity * x.unit_price for x in items])
        Invoice.query.filter(Invoice.id == invoice.id).update({'total': item_total})
        db.session.commit()

        # Clear the app cache so everything updates
        app_cache.clear()

        flash('Item(s) deleted from %s' % invoice.number, 'success')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))

    return render_template('invoice/lb_delete_items_form.html', form=form, items=items, invoice=invoice)
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>/items/create', methods=["GET", "POST"])
@login_required
def create_item(invoice_number):
    '''
    Display and process the "add an item to an invoice" form.

    The unit price and its units come from raw form fields
    (``unit_pricex`` / ``unit_price_units``) populated by the page's
    unit-price picker, not from the WTForms ``ItemForm``.
    '''
    invoice = Invoice.query.filter_by(number=invoice_number, user_id=current_user.id).first_or_404()
    form = ItemForm(quantity=1)
    unit_prices = UnitPrice.query.filter_by(user_id=current_user.id).all()
    if (request.method == 'POST') and ('cancel' in request.form):
        flash('Action canceled', 'warning')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
    if form.validate_on_submit():
        unit_price = float(request.form['unit_pricex'])
        units = request.form['unit_price_units']
        # Default the item date to "now" unless the form supplied one.
        date = arrow.now()
        if form.date.data:
            date = arrow.get(form.date.data, 'DD-MMM-YYYY')
        item = Item(
            invoice_id=invoice.id,
            date=date,
            description=form.description.data,
            unit_price=unit_price,
            quantity=form.quantity.data,
            units=units,
            customer=invoice.customer
        )
        invoice.items.append(item)
        db.session.add_all([invoice, item])
        db.session.commit()
        # Clear the app cache so everything updates
        app_cache.clear()
        flash('item added to invoice %s' % invoice.number, 'success')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
    return render_template(
        'invoice/lb_item_form.html',
        form=form,
        invoice=invoice,
        unit_price_objects=unit_prices,
    )
@app_cache.memoize()
def get_user_unit_prices(user_id):
    '''Return all UnitPrice rows owned by ``user_id`` (memoized).'''
    return UnitPrice.query.filter_by(user_id=user_id).all()
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>/item/<item_id>/update', methods=["GET", "POST"])
@login_required
def update_item(invoice_number, item_id):
    '''
    Display and process the "edit a single invoice item" form.

    Also handles deleting the item when the form posts the confirmation
    text "delete".  Once an invoice has been submitted, only the item's
    date and description may be changed.
    '''
    invoice = Invoice.query.filter_by(number=invoice_number, user_id=current_user.id).first_or_404()
    item = Item.query.filter_by(invoice_id=invoice.id, id=item_id).first_or_404()
    unit_prices = get_user_unit_prices(current_user.id)
    form = ItemForm(
        date=item.date.format('DD-MMM-YYYY').upper(),
        description=item.description,
        quantity=item.quantity
    )
    if (request.method == 'POST') and ('cancel' in request.form):
        flash('Action canceled', 'warning')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
    if form.validate_on_submit():
        # Only let the user modify the date and description if the invoice has
        # been submitted.
        if request.form.get('validate_delete', '').lower() == 'delete':
            # A delete was requested instead of an update.
            if invoice.submitted_date:
                flash('You cannot delete an item from a submitted invoice', 'error')
                return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
            db.session.delete(item)
            # Keep the cached invoice total in sync with the remaining items.
            invoice.total = sum([x.unit_price * x.quantity for x in invoice.items])
            db.session.add(invoice)
            db.session.commit()
            flash('Invoice item has been deleted', 'warning')
            return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
        # `unit_price_units` / `unit_pricex` are raw form fields (not part
        # of ItemForm) populated by the page's unit-price picker.
        units = request.form['unit_price_units']
        unit_price = request.form['unit_pricex']
        item.date = arrow.get(form.date.data, 'DD-MMM-YYYY')
        item.description = form.description.data
        if not invoice.submitted_date:
            # Unsubmitted invoice: all fields are editable; recompute total.
            item.quantity = int(form.quantity.data)
            item.units = units
            if unit_price:
                item.unit_price = float(unit_price)
            db.session.add(item)
            invoice.total = sum([x.unit_price * x.quantity for x in invoice.items])
            db.session.add(invoice)
        elif (
            (str(item.unit_price) != unit_price) or
            (item.units != units) or
            (item.quantity != form.quantity.data)
        ):
            # Submitted invoice: warn when the user tried to change locked fields.
            flash('Only item date and description are allowed to be modified on submitted invoices', 'warning')
        db.session.add(item)
        db.session.commit()
        # Clear the app cache so everything updates
        app_cache.clear()
        flash('item modified', 'success')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
    return render_template(
        'invoice/lb_item_form.html',
        form=form,
        invoice=invoice,
        item=item,
        unit_price_objects=unit_prices,
    )
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>/update', methods=["GET", "POST"])
@login_required
def update(invoice_number):
    '''
    Display and process the "edit invoice" form: description, customer,
    submitted/paid dates, terms, invoice number, and color theme.
    '''
    if 'cancel' in request.form:
        flash('invoice updated canceled', 'warning')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice_number))
    invoice = Invoice.query.filter_by(number=invoice_number, user_id=current_user.id).first_or_404()
    customers = Customer.query.filter_by(user_id=current_user.id).all()
    addr_choices = [(x.id, x.name1) for x in customers]
    # First choice is blank so "no theme" is selectable.
    theme_choices = [('', '')] + [(x, x) for x in get_color_theme_data().keys()]
    form = InvoiceForm(
        description=invoice.description,
        submitted_date=invoice.submitted_date.format('DD-MMM-YYYY').upper() if invoice.submitted_date else None,
        paid_date=invoice.paid_date.paid_date.format('DD-MMM-YYYY').upper() if invoice.paid_date else '',
        paid_date_notes=invoice.paid_date.description if invoice.paid_date else '',
        terms=invoice.terms,
        number=invoice.number
    )
    form.customer.choices = addr_choices
    form.invoice_theme.choices = theme_choices
    selected_theme = ''
    if invoice.invoice_theme:
        selected_theme = invoice.invoice_theme.name
    if request.method == 'GET':
        # Set the default theme only for `GET` or the value will never change.
        form.customer.process_data(invoice.customer_id)
        form.invoice_theme.process_data(selected_theme)
    elif form.validate_on_submit():
        # Preserve the existing submitted date unless a new one was posted.
        submitted_date = invoice.submitted_date
        if form.submitted_date.data:
            submitted_date = arrow.get(form.submitted_date.data, 'DD-MMM-YYYY')
        # An empty paid-date field clears any existing paid-date record.
        paid_date = None
        if form.paid_date.data:
            paid_date = InvoicePaidDate(
                paid_date=arrow.get(form.paid_date.data, 'DD-MMM-YYYY'),
                description=form.paid_date_notes.data
            )
            db.session.flush()
        terms = invoice.terms
        if form.terms.data:
            terms = form.terms.data
        invoice.description = form.description.data
        invoice.customer_id = form.customer.data
        invoice.submitted_date = submitted_date
        invoice.paid_date = paid_date
        invoice.terms = terms
        invoice.number = form.number.data
        if form.invoice_theme.data:
            invoice.invoice_theme = InvoiceTheme.query.filter_by(name=form.invoice_theme.data).first()
        else:
            invoice.invoice_theme = None
        db.session.commit()
        # Clear the app cache so everything updates
        app_cache.clear()
        flash('invoice updated', 'success')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
    return render_template(
        'invoice/lb_invoice_form.html',
        form=form, invoice=invoice, theme_choices=theme_choices,
        addr_choices=addr_choices, selected_theme=selected_theme)
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>')
@login_required
def invoice_by_number(invoice_number):
    '''
    Render a single invoice page, including previous/next navigation
    links computed from the user's full invoice list.
    '''
    form = EmptyForm(request.form)
    invoices = user_invoices(current_user.id)
    invoice = next((x for x in invoices if x.number == invoice_number), None)
    if not invoice:
        flash('Unknown invoice', 'error')
        return redirect(url_for('index_page.dashboard'))

    # Figure out next/previous
    invoice_numbers = [x.number for x in invoices]
    if not invoice_numbers:
        current_pos = next_id = previous_id = 0
        to_emails = None
    else:
        to_emails = ', '.join(get_address_emails(invoice.customer_id))
        current_pos = invoice_numbers.index(invoice_number)
        if current_pos == len(invoice_numbers) - 1:
            next_id = None
        else:
            next_id = invoice_numbers[current_pos + 1]
        if current_pos == 0:
            previous_id = None
        else:
            previous_id = invoice_numbers[current_pos - 1]

    # BUGFIX: use session.get() -- a missing 'user_debug' key previously
    # raised KeyError and 500'd the page.
    allow_editing = (
        session.get('user_debug') or current_app.config['DEBUG'] or
        (not invoice.submitted_date)
    )

    return render_template(
        'invoice/invoices.html',
        form=form,
        next_id=next_id,
        previous_id=previous_id,
        invoice_obj=invoice,
        to_emails=to_emails,
        can_submit=to_emails and invoice and can_submit(invoice.customer_id),
        pdf_ok=pdf_ok(),  # The binary exists
        show_pdf_button=User.query.get(current_user.id).profile.enable_pdf,
        invoice_numbers=invoice_numbers,
        simplified_invoice=simplified_invoice(invoice_number, show_item_edit=allow_editing, embedded=True),
        invoices=user_invoices(current_user.id),
    )
@invoice_page.route('/create', methods=["GET", "POST"])
@login_required
def create():
    '''
    Display and process the "create invoice" form.

    Requires at least one existing customer; the invoice number is
    generated from the selected customer via ``next_invoice_number``.
    '''
    form = InvoiceForm(request.form)
    customers = Customer.query.filter_by(user_id=current_user.id).all()
    if not customers:
        flash('You must add at least 1 customer before creating invoices', 'error')
        return redirect(url_for('customers_page.create'))
    addr_choices = [(x.id, x.name1) for x in customers]
    form.customer.choices = addr_choices
    # First choice is blank so "no theme" is selectable.
    theme_choices = [('', '')] + [(x, x) for x in get_color_theme_data().keys()]
    form.invoice_theme.choices = theme_choices
    me = User.query.get(current_user.id).profile
    if form.validate_on_submit():
        customer_id = int(request.form['customer'])
        number = next_invoice_number(customer_id)
        customer = Customer.query.filter_by(id=customer_id, user_id=current_user.id).first_or_404()
        submitted_date = None
        if form.submitted_date.data:
            submitted_date = arrow.get(form.submitted_date.data, 'DD-MMM-YYYY')
        # Customer-specific terms win over the user's default terms.
        terms = customer.terms or me.terms
        paid_date = None
        if form.paid_date.data:
            paid_date = InvoicePaidDate(
                paid_date=arrow.get(form.paid_date.data, 'DD-MMM-YYYY'),
                description=form.paid_date_notes.data
            )
        db.session.add(
            Invoice(
                description=form.description.data,
                customer_id=customer_id,
                number=number,
                terms=terms,
                submitted_date=submitted_date,
                paid_date=paid_date,
                user=User.query.get(current_user.id),
            )
        )
        db.session.commit()
        # Clear the app cache so everything updates
        app_cache.clear()
        flash('invoice added', 'success')
        return redirect(url_for('invoice_page.last_invoice'))
    return render_template('invoice/lb_invoice_form.html', form=form, theme_choices=theme_choices, addr_choices=addr_choices)
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>/delete', methods=['POST'])
@login_required
def delete(invoice_number):
    '''
    Delete an invoice (along with its items and paid-date record) after
    the user confirms by typing "delete" into the form.
    '''
    invoice = Invoice.query.filter_by(number=invoice_number, user_id=current_user.id).first_or_404()
    form = EmptyForm(request.form)
    if request.form.get('validate_delete', '').lower() != 'delete':
        flash('Invalid delete request', 'error')
        return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice.number))
    if form.validate_on_submit():
        items = Item.query.filter_by(invoice_id=invoice.id).all()
        # Remove the associated paid-date row first, if any.
        if invoice.paid_date:
            pd = InvoicePaidDate.query.get(invoice.paid_date.id)
            db.session.delete(pd)
        db.session.delete(invoice)
        for item in items:
            db.session.delete(item)
        db.session.commit()
        # Clear the app cache so everything updates
        app_cache.clear()
        flash('Invoice %s has been deleted' % invoice_number, 'warning')
        return redirect(url_for('invoice_page.last_invoice'))
    else:
        # CSRF / form validation failed.
        flash('Error occurred when attempting to delete form', 'error')
    return redirect(url_for('invoice_page.invoice_by_number', invoice_number=invoice_number))
def get_pdf_from_kit(text: str):
    '''
    Render HTML ``text`` to PDF bytes with the locally-installed
    wkhtmltopdf binary (path taken from the WKHTMLTOPDF config option).
    '''
    pdf_options = {
        'print-media-type': None,
        'page-size': 'letter',
        'no-outline': None,
        'quiet': None,
    }
    wk_config = Configuration(current_app.config['WKHTMLTOPDF'])
    # A `False` output path makes pdfkit return the PDF as bytes.
    return pdfkit.from_string(text, False, options=pdf_options, configuration=wk_config)
def get_pdf_from_url(text: str, url: str, timeout: float = 60) -> bytes:
    '''
    Render HTML ``text`` to PDF bytes by POSTing it to a remote
    wkhtmltopdf service.

    :param text: The HTML document to render.
    :param url: The wkhtmltopdf service endpoint.
    :param timeout: Seconds to wait for the service (defaults to 60).
        Previously no timeout was passed, so an unreachable service could
        block the request thread forever.
    :raises requests.HTTPError: if the service returns an error status.
    '''
    data = {
        'contents': text,
        'options': {
            'print-media-type': None,
            'page-size': 'letter',
            'no-outline': None,
            'quiet': None
        }
    }
    headers = {
        'Content-Type': 'application/json',
    }
    wk_response = requests.post(url, data=json.dumps(data), headers=headers, timeout=timeout)
    wk_response.raise_for_status()
    return wk_response.content
def get_pdf_bytes(invoice_number) -> bytes:
    '''
    Render the given invoice as PDF bytes, preferring the remote
    wkhtmltopdf service when WKHTMLTOPDF_URI is configured.
    '''
    html_text = bs4_invoice(current_user.id, invoice_number)
    remote_uri = current_app.config['WKHTMLTOPDF_URI']
    if remote_uri:
        return get_pdf_from_url(html_text, remote_uri)
    return get_pdf_from_kit(html_text)
@invoice_page.route('/<regex("\d+-\d+-\d+"):invoice_number>/pdf')
@login_required
def to_pdf(invoice_number):
    '''
    Stream the invoice as an ``application/pdf`` response, or redirect
    back to the invoice page when PDF support is not configured.
    '''
    if not pdf_ok():
        flash('PDF configuration not supported', 'error')
        # BUGFIX: the redirect previously passed the *view function*
        # `invoice_by_number` as the invoice number; pass the actual
        # `invoice_number` string instead.
        return redirect(url_for('.invoice_by_number', invoice_number=invoice_number))
    pdf_bytes = get_pdf_bytes(invoice_number)
    return Response(
        pdf_bytes,
        mimetype='application/pdf',
    )
def get_address_emails(customer_id):
if (session['user_debug'] or current_app.config['DEBUG']) and ('EMAIL_USERNAME' in current_app.config):
return [current_app.config['EMAIL_USERNAME'] or '']
customer = Customer.query.filter_by(user_id=current_user.id, id=customer_id).first_or_404()
email | |
# File: sdk/python/pulumi_gitlab/instance_variable.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['InstanceVariableArgs', 'InstanceVariable']
@pulumi.input_type
class InstanceVariableArgs:
    # Machine-generated input-args class (Pulumi Terraform Bridge / tfgen).
    # Property values flow through pulumi.set/pulumi.get so the Pulumi
    # runtime can track them as resource inputs; do not restructure by hand.
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 value: pulumi.Input[str],
                 masked: Optional[pulumi.Input[bool]] = None,
                 protected: Optional[pulumi.Input[bool]] = None,
                 variable_type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a InstanceVariable resource.
        :param pulumi.Input[str] key: The name of the variable.
        :param pulumi.Input[str] value: The value of the variable.
        :param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
        :param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
        :param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)
        # Optional inputs are only recorded when explicitly provided.
        if masked is not None:
            pulumi.set(__self__, "masked", masked)
        if protected is not None:
            pulumi.set(__self__, "protected", protected)
        if variable_type is not None:
            pulumi.set(__self__, "variable_type", variable_type)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The name of the variable.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        The value of the variable.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)

    @property
    @pulumi.getter
    def masked(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
        """
        return pulumi.get(self, "masked")

    @masked.setter
    def masked(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "masked", value)

    @property
    @pulumi.getter
    def protected(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
        """
        return pulumi.get(self, "protected")

    @protected.setter
    def protected(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "protected", value)

    @property
    @pulumi.getter(name="variableType")
    def variable_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of a variable. Available types are: env_var (default) and file.
        """
        return pulumi.get(self, "variable_type")

    @variable_type.setter
    def variable_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "variable_type", value)
@pulumi.input_type
class _InstanceVariableState:
    # Machine-generated state class (Pulumi Terraform Bridge / tfgen) used
    # for `get()` lookups; every field is optional because state may be
    # partially known.  Do not restructure by hand.
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 masked: Optional[pulumi.Input[bool]] = None,
                 protected: Optional[pulumi.Input[bool]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 variable_type: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering InstanceVariable resources.
        :param pulumi.Input[str] key: The name of the variable.
        :param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
        :param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
        :param pulumi.Input[str] value: The value of the variable.
        :param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
        """
        # Only record the state fields that were actually supplied.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if masked is not None:
            pulumi.set(__self__, "masked", masked)
        if protected is not None:
            pulumi.set(__self__, "protected", protected)
        if value is not None:
            pulumi.set(__self__, "value", value)
        if variable_type is not None:
            pulumi.set(__self__, "variable_type", variable_type)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the variable.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def masked(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
        """
        return pulumi.get(self, "masked")

    @masked.setter
    def masked(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "masked", value)

    @property
    @pulumi.getter
    def protected(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
        """
        return pulumi.get(self, "protected")

    @protected.setter
    def protected(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "protected", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        The value of the variable.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)

    @property
    @pulumi.getter(name="variableType")
    def variable_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of a variable. Available types are: env_var (default) and file.
        """
        return pulumi.get(self, "variable_type")

    @variable_type.setter
    def variable_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "variable_type", value)
class InstanceVariable(pulumi.CustomResource):
    # Typing-only overload: keyword-argument form.  The body is `...`; the
    # real implementation is `_internal_init` below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 masked: Optional[pulumi.Input[bool]] = None,
                 protected: Optional[pulumi.Input[bool]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 variable_type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## # gitlab\_instance\_variable

        This resource allows you to create and manage CI/CD variables for your GitLab instance.
        For further information on variables, consult the [gitlab
        documentation](https://docs.gitlab.com/ee/api/instance_level_ci_variables.html).

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gitlab as gitlab

        example = gitlab.InstanceVariable("example",
            key="instance_variable_key",
            masked=False,
            protected=False,
            value="instance_variable_value")
        ```

        ## Import

        GitLab instance variables can be imported using an id made up of `variablename`, e.g. console

        ```sh
         $ pulumi import gitlab:index/instanceVariable:InstanceVariable example instance_variable_key
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key: The name of the variable.
        :param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
        :param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on protected branches and tags. Defaults to `false`.
        :param pulumi.Input[str] value: The value of the variable.
        :param pulumi.Input[str] variable_type: The type of a variable. Available types are: env_var (default) and file.
        """
        ...
    # Typing-only overload: bundled-args form.  The body is `...`; the real
    # implementation is `_internal_init` below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: InstanceVariableArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## # gitlab\_instance\_variable

        This resource allows you to create and manage CI/CD variables for your GitLab instance.
        For further information on variables, consult the [gitlab
        documentation](https://docs.gitlab.com/ee/api/instance_level_ci_variables.html).

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gitlab as gitlab

        example = gitlab.InstanceVariable("example",
            key="instance_variable_key",
            masked=False,
            protected=False,
            value="instance_variable_value")
        ```

        ## Import

        GitLab instance variables can be imported using an id made up of `variablename`, e.g. console

        ```sh
         $ pulumi import gitlab:index/instanceVariable:InstanceVariable example instance_variable_key
        ```

        :param str resource_name: The name of the resource.
        :param InstanceVariableArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher between the two typed overloads above: either a
        # positional InstanceVariableArgs bundle or plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(InstanceVariableArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 masked: Optional[pulumi.Input[bool]] = None,
                 protected: Optional[pulumi.Input[bool]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 variable_type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # the resource options, builds the property bag, and registers the
        # resource with the Pulumi engine via the CustomResource base class.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (not looking one up by id).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = InstanceVariableArgs.__new__(InstanceVariableArgs)
            # `key` and `value` are required unless rehydrating from a URN.
            if key is None and not opts.urn:
                raise TypeError("Missing required property 'key'")
            __props__.__dict__["key"] = key
            __props__.__dict__["masked"] = masked
            __props__.__dict__["protected"] = protected
            if value is None and not opts.urn:
                raise TypeError("Missing required property 'value'")
            __props__.__dict__["value"] = value
            __props__.__dict__["variable_type"] = variable_type
        super(InstanceVariable, __self__).__init__(
            'gitlab:index/instanceVariable:InstanceVariable',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
key: Optional[pulumi.Input[str]] = None,
masked: Optional[pulumi.Input[bool]] = None,
protected: Optional[pulumi.Input[bool]] = None,
value: Optional[pulumi.Input[str]] = None,
variable_type: Optional[pulumi.Input[str]] = None) -> 'InstanceVariable':
"""
Get an existing InstanceVariable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: The name of the variable.
:param pulumi.Input[bool] masked: If set to `true`, the value of the variable will be hidden in job logs. The value must meet the [masking requirements](https://docs.gitlab.com/ee/ci/variables/#masked-variable-requirements). Defaults to `false`.
:param pulumi.Input[bool] protected: If set to `true`, the variable will be passed only to pipelines running on | |
i in range(self._data.children_size())
},
self._mask,
)
else:
return self._fromdata(
{
self.dtype.fields[i].name: ColumnFromVelox._from_velox(
self.device,
self.dtype.fields[i].dtype,
self._data.child_at(i),
True,
)
> other
for i in range(self._data.children_size())
},
self._mask,
)
    def __le__(self, other):
        """Element-wise ``<=``; returns a new frame of per-column results."""
        if isinstance(other, DataFrameCpu):
            # Frame vs. frame: compare column-by-column, matched by name.
            return self._fromdata(
                {n: c <= other[n] for (n, c) in self._field_data.items()}
            )
        else:
            # Frame vs. scalar: rebuild each child column from the velox
            # container and compare it against `other`.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox._from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    <= other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    def __ge__(self, other):
        """Element-wise ``>=``; returns a new frame of per-column results."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            # Frame vs. frame: rebuild both sides' child columns from their
            # velox containers and compare positionally (field i vs field i).
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox._from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    >= ColumnFromVelox._from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Frame vs. scalar.  NOTE(review): unlike the frame/frame branch
            # this path does not pass self._mask -- confirm intentional.
            return self._fromdata(
                {n: c >= other for (n, c) in self._field_data.items()}
            )
    def __or__(self, other):
        """Element-wise ``|``; returns a new frame of per-column results."""
        if isinstance(other, DataFrameCpu):
            # Frame vs. frame: combine column-by-column, matched by name.
            return self._fromdata(
                {n: c | other[n] for (n, c) in self._field_data.items()}
            )
        else:
            # Frame vs. scalar: rebuild each child column from the velox
            # container and OR it with `other`.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox._from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    | other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
def __ror__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: other[n] | c for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: other | c for (n, c) in self._field_data.items()})
def __and__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: c & other[n] for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: c & other for (n, c) in self._field_data.items()})
def __rand__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: other[n] & c for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: other & c for (n, c) in self._field_data.items()})
def __invert__(self):
return self._fromdata({n: ~c for (n, c) in self._field_data.items()})
    def __neg__(self):
        """Element-wise unary ``-``; each child column is rebuilt from the
        velox container and negated."""
        return self._fromdata(
            {
                self.dtype.fields[i].name: -ColumnFromVelox._from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
            },
            self._mask,
        )
    def __pos__(self):
        """Element-wise unary ``+``; rebuilds each child column from the
        velox container unchanged and returns a new frame."""
        return self._fromdata(
            {
                self.dtype.fields[i].name: ColumnFromVelox._from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
            },
            self._mask,
        )
# isin ---------------------------------------------------------------
@trace
@expression
def isin(self, values: Union[list, dict, IColumn]):
"""Check whether values are contained in data."""
if isinstance(values, list):
return self._fromdata(
{
self.dtype.fields[i]
.name: ColumnFromVelox._from_velox(
self.device,
self.dtype.fields[i].dtype,
self._data.child_at(i),
True,
)
.isin(values)
for i in range(self._data.children_size())
},
self._mask,
)
if isinstance(values, dict):
self._check_columns(values.keys())
return self._fromdata(
{n: c.isin(values[n]) for n, c in self._field_data.items()}
)
if isinstance(values, IDataFrame):
self._check_columns(values.columns)
return self._fromdata(
{n: c.isin(values=list(values[n])) for n, c in self._field_data.items()}
)
else:
raise ValueError(
f"isin undefined for values of type {type(self).__name__}."
)
# data cleaning -----------------------------------------------------------
@trace
@expression
def fill_null(self, fill_value: Optional[Union[dt.ScalarTypes, Dict]]):
if fill_value is None:
return self
if isinstance(fill_value, IColumn._scalar_types):
return self._fromdata(
{
self.dtype.fields[i]
.name: ColumnFromVelox._from_velox(
self.device,
self.dtype.fields[i].dtype,
self._data.child_at(i),
True,
)
.fill_null(fill_value)
for i in range(self._data.children_size())
},
self._mask,
)
else:
raise TypeError(f"fill_null with {type(fill_value)} is not supported")
@trace
@expression
def drop_null(self, how="any"):
"""Return a dataframe with rows removed where the row has any or all nulls."""
self._prototype_support_warning("drop_null")
# TODO only flat columns supported...
assert self._dtype is not None
res = Scope._EmptyColumn(self._dtype.constructor(nullable=False))
if how == "any":
for i in self:
if not self._has_any_null(i):
res._append(i)
elif how == "all":
for i in self:
if not self._has_all_null(i):
res._append(i)
return res._finalize()
@trace
@expression
def drop_duplicates(
self,
subset: Optional[List[str]] = None,
keep="first",
):
"""Remove duplicate values from data but keep the first, last, none (keep=False)"""
self._prototype_support_warning("drop_duplicates")
columns = subset if subset is not None else self.columns
self._check_columns(columns)
# TODO fix slow implementation by vectorization,
# i.e do unique per column and delete when all agree
# shortcut once no match is found.
res = Scope._EmptyColumn(self.dtype)
indices = [self.columns.index(s) for s in columns]
seen = set()
for tup in self:
row = tuple(tup[i] for i in indices)
if row in seen:
continue
else:
seen.add(row)
res._append(tup)
return res._finalize()
# @staticmethod
def _has_any_null(self, tup) -> bool:
for t in tup:
if t is None:
return True
if isinstance(t, tuple) and self._has_any_null(t):
return True
return False
# @staticmethod
def _has_all_null(self, tup) -> bool:
for t in tup:
if t is not None:
return False
if isinstance(t, tuple) and not self._has_all_null(t):
return False
return True
# universal ---------------------------------------------------------
# TODO Decide on tracing level: If we trace 'min' om a
# - highlevel then we can use lambdas inside min
# - lowelevel, i.e call 'summarize', then lambdas have to become
# - global functions if they have no state
# - dataclasses with an apply function if they have state
@staticmethod
def _cmin(c):
return c.min
# with static function
@trace
@expression
def min(self):
"""Return the minimum of the non-null values of the Column."""
return self._summarize(DataFrameCpu._cmin)
# with dataclass function
# @expression
# def min(self, numeric_only=None):
# """Return the minimum of the non-null values of the Column."""
# return self._summarize(_Min(), {"numeric_only": numeric_only})
# with lambda
# @expression
# def min(self, numeric_only=None):
# """Return the minimum of the non-null values of the Column."""
# return self._summarize(lambda c: c.min, {"numeric_only": numeric_only})
@trace
@expression
def max(self):
"""Return the maximum of the non-null values of the column."""
# skipna == True
return self._summarize(lambda c: c.max)
@trace
@expression
def all(self):
"""Return whether all non-null elements are True in Column"""
return self._summarize(lambda c: c.all)
@trace
@expression
def any(self):
"""Return whether any non-null element is True in Column"""
return self._summarize(lambda c: c.any)
@trace
@expression
def sum(self):
"""Return sum of all non-null elements in Column"""
return self._summarize(lambda c: c.sum)
@trace
@expression
def _cummin(self):
"""Return cumulative minimum of the data."""
return self._lift(lambda c: c._cummin)
@trace
@expression
def _cummax(self):
"""Return cumulative maximum of the data."""
return self._lift(lambda c: c._cummax)
@trace
@expression
def cumsum(self):
"""Return cumulative sum of the data."""
return self._lift(lambda c: c.cumsum)
@trace
@expression
def _cumprod(self):
"""Return cumulative product of the data."""
return self._lift(lambda c: c._cumprod)
@trace
@expression
def mean(self):
"""Return the mean of the values in the series."""
return self._summarize(lambda c: c.mean)
@trace
@expression
def median(self):
"""Return the median of the values in the data."""
return self._summarize(lambda c: c.median)
@trace
@expression
def mode(self):
"""Return the mode(s) of the data."""
return self._summarize(lambda c: c.mode)
@trace
@expression
def std(self):
"""Return the stddev(s) of the data."""
return self._summarize(lambda c: c.std)
@trace
@expression
def _nunique(self, drop_null=True):
"""Returns the number of unique values per column"""
res = {}
res["column"] = ta.Column([f.name for f in self.dtype.fields], dt.string)
res["unique"] = ta.Column(
[
ColumnFromVelox._from_velox(
self.device,
f.dtype,
self._data.child_at(self._data.type().get_child_idx(f.name)),
True,
)._nunique(drop_null)
for f in self.dtype.fields
],
dt.int64,
)
return self._fromdata(res, None)
    def _summarize(self, func):
        """Reduce every child column to a single value via *func*.

        *func* receives a wrapped CPU column and returns a zero-argument
        reducer (e.g. ``lambda c: c.min``); the reducer's result becomes the
        single row of the returned one-row column.
        """
        res = ta.Column(self.dtype)
        for i in range(self._data.children_size()):
            # Wrap the i-th velox child as a column, then invoke the reducer.
            result = func(
                ColumnFromVelox._from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
            )()
            if result is None:
                # A null reduction result becomes a null cell.
                res._data.child_at(i).append_null()
            else:
                res._data.child_at(i).append(result)
        # Exactly one row: one aggregate value per child column.
        res._data.set_length(1)
        return res
    @trace
    def _lift(self, func):
        """Apply a column-wise transform to every child, keeping the length.

        *func* receives a wrapped CPU column and returns a zero-argument
        transform (e.g. ``lambda c: c.cumsum``); the transformed children are
        reassembled into a new dataframe of the original length.

        Raises:
            NotImplementedError: if the frame has any null rows.
        """
        if self.null_count == 0:
            res = velox.Column(get_velox_type(self.dtype))
            for i in range(self._data.children_size()):
                # Transform the i-th child column...
                child = func(
                    ColumnFromVelox._from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                )()
                # ...and splice its velox data back into the result container.
                res.set_child(
                    i,
                    child._data,
                )
            res.set_length(len(self._data))
            return ColumnFromVelox._from_velox(self.device, self.dtype, res, True)
        raise NotImplementedError("Dataframe row is not allowed to have nulls")
# describe ----------------------------------------------------------------
@trace
@expression
def describe(
self,
percentiles=None,
include=None,
exclude=None,
):
"""Generate descriptive statistics."""
# Not supported: datetime_is_numeric=False,
includes = []
if include is None:
includes = [f.name for f in self.dtype.fields if dt.is_numerical(f.dtype)]
elif isinstance(include, list):
includes = [f.name for f in self.dtype.fields if f.dtype in include]
else:
raise TypeError(
f"describe with include of type {type(include).__name__} is not supported"
)
excludes = []
if exclude is None:
excludes = []
elif isinstance(exclude, list):
excludes = [f.name for f in self.dtype.fields if f.dtype in exclude]
else:
raise TypeError(
f"describe with exclude of type {type(exclude).__name__} is not supported"
)
selected = [i for i in includes if i not in excludes]
if percentiles is None:
percentiles = [25, 50, 75]
percentiles = sorted(set(percentiles))
if len(percentiles) > 0:
if percentiles[0] < 0 or percentiles[-1] > 100:
raise ValueError("percentiles must be betwen 0 and 100")
res = {}
res["metric"] = ta.Column(
["count", "mean", "std", "min"] + [f"{p}%" for p in percentiles] + ["max"]
)
for s in selected:
idx = self._data.type().get_child_idx(s)
c = ColumnFromVelox._from_velox(
self.device,
self.dtype.fields[idx].dtype,
self._data.child_at(idx),
True,
)
res[s] = ta.Column(
[c._count(), c.mean(), c.std(), c.min()]
+ c._quantile(percentiles, "midpoint")
+ [c.max()]
)
return self._fromdata(res, [False] * len(res["metric"]))
# Dataframe specific ops -------------------------------------------------- #
@trace
@expression
def drop(self, columns: List[str]):
self._check_columns(columns)
return self._fromdata(
{
self.dtype.fields[i].name: ColumnFromVelox._from_velox(
self.device,
self.dtype.fields[i].dtype,
self._data.child_at(i),
True,
)
for i | |
support reduce_sum
# encoder_out_lens = encoder_mask.squeeze(1).sum(1)
encoder_out_lens = encoder_mask.squeeze(1).astype(paddle.int).sum(1)
ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)
topk_prob, topk_index = ctc_probs.topk(1, axis=2) # (B, maxlen, 1)
topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen)
pad_mask = make_pad_mask(encoder_out_lens) # (B, maxlen)
topk_index = topk_index.masked_fill_(pad_mask, self.eos) # (B, maxlen)
hyps = [hyp.tolist() for hyp in topk_index]
hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
return hyps
    def _ctc_prefix_beam_search(
            self,
            speech: paddle.Tensor,
            speech_lengths: paddle.Tensor,
            beam_size: int,
            decoding_chunk_size: int=-1,
            num_decoding_left_chunks: int=-1,
            simulate_streaming: bool=False,
            blank_id: int=0, ) -> Tuple[List[Tuple[int, float]], paddle.Tensor]:
        """ CTC prefix beam search inner implementation
        Args:
            speech (paddle.Tensor): (batch, max_len, feat_dim)
            speech_length (paddle.Tensor): (batch, )
            beam_size (int): beam size for beam search
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here
            num_decoding_left_chunks (int): number of left chunks the
                encoder attends to (-1 means unlimited)
            simulate_streaming (bool): whether do encoder forward in a
                streaming fashion
            blank_id (int): id of the CTC blank token
        Returns:
            List[Tuple[int, float]]: nbest results, (N,1), (text, likelihood)
            paddle.Tensor: encoder output, (1, max_len, encoder_dim),
                it will be used for rescoring in attention rescoring mode
        """
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        batch_size = speech.shape[0]
        # For CTC prefix beam search, we only support batch_size=1
        assert batch_size == 1
        # Let's assume B = batch_size and N = beam_size
        # 1. Encoder forward and get CTC score
        encoder_out, encoder_mask = self._forward_encoder(
            speech, speech_lengths, decoding_chunk_size,
            num_decoding_left_chunks,
            simulate_streaming)  # (B, maxlen, encoder_dim)
        maxlen = encoder_out.shape[1]
        ctc_probs = self.ctc.log_softmax(encoder_out)  # (1, maxlen, vocab_size)
        ctc_probs = ctc_probs.squeeze(0)
        # cur_hyps: (prefix, (blank_ending_score, none_blank_ending_score))
        # blank_ending_score and none_blank_ending_score in ln domain
        cur_hyps = [(tuple(), (0.0, -float('inf')))]
        # 2. CTC beam search step by step
        for t in range(0, maxlen):
            logp = ctc_probs[t]  # (vocab_size,)
            # key: prefix, value (pb, pnb), default value(-inf, -inf)
            next_hyps = defaultdict(lambda: (-float('inf'), -float('inf')))
            # 2.1 First beam prune: select topk best
            top_k_logp, top_k_index = logp.topk(beam_size)  # (beam_size,)
            for s in top_k_index:
                s = s.item()
                ps = logp[s].item()
                for prefix, (pb, pnb) in cur_hyps:
                    last = prefix[-1] if len(prefix) > 0 else None
                    if s == blank_id:  # blank
                        # Blank extends the path but emits nothing new.
                        n_pb, n_pnb = next_hyps[prefix]
                        n_pb = log_add([n_pb, pb + ps, pnb + ps])
                        next_hyps[prefix] = (n_pb, n_pnb)
                    elif s == last:
                        # Update *ss -> *s;
                        n_pb, n_pnb = next_hyps[prefix]
                        n_pnb = log_add([n_pnb, pnb + ps])
                        next_hyps[prefix] = (n_pb, n_pnb)
                        # Update *s-s -> *ss, - is for blank
                        n_prefix = prefix + (s, )
                        n_pb, n_pnb = next_hyps[n_prefix]
                        n_pnb = log_add([n_pnb, pb + ps])
                        next_hyps[n_prefix] = (n_pb, n_pnb)
                    else:
                        # A new token extends the prefix.
                        n_prefix = prefix + (s, )
                        n_pb, n_pnb = next_hyps[n_prefix]
                        n_pnb = log_add([n_pnb, pb + ps, pnb + ps])
                        next_hyps[n_prefix] = (n_pb, n_pnb)
            # 2.2 Second beam prune
            next_hyps = sorted(
                next_hyps.items(),
                key=lambda x: log_add(list(x[1])),
                reverse=True)
            cur_hyps = next_hyps[:beam_size]
        # Total prefix score is the logsumexp of its two path scores.
        hyps = [(y[0], log_add([y[1][0], y[1][1]])) for y in cur_hyps]
        return hyps, encoder_out
def ctc_prefix_beam_search(
self,
speech: paddle.Tensor,
speech_lengths: paddle.Tensor,
beam_size: int,
decoding_chunk_size: int=-1,
num_decoding_left_chunks: int=-1,
simulate_streaming: bool=False, ) -> List[int]:
""" Apply CTC prefix beam search
Args:
speech (paddle.Tensor): (batch, max_len, feat_dim)
speech_length (paddle.Tensor): (batch, )
beam_size (int): beam size for beam search
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
List[int]: CTC prefix beam search nbest results
"""
hyps, _ = self._ctc_prefix_beam_search(
speech, speech_lengths, beam_size, decoding_chunk_size,
num_decoding_left_chunks, simulate_streaming)
return hyps[0][0]
    def attention_rescoring(
            self,
            speech: paddle.Tensor,
            speech_lengths: paddle.Tensor,
            beam_size: int,
            decoding_chunk_size: int=-1,
            num_decoding_left_chunks: int=-1,
            ctc_weight: float=0.0,
            simulate_streaming: bool=False, ) -> List[int]:
        """ Apply attention rescoring decoding, CTC prefix beam search
            is applied first to get nbest, then we resoring the nbest on
            attention decoder with corresponding encoder out
        Args:
            speech (paddle.Tensor): (batch, max_len, feat_dim)
            speech_length (paddle.Tensor): (batch, )
            beam_size (int): beam size for beam search
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here
            num_decoding_left_chunks (int): number of left chunks the
                encoder attends to (-1 means unlimited)
            ctc_weight (float): weight of the CTC prefix score that is mixed
                into the attention-decoder score
            simulate_streaming (bool): whether do encoder forward in a
                streaming fashion
        Returns:
            List[int]: Attention rescoring result
        """
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        device = speech.place
        batch_size = speech.shape[0]
        # For attention rescoring we only support batch_size=1
        assert batch_size == 1
        # len(hyps) = beam_size, encoder_out: (1, maxlen, encoder_dim)
        hyps, encoder_out = self._ctc_prefix_beam_search(
            speech, speech_lengths, beam_size, decoding_chunk_size,
            num_decoding_left_chunks, simulate_streaming)
        assert len(hyps) == beam_size
        hyp_list = []
        for hyp in hyps:
            hyp_content = hyp[0]
            # Prevent the hyp is empty
            if len(hyp_content) == 0:
                hyp_content = (self.ctc.blank_id, )
            hyp_content = paddle.to_tensor(
                hyp_content, place=device, dtype=paddle.long)
            hyp_list.append(hyp_content)
        hyps_pad = pad_sequence(hyp_list, True, self.ignore_id)
        hyps_lens = paddle.to_tensor(
            [len(hyp[0]) for hyp in hyps], place=device,
            dtype=paddle.long)  # (beam_size,)
        hyps_pad, _ = add_sos_eos(hyps_pad, self.sos, self.eos, self.ignore_id)
        hyps_lens = hyps_lens + 1  # Add <sos> at begining
        # Share the single encoder output across the whole beam.
        # NOTE(review): assumes Tensor.repeat tiles along the batch axis —
        # confirm against the paddle version in use.
        encoder_out = encoder_out.repeat(beam_size, 1, 1)
        encoder_mask = paddle.ones(
            (beam_size, 1, encoder_out.shape[1]), dtype=paddle.bool)
        decoder_out, _ = self.decoder(
            encoder_out, encoder_mask, hyps_pad,
            hyps_lens)  # (beam_size, max_hyps_len, vocab_size)
        # ctc score in ln domain
        decoder_out = paddle.nn.functional.log_softmax(decoder_out, axis=-1)
        decoder_out = decoder_out.numpy()
        # Only use decoder score for rescoring
        best_score = -float('inf')
        best_index = 0
        # hyps is List[(Text=List[int], Score=float)], len(hyps)=beam_size
        for i, hyp in enumerate(hyps):
            score = 0.0
            for j, w in enumerate(hyp[0]):
                score += decoder_out[i][j][w]
            # last decoder output token is `eos`, for laste decoder input token.
            score += decoder_out[i][len(hyp[0])][self.eos]
            # add ctc score (which in ln domain)
            score += hyp[1] * ctc_weight
            if score > best_score:
                best_score = score
                best_index = i
        return hyps[best_index][0]
#@jit.to_static
def subsampling_rate(self) -> int:
""" Export interface for c++ call, return subsampling_rate of the
model
"""
return self.encoder.embed.subsampling_rate
#@jit.to_static
def right_context(self) -> int:
""" Export interface for c++ call, return right_context of the model
"""
return self.encoder.embed.right_context
#@jit.to_static
def sos_symbol(self) -> int:
""" Export interface for c++ call, return sos symbol id of the model
"""
return self.sos
#@<EMAIL>.to_static
def eos_symbol(self) -> int:
""" Export interface for c++ call, return eos symbol id of the model
"""
return self.eos
@jit.to_static
def forward_encoder_chunk(
self,
xs: paddle.Tensor,
offset: int,
required_cache_size: int,
subsampling_cache: Optional[paddle.Tensor]=None,
elayers_output_cache: Optional[List[paddle.Tensor]]=None,
conformer_cnn_cache: Optional[List[paddle.Tensor]]=None,
) -> Tuple[paddle.Tensor, paddle.Tensor, List[paddle.Tensor], List[
paddle.Tensor]]:
""" Export interface for c++ call, give input chunk xs, and return
output from time 0 to current chunk.
Args:
xs (paddle.Tensor): chunk input
subsampling_cache (Optional[paddle.Tensor]): subsampling cache
elayers_output_cache (Optional[List[paddle.Tensor]]):
transformer/conformer encoder layers output cache
conformer_cnn_cache (Optional[List[paddle.Tensor]]): conformer
cnn cache
Returns:
paddle.Tensor: output, it ranges from time 0 to current chunk.
paddle.Tensor: subsampling cache
List[paddle.Tensor]: attention cache
List[paddle.Tensor]: conformer cnn cache
"""
return self.encoder.forward_chunk(
xs, offset, required_cache_size, subsampling_cache,
elayers_output_cache, conformer_cnn_cache)
# @jit.to_static
def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor:
""" Export interface for c++ call, apply linear transform and log
softmax before ctc
Args:
xs (paddle.Tensor): encoder output, (B, T, D)
Returns:
paddle.Tensor: activation before ctc
"""
return self.ctc.log_softmax(xs)
    @jit.to_static
    def forward_attention_decoder(
            self,
            hyps: paddle.Tensor,
            hyps_lens: paddle.Tensor,
            encoder_out: paddle.Tensor, ) -> paddle.Tensor:
        """ Export interface for c++ call, forward decoder with multiple
            hypothesis from ctc prefix beam search and one encoder output
        Args:
            hyps (paddle.Tensor): hyps from ctc prefix beam search, already
                pad sos at the begining, (B, T)
            hyps_lens (paddle.Tensor): length of each hyp in hyps, (B)
            encoder_out (paddle.Tensor): corresponding encoder output, (B=1, T, D)
        Returns:
            paddle.Tensor: decoder output log-probabilities, (B, L)
        """
        assert encoder_out.shape[0] == 1
        num_hyps = hyps.shape[0]
        assert hyps_lens.shape[0] == num_hyps
        # Share the single encoder output across all hypotheses.
        # NOTE(review): assumes Tensor.repeat tiles along the batch axis —
        # confirm against the paddle version in use.
        encoder_out = encoder_out.repeat(num_hyps, 1, 1)
        # (B, 1, T)
        encoder_mask = paddle.ones(
            [num_hyps, 1, encoder_out.shape[1]], dtype=paddle.bool)
        # (num_hyps, max_hyps_len, vocab_size)
        decoder_out, _ = self.decoder(encoder_out, encoder_mask, hyps,
                                      hyps_lens)
        # Scores in ln domain, as expected by the rescoring caller.
        decoder_out = paddle.nn.functional.log_softmax(decoder_out, axis=-1)
        return decoder_out
@paddle.no_grad()
def decode(self,
feats: paddle.Tensor,
feats_lengths: paddle.Tensor,
text_feature: Dict[str, int],
decoding_method: str,
beam_size: int,
ctc_weight: float=0.0,
decoding_chunk_size: int=-1,
num_decoding_left_chunks: int=-1,
simulate_streaming: bool=False):
"""u2 decoding.
Args:
feats (Tensor): audio features, (B, T, D)
feats_lengths (Tensor): (B)
text_feature (TextFeaturizer): text feature object.
decoding_method (str): decoding mode, e.g.
'attention', 'ctc_greedy_search',
'ctc_prefix_beam_search', 'attention_rescoring'
beam_size (int): beam size for search
| |
missing EC files
txt = MetabolicGraph.k.get("ec:" + valid_ecs)
try:
int(txt)
except ValueError:
open(joinP(dir_ec, "ec_" + name + ".txt"), "w").write(txt)
else: #Complete download. Possible improvement (?) : with bash check if number of ec_ files in EC_files/ is the same as 'grep -l "EC:" '+ self.directory + '/*|wc' ??
for fi in gene_files :
if fi.endswith("_gene.txt") :
fname = joinP(self.directory, fi)
KO, ECs = extract_ec_number(fname)
if len(ECs) > 0 :
name = ECs[1:].replace(" ", "_") #We don't gain much time since we parse every gene file...
if not os.path.exists(joinP(dir_ec, "ec_" + name + ".txt")) : #If not first time dowloading, will check only non valid ECs
txt = MetabolicGraph.k.get("ec:" + ECs)
try:
int(txt)
except ValueError:
all_ECs.append(ECs)
open(joinP(dir_ec, "ec_" + name + ".txt"), "w").write(txt)
else:
if ECs not in all_ECs :
all_ECs.append(ECs)
cpk.dump(all_ECs, open(joinP(self.directory, "backups/valid_EC_list.cpkl"), "wb"))
self.valid_ecs = all_ECs
self.dir_ec = dir_ec
def get_ecs_from_KOs(self, KO_list, dir_ec):
def extract_ec_number_KO(ko, ko_dict):
"""
Extracts EC (if found) from the definition field of a KEGG entry for a KO.
INPUT:
ko - Kegg Orthology (KO) code name, string
ko_dict - boolean or dict of KO keys and their associated ECs as values
OUTPUT:
ec_all - string of space-separated EC numbers, empty string otherwise
"""
try:
if ko in ko_dict.keys():
return ko_dict[ko]
except TypeError :
pass
txt = MetabolicGraph.k.get("ko:"+ko)
try :
int(txt)
return ""
except ValueError:
txt = txt.split("\n")
ec_all = ""
i = 0
line = txt[i]
while line != "" and not line.startswith("DEFINITION"):
i += 1
line = txt[i] #Skip lines until DEFINITION field reached
if line.startswith("DEFINITION"): #DEFINITION field exists
while line[0] == " " or line[0:5] == "DEFIN": #while still in DEFINITION field
line = line.lstrip("DEFINITION ") #Any of these characters are stripped from the beginning of str, order does not matter
i_ec = line.find("EC:")
if i_ec != -1: #There should be at least one EC
ec = line[i_ec+3:].split("]")[0] #Cropping first 3 characters ("EC:") and last ] of [EC:XXXXXXX] field
ECs = ec.split() #List of ECs
for EC in ECs:
if EC.find(".") != -1 : #EC confirmed
if EC not in ec_all :
ec_all += " " + EC
i += 1
line = txt[i]
return ec_all
logger.info("Fetching KEGG enzyme entries...")
all_ECs = [] #List of ECs with hits in KEGG db
if not os.path.exists(dir_ec):
logger.error("{} directory given in command does not exist! Check path (current one: {})".format(dir_ec, os.getcwd()))
raise SystemExit()
if not os.path.exists(self.directory) or not os.path.exists(joinP(self.directory, "backups/")):
os.makedirs(joinP(self.directory, "backups/"))
#Check if shortcut exists (if user has already run function once, and EC list has been saved)
if os.path.exists(joinP(self.directory, "backups/valid_EC_list.cpkl")) :
logger.info("Found a copy of the list of enzymes. Taking it as list of ECs...")
all_ECs = cpk.load(open(joinP(self.directory, "backups/valid_EC_list.cpkl"), "rb"))
for valid_ecs in all_ECs: #valid ECs taken from one of the gene files
name = valid_ecs[1:].replace(" ", "_")
if not os.path.exists(joinP(dir_ec, "ec_" + name + ".txt")) : #get missing EC files in global EC directory
logger.info("Fetching undownloaded EC files: {}".format(valid_ecs))
txt = MetabolicGraph.k.get("ec:" + valid_ecs)
try:
int(txt)
except ValueError:
open(joinP(dir_ec, "ec_" + name + ".txt"), "w").write(txt)
else: #Complete download
for ko in KO_list :
ECs = extract_ec_number_KO(ko, self.KO)
if len(ECs) > 0 :
name = ECs[1:].replace(" ", "_") #We don't gain much time since we parse every gene file...
if not os.path.exists(joinP(dir_ec, "ec_" + name + ".txt")) : #If not first time dowloading, will check only non valid ECs
txt = MetabolicGraph.k.get("ec:" + ECs)
try:
int(txt)
except ValueError:
all_ECs.append(ECs)
open(joinP(dir_ec, "ec_" + name + ".txt"), "w").write(txt)
else:
if ECs not in all_ECs :
all_ECs.append(ECs)
cpk.dump(all_ECs, open(joinP(self.directory, "backups/valid_EC_list.cpkl"), "wb"))
self.valid_ecs = all_ECs
self.dir_ec = dir_ec
def parse_enzymes(self) :
"""
Retrieves all KEGG enzyme records with Biopython parser. Saves them as cpickle
object for backup.
OUTPUT :
enzs - list of enzyme records
"""
enzs = []
logger.info("Parsing enzymes...")
if os.path.exists(joinP(self.directory, "EC_files")):
if os.path.exists(joinP(self.directory, "EC_files/enzs_parser_backup.cpkl")): #Gains only a few seconds...
enzs = cpk.load(open(joinP(self.directory, "EC_files/enzs_parser_backup.cpkl"), "rb"))
else:
for fi in sorted(os.listdir(joinP(self.directory, "EC_files"))):
if fi.startswith("ec_"):
enzs += list(Enzyme.parse(open(joinP(self.directory, "EC_files/", fi))))
else:
try:
if not os.path.exists(self.dir_ec):
logger.error("<{}> global EC directory does not exist! Check path (current one: {})".format(self.dir_ec, os.getcwd()))
raise SystemExit()
except AttributeError:
logger.error("self.dir_ec does not exist. Run get_ecs_from_KOs or get_ECs")
raise SystemExit()
if not self.valid_ecs and not os.path.exists(joinP(self.directory, "backups/valid_EC_list.cpkl")):
logger.error("Run get_ecs_from_KOs or get_ECs")
raise SystemExit()
if os.path.exists(joinP(self.directory, "backups/enzs_parser_backup.cpkl")): #Gains only a few seconds...
enzs = cpk.load(open(joinP(self.directory, "backups/enzs_parser_backup.cpkl"), "rb"))
else:
for ecs in sorted(self.valid_ecs):
name = ecs[1:].replace(" ", "_")
fi = joinP(self.dir_ec, "ec_" + name + ".txt")
try:
enzs += list(Enzyme.parse(open(fi)))
except IOError:
logger.error("<{}> file does not exist".format(fi))
raise SystemExit()
return enzs
def get_substrates_products(self, e, filtr, graphe):
"""
Finds unique substrate and products node ids and updates name equivalence dictionary.
May filter following compounds : water, ATP, ADP, NAD, NADH, NADPH, carbon dioxide,
ammonia, sulfate, thioredoxin, (ortho) phosphate (P), pyrophosphate (PPi), H+ and NADP.
Will consider as different compounds the metabolites that also appear in compounds that are
actually a list, or slightly different name versions of same metabolite.
INPUT:
e - KEGG enzyme/reaction entry parser (Biopython)
filtr - boolean. If True, filters list of ubiquitous metabolites.
graphe - determines to which graph these compounds need to be added
OUPUT:
subs - list of substrate node ids for given reaction, each being 10-char long
prod - list of product node ids for given reaction, each being 10-char long
"""
def extract_compound(comp) :
"""
Extracts compound code or first 10 characters if code is not present.
INPUT:
comp - string of compound
OUTPUT:
compound code or 10 first compound characters
i_cpd - -1 when no compound code
"""
i_cpd = comp.find('CPD:')
if i_cpd == -1:
return comp[:10].upper(), i_cpd #+/- random 10-char code
else:
return comp[i_cpd+4:].split("]")[0], i_cpd #CPD code
ubi_metab = ["C00001", "C00002", "C00008", "C00003", "C00004", "C00005",
"C00006", "C00011", "C00014", "C00059", "C00342", "C00009",
"C00013", "C00080"] #C00006 - NADP added
subs = [] #Substrate node ids
prod = [] #Product node ids
for s in e.substrate :
sub, i_cpd = extract_compound(s)
if filtr :
if sub in ubi_metab :
continue
if s not in graphe.node_name_equivalence.values(): #Check if substrate exists in our equivalence dictionary
i = 0
while sub in graphe.node_name_equivalence.keys() and i_cpd == -1 : #Check if by bad luck our random compound node id exists in dictionary. Compound code should be unique.
if s[i*10+10:] != "" :
sub, i_cpd = extract_compound(s[i*10+10:]) #Find new compound node id in name
else :
sub += str(i) #add number if no unique compound node id can be found
i += 1
graphe.node_name_equivalence[sub] = s
else:
sub = [k for k,name in graphe.node_name_equivalence.items() if name == s][0]
subs.append(sub)
for p in e.product :
prd, i_cpd = extract_compound(p)
if filtr :
if prd in ubi_metab :
continue
if p not in graphe.node_name_equivalence.values(): #Check if product exists in our equivalence dictionary
i = 0
while prd in graphe.node_name_equivalence.keys() and i_cpd == -1 : #Check if by bad luck our random compound node id exists
if p[i*10+10:] != "" :
prd, i_cpd = extract_compound(p[i*10+10:]) #Find new compound node id
else :
prd += str(i)
i += 1
graphe.node_name_equivalence[prd] = p
else:
prd = [k for k,name in graphe.node_name_equivalence.items() if name == p][0]
prod.append(prd)
return subs, prod
@staticmethod
def _get_reaction_code(e):
"""Finds KEGG reaction code(s) from reaction field in enzyme entry"""
rct = e.reaction
if len(rct) == 0:
return []
rct_codes = []
for r in rct:
i = r.find("[RN:")
if i != -1:
codes = r[i+4:].split("]")[0].split()
for code in codes:
if code[0] == "R" and len(code) == 6 and code[1:].isdigit():
rct_codes.append(code)
return rct_codes
def _get_rn_from_ec(self, ec_fi, rns):
"""Assign reaction(s) to EC in dict, from EC file"""
p_enzs = list(Enzyme.parse(open(joinP(self.dir_ec, ec_fi))))
for e in p_enzs:
rcts = self._get_reaction_code(e)
self.ec_rn[e.entry] = rcts
rns = rns.union(set(rcts))
return rns
def _get_all_reactions(self):
"""For all ECs in EC directory, get RN codes"""
if not os.path.exists(self.dir_ec) or len(os.listdir(self.dir_ec)) == 0:
logger.error("Empty EC directory (%s), get EC entries first, or correct directory path" %self.dir_ec)
raise SystemExit()
ecs = | |
<filename>tests/test_user_display.py
import hashlib
import hmac
import json
import uuid
from buildpg import V, Values
from datetime import date, datetime, timedelta, timezone
from foxglove import glove
from foxglove.db.helpers import SyncDb
from operator import itemgetter
from pytest_toolbox.comparison import RegexStr
from starlette.testclient import TestClient
from urllib.parse import urlencode
from src.schemas.messages import MessageStatus
def modify_url(url, settings, company='foobar'):
    """Append signed auth query params (company, expires, signature) to *url*."""
    params = dict(company=company, expires=round(datetime(2032, 1, 1).timestamp()))
    payload = '{company}:{expires}'.format(**params).encode()
    params['signature'] = hmac.new(settings.user_auth_key, payload, hashlib.sha256).hexdigest()
    separator = '&' if '?' in str(url) else '?'
    return str(url) + separator + urlencode(params)
def test_user_list(cli, settings, send_email, sync_db: SyncDb):
    """Listing returns only the signed company's messages, newest first, with
    the expected serialized fields on each item."""
    expected_msg_ids = []
    for i in range(4):
        uid = str(uuid.uuid4())
        # NOTE(review): f'{<EMAIL>' is a scrubbed/garbled recipient f-string —
        # restore the original address expression before running.
        send_email(uid=uid, company_code='whoever', recipients=[{'address': f'{<EMAIL>'}])
        expected_msg_ids.append(f'{uid}-{i}tcom')
    # Messages for other companies must not leak into the listing below.
    send_email(uid=str(uuid.uuid4()), company_code='different1')
    send_email(uid=str(uuid.uuid4()), company_code='different2')
    r = cli.get(modify_url('/messages/email-test/', settings, 'whoever'))
    assert r.status_code == 200, r.text
    data = r.json()
    assert data['count'] == 4
    msg_ids = [h['external_id'] for h in data['items']]
    # Newest first: reverse of insertion order.
    assert msg_ids == list(reversed(expected_msg_ids))
    first_item = data['items'][0]
    assert first_item == {
        'id': sync_db.fetchrow_b('select * from messages where :where', where=V('external_id') == expected_msg_ids[3])[
            'id'
        ],
        'external_id': expected_msg_ids[3],
        'to_ext_link': None,
        'to_address': '<EMAIL>',
        'to_dst': '<<EMAIL>>',
        'to_name': ' ',
        'send_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'update_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'status': 'Sent',
        'method': 'email-test',
        'subject': 'test message',
    }
def test_user_list_no_ext(cli, settings, send_email, sync_db: SyncDb):
    """A message whose external_id has been nulled out is still listed with
    all its other fields intact."""
    send_email(
        uid=str(uuid.uuid4()),
        company_code='testing',
        recipients=[{'address': '<EMAIL>'}],
        subject_template='test message',
    )
    sync_db.execute_b('update messages set external_id=null')
    resp = cli.get(modify_url('/messages/email-test/', settings, 'testing'))
    assert resp.status_code == 200, resp.text
    payload = resp.json()
    assert payload['count'] == 1
    row = payload['items'][0]
    assert row == {
        'id': sync_db.fetchrow_b('select * from messages')['id'],
        'external_id': None,
        'to_ext_link': None,
        'to_address': '<EMAIL>',
        'to_dst': '<<EMAIL>>',
        'to_name': ' ',
        'send_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'update_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'status': 'Sent',
        'method': 'email-test',
        'subject': 'test message',
    }
def test_user_search(cli, settings, send_email):
    """Subject search matches within the signed company only."""
    msgs = {}
    for i, subject in enumerate(['apple', 'banana', 'cherry', 'durian']):
        uid = str(uuid.uuid4())
        # NOTE(review): f'{<EMAIL>' is a scrubbed/garbled recipient f-string —
        # restore the original address expression before running.
        send_email(uid=uid, company_code='whoever', recipients=[{'address': f'{<EMAIL>'}], subject_template=subject)
        msgs[subject] = f'{uid}-{i}tcom'
    # A different company's message must not be searchable here.
    send_email(uid=str(uuid.uuid4()), company_code='different1', subject_template='eggplant')
    r = cli.get(modify_url('/messages/email-test/?q=cherry', settings, 'whoever'))
    assert r.status_code == 200, r.text
    data = r.json()
    assert data['count'] == 1
    item = data['items'][0]
    assert item['external_id'] == msgs['cherry']
    assert item['subject'] == 'cherry'
    # 'eggplant' exists only under another company: zero hits expected.
    r = cli.get(modify_url('/messages/email-test/?q=eggplant', settings, 'whoever'))
    assert r.status_code == 200, r.text
    data = r.json()
    assert data['count'] == 0
def test_user_search_space(cli, settings, send_email):
    """A URL-encoded space in the query must not widen the subject match."""
    for subject in ('foobar', 'bar', 'foo bar'):
        send_email(
            company_code='testing',
            recipients=[{'address': '<EMAIL>'}],
            subject_template=subject,
        )
    for query in ('q=foobar', 'q=foo%20bar'):
        resp = cli.get(modify_url('/messages/email-test/?' + query, settings, 'testing'))
        assert resp.status_code == 200, resp.text
        assert resp.json()['count'] == 1
def test_pagination(cli, settings, send_email):
for i in range(110):
send_email(
uid=str(uuid.uuid4()),
company_code='testing',
recipients=[{'address': f'{<EMAIL>'}],
subject_template='foobar',
)
for i in range(20):
send_email(
uid=str(uuid.uuid4()),
company_code='testing',
recipients=[{'address': f'{<EMAIL>'}],
subject_template='barfoo',
)
r = cli.get(modify_url('/messages/email-test/', settings, 'testing'))
assert r.status_code == 200, r.text
data = r.json()
first_item = data['items'][0]
assert len(data['items']) == 100
assert data['count'] == 130
r = cli.get(modify_url('/messages/email-test/', settings, 'testing') + '&offset=100')
assert r.status_code == 200, r.text
data = r.json()
assert first_item not in data['items']
assert len(data['items']) == 30
assert data['count'] == 130
def test_user_aggregate(cli, settings, send_email, sync_db: SyncDb, loop, worker):
for i in range(4):
send_email(uid=str(uuid.uuid4()), company_code='user-aggs', recipients=[{'address': f'{<EMAIL>'}])
msg_id = send_email(uid=str(uuid.uuid4()), company_code='user-aggs', recipients=[{'address': f'{<EMAIL>'}])
data = {'ts': int(2e10), 'event': 'open', '_id': msg_id, 'user_agent': 'testincalls'}
cli.post('/webhook/test/', json=data)
send_email(uid=str(uuid.uuid4()), company_code='different')
loop.run_until_complete(glove.redis.enqueue_job('update_aggregation_view'))
worker.test_run()
assert sync_db.fetchval('select count(*) from messages') == 6
r = cli.get(modify_url('/messages/email-test/aggregation/', settings, 'user-aggs'))
assert r.status_code == 200, r.text
data = r.json()
histogram = data.pop('histogram')
assert data == {
'all_90_day': 5,
'open_90_day': 1,
'all_7_day': 5,
'open_7_day': 1,
'all_28_day': 5,
'open_28_day': 1,
}
assert sorted(histogram, key=itemgetter('count')) == [
{'count': 1, 'day': f'{date.today():%Y-%m-%d}', 'status': 'Opened'},
{'count': 4, 'day': f'{date.today():%Y-%m-%d}', 'status': 'Sent'},
]
def test_user_aggregate_no_data(cli, settings, sync_db: SyncDb):
    """A company with no messages gets an all-zero aggregation and an empty histogram."""
    sync_db.execute('insert into companies (code) values ($1)', 'testing')
    response = cli.get(modify_url('/messages/email-test/aggregation/', settings, 'testing'))
    assert response.status_code == 200, response.text
    assert response.json() == {
        'histogram': [],
        'all_90_day': 0,
        'open_90_day': 0,
        'all_7_day': 0,
        'open_7_day': 0,
        'all_28_day': 0,
        'open_28_day': 0,
    }
def test_user_tags(cli, settings, send_email):
    """Messages are filterable by message-level and recipient-level tags, ANDed together."""
    uid1 = str(uuid.uuid4())
    send_email(
        uid=uid1,
        company_code='tagtest',
        tags=['trigger:broadcast', 'broadcast:123'],
        recipients=[
            {'address': '<EMAIL>', 'tags': ['user:1', 'shoesize:10']},
            {'address': '<EMAIL>', 'tags': ['user:2', 'shoesize:8']},
        ],
    )
    uid2 = str(uuid.uuid4())
    send_email(
        uid=uid2,
        company_code='tagtest',
        tags=['trigger:other'],
        recipients=[
            {'address': '<EMAIL>', 'tags': ['user:3', 'shoesize:10']},
            {'address': '<EMAIL>', 'tags': ['user:4', 'shoesize:8']},
        ],
    )
    # Other companies' messages must never match, whatever the tags.
    send_email(uid=str(uuid.uuid4()), company_code='different1')
    send_email(uid=str(uuid.uuid4()), company_code='different2')

    # Message-level tag: both recipients of the first message match.
    response = cli.get(modify_url('/messages/email-test/', settings, 'tagtest') + '&tags=broadcast:123')
    assert response.status_code == 200, response.text
    payload = response.json()
    assert payload['count'] == 2, json.dumps(payload, indent=2)
    assert {item['external_id'] for item in payload['items']} == {f'{uid1}-1tcom', f'{uid1}-2tcom'}

    # Recipient-level tag: exactly one recipient matches.
    response = cli.get(modify_url('/messages/email-test/', settings, 'tagtest') + '&tags=user:2')
    assert response.status_code == 200, response.text
    payload = response.json()
    assert payload['count'] == 1, json.dumps(payload, indent=2)
    assert payload['items'][0]['external_id'] == f'{uid1}-2tcom'

    # Multiple tags are combined with AND.
    response = cli.get(modify_url('/messages/email-test/', settings, 'tagtest') + '&tags=trigger:other&tags=shoesize:8')
    assert response.status_code == 200, response.text
    payload = response.json()
    assert payload['count'] == 1
    assert payload['items'][0]['external_id'] == f'{uid2}-4tcom'
def test_search_emails(cli: TestClient, send_email, settings):
    """Free-text search matches a recipient address; tsquery operators in the term find nothing."""
    send_email(recipients=[{'address': '<EMAIL>'}, {'address': '<EMAIL>'}])
    assert cli.get(modify_url('/messages/email-test/', settings)).json()['count'] == 2
    assert cli.get(modify_url(f'/messages/email-test/?{urlencode({"q": "<EMAIL>"})}', settings)).json()['count'] == 1
    assert cli.get(modify_url(f'/messages/email-test/?{urlencode({"q": "bon & jovi"})}', settings)).json()['count'] == 0
def test_message_details(cli, settings, send_email, sync_db: SyncDb, worker, loop):
    """The detail endpoint returns the full message record including its event history."""
    msg_ext_id = send_email(company_code='test-details')
    webhook_payload = {'ts': int(1e10), 'event': 'open', '_id': msg_ext_id, 'user_agent': 'test<PASSWORD>'}
    response = cli.post('/webhook/test/', json=webhook_payload)
    assert response.status_code == 200, response.text
    assert worker.test_run() == 2

    message_id = sync_db.fetchval_b('select id from messages where :where', where=V('external_id') == msg_ext_id)
    response = cli.get(modify_url(f'/messages/email-test/{message_id}/', settings, 'test-details'))
    assert response.status_code == 200, response.text
    details = response.json()
    # 'details' is free-form per-event data; drop it so the comparison is stable.
    details['events'][0].pop('details')
    assert details == {
        'id': message_id,
        'external_id': msg_ext_id,
        'to_ext_link': None,
        'to_address': '<EMAIL>',
        'to_dst': '<<EMAIL>>',
        'to_name': ' ',
        'send_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'subject': 'test message',
        'update_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'status': 'Opened',
        'method': 'email-test',
        'body': '<body>\nthis is a test\n</body>',
        'events': [{'status': 'Opened', 'datetime': RegexStr(r'\d{4}-\d{2}-\d{2}.*')}],
        'attachments': [],
    }
def test_message_details_links(cli, settings, send_email, sync_db: SyncDb, worker, loop):
    """Recipient user links and PDF attachments are rendered in the detail view."""
    msg_ext_id = send_email(
        company_code='test-details',
        recipients=[
            {
                'first_name': 'Foo',
                'last_name': 'Bar',
                'user_link': '/whatever/123/',
                'address': '<EMAIL>',
                'pdf_attachments': [
                    {'name': 'testing.pdf', 'html': '<h1>testing</h1>', 'id': 123},
                    {'name': 'different.pdf', 'html': '<h1>different</h1>'},
                ],
            }
        ],
    )
    message_id = sync_db.fetchval_b('select id from messages where :where', where=V('external_id') == msg_ext_id)
    response = cli.post('/webhook/test/', json={'ts': int(2e12), 'event': 'open', '_id': msg_ext_id, 'user_agent': 'testincalls'})
    assert response.status_code == 200, response.text
    assert worker.test_run() == 2

    response = cli.get(modify_url(f'/messages/email-test/{message_id}/', settings, 'test-details'))
    assert response.status_code == 200, response.text
    details = response.json()
    # Free-form event detail payload is not stable; drop it before comparing.
    details['events'][0].pop('details')
    assert details == {
        'id': message_id,
        'external_id': msg_ext_id,
        'to_ext_link': '/whatever/123/',
        'to_address': '<EMAIL>',
        'to_dst': 'Foo Bar <<EMAIL>>',
        'to_name': '<NAME>',
        'send_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'subject': 'test message',
        'update_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
        'status': 'Opened',
        'body': '<body>\nthis is a test\n</body>',
        'method': 'email-test',
        'events': [{'status': 'Opened', 'datetime': RegexStr(r'\d{4}-\d{2}-\d{2}.*')}],
        # An attachment with an id gets a download link; one without falls back to '#'.
        'attachments': [['/attachment-doc/123/', 'testing.pdf'], ['#', 'different.pdf']],
    }
def test_no_event_data(cli, settings, send_email, sync_db: SyncDb):
    """Events inserted without extra detail still render with status and timestamp only."""
    msg_ext_id = send_email(
        company_code='test-details', recipients=[{'first_name': 'Foo', 'address': '<EMAIL>'}]
    )
    message_id = sync_db.fetchval_b('select id from messages where :where', where=V('external_id') == msg_ext_id)
    base_ts = datetime(2032, 6, 1)
    sync_db.executemany_b(
        'insert into events (:values__names) values :values',
        [
            Values(
                ts=(base_ts + timedelta(days=n, hours=n * 2)).replace(tzinfo=timezone.utc),
                message_id=message_id,
                status=MessageStatus.send,
            )
            for n in range(3)
        ],
    )
    response = cli.get(modify_url(f'/messages/email-test/{message_id}/', settings, 'test-details'))
    assert response.json()['events'] == [
        {'status': 'Sent', 'datetime': '2032-06-01T00:00:00+00:00'},
        {'status': 'Sent', 'datetime': '2032-06-02T02:00:00+00:00'},
        {'status': 'Sent', 'datetime': '2032-06-03T04:00:00+00:00'},
    ]
def test_invalid_message_id(cli, sync_db: SyncDb, settings, send_email):
    """Unknown companies and non-existent message ids both produce a 404."""
    msg_ext_id = send_email(
        company_code='test-details', recipients=[{'first_name': 'Foo', 'address': '<EMAIL>'}]
    )
    message_id = sync_db.fetchval_b('select id from messages where :where', where=V('external_id') == msg_ext_id)
    # Right message, wrong company.
    assert cli.get(modify_url(f'/messages/email-test/{message_id}/', settings, 'not_real_company')).status_code == 404
    # Right company, wrong message id.
    assert cli.get(modify_url(f'/messages/email-test/999{message_id}/', settings, 'test-details')).status_code == 404
def test_many_events(cli, settings, send_email, sync_db: SyncDb):
    """Long event histories are truncated to 50 rows plus an 'N more' marker row."""
    msg_ext_id = send_email(
        company_code='test-details', recipients=[{'first_name': 'Foo', 'address': '<EMAIL>'}]
    )
    message_id = sync_db.fetchval_b('select id from messages where :where', where=V('external_id') == msg_ext_id)
    base_ts = datetime(2032, 6, 1)
    sync_db.executemany_b(
        'insert into events (:values__names) values :values',
        [
            Values(
                ts=(base_ts + timedelta(days=n, hours=n * 2)).replace(tzinfo=timezone.utc),
                message_id=message_id,
                status=MessageStatus.send,
                extra=json.dumps({'foo': 'bar', 'v': n}),
            )
            for n in range(55)
        ],
    )
    response = cli.get(modify_url(f'/messages/email-test/{message_id}/', settings, 'test-details'))
    assert response.status_code == 200, response.text
    events = response.json()['events']
    assert len(events) == 51
    assert sync_db.fetchval('select count(*) from events') == 55
    # The 51st row summarises the overflow rather than showing a real event.
    assert events[-1]['status'] == '5 more'
def test_user_sms_list(cli, settings, send_sms, sync_db: SyncDb):
    """The SMS list is scoped to the requesting company and reports total spend."""
    ext_id = send_sms(company_code='snapcrap')
    # Another company's message must not appear in the listing below.
    send_sms(uid=str(uuid.uuid4()), company_code='flip')
    response = cli.get(modify_url('/messages/sms-test/', settings, 'snapcrap'))
    assert response.status_code == 200, response.text
    assert response.json() == {
        'items': [
            {
                'id': 1,
                'external_id': ext_id,
                'to_ext_link': None,
                'to_address': '+44 7896 541236',
                'to_dst': '<+44 7896 541236>',
                'to_name': ' ',
                'subject': 'this is a test apples',
                'send_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
                'update_ts': RegexStr(r'\d{4}-\d{2}-\d{2}.*'),
                'status': 'Sent',
                'method': 'sms-test',
            },
        ],
        'count': 1,
        'spend': 0.012,
    }
def test_valid_signature(cli, settings, sync_db: SyncDb):
    """A correctly HMAC-SHA256-signed, unexpired request is accepted."""
    sync_db.execute('insert into companies (code) values ($1)', 'whatever')
    params = dict(company='whatever', expires=round(datetime(2032, 1, 1).timestamp()))
    signed_body = '{company}:{expires}'.format(**params).encode()
    params['signature'] = hmac.new(settings.user_auth_key, signed_body, hashlib.sha256).hexdigest()
    response = cli.get('/messages/email-test/?' + urlencode(params))
    assert response.status_code == 200, response.text
def test_invalid_signature(cli, settings):
args = dict(company='whatever', expires=round(datetime(2032, 1, 1).timestamp()))
body = '{company}:{expires}'.format(**args).encode()
args['signature'] = hmac.new(settings.user_auth_key, | |
partition_info = [(123, 'pepper', 't1', 4444), (234, 'pepper', 't2', 5555)]
expected_output = ['pepper, t1, 10000000000000000', 'pepper, t2, 10000000000000000']
with self.assertRaisesRegexp(Exception, 'Exceeded backup max tuple count of 1 quadrillion rows per table for:'):
get_partition_state(master_port, dbname, 'pg_aoseg', partition_info)
@patch('gppylib.operations.dump.dbconn.DbURL')
@patch('gppylib.operations.dump.dbconn.connect')
@patch('gppylib.operations.dump.execSQLForSingleton', return_value='100')
def test_get_partition_state_with_more_than_thousand_partition(self, mock1, mock2, mock3):
    """get_partition_state handles partition lists beyond the 1000-entry batching boundary."""
    partition_info = [(123, 'pepper', 't1', 4444), (234, 'pepper', 't2', 5555)] * 1000
    expected = ['pepper, t1, 100', 'pepper, t2, 100'] * 1000
    result = get_partition_state(5432, 'testdb', 'pg_aoseg', partition_info)
    self.assertEqual(result, expected)
def test_get_filename_from_filetype_00(self):
    """'ao' table type resolves to the ao state file in the default db_dumps directory."""
    datadir = 'foo'
    ts_key = '20121212010101'
    self.create_backup_dirs(dump_dirs=['20121212'])
    expected = '%s/db_dumps/20121212/gp_dump_%s_ao_state_file' % (datadir, ts_key)
    actual = get_filename_from_filetype('ao', datadir, None, self.dumper.dump_dir, self.dumper.dump_prefix, ts_key)
    self.assertEqual(actual, expected)
    self.remove_backup_dirs(dump_dirs=['20121212'])
def test_get_filename_from_filetype_01(self):
    """'co' table type resolves to the co state file in the default db_dumps directory."""
    datadir = 'foo'
    ts_key = '20121212010101'
    self.create_backup_dirs(dump_dirs=['20121212'])
    expected = '%s/db_dumps/20121212/gp_dump_%s_co_state_file' % (datadir, ts_key)
    actual = get_filename_from_filetype('co', datadir, None, self.dumper.dump_dir, self.dumper.dump_prefix, ts_key)
    self.assertEqual(actual, expected)
    self.remove_backup_dirs(dump_dirs=['20121212'])
def test_get_filename_from_filetype_01_with_ddboost(self):
    """With ddboost enabled the ao state file lives under the DDBoost dump directory."""
    # NOTE(review): name says _01 but this exercises 'ao' while _02_with_ddboost
    # exercises 'co' — looks like the suffixes were swapped; confirm before renaming.
    ts_key = '20121212010101'
    self.create_backup_dirs(dump_dirs=['20121212'])
    expected = 'foo/backup/DCA-35/20121212/gp_dump_%s_ao_state_file' % (ts_key)
    actual = get_filename_from_filetype('ao', 'foo', None, 'backup/DCA-35', self.dumper.dump_prefix, ts_key, True)
    self.assertEqual(actual, expected)
    self.remove_backup_dirs(dump_dirs=['20121212'])
def test_get_filename_from_filetype_02(self):
    """An unrecognised table type raises an 'Invalid table type' exception."""
    with self.assertRaisesRegexp(Exception, 'Invalid table type *'):
        get_filename_from_filetype('foo', '/foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
def test_get_filename_from_filetype_02_with_ddboost(self):
    """With ddboost enabled the co state file lives under the DDBoost dump directory."""
    ts_key = '20121212010101'
    self.create_backup_dirs(dump_dirs=['20121212'])
    expected = 'foo/backup/DCA-35/20121212/gp_dump_%s_co_state_file' % (ts_key)
    actual = get_filename_from_filetype('co', 'foo', None, 'backup/DCA-35', self.dumper.dump_prefix, ts_key, True)
    self.assertEqual(actual, expected)
    self.remove_backup_dirs(dump_dirs=['20121212'])
def test_write_state_file_00(self):
    """write_state_file rejects unknown table types with an 'Invalid table type' error."""
    partition_list = ['pepper, t1, 100', 'pepper, t2, 100']
    # 'foo' is not a valid table type ('ao'/'co'), so the call must raise.
    # (Removed unused `timestamp_key` local: write_state_file does not take it.)
    with self.assertRaisesRegexp(Exception, 'Invalid table type *'):
        write_state_file('foo', '/foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, partition_list)
@patch('gppylib.operations.dump.get_filename_from_filetype', return_value='/tmp/db_dumps/20121212/gp_dump_20121212010101_ao_state_file')
def test_write_state_file_01(self, mock1):
    """write_state_file creates the ao state file at the path chosen by get_filename_from_filetype."""
    # (Removed unused `timestamp_key` local: the patched helper supplies the path.)
    partition_list = ['pepper, t1, 100', 'pepper, t2, 100']
    self.create_backup_dirs(top_dir='/tmp', dump_dirs=['20121212'])
    write_state_file('ao', 'foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, partition_list)
    if not os.path.isfile('/tmp/db_dumps/20121212/gp_dump_20121212010101_ao_state_file'):
        raise Exception('AO state file was not created successfully')
    self.remove_backup_dirs(top_dir='/tmp', dump_dirs=['20121212'])
@patch('gppylib.operations.dump.execute_sql', return_value=[['public', 'ao_table', 123, 'CREATE', 'table', '2012: 1'], ['pepper', 'co_table', 333, 'TRUNCATE', '', '2033 :1 - 111']])
def test_get_last_operation_data_00(self, mock):
    """Each 6-column pg_stat_last_operation row is joined into one comma-separated line."""
    output = get_last_operation_data(1, 'foodb')
    expected = ['public,ao_table,123,CREATE,table,2012: 1', 'pepper,co_table,333,TRUNCATE,,2033 :1 - 111']
    # assertEqual: `assertEquals` is a deprecated alias.
    self.assertEqual(output, expected)
@patch('gppylib.operations.dump.execute_sql', return_value=[])
def test_get_last_operation_data_01(self, mock):
    """An empty query result yields an empty operation list."""
    output = get_last_operation_data(1, 'foodb')
    # assertEqual: `assertEquals` is a deprecated alias.
    self.assertEqual(output, [])
@patch('gppylib.operations.dump.execute_sql', return_value=[[123, 'table', '2012: 1'], [333, 'TRUNCATE', '', '2033 :1 - 111']])
def test_get_last_operation_data_02(self, mock):
    """Rows with the wrong column count are rejected with 'Invalid return from query'."""
    with self.assertRaisesRegexp(Exception, 'Invalid return from query'):
        get_last_operation_data(1, 'foodb')
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212121212')
@patch('gppylib.operations.dump.os.path.isfile', return_value=True)
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['pepper, t1, 100', 'pepper, t2, 100'])
def test_get_last_state_00(self, mock1, mock2, mock3):
    """When the ao state file exists its lines are returned verbatim."""
    expected = ['pepper, t1, 100', 'pepper, t2, 100']
    actual = get_last_state('ao', 'foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
    self.assertEqual(actual, expected)
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212121212')
@patch('gppylib.operations.dump.os.path.isfile', return_value=False)
@patch('gppylib.operations.dump.generate_ao_state_filename', return_value='foo')
def test_get_last_state_01(self, mock1, mock2, mock3):
    """A missing ao state file raises with the offending filename in the message."""
    with self.assertRaisesRegexp(Exception, 'ao state file does not exist: foo'):
        get_last_state('ao', '/foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212121212')
@patch('gppylib.operations.dump.os.path.isfile', return_value=True)
@patch('gppylib.operations.dump.get_lines_from_file', return_value=[])
def test_get_last_state_02(self, mock1, mock2, mock3):
    """An empty ao state file yields an empty list rather than an error."""
    actual = get_last_state('ao', 'foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
    self.assertEqual(actual, [])
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212121212')
@patch('gppylib.operations.dump.os.path.isfile', return_value=True)
@patch('gppylib.operations.dump.get_lines_from_file', return_value=[])
@patch('gppylib.operations.dump.check_file_dumped_with_nbu', return_value=True)
@patch('gppylib.operations.dump.restore_file_with_nbu')
def test_get_last_state_03(self, mock1, mock2, mock3, mock4, mock5):
    """With NetBackup configured, the state file is restored via NBU before being read."""
    actual = get_last_state('ao', 'foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', "mdw", "1024")
    self.assertEqual(actual, [])
def test_compare_dict_00(self):
    """A changed row count shows up as a dirty table."""
    previous = {'pepper.t1': '100', 'pepper.t2': '200'}
    current = {'pepper.t1': '200', 'pepper.t2': '200'}
    self.assertEqual(compare_dict(previous, current), set(['pepper.t1']))
def test_compare_dict_01(self):
    """A table present only in the old state is not reported; a changed count is."""
    previous = {'pepper.t1': '100', 'pepper.t2': '200', 'pepper.t3': '300'}
    current = {'pepper.t1': '100', 'pepper.t2': '100'}
    self.assertEqual(compare_dict(previous, current), set(['pepper.t2']))
def test_compare_dict_02(self):
    """A table that is new in the current state is reported as dirty."""
    previous = {'pepper.t1': '100', 'pepper.t2': '200'}
    current = {'pepper.t1': '100', 'pepper.t2': '200', 'pepper.t3': '300'}
    self.assertEqual(compare_dict(previous, current), set(['pepper.t3']))
def test_compare_dict_03(self):
    """Identical states produce no dirty tables."""
    previous = {'pepper.t1': '100', 'pepper.t2': '200'}
    current = {'pepper.t1': '100', 'pepper.t2': '200'}
    self.assertEqual(compare_dict(previous, current), set([]))
def test_create_partition_dict_00(self):
    """State-file lines 'schema, table, count' become {'schema.table': count}."""
    lines = ['pepper, t1, 100', 'pepper, t2, 200']
    self.assertEqual(create_partition_dict(lines), {'pepper.t1': '100', 'pepper.t2': '200'})
def test_create_partition_dict_01(self):
    """An empty partition list yields an empty dict."""
    self.assertEqual(create_partition_dict([]), {})
def test_create_partition_dict_02(self):
    """A line without the expected comma separators raises 'Invalid state file format'."""
    with self.assertRaisesRegexp(Exception, 'Invalid state file format *'):
        create_partition_dict(['pepper t1 100'])
@patch('gppylib.operations.dump.generate_increments_filename')
@patch('gppylib.operations.dump.os.path.isdir', return_value=False)
@patch('gppylib.operations.dump.os.path.isfile', return_value=False)
def test_get_last_dump_timestamp_00(self, mock1, mock2, mock3):
    """With no increments file present, the full dump timestamp itself is returned."""
    full_timestamp = '20121212010101'
    result = get_last_dump_timestamp('/foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, full_timestamp)
    self.assertEqual(result, full_timestamp)
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['20121212121210', '20121212121211'])
@patch('gppylib.operations.dump.generate_increments_filename')
@patch('gppylib.operations.dump.os.path.isdir', return_value=True)
@patch('gppylib.operations.dump.os.path.isfile', return_value=True)
def test_get_last_dump_timestamp_01(self, mock1, mock2, mock3, mock4):
    """With several increments recorded, the most recent (last) timestamp wins."""
    result = get_last_dump_timestamp('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
    self.assertEqual(result, '20121212121211')
@patch('gppylib.operations.dump.generate_increments_filename')
@patch('gppylib.operations.dump.os.path.isdir', return_value=True)
@patch('gppylib.operations.dump.os.path.isfile', return_value=True)
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['2012093009300q'])
def test_get_last_dump_timestamp_02(self, mock1, mock2, mock3, mock4):
    """A malformed (non-numeric) timestamp in the increments file raises an error."""
    # (Removed the unused `expected` local: this path raises, nothing is compared.)
    with self.assertRaisesRegexp(Exception, 'get_last_dump_timestamp found invalid ts in file'):
        get_last_dump_timestamp('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
@patch('gppylib.operations.dump.generate_increments_filename')
@patch('gppylib.operations.dump.os.path.isdir', return_value=True)
@patch('gppylib.operations.dump.os.path.isfile', return_value=True)
@patch('gppylib.operations.dump.get_lines_from_file', return_value=[' 20120930093000 \n \n '])
def test_get_last_dump_timestamp_03(self, mock1, mock2, mock3, mock4):
    """Surrounding whitespace in the increments file is stripped from the timestamp."""
    result = get_last_dump_timestamp('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101')
    self.assertEqual(result, '20120930093000')
@patch('gppylib.operations.dump.generate_increments_filename')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=[' 20120930093000 \n \n '])
@patch('gppylib.operations.dump.check_file_dumped_with_nbu', return_value=True)
@patch('gppylib.operations.dump.restore_file_with_nbu')
@patch('gppylib.operations.dump.os.path.exists', return_value=True)
def test_get_last_dump_timestamp_04(self, mock1, mock2, mock3, mock4, mock5):
    """When the increments file was dumped with NetBackup, it is restored and then read."""
    result = get_last_dump_timestamp('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', "mdw", "1024")
    self.assertEqual(result, '20120930093000')
@patch('gppylib.operations.dump.generate_increments_filename')
@patch('gppylib.operations.dump.check_file_dumped_with_nbu', return_value=False)
def test_get_last_dump_timestamp_05(self, mock1, mock2):
    """If NetBackup has no copy of the increments file, fall back to the full dump timestamp."""
    result = get_last_dump_timestamp('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', "mdw", "1024")
    self.assertEqual(result, '20121212010101')
def test_get_pgstatlastoperations_dict_00(self):
    """A single operation line is keyed on its (oid, operation) pair."""
    lines = ['public,t1,1234,ALTER,,201212121212:101010']
    expected = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010'}
    self.assertEqual(get_pgstatlastoperations_dict(lines), expected)
def test_get_pgstatlastoperations_dict_01(self):
    """The same oid with different operations produces distinct keys."""
    lines = ['public,t1,1234,ALTER,,201212121212:101010', 'public,t2,1234,VACCUM,TRUNCATE,201212121212:101015']
    expected = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010',
                ('1234', 'VACCUM'): 'public,t2,1234,VACCUM,TRUNCATE,201212121212:101015'}
    self.assertEqual(get_pgstatlastoperations_dict(lines), expected)
def test_get_pgstatlastoperations_dict_02(self):
    """No operation lines yields an empty dict."""
    self.assertEqual(get_pgstatlastoperations_dict([]), {})
def test_get_pgstatlastoperations_dict_03(self):
    """A line with the wrong number of comma-separated tokens is rejected."""
    lines = ['public,t1,1234,ALTER,,201212121212:101010', '2345,VACCUM,TRUNCATE,201212121212:101015']
    with self.assertRaisesRegexp(Exception, 'Wrong number of tokens in last_operation data for last backup'):
        get_pgstatlastoperations_dict(lines)
def test_compare_metadata_00(self):
    """Identical operation rows between backups leave no dirty tables."""
    old_metadata = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010'}
    cur_metadata = ['public,t1,1234,ALTER,,201212121212:101010']
    # assertEqual: `assertEquals` is a deprecated alias.
    self.assertEqual(compare_metadata(old_metadata, cur_metadata), set())
def test_compare_metadata_01(self):
    """A changed operation type (ALTER -> TRUNCATE) marks the table dirty."""
    old_metadata = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010'}
    cur_metadata = ['public,t1,1234,TRUNCATE,,201212121212:101010']
    # assertEqual: `assertEquals` is a deprecated alias.
    self.assertEqual(compare_metadata(old_metadata, cur_metadata), set(['public.t1']))
def test_compare_metadata_02(self):
    """A changed operation timestamp marks the table dirty."""
    old_metadata = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010'}
    cur_metadata = ['public,t1,1234,ALTER,,201212121212:102510']
    # assertEqual: `assertEquals` is a deprecated alias.
    self.assertEqual(compare_metadata(old_metadata, cur_metadata), set(['public.t1']))
def test_compare_metadata_03(self):
    """An additional operation on an otherwise unchanged table marks it dirty."""
    old_metadata = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010'}
    cur_metadata = ['public,t1,1234,ALTER,,201212121212:101010', 'public,t1,1234,TRUNCATE,,201212121212:101010']
    # assertEqual: `assertEquals` is a deprecated alias.
    self.assertEqual(compare_metadata(old_metadata, cur_metadata), set(['public.t1']))
def test_compare_metadata_04(self):
    """A current-backup row with a trailing extra token is rejected."""
    old_metadata = {('1234', 'ALTER'): 'public,t1,1234,ALTER,,201212121212:101010'}
    cur_metadata = ['public,t1,1234,ALTER,,201212121212:101010,']
    with self.assertRaisesRegexp(Exception, 'Wrong number of tokens in last_operation data for current backup'):
        compare_metadata(old_metadata, cur_metadata)
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212010100')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=[])
def test_get_tables_with_dirty_metadata_00(self, mock1, mock2):
    """No stored operations and no current operations means nothing is dirty."""
    dirty_tables = get_tables_with_dirty_metadata('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', [])
    self.assertEqual(dirty_tables, set())
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212010100')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['public,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510'])
def test_get_tables_with_dirty_metadata_01(self, mock1, mock2):
    """Current operations identical to the stored ones leave no table dirty."""
    cur_pgstatoperations = ['public,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510']
    dirty_tables = get_tables_with_dirty_metadata('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', cur_pgstatoperations)
    self.assertEqual(dirty_tables, set())
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212010100')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['public,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102511'])
def test_get_tables_with_dirty_metadata_02(self, mock1, mock2):
    """A timestamp drift on one stored row marks only that table dirty."""
    cur_pgstatoperations = ['public,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510']
    dirty_tables = get_tables_with_dirty_metadata('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', cur_pgstatoperations)
    self.assertEqual(dirty_tables, set(['pepper.t2']))
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212010100')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['pepper,t2,2234,TRUNCATE,,201212121213:102510'])
def test_get_tables_with_dirty_metadata_03(self, mock1, mock2):
    """A current operation absent from the stored set marks that table dirty."""
    cur_pgstatoperations = ['public,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510']
    dirty_tables = get_tables_with_dirty_metadata('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', cur_pgstatoperations)
    self.assertEqual(dirty_tables, set(['public.t1']))
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212010100')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['pepper,t1,2234,TRUNCATE,,201212121213:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510'])
def test_get_tables_with_dirty_metadata_04(self, mock1, mock2):
    """Multiple mismatched current operations report every affected table."""
    cur_pgstatoperations = ['pepper,t2,1234,ALTER,CHANGE COLUMN,201212121212:102510',
                            'pepper,t2,2234,TRUNCATE,,201212121213:102510',
                            'public,t3,2234,TRUNCATE,,201212121213:102510']
    dirty_tables = get_tables_with_dirty_metadata('foo', None, self.dumper.dump_dir, self.dumper.dump_prefix, '20121212010101', cur_pgstatoperations)
    self.assertEqual(dirty_tables, set(['pepper.t2', 'public.t3']))
@patch('gppylib.operations.dump.get_last_dump_timestamp', return_value='20121212010100')
@patch('gppylib.operations.dump.get_lines_from_file', return_value=['pepper,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510'])
def test_get_tables_with_dirty_metadata_05(self, mock1, mock2):
expected_output = set(['public.t1'])
master_datadir = 'foo'
backup_dir = None
full_timestamp = '20121212010101'
cur_pgstatoperations = ['public,t1,1234,ALTER,CHANGE COLUMN,201212121212:102510', 'pepper,t2,2234,TRUNCATE,,201212121213:102510']
dirty_tables = get_tables_with_dirty_metadata(master_datadir, backup_dir, self.dumper.dump_dir, self.dumper.dump_prefix, full_timestamp, cur_pgstatoperations)
| |
# Advent of Code 2021, day 18: snailfish number arithmetic.
import json
import math
from typing import Union, Tuple, NamedTuple, Optional, List
import pytest
@pytest.mark.parametrize(
    "input_string, expected",
    [
        # A bare number parses to itself.
        ("1", [1]),
        # A pair parses to a tuple.
        ("[1,2]", [(1, 2)]),
        # Multiple lines parse to one nested tuple per line.
        ("[[[7,1],2],3]\n[[1,7],7]", [(((7, 1), 2), 3), ((1, 7), 7)]),
    ],
)
def test_parse_input(input_string, expected):
    # Each input line should parse to a nested-tuple snailfish number.
    assert parse_input(input_string) == expected
def parse_input(input_string: str):
    """Parse a whole puzzle input: one snailfish number per non-blank line."""
    lines = input_string.strip().split("\n")
    return [parse_line(single_line) for single_line in lines]
def parse_line(line: str):
    """Parse one input line into nested tuples via its JSON list representation."""
    as_json = json.loads(line)
    return nested_lists_to_nested_tuples(as_json)
def nested_lists_to_nested_tuples(as_list):
    """Recursively convert nested lists to nested tuples.

    Non-list values (e.g. the ints produced by parsing the JSON input) are
    returned unchanged.
    """
    # Check against the builtin `list` rather than `typing.List`: typing
    # generics are not meant as isinstance targets.
    if not isinstance(as_list, list):
        return as_list
    return tuple(nested_lists_to_nested_tuples(element) for element in as_list)
class ExplosionSpec(NamedTuple):
    """Location and payload of the leftmost pair that must explode."""

    # Path of child positions (0 = left, 1 = right) from the root to the pair.
    index: Tuple[int, ...]
    # The two regular numbers of the exploding pair.
    left_value: int
    right_value: int
# Cases for locating the leftmost pair nested four levels deep, including
# searches started from a partial index prefix.
@pytest.mark.parametrize(
    "snail_fish_number, index_so_far, expected",
    [
        ((1, 2), None, None),
        ((1, 2), (0,), None),
        ((1, 2), (0, 0), None),
        ((1, 2), (0, 0, 0), None),
        (
            (1, 2),
            (0, 0, 0, 0),
            ExplosionSpec(index=(0, 0, 0, 0), left_value=1, right_value=2),
        ),
        (((1, 2), 3), None, None),
        (((1, 2), 3), (0, 0), None),
        (
            ((1, 2), 3),
            (0, 0, 0),
            ExplosionSpec(index=(0, 0, 0, 0), left_value=1, right_value=2),
        ),
        (
            (((1, 2), 3), 4),
            (0, 0),
            ExplosionSpec(index=(0, 0, 0, 0), left_value=1, right_value=2),
        ),
        (
            ((((1, 2), 3), 4), 5),
            (0,),
            ExplosionSpec(index=(0, 0, 0, 0), left_value=1, right_value=2),
        ),
        (
            (((((1, 2), 3), 4), 5), 6),
            None,
            ExplosionSpec(index=(0, 0, 0, 0), left_value=1, right_value=2),
        ),
        (
            (((((9, 8), 1), 2), 3), 4),
            None,
            ExplosionSpec(index=(0, 0, 0, 0), left_value=9, right_value=8),
        ),
        (
            (1, (2, (3, (4, (5, 6))))),
            None,
            ExplosionSpec(index=(1, 1, 1, 1), left_value=5, right_value=6),
        ),
        (
            (1, (2, (3, ((4, 5), 6)))),
            None,
            ExplosionSpec(index=(1, 1, 1, 0), left_value=4, right_value=5),
        ),
        (
            (((((1, 2), 3), 4), 5), (6, (7, (8, (9, 10))))),
            None,
            ExplosionSpec(index=(0, 0, 0, 0), left_value=1, right_value=2),
        ),
    ],
)
def test_find_first_explosion(snail_fish_number, index_so_far, expected):
    assert (
        find_first_explosion(snail_fish_number, index_so_far=index_so_far) == expected
    )
def find_first_explosion(
snail_fish_number, index_so_far: Tuple[int, ...] = None
) -> Optional[ExplosionSpec]:
if index_so_far is None:
index_so_far = tuple()
if isinstance(snail_fish_number, int):
return None
if len(index_so_far) == 4:
return ExplosionSpec(
index=index_so_far,
left_value=snail_fish_number[0],
right_value=snail_fish_number[1],
)
else:
for i in range(len(snail_fish_number)):
new_index_so_far = index_so_far + (i,)
result = find_first_explosion(
snail_fish_number=snail_fish_number[i], index_so_far=new_index_so_far
)
if result:
return result
return None
def find_left_neighbor_index(
    snail_fish_number, index: Tuple[int, ...]
) -> Optional[Tuple[int, ...]]:
    """Return the index of the regular number immediately left of *index*.

    Returns None when *index* lies on the far-left edge (all zeros), i.e.
    there is no number to its left.
    """
    if not any(index):
        # All-zero path: nothing lies to the left.
        return None
    # Climb to the deepest point where we came down a right branch and take
    # the left branch there instead ...
    deepest_right_turn = max(pos for pos, branch in enumerate(index) if branch == 1)
    neighbor = index[:deepest_right_turn] + (0,)
    # ... then descend along right branches until a plain number is reached.
    node = snail_fish_number
    for branch in neighbor:
        node = node[branch]
    while isinstance(node, tuple):
        neighbor += (1,)
        node = node[1]
    return neighbor
def is_tuple_at_location(snail_fish_number, index: Tuple[int, ...]):
    """True when the element reached by following *index* is a pair (tuple)."""
    node = snail_fish_number
    for branch in index:
        node = node[branch]
    return isinstance(node, tuple)
def get_value_at_index(snail_fish_number, index):
    """Follow the 0/1 branch choices in *index* and return the element there.

    An empty index returns the whole number.
    """
    if not index:
        return snail_fish_number
    return get_value_at_index(snail_fish_number[index[0]], index[1:])
def find_index_of_last_value(tt: Tuple[int, ...], target_value):
    """Return the highest position in *tt* holding *target_value*, or None."""
    for position in range(len(tt) - 1, -1, -1):
        if tt[position] == target_value:
            return position
    return None
# Spot checks for left-neighbor lookup, including the no-neighbor edge case.
@pytest.mark.parametrize(
    "snail_fish_number, index, expected",
    [
        ((((((1, 2), 3), 4), 5), 6), (0, 0, 0, 0), None),
        ((1, (2, (3, (4, (5, 6))))), (1, 1, 1, 1), (1, 1, 1, 0)),
        ((1, (2, (3, ((4, 5), 6)))), (1, 1, 1, 0), (1, 1, 0)),
        ((1, (2, ((3, 4), ((4, 5), 6)))), (1, 1, 1, 0), (1, 1, 0, 1)),
        (((1, (2, (3, 4))), ((((10, 9), 8), 7), 6)), (1, 0, 0, 0), (0, 1, 1, 1)),
    ],
)
def test_find_left_neighbor_index(snail_fish_number, index, expected):
    assert (
        find_left_neighbor_index(snail_fish_number=snail_fish_number, index=index)
        == expected
    )
def find_right_neighbor_index(snail_fish_number, index: Tuple[int, ...]):
    """Return the index of the regular number immediately right of *index*.

    Returns None when *index* lies on the far-right edge (all ones), i.e.
    there is no number to its right.
    """
    if all(index):
        # All-one path: nothing lies to the right.
        return None
    # Climb to the deepest point where we came down a left branch and take
    # the right branch there instead ...
    deepest_left_turn = max(pos for pos, branch in enumerate(index) if branch == 0)
    neighbor = index[:deepest_left_turn] + (1,)
    # ... then descend along left branches until a plain number is reached.
    node = snail_fish_number
    for branch in neighbor:
        node = node[branch]
    while isinstance(node, tuple):
        neighbor += (0,)
        node = node[0]
    return neighbor
# Spot checks for right-neighbor lookup, including the no-neighbor edge case.
@pytest.mark.parametrize(
    "snail_fish_number, index, expected",
    [
        ((((((1, 2), 3), 4), 5), 6), (0, 0, 0, 0), (0, 0, 0, 1)),
        ((1, (2, (3, (4, (5, 6))))), (1, 1, 1, 1), None),
        ((1, (2, (3, ((4, 5), 6)))), (1, 1, 1, 0), (1, 1, 1, 1)),
        ((1, (2, ((3, 4), ((4, 5), 6)))), (1, 1, 1, 0), (1, 1, 1, 1)),
        (((1, (2, (3, 4))), ((((10, 9), 8), 7), 6)), (1, 0, 0, 0), (1, 0, 0, 1)),
    ],
)
def test_find_right_neighbor_index(snail_fish_number, index, expected):
    assert (
        find_right_neighbor_index(snail_fish_number=snail_fish_number, index=index)
        == expected
    )
class ExplodeReplaceCommand:
    """Rebuilds a snailfish number with one explosion applied.

    The exploding pair becomes 0; its left/right values are added to the
    nearest regular numbers on each side (when such neighbors exist).
    """

    def __init__(self, spec: ExplosionSpec, snail_fish_number):
        self.snail_fish_number = snail_fish_number
        self.explosion_spec = spec
        # Pre-compute where the exploded values must be added; either index
        # may be None when the pair sits on the far left/right edge.
        self.left_index = find_left_neighbor_index(
            snail_fish_number=snail_fish_number, index=spec.index
        )
        self.right_index = find_right_neighbor_index(
            snail_fish_number=snail_fish_number, index=spec.index
        )

    def get_value(self, index: Tuple[int, ...]) -> Union[Tuple, int]:
        """Return the post-explosion value of the node at *index*.

        Call with an empty tuple to rebuild the whole number.
        """
        # The exploding pair itself collapses to 0.
        if index == self.explosion_spec.index:
            return 0
        value_at_index = get_value_at_index(
            snail_fish_number=self.snail_fish_number, index=index
        )
        if index == self.left_index:
            return value_at_index + self.explosion_spec.left_value
        if index == self.right_index:
            return value_at_index + self.explosion_spec.right_value
        if isinstance(value_at_index, int):
            return value_at_index
        # Rebuild pairs recursively so untouched values are copied through.
        return tuple(self.get_value(index + (i,)) for i in range(len(value_at_index)))
def explode(snail_fish_number, index_so_far: Tuple[int, ...] = None):
    """Apply the first (leftmost, depth-4) explosion, if there is one.

    Returns the number unchanged when nothing explodes.  The
    ``index_so_far`` parameter is accepted for backward compatibility but
    is not used.
    """
    spec = find_first_explosion(snail_fish_number)
    if spec is None:
        return snail_fish_number
    command = ExplodeReplaceCommand(spec=spec, snail_fish_number=snail_fish_number)
    return command.get_value(tuple())
# Explosion examples from the puzzle statement (plus a no-op case).
@pytest.mark.parametrize(
    "snail_fish_number, expected",
    [
        (((((0, 1), 2), 3), 4), ((((0, 1), 2), 3), 4)),
        ((((((9, 8), 1), 2), 3), 4), ((((0, 9), 2), 3), 4)),
        ((7, (6, (5, (4, (3, 2))))), (7, (6, (5, (7, 0))))),
        (((6, (5, (4, (3, 2)))), 1), ((6, (5, (7, 0))), 3)),
        (
            ((3, (2, (1, (7, 3)))), (6, (5, (4, (3, 2))))),
            ((3, (2, (8, 0))), (9, (5, (4, (3, 2))))),
        ),
        (
            ((3, (2, (8, 0))), (9, (5, (4, (3, 2))))),
            ((3, (2, (8, 0))), (9, (5, (7, 0)))),
        ),
    ],
)
def test_explode(snail_fish_number, expected):
    assert explode(snail_fish_number) == expected
class SplitSfnCommand:
    """Applies at most one split: the leftmost regular number >= 10 becomes
    a pair of its halves (rounded down, rounded up)."""

    def __init__(self):
        # Number of splits performed so far; capped at one per command.
        self.split_count = 0

    def split(self, snail_fish_number):
        """Return the number with at most one (leftmost) split applied."""
        if self.split_count > 0:
            # A split already happened further left; copy everything through.
            return snail_fish_number
        if isinstance(snail_fish_number, int):
            if snail_fish_number < 10:
                return snail_fish_number
            return self.split_number(snail_fish_number)
        return tuple(self.split(child) for child in snail_fish_number)

    def split_number(self, number):
        """Split one regular number into a (floor-half, ceil-half) pair."""
        self.split_count += 1
        lower_half = math.floor(number / 2.0)
        return lower_half, number - lower_half
# Splitting halves the leftmost regular number >= 10 into (floor, ceil).
@pytest.mark.parametrize(
    "snail_fish_number, expected",
    [
        ((1, 2), (1, 2)),
        ((10, 2), ((5, 5), 2)),
        ((11, 2), ((5, 6), 2)),
        ((1, 12), (1, (6, 6))),
        ((1, 13), (1, (6, 7))),
        ((1, (2, 10)), (1, (2, (5, 5)))),
        ((1, (2, (3, 10))), (1, (2, (3, (5, 5))))),
        ((11, 12), ((5, 6), 12)),
    ],
)
def test_split_sfn(snail_fish_number, expected):
    assert SplitSfnCommand().split(snail_fish_number) == expected
def sfn_add(sfn_a, sfn_b):
    """Snailfish addition: the sum is simply the pair (a, b)."""
    return (sfn_a, sfn_b)
# Addition just pairs the two operands (no reduction happens here).
@pytest.mark.parametrize(
    "sfn_a, sfn_b, expected",
    [
        ((1, 2), (3, 4), ((1, 2), (3, 4))),
        ((1, (2, (3, 4))), ((5, 6), 7), ((1, (2, (3, 4))), ((5, 6), 7))),
    ],
)
def test_sfn_add(sfn_a, sfn_b, expected):
    assert sfn_add(sfn_a, sfn_b) == expected
def sfn_reduce(sfn):
    """Reduce to normal form: explode to a fixpoint, split at most once,
    and repeat until a full round changes nothing."""
    while True:
        round_start = sfn
        # Explosions always take priority over splits.
        exploded = explode(sfn)
        while exploded != sfn:
            sfn = exploded
            exploded = explode(sfn)
        # At most one split per round, then go re-check for explosions.
        sfn = SplitSfnCommand().split(sfn)
        if sfn == round_start:
            return sfn
# Full reduction: explosions first, then at most one split per round.
@pytest.mark.parametrize(
    "sfn, expected",
    [
        ((1, 2), (1, 2)),
        ((((((9, 8), 1), 2), 3), 4), ((((0, 9), 2), 3), 4)),
        (
            ((3, (2, (1, (7, 3)))), (6, (5, (4, (3, 2))))),
            ((3, (2, (8, 0))), (9, (5, (7, 0)))),
        ),
        ((1, 13), (1, (6, 7))),
        ((12, 13), ((6, 6), (6, 7))),
        (
            (((((4, 3), 4), 4), (7, ((8, 4), 9))), (1, 1)),
            ((((0, 7), 4), ((7, 8), (6, 0))), (8, 1)),
        ),
    ],
)
def test_sfn_reduce(sfn, expected):
    assert sfn_reduce(sfn) == expected
def add_and_reduce_many_sfns(sfns: List[Tuple]):
    """Left-fold the numbers with snailfish addition, reducing after each add."""
    total = sfns[0]
    for addend in sfns[1:]:
        total = sfn_reduce(sfn_add(total, addend))
    return total
# Ten-number sum example from the puzzle statement.
def test_add_and_reduce_many_sfns():
    assert add_and_reduce_many_sfns(
        [
            (((0, (4, 5)), (0, 0)), (((4, 5), (2, 6)), (9, 5))),
            (7, (((3, 7), (4, 3)), ((6, 3), (8, 8)))),
            ((2, ((0, 8), (3, 4))), (((6, 7), 1), (7, (1, 6)))),
            ((((2, 4), 7), (6, (0, 5))), (((6, 8), (2, 8)), ((2, 1), (4, 5)))),
            (7, (5, ((3, 8), (1, 4)))),
            ((2, (2, 2)), (8, (8, 1))),
            (2, 9),
            (1, (((9, 3), 9), ((9, 0), (0, 7)))),
            (((5, (7, 4)), 7), 1),
            ((((4, 2), 2), 6), (8, 7)),
        ]
    ) == (
        (((8, 7), (7, 7)), ((8, 6), (7, 7))),
        (((0, 7), (6, 6)), (8, 7)),
    )
def sfn_magnitude(sfn):
    """Magnitude: 3 * magnitude(left) + 2 * magnitude(right).

    A regular number is its own magnitude.
    """
    if isinstance(sfn, int):
        return sfn
    left, right = sfn
    return 3 * sfn_magnitude(left) + 2 * sfn_magnitude(right)
# Magnitude examples from the puzzle statement.
@pytest.mark.parametrize(
    "sfn, expected",
    [
        ((1, 2), 7),
        ((9, 1), 29),
        (((1, 2), ((3, 4), 5)), 143),
        (((((8, 7), (7, 7)), ((8, 6), (7, 7))), (((0, 7), (6, 6)), (8, 7))), 3488),
    ],
)
def test_sfn_magnitude(sfn, expected):
    assert sfn_magnitude(sfn) == expected
def do_homework(sfns):
    """Part 1 answer: magnitude of the sum of all the numbers."""
    total = add_and_reduce_many_sfns(sfns)
    return sfn_magnitude(total)
# Ten-line homework example from the puzzle statement (already parsed).
EXAMPLE_HOMEWORK = [
    (((0, (5, 8)), ((1, 7), (9, 6))), ((4, (1, 2)), ((1, 4), 2))),
    (((5, (2, 8)), 4), (5, ((9, 9), 0))),
    (6, (((6, 2), (5, 6)), ((7, 6), (4, 7)))),
    (((6, (0, 7)), (0, 9)), (4, (9, (9, 0)))),
    (((7, (6, 4)), (3, (1, 3))), (((5, 5), 1), 9)),
    ((6, ((7, 3), (3, 2))), (((3, 8), (5, 7)), 4)),
    ((((5, 4), (7, 7)), 8), ((8, 3), 8)),
    ((9, 3), ((9, 9), (6, (4, 9)))),
    ((2, ((7, 7), 7)), ((5, 8), ((9, 3), (0, 2)))),
    ((((5, 2), 5), (8, (3, 7))), ((5, (7, 5)), (4, 4))),
]
# The puzzle statement gives 4140 as the magnitude of the example sum.
def test_do_homework():
    assert do_homework(EXAMPLE_HOMEWORK) == 4140
def do_homework_part_2(sfns):
max_value = 0
for | |
tr_widths=tr_widths, tr_spaces=tr_spaces)
# Rows are ordered from bottom to top
# To use TrackManager, an ordered list of wiring types and their locations must be provided.
# Define two lists, one for the nch rows and one for the pch rows
# The lists are composed of dictionaries, one per row.
# Each dictionary has two list entries (g and ds), which are ordered lists of what wire types will be present
# in the g and ds sections of that row. Ordering is from bottom to top of the design.
wire_names = dict(
nch=[
# Pre-amp tail row
dict(
g=['clk', ],
ds=['bias', ]
),
# Pre-amp input row
dict(
g=['sig'],
ds=['sig']
),
# Regen-amp nmos
dict(
ds=['bias'],
g=['sig', ]
),
],
pch=[
# Regen amp pmos load
dict(
g=['sig', 'sig'],
ds=['bias']
),
# Pre-amp pmos load and pmos tail/enable
dict(
g=['clk', 'clk'],
ds=['sig']
),
]
)
# Set up the row information
# Row information contains the row properties like width/number of fins, orientation, intent, etc.
# Storing in a row dictionary/object allows for convenient fetching of data in later functions
row_tail = self.initialize_rows(row_name='tail',
orient='R0',
nch_or_pch='nch',
)
row_pre_in = self.initialize_rows(row_name='pre_in',
orient='R0',
nch_or_pch='nch',
)
row_regen_n = self.initialize_rows(row_name='regen_n',
orient='MX',
nch_or_pch='nch',
)
row_regen_p = self.initialize_rows(row_name='regen_p',
orient='R0',
nch_or_pch='pch',
)
row_pre_load = self.initialize_rows(row_name='pre_load',
orient='R0',
nch_or_pch='pch',
)
# Define the order of the rows (bottom to top) for this analogBase cell
self.set_global_rows(
[row_tail, row_pre_in, row_regen_n, row_regen_p, row_pre_load]
)
################################################################################
# 2:
# Initialize the transistors in the design
# Storing each transistor's information (name, location, row, size, etc) in a dictionary object allows for
# convient use later in the code, and also greatly simplifies the schematic generation
# The initialization sets the transistor's row, width, and source/drain net names for proper dummy creation
################################################################################
tail_1 = self.initialize_tx(name='tail_1', row=row_tail, fg_spec='tail_n', deff_net='PRE_AMP_SOURCE')
tail_2 = self.initialize_tx(name='tail_2', row=row_tail, fg_spec='tail_n', deff_net='PRE_AMP_SOURCE')
in_p = self.initialize_tx(name='in_p', row=row_pre_in, fg_spec='in_n',
seff_net='PRE_AMP_SOURCE', deff_net='DIN')
in_n = self.initialize_tx(name='in_n', row=row_pre_in, fg_spec='in_n',
seff_net='PRE_AMP_SOURCE', deff_net='DIP')
pre_load_p = self.initialize_tx(name='pre_load_p', row=row_pre_load, fg_spec='pre_load_p', deff_net='DIN')
pre_load_n = self.initialize_tx(name='pre_load_n', row=row_pre_load, fg_spec='pre_load_p', deff_net='DIP')
regen_n_p = self.initialize_tx(name='regen_n_p', row=row_regen_n, fg_spec='regen_n', deff_net='VOP')
regen_n_n = self.initialize_tx(name='regen_n_n', row=row_regen_n, fg_spec='regen_n', deff_net='VON')
regen_p_p = self.initialize_tx(name='regen_p_p', row=row_regen_p, fg_spec='regen_p',
seff_net='REGEN_SOURCE', deff_net='VOP')
regen_p_n = self.initialize_tx(name='regen_p_n', row=row_regen_p, fg_spec='regen_p',
seff_net='REGEN_SOURCE', deff_net='VON')
tail_p_1 = self.initialize_tx(name='tail_p_1', row=row_pre_load, fg_spec='tail_p', deff_net='REGEN_SOURCE')
tail_p_2 = self.initialize_tx(name='tail_p_2', row=row_pre_load, fg_spec='tail_p', deff_net='REGEN_SOURCE')
reset_p = self.initialize_tx(name='reset_p', row=row_regen_n, fg_spec='reset_n', deff_net='VOP')
reset_n = self.initialize_tx(name='reset_n', row=row_regen_n, fg_spec='reset_n', deff_net='VON')
# Compose a list of all the transistors so it can be iterated over later
transistors = [
tail_1, tail_2, in_p, in_n, pre_load_p, pre_load_n, regen_n_p, regen_n_n,
regen_p_p, regen_p_n, tail_p_1, tail_p_2, reset_p, reset_n
]
# Check that all transistors are even fingered
for tx in transistors:
if tx['fg'] % 2 == 1:
raise ValueError(
"Transistors must have even number of fingers. Transistor '{}' has {}".format(tx['name'], tx['fg]'])
)
################################################################################
# 3: Calculate transistor locations
# Based on the floorplan, want the tail, input, nmos regen, pmos regen, and pmos tail to be in a column
# and for convenience, place the reset and load in a column, but right/left justified
# Notation:
# fg_xxx refers to how wide (in fingers) a transistor or column of transistors is
# col_xxx refers to the location of the left most finger of a transistor or a column of transistors
################################################################################
fg_stack = max(tail_1['fg'], in_p['fg'], regen_n_p['fg'], regen_p_p['fg'], tail_p_1['fg'])
fg_side = max(reset_p['fg'], pre_load_p['fg'])
# Add an explicit gap in the middle for symmetry. Set to 0 to not have gap
fg_mid = 0
# Get the minimum gap between fingers of different transistors
# This varies between processes, so avoid hard-coding by using the method in self._tech_cls
fg_space = self._tech_cls.get_min_fg_sep(lch)
fg_total = fg_dum + fg_side + fg_space + fg_stack + fg_mid + fg_space + fg_stack + fg_space + fg_side + fg_dum
# Calculate the starting column index for each stack of transistors
col_side_left = fg_dum
col_stack_left = col_side_left + fg_side + fg_space
col_stack_right = col_stack_left + fg_stack + fg_space + fg_mid
col_side_right = col_stack_right + fg_stack + fg_space
# Calculate positions of transistors
# This uses helper functions to place each transistor within a stack/column of a specified starting index and
# width, and with a certain alignment (left, right, centered) within that column
self.assign_tx_column(tx=tail_1, offset=col_stack_left, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=in_n, offset=col_stack_left, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=regen_n_n, offset=col_stack_left, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=regen_p_n, offset=col_stack_left, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=tail_p_1, offset=col_stack_left, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=tail_2, offset=col_stack_right, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=in_p, offset=col_stack_right, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=regen_n_p, offset=col_stack_right, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=regen_p_p, offset=col_stack_right, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=tail_p_2, offset=col_stack_right, fg_col=fg_stack, align=0)
self.assign_tx_column(tx=reset_n, offset=col_side_left, fg_col=fg_side, align=1)
self.assign_tx_column(tx=pre_load_n, offset=col_side_left, fg_col=fg_side, align=1)
self.assign_tx_column(tx=reset_p, offset=col_side_right, fg_col=fg_side, align=-1)
self.assign_tx_column(tx=pre_load_p, offset=col_side_right, fg_col=fg_side, align=-1)
################################################################################
# 4: Assign the transistor directions (s/d up vs down)
#
# Specify the directions that connections to the source and connections to the drain will go (up vs down)
# Doing so will also determine how the gate is aligned (ie will it be aligned to the source or drain)
# See the bootcamp for more details
# The helper functions used here help to abstract away whether the intended source/drain diffusion region of
# a transistor occurs on the even or odd columns of that device (BAG always considers the even columns of a
# device to be the 's').
# These helper functions allow a user to specify whether the even columns should be the transistors effective
# source or effective drain, so that the user does not need to worry about BAG's notation.
################################################################################
# Set tail transistor to have source on the leftmost diffusion (arbitrary) and source going down
self.set_tx_directions(tx=tail_1, seff='s', seff_dir=0)
# Assign the input to be anti-aligned, so that the input source and tail drain are vertically aligned
self.assign_tx_matched_direction(target_tx=in_p, source_tx=tail_1, seff_dir=0, aligned=False)
# Set regen nmos to arbitrarily have source on left
self.set_tx_directions(tx=regen_n_p, seff='s', seff_dir=0)
# Set regen pmos to align so sources and drains are vertically aligned. pmos source will go up
self.assign_tx_matched_direction(target_tx=regen_p_p, source_tx=regen_n_p, seff_dir=2, aligned=True)
# Set the regen tail stage to anti-align so tail drain aligns vertically with regen pmos source.
self.assign_tx_matched_direction(target_tx=tail_p_1, source_tx=regen_p_p, seff_dir=2, aligned=False)
# Arbitrarily set the s/d effective for the reset and pre-amp load, as they will not align to anything
self.set_tx_directions(tx=reset_p, seff='s', seff_dir=0)
self.set_tx_directions(tx=pre_load_p, seff='s', seff_dir=2)
# Do the same alignments for the mirror-symmetric negative half of the circuit
self.set_tx_directions(tx=tail_2, seff='s', seff_dir=0)
self.assign_tx_matched_direction(target_tx=in_n, source_tx=tail_2, seff_dir=0, aligned=False)
self.set_tx_directions(tx=regen_n_n, seff='s', seff_dir=0)
self.assign_tx_matched_direction(target_tx=regen_p_n, source_tx=regen_n_n, seff_dir=2, aligned=True)
self.assign_tx_matched_direction(target_tx=tail_p_2, source_tx=regen_p_n, seff_dir=2, aligned=False)
self.set_tx_directions(tx=reset_n, seff='s', seff_dir=0)
self.set_tx_directions(tx=pre_load_n, seff='s', seff_dir=2)
################################################################################
# 5: Draw the transistor rows, and the transistors
#
# All the difficult setup has been complete. Drawing the transistors is simple now.
# Note that we pass the wire_names dictionary defined above so that BAG knows how to space out
# the transistor rows. BAG uses this to calculate how many tracks to allocate to each
################################################################################
# Draw the transistor row bases
self.draw_base(lch, fg_total, self._ptap_w, self._ntap_w,
self._row_prop('width', 'nch'), self._row_prop('th', 'nch'),
self._row_prop('width', 'pch'), self._row_prop('th', 'pch'),
tr_manager=tr_manager, wire_names=wire_names,
n_orientations=self._row_prop('orient', 'nch'),
p_orientations=self._row_prop('orient', 'pch'),
top_layer=top_layer,
half_blk_x=False, half_blk_y=False,
)
# Draw the transistors
for tx in transistors:
tx['ports'] = self.draw_mos_conn(mos_type=tx['type'],
row_idx=tx['row_ind'],
col_idx=tx['col'],
fg=tx['fg'],
sdir=tx['sdir'],
ddir=tx['ddir'],
s_net=tx['s_net'],
d_net=tx['d_net'],
)
tx['s'] = tx['ports'][tx['seff']]
tx['d'] = tx['ports'][tx['deff']]
tx['g'] = tx['ports']['g']
################################################################################
# 6: Define horizontal tracks on which connections will be made
#
# Based on the wire_names dictionary defined in step 1), create TrackIDs on which horizontal connections will
# be made
################################################################################
tid_pream_tail = self.get_wire_id('nch', row_tail['index'], 'ds', wire_name='bias')
tid_d = self.get_wire_id('nch', row_pre_in['index'], 'ds', wire_name='sig')
tid_reset_gate = self.get_wire_id('nch', row_regen_n['index'], 'g', wire_name='sig')
tid_out_p_horz = self.get_wire_id('pch', row_regen_p['index'], 'g', wire_name='sig', wire_idx=0)
tid_out_n_horz = self.get_wire_id('pch', row_regen_p['index'], 'g', wire_name='sig', wire_idx=1)
tid_regen_vss = self.get_wire_id('nch', row_regen_n['index'], 'ds', wire_name='bias')
tid_regen_tail = self.get_wire_id('pch', row_regen_p['index'], 'ds', wire_name='bias')
tid_tail_regen_clk = self.get_wire_id('pch', row_pre_load['index'], 'g', wire_name='clk', wire_idx=0)
tid_tail_regen_clk_b = self.get_wire_id('pch', row_pre_load['index'], 'g', wire_name='clk', wire_idx=1)
tid_tail_preamp_clk = self.get_wire_id('nch', row_tail['index'], 'g', wire_name='clk')
tid_sig_in_horz = self.get_wire_id('nch', row_pre_in['index'], 'g', wire_name='sig')
tid_preamp_load_d = self.get_wire_id('pch', row_pre_load['index'], 'ds', wire_name='sig')
################################################################################
# 7: Perform wiring
#
# Use the self.connect_to_tracks, self.connect_differential_tracks, self.connect_wires, etc
# to perform connections
# Note that the drain/source/gate wire arrays of | |
species rowid in existing list')
previous_species_rowids = None
if previous_species_rowids is None:
refresh = True
current_species_rowids = ibs.get_annot_species_rowids(aid_list)
species_count = [
len(current_species_rowids) - current_species_rowids.count(species_rowid)
for species_rowid in species_rowids
]
species_nice_list = ibs.get_species_nice(species_rowids)
combined_list = list(zip(species_count, species_nice_list, species_rowids))
if refresh:
logger.info('REFRESHING!')
combined_list = sorted(combined_list)
species_count_list = [combined[0] for combined in combined_list]
species_nice_list = [combined[1] for combined in combined_list]
species_rowids = [combined[2] for combined in combined_list]
species_text_list = ibs.get_species_texts(species_rowids)
species_rowids_json = ut.to_json(species_rowids)
hotkey_list = [index + 1 for index in range(len(species_nice_list))]
species_selected_list = [species == species_ for species_ in species_text_list]
species_list = list(
zip(hotkey_list, species_nice_list, species_text_list, species_selected_list)
)
species_extended_list = []
other_selected = False
zipped = list(zip(species_count_list, species_nice_list, species_selected_list))
for index, (species_count, species_nice, species_selected) in enumerate(zipped):
if species_selected:
species_nice += ' (default)'
args = (
len(current_species_rowids) - species_count,
species_nice,
)
if index >= hotkeys:
logger.info('% 5d : %s' % args)
else:
logger.info('% 5d * : %s' % args)
if len(species_list) >= hotkeys:
species_extended_list = species_list[hotkeys:]
species_list = species_list[:hotkeys]
extended_flag_list = [_[3] for _ in species_extended_list]
if True in extended_flag_list:
other_selected = True
species_list = species_list + [
(len(species_list) + 1, 'Other', const.UNKNOWN, other_selected)
]
example_species_list = [
(key, const.SPECIES_MAPPING[key][1])
for key in sorted(list(const.SPECIES_MAPPING.keys()))
if const.SPECIES_MAPPING[key][0] is not None and key != const.UNKNOWN
]
callback_url = url_for('submit_species')
return appf.template(
'turk',
'species',
imgsetid=imgsetid,
src_ag=src_ag,
dst_ag=dst_ag,
gid=gid,
aid=aid,
image_src=image_src,
previous=previous,
species_list=species_list,
species_extended_list=species_extended_list,
species_rowids_json=species_rowids_json,
example_species_list=example_species_list,
imagesettext=imagesettext,
progress=progress,
finished=finished,
display_instructions=display_instructions,
callback_url=callback_url,
callback_method='POST',
EMBEDDED_CSS=None,
EMBEDDED_JAVASCRIPT=None,
review=review,
)
@register_route('/turk/part/type/', methods=['GET'])
def turk_part_types(
    part_rowid=None,
    imgsetid=None,
    previous=None,
    hotkeys=8,
    refresh=False,
    previous_part_types=None,
    **kwargs,
):
    """Serve the part-type labeling turk page.

    Picks a part to review (from the request args, or a random unreviewed
    one), builds the selectable part-type list split into a hotkeyed set
    and an 'extended' overflow set, and renders the 'part_type' template.

    NOTE(review): relies on module-level imports of ``appf``,
    ``routes_ajax``, ``const``, ``ut``, ``random``, ``logger`` and Flask's
    ``request``/``current_app``/``url_for``.
    """
    ibs = current_app.ibs
    # Normalize missing/placeholder imageset ids to None (no filtering).
    imgsetid = None if imgsetid == '' or imgsetid == 'None' else imgsetid
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    part_rowid_list = ut.flatten(ibs.get_annot_part_rowids(aid_list))
    part_rowid_list = list(set(part_rowid_list))
    reviewed_list = appf.imageset_part_type_processed(ibs, part_rowid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(part_rowid_list),)
    except ZeroDivisionError:
        # No parts at all: report zero progress instead of crashing.
        progress = '0.00'
    part_rowid = request.args.get('part_rowid', '')
    if len(part_rowid) > 0:
        part_rowid = int(part_rowid)
    else:
        # No explicit part requested: pick a random unreviewed one.
        part_rowid_list_ = ut.filterfalse_items(part_rowid_list, reviewed_list)
        if len(part_rowid_list_) == 0:
            part_rowid = None
        else:
            part_rowid = random.choice(part_rowid_list_)
    padding = 0.15
    review = 'review' in request.args.keys()
    finished = part_rowid is None
    display_instructions = request.cookies.get('ia-part-type_instructions_seen', 1) == 0
    if not finished:
        aid = ibs.get_part_aids(part_rowid)
        gid = ibs.get_annot_gids(aid)
        image_src = routes_ajax.part_src(part_rowid, pad=padding)
        part_type = ibs.get_part_types(part_rowid)
    else:
        aid = None
        gid = None
        image_src = None
        part_type = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Candidate part types: every type already used anywhere in the database.
    all_part_rowid_list = ibs._get_all_part_rowids()
    all_part_types = sorted(set(ibs.get_part_types(all_part_rowid_list)))
    if ibs.containerized:
        hostname = const.CONTAINER_NAME
    else:
        hostname = ibs.dbname
    if ibs.dbname == 'WD_Master' or hostname == 'wilddog':
        # Wild-dog databases get a hard-coded set of extra types.
        all_part_types = all_part_types + [
            'standard',
            'short_black',
            'long_black',
            'double_black_brown',
            'double_black_white',
            'triple_black',
            'long_white',
        ]
    all_part_types = sorted(list(set(all_part_types) - set([const.UNKNOWN])))
    if not refresh and previous_part_types is not None:
        # Keep the caller's previous ordering only if every previous type is
        # still valid; otherwise fall back to a refreshed (re-sorted) list.
        try:
            for previous_part_type in previous_part_types:
                assert previous_part_type in all_part_types
            all_part_types = previous_part_types
        except Exception:
            logger.info('Error finding previous part_type in existing list')
            previous_part_types = None
    if previous_part_types is None:
        refresh = True
    current_part_types = ibs.get_part_types(all_part_rowid_list)
    # Sort key: number of parts NOT of this type, so the most frequently
    # used types sort first after the refresh sort below.
    all_part_type_count = [
        len(current_part_types) - current_part_types.count(all_part_type)
        for all_part_type in all_part_types
    ]
    all_part_nices = [
        const.PARTS_MAPPING.get(all_part_type, all_part_type)
        for all_part_type in all_part_types
    ]
    combined_list = list(zip(all_part_type_count, all_part_nices, all_part_types))
    if refresh:
        logger.info('REFRESHING!')
        combined_list = sorted(combined_list)
    part_type_count_list = [combined[0] for combined in combined_list]
    part_nice_list = [combined[1] for combined in combined_list]
    part_type_list = [combined[2] for combined in combined_list]
    part_type_list_json = ut.to_json(part_type_list)
    # 1-based hotkeys; the current part's type gets marked as the default.
    hotkey_list = [index + 1 for index in range(len(part_type_list))]
    part_type_selected_list = [part_type == part_type_ for part_type_ in part_type_list]
    part_type_option_list = list(
        zip(hotkey_list, part_nice_list, part_type_list, part_type_selected_list)
    )
    part_type_extended_list = []
    other_selected = False
    zipped = list(zip(part_type_count_list, part_nice_list, part_type_selected_list))
    for index, (part_type_count, part_nice, part_type_selected) in enumerate(zipped):
        if part_type_selected:
            part_nice += ' (default)'
        # len(current) - count = actual number of parts with this type.
        args = (
            len(current_part_types) - part_type_count,
            part_nice,
        )
        if index >= hotkeys:
            logger.info('% 5d : %s' % args)
        else:
            logger.info('% 5d * : %s' % args)
    if len(part_type_option_list) >= hotkeys:
        # Overflow beyond the hotkeyed slots goes to the 'extended' list.
        part_type_extended_list = part_type_option_list[hotkeys:]
        part_type_option_list = part_type_option_list[:hotkeys]
        extended_flag_list = [_[3] for _ in part_type_extended_list]
        if True in extended_flag_list:
            other_selected = True
    part_type_option_list = part_type_option_list + [
        (len(part_type_option_list) + 1, 'Other', const.UNKNOWN, other_selected)
    ]
    callback_url = url_for('submit_part_types')
    return appf.template(
        'turk',
        'part_type',
        imgsetid=imgsetid,
        part_rowid=part_rowid,
        gid=gid,
        aid=aid,
        image_src=image_src,
        previous=previous,
        part_type_option_list=part_type_option_list,
        part_type_extended_list=part_type_extended_list,
        part_type_list_json=part_type_list_json,
        imagesettext=imagesettext,
        progress=progress,
        finished=finished,
        display_instructions=display_instructions,
        callback_url=callback_url,
        callback_method='POST',
        EMBEDDED_CSS=None,
        EMBEDDED_JAVASCRIPT=None,
        review=review,
    )
@register_route('/turk/viewpoint/', methods=['GET'])
def turk_viewpoint(**kwargs):
    """
    Serve the single-value viewpoint labeling turk page.

    Picks the next unreviewed annotation, maps its viewpoint text to the
    widget's numeric value, and renders the 'viewpoint' template with a
    species selector.

    CommandLine:
        python -m wbia.web.app --exec-turk_viewpoint --db PZ_Master1
    Example:
        >>> # SCRIPT
        >>> from wbia.other.ibsfuncs import * # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    viewpoint_text = ibs.get_annot_viewpoints(aid)
    # Convert the viewpoint text to the turk widget's numeric code.
    value = appf.VIEWPOINT_MAPPING_INVERT.get(viewpoint_text, None)
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('ia-viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        image_src = routes_ajax.annotation_src(aid)
        species = ibs.get_annot_species_texts(aid)
    else:
        gid = None
        image_src = None
        species = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Build the species selector sorted by display name, pre-selecting the
    # annotation's current species.
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [combined[0] for combined in combined_list]
    species_rowids = [combined[1] for combined in combined_list]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [species == species_ for species_ in species_text_list]
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [('Unspecified', const.UNKNOWN, True)] + species_list
    return appf.template(
        'turk',
        'viewpoint',
        imgsetid=imgsetid,
        src_ag=src_ag,
        dst_ag=dst_ag,
        gid=gid,
        aid=aid,
        value=value,
        image_src=image_src,
        previous=previous,
        species_list=species_list,
        imagesettext=imagesettext,
        progress=progress,
        finished=finished,
        display_instructions=display_instructions,
        review=review,
    )
@register_route('/turk/viewpoint2/', methods=['GET'])
def turk_viewpoint2(**kwargs):
    """
    Serve the three-component viewpoint labeling turk page.

    Like turk_viewpoint, but the viewpoint is decomposed into three parts
    (via appf.convert_viewpoint_to_tuple) for the 'viewpoint2' template.

    CommandLine:
        python -m wbia.web.app --exec-turk_viewpoint --db PZ_Master1
    Example:
        >>> # SCRIPT
        >>> from wbia.other.ibsfuncs import * # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    viewpoint = ibs.get_annot_viewpoints(aid)
    # Split the stored viewpoint into the widget's three components.
    viewpoint1, viewpoint2, viewpoint3 = appf.convert_viewpoint_to_tuple(viewpoint)
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('ia-viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        image_src = routes_ajax.annotation_src(aid)
        species = ibs.get_annot_species_texts(aid)
    else:
        gid = None
        image_src = None
        species = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    # Build the species selector sorted by display name, pre-selecting the
    # annotation's current species.
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [combined[0] for combined in combined_list]
    species_rowids = [combined[1] for combined in combined_list]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [species == species_ for species_ in species_text_list]
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [('Unspecified', const.UNKNOWN, True)] + species_list
    return appf.template(
        'turk',
        'viewpoint2',
        imgsetid=imgsetid,
        src_ag=src_ag,
        dst_ag=dst_ag,
        gid=gid,
        aid=aid,
        viewpoint1=viewpoint1,
        viewpoint2=viewpoint2,
        viewpoint3=viewpoint3,
        image_src=image_src,
        previous=previous,
        species_list=species_list,
        imagesettext=imagesettext,
        progress=progress,
        finished=finished,
        display_instructions=display_instructions,
        review=review,
    )
@register_route('/turk/viewpoint3/', methods=['GET'])
def turk_viewpoint3(**kwargs):
    """Render the 3-axis viewpoint turking page for the next annotation.

    Pulls the next annotation to review for the current imageset, looks up
    its stored viewpoint code and species, builds the species selector, and
    hands everything to the 'viewpoint3' template.
    """
    with ut.Timer('[turk_viewpoint3]'):
        ibs = current_app.ibs
        # Next annotation to review plus the surrounding bookkeeping
        (
            aid_list,
            reviewed_list,
            imgsetid,
            src_ag,
            dst_ag,
            progress,
            aid,
            previous,
        ) = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
        viewpoint = None if aid is None else ibs.get_annot_viewpoints(aid)
        viewpoint_code = const.YAWALIAS.get(viewpoint, None)
        review = 'review' in request.args.keys()
        finished = aid is None
        display_instructions = (
            request.cookies.get('ia-viewpoint3_instructions_seen', 1) == 0
        )
        if finished:
            gid = None
            image_src = None
            species = None
        else:
            gid = ibs.get_annot_gids(aid)
            image_src = routes_ajax.annotation_src(aid)
            species = ibs.get_annot_species_texts(aid)
        imagesettext = ibs.get_imageset_text(imgsetid)
        # Build the species dropdown, sorted by display (nice) name
        all_rowids = ibs._get_all_species_rowids()
        nice_names = ibs.get_species_nice(all_rowids)
        sorted_pairs = sorted(zip(nice_names, all_rowids))
        nice_names = [pair[0] for pair in sorted_pairs]
        sorted_rowids = [pair[1] for pair in sorted_pairs]
        species_texts = ibs.get_species_texts(sorted_rowids)
        selected_flags = [species == text for text in species_texts]
        species_list = [('Unspecified', const.UNKNOWN, True)] + list(
            zip(nice_names, species_texts, selected_flags)
        )
        axis_preference = request.cookies.get('ia-viewpoint3_axis_preference', None)
        return appf.template(
            'turk',
            'viewpoint3',
            imgsetid=imgsetid,
            src_ag=src_ag,
            dst_ag=dst_ag,
            gid=gid,
            aid=aid,
            viewpoint_code=viewpoint_code,
            axis_preference=axis_preference,
            image_src=image_src,
            previous=previous,
            species_list=species_list,
            imagesettext=imagesettext,
            progress=progress,
            finished=finished,
            display_instructions=display_instructions,
            review=review,
        )
def commit_current_query_object_names(query_object, ibs):
    r"""Commit the session's name decisions back to the database.

    Args:
        query_object (wbia.AnnotInference): in-memory review graph
        ibs (wbia.IBEISController): image analysis api
    """
    # Connected components of the review graph determine the final names
    query_object.relabel_using_reviews()
    # Transfers any remaining internal feedback into staging
    # TODO: uncomment once buffer is dead
    # query_object.write_wbia_staging_feedback()
    # Write the annotmatch delta first, then persist the name assignment
    query_object.write_wbia_annotmatch_feedback()
    query_object.write_wbia_name_assignment()
def precompute_current_review_match_images(
ibs,
query_object,
global_feedback_limit=GLOBAL_FEEDBACK_LIMIT,
view_orientation='vertical',
):
from wbia.web import apis_query
review_aid1_list, review_aid2_list = query_object.get_filtered_edges(
GLOBAL_FEEDBACK_CONFIG_DICT
)
qreq_ = query_object.qreq_
assert len(review_aid1_list) == len(review_aid2_list), 'not aligned'
# Precompute
zipped = list(zip(review_aid1_list, review_aid2_list))
prog = ut.ProgIter(
enumerate(zipped), length=len(review_aid2_list), label='Rending images'
)
for index, (aid1, aid2) in prog:
if index > global_feedback_limit * 2:
break
cm, aid1, aid2 = query_object.lookup_cm(aid1, aid2)
try:
apis_query.ensure_review_image(
ibs, aid2, cm, qreq_, view_orientation=view_orientation
)
except KeyError as ex:
ut.printex(
ex,
'Failed to make review image. falling back',
tb=True,
keys=['cm.qaid', 'aid2'],
iswarning=True,
)
apis_query.ensure_review_image(
ibs,
aid2,
cm,
qreq_,
view_orientation=view_orientation,
| |
+ net['id']))
return ext_net
else:
epg = self.aim_mgr.get(
ctx, aim_res.EndpointGroup.from_dn(
net['apic:distinguished_names']['EndpointGroup']))
return epg
provider = get_net_group(dst_net, dst_cird)
# Verify Flow Classifier mapping
contract = self.aim_mgr.get(
ctx, aim_res.Contract(
tenant_name=apic_tn,
name=self.sfc_driver._generate_contract_name(provider.name,
sg.name)))
self.assertIsNotNone(contract)
subject = self.aim_mgr.get(
ctx, aim_res.ContractSubject(
tenant_name=apic_tn,
contract_name=contract.name,
name='ptc_' + pc['id']))
self.assertIsNotNone(subject)
self.assertEqual(['openstack_AnyFilter'], subject.bi_filters)
for net, pref, cidr in [(src_net, 'src_', src_cidr),
(dst_net, 'dst_', dst_cird)]:
group = get_net_group(net, cidr)
if net['apic:svi']:
ext_net = group
subnets = [cidr]
if cidr in ['0.0.0.0/0', '::/0']:
# use default external EPG
subnets = ['192.168.3.11/1', '0.0.0.0/1', '8000::/1',
'::/1']
for sub in subnets:
ext_sub = self.aim_mgr.get(ctx, aim_res.ExternalSubnet(
tenant_name=ext_net.tenant_name,
l3out_name=ext_net.l3out_name,
external_network_name=ext_net.name, cidr=sub))
self.assertIsNotNone(ext_sub)
self.assertIsNotNone(ext_net)
self.assertTrue(
contract.name in (ext_net.consumed_contract_names if
pref == 'src_' else
ext_net.provided_contract_names),
"%s not in ext net %s" % (contract.name,
ext_net.__dict__))
else:
epg = group
self.assertTrue(
contract.name in (epg.consumed_contract_names if
pref == 'src_' else
epg.provided_contract_names))
for ppg in ppgs:
self._verify_ppg_mapping(ppg, apic_tn)
device_cluster = self.aim_mgr.get(
ctx, aim_sg.DeviceCluster(tenant_name=apic_tn,
name='ppg_' + ppg['id']))
device_clusters.append(device_cluster)
dcc = self.aim_mgr.get(
ctx, aim_sg.DeviceClusterContext(
tenant_name=sg.tenant_name,
contract_name="any",
service_graph_name=sg.name,
node_name=device_cluster.name))
self.assertIsNotNone(dcc)
self.assertEqual(device_cluster.name, dcc.device_cluster_name)
self.assertEqual(apic_tn, dcc.device_cluster_tenant_name)
# Get ingress/egress BD
pp = self.show_port_pair(ppg['port_pairs'][0])['port_pair']
ingress_net = self._get_port_network(pp['ingress'])
egress_net = self._get_port_network(pp['egress'])
ingress_bd = ingress_net[
'apic:distinguished_names']['BridgeDomain']
egress_bd = egress_net[
'apic:distinguished_names']['BridgeDomain']
dci = aim_sg.DeviceClusterInterface(
tenant_name=device_cluster.tenant_name,
device_cluster_name=device_cluster.name, name='ingress')
dcic = aim_sg.DeviceClusterInterfaceContext(
tenant_name=apic_tn, contract_name="any",
service_graph_name=sg.name, node_name=device_cluster.name,
connector_name='consumer')
dcic = self.aim_mgr.get(ctx, dcic)
self.assertIsNotNone(dcic)
self.assertEqual(ingress_bd, dcic.bridge_domain_dn)
self.assertEqual(dci.dn, dcic.device_cluster_interface_dn)
self.assertNotEqual('', dcic.service_redirect_policy_dn)
dci = aim_sg.DeviceClusterInterface(
tenant_name=device_cluster.tenant_name,
device_cluster_name=device_cluster.name, name='egress')
dcic = aim_sg.DeviceClusterInterfaceContext(
tenant_name=apic_tn, contract_name="any",
service_graph_name=sg.name, node_name=device_cluster.name,
connector_name='provider')
dcic = self.aim_mgr.get(ctx, dcic)
self.assertIsNotNone(dcic)
self.assertEqual(egress_bd, dcic.bridge_domain_dn)
self.assertEqual(dci.dn, dcic.device_cluster_interface_dn)
self.assertNotEqual('', dcic.service_redirect_policy_dn)
self.assertEqual(
g_utils.deep_sort(
list({'name': x.name, 'device_cluster_name': x.name,
'device_cluster_tenant_name': x.tenant_name}
for x in device_clusters)),
g_utils.deep_sort(sg.linear_chain_nodes))
def _verify_pc_delete(self, pc):
ctx = self._aim_context
self.delete_port_chain(pc['id'])
# PC and Flowc unmapped
routers_count = ctx.db_session.query(l3_db.Router).count()
self.assertEqual(routers_count,
len(self.aim_mgr.find(ctx, aim_res.Contract)))
self.assertEqual(routers_count,
len(self.aim_mgr.find(ctx, aim_res.ContractSubject)))
self.assertEqual(
[], self.aim_mgr.find(ctx, aim_sg.DeviceClusterContext))
self.assertEqual(
[], self.aim_mgr.find(ctx, aim_sg.DeviceClusterInterfaceContext))
self.assertEqual([], self.aim_mgr.find(ctx, aim_sg.ServiceGraph))
# PPGs unmapped
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ServiceRedirectPolicy)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ConcreteDeviceInterface)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ConcreteDevice)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.DeviceCluster)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.DeviceClusterInterface)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx,
aim_sg.ServiceRedirectMonitoringPolicy)))
self.assertEqual(
0, len(self.aim_mgr.find(ctx, aim_sg.ServiceRedirectHealthGroup)))
ppgs = [self.show_port_pair_group(x)['port_pair_group'] for x in
pc['port_pair_groups']]
for ppg in ppgs:
pps = [self.show_port_pair(x)['port_pair'] for x in
ppg['port_pairs']]
for pp in pps:
iprt = self._show_port(pp['ingress'])
eprt = self._show_port(pp['egress'])
iepg = self.aim_mech._get_epg_by_network_id(self._ctx.session,
iprt['network_id'])
eepg = self.aim_mech._get_epg_by_network_id(self._ctx.session,
eprt['network_id'])
self.assertTrue(self.aim_mgr.get(ctx, iepg).sync)
self.assertTrue(self.aim_mgr.get(ctx, eepg).sync)
def _delete_network(self, network_id):
req = self.new_delete_request('networks', network_id)
return req.get_response(self.api)
class TestPortPair(TestAIMServiceFunctionChainingBase):
    """Validation rules for SFC port pairs under the AIM SFC driver."""

    def test_port_pair_validation(self):
        """Ingress/egress must be bound and live on different networks."""
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        p1 = self._make_port(self.fmt, net1['network']['id'])['port']
        net2 = self._make_network(self.fmt, 'net2', True)
        # NOTE(review): the subnet below is created on net1, not net2; this
        # pattern repeats across the module -- confirm it is intentional.
        self._make_subnet(self.fmt, net1, '192.168.1.1', '192.168.1.0/24')
        p2 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p1['id'], 'h1')
        self._bind_port_to_host(p2['id'], 'h2')
        self.create_port_pair(ingress=p1['id'], egress=p2['id'],
                              expected_res_status=201)
        # Same network ports: rejected
        p3 = self._make_port(self.fmt, net2['network']['id'])['port']
        p4 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p3['id'], 'h1')
        self._bind_port_to_host(p4['id'], 'h2')
        self.create_port_pair(ingress=p3['id'], egress=p4['id'],
                              expected_res_status=500)
        # Unbound egress port: rejected with 400
        p5 = self._make_port(self.fmt, net1['network']['id'])['port']
        self.create_port_pair(ingress=p3['id'], egress=p5['id'],
                              expected_res_status=400)
        # Ports with no domain

    def test_port_pair_validation_host_on_vmm_and_phys_domains(self):
        """A host mapped to both a physdom and a VMM domain is accepted."""
        # hosts h1 & h2 on physdom already
        # create vmm domain and put all hosts on it.
        self.vmmdom = aim_res.VMMDomain(type='OpenStack', name='vmm1')
        self.aim_mgr.create(self._aim_context, self.vmmdom)
        self.aim_mgr.create(
            self._aim_context, aim_infra.HostDomainMappingV2(
                host_name='h1', domain_name=self.vmmdom.name,
                domain_type='OpenStack'))
        self.aim_mgr.create(
            self._aim_context, aim_infra.HostDomainMappingV2(
                host_name='h2', domain_name=self.vmmdom.name,
                domain_type='OpenStack'))
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net1, '192.168.1.1', '192.168.1.0/24')
        p1 = self._make_port(self.fmt, net1['network']['id'])['port']
        p2 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p1['id'], 'h1')
        self._bind_port_to_host(p2['id'], 'h2')
        self.create_port_pair(ingress=p1['id'], egress=p2['id'],
                              expected_res_status=201)

    def test_port_pair_validation_no_domain(self):
        """Hosts without exactly one domain mapping are rejected."""
        self.aim_mgr.delete(self._aim_context, self.physdom)
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net1, '192.168.1.1', '192.168.1.0/24')
        p1 = self._make_port(self.fmt, net1['network']['id'])['port']
        p2 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p1['id'], 'h1')
        # H3 has no domain specified
        self._bind_port_to_host(p2['id'], 'h3')
        self.create_port_pair(ingress=p1['id'], egress=p2['id'],
                              expected_res_status=500)
        # Both ports no domain
        p3 = self._make_port(self.fmt, net1['network']['id'])['port']
        self._bind_port_to_host(p3['id'], 'h4')
        self.create_port_pair(ingress=p3['id'], egress=p2['id'],
                              expected_res_status=500)
        # Add domain, but different than H1
        pd = self.aim_mgr.create(
            self._aim_context, aim_infra.HostDomainMappingV2(
                host_name='h3', domain_name='diff-name',
                domain_type='PhysDom'))
        self.create_port_pair(ingress=p1['id'], egress=p2['id'],
                              expected_res_status=500)
        # Multi domain per host
        self.aim_mgr.create(self._aim_context, aim_infra.HostDomainMappingV2(
            host_name='h3', domain_name=self.physdom.name,
            domain_type='PhysDom'))
        self.create_port_pair(ingress=p1['id'], egress=p2['id'],
                              expected_res_status=500)
        # Delete extra domain: back to exactly one mapping, accepted
        self.aim_mgr.delete(self._aim_context, pd)
        self.create_port_pair(ingress=p1['id'], egress=p2['id'],
                              expected_res_status=201)

    def test_port_pair_validation_trunk(self):
        """Port pairs built from trunk subports are accepted."""
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        snet1 = self._make_network(self.fmt, 'net1', True)
        # Fix: the gateway must fall inside the subnet CIDR and the CIDR must
        # be a proper network address (the previous values had host bits set
        # and did not contain their gateways, so subnet creation would fail).
        self._make_subnet(self.fmt, net1, '192.167.0.1', '192.167.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net1, '192.168.1.1', '192.168.1.0/24')
        snet2 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.169.0.1', '192.169.0.0/24')
        p1 = self._make_port(self.fmt, net1['network']['id'])['port']
        p2 = self._make_port(self.fmt, net2['network']['id'])['port']
        sp1 = self._make_port(self.fmt, snet1['network']['id'])['port']
        sp2 = self._make_port(self.fmt, snet2['network']['id'])['port']
        trunk1 = self._create_resource('trunk', port_id=p1['id'])
        trunk2 = self._create_resource('trunk', port_id=p2['id'])
        self._bind_port_to_host(p1['id'], 'h1')
        self._bind_port_to_host(p2['id'], 'h2')
        self._bind_subport(self._ctx, trunk1, sp1)
        self._bind_subport(self._ctx, trunk2, sp2)
        self.driver._trunk_plugin.add_subports(
            self._ctx, trunk1['trunk']['id'],
            {'sub_ports': [{'port_id': sp1['id'],
                            'segmentation_type': 'vlan',
                            'segmentation_id': 100}]})
        self.driver._trunk_plugin.add_subports(
            self._ctx, trunk2['trunk']['id'],
            {'sub_ports': [{'port_id': sp2['id'],
                            'segmentation_type': 'vlan',
                            'segmentation_id': 100}]})
        self.create_port_pair(ingress=sp1['id'], egress=sp2['id'],
                              expected_res_status=201)
class TestPortPairOpflexAgent(TestAIMServiceFunctionChainingBase):
    """Port-pair validation when ports are bound to hosts running the
    OpFlex agent: networks must be of a concrete type (e.g. vlan)."""

    def setUp(self):
        # Tenant networks in this class default to the 'opflex' type.
        super(TestPortPairOpflexAgent, self).setUp(
            tenant_network_types=['opflex'])

    def _register_agent(self, host, agent_conf):
        # Register (or refresh) an agent record for *host* with the plugin.
        agent_record = {'host': host}
        agent_record.update(agent_conf)
        self.aim_mech.plugin.create_or_update_agent(
            context.get_admin_context(), agent_record)

    def test_port_pair_with_opflex_agent_vlan_nets(self):
        # Correct work flow: both networks are vlan-typed, pair accepted.
        vlan_kwargs = {'provider:network_type': 'vlan'}
        net_in = self._make_network(self.fmt, 'net1', True,
                                    arg_list=tuple(vlan_kwargs),
                                    **vlan_kwargs)
        self._make_subnet(self.fmt, net_in, '192.168.0.1', '192.168.0.0/24')
        ingress = self._make_port(self.fmt, net_in['network']['id'])['port']
        net_out = self._make_network(self.fmt, 'net2', True,
                                     arg_list=tuple(vlan_kwargs),
                                     **vlan_kwargs)
        self._make_subnet(self.fmt, net_out, '192.168.1.1', '192.168.1.0/24')
        egress = self._make_port(self.fmt, net_out['network']['id'])['port']
        self._register_agent('h1', AGENT_CONF_OPFLEX)
        self._register_agent('h2', AGENT_CONF_OPFLEX)
        self._bind_port_to_host(ingress['id'], 'h1')
        self._bind_port_to_host(egress['id'], 'h2')
        self.create_port_pair(ingress=ingress['id'], egress=egress['id'],
                              expected_res_status=201)

    def test_port_pair_invalid_with_opflex_agent_opflex_nets(self):
        # Validate that opflex type nets are invalid for port pairs.
        vlan_kwargs = {'provider:network_type': 'vlan'}
        net_in = self._make_network(self.fmt, 'net1', True,
                                    arg_list=tuple(vlan_kwargs),
                                    **vlan_kwargs)
        self._make_subnet(self.fmt, net_in, '192.168.0.1', '192.168.0.0/24')
        ingress = self._make_port(self.fmt, net_in['network']['id'])['port']
        # Second net has no provider type, so it gets the opflex default.
        net_out = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net_out, '192.168.1.1', '192.168.1.0/24')
        egress = self._make_port(self.fmt, net_out['network']['id'])['port']
        self._register_agent('h1', AGENT_CONF_OPFLEX)
        self._register_agent('h2', AGENT_CONF_OPFLEX)
        self._bind_port_to_host(ingress['id'], 'h1')
        self._bind_port_to_host(egress['id'], 'h2')
        self.create_port_pair(ingress=ingress['id'], egress=egress['id'],
                              expected_res_status=500)
class TestPortPairGroup(TestAIMServiceFunctionChainingBase):
    """Validation, update, and health-check rules for SFC port pair groups."""

    def test_ppg_validation(self):
        """A PPG is valid only when its port pairs share the same networks."""
        # Correct creation
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
        # Service 1
        p11 = self._make_port(self.fmt, net1['network']['id'])['port']
        self._bind_port_to_host(p11['id'], 'h1')
        p12 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p12['id'], 'h1')
        pp1 = self.create_port_pair(ingress=p11['id'], egress=p12['id'],
                                    expected_res_status=201)['port_pair']
        # Service 2
        p21 = self._make_port(self.fmt, net1['network']['id'])['port']
        self._bind_port_to_host(p21['id'], 'h2')
        p22 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p22['id'], 'h2')
        pp2 = self.create_port_pair(ingress=p21['id'], egress=p22['id'],
                                    expected_res_status=201)['port_pair']
        # This goes through: both pairs span net1 -> net2
        ppg1 = self.create_port_pair_group(
            port_pairs=[pp1['id'], pp2['id']],
            expected_res_status=201)['port_pair_group']
        # Use invalid pairs: pp3's egress sits on a third network
        net3 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net3, '192.168.0.1', '192.168.0.0/24')
        p31 = self._make_port(self.fmt, net3['network']['id'])['port']
        self._bind_port_to_host(p31['id'], 'h1')
        pp3 = self.create_port_pair(ingress=p21['id'], egress=p31['id'],
                                    expected_res_status=201)['port_pair']
        self.delete_port_pair_group(ppg1['id'])
        # Mixing pairs with different network topologies is rejected
        self.create_port_pair_group(port_pairs=[pp1['id'], pp3['id']],
                                    expected_res_status=500)
        # Works with only one PP
        ppg2 = self.create_port_pair_group(
            port_pairs=[pp3['id']],
            expected_res_status=201)['port_pair_group']
        # But update fails: adding an inconsistent pair is also rejected
        self.update_port_pair_group(
            ppg2['id'], port_pairs=[pp3['id'], pp1['id']],
            expected_res_status=500)

    def test_ppg_update(self):
        """Updating a PPG with its existing member list succeeds."""
        # Correct creation
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
        # Service 1
        p11 = self._make_port(self.fmt, net1['network']['id'])['port']
        self._bind_port_to_host(p11['id'], 'h1')
        p12 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p12['id'], 'h1')
        pp1 = self.create_port_pair(ingress=p11['id'], egress=p12['id'],
                                    expected_res_status=201)['port_pair']
        # This goes through
        ppg1 = self.create_port_pair_group(
            port_pairs=[pp1['id']],
            expected_res_status=201)['port_pair_group']
        # Same ID update works
        self.update_port_pair_group(ppg1['id'], port_pairs=[pp1['id']],
                                    expected_res_status=200)

    def test_healthcheck_group(self):
        """Health-check PPG parameters are validated and echoed back."""
        # Correct creation
        net1 = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
        net2 = self._make_network(self.fmt, 'net2', True)
        self._make_subnet(self.fmt, net2, '192.168.1.1', '192.168.1.0/24')
        # Service 1
        p11 = self._make_port(self.fmt, net1['network']['id'])['port']
        self._bind_port_to_host(p11['id'], 'h1')
        p12 = self._make_port(self.fmt, net2['network']['id'])['port']
        self._bind_port_to_host(p12['id'], 'h1')
        pp1 = self.create_port_pair(ingress=p11['id'], egress=p12['id'],
                                    expected_res_status=201)['port_pair']
        # Valid tcp health check: parameters are stored and returned
        ppg1 = self.create_port_pair_group(
            port_pairs=[pp1['id']], port_pair_group_parameters={
                'healthcheck_type': 'tcp', 'healthcheck_frequency': 60,
                'healthcheck_tcp_port': 8080},
            expected_res_status=201)['port_pair_group']
        self.assertEqual('tcp', ppg1['port_pair_group_parameters'][
            'healthcheck_type'])
        self.assertEqual(60, ppg1['port_pair_group_parameters'][
            'healthcheck_frequency'])
        self.assertEqual(8080, ppg1['port_pair_group_parameters'][
            'healthcheck_tcp_port'])
        self.delete_port_pair_group(ppg1['id'])
        # Unknown healthcheck_type is rejected
        self.create_port_pair_group(
            port_pairs=[pp1['id']], port_pair_group_parameters={
                'healthcheck_type': 'no', 'healthcheck_frequency': 60,
                'healthcheck_tcp_port': 8080},
            expected_res_status=400)
        # Negative frequency is rejected
        self.create_port_pair_group(
            port_pairs=[pp1['id']], port_pair_group_parameters={
                'healthcheck_type': 'tcp', 'healthcheck_frequency': -1,
                'healthcheck_tcp_port': 8080},
            expected_res_status=400)
        # Out-of-range TCP port is rejected
        self.create_port_pair_group(
            port_pairs=[pp1['id']], port_pair_group_parameters={
                'healthcheck_type': 'tcp', 'healthcheck_frequency': 60,
                'healthcheck_tcp_port': 80800},
            expected_res_status=400)
        # icmp check needs no frequency/port; none are echoed back
        ppg1 = self.create_port_pair_group(
            port_pairs=[pp1['id']], port_pair_group_parameters={
                'healthcheck_type': 'icmp'},
            expected_res_status=201)['port_pair_group']
        self.assertEqual('icmp', ppg1['port_pair_group_parameters'][
            'healthcheck_type'])
        self.assertTrue('check_frequency' not in ppg1[
            'port_pair_group_parameters'])
        self.assertTrue('tcp_port' not in ppg1[
            'port_pair_group_parameters'])
class TestFlowClassifier(TestAIMServiceFunctionChainingBase):
def test_fc_validation(self):
# Correct classifier
net1 = self._make_network(self.fmt, 'net1', True)
self._make_subnet(self.fmt, net1, '192.168.0.1', '192.168.0.0/24')
net2 = self._make_network(self.fmt, 'net2', True)
self._make_subnet(self.fmt, net1, '192.168.1.1', '192.168.1.0/24')
fc = self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24',
expected_res_status=201)['flow_classifier']
self.delete_flow_classifier(fc['id'], expected_res_status=204)
# Wrong FCs
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
destination_ip_prefix='192.168.1.0/24',
expected_res_status=400)
self.create_flow_classifier(
l7_parameters={'logical_source_network': net1['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net1['network']['id']},
source_ip_prefix='192.168.0.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': ''},
source_ip_prefix='192.168.0.0/24', expected_res_status=400)
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id'],
}, source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.0.0/24', expected_res_status=400)
self._delete_network(net2['network']['id'])
self.create_flow_classifier(
l7_parameters={
'logical_source_network': net1['network']['id'],
'logical_destination_network': net2['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24', expected_res_status=404)
net_svi = self._make_network(self.fmt, 'net_svi', True,
arg_list=self.extension_attributes,
**{'apic:svi': True})
self._make_subnet(self.fmt, net_svi, '192.168.0.1', '192.168.0.0/24')
fc = self.create_flow_classifier(
l7_parameters={
'logical_source_network': net_svi['network']['id'],
'logical_destination_network': net_svi['network']['id']},
source_ip_prefix='192.168.0.0/24',
destination_ip_prefix='192.168.1.0/24',
expected_res_status=201)['flow_classifier']
# Same subnets, different networks.
net3 = self._make_network(self.fmt, 'net3', True)
self._make_subnet(self.fmt, net1, '192.168.2.1', '192.168.2.0/24')
net4 = self._make_network(self.fmt, 'net4', True)
self._make_subnet(self.fmt, net1, '192.168.3.1', | |
from Node import Node
class LinkedList:
    def __init__(self, node=None):
        """Create a linked list whose head is *node* (None for an empty list).

        :param node: Node or None -> initial head of the list.
        """
        self.head = node
def insertFront(self, newValue):
# Debug
toPrint = False
newNode = Node(newValue)
if toPrint:
print("New Node value = " + str(newNode.value))
newNode.next = self.head
if toPrint:
print("New Node dot next = " + str(newNode.next))
if newNode.next != None:
print("Value of new node dot next = " + str(newNode.next.value))
self.head = newNode
def insertBack(self, newValue):
newNode = Node(newValue)
# List is empty
if self.head == None:
self.insertFront(newValue)
return
# List is not empty
tail = self.head
while tail.next != None:
tail = tail.next
tail.next = newNode
def insertNodeFront(self, newNode):
pass
def insertNodeBack(self, newNode):
pass
def extend(self, newll):
pass
def search(self, k):
"""
Finds the first element with value k in this linked list by a simple linear search,
returning a list containing the node of this element and its' index. If the element is not within this linked list, search
will return a list container the values None and '-1'.
:complexity = big O of n.
:param k: (type) -> element to find within this linked list.
:return [x, index]: x -> Node of the element found or None; index: int -> The index of the the element.
"""
x = self.head
index = 0
while x != None and x.value != k:
x = x.next
index += 1
# Check to see if element was found.
if x == None:
index = -1
return [x, index]
def removeFront(self):
"""
Remove the first element in the linked list and return it.
:return removed: Node or None -> element removed
"""
# Check to see if list is empty.
if self.head == None:
return None
# List is not empty.
removed = self.head
self.head = self.head.next
return removed
def removeBack(self):
"""
Remove the last element in the linked list and return it.
:return: tail: Node or None -> element removed
"""
# Check to see if list is empty.
if self.head == None:
return None
tail = self.head
prevTail = self.head
while tail.next != None:
tail = tail.next
if tail.next != None:
prevTail = prevTail.next
prevTail.next = None # remove tail
return tail
    def remove(self, k):
        """
        Search for the first occurrence of the given value 'k' in this
        linked list and, if found, unlink and return the node containing it.

        :param k: value whose first occurrence should be removed.
        :return removed: Node or None -> element removed (None when the list
            is empty or 'k' does not occur)
        """
        # NOTE(review): _find_previous is not defined in this portion of the
        # file; presumably it returns the node *preceding* the first node
        # whose value is k (or None when absent) -- confirm its contract.
        prev = self._find_previous(k)
        # Check empty list or zero occurrence found.
        if prev == None:
            return None
        # Prev is first element
        # NOTE(review): this assumes prev == self.head only when the head
        # itself holds k; if _find_previous can return the head as the
        # predecessor of the second node, the wrong node is removed -- verify.
        if prev == self.head:
            return self.removeFront()
        x = prev.next
        prev.next = x.next
        return x
def findMidPoint(self):
"""
Find the mid point of the linked list.
:return: slow: Tuple(x: Node or None, count: int) -> element in the middle of this linked list and its' index in a tuple.
"""
count = 0
fast = self.head
slow = self.head
while fast != None and fast.next != None and fast.next.next != None:
count += 1
fast = fast.next.next
slow = slow.next
return (slow, count)
def reverse(self):
"""
Reverse the linked list.
:return: None
"""
# Debug
toPrint = False
if toPrint:
print("Debugging reverse method! \n")
lostNode = self.head
prev = self.head
x = self.head
if toPrint:
print("Lost Node value: " + str(lostNode.value))
print("Prev Node value: " + str(prev.value))
print("X Node value: " + str(x.value))
while lostNode != None and lostNode.next != None:
x = lostNode
if toPrint:
print("Is x == self.head? " + str(x == self.head))
if x == self.head:
if toPrint:
print("X is equal to head!")
lostNode = lostNode.next
x.next = None
if toPrint:
print("Value of x: " + str(x.value))
print("Value of lostNode: " + str(lostNode.value))
else:
if toPrint:
print("X is not equal to head!")
lostNode = lostNode.next
x.next = prev
prev = x
## if lostNode.next == None:
## x = lostNode
## x.next = prev
## prev = x
if toPrint:
print("Value of lostNode: " + str(lostNode.value))
print("Value of x: " + str(x.value))
print("Value of prev: " + str(prev.value))
# Add Last Node
x = lostNode
x.next = prev
self.head = x
    def reverseUpTo(self, i):
        """
        Reverse the list only up to element at index <i>.

        The prefix of the list up to index <i> is reversed in place; the
        remainder keeps its original order and is re-attached behind the
        old head (which becomes the tail of the reversed prefix).

        :param i: index where the reverse process stops
        :return x: node pointing to a sublist of the original list in reverse order
        """
        # lostNode walks ahead of the reversal frontier; x is the node whose
        # link is being re-pointed; prev trails x; firstElem remembers the
        # original head so the remainder can be reconnected at the end.
        lostNode = self.head
        x = self.head
        prev = self.head
        firstElem = self.head
        counter = 0
        while lostNode != None and lostNode.next != None and counter < i:
            counter += 1
            x = lostNode
            if x == self.head:
                # First node of the prefix becomes the tail of the reversal.
                lostNode = lostNode.next
                x.next = None
            else:
                lostNode = lostNode.next
                x.next = prev
                prev = x
        if i - counter == 1:
            # Add Last Node: the loop stopped one short of i, so fold the
            # current frontier node into the reversed prefix.
            x = lostNode
            x.next = prev
        elif i - counter != 0:
            # Raise Index Error
            # raise IndexError
            # NOTE(review): when the list is shorter than i the IndexError is
            # commented out and a partial reversal is returned silently --
            # confirm this is the intended behavior.
            pass
        # Update head and concatenate reversed node with the rest of the
        # original ll.
        self.head = x
        firstElem.next = lostNode
        return x
    def reverseUpToNode(self, node):
        """
        Reverse the list up to the node and including <node>.

        Works like reverseUpTo, but the stopping condition is node identity
        instead of an index.

        :param node: element in this ll where the reverse process stops
        :return x: node pointing to a sublist of the original list in reverse order
        """
        # lostNode walks ahead of the reversal frontier; x is the node whose
        # link is being re-pointed; prev trails x; firstElem remembers the
        # original head so the remainder can be reconnected at the end.
        lostNode = self.head
        x = self.head
        prev = self.head
        firstElem = self.head
        while lostNode != None and lostNode.next != None and lostNode != node:
            x = lostNode
            if x == self.head:
                # First node of the prefix becomes the tail of the reversal.
                lostNode = lostNode.next
                x.next = None
            else:
                lostNode = lostNode.next
                x.next = prev
                prev = x
        if x != node:
            if lostNode == node:
                # Add Last Node: fold <node> itself into the reversed prefix.
                x = lostNode
                lostNode = lostNode.next
                x.next = prev
        # Update head and concatenate reversed node with the rest of the
        # original ll.
        # NOTE(review): if <node> is not present in the list the loop runs to
        # the tail and the whole list is (mostly) reversed -- confirm that
        # callers never pass a foreign node.
        self.head = x
        firstElem.next = lostNode
        return x
def isPalindrome(self):
"""
Check to see if this ll is a palindrome or not.
For a ll to be a palindrome all elements from the mid point and on should
be a 'mirror' reflection of all the elements before the mid point.
Here 'mirror' is used to refer to all properties that apply
when light is reflected off of a physical mirror. Specifically speaking
it is referring to the pi/2 rotation that is present when holding a writen
note in front of a mirror.
=== Invariants ===
Solves the problem in O(n) steps and O(1) additional steps regardless of
n.
:return: {bool}
"""
## print("isPalindrome debugging!")
midPoint, indexOfMid = self.findMidPoint()
nextMidPoint = midPoint.next
print("MidPoint: " + str(midPoint.value) + ", nextMidPoint: " + str(nextMidPoint.value))
print("Index of Mid: " + str(indexOfMid))
if indexOfMid % 2 == 0:
# len is odd
nextMidPoint
reverseX = self.reverseUpTo(indexOfMid)
else:
# len is even
reverseX = self.reverseUpTo(indexOfMid + 1)
print("My orginal self: ")
self.printList()
while reverseX != None and reverseX.next != None and nextMidPoint != None and nextMidPoint.next != None:
print("reverseX value: " + str(reverseX.value) + ", nMidPointX.value: " + str(nextMidPoint.value))
if reverseX.value != nextMidPoint.value:
return False
reverseX = reverseX.next
nextMidPoint = nextMidPoint.next
return True
def getItem(self, i):
"""
Get item at index <i>.
Raise IndexError if i is out of range or greater than the length of the list.
:param i: int -> index of item that the user wants.:
:return item: None or Node -> node at index <i>:
"""
x = self.head
index = 0
while x != None and x.next != None and index != i:
x = x.next
index += 1
if index != i:
raise IndexError()
return x
def printList(self):
# Debug
toPrint = False
x = self.head
printStr = "["
while x != None:
if toPrint:
print("Head is not null! ")
printStr += str(x.value) + ", "
if toPrint:
| |
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# BreedRegistrationSequence
0x00102294L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# EncapsulatedDocument
0x00420011L: {
'ENCAPSULATED CDA IOD': ['Encapsulated Document'],
'ENCAPSULATED PDF IOD': ['Encapsulated Document'],
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Encapsulated Document', 'Implant Assembly'],
},
# ResponsiblePerson
0x00102297L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Patient'],
'SPECTACLE PRESCIPTION REPORT IOD': ['Patient'],
'BASIC TEXT SR IOD': ['Patient'],
'NM IMAGE IOD': ['Patient'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'LENSOMETRY MEASUREMENTS IOD': ['Patient'],
'MR SPECTROSCOPY IOD': ['Patient'],
'ENCAPSULATED PDF IOD': ['Patient'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CHEST CAD SR IOD': ['Patient'],
'HEMODYNAMIC IOD': ['Patient'],
'OPHTHALMIC AXIAL MEASUREMENTS IOD': ['Patient'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Patient'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Patient'],
'ENHANCED MR COLOR IMAGE IOD': ['Patient'],
'ENHANCED CT IMAGE IOD': ['Patient'],
'X-RAY RADIATION DOSE SR IOD': ['Patient'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Patient'],
'PROCEDURE LOG IOD': ['Patient'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Patient'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Patient'],
'STEREOMETRIC RELATIONSHIP IOD': ['Patient'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Patient'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Patient'],
'VL ENDOSCOPIC IMAGE IOD': ['Patient'],
'KERATOMETRY MEASUREMENTS IOD': ['Patient'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Patient'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Patient'],
'COMPREHENSIVE SR IOD': ['Patient'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Patient'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Patient'],
'SPATIAL FIDUCIALS IOD': ['Patient'],
'RT ION PLAN IOD': ['Patient'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Patient'],
'CT IMAGE IOD': ['Patient'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Patient'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Patient'],
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Patient'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'RT DOSE IOD': ['Patient'],
'AMBULATORY ECG IOD': ['Patient'],
'SURFACE SEGMENTATION IOD': ['Patient'],
'MAMMOGRAPHY CAD SR IOD': ['Patient'],
'VL MICROSCOPIC IMAGE IOD': ['Patient'],
'RT BEAMS TREATMENT RECORD IOD': ['Patient'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Patient'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'RT IMAGE IOD': ['Patient'],
'SC IMAGE IOD': ['Patient'],
None: ['Patient'],
'SEGMENTATION IOD': ['Patient'],
'PET IMAGE IOD': ['Patient'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'DIGITAL X-RAY IMAGE IOD': ['Patient'],
'REAL WORLD VALUE MAPPING IOD': ['Patient'],
'SPATIAL REGISTRATION IOD': ['Patient'],
'COLON CAD SR IOD': ['Patient'],
'INTRAVASCULAR OCT IMAGE IOD': ['Patient'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Patient'],
'ENHANCED PET IMAGE IOD': ['Patient'],
'VISUAL ACUITY MEASUREMENTS IOD': ['Patient'],
'US MULTI-FRAME IMAGE IOD': ['Patient'],
'ENHANCED X-RAY RF IMAGE IOD': ['Patient'],
'RT BEAMS DELIVERY INSTRUCTION IOD': ['Patient'],
'SUBJECTIVE REFRACTION MEASUREMENTS IOD': ['Patient'],
'US IMAGE IOD': ['Patient'],
'GENERAL ECG IOD': ['Patient'],
'XRF IMAGE IOD': ['Patient'],
'ENCAPSULATED CDA IOD': ['Patient'],
'ENHANCED SR IOD': ['Patient'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Patient'],
'GENERAL AUDIO WAVEFORM IOD': ['Patient'],
'MR IMAGE IOD': ['Patient'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Patient'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Patient'],
'ARTERIAL PULSE WAVEFORM IOD': ['Patient'],
},
# ResponsiblePersonRole
0x00102298L: {
'BASIC STRUCTURED DISPLAY IOD': ['Patient'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Patient'],
'RT BRACHY TREATMENT RECORD IOD': ['Patient'],
'RT STRUCTURE SET IOD': ['Patient'],
'RT PLAN IOD': ['Patient'],
'CR IMAGE IOD': ['Patient'],
'RAW DATA IOD': ['Patient'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Patient'],
'ENHANCED MR IMAGE IOD': ['Patient'],
'BASIC CARDIAC EP IOD': ['Patient'],
'RT TREATMENT SUMMARY RECORD IOD': ['Patient'],
'12-LEAD ECG IOD': ['Patient'],
'RESPIRATORY WAVEFORM IOD': ['Patient'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Patient'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Patient'],
'BASIC VOICE AUDIO IOD': ['Patient'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Patient'],
'ENHANCED | |
Citation.reference.startswith(q))
return jsonify([
{
"text": citation.reference,
"id": citation.id,
}
for citation in citations
])
@api_blueprint.route('/api/evidence/<int:evidence_id>')
def get_evidence_by_id(evidence_id):
    """Look up an evidence by its database identifier and serialize it as JSON."""
    return jsonify(manager.get_evidence_by_id_or_404(evidence_id).to_json(include_id=True))
####################################
# AUTHOR
####################################
@api_blueprint.route('/api/query/<int:query_id>/authors')
def get_all_authors(query_id):
    """Get a sorted list of all authors in the graph produced by the given query.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database query identifier
        required: true
        type: integer
    """
    bel_graph: BELGraph = manager.cu_get_graph_from_query_id_or_404(query_id)
    author_names = bel_graph.get_authors()
    return jsonify(sorted(author_names))
@api_blueprint.route('/api/author/suggestion/')
def suggest_authors():
    """Return the list of authors matching the author keyword.
    ---
    tags:
        - author
    parameters:
      - name: q
        in: query
        description: The search term
        required: true
        type: string
    """
    keyword = request.args.get('q')
    if not keyword:
        return jsonify([])
    matches = manager.session.query(Author).filter(Author.name_contains(keyword)).all()
    payload = [
        {"id": author.id, "text": author.name}
        for author in matches
    ]
    return jsonify(payload)
####################################
# EDGES
####################################
@api_blueprint.route('/api/edge')
@roles_required('admin')
def get_edges():
    """Get all edges.
    ---
    tags:
        - edge
    parameters:
      - name: limit
        in: query
        description: The number of edges to return
        required: false
        type: integer
      - name: offset
        in: query
        description: The number of edges to skip before returning results
        required: false
        type: integer
    """
    filtered_query = add_edge_filter(manager.session.query(Edge))
    entries = [
        manager._help_get_edge_entry(edge=edge, user=current_user)
        for edge in filtered_query.all()
    ]
    return jsonify(entries)
@api_blueprint.route('/api/edge/by_bel/statement/<bel>')
def get_edges_by_bel(bel: str):
    """Get the edges matching the given BEL statement.
    ---
    tags:
        - edge
    parameters:
      - name: bel
        in: path
        description: A BEL statement
        required: true
        type: string
    """
    filtered = add_edge_filter(manager.query_edges(bel=bel))
    return jsonify_edges(filtered)
@api_blueprint.route('/api/edge/by_bel/source/<source_bel>')
def get_edges_by_source_bel(source_bel: str):
    """Get the edges whose source node matches the given BEL term.
    ---
    tags:
        - edge
    parameters:
      - name: source_bel
        in: path
        description: A BEL term
        required: true
        type: string
    """
    filtered = add_edge_filter(manager.query_edges(source=source_bel))
    return jsonify_edges(filtered)
@api_blueprint.route('/api/edge/by_bel/target/<target_bel>')
def get_edges_by_target_bel(target_bel: str):
    """Get the edges whose target node matches the given BEL term.
    ---
    tags:
        - edge
    parameters:
      - name: target_bel
        in: path
        description: A BEL term
        required: true
        type: string
    """
    filtered = add_edge_filter(manager.query_edges(target=target_bel))
    return jsonify_edges(filtered)
@api_blueprint.route('/api/edge/by_type/<source_function>/<target_function>')
def get_edges_typed(source_function: str, target_function: str):
    """Get the edges whose source and target nodes have the given BEL types.
    ---
    tags:
        - edge
    parameters:
      - name: source_function
        in: path
        description: A BEL Type
        required: true
        type: string
      - name: target_function
        in: path
        description: A BEL Type
        required: true
        type: string
    """
    typed_query = manager.query_edges(
        source_function=source_function,
        target_function=target_function,
    )
    return jsonify_edges(add_edge_filter(typed_query))
@api_blueprint.route('/api/edge/<edge_hash>')
def get_edge_by_hash(edge_hash: str):
    """Get an edge data dictionary by its PyBEL hash.
    ---
    tags:
        - edge
    parameters:
      - name: edge_hash
        in: path
        description: The PyBEL hash of an edge
        required: true
        type: string
    """
    found = manager.get_edge_by_hash_or_404(edge_hash)
    entry = manager._help_get_edge_entry(edge=found, user=current_user)
    return jsonify(entry)
@api_blueprint.route('/api/edge/hash_starts/<edge_hash>')
@roles_required('admin')
def search_edge_by_hash(edge_hash: str):
    """Get edge data dictionaries whose hash begins with the given prefix.
    ---
    tags:
        - edge
    parameters:
      - name: edge_hash
        in: path
        description: The PyBEL hash of an edge
        required: true
        type: string
    """
    matching = manager.session.query(Edge).filter(Edge.md5.startswith(edge_hash))
    # Same serialization as the other edge listings.
    return jsonify_edges(matching)
@api_blueprint.route('/api/edge/<edge_hash>/vote/up')
@login_required
def store_up_vote(edge_hash: str):
    """Record the current user's up-vote on an edge.
    ---
    tags:
        - edge
    parameters:
      - name: edge_hash
        in: path
        description: The PyBEL hash of an edge
        required: true
        type: string
    """
    edge = manager.get_edge_by_hash_or_404(edge_hash)
    return jsonify(manager.get_or_create_vote(edge, current_user, agreed=True).to_json())
@api_blueprint.route('/api/edge/<edge_hash>/vote/down')
@login_required
def store_down_vote(edge_hash: str):
    """Record the current user's down-vote on an edge.
    ---
    tags:
        - edge
    parameters:
      - name: edge_hash
        in: path
        description: The PyBEL hash of an edge
        required: true
        type: string
    """
    edge = manager.get_edge_by_hash_or_404(edge_hash)
    return jsonify(manager.get_or_create_vote(edge, current_user, agreed=False).to_json())
@api_blueprint.route('/api/edge/<edge_hash>/comment', methods=('GET', 'POST'))
@login_required
def store_comment(edge_hash: str):
    """Add a comment to the edge.
    ---
    tags:
        - edge
    parameters:
      - name: edge_hash
        in: path
        description: The PyBEL hash of an edge
        required: true
        type: string
      - name: comment
        in: query
        description: The comment text
        required: true
        type: string
    """
    edge = manager.get_edge_by_hash_or_404(edge_hash)
    comment_text = request.args.get('comment')
    if comment_text is None:
        # Resolves the old FIXME: a missing required parameter is a client
        # error, so reply 400 Bad Request rather than 403 Forbidden.
        abort(400, 'Comment not found')
    comment = EdgeComment(
        user=current_user,
        edge=edge,
        comment=comment_text,
    )
    manager.session.add(comment)
    manager.session.commit()
    return jsonify(comment.to_json())
####################################
# NODES
####################################
def jsonify_nodes(nodes: Iterable[Node]) -> Response:
    """Serialize an iterable of nodes as a JSON response."""
    return jsonify([n.to_json() for n in nodes])
def jsonify_edges(edges: Iterable[Edge]) -> Response:
    """Serialize an iterable of edges (with their database ids) as a JSON response."""
    return jsonify([e.to_json(include_id=True) for e in edges])
@api_blueprint.route('/api/node/')
@roles_required('admin')
def get_nodes():
    """Get all nodes.
    ---
    tags:
        - node
    parameters:
      - name: limit
        in: query
        description: The maximum number of nodes to return
        required: false
        type: integer
      - name: offset
        in: query
        description: The number of nodes to skip before returning results
        required: false
        type: integer
    """
    limit = request.args.get('limit', type=int)
    offset = request.args.get('offset', type=int)
    node_query = manager.session.query(Node)
    # Compare against None explicitly so an explicit ``limit=0`` or
    # ``offset=0`` is honored instead of being silently ignored (0 is falsy).
    if limit is not None:
        node_query = node_query.limit(limit)
    if offset is not None:
        node_query = node_query.offset(offset)
    return jsonify_nodes(node_query)
@api_blueprint.route('/api/node/<node_hash>')
def get_node_by_hash(node_hash: str):
    """Get a node, enriched with Bio2BEL annotations, by its PyBEL hash.
    ---
    tags:
        - node
    parameters:
      - name: node_hash
        in: path
        description: The PyBEL hash of a node
        required: true
        type: string
    """
    node = manager.get_node_by_hash_or_404(node_hash)
    return jsonify(get_enriched_node_json(node))
@api_blueprint.route('/api/node/by_bel/<bel>')
def nodes_by_bel(bel: str):
    """List the nodes matching the given BEL term.
    ---
    tags:
        - node
    """
    return jsonify_nodes(manager.query_nodes(bel=bel))
@api_blueprint.route('/api/node/by_name/<name>')
def nodes_by_name(name: str):
    """List the nodes with the given name.
    ---
    tags:
        - node
    """
    return jsonify_nodes(manager.query_nodes(name=name))
@api_blueprint.route('/api/namespace/<namespace>/nodes')
def nodes_by_namespace(namespace: str):
    """List the nodes whose identifiers come from the given namespace.
    ---
    tags:
        - namespace
    """
    return jsonify_nodes(manager.query_nodes(namespace=namespace))
@api_blueprint.route('/api/namespace/<namespace>/name/<name>/nodes')
def nodes_by_namespace_name(namespace: str, name: str):
    """List the nodes with the given namespace and name.
    ---
    tags:
        - namespace
    """
    return jsonify_nodes(manager.query_nodes(namespace=namespace, name=name))
def get_enriched_node_json(node: Node) -> Optional[Mapping]:
    """Enrich the node data with some of the Bio2BEL managers.

    Converts the ORM node into its BEL data dictionary, then attaches an
    ``annotations`` entry populated from whichever Bio2BEL manager matches
    the node's namespace. Returns ``None`` when the node has no namespace.
    """
    # Rebind: from here on ``node`` is the BEL data dict, not the ORM object.
    node = node.as_bel()
    namespace = node.get(NAMESPACE)
    if namespace is None:
        return
    node['annotations'] = {}
    # Each branch stores either the serialized model or a ``{'missing': True}``
    # marker when the lookup fails. NOTE(review): the mix of ``to_dict`` vs
    # ``to_json`` and of ``upper()`` vs ``lower()`` comparisons mirrors each
    # manager's own API — presumably intentional, confirm before unifying.
    if namespace.upper() == 'HGNC' and bio2bel.hgnc_manager:
        model = bio2bel.hgnc_manager.get_node(node)
        node['annotations']['HGNC'] = {'missing': True} if model is None else model.to_dict()
    elif namespace.upper() == 'CHEBI' and bio2bel.chebi_manager:
        model = bio2bel.chebi_manager.get_chemical_from_data(node)
        node['annotations']['CHEBI'] = {'missing': True} if model is None else model.to_json()
    elif namespace.upper() in {'EGID', 'ENTREZ', 'NCBIGENE'} and bio2bel.entrez_manager:
        model = bio2bel.entrez_manager.lookup_node(node)
        node['annotations']['ENTREZ'] = {'missing': True} if model is None else model.to_json()
    elif namespace.upper() in {'EXPASY', 'EC', 'ECCODE', 'EC-CODE'} and bio2bel.expasy_manager:
        # Falls back to the identifier when the node has no name.
        model = bio2bel.expasy_manager.get_enzyme_by_id(node.name or node.identifier)
        node['annotations']['EXPASY'] = {'missing': True} if model is None else model.to_json()
    elif namespace.lower() in {'gocc', 'goccid', 'gobp', 'gobpid', 'go'} and bio2bel.go_manager:
        model = bio2bel.go_manager.lookup_term(node)
        node['annotations']['GO'] = {'missing': True} if model is None else model.to_json()
    elif namespace.lower() in {'mesh', 'meshc', 'meshpp', 'meshd', 'meshcs'} and bio2bel.mesh_manager:
        model = bio2bel.mesh_manager.look_up_node(node)
        node['annotations']['MESH'] = {'missing': True} if model is None else model.to_json()
    elif namespace.lower() == 'rgd' and bio2bel.rgd_manager:
        model = bio2bel.rgd_manager.get_rat_gene_from_bel(node)
        node['annotations']['RGD'] = {'missing': True} if model is None else model.to_json()
    elif namespace.lower() == 'mgi' and bio2bel.mgi_manager:
        model = bio2bel.mgi_manager.get_mouse_gene_from_bel(node)
        node['annotations']['MGI'] = {'missing': True} if model is None else model.to_json()
    elif namespace.lower() in {'hbp', 'conso'} and bio2bel.conso_manager:
        model = bio2bel.conso_manager.normalize_node(node)
        node['annotations']['CONSO'] = (
            {'missing': True} if model is None else
            bio2bel.conso_manager.get_json(model.identifier)
        )
    return node
@api_blueprint.route('/api/node/suggestion/')
def get_node_suggestion():
    """Suggest nodes whose BEL contains the search term, shortest first.
    ---
    tags:
        - node
    parameters:
      - name: q
        in: query
        description: The search term
        required: true
        type: string
    """
    term = request.args.get('q')
    if not term:
        return jsonify([])
    matches = (
        manager.session.query(Node)
        .filter(Node.bel_contains(term))
        .order_by(func.length(Node.bel))
    )
    return jsonify([
        {"text": match.bel, "id": match.md5}
        for match in matches
    ])
####################################
# PIPELINE
####################################
@api_blueprint.route('/api/pipeline/suggestion/')
def get_pipeline_function_names():
    """Send a list of pipeline function names matching the search term."""
    term = request.args.get('q')
    if not term:
        return jsonify([])
    needle = term.casefold()
    suggestions = []
    for func_name in no_arguments_map:
        pretty = func_name.replace("_", " ")
        if needle in pretty.casefold():
            suggestions.append(pretty.capitalize())
    return jsonify(suggestions)
@api_blueprint.route('/api/query/<int:query_id>', methods=['DELETE'])
@login_required
def drop_query_by_id(query_id: int):
    """Delete a query.

    The user must own the query (or be an administrator) to drop it.
    ---
    tags:
        - query
    parameters:
      - name: query_id
        in: path
        description: The database identifier of a query
        required: true
        type: integer
    """
    query = manager.get_query_by_id_or_404(query_id)
    # Bug fix: the original ``not current_user.is_admin or query.user !=
    # current_user`` aborted unless the user was BOTH admin AND owner,
    # locking out both admins and owners. Allow either.
    if not (current_user.is_admin or query.user == current_user):
        abort(403)
    manager.session.delete(query)
    manager.session.commit()
    return next_or_jsonify(f'Dropped query {query_id}')
@api_blueprint.route('/api/query', methods=['DELETE'])
@roles_required('admin')
def drop_queries():
    """Drop all queries.

    User must be admin to drop all queries.
    ---
    tags:
        - query
    """
    # Delete user-query links first, then the queries themselves,
    # committing after each step.
    for model in (UserQuery, models.Query):
        manager.session.query(model).delete()
        manager.session.commit()
    return next_or_jsonify('Dropped all queries')
@api_blueprint.route('/api/user/<int:user_id>/query', methods=['DELETE'])
@login_required
def drop_user_queries(user_id):
    """Drop all queries associated with the user.
    ---
    tags:
        - query
        - user
    parameters:
      - name: user_id
        in: path
        description: The database identifier of a user
        required: true
        type: integer
    """
    # Only the user themself or an administrator may do this.
    is_allowed = current_user.is_admin or user_id == current_user.id
    if not is_allowed:
        abort(403)
    manager.drop_queries_by_user_id(user_id)
    return next_or_jsonify(f'Dropped all queries associated with {current_user}')
@api_blueprint.route('/api/query/<int:query_id>/info')
def query_to_network(query_id: int):
"""Return info from a given query identifier.
---
tags:
- query
parameters:
- name: query_id
in: path
description: The database identifier of a query
required: true
type: integer
"""
query = manager.cu_get_query_by_id_or_404(query_id=query_id)
rv = query.to_json(include_id=True)
if query.user_query and query.user_query.user:
rv['creator'] = str(query.user_query.user)
network_ids = rv['network_ids']
rv['networks'] = [
f'{name} v{version}'
| |
<reponame>kmader/qbi-2019-py
#!/usr/bin/env python
# coding: utf-8
# # ETHZ: 227-0966-00L
# # Quantitative Big Imaging
# # March 28, 2019
#
# ## Shape Analysis
# In[1]:
import seaborn as sns
import matplotlib.pyplot as plt
# Global plotting defaults for the notebook (size, resolution, fonts),
# then the ggplot style and a grid-free seaborn theme.
plt.rcParams.update({
    "figure.figsize": (8, 8),
    "figure.dpi": 150,
    "font.size": 14,
    "font.family": ["sans-serif"],
    "font.sans-serif": ["DejaVu Sans"],
})
plt.style.use("ggplot")
sns.set_style("whitegrid", {"axes.grid": False})
# # Literature / Useful References
#
# - <NAME>, Morphometry with R
# - [Online](http://link.springer.com/book/10.1007%2F978-0-387-77789-4) through ETHZ
# - [Buy it](http://www.amazon.com/Morphometrics-R-Use-Julien-Claude/dp/038777789X)
# - <NAME>, “The Image Processing Handbook”,(Boca Raton, CRC Press)
# - Available [online](http://dx.doi.org/10.1201/9780203881095) within domain ethz.ch (or proxy.ethz.ch / public VPN)
# - Principal Component Analysis
# - <NAME>. and <NAME> (2002). Modern Applied Statistics with S, Springer-Verlag
# - Shape Tensors
# - http://www.cs.utah.edu/~gk/papers/vissym04/
# - <NAME>. (2010). BoneJ: Free and extensible bone image analysis in ImageJ. Bone, 47, 1076–9. doi:10.1016/j.bone.2010.08.023
# - <NAME>. , et al. (2013). A quantitative framework for the 3D characterization of the osteocyte lacunar system. Bone, 57(1), 142–154. doi:10.1016/j.bone.2013.06.026
#
# - <NAME>, <NAME>. Principles of Digital Image Processing:
# Core Algorithms. Springer-Verlag, London, 2009.
# - <NAME>. Digital Image Processing. Springer-Verlag,
# Berlin-Heidelberg, 6. edition, 2005.
# - <NAME>. Recognizing Planar Objects Using Invariant Image
# Features, from Lecture notes in computer science, p. 676. Springer,
# Berlin, 1993.
# - http://en.wikipedia.org/wiki/Image_moment
#
#
# # Previously on QBI ...
#
# - Image Enhancement
# - Highlighting the contrast of interest in images
# - Minimizing Noise
# - Segmentation
# - Understanding value histograms
# - Dealing with multi-valued data
# - Automatic Methods
# - Hysteresis Method, K-Means Analysis
# - Regions of Interest
# - Contouring
# - Machine Learning
# # Learning Objectives
#
# ## Motivation (Why and How?)
# - How do we quantify where and how big our objects are?
# - How can we say something about the shape?
# - How can we compare objects of different sizes?
# - How can we compare two images on the basis of the shape as calculated from the images?
# - How can we put objects into an finite element simulation? or make pretty renderings?
# # Outline
#
# - Motivation (Why and How?)
# - Object Characterization
# - Volume
# - Center and Extents
# - Anisotropy
#
# ***
#
# - Shape Tensor
# - Principal Component Analysis
# - Ellipsoid Representation
# - Scale-free metrics
# - Anisotropy, Oblateness
# - Meshing
# - Marching Cubes
# - Isosurfaces
# - Surface Area
# # Motivation
#
#
# We have dramatically simplified our data, but there is still too much.
#
# - We perform an experiment bone to see how big the cells are inside the tissue
# $$\downarrow$$ 
#
# ### 2560 x 2560 x 2160 x 32 bit
# _56GB / sample_
# - Filtering and Enhancement!
# $$\downarrow$$
# - 56GB of less noisy data
#
# ***
#
# - __Segmentation__
#
# $$\downarrow$$
#
# ### 2560 x 2560 x 2160 x 1 bit
# (1.75GB / sample)
#
# - Still an awful lot of data
# # What did we want in the first place
#
# ### _Single number_:
# * volume fraction,
# * cell count,
# * average cell stretch,
# * cell volume variability
# # Component Labeling
#
# Once we have a clearly segmented image, it is often helpful to identify the sub-components of this image. The easist method for identifying these subcomponents is called component labeling which again uses the neighborhood $\mathcal{N}$ as a criterion for connectivity, resulting in pixels which are touching being part of the same object.
#
#
# In general, the approach works well since usually when different regions are touching, they are related. It runs into issues when you have multiple regions which agglomerate together, for example a continuous pore network (1 object) or a cluster of touching cells.
#
# Here we show some examples from Cityscape Data taken in Aachen (https://www.cityscapes-dataset.com/)
# In[2]:
from skimage.io import imread
import numpy as np
import matplotlib.pyplot as plt
# Load the Aachen street scene and a downsampled boolean mask of label 26
# (presumably the car class in the Cityscapes labeling — see the cell above).
car_img = imread("ext-figures/aachen_img.png")
seg_img = imread("ext-figures/aachen_label.png")[::4, ::4] == 26
print("image dimensions", car_img.shape, seg_img.shape)
fig, (input_ax, seg_ax) = plt.subplots(1, 2, figsize=(20, 8))
input_ax.imshow(car_img)
input_ax.set_title("Input Image")
seg_ax.imshow(seg_img, cmap="bone")
seg_ax.set_title("Segmented Image")
# The more general formulation of the problem is for networks (roads, computers, social). Are the points start and finish connected?
# In[3]:
from skimage.morphology import label
help(label)
# In[4]:
# Run skimage's connected-component labeling and show it next to the mask.
fig, (mask_ax, label_ax) = plt.subplots(1, 2, figsize=(20, 8))
mask_ax.imshow(seg_img, cmap="bone")
mask_ax.set_title("Segmented Image")
lab_img = label(seg_img)
label_ax.imshow(lab_img, cmap=plt.cm.gist_earth)
label_ax.set_title("Labeled Image")
# In[5]:
# Histogram of label values (log scale, since background dominates).
fig, (hist_ax) = plt.subplots(1, 1)
hist_ax.hist(lab_img.ravel())
hist_ax.set_title("Label Counts")
hist_ax.set_yscale("log")
# # Component Labeling: Algorithm
#
# We start off with all of the pixels in either foreground (1) or background (0)
# In[6]:
from skimage.morphology import label
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# Build a small X-shaped binary test image: the identity plus its vertical
# flip, with the crossing pixel knocked out first.
seg_img = np.eye(9, dtype=int)
seg_img[4, 4] = 0
seg_img = seg_img + seg_img[::-1]
sns.heatmap(seg_img, annot=True, fmt="d")
# Give each point in the image a unique label
# - For each point $(x,y)\in\text{Foreground}$
# - Set value to $I_{x,y} = x+y*width+1$
# In[7]:
# Seed every foreground pixel with the unique label x + y*width + 1;
# background stays 0. Same values as the explicit double loop.
row_idx, col_idx = np.indices(seg_img.shape)
idx_img = np.where(seg_img > 0, row_idx + col_idx * seg_img.shape[0] + 1, 0)
sns.heatmap(idx_img, annot=True, fmt="d", cmap="nipy_spectral")
# In a [brushfire](http://www.sciencedirect.com/science/article/pii/S0921889007000966)-style algorithm
# - For each point $(x,y)\in\text{Foreground}$
# - For each point $(x^{\prime},y^{\prime})\in\mathcal{N}(x,y)$
# - if $(x^{\prime},y^{\prime})\in\text{Foreground}$
# - Set the label to $\min(I_{x,y}, I_{x^{\prime},y^{\prime}})$
# - Repeat until no more labels have been changed
# In[8]:
# Run up to four brushfire relabeling sweeps, plotting the label image after
# each sweep on a 2x2 grid of axes.
fig, m_axs = plt.subplots(2, 2, figsize=(20, 20))
last_img = idx_img.copy()
# Keep every intermediate image so convergence can be animated later.
img_list = [last_img]
for iteration, c_ax in enumerate(m_axs.flatten(), 1):
    cur_img = last_img.copy()
    # Visit every foreground pixel of the previous sweep's image.
    for x in range(last_img.shape[0]):
        for y in range(last_img.shape[1]):
            if last_img[x, y] > 0:
                i_xy = last_img[x, y]
                # Scan the 3x3 neighborhood, clipped at the image borders.
                for xp in [-1, 0, 1]:
                    if (x + xp < last_img.shape[0]) and (x + xp >= 0):
                        for yp in [-1, 0, 1]:
                            if (y + yp < last_img.shape[1]) and (y + yp >= 0):
                                i_xpyp = last_img[x + xp, y + yp]
                                if i_xpyp > 0:
                                    # Propagate the smallest label among self,
                                    # neighbor, and any value already written
                                    # into cur_img this sweep.
                                    new_val = min(i_xy, i_xpyp, cur_img[x, y])
                                    if cur_img[x, y] != new_val:
                                        print(
                                            (x, y),
                                            i_xy,
                                            "vs",
                                            (x + xp, y + yp),
                                            i_xpyp,
                                            "->",
                                            new_val,
                                        )
                                        cur_img[x, y] = new_val
    img_list += [cur_img]
    sns.heatmap(cur_img, annot=True, fmt="d", cmap="nipy_spectral", ax=c_ax)
    c_ax.set_title("Iteration #{}".format(iteration))
    # Converged when a full sweep changes nothing.
    if (cur_img == last_img).all():
        print("Done")
        break
    else:
        print(
            "Iteration",
            iteration,
            "Groups",
            len(np.unique(cur_img[cur_img > 0].ravel())),
            "Changes",
            np.sum(cur_img != last_img),
        )
        last_img = cur_img
# The image very quickly converges and after 4 iterations the task is complete. For larger more complicated images with thousands of components this task can take longer, but there exist much more efficient [algorithms](https://www.cs.princeton.edu/~rs/AlgsDS07/01UnionFind.pdf) for labeling components which alleviate this issue.
# In[9]:
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
# Animate the stored intermediate label images, one frame per sweep.
fig, c_ax = plt.subplots(1, 1, figsize=(5, 5), dpi=150)
def update_frame(i):
    # Draw frame i: clear the axes and re-plot the i-th intermediate image
    # with a fixed color scale so labels keep the same color across frames.
    plt.cla()
    sns.heatmap(
        img_list[i],
        annot=True,
        fmt="d",
        cmap="nipy_spectral",
        ax=c_ax,
        cbar=False,
        vmin=img_list[0].min(),
        vmax=img_list[0].max(),
    )
    c_ax.set_title(
        "Iteration #{}, Groups {}".format(
            i + 1, len(np.unique(img_list[i][img_list[i] > 0].ravel()))
        )
    )
# write animation frames
anim_code = FuncAnimation(
    fig, update_frame, frames=len(img_list) - 1, interval=1000, repeat_delay=2000
).to_html5_video()
plt.close("all")
HTML(anim_code)
# # Bigger Images
# How does the same algorithm apply to bigger images
# In[10]:
from skimage.io import imread
from skimage.morphology import label
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# Downsample the Aachen label image, keep class 26 as foreground, crop a window.
seg_img = (imread("ext-figures/aachen_label.png")[::4, ::4] == 26)[110:130:2, 370:420:3]
seg_img[9, 1] = 1  # add one extra foreground pixel by hand
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 7), dpi=150)
sns.heatmap(seg_img, annot=True, fmt="d", ax=ax1, cmap="nipy_spectral", cbar=False)
# Give every foreground pixel a unique starting label (its flat index).
idx_img = seg_img * np.arange(len(seg_img.ravel())).reshape(seg_img.shape)
sns.heatmap(idx_img, annot=True, fmt="d", ax=ax2, cmap="nipy_spectral", cbar=False)
# In[11]:
# Same brushfire label propagation as above, but without the per-pixel logging
# and with a fixed iteration cap instead of one subplot per pass.
last_img = idx_img.copy()
img_list = [last_img]
for iteration in range(99):
    cur_img = last_img.copy()
    for x in range(last_img.shape[0]):
        for y in range(last_img.shape[1]):
            if last_img[x, y] > 0:  # only foreground pixels carry labels
                i_xy = last_img[x, y]
                # 8-connected neighborhood, clipped at the borders
                for xp in [-1, 0, 1]:
                    if (x + xp < last_img.shape[0]) and (x + xp >= 0):
                        for yp in [-1, 0, 1]:
                            if (y + yp < last_img.shape[1]) and (y + yp >= 0):
                                i_xpyp = last_img[x + xp, y + yp]
                                if i_xpyp > 0:
                                    new_val = min(i_xy, i_xpyp, cur_img[x, y])
                                    if cur_img[x, y] != new_val:
                                        cur_img[x, y] = new_val
    img_list += [cur_img]
    # stop as soon as a full pass changes nothing
    if (cur_img == last_img).all():
        print("Done")
        break
    else:
        print(
            "Iteration",
            iteration,
            "Groups",
            len(np.unique(cur_img[cur_img > 0].ravel())),
            "Changes",
            np.sum(cur_img != last_img),
        )
    last_img = cur_img
# In[12]:
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
fig, c_ax = plt.subplots(1, 1, figsize=(5, 5), dpi=150)
def update_frame(i):
    """Draw frame *i*: the label image after pass i+1 on the bigger image."""
    plt.cla()
    sns.heatmap(
        img_list[i],
        annot=True,
        fmt="d",
        cmap="nipy_spectral",
        ax=c_ax,
        cbar=False,
        # fixed color scale across frames so each label keeps its color
        vmin=img_list[0].min(),
        vmax=img_list[0].max(),
    )
    c_ax.set_title(
        "Iteration #{}, Groups {}".format(
            i + 1, len(np.unique(img_list[i][img_list[i] > 0].ravel()))
        )
    )
# write animation frames
anim_code = FuncAnimation(
    fig, update_frame, frames=len(img_list) - 1, interval=500, repeat_delay=1000
).to_html5_video()
plt.close("all")
HTML(anim_code)
# # Different Neighborhoods
# We can expand beyond the 3x3 neighborhood to a 5x5 for example
# In[13]:
last_img = idx_img.copy()
img_list = [last_img]
for iteration in range(99):
cur_img = last_img.copy()
for x in range(last_img.shape[0]):
for y in range(last_img.shape[1]):
if last_img[x, y] > 0:
i_xy = last_img[x, y]
for xp in [-2, -1, 0, 1, 2]:
if (x + xp < last_img.shape[0]) and (x + xp >= | |
<filename>src/franz/openrdf/tests/tests.py
# coding=utf-8
################################################################################
# Copyright (c) 2006-2017 Franz Inc.
# All rights reserved. This program and the accompanying materials are
# made available under the terms of the MIT License which accompanies
# this distribution, and is available at http://opensource.org/licenses/MIT
################################################################################
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import pytest
from future import standard_library
standard_library.install_aliases()
from past.builtins import long
from future.builtins import range, next, object, str
from future.utils import iteritems, iterkeys
from ..exceptions import RequestError
from ..exceptions import ServerException
from ..sail.allegrographserver import AllegroGraphServer
from ..repository.repository import Repository
from ..query.query import QueryLanguage
from ..vocabulary.rdf import RDF
from ..vocabulary.rdfs import RDFS
from ..vocabulary.owl import OWL
from ..vocabulary.xmlschema import XMLSchema
from ..query.dataset import Dataset
from ..rio.rdfformat import RDFFormat
from ..rio.rdfwriter import NTriplesWriter
from ..rio.rdfxmlwriter import RDFXMLWriter
from ..model import BNode, Literal, Statement, URI, ValueFactory
from nose.tools import eq_, assert_raises, raises
from franz.miniclient.request import backend
# True when the miniclient is using the pycurl HTTP backend.
use_curl = backend.__name__ == 'curl'
if use_curl:
    import pycurl
import os, datetime, locale, io, subprocess, sys, warnings
# Use the system default locale so trace() can group numbers when formatting.
locale.setlocale(locale.LC_ALL, '')
def trace(formatter, values=None, stamp=False):
    """Print a diagnostic line, optionally locale-formatted and timestamped.

    ``values`` (a tuple), when given, is interpolated into ``formatter``
    with locale-aware number grouping; ``stamp`` prefixes a timestamp.
    """
    if stamp:
        prefix = '\ntests [%s]:' % (datetime.datetime.now())
    else:
        prefix = '\n'
    if values:
        formatter = locale.format_string(formatter, values, grouping=True)
    print(prefix, formatter)
# Location of the test data files shipped next to this module.
CURRENT_DIRECTORY = os.path.dirname(__file__)
LOCALHOST = 'localhost'
# Connection settings; all overridable through AGRAPH_* environment variables.
AG_HOST = os.environ.get('AGRAPH_HOST', LOCALHOST)
AG_PORT = int(os.environ.get('AGRAPH_PORT', '10035'))
AG_SSLPORT = int(os.environ.get('AGRAPH_SSL_PORT', '10036'))
AG_PROXY = os.environ.get('AGRAPH_PROXY')
AG_ONSERVER = AG_HOST == LOCALHOST
USER = os.environ.get('AGRAPH_USER', 'test')
# NOTE(review): '<PASSWORD>' looks like a redacted placeholder default — confirm
# the real default before relying on it.
PASSWORD = os.environ.get('AGRAPH_PASSWORD', '<PASSWORD>')
CATALOG = os.environ.get('AGRAPH_CATALOG', 'tests')
# Support "/" as an alias for the root catalog.
if CATALOG == '/':
    CATALOG = None
# Repository name used by all tests in this module.
STORE = 'agraph_test'
def teardown_module():
    """Module-level teardown: delete every repository in the test catalog."""
    ag_server = AllegroGraphServer(AG_HOST, AG_PORT, USER, PASSWORD, proxy=AG_PROXY)
    test_catalog = ag_server.openCatalog(CATALOG)
    for repo_name in test_catalog.listRepositories():
        test_catalog.deleteRepository(repo_name)
# When True, a failed verify() raises instead of merely printing.
RAISE_EXCEPTION_ON_VERIFY_FAILURE = False

def verify(expressionValue, targetValue, quotedExpression, testNum):
    """
    Verify that 'expressionValue' equals 'targetValue'. If not,
    raise an exception, or print a message advertising the failure.
    """
    if expressionValue == targetValue:
        return
    message = ("Diagnostic failure in test %s. Expression '%s' returns '%s' where '%s' expected." %
               (testNum, quotedExpression, expressionValue, targetValue))
    if RAISE_EXCEPTION_ON_VERIFY_FAILURE:
        raise Exception(message)
    print("BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP BWEEP \n ", message)
def test0():
    """Smoke test: connect to the server and list the available catalogs."""
    ag_server = AllegroGraphServer(AG_HOST, AG_PORT, USER, PASSWORD, proxy=AG_PROXY)
    print("Available catalogs", ag_server.listCatalogs())
def connect(accessMode=Repository.RENEW):
    """
    Connect is called by the other tests to startup the connection to the test database.

    Opens (or creates) the STORE repository in the test catalog and returns a
    fresh connection.  With the default RENEW mode the existing store is
    cleared rather than deleted and recreated.
    """
    print("Default working directory is '%s'" % CURRENT_DIRECTORY)
    server = AllegroGraphServer(AG_HOST, AG_PORT, USER, PASSWORD, proxy=AG_PROXY)
    print("Available catalogs", server.listCatalogs())
    catalog = server.openCatalog(CATALOG)
    stores = catalog.listRepositories()
    print("Available repositories in catalog '%s': %s" % (catalog.getName(), catalog.listRepositories()))
    # Instead of renewing the database, clear it (below); create only if missing.
    if accessMode == Repository.RENEW:
        mode = Repository.CREATE if STORE not in stores else Repository.OPEN
    else:
        mode = accessMode
    myRepository = catalog.getRepository(STORE, mode)
    myRepository.initialize()
    connection = myRepository.getConnection()
    connection.disableDuplicateSuppression()
    # RENEW semantics: empty the store and its namespaces instead of recreating.
    if accessMode == Repository.RENEW:
        connection.clear()
        connection.clearNamespaces()
    print("Repository %s is up! It contains %i statements." % (
        myRepository.getDatabaseName(), connection.size()))
    return connection
def test1(accessMode=Repository.RENEW):
    """Smoke test: bring the test repository up and return its connection."""
    return connect(accessMode)
def test2():
    """Add four triples about alice and bob, verify counts after a
    remove/re-add cycle, and return the open connection for later tests."""
    conn = connect()
    ## create some resources and literals to make statements out of
    alice = conn.createURI("http://example.org/people/alice")
    bob = conn.createURI("http://example.org/people/bob")
    #bob = conn.createBNode()
    name = conn.createURI("http://example.org/ontology/name")
    person = conn.createURI("http://example.org/ontology/Person")
    bobsName = conn.createLiteral("Bob")
    alicesName = conn.createLiteral("Alice")
    print("Triple count before inserts: ", conn.size())
    for s in conn.getStatements(None, None, None, None): print(s)
    ## alice is a person
    conn.add(alice, RDF.TYPE, person)
    ## alice's name is "Alice"
    conn.add(alice, name, alicesName)
    ## bob is a person
    conn.add(bob, RDF.TYPE, person)
    ## bob's name is "Bob":
    conn.add(bob, name, bobsName)
    print("Triple count: ", conn.size())
    verify(conn.size(), 4, 'conn.size()', 2)
    for s in conn.getStatements(None, None, None, None): print(s)
    # Removing one triple drops the count to 3; re-add it for later tests.
    conn.remove(bob, name, bobsName)
    print("Triple count: ", conn.size())
    verify(conn.size(), 3, 'conn.size()', 2)
    conn.add(bob, name, bobsName)
    return conn
def test3():
    """Run a SPARQL SELECT over test2's data and print every binding row."""
    conn = test2()
    try:
        query = "SELECT ?s ?p ?o WHERE {?s ?p ?o .}"
        prepared = conn.prepareTupleQuery("SPARQL", query)
        rows = prepared.evaluate()
        verify(rows.rowCount(), 4, 'len(result)', 3)
        try:
            for binding in rows:
                subj = binding.getValue("s")
                pred = binding.getValue("p")
                obj = binding.getValue("o")
                print("%s %s %s" % (subj, pred, obj))
        finally:
            rows.close()
    finally:
        conn.close()
def test4():
    """Look up Alice's statements with getStatements and print them."""
    conn = test2()
    alice = conn.createURI("http://example.org/people/alice")
    print("Searching for Alice using getStatements():")
    found = conn.getStatements(alice, None, None)
    # No duplicates are present; this merely exercises the filter code path.
    found.enableDuplicateFilter()
    verify(found.rowCount(), 2, 'statements.rowCount()', 3)
    for stmt in found:
        print(stmt)
    found.close()
def test5():
    """
    Typed Literals

    Exercises literal creation with datatypes and language tags, then matches
    them back via getStatements objects and SPARQL filter serializations.
    """
    conn = connect()
    exns = "http://example.org/people/"
    alice = conn.createURI("http://example.org/people/alice")
    age = conn.createURI(namespace=exns, localname="age")
    weight = conn.createURI(namespace=exns, localname="weight")
    favoriteColor = conn.createURI(namespace=exns, localname="favoriteColor")
    birthdate = conn.createURI(namespace=exns, localname="birthdate")
    ted = conn.createURI(namespace=exns, localname="Ted")
    # Literals with various datatypes and language tags.
    red = conn.createLiteral('Red')
    rouge = conn.createLiteral('Rouge', language="fr")
    fortyTwo = conn.createLiteral('42', datatype=XMLSchema.INT)
    fortyTwoInteger = conn.createLiteral('42', datatype=XMLSchema.LONG)
    fortyTwoUntyped = conn.createLiteral('42')
    date = conn.createLiteral('1984-12-06', datatype=XMLSchema.DATE)
    time = conn.createLiteral('1984-12-06T09:00:00', datatype=XMLSchema.DATETIME)
    weightFloat = conn.createLiteral('20.5', datatype=XMLSchema.FLOAT)
    weightUntyped = conn.createLiteral('20.5')
    stmt1 = conn.createStatement(alice, age, fortyTwo)
    stmt2 = conn.createStatement(ted, age, fortyTwoUntyped)
    # Add triples through every available entry point.
    conn.add(stmt1)
    conn.addStatement(stmt2)
    conn.addTriple(alice, weight, weightUntyped)
    conn.addTriple(ted, weight, weightFloat)
    conn.addTriples([(alice, favoriteColor, red),
                     (ted, favoriteColor, rouge),
                     (alice, birthdate, date),
                     (ted, birthdate, time)])
    # Match literal objects directly with getStatements.
    for obj in [None, fortyTwo, fortyTwoUntyped, conn.createLiteral('20.5', datatype=XMLSchema.FLOAT), conn.createLiteral('20.5'),
                red, rouge]:
        print("Retrieve triples matching '%s'." % obj)
        statements = conn.getStatements(None, None, obj)
        for s in statements:
            print(s)
    # Match literals through SPARQL filters on serialized forms.
    for obj in ['42', '"42"', '20.5', '"20.5"', '"20.5"^^xsd:float', '"Rouge"@fr', '"Rouge"', '"1984-12-06"^^xsd:date']:
        print("Query triples matching '%s'." % obj)
        queryString = """PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        SELECT ?s ?p ?o WHERE {?s ?p ?o . filter (?o = %s)}""" % obj
        tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
        result = tupleQuery.evaluate();
        for bindingSet in result:
            s = bindingSet[0]
            p = bindingSet[1]
            o = bindingSet[2]
            print("%s %s %s" % (s, p, o))
    ## Search for date using date object in triple pattern.
    print("Retrieve triples matching DATE object.")
    statements = conn.getStatements(None, None, date)
    for s in statements:
        print(s)
    ## Search for datetime using datetime object in triple pattern.
    print("Retrieve triples matching DATETIME object.")
    statements = conn.getStatements(None, None, time)
    for s in statements:
        print(s)
    ## Search for specific date value.
    print("Match triples having specific DATE value.")
    statements = conn.getStatements(None, None, '"1984-12-06"^^<http://www.w3.org/2001/XMLSchema#date>')
    for s in statements:
        print(s)
    ## Search for specific datetime value.
    print("Match triples having specific DATETIME value.")
    statements = conn.getStatements(None, None, '"1984-12-06T09:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>')
    for s in statements:
        print(s)
    ## Search for triples of type xsd:date using SPARQL query.
    print("Use SPARQL to find triples where the value matches a specific xsd:date.")
    queryString = """SELECT ?s ?p WHERE {?s ?p "1984-12-06"^^<http://www.w3.org/2001/XMLSchema#date> }"""
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
    result = tupleQuery.evaluate();
    for bindingSet in result:
        s = bindingSet[0]
        p = bindingSet[1]
        print("%s %s" % (s, p))
    ## Search for triples of type xsd:datetime using SPARQL query.
    print("Use SPARQL to find triples where the value matches a specific xsd:dateTime.")
    queryString = """SELECT ?s ?p WHERE {?s ?p "1984-12-06T09:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime> }"""
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
    result = tupleQuery.evaluate();
    for bindingSet in result:
        s = bindingSet[0]
        p = bindingSet[1]
        print("%s %s" % (s, p))
def test6(conn = None):
    """Load the kennedy and vcard sample files and verify the triple counts.

    Kennedy triples go into the null context; vcard triples go into an
    explicit 'vcards' context.  Returns the open connection so later tests
    can reuse the loaded data.
    """
    if conn is None:
        conn = connect()
    else:
        conn.clear()
    print("Starting example test6().")
    # The following paths are relative to os.getcwd(), the working directory.
    print("Default working directory is '%s'" % (CURRENT_DIRECTORY))
    # If you get a "file not found" error, use os.chdir("your directory path") to
    # point to the location of the data files. For AG Free Edition on Windows:
    #os.chdir("C:\Program Files\AllegroGraphFJE32\python")
    print("Current working directory is '%s'" % (os.getcwd()))
    path1 = os.path.join(CURRENT_DIRECTORY, "vc-db-1.rdf")
    path2 = os.path.join(CURRENT_DIRECTORY, "kennedy.ntriples")
    baseURI = "http://example.org/example/local"
    context = conn.createURI("http://example.org#vcards")
    conn.setNamespace("vcd", "http://www.w3.org/2001/vcard-rdf/3.0#")
    ## read kennedy triples into the null context:
    print("Load kennedy.ntriples.")
    conn.add(path2, base=baseURI, format=RDFFormat.NTRIPLES)
    ## read vcards triples into the context 'context':
    print("Load vcards triples.")
    conn.addFile(path1, baseURI, format=RDFFormat.RDFXML, context=context)
    print("After loading, repository contains %i vcard triples in context '%s'\n and %i kennedy triples in context '%s'." % (
        conn.size(context), context, conn.size('null'), 'null'))
    verify(conn.size(context), 16, 'conn.size(context)', 6)
    # BUG FIX: the quoted expression previously read "conn.size('null)" —
    # the closing quote inside the diagnostic string was missing.
    verify(conn.size('null'), 1214, "conn.size('null')", 6)
    return conn
def test7():
    """Paged retrieval with triple ids, then the same match via SPARQL."""
    conn = test6()
    print("Match all and print subjects and contexts")
    result = conn.getStatements(None, None, None, None, limit=25, tripleIDs=True)
    assert len(result) == 25
    first_ids = set()
    for row in result:
        print(row.getSubject(), row.getContext())
        first_ids.add(row.getTripleID())
    # Test limit/offset
    result = conn.getStatements(None, None, None, None, limit=25, offset=25, tripleIDs=True)
    assert len(result) == 25
    second_ids = set()
    for row in result:
        second_ids.add(row.getTripleID())
    # The second page must not repeat any triple from the first page.
    assert not first_ids.intersection(second_ids)
    print("\nSame thing with SPARQL query (can't retrieve triples in the null context)")
    queryString = "SELECT DISTINCT ?s ?c WHERE {graph ?c {?s ?p ?o .} }"
    tupleQuery = conn.prepareTupleQuery(QueryLanguage.SPARQL, queryString)
    result = tupleQuery.evaluate();
    for i, bindingSet in enumerate(result):
        print(bindingSet[0], bindingSet[1])
    conn.close()
def test8():
conn = test6()
context = | |
<reponame>renatomello/qibo
"""Test special features of core gates."""
import pytest
import numpy as np
from qibo import K, gates
from qibo.models import Circuit
from qibo.tests.utils import random_state
####################### Test `_construct_unitary` feature #######################
# (gate name, qubit ids, expected unitary matrix) triples for parametrization.
GATES = [
    ("H", (0,), np.array([[1, 1], [1, -1]]) / np.sqrt(2)),
    ("X", (0,), np.array([[0, 1], [1, 0]])),
    ("Y", (0,), np.array([[0, -1j], [1j, 0]])),
    ("Z", (1,), np.array([[1, 0], [0, -1]])),
    ("S", (2,), np.array([[1, 0], [0, 1j]])),
    ("T", (2,), np.array([[1, 0], [0, np.exp(1j * np.pi / 4.0)]])),
    ("CNOT", (0, 1), np.array([[1, 0, 0, 0], [0, 1, 0, 0],
                               [0, 0, 0, 1], [0, 0, 1, 0]])),
    ("CZ", (1, 3), np.array([[1, 0, 0, 0], [0, 1, 0, 0],
                             [0, 0, 1, 0], [0, 0, 0, -1]])),
    ("SWAP", (2, 4), np.array([[1, 0, 0, 0], [0, 0, 1, 0],
                               [0, 1, 0, 0], [0, 0, 0, 1]])),
    ("FSWAP", (2, 4), np.array([[1, 0, 0, 0], [0, 0, 1, 0],
                                [0, 1, 0, 0], [0, 0, 0, -1]])),
    ("TOFFOLI", (1, 2, 3), np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 1, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 1, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 1, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 1, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 1, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0, 1],
                                     [0, 0, 0, 0, 0, 0, 1, 0]]))
]
@pytest.mark.parametrize("gate,qubits,target_matrix", GATES)
def test__construct_unitary(backend, gate, qubits, target_matrix):
    """Check that `_construct_unitary` method constructs the proper matrix."""
    gate = getattr(gates, gate)(*qubits)
    K.assert_allclose(gate.matrix, target_matrix)
# (gate name, theta -> expected matrix) pairs for the rotation gates.
GATES = [
    ("RX", lambda x: np.array([[np.cos(x / 2.0), -1j * np.sin(x / 2.0)],
                               [-1j * np.sin(x / 2.0), np.cos(x / 2.0)]])),
    ("RY", lambda x: np.array([[np.cos(x / 2.0), -np.sin(x / 2.0)],
                               [np.sin(x / 2.0), np.cos(x / 2.0)]])),
    ("RZ", lambda x: np.diag([np.exp(-1j * x / 2.0), np.exp(1j * x / 2.0)])),
    ("U1", lambda x: np.diag([1, np.exp(1j * x)])),
    ("CU1", lambda x: np.diag([1, 1, 1, np.exp(1j * x)]))
]
@pytest.mark.parametrize("gate,target_matrix", GATES)
def test__construct_unitary_rotations(backend, gate, target_matrix):
    """Check that `_construct_unitary` method constructs the proper matrix."""
    theta = 0.1234
    if gate == "CU1":
        # CU1 takes a control and a target qubit before the angle.
        gate = getattr(gates, gate)(0, 1, theta)
    else:
        gate = getattr(gates, gate)(0, theta)
    # BUG FIX: this assertion was duplicated on two consecutive lines;
    # a single check is sufficient.
    K.assert_allclose(gate.matrix, target_matrix(theta))
def test__construct_unitary_controlled(backend):
    """Check the controlled-RY matrix and that >1 control is not implemented."""
    theta = 0.1234
    rotation = np.array([[np.cos(theta / 2.0), -np.sin(theta / 2.0)],
                         [np.sin(theta / 2.0), np.cos(theta / 2.0)]])
    # Single control: identity on the |0> control block, RY on the |1> block.
    target_matrix = np.eye(4, dtype=rotation.dtype)
    target_matrix[2:, 2:] = rotation
    gate = gates.RY(0, theta).controlled_by(1)
    K.assert_allclose(gate.matrix, target_matrix)
    # With two controls the dense matrix is not constructed.
    gate = gates.RY(0, theta).controlled_by(1, 2)
    with pytest.raises(NotImplementedError):
        unitary = gate.matrix
###############################################################################
########################### Test `Collapse` features ##########################
@pytest.mark.parametrize("nqubits,targets", [(5, [2, 4]), (6, [3, 5])])
def test_measurement_collapse_distributed(backend, accelerators, nqubits, targets):
    """Collapse measurement on a distributed circuit matches manual projection."""
    initial_state = random_state(nqubits)
    c = Circuit(nqubits, accelerators)
    output = c.add(gates.M(*targets, collapse=True))
    result = c(np.copy(initial_state))
    # Project the initial state onto the measured bitstring by slicing.
    slicer = nqubits * [slice(None)]
    for t, r in zip(targets, output.samples()[0]):
        slicer[t] = int(r)
    slicer = tuple(slicer)
    initial_state = initial_state.reshape(nqubits * (2,))
    target_state = np.zeros_like(initial_state)
    target_state[slicer] = initial_state[slicer]
    # Renormalize the projected state before comparing.
    norm = (np.abs(target_state) ** 2).sum()
    target_state = target_state.ravel() / np.sqrt(norm)
    K.assert_allclose(result.state(), target_state)
def test_collapse_after_measurement(backend):
    """Gates added after a collapse must act on the collapsed state."""
    qubits = [0, 2, 3]
    c = Circuit(5)
    c.add((gates.H(i) for i in range(5)))
    output = c.add(gates.M(*qubits, collapse=True))
    c.add((gates.H(i) for i in range(5)))
    result = c()
    bitstring = output.samples()[0]
    final_state = result.state()
    # Rebuild the expected state: X on qubits measured as 1, then H on them.
    ct = Circuit(5)
    for i, r in zip(qubits, bitstring):
        if r:
            ct.add(gates.X(i))
    ct.add((gates.H(i) for i in qubits))
    target_state = ct()
    K.assert_allclose(final_state, target_state, atol=1e-15)
###############################################################################
########################## Test gate parameter setter #########################
def test_rx_parameter_setter(backend):
    """Check the parameter setter of RX gate."""
    def exact_state(theta):
        # Analytic RX(theta) applied to the |+> state.
        phase = np.exp(1j * theta / 2.0)
        gate = np.array([[phase.real, -1j * phase.imag],
                         [-1j * phase.imag, phase.real]])
        return gate.dot(np.ones(2)) / np.sqrt(2)
    theta = 0.1234
    gate = gates.RX(0, theta=theta)
    initial_state = K.cast(np.ones(2) / np.sqrt(2))
    final_state = gate(initial_state)
    target_state = exact_state(theta)
    K.assert_allclose(final_state, target_state)
    # Assigning a new value through the setter must update the applied matrix.
    theta = 0.4321
    gate.parameters = theta
    initial_state = K.cast(np.ones(2) / np.sqrt(2))
    final_state = gate(initial_state)
    target_state = exact_state(theta)
    K.assert_allclose(final_state, target_state)
###############################################################################
########################### Test gate decomposition ###########################
@pytest.mark.parametrize(("target", "controls", "free"),
                         [(0, (1,), ()), (2, (0, 1), ()),
                          (3, (0, 1, 4), (2, 5)),
                          (7, (0, 1, 2, 3, 4), (5, 6)),
                          (5, (0, 2, 4, 6, 7), (1, 3)),
                          (8, (0, 2, 4, 6, 9), (3, 5, 7))])
@pytest.mark.parametrize("use_toffolis", [True, False])
def test_x_decomposition_execution(backend, target, controls, free, use_toffolis):
    """Check that applying the decomposition is equivalent to applying the multi-control gate."""
    gate = gates.X(target).controlled_by(*controls)
    nqubits = max((target,) + controls + free) + 1
    initial_state = random_state(nqubits)
    # Reference: apply the multi-controlled X directly.
    targetc = Circuit(nqubits)
    targetc.add(gate)
    target_state = targetc(np.copy(initial_state))
    # Decomposed version using the given work ("free") qubits.
    c = Circuit(nqubits)
    c.add(gate.decompose(*free, use_toffolis=use_toffolis))
    final_state = c(np.copy(initial_state))
    K.assert_allclose(final_state, target_state, atol=1e-6)
###############################################################################
########################### Test gate multiplication ##########################
def test_one_qubit_gate_multiplication(backend):
    """Matrix product of one-qubit gates yields a Unitary gate (or I for X@X)."""
    gate1 = gates.X(0)
    gate2 = gates.H(0)
    final_gate = gate1 @ gate2
    assert final_gate.__class__.__name__ == "Unitary"
    target_matrix = (np.array([[0, 1], [1, 0]]) @
                     np.array([[1, 1], [1, -1]]) / np.sqrt(2))
    K.assert_allclose(final_gate.matrix, target_matrix)
    # Reversed order gives the other (non-commuting) product.
    final_gate = gate2 @ gate1
    assert final_gate.__class__.__name__ == "Unitary"
    target_matrix = (np.array([[1, 1], [1, -1]]) / np.sqrt(2) @
                     np.array([[0, 1], [1, 0]]))
    K.assert_allclose(final_gate.matrix, target_matrix)
    # A self-inverse gate times itself collapses to the identity gate.
    gate1 = gates.X(1)
    gate2 = gates.X(1)
    assert (gate1 @ gate2).__class__.__name__ == "I"
    assert (gate2 @ gate1).__class__.__name__ == "I"
def test_two_qubit_gate_multiplication(backend):
    """Matrix product of two-qubit gates; mismatched target qubits must raise."""
    theta, phi = 0.1234, 0.5432
    gate1 = gates.fSim(0, 1, theta=theta, phi=phi)
    gate2 = gates.SWAP(0, 1)
    final_gate = gate1 @ gate2
    # fSim matrix times the SWAP matrix.
    target_matrix = (np.array([[1, 0, 0, 0],
                               [0, np.cos(theta), -1j * np.sin(theta), 0],
                               [0, -1j * np.sin(theta), np.cos(theta), 0],
                               [0, 0, 0, np.exp(-1j * phi)]]) @
                     np.array([[1, 0, 0, 0], [0, 0, 1, 0],
                               [0, 1, 0, 0], [0, 0, 0, 1]]))
    K.assert_allclose(final_gate.matrix, target_matrix)
    # Check that error is raised when target qubits do not agree
    with pytest.raises(NotImplementedError):
        final_gate = gate1 @ gates.SWAP(0, 2)
###############################################################################
################################# Test dagger #################################
# (gate name, constructor args) pairs covering all parametrized and fixed
# gates exercised by test_dagger below.
GATES = [
    ("H", (0,)),
    ("X", (0,)),
    ("Y", (0,)),
    ("Z", (0,)),
    ("S", (0,)),
    ("SDG", (0,)),
    ("T", (0,)),
    ("TDG", (0,)),
    ("RX", (0, 0.1)),
    ("RY", (0, 0.2)),
    ("RZ", (0, 0.3)),
    ("U1", (0, 0.1)),
    ("U2", (0, 0.2, 0.3)),
    ("U3", (0, 0.1, 0.2, 0.3)),
    ("CNOT", (0, 1)),
    ("CRX", (0, 1, 0.1)),
    ("CRZ", (0, 1, 0.3)),
    ("CU1", (0, 1, 0.1)),
    ("CU2", (0, 1, 0.2, 0.3)),
    ("CU3", (0, 1, 0.1, 0.2, 0.3)),
    ("fSim", (0, 1, 0.1, 0.2))
]
@pytest.mark.parametrize("gate,args", GATES)
def test_dagger(backend, gate, args):
    """Applying a gate followed by its dagger must restore the input state."""
    target_gate = getattr(gates, gate)(*args)
    num_qubits = len(target_gate.qubits)
    circuit = Circuit(num_qubits)
    circuit.add((target_gate, target_gate.dagger()))
    state_in = random_state(num_qubits)
    state_out = circuit(np.copy(state_in))
    K.assert_allclose(state_out, state_in)
# (gate name, constructor args) pairs for the controlled-dagger test; all
# act on qubit 3 so qubits 0-2 can serve as controls.
GATES = [
    ("H", (3,)),
    ("X", (3,)),
    ("Y", (3,)),
    ("S", (3,)),
    ("SDG", (3,)),
    ("T", (3,)),
    ("TDG", (3,)),
    ("RX", (3, 0.1)),
    ("U1", (3, 0.1)),
    ("U3", (3, 0.1, 0.2, 0.3))
]
@pytest.mark.parametrize("gate,args", GATES)
def test_controlled_dagger(backend, gate, args):
    """A controlled gate followed by its dagger must leave the state intact."""
    controlled = getattr(gates, gate)(*args).controlled_by(0, 1, 2)
    circuit = Circuit(4)
    circuit.add((controlled, controlled.dagger()))
    state_in = random_state(4)
    state_out = circuit(np.copy(state_in))
    K.assert_allclose(state_out, state_in)
@pytest.mark.parametrize("gate_1,gate_2", [("S", "SDG"), ("T", "TDG")])
@pytest.mark.parametrize("qubit", (0, 2, 4))
def test_dagger_consistency(gate_1, gate_2, qubit):
    """S/SDG and T/TDG must act as mutual inverses on any qubit."""
    gate_1 = getattr(gates, gate_1)(qubit)
    gate_2 = getattr(gates, gate_2)(qubit)
    c = Circuit(qubit+1)
    c.add((gate_1, gate_2))
    initial_state = random_state(qubit+1)
    final_state = c(np.copy(initial_state))
    K.assert_allclose(final_state, initial_state)
@pytest.mark.parametrize("nqubits", [1, 2])
def test_unitary_dagger(backend, nqubits):
    """dagger() of a matrix gate applies the conjugate transpose."""
    matrix = np.random.random((2 ** nqubits, 2 ** nqubits))
    gate = gates.Unitary(matrix, *range(nqubits))
    c = Circuit(nqubits)
    c.add((gate, gate.dagger()))
    initial_state = random_state(nqubits)
    final_state = c(np.copy(initial_state))
    # matrix is generally NOT unitary here, so the expected result is
    # M^dagger @ M @ state rather than the initial state itself.
    target_state = np.dot(matrix, initial_state)
    target_state = np.dot(np.conj(matrix).T, target_state)
    K.assert_allclose(final_state, target_state)
def test_controlled_unitary_dagger(backend):
    """A controlled Unitary gate followed by its dagger restores the state."""
    from scipy.linalg import expm
    # expm(1j * H) with H Hermitian produces a proper unitary matrix.
    matrix = np.random.random((2, 2))
    matrix = expm(1j * (matrix + matrix.T))
    gate = gates.Unitary(matrix, 0).controlled_by(1, 2, 3, 4)
    c = Circuit(5)
    c.add((gate, gate.dagger()))
    initial_state = random_state(5)
    final_state = c(np.copy(initial_state))
    K.assert_allclose(final_state, initial_state)
def test_generalizedfsim_dagger(backend):
    """GeneralizedfSim followed by its dagger restores the state."""
    from scipy.linalg import expm
    phi = 0.2
    # expm(1j * H) with H Hermitian produces a unitary rotation block.
    matrix = np.random.random((2, 2))
    matrix = expm(1j * (matrix + matrix.T))
    gate = gates.GeneralizedfSim(0, 1, matrix, phi)
    c = Circuit(2)
    c.add((gate, gate.dagger()))
    initial_state = random_state(2)
    final_state = c(np.copy(initial_state))
    K.assert_allclose(final_state, initial_state)
@pytest.mark.parametrize("nqubits", [4, 5])
def test_variational_layer_dagger(backend, nqubits):
    """A VariationalLayer followed by its dagger restores the state."""
    theta = 2 * np.pi * np.random.random((2, nqubits))
    # Entangle neighboring qubit pairs (0,1), (2,3), ...
    pairs = list((i, i + 1) for i in range(0, nqubits - 1, 2))
    gate = gates.VariationalLayer(range(nqubits), pairs,
                                  gates.RY, gates.CZ,
                                  theta[0], theta[1])
    c = Circuit(nqubits)
    c.add((gate, gate.dagger()))
    initial_state = random_state(nqubits)
    final_state = c(np.copy(initial_state))
    K.assert_allclose(final_state, initial_state)
###############################################################################
##################### Test repeated execution with channels ###################
def test_noise_channel_repeated(backend):
    """Repeated execution with PauliNoiseChannel matches manual seeded sampling."""
    thetas = np.random.random(4)
    probs = 0.1 * np.random.random([4, 3]) + 0.2
    gatelist = [gates.X, gates.Y, gates.Z]
    c = Circuit(4)
    c.add((gates.RY(i, t) for i, t in enumerate(thetas)))
    c.add((gates.PauliNoiseChannel(i, px, py, pz, seed=123)
           for i, (px, py, pz) in enumerate(probs)))
    final_state = c(nshots=40)
    # Reproduce the channel's sampling with the same seed, shot by shot.
    np.random.seed(123)
    target_state = []
    for _ in range(40):
        noiseless_c = Circuit(4)
        noiseless_c.add((gates.RY(i, t) for i, t in enumerate(thetas)))
        for i, ps in enumerate(probs):
            for p, gate in zip(ps, gatelist):
                if np.random.random() < p:
                    noiseless_c.add(gate(i))
        target_state.append(noiseless_c())
    K.assert_allclose(final_state, target_state)
def test_reset_channel_repeated(backend):
initial_state = random_state(5)
c = Circuit(5)
c.add(gates.ResetChannel(2, p0=0.3, p1=0.3, seed=123))
final_state = c(K.cast(np.copy(initial_state)), nshots=30)
np.random.seed(123)
target_state = []
collapse = gates.M(2, collapse=True)
collapse.nqubits = 5
xgate = gates.X(2)
for _ | |
= -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({glyc_SFOe: -1.0,
glyc_e: SFO_Abnd})
model.add_reactions([reaction])
#glyc_SFOe <-> glyc_SFOc
# Transport extracellular glycerol into the SFO cytosol compartment.
glyc_SFOc = Metabolite('glyc_SFOc', formula='C3H8O3', name='Glycerol', compartment='SFOc', charge= 0)
reaction = Reaction('SFO_glyct')
reaction.name = 'Glycerol transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({glyc_SFOe: -1.0,
                          glyc_SFOc: 1.0})
model.add_reactions([reaction])
# check_mass_balance() returns an empty dict when the reaction is balanced.
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#atp_SFOc + glyc_SFOc <-> adp_SFOc + glyc3p_SFOc + h_SFOc
# Phosphorylate glycerol at the expense of ATP; ATP_SLP_SFO tracks ATP use.
glyc3p_SFOc = Metabolite('glyc3p_SFOc', formula='C3H7O6P', name='Glycerol 3-phosphate', compartment='SFOc', charge= -2)
reaction = Reaction('SFO_GLYK')
reaction.name = 'Glycerol kinase'
reaction.subsystem = 'Glycerol utilization'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({glyc_SFOc: -1.0,
                          atp_SFOc: -1.0,
                          adp_SFOc: 1.0,
                          glyc3p_SFOc: 1.0,
                          h_SFOc: 1.0,
                          ATP_SLP_SFO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#dhap_SFOc + h_SFOc + nadh_SFOc <-> glyc3p_SFOc + nad_SFOc
# Reversible NAD-linked glycerol-3-phosphate dehydrogenase.
reaction = Reaction('SFO_G3PD1')
reaction.name = 'Glycerol-3-phosphate dehydrogenase (NAD)'
reaction.subsystem = 'Glycerol utilization'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({dhap_SFOc: -1.0,
                          h_SFOc: -1.0,
                          nadh_SFOc: -1.0,
                          glyc3p_SFOc: 1.0,
                          nad_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
###Energy Generation
#adp_SFOc + pi_SFOc + 4.0 h_SFOi <-> atp_SFOc + 3.0 h_SFOc + h2o_SFOc
# ATP synthase driven by 4 protons from the SFOi (ion-motive-force) compartment;
# ATP_IMF_SFO tracks ATP generated from the proton gradient.
h_SFOi = Metabolite('h_SFOi', formula='H', name='H+', compartment='SFOi', charge=1)
reaction = Reaction('SFO_ATPS4r')
#This reaction differs from the BiGG reaction because this model assumes a different compartment for ion motive force generation
reaction.name = '*ATP Synthase'
reaction.subsystem = 'Energy Generation'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({adp_SFOc: -1.0,
                          pi_SFOc: -1.0,
                          h_SFOi: -4.0,
                          atp_SFOc: 1.0,
                          h_SFOc: 3.0,
                          h2o_SFOc: 1.0,
                          ATP_IMF_SFO: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Other
#h2o_SFOe <-> h2o_e
# Water exchange with the shared extracellular pool, scaled by SFO abundance.
h2o_SFOe = Metabolite('h2o_SFOe', formula='H2O', name='H2O', compartment='SFOe', charge=0)
reaction = Reaction('SFO_EX_h2o')
reaction.name = 'SFO h2o Exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2o_e: SFO_Abnd,
                          h2o_SFOe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h2o_SFOe <-> h2o_SFOc
# Water transport into the SFO cytosol.
reaction = Reaction('SFO_H2Ot')
reaction.name = 'H2O transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h2o_SFOe: -1.0,
                          h2o_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#co2_SFOe <-> co2_e
# CO2 exchange with the shared extracellular pool, scaled by SFO abundance.
# BUG FIX: the metabolite id was 'h_co2e' (a copy-paste slip from the proton
# metabolite); it now follows the '<met>_SFOe' naming used by every other
# SFO metabolite in this script.
co2_SFOe = Metabolite('co2_SFOe', formula='CO2', name='CO2', compartment='SFOe', charge=0)
reaction = Reaction('SFO_EX_co2')
reaction.name = 'SFO co2 Exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({co2_e: SFO_Abnd,
                          co2_SFOe: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#co2_SFOe <-> co2_SFOc
# CO2 transport into the SFO cytosol.
reaction = Reaction('SFO_co2t')
reaction.name = 'CO2 transport'
reaction.subsystem = 'Transport'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({co2_SFOe: -1.0,
                          co2_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#ATP Hydrolysis
#atp_SFOc + h2o_SFOc <-> adp_SFOc + pi_SFOc + h_SFOc + ATP_COMM_e
# Irreversible ATP hydrolysis; ATP_HYDR_SFO tracks hydrolyzed ATP for accounting.
reaction = Reaction('SFO_ATP_Hydrolysis')
reaction.name = 'ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_SFOc: -1.0,
                          h2o_SFOc: -1.0,
                          adp_SFOc: 1.0,
                          pi_SFOc: 1.0,
                          h_SFOc: 1.0,
                          ATP_HYDR_SFO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
##Import and Export Reactions For Energy Calculations
# External-compartment proton, shared by the symport/antiport reactions below.
h_SFOe = Metabolite('h_SFOe', formula='H', name='Proton', compartment='SFOe', charge= 1)
# Formate Transport
# for_SFOe <-> for_e
for_SFOc = Metabolite('for_SFOc', formula='CHO2', name='Formate', compartment='SFOc', charge=-1)
for_SFOe = Metabolite('for_SFOe', formula='CHO2', name='Formate', compartment='SFOe', charge=-1)
reaction = Reaction('SFO_EX_for')
reaction.name = 'SFO for exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_e: SFO_Abnd,
                          for_SFOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# for_SFOe + h_SFOe <-> for_SFOc + h_SFOc
# Proton-coupled formate import; irreversible (lower bound 0), paired with
# the separate irreversible export reaction below.
reaction = Reaction('SFO_Formate_import')
reaction.name = 'Formate import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_SFOe: -1.0,
                          h_SFOe: -1.0,
                          for_SFOc: 1.0,
                          h_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
# for_SFOc + h_SFOc <-> for_SFOe + h_SFOe
# Proton-coupled formate export (reverse direction of the import above).
reaction = Reaction('SFO_Formate_export')
reaction.name = 'Formate_export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({for_SFOc: -1.0,
                          h_SFOc: -1.0,
                          for_SFOe: 1.0,
                          h_SFOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Acetate Transport
#ac_SFOe <-> ac_e
# Acetate exchange with the shared pool, scaled by SFO_Abnd.
ac_SFOe = Metabolite('ac_SFOe', formula='C2H3O2', name='Acetate', compartment='SFOe', charge= -1)
reaction = Reaction('SFO_EX_ac')
reaction.name = 'SFO ac exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ac_e: SFO_Abnd,
                          ac_SFOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#ac_SFOc + h_SFOc <-> ac_SFOe + h_SFOe
# Proton-coupled acetate export, irreversible (no matching import reaction
# is defined in this section). ac_SFOc is declared earlier in the file.
reaction = Reaction('SFO_Acetate_export')
reaction.name = 'Acetate export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({ac_SFOc: -1.0,
                          h_SFOc: -1.0,
                          ac_SFOe: 1.0,
                          h_SFOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Lactate Transport
#lac__D_SFOe <-> lac__D_e
# D-lactate exchange with the shared pool, scaled by SFO_Abnd.
# BUGFIX: the metabolite id was 'lac_SFOe', which broke the '<met>_SFOe'
# naming convention used by every other SFO external metabolite in this
# section; corrected to 'lac__D_SFOe' to match the variable name so
# id-based lookups on the model find it.
lac__D_SFOe = Metabolite('lac__D_SFOe', formula='C3H5O3', name='D-Lactate', compartment='SFOe', charge= -1)
reaction = Reaction('SFO_EX_lac__D')
reaction.name = 'SFO lac__D exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({lac__D_e: SFO_Abnd,
                          lac__D_SFOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#lac__D_SFOc + h_SFOc <-> lac__D_SFOe + h_SFOe
# Proton-coupled lactate export, irreversible (lower bound 0).
reaction = Reaction('SFO_Lactate_export')
reaction.name = 'Lactate export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({lac__D_SFOc: -1.0,
                          h_SFOc: -1.0,
                          lac__D_SFOe: 1.0,
                          h_SFOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Ethanol Transport
#etoh_SFOe <-> etoh_e
# Ethanol exchange with the shared pool, scaled by SFO_Abnd.
etoh_SFOe = Metabolite('etoh_SFOe', formula='C2H6O', name='Ethanol', compartment='SFOe', charge= 0)
reaction = Reaction('SFO_EX_etoh')
reaction.name = 'SFO etoh exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_e: SFO_Abnd,
                          etoh_SFOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#etoh_SFOe <-> etoh_SFOc
# Ethanol import; uncharged species, so no proton coupling is needed.
reaction = Reaction('SFO_Ethanol_import')
reaction.name = 'Ethanol import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_SFOe: -1.0,
                          etoh_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#etoh_SFOc <-> etoh_SFOe
# Ethanol export (reverse direction of the import above).
reaction = Reaction('SFO_Ethanol_export')
reaction.name = 'Ethanol export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({etoh_SFOc: -1.0,
                          etoh_SFOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Proton Transport
#h_SFOe <-> h_e
# Proton exchange with the shared pool, scaled by SFO_Abnd.
reaction = Reaction('SFO_EX_h')
reaction.name = 'SFO h exchange'
reaction.subsystem = 'Exchange'
reaction.lower_bound = -1000. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_e: SFO_Abnd,
                          h_SFOe: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h_SFOe <-> h_SFOc
# Proton import, irreversible; paired with the irreversible export below so
# each direction can carry flux independently.
reaction = Reaction('SFO_H_import')
reaction.name = 'H+ import'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_SFOe: -1.0,
                          h_SFOc: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#h_SFOc <-> h_SFOe
# Proton export (reverse direction of the import above).
reaction = Reaction('SFO_H_export')
reaction.name = 'H+ export'
reaction.subsystem = 'Transport'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({h_SFOc: -1.0,
                          h_SFOe: 1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#ATP for Transport
# SFO_ATP_Transport is a formula-less pseudo-metabolite (so it never enters
# mass balance); the sink reaction below consumes it. Presumably it tallies
# ATP spent on transport elsewhere in the model — confirm where it is produced.
SFO_ATP_Transport = Metabolite('SFO_ATP_Transport', formula='', name='', compartment='e')
reaction = Reaction('SFO_Transport_ATP')
reaction.name = 'Transport ATP'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({SFO_ATP_Transport: -1})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Formate_Transport_ATP
#atp_SFOc + h2o_SFOc <-> adp_SFOc + pi_SFOc + h_SFOc
# ATP hydrolysis charged to formate transport; ATP_TRANS_SFO (coefficient -1)
# couples this cost to the model's transport-ATP accounting.
reaction = Reaction('SFO_Formate_Transport_ATP')
reaction.name = 'Formate Transport ATP'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_SFOc: -1.0,
                          h2o_SFOc: -1.0,
                          adp_SFOc: 1.0,
                          pi_SFOc: 1.0,
                          h_SFOc: 1.0,
                          ATP_TRANS_SFO: -1.0})
model.add_reactions([reaction])
print(reaction.name + ": " + str(reaction.check_mass_balance()))
#Acetate_Transport_ATP
#atp_SFOc + h2o_SFOc <-> adp_SFOc + pi_SFOc + h_SFOc
reaction = Reaction('SFO_Acetate_Transport_ATP')
reaction.name = 'Acetate Transport ATP Hydrolysis'
reaction.subsystem = 'ATP Hydrolysis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.add_metabolites({atp_SFOc: -1.0,
h2o_SFOc: -1.0,
adp_SFOc: | |
= \
harmonic_oscillator_continuous_prior_mean_Michelangelo_deriv
dyn_GP_prior_mean = \
harmonic_oscillator_continuous_prior_mean_Michelangelo_u
dyn_GP_prior_mean_deriv = \
harmonic_oscillator_continuous_prior_mean_Michelangelo_deriv_u
elif 'No_observer' in system:
observer = None
observer_prior_mean = None
dyn_GP_prior_mean = None
observe_data = dim1_observe_data
init_state = reshape_pt1(np.array([[1, 0]]))
init_state_estim = reshape_pt1(np.array([[0, 0, 0]]))
init_control = reshape_pt1([0]) # imposed instead u(t=0)!
constrain_u = [-dyn_kwargs.get('gamma'),
dyn_kwargs.get('gamma')] # must be a python list!
constrain_x = [] # must be a python list!
grid_inf = -2
grid_sup = 2
# Create kernel
if dyn_kwargs.get('gamma') == 0:
input_dim = init_state.shape[1]
else:
input_dim = init_state.shape[1] + init_control.shape[1]
kernel = GPy.kern.RBF(input_dim=input_dim, variance=47,
lengthscale=np.array([1, 1]),
ARD=True)
kernel.unconstrain()
kernel.variance.set_prior(GPy.priors.Gaussian(30, 1))
kernel.lengthscale.set_prior(
GPy.priors.MultivariateGaussian(np.array([10, 10]),
np.diag([1, 1])))
    elif 'Continuous/VanderPol' in system:
        # Continuous-time Van der Pol oscillator driven by a 2D sinusoidal
        # controller; optional Michelangelo-style high-gain observer.
        discrete = False
        dyn_kwargs = {'mu': 2, 'gamma': 1.2, 'omega': np.pi / 10}
        dynamics = VanderPol_dynamics
        controller = sin_controller_02D
        if 'No_observer' in system:
            observer = None
            observer_prior_mean = None
            dyn_GP_prior_mean = None
        elif 'GP_Michelangelo' in system:
            observer = duffing_observer_Michelangelo_GP
            dyn_kwargs['prior_kwargs'] = {'mu': 2, 'gamma': 1.2,
                                          'omega': np.pi / 10, 'dt': dt,
                                          'dt_before_subsampling': 0.001}
            dyn_kwargs['continuous_model'] = continuous_model
            dyn_kwargs['prior_kwargs']['observer_gains'] = {'g': 20, 'k1': 5,
                                                            'k2': 5, 'k3': 1}
            # Saturation bounds for the observer's nonlinearity estimate,
            # derived from the prior mu.
            dyn_kwargs['saturation'] = np.array(
                [8 * dyn_kwargs.get('prior_kwargs').get('mu') - 1,
                 3 * dyn_kwargs.get('prior_kwargs').get('mu')])
            observer_prior_mean = \
                VanderPol_continuous_prior_mean_Michelangelo_deriv
            dyn_GP_prior_mean = VanderPol_continuous_prior_mean_Michelangelo_u
            dyn_GP_prior_mean_deriv = \
                VanderPol_continuous_prior_mean_Michelangelo_deriv_u
        observe_data = dim1_observe_data
        init_state = reshape_pt1(np.array([[0, 4]]))
        init_state_estim = reshape_pt1(np.array([[0, 0, 0]]))
        init_control = reshape_pt1([0, 0]) # imposed instead u(t=0)!
        constrain_u = [-dyn_kwargs.get('gamma'),
                       dyn_kwargs.get('gamma')] # must be a python list!
        constrain_x = [] # must be a python list!
        grid_inf = -2
        grid_sup = 2
        # Create kernel
        # Kernel input is the state, plus the control when gamma != 0.
        # NOTE(review): the lengthscale arrays below have 4 entries, matching
        # input_dim == 4; the gamma == 0 branch (input_dim == 2) would not
        # match them — confirm that branch is never taken with gamma=1.2.
        if dyn_kwargs.get('gamma') == 0:
            input_dim = init_state.shape[1]
        else:
            input_dim = init_state.shape[1] + init_control.shape[1]
        kernel = GPy.kern.RBF(input_dim=input_dim, variance=30,
                              lengthscale=np.array([1, 3, 150, 150]),
                              ARD=True)
        kernel.unconstrain()
        kernel.variance.set_prior(GPy.priors.Gaussian(30, 10))
        kernel.lengthscale.set_prior(
            GPy.priors.MultivariateGaussian(np.array([1, 3, 150, 150]),
                                            np.diag([1, 1, 50, 50])))
    elif 'Continuous/Modified_Duffing_Cossquare' in system:
        # Continuous-time modified Duffing oscillator with cos^2 term.
        # NOTE(review): unlike the other branches, observer/observer_prior_mean
        # are only assigned inside the 'GP_Michelangelo' sub-branch; a
        # Modified_Duffing_Cossquare system without that substring would leave
        # them undefined (NameError later) — confirm intended usage.
        discrete = False
        dyn_GP_prior_mean_deriv = None
        dyn_kwargs = {'alpha': 2, 'beta': 2, 'delta': 0.3, 'gamma': 0.4,
                      'omega': 1.2}
        dynamics = duffing_modified_cossquare
        controller = sin_controller_02D
        if 'GP_Michelangelo' in system:
            observer = duffing_observer_Michelangelo_GP
            dyn_kwargs['prior_kwargs'] = {'alpha': 2, 'beta': 2,
                                          'delta': 0.3, 'gamma': 0.4,
                                          'omega': 1.2, 'dt': dt,
                                          'dt_before_subsampling': 0.001}
            dyn_kwargs['continuous_model'] = continuous_model
            dyn_kwargs['saturation'] = np.array(
                [- 5 * dyn_kwargs.get('beta') - 5 * dyn_kwargs.get('alpha'),
                 -5 * dyn_kwargs.get('delta')])
            observer_prior_mean = \
                duffing_cossquare_continuous_prior_mean_Michelangelo_deriv
            dyn_GP_prior_mean = \
                duffing_cossquare_continuous_prior_mean_Michelangelo_u
            dyn_GP_prior_mean_deriv = \
                duffing_cossquare_continuous_prior_mean_Michelangelo_deriv_u
        observe_data = dim1_observe_data
        init_state = reshape_pt1(np.array([[0, 1]]))
        init_state_estim = reshape_pt1(np.array([[0, 0, 0]]))
        init_control = reshape_pt1([0, 0]) # imposed instead u(t=0)!
        constrain_u = [-dyn_kwargs.get('gamma'),
                       dyn_kwargs.get('gamma')] # must be a python list!
        constrain_x = [] # must be a python list!
        grid_inf = -1
        grid_sup = 1
        # Create kernel
        if dyn_kwargs.get('gamma') == 0:
            input_dim = init_state.shape[1]
        else:
            input_dim = init_state.shape[1] + init_control.shape[1]
        kernel = GPy.kern.RBF(input_dim=input_dim, variance=110,
                              lengthscale=np.array([5, 15, 150, 150]),
                              ARD=True)
        kernel.unconstrain()
        kernel.variance.set_prior(GPy.priors.Gaussian(110, 10))
        kernel.lengthscale.set_prior(
            GPy.priors.MultivariateGaussian(np.array([5, 15, 150, 150]),
                                            np.diag([0.1, 0.5, 10, 10])))
    else:
        raise Exception('Unknown system')
# Set derivative_function for continuous models
if continuous_model:
if dyn_GP_prior_mean:
logging.warning('A prior mean has been defined for the GP though '
'a continuous model is being used. Check this is '
'really what you want to do, as a prior mean is '
'often known for discrete models without being '
'available for continuous ones.')
def derivative_function(X, U, y_observed, GP):
X = reshape_pt1(X)
u = lambda t, kwargs, t0, init_control: reshape_pt1(U)[t]
y = lambda t, kwargs: reshape_pt1(y_observed)[t]
Xdot = np.array([observer(t, X[t], u, y, t0, init_control, GP,
dyn_kwargs) for t in
range(len(X))])
Xdot = Xdot.reshape(X.shape)
return Xdot.reshape(X.shape)
else:
derivative_function = None
    # Generate data: simulate dynamics
    # xtraj: true state trajectory, utraj: applied control, t_utraj: times.
    xtraj, utraj, t_utraj = simulate_dynamics(t_span=t_span, t_eval=t_eval,
                                              t0=t0, dt=dt,
                                              init_control=init_control,
                                              init_state=init_state,
                                              dynamics=dynamics,
                                              controller=controller,
                                              process_noise_var=process_noise_var,
                                              optim_method=optim_method,
                                              dyn_config=dyn_kwargs,
                                              discrete=discrete,
                                              verbose=verbose)
    # Observe data: only position, observer reconstitutes velocity
    # Get observations over t_eval and simulate xhat only over t_eval
    y_observed, t_y, xtraj_estim = \
        simulate_estimations(system=system, observe_data=observe_data,
                             t_eval=t_eval, t0=t0, tf=tf, dt=dt,
                             meas_noise_var=true_meas_noise_var,
                             init_control=init_control,
                             init_state_estim=init_state_estim,
                             controller=controller, observer=observer,
                             optim_method=optim_method,
                             dyn_config=dyn_kwargs, xtraj=xtraj,
                             GP=observer_prior_mean, discrete=discrete,
                             verbose=verbose)
    # Create initial data for GP, noiseless or noisy X, noiseless U, noisy Y
    X, U, Y = form_GP_data(system=system, xtraj=xtraj,
                           xtraj_estim=xtraj_estim, utraj=utraj,
                           meas_noise_var=true_meas_noise_var,
                           y_observed=y_observed,
                           derivative_function=derivative_function,
                           model=observer_prior_mean)
    # True dynamics: (xt, ut) -> xt+1 if no observer, (xt, ut) -> phi(xt,ut) if
    # Michelangelo. If no observer, simulate system for 10*dt starting at xt
    # and return result at t+dt
    if ('Michelangelo' in system) and ('Duffing' in system):
        # Return xi_t instead of x_t+1 from x_t,u_t
        # Closed-form Duffing nonlinearity using the dyn_kwargs coefficients.
        true_dynamics = lambda x, control: \
            - dyn_kwargs.get('beta') * x[:, 0] ** 3 - dyn_kwargs.get('alpha') \
            * x[:, 0] - dyn_kwargs.get('delta') * x[:, 1]
    elif ('justvelocity' in system) and ('Duffing' in system):
        if not continuous_model:
            # One-step simulation of the true system over [0, dt]; control is
            # interpolated from the supplied discrete control sequence.
            true_dynamics = lambda x, control: dynamics_traj(
                x0=reshape_pt1(x), u=lambda t, kwarg, t0, init_control:
                interpolate(t, np.concatenate((reshape_dim1(np.arange(
                    len(control))), control), axis=1),
                            t0=t0, init_value=init_control),
                t0=t0, dt=dt, init_control=init_control, version=dynamics,
                meas_noise_var=0, process_noise_var=process_noise_var,
                method=optim_method, t_span=[0, dt], t_eval=[dt],
                kwargs=dyn_kwargs)[:, -1]
        else:
            # Continuous model: evaluate the vector field directly at x.
            true_dynamics = lambda x, control: \
                dynamics(t=t0, x=x, u=lambda t, kwarg, t0, init_control:
                interpolate(t, np.concatenate((reshape_dim1(np.arange(
                    len(control))), control), axis=1), t0=t0,
                            init_value=init_control),
                         t0=t0, init_control=control,
                         process_noise_var=process_noise_var,
                         kwargs=dyn_kwargs)[:, -1]
    elif ('Michelangelo' in system) and ('Harmonic_oscillator' in system):
        # Return xi_t instead of x_t+1 from x_t,u_t
        true_dynamics = lambda x, control: \
            - dyn_kwargs.get('k') / dyn_kwargs.get('m') * x[:, 0]
    elif ('Michelangelo' in system) and ('Pendulum' in system):
        # Return xi_t instead of x_t+1 from x_t,u_t
        true_dynamics = lambda x, control: \
            - dyn_kwargs.get('g') / dyn_kwargs.get('l') * np.sin(x[:, 0]) \
            - dyn_kwargs.get('k') / dyn_kwargs.get('m') * x[:, 1]
    elif ('Michelangelo' in system) and ('VanderPol' in system):
        # Return xi_t instead of x_t+1 from x_t,u_t
        true_dynamics = lambda x, control: reshape_pt1(
            dyn_kwargs.get('mu') * (1 - x[:, 0] ** 2) * x[:, 1] - x[:, 0])
    elif (('Michelangelo' in system) or ('justvelocity_highgain' in system)) \
            and not any(k in system for k in ('Duffing', 'Harmonic_oscillator',
                                              'Pendulum', 'VanderPol')):
        raise Exception('No ground truth has been defined.')
    else:
        # Default: simulate the true system one step ahead.
        # NOTE(review): unlike the justvelocity branch above, this result is
        # not sliced with [:, -1] — presumably t_eval=[dt] already yields a
        # single step; confirm the expected shape downstream.
        true_dynamics = lambda x, control: dynamics_traj(
            x0=reshape_pt1(x), u=lambda t, kwarg, t0, init_control:
            interpolate(t, np.concatenate((reshape_dim1(np.arange(
                len(control))), control), axis=1),
                        t0=t0, init_value=init_control),
            t0=t0, dt=dt, init_control=init_control, version=dynamics,
            meas_noise_var=0, process_noise_var=process_noise_var,
            method=optim_method, t_span=[0, dt], t_eval=[dt],
            kwargs=dyn_kwargs)
    # Create config file from all params (not optimal, for cluster use
    # make cluster_this_script.py in which config is directly a system
    # argument given in command line and chosen from a set of predefined
    # config files)
    if not controller or not np.any(utraj):
        no_control = True
    else:
        no_control = False
    # NOTE(review): the Config below indexes dyn_kwargs['prior_kwargs'],
    # ['saturation'] and ['observer_gains'], which only the observer branches
    # set — a 'No_observer' system would raise KeyError here; confirm.
    config = Config(true_meas_noise_var=true_meas_noise_var,
                    process_noise_var=process_noise_var,
                    system=system,
                    optim_method=optim_method,
                    nb_samples=nb_samples,
                    t0_span=t0_span,
                    tf_span=tf_span,
                    t0=t0,
                    tf=tf,
                    dt=dt,
                    dt_before_subsampling=dyn_kwargs['prior_kwargs'][
                        'dt_before_subsampling'],
                    nb_rollouts=nb_rollouts,
                    rollout_length=rollout_length,
                    rollout_controller=rollout_controller,
                    nb_loops=nb_loops,
                    sliding_window_size=sliding_window_size,
                    verbose=verbose,
                    monitor_experiment=monitor_experiment,
                    multioutput_GP=multioutput_GP,
                    sparse=sparse,
                    memory_saving=memory_saving,
                    restart_on_loop=restart_on_loop,
                    GP_optim_method=GP_optim_method,
                    meas_noise_var=meas_noise_var,
                    hyperparam_optim=hyperparam_optim,
                    batch_adaptive_gain=batch_adaptive_gain,
                    discrete=discrete,
                    dynamics=dynamics,
                    controller=controller,
                    init_state=init_state,
                    init_state_estim=init_state_estim,
                    init_control=init_control,
                    input_dim=input_dim,
                    observer=observer,
                    true_dynamics=true_dynamics,
                    no_control=no_control,
                    dyn_kwargs=dyn_kwargs,
                    prior_kwargs=dyn_kwargs['prior_kwargs'],
                    observer_gains=dyn_kwargs['prior_kwargs'][
                        'observer_gains'],
                    saturation=dyn_kwargs['saturation'],
                    observer_prior_mean=observer_prior_mean,
                    prior_mean=dyn_GP_prior_mean,
                    prior_mean_deriv=dyn_GP_prior_mean_deriv,
                    derivative_function=derivative_function,
                    continuous_model=continuous_model,
                    observe_data=observe_data,
                    constrain_u=constrain_u,
                    constrain_x=constrain_x,
                    grid_inf=grid_inf,
                    grid_sup=grid_sup,
                    kernel=kernel)
    # Keep config and dyn_kwargs views of the same settings in sync.
    config.update(dyn_kwargs)
    config.dyn_kwargs.update(saturation=config.saturation,
                             prior_kwargs=config.prior_kwargs)
    config.dyn_kwargs['prior_kwargs']['observer_gains'] = config.observer_gains
    # Create GP
    dyn_kwargs.update({'dt': dt, 't0': t0, 'tf': tf, 't_span': t_span,
                       'init_state': init_state,
                       'init_state_estim': init_state_estim,
                       'init_control': init_control,
                       'observer_prior_mean': observer_prior_mean,
                       'true_noise_var': true_meas_noise_var,
                       'batch_adaptive_gain': batch_adaptive_gain})
    dyn_GP = Simple_GP_Dyn(X, U, Y, config)
    # Learn simple GP of dynamics, by seeing pairs (x_t, u_t) -> y_t
    data_to_save = {'xtraj': xtraj, 'xtraj_estim': xtraj_estim,
                    'y_observed': y_observed}
    if batch_adaptive_gain:
        gain_time = np.array(
            [dyn_kwargs['prior_kwargs']['observer_gains']['g']])
        data_to_save.update({'gain_time': gain_time})
    elif 'adaptive' in system:
        # Adaptive observer: log squared output error and the gain (stored as
        # the last component of the estimated state).
        output_error = reshape_dim1(np.square(xtraj[:, 0] - xtraj_estim[:, 0]))
        gain_time = reshape_dim1(xtraj_estim[:, -1])
        data_to_save.update(
            {'gain_time': gain_time, 'output_error': output_error})
    save_outside_data(dyn_GP, data_to_save)
    plot_outside_data(dyn_GP, data_to_save)
    dyn_GP.learn()
    # Run rollouts using only priors, before learning (step=-1)
    # Copy step-0 rollouts aside, temporarily reset the GP's counters, run
    # prior-only evaluations, then restore the counters.
    rollouts_folder = os.path.join(dyn_GP.results_folder, 'Rollouts_0')
    new_rollouts_folder = os.path.join(dyn_GP.results_folder,
                                       'Rollouts_-1')
    shutil.copytree(rollouts_folder, new_rollouts_folder)
    old_step, dyn_GP.step = dyn_GP.step, 0
    old_sample_idx, dyn_GP.sample_idx = dyn_GP.sample_idx, 0
    if 'justvelocity_adaptive' in config.system:
        # Do not adapt observer gains for closed-loop rollouts
        dyn_GP.evaluate_closedloop_rollouts(
            WDC_justvelocity_observer_highgain_GP,
            config.observe_data, no_GP_in_observer=True)
        if config.prior_mean:
            dyn_GP.evaluate_kalman_rollouts(
                WDC_justvelocity_observer_highgain_GP,
                config.observe_data, config.discrete,
                no_GP_in_observer=True, only_prior=True)
    else:
        dyn_GP.evaluate_closedloop_rollouts(
            config.observer, config.observe_data,
            no_GP_in_observer=True)
        if config.prior_mean:
            dyn_GP.evaluate_kalman_rollouts(
                config.observer, config.observe_data, config.discrete,
                no_GP_in_observer=True, only_prior=True)
    if config.prior_mean:
        # Also run open-loop rollouts with prior before learning
        dyn_GP.evaluate_rollouts(only_prior=True)
    dyn_GP.step = old_step
    dyn_GP.sample_idx = old_sample_idx
    dyn_GP.save()
    # Post-learning rollout evaluations with the trained GP.
    if 'justvelocity_adaptive' in system:
        # Do not adapt observer gains for closed-loop rollouts
        dyn_GP.evaluate_kalman_rollouts(
            WDC_justvelocity_observer_highgain_GP, observe_data, discrete)
        dyn_GP.evaluate_closedloop_rollouts(
            WDC_justvelocity_observer_highgain_GP, observe_data)
    else:
        dyn_GP.evaluate_kalman_rollouts(observer, observe_data, discrete)
        dyn_GP.evaluate_closedloop_rollouts(observer, observe_data)
# Alternate between estimating xtraj from observations (or just getting
# new xtraj), estimating fhat from new xtraj(_estim), and loop
for i in range(1, nb_loops):
# Update params and initial states after the first pass
if restart_on_loop:
dyn_kwargs = update_params_on_loop(system, dyn_kwargs)
else:
init_state | |
name_='Src_Link_Addr')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """No XML attributes are defined for this type; nothing to parse."""
        pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Src_Link_Addr':
obj_ = NDPSrcLinkAddrType.factory()
obj_.build(child_)
self.set_Src_Link_Addr(obj_)
# end class RouterSolicitationOptionsType
class RouterAdvertisementType(GeneratedsSuper):
    """Routers send out Router Advertisement messages periodically, or in
    response to Router Solicitations. (type=134; code=0)1-bit
    "Managed address configuration" flag. When set, it indicates
    that addresses are available via Dynamic Host Configuration
    Protocol. If the M flag is set, the O flag is redundant and can
    be ignored because DHCPv6 will return all available
    configuration information.1-bit "Other configuration" flag. When
    set, it indicates that other configuration information is
    available via DHCPv6. Examples of such information are DNS-
    related information or information on other servers within the
    network."""
    # Generated XML-binding class; the sibling *Type classes in this file
    # share this exact structure, so keep edits consistent across them.
    subclass = None
    superclass = None
    def __init__(self, other_config_flag=None, managed_address_config_flag=None, Cur_Hop_Limit=None, Router_Lifetime=None, Reachable_Time=None, Retrans_Timer=None, Options=None):
        # XML attributes: the two ND flags, cast to bool when given as text.
        self.other_config_flag = _cast(bool, other_config_flag)
        self.managed_address_config_flag = _cast(bool, managed_address_config_flag)
        # Child elements; buildChildren stores the first four as strings and
        # Options as a RouterAdvertisementOptionsType instance.
        self.Cur_Hop_Limit = Cur_Hop_Limit
        self.Router_Lifetime = Router_Lifetime
        self.Reachable_Time = Reachable_Time
        self.Retrans_Timer = Retrans_Timer
        self.Options = Options
    def factory(*args_, **kwargs_):
        # Honor a registered subclass override, if any.
        if RouterAdvertisementType.subclass:
            return RouterAdvertisementType.subclass(*args_, **kwargs_)
        else:
            return RouterAdvertisementType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Plain accessors kept for generated-binding API compatibility.
    def get_Cur_Hop_Limit(self): return self.Cur_Hop_Limit
    def set_Cur_Hop_Limit(self, Cur_Hop_Limit): self.Cur_Hop_Limit = Cur_Hop_Limit
    def get_Router_Lifetime(self): return self.Router_Lifetime
    def set_Router_Lifetime(self, Router_Lifetime): self.Router_Lifetime = Router_Lifetime
    def get_Reachable_Time(self): return self.Reachable_Time
    def set_Reachable_Time(self, Reachable_Time): self.Reachable_Time = Reachable_Time
    def get_Retrans_Timer(self): return self.Retrans_Timer
    def set_Retrans_Timer(self, Retrans_Timer): self.Retrans_Timer = Retrans_Timer
    def get_Options(self): return self.Options
    def set_Options(self, Options): self.Options = Options
    def get_other_config_flag(self): return self.other_config_flag
    def set_other_config_flag(self, other_config_flag): self.other_config_flag = other_config_flag
    def get_managed_address_config_flag(self): return self.managed_address_config_flag
    def set_managed_address_config_flag(self, managed_address_config_flag): self.managed_address_config_flag = managed_address_config_flag
    def export(self, outfile, level, namespace_='PacketObj:', name_='RouterAdvertisementType', namespacedef_=''):
        # Serialize as an XML element; collapses to a self-closing tag when
        # no child element is set.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='RouterAdvertisementType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='RouterAdvertisementType'):
        # Emit each flag attribute at most once, tracking names emitted so
        # far in already_processed.
        if self.other_config_flag is not None and 'other_config_flag' not in already_processed:
            already_processed.append('other_config_flag')
            outfile.write(' other_config_flag="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.other_config_flag)), input_name='other_config_flag'))
        if self.managed_address_config_flag is not None and 'managed_address_config_flag' not in already_processed:
            already_processed.append('managed_address_config_flag')
            outfile.write(' managed_address_config_flag="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.managed_address_config_flag)), input_name='managed_address_config_flag'))
    def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='RouterAdvertisementType', fromsubclass_=False):
        # Emit child elements in schema order, skipping unset members.
        # NOTE(review): buildChildren stores Cur_Hop_Limit..Retrans_Timer as
        # plain strings, which have no .export(); exporting a parsed instance
        # appears to assume these were set as objects — confirm.
        if self.Cur_Hop_Limit is not None:
            self.Cur_Hop_Limit.export(outfile, level, namespace_, name_='Cur_Hop_Limit')
        if self.Router_Lifetime is not None:
            self.Router_Lifetime.export(outfile, level, namespace_, name_='Router_Lifetime')
        if self.Reachable_Time is not None:
            self.Reachable_Time.export(outfile, level, namespace_, name_='Reachable_Time')
        if self.Retrans_Timer is not None:
            self.Retrans_Timer.export(outfile, level, namespace_, name_='Retrans_Timer')
        if self.Options is not None:
            self.Options.export(outfile, level, namespace_, name_='Options')
    def hasContent_(self):
        # True when at least one child element is set.
        if (
            self.Cur_Hop_Limit is not None or
            self.Router_Lifetime is not None or
            self.Reachable_Time is not None or
            self.Retrans_Timer is not None or
            self.Options is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='RouterAdvertisementType'):
        # Write this object as a Python-literal (constructor-style) dump.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.other_config_flag is not None and 'other_config_flag' not in already_processed:
            already_processed.append('other_config_flag')
            showIndent(outfile, level)
            outfile.write('other_config_flag = %s,\n' % (self.other_config_flag,))
        if self.managed_address_config_flag is not None and 'managed_address_config_flag' not in already_processed:
            already_processed.append('managed_address_config_flag')
            showIndent(outfile, level)
            outfile.write('managed_address_config_flag = %s,\n' % (self.managed_address_config_flag,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Cur_Hop_Limit is not None:
            showIndent(outfile, level)
            outfile.write('Cur_Hop_Limit=%s,\n' % quote_python(self.Cur_Hop_Limit).encode(ExternalEncoding))
        if self.Router_Lifetime is not None:
            showIndent(outfile, level)
            outfile.write('Router_Lifetime=%s,\n' % quote_python(self.Router_Lifetime).encode(ExternalEncoding))
        if self.Reachable_Time is not None:
            showIndent(outfile, level)
            outfile.write('Reachable_Time=%s,\n' % quote_python(self.Reachable_Time).encode(ExternalEncoding))
        if self.Retrans_Timer is not None:
            showIndent(outfile, level)
            outfile.write('Retrans_Timer=%s,\n' % quote_python(self.Retrans_Timer).encode(ExternalEncoding))
        if self.Options is not None:
            showIndent(outfile, level)
            outfile.write('Options=model_.RouterAdvertisementOptionsType(\n')
            self.Options.exportLiteral(outfile, level, name_='Options')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        # Populate this instance from an XML element node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Parse the two boolean flag attributes; accepts 'true'/'1' and
        # 'false'/'0', raising a parse error on anything else.
        value = find_attr_value_('other_config_flag', node)
        if value is not None and 'other_config_flag' not in already_processed:
            already_processed.append('other_config_flag')
            if value in ('true', '1'):
                self.other_config_flag = True
            elif value in ('false', '0'):
                self.other_config_flag = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('managed_address_config_flag', node)
        if value is not None and 'managed_address_config_flag' not in already_processed:
            already_processed.append('managed_address_config_flag')
            if value in ('true', '1'):
                self.managed_address_config_flag = True
            elif value in ('false', '0'):
                self.managed_address_config_flag = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple children are stored as validated strings; Options becomes a
        # nested RouterAdvertisementOptionsType object.
        if nodeName_ == 'Cur_Hop_Limit':
            Cur_Hop_Limit_ = child_.text
            Cur_Hop_Limit_ = self.gds_validate_string(Cur_Hop_Limit_, node, 'Cur_Hop_Limit')
            self.Cur_Hop_Limit = Cur_Hop_Limit_
        elif nodeName_ == 'Router_Lifetime':
            Router_Lifetime_ = child_.text
            Router_Lifetime_ = self.gds_validate_string(Router_Lifetime_, node, 'Router_Lifetime')
            self.Router_Lifetime = Router_Lifetime_
        elif nodeName_ == 'Reachable_Time':
            Reachable_Time_ = child_.text
            Reachable_Time_ = self.gds_validate_string(Reachable_Time_, node, 'Reachable_Time')
            self.Reachable_Time = Reachable_Time_
        elif nodeName_ == 'Retrans_Timer':
            Retrans_Timer_ = child_.text
            Retrans_Timer_ = self.gds_validate_string(Retrans_Timer_, node, 'Retrans_Timer')
            self.Retrans_Timer = Retrans_Timer_
        elif nodeName_ == 'Options':
            obj_ = RouterAdvertisementOptionsType.factory()
            obj_.build(child_)
            self.set_Options(obj_)
# end class RouterAdvertisementType
class RouterAdvertisementOptionsType(GeneratedsSuper):
"""Router Advertisement messages include zero or more options, some of
which may appear multiple times in the same message."""
subclass = None
superclass = None
    def __init__(self, Src_Link_Addr=None, MTU=None, Prefix_Info=None):
        # Optional option children; each is a nested *Type object or None.
        self.Src_Link_Addr = Src_Link_Addr
        self.MTU = MTU
        self.Prefix_Info = Prefix_Info
def factory(*args_, **kwargs_):
if RouterAdvertisementOptionsType.subclass:
return RouterAdvertisementOptionsType.subclass(*args_, **kwargs_)
else:
return RouterAdvertisementOptionsType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Src_Link_Addr(self): return self.Src_Link_Addr
def set_Src_Link_Addr(self, Src_Link_Addr): self.Src_Link_Addr = Src_Link_Addr
def get_MTU(self): return self.MTU
def set_MTU(self, MTU): self.MTU = MTU
def get_Prefix_Info(self): return self.Prefix_Info
def set_Prefix_Info(self, Prefix_Info): self.Prefix_Info = Prefix_Info
    def export(self, outfile, level, namespace_='PacketObj:', name_='RouterAdvertisementOptionsType', namespacedef_=''):
        # Serialize as an XML element; collapses to a self-closing tag when
        # no child element is set.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='RouterAdvertisementOptionsType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='RouterAdvertisementOptionsType'):
pass
def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='RouterAdvertisementOptionsType', fromsubclass_=False):
if self.Src_Link_Addr is not None:
self.Src_Link_Addr.export(outfile, level, namespace_, name_='Src_Link_Addr')
if self.MTU is not None:
self.MTU.export(outfile, level, namespace_, name_='MTU')
if self.Prefix_Info is not None:
self.Prefix_Info.export(outfile, level, namespace_, name_='Prefix_Info')
def hasContent_(self):
if (
self.Src_Link_Addr is not None or
self.MTU is not None or
self.Prefix_Info is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='RouterAdvertisementOptionsType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Src_Link_Addr is not None:
showIndent(outfile, level)
outfile.write('Src_Link_Addr=model_.NDPSrcLinkAddrType(\n')
self.Src_Link_Addr.exportLiteral(outfile, level, name_='Src_Link_Addr')
showIndent(outfile, level)
outfile.write('),\n')
if self.MTU is not None:
showIndent(outfile, level)
outfile.write('MTU=model_.NDPMTUType(\n')
self.MTU.exportLiteral(outfile, level, name_='MTU')
showIndent(outfile, level)
outfile.write('),\n')
if self.Prefix_Info is not None:
showIndent(outfile, level)
outfile.write('Prefix_Info=model_.NDPPrefixInfoType(\n')
self.Prefix_Info.exportLiteral(outfile, level, name_='Prefix_Info')
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Src_Link_Addr':
obj_ = NDPSrcLinkAddrType.factory()
obj_.build(child_)
self.set_Src_Link_Addr(obj_)
elif nodeName_ == 'MTU':
obj_ = NDPMTUType.factory()
obj_.build(child_)
self.set_MTU(obj_)
elif nodeName_ == 'Prefix_Info':
obj_ = NDPPrefixInfoType.factory()
obj_.build(child_)
self.set_Prefix_Info(obj_)
# end class RouterAdvertisementOptionsType
class NeighborSolicitationType(GeneratedsSuper):
"""Nodes send Neighbor Solicitations to request the link-layer address
of a target node while also providing their own link-layer
address to the target. Neighbor Solicitations are multicast when
the node needs to resolve an address and unicast when the node
seeks to verify the reachability of a neighbor. (type=135;
code=0)"""
subclass = None
superclass = None
def __init__(self, Target_IPv6_Addr=None, Options=None):
self.Target_IPv6_Addr = Target_IPv6_Addr
self.Options = Options
def factory(*args_, **kwargs_):
if NeighborSolicitationType.subclass:
return NeighborSolicitationType.subclass(*args_, **kwargs_)
else:
return NeighborSolicitationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Target_IPv6_Addr(self): return self.Target_IPv6_Addr
def set_Target_IPv6_Addr(self, Target_IPv6_Addr): self.Target_IPv6_Addr = Target_IPv6_Addr
def get_Options(self): return self.Options
def set_Options(self, Options): self.Options = Options
def export(self, outfile, level, namespace_='PacketObj:', name_='NeighborSolicitationType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='NeighborSolicitationType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='PacketObj:', name_='NeighborSolicitationType'):
pass
def exportChildren(self, outfile, level, namespace_='PacketObj:', name_='NeighborSolicitationType', fromsubclass_=False):
if self.Target_IPv6_Addr is not None:
self.Target_IPv6_Addr.export(outfile, level, namespace_, name_='Target_IPv6_Addr')
if self.Options is not None:
self.Options.export(outfile, level, namespace_, name_='Options')
def hasContent_(self):
if (
self.Target_IPv6_Addr is not None or
self.Options is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='NeighborSolicitationType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Target_IPv6_Addr is not None:
showIndent(outfile, level)
outfile.write('Target_IPv6_Addr=%s,\n' | |
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
# Run on GPU when one is available; all tensors/modules below are moved here.
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
# The raw corpus is expected under data/cornell movie-dialogs corpus.
corpus_name = "cornell movie-dialogs corpus"
corpus = os.path.join("data", corpus_name)
def printLines(file, n=10):
    """Print the first *n* raw (bytes) lines of *file*.

    Streams the file with itertools.islice instead of reading every line
    into memory first, so previewing a large corpus stays cheap.
    """
    with open(file, "rb") as datafile:
        for line in itertools.islice(datafile, n):
            print(line)
# Preview the raw corpus format before any processing.
printLines(os.path.join(corpus, "movie_lines.txt"))
def loadLines(fileName, fields):
    """Parse movie_lines.txt into {lineID: {field: value}} records.

    Each raw line is split on the corpus separator " +++$+++ " and the
    resulting values are matched positionally against *fields*.
    """
    records = {}
    with open(fileName, "r", encoding="iso-8859-1") as handle:
        for raw in handle:
            parts = raw.split(" +++$+++ ")
            entry = {field: parts[i] for i, field in enumerate(fields)}
            records[entry["lineID"]] = entry
    return records
def loadConversations(fileName, lines, fields):
    """Parse movie_conversations.txt into conversation dicts.

    Each conversation dict carries the raw *fields* plus a "lines" list of
    the line records (from *lines*) referenced by its utteranceIDs.
    """
    conversations = []
    # Compile once, outside the per-line loop — the pattern never changes
    # (the original recompiled it for every conversation).
    utterance_id_pattern = re.compile("L[0-9]+")
    with open(fileName, "r", encoding="iso-8859-1") as f:
        for line in f:
            values = line.split(" +++$+++ ")
            convObj = {}
            for i, field in enumerate(fields):
                convObj[field] = values[i]
            # utteranceIDs looks like "['L1', 'L2', ...]"; pull the IDs out.
            lineIds = utterance_id_pattern.findall(convObj["utteranceIDs"])
            convObj["lines"] = [lines[lineId] for lineId in lineIds]
            conversations.append(convObj)
    return conversations
def extractSentencePairs(conversations):
    """Collect consecutive (query, response) sentence pairs from conversations."""
    qa_pairs = []
    for conversation in conversations:
        turns = conversation["lines"]
        for current, following in zip(turns, turns[1:]):
            inputLine = current["text"].strip()
            targetLine = following["text"].strip()
            # Skip pairs where either side is empty after stripping.
            if inputLine and targetLine:
                qa_pairs.append([inputLine, targetLine])
    return qa_pairs
# Destination for the tab-separated (query, response) pair file.
datafile = os.path.join(corpus, "formatted_movie_lines.txt")
delimiter = "\t"
# Unescape so csv.writer receives a literal tab character.
delimiter = str(codecs.decode(delimiter, "unicode_escape"))
lines = {}
conversations = []
# Field layouts of the two raw corpus files (positional — order matters).
MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
print("\nProcessing corpus...")
lines = loadLines(os.path.join(corpus, "movie_lines.txt"), MOVIE_LINES_FIELDS)
print("\nLoading conversations...")
conversations = loadConversations(os.path.join(corpus, "movie_conversations.txt"), lines, MOVIE_CONVERSATIONS_FIELDS)
print("\nWriting newly formatted file...")
with open(datafile, "w", encoding="utf-8") as outputfile:
    writer = csv.writer(outputfile, delimiter=delimiter, lineterminator="\n")
    for pair in extractSentencePairs(conversations):
        writer.writerow(pair)
print("\nSample lines from file:")
printLines(datafile)
# Reserved vocabulary indices: padding, start-of-sentence, end-of-sentence.
PAD_token = 0
SOS_token = 1
EOS_token = 2
class Voc:
    """Vocabulary: bidirectional word<->index mapping with usage counts."""
    def __init__(self, name):
        self.name = name
        self.trimmed = False  # guards against trimming twice
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # PAD, SOS, EOS are pre-registered
    def addSentence(self, sentence):
        """Register every whitespace-separated token of *sentence*."""
        for token in sentence.split(" "):
            self.addWord(token)
    def addWord(self, word):
        """Add *word* if unseen, otherwise bump its count."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            idx = self.num_words
            self.word2index[word] = idx
            self.word2count[word] = 1
            self.index2word[idx] = word
            self.num_words = idx + 1
    def trim(self, min_count):
        """Drop words seen fewer than *min_count* times and re-index the rest."""
        if self.trimmed:
            return
        self.trimmed = True
        keep_words = [w for w, c in self.word2count.items() if c >= min_count]
        print(
            "keep_words {} / {} = {:.4f}".format(
                len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
            )
        )
        # Reset to the three reserved tokens, then re-add the survivors
        # (their counts restart at 1, as in a fresh vocabulary).
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3
        for word in keep_words:
            self.addWord(word)
# Maximum sentence length (in tokens) a pair may have to be kept for training.
MAX_LENGTH = 10
def unicodeToAscii(s):
    """Strip combining marks: NFD-decompose, then drop category-Mn chars."""
    decomposed = unicodedata.normalize("NFD", s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != "Mn"]
    return "".join(kept)
def normalizeString(s):
    """Lowercase, strip accents, detach .!? with a space, drop other non-letters."""
    text = unicodeToAscii(s.lower().strip())
    text = re.sub(r"([.!?])", r" \1", text)      # pad sentence punctuation
    text = re.sub(r"[^a-zA-Z.!?]+", r" ", text)  # everything else -> space
    text = re.sub(r"\s+", r" ", text).strip()    # collapse whitespace runs
    return text
def readVocs(datafile, corpus_name):
    """Read the formatted pair file and return (empty Voc, normalized pairs).

    Fix: the file handle is now closed deterministically via a context
    manager; the original opened the file inline and leaked the handle.
    """
    print("Reading lines...")
    with open(datafile, encoding="utf-8") as f:
        lines = f.read().strip().split("\n")
    # Each line is "query\tresponse"; normalize both sides.
    pairs = [[normalizeString(s) for s in l.split("\t")] for l in lines]
    voc = Voc(corpus_name)
    return voc, pairs
def filterPair(p, max_length=None):
    """Return True when both sentences of pair *p* are shorter than the limit.

    *max_length* defaults to the module-level MAX_LENGTH (the original
    hard-coded it); passing it explicitly makes the filter reusable with
    other limits. Lengths are counted in space-separated tokens and the
    comparison is strict (< limit), matching the original behavior.
    """
    if max_length is None:
        max_length = MAX_LENGTH
    return len(p[0].split(" ")) < max_length and len(p[1].split(" ")) < max_length
def filterPairs(pairs):
    """Keep only the pairs accepted by filterPair."""
    kept = []
    for candidate in pairs:
        if filterPair(candidate):
            kept.append(candidate)
    return kept
def loadPrepareData(corpus, corpus_name, datafile, save_dir):
    """Build the vocabulary and the length-filtered sentence-pair list."""
    print("Start preparing training data ...")
    voc, pairs = readVocs(datafile, corpus_name)
    print("Read {!s} sentence pairs".format(len(pairs)))
    pairs = filterPairs(pairs)
    print("Trimmed to {!s} sentence pairs".format(len(pairs)))
    print("Counting words...")
    # Register both sides of every surviving pair in the vocabulary.
    for pair in pairs:
        for sentence in pair[:2]:
            voc.addSentence(sentence)
    print("Counted words:", voc.num_words)
    return voc, pairs
save_dir = os.path.join("data", "save")
# Build vocabulary + filtered pairs from the formatted file, then preview.
voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
print("\npairs:")
for pair in pairs[:10]:
    print(pair)
# Words must occur at least this often to survive vocabulary trimming.
MIN_COUNT = 3
def trimRareWords(voc, pairs, MIN_COUNT):
    """Trim *voc* to frequent words, then drop pairs containing unknown words."""
    voc.trim(MIN_COUNT)
    vocab = voc.word2index
    keep_pairs = []
    for pair in pairs:
        # Keep the pair only when every token of both sentences survived the trim.
        both_known = all(
            word in vocab
            for sentence in pair[:2]
            for word in sentence.split(" ")
        )
        if both_known:
            keep_pairs.append(pair)
    print(
        "Trimmed from {} pairs to {}, {:.4f} of total".format(
            len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)
        )
    )
    return keep_pairs
# Apply the rare-word filter to the working pair list.
pairs = trimRareWords(voc, pairs, MIN_COUNT)
def indexesFromSentence(voc, sentence):
    """Map a sentence to its word indices, terminated by EOS_token."""
    indexes = [voc.word2index[token] for token in sentence.split(" ")]
    indexes.append(EOS_token)
    return indexes
def zeroPadding(l, fillvalue=PAD_token):
    """Transpose a batch of index lists, padding short ones with *fillvalue*.

    Returns a list of tuples of length max(len(seq)); row t holds token t of
    every sequence (time-major layout for the seq2seq models).
    """
    transposed = itertools.zip_longest(*l, fillvalue=fillvalue)
    return list(transposed)
def binaryMatrix(l, value=PAD_token):
    """Return a 0/1 mask for *l*: 0 where a token equals *value*, else 1.

    Fix: the original compared each token against the global PAD_token and
    silently ignored the *value* parameter; the parameter is now honored
    (the default keeps the original behavior, since value defaults to
    PAD_token).
    """
    return [[0 if token == value else 1 for token in seq] for seq in l]
def inputVar(l, voc):
    """Convert a batch of input sentences into a padded LongTensor + lengths."""
    indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
    lengths = torch.tensor([len(seq) for seq in indexes_batch])
    padded = zeroPadding(indexes_batch)
    return torch.LongTensor(padded), lengths
def outputVar(l, voc):
    """Padded target tensor, boolean pad-mask, and the longest target length."""
    indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l]
    max_target_len = max(len(seq) for seq in indexes_batch)
    padded = zeroPadding(indexes_batch)
    mask = torch.BoolTensor(binaryMatrix(padded))
    padVar = torch.LongTensor(padded)
    return padVar, mask, max_target_len
def batch2TrainData(voc, pair_batch):
    """Sort a batch by input length (longest first, for packing) and tensorize.

    NOTE: sorts *pair_batch* in place, exactly as before.
    """
    pair_batch.sort(key=lambda pair: len(pair[0].split(" ")), reverse=True)
    input_batch = [pair[0] for pair in pair_batch]
    output_batch = [pair[1] for pair in pair_batch]
    inp, lengths = inputVar(input_batch, voc)
    output, mask, max_target_len = outputVar(output_batch, voc)
    return inp, lengths, output, mask, max_target_len
# Sanity-check the batching pipeline on a small random batch.
small_batch_size = 5
batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)])
input_variable, lengths, target_variable, mask, max_target_len = batches
print("input_variable:", input_variable)
print("lengths:", lengths)
print("target_variable:", target_variable)
print("mask:", mask)
print("max_target_len:", max_target_len)
class EncoderRNN(nn.Module):
    """Bidirectional GRU encoder; the two directional outputs are summed."""
    def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):
        super(EncoderRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.embedding = embedding
        # Inter-layer dropout is meaningless for a single layer, so disable it.
        gru_dropout = 0 if n_layers == 1 else dropout
        self.gru = nn.GRU(
            hidden_size, hidden_size, n_layers, dropout=gru_dropout, bidirectional=True
        )
    def forward(self, input_seq, input_lengths, hidden=None):
        """Encode a (seq_len, batch) index tensor; returns (outputs, hidden)."""
        embedded = self.embedding(input_seq)
        # Pack so the GRU skips the padded positions.
        packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        outputs, hidden = self.gru(packed, hidden)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
        # Sum the forward/backward halves into a single hidden_size output.
        forward_half = outputs[:, :, : self.hidden_size]
        backward_half = outputs[:, :, self.hidden_size :]
        return forward_half + backward_half, hidden
class Attn(nn.Module):
    """Luong global attention with 'dot', 'general' or 'concat' scoring."""
    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()
        self.method = method
        if self.method not in ["dot", "general", "concat"]:
            raise ValueError(self.method, "is not an appropriate attention method.")
        self.hidden_size = hidden_size
        # Only 'general' and 'concat' need learned parameters.
        if self.method == "general":
            self.attn = nn.Linear(self.hidden_size, hidden_size)
        elif self.method == "concat":
            self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
            self.v = nn.Parameter(torch.FloatTensor(hidden_size))
    def dot_score(self, hidden, encoder_output):
        # (1, B, H) * (S, B, H) broadcast, summed over H -> (S, B)
        return torch.sum(hidden * encoder_output, dim=2)
    def general_score(self, hidden, encoder_output):
        projected = self.attn(encoder_output)
        return torch.sum(hidden * projected, dim=2)
    def concat_score(self, hidden, encoder_output):
        expanded = hidden.expand(encoder_output.size(0), -1, -1)
        energy = self.attn(torch.cat((expanded, encoder_output), 2)).tanh()
        return torch.sum(self.v * energy, dim=2)
    def forward(self, hidden, encoder_outputs):
        """Return attention weights of shape (batch, 1, src_len)."""
        score_fns = {
            "general": self.general_score,
            "concat": self.concat_score,
            "dot": self.dot_score,
        }
        attn_energies = score_fns[self.method](hidden, encoder_outputs)
        # (src_len, batch) -> (batch, src_len); softmax over source positions.
        attn_energies = attn_energies.t()
        return F.softmax(attn_energies, dim=1).unsqueeze(1)
class LuongAttnDecoderRNN(nn.Module):
    """Single-step GRU decoder with Luong attention over encoder outputs."""
    def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
        super(LuongAttnDecoderRNN, self).__init__()
        # Keep configuration around for checkpointing/inspection.
        self.attn_model = attn_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout
        self.embedding = embedding
        self.embedding_dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)
        self.attn = Attn(attn_model, hidden_size)
    def forward(self, input_step, last_hidden, encoder_outputs):
        """One decoding step for the whole batch; returns (softmax probs, hidden)."""
        embedded = self.embedding_dropout(self.embedding(input_step))
        rnn_output, hidden = self.gru(embedded, last_hidden)
        # Attention-weighted sum of encoder states -> per-example context vector.
        attn_weights = self.attn(rnn_output, encoder_outputs)
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)).squeeze(1)
        step_output = rnn_output.squeeze(0)
        # Luong eq. 5: combine GRU output and context, then project to vocab.
        concat_output = torch.tanh(self.concat(torch.cat((step_output, context), 1)))
        output = F.softmax(self.out(concat_output), dim=1)
        return output, hidden
def maskNLLLoss(inp, target, mask):
    """Masked mean NLL for one decoding step; also returns the token count."""
    nTotal = mask.sum()
    # Pick the probability assigned to each gold token, then take -log.
    gathered = torch.gather(inp, 1, target.view(-1, 1)).squeeze(1)
    crossEntropy = -torch.log(gathered)
    # Average only over non-padding positions, then move to the device.
    loss = crossEntropy.masked_select(mask).mean().to(device)
    return loss, nTotal.item()
def train(
    input_variable,
    lengths,
    target_variable,
    mask,
    max_target_len,
    encoder,
    decoder,
    embedding,
    encoder_optimizer,
    decoder_optimizer,
    batch_size,
    clip,
    max_length=MAX_LENGTH,
):
    """Run one training step on one batch; return average masked NLL per token.

    NOTE(review): `embedding` and `max_length` are accepted but unused in this
    body; `teacher_forcing_ratio` is read from module scope and is not defined
    in this chunk — confirm it is set before calling.
    """
    # Reset gradients accumulated by the previous step.
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    # Move batch tensors to the training device; lengths go to CPU
    # (pack_padded_sequence expects CPU lengths).
    input_variable = input_variable.to(device)
    target_variable = target_variable.to(device)
    mask = mask.to(device)
    lengths = lengths.to("cpu")
    loss = 0
    print_losses = []
    n_totals = 0
    # Encode the whole source batch once.
    encoder_outputs, encoder_hidden = encoder(input_variable, lengths)
    # Decoder starts from SOS for every sequence in the batch.
    decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])
    decoder_input = decoder_input.to(device)
    # Seed the decoder state with the encoder's final state (first n_layers).
    decoder_hidden = encoder_hidden[: decoder.n_layers]
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
            # Teacher forcing: next input is the ground-truth token.
            decoder_input = target_variable[t].view(1, -1)
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    else:
        for t in range(max_target_len):
            decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
            # No teacher forcing: feed back the decoder's own top-1 token.
            _, topi = decoder_output.topk(1)
            decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
            decoder_input = decoder_input.to(device)
            mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t])
            loss += mask_loss
            print_losses.append(mask_loss.item() * nTotal)
            n_totals += nTotal
    # Backprop, then clip gradients in place before stepping both optimizers.
    loss.backward()
    _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)
    encoder_optimizer.step()
    decoder_optimizer.step()
    return sum(print_losses) / n_totals
def trainIters(
model_name,
voc,
pairs,
encoder,
decoder,
encoder_optimizer,
decoder_optimizer,
embedding,
encoder_n_layers,
decoder_n_layers,
save_dir,
n_iteration,
batch_size,
print_every,
save_every,
clip,
corpus_name,
loadFilename,
):
training_batches = [
batch2TrainData(voc, [random.choice(pairs) for _ in range(batch_size)]) for _ in range(n_iteration)
]
print("Initializing ...")
start_iteration = 1
print_loss = 0
if loadFilename:
start_iteration = checkpoint["iteration"] + 1
print("Training...")
for iteration in range(start_iteration, n_iteration + 1):
training_batch = training_batches[iteration - 1]
input_variable, lengths, target_variable, mask, max_target_len = training_batch
loss = train(
input_variable,
lengths,
target_variable,
mask,
max_target_len,
encoder,
decoder,
embedding,
encoder_optimizer,
decoder_optimizer,
batch_size,
clip,
)
print_loss += loss
if iteration % print_every == | |
# Source: luismesas/MYNT-EYE-D-SDK — tools/analytics/imu_analytics.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 Slightech Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import os
import sys
TOOLBOX_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(TOOLBOX_DIR, 'internal'))
# pylint: disable=import-error,wrong-import-position
from data import DataError, Dataset, ROSBag, MYNTEYE, What
# Multipliers that convert seconds into the chosen display unit.
TIME_SCALE_FACTORS = {
    's': 1.,
    'm': 1. / 60,
    'h': 1. / 3600
}
# Gyroscope angle units: degrees or radians.
ANGLE_DEGREES = 'd'
ANGLE_RADIANS = 'r'
ANGLE_UNITS = (ANGLE_DEGREES, ANGLE_RADIANS)
# File names used by BinDataset's binary cache (created next to the dataset).
BIN_CONFIG_NAME = 'imu_analytics_bin.cfg'
BIN_IMU_NAME = 'imu_analytics_imu.bin'
BIN_TEMP_NAME = 'imu_analytics_temp.bin'
# IMU sample flags: a sample may carry both accel and gyro, or only one.
IMU_ALL = 0
IMU_ACCEL = 1
IMU_GYRO = 2
class RawDataset(Dataset):
    """In-memory dataset: collects all IMU/temperature samples up front."""
    def __init__(self, path, dataset_creator):
        super(RawDataset, self).__init__(path)
        # dataset_creator builds the concrete reader (e.g. ROSBag or MYNTEYE).
        self.dataset_creator = dataset_creator
        self._digest()
    def _digest(self):
        # Read everything once and cache the per-topic sample lists.
        dataset = self.dataset_creator(self.path)
        results = dataset.collect(What.imu, What.temp)
        self._dataset = dataset
        self._results = results
        self._has_imu = What.imu in results.keys()
        self._has_temp = What.temp in results.keys()
        print(' ' + ', '.join('{}: {}'.format(k, len(v))
            for k, v in results.items()))
    @staticmethod
    def _hypot(*args):
        # Euclidean norm of the given components (e.g. |accel|).
        from math import sqrt
        return sqrt(sum(x ** 2 for x in args))
    def plot(self, t_scale_factor, gryo_converter,
             ax_accel_x, ax_accel_y, ax_accel_z, ax_accel,
             ax_gyro_x, ax_gyro_y, ax_gyro_z, ax_temp):
        """Plot accel/gyro/temp series onto the given axes.

        Timestamps are re-based to each series' first sample and scaled by
        t_scale_factor; gyro values go through gryo_converter(x, deg, rad).
        """
        results = self._results
        if self._has_imu:
            # accel
            accels = [imu for imu in results[What.imu] \
                if imu.flag == IMU_ALL or imu.flag == IMU_ACCEL]
            if accels:
                print('accels: {}'.format(len(accels)))
                accel_t_beg = accels[0].timestamp
                accel_ts = [(accel.timestamp - accel_t_beg) * t_scale_factor
                            for accel in accels]
                ax_accel_x.plot(accel_ts, [v.accel_x for v in accels])
                ax_accel_y.plot(accel_ts, [v.accel_y for v in accels])
                ax_accel_z.plot(accel_ts, [v.accel_z for v in accels])
                # Magnitude of the acceleration vector.
                ax_accel.plot(accel_ts, [self._hypot(v.accel_x, v.accel_y, v.accel_z)
                                         for v in accels])
            # gyro
            gyros = [imu for imu in results[What.imu] \
                if imu.flag == IMU_ALL or imu.flag == IMU_GYRO]
            if gyros:
                print('gyros: {}'.format(len(gyros)))
                gyro_t_beg = gyros[0].timestamp
                gyro_ts = [(gyro.timestamp - gyro_t_beg) * t_scale_factor
                           for gyro in gyros]
                import math
                # Bind math's converters; gryo_converter picks which to apply.
                my_gryo_converter = \
                    lambda x: gryo_converter(x, math.degrees, math.radians)
                ax_gyro_x.plot(gyro_ts, [my_gryo_converter(v.gyro_x)
                                         for v in gyros])
                ax_gyro_y.plot(gyro_ts, [my_gryo_converter(v.gyro_y)
                                         for v in gyros])
                ax_gyro_z.plot(gyro_ts, [my_gryo_converter(v.gyro_z)
                                         for v in gyros])
        if self._has_temp:
            temp_t_beg = results[What.temp][0].timestamp
            temp_ts = [(temp.timestamp - temp_t_beg) * t_scale_factor
                       for temp in results[What.temp]]
            ax_temp.plot(temp_ts, [temp.value for temp in results[What.temp]])
    # Streaming-style access is deliberately unsupported on this class.
    def generate(self, *what):  # pylint: disable=unused-argument
        raise DataError('DataError: method not implemented')
    def iterate(self, action, *what):  # pylint: disable=unused-argument
        raise DataError('DataError: method not implemented')
    def collect(self, *what):  # pylint: disable=unused-argument
        raise DataError('DataError: method not implemented')
    # Time metadata is delegated to the wrapped dataset.
    @property
    def timebeg(self):
        return self._dataset.timebeg
    @property
    def timeend(self):
        return self._dataset.timeend
    @property
    def duration(self):
        return self._dataset.duration
    @property
    def has_imu(self):
        return self._has_imu
    @property
    def has_temp(self):
        return self._has_temp
class BinDataset(RawDataset):
    """
    Binary memory-mapped files of large dataset.
    References:
    https://stackoverflow.com/questions/5854515/large-plot-20-million-samples-gigabytes-of-data
    https://stackoverflow.com/questions/1053928/very-large-matrices-using-python-and-numpy
    """
    # def __init__(self, path, dataset_creator):
    #   super(BinDataset, self).__init__(path, dataset_creator)
    def _digest(self):
        # Reuse an existing binary cache next to the dataset when present;
        # otherwise convert the dataset into the cache files.
        bindir = os.path.splitext(self.path)[0]
        bincfg = os.path.join(bindir, BIN_CONFIG_NAME)
        if os.path.isfile(bincfg):
            with open(bincfg, 'r') as f_cfg:
                import yaml
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated/unsafe on untrusted input; consider yaml.safe_load.
                cfg = yaml.load(f_cfg)
                self._info = cfg['info']
                self._binimu = os.path.join(bindir, cfg['bins']['imu'])
                self._bintemp = os.path.join(bindir, cfg['bins']['temp'])
                print('find binary files ...')
                print(' binimu: {}'.format(self._binimu))
                print(' bintemp: {}'.format(self._bintemp))
                print(' bincfg: {}'.format(bincfg))
                if self._exists():
                    while True:
                        sys.stdout.write('Do you want to use it directly? [Y/n] ')
                        # raw_input: this tool targets Python 2 (use input() on py3).
                        choice = raw_input().lower()
                        if choice == '' or choice == 'y':
                            return
                        elif choice == 'n':
                            break
                        else:
                            print('Please respond with \'y\' or \'n\'.')
        self._convert()
    def _exists(self):
        # True when at least one cache file is already on disk.
        return os.path.isfile(self._binimu) or os.path.isfile(self._bintemp)
    def _convert(self):
        """Stream the dataset once and write the imu/temp binary caches."""
        import numpy as np
        dataset = self.dataset_creator(self.path)
        bindir = os.path.splitext(self.path)[0]
        if not os.path.exists(bindir):
            os.makedirs(bindir)
        binimu = os.path.join(bindir, BIN_IMU_NAME)
        bintemp = os.path.join(bindir, BIN_TEMP_NAME)
        bincfg = os.path.join(bindir, BIN_CONFIG_NAME)
        print('save to binary files ...')
        print(' binimu: {}'.format(binimu))
        print(' bintemp: {}'.format(bintemp))
        print(' bincfg: {}'.format(bincfg))
        has_imu = False
        has_temp = False
        with open(binimu, 'wb') as f_imu, open(bintemp, 'wb') as f_temp:
            # Timestamps are re-based to the first sample of each stream.
            imu_t_beg = -1
            imu_count = 0
            temp_t_beg = -1
            temp_count = 0
            for result in dataset.generate(What.imu, What.temp):
                if What.imu in result:
                    imu = result[What.imu]
                    if imu_t_beg == -1:
                        imu_t_beg = imu.timestamp
                    # Record layout must match the memmap dtype in plot().
                    np.array([(
                        (imu.timestamp - imu_t_beg), imu.flag,
                        imu.accel_x, imu.accel_y, imu.accel_z,
                        self._hypot(imu.accel_x, imu.accel_y, imu.accel_z),
                        imu.gyro_x, imu.gyro_y, imu.gyro_z
                    )], dtype="f8, i4, f8, f8, f8, f8, f8, f8, f8").tofile(f_imu)
                    imu_count = imu_count + 1
                    has_imu = True
                if What.temp in result:
                    temp = result[What.temp]
                    if temp_t_beg == -1:
                        temp_t_beg = temp.timestamp
                    np.array([(
                        (temp.timestamp - temp_t_beg),
                        temp.value
                    )], dtype="f8, f8").tofile(f_temp)
                    temp_count = temp_count + 1
                    has_temp = True
                # Progress indicator on one line.
                sys.stdout.write('\r imu: {}, temp: {}'.format(imu_count, temp_count))
        sys.stdout.write('\n')
        # pylint: disable=attribute-defined-outside-init
        self._info = {
            'timebeg': dataset.timebeg,
            'timeend': dataset.timeend,
            'duration': dataset.duration,
            'has_imu': has_imu,
            'has_temp': has_temp
        }
        self._binimu = binimu
        self._bintemp = bintemp
        # Persist the cache metadata so the next run can reuse the files.
        with open(bincfg, 'w') as f_cfg:
            import yaml
            yaml.dump({'info': self._info, 'bins': {
                'imu': BIN_IMU_NAME,
                'temp': BIN_TEMP_NAME
            }}, f_cfg, default_flow_style=False)
    def plot(self, t_scale_factor, gryo_converter,
             ax_accel_x, ax_accel_y, ax_accel_z, ax_accel,
             ax_gyro_x, ax_gyro_y, ax_gyro_z, ax_temp):
        """Plot from the memory-mapped caches without loading them fully."""
        import numpy as np
        if self.has_imu:
            # dtype mirrors the record layout written in _convert().
            imus = np.memmap(self._binimu, dtype=[
                ('t', 'f8'), ('flag', 'i4'),
                ('accel_x', 'f8'), ('accel_y', 'f8'), ('accel_z', 'f8'),
                ('accel', 'f8'),
                ('gyro_x', 'f8'), ('gyro_y', 'f8'), ('gyro_z', 'f8'),
            ], mode='r')
            accels = imus[(imus['flag'] == IMU_ALL) | (imus['flag'] == IMU_ACCEL)]
            accels_t = accels['t'] * t_scale_factor
            ax_accel_x.plot(accels_t, accels['accel_x'])
            ax_accel_y.plot(accels_t, accels['accel_y'])
            ax_accel_z.plot(accels_t, accels['accel_z'])
            ax_accel.plot(accels_t, accels['accel'])
            gyros = imus[(imus['flag'] == IMU_ALL) | (imus['flag'] == IMU_GYRO)]
            gyros_t = gyros['t'] * t_scale_factor
            # Vectorized conversion via numpy's degrees/radians.
            my_gryo_converter = \
                lambda x: gryo_converter(x, np.degrees, np.radians)
            ax_gyro_x.plot(gyros_t, my_gryo_converter(gyros['gyro_x']))
            ax_gyro_y.plot(gyros_t, my_gryo_converter(gyros['gyro_y']))
            ax_gyro_z.plot(gyros_t, my_gryo_converter(gyros['gyro_z']))
        if self.has_temp:
            temps = np.memmap(self._bintemp, dtype=[
                ('t', 'f8'), ('value', 'f8')
            ], mode='r')
            temps_t = temps['t'] * t_scale_factor
            ax_temp.plot(temps_t, temps['value'])
    # Metadata comes from the cached info dict, not a live dataset object.
    @property
    def timebeg(self):
        return self._info['timebeg']
    @property
    def timeend(self):
        return self._info['timeend']
    @property
    def duration(self):
        return self._info['duration']
    @property
    def has_imu(self):
        return self._info['has_imu']
    @property
    def has_temp(self):
        return self._info['has_temp']
def analyze(dataset, profile):
    """Build the 2x4 IMU analytics figure for *dataset* and show/save it.

    *profile* carries the CLI options: time/angle units, per-axis limits,
    auto-scaling flag, output directory and show duration.
    """
    # Pick a readable time unit from the dataset duration when unspecified.
    if not profile.time_unit:
        if dataset.duration > 3600:
            time_unit = 'h'
        elif dataset.duration > 60:
            time_unit = 'm'
        else:
            time_unit = 's'
    else:
        time_unit = profile.time_unit
    t_name = 'time ({})'.format(time_unit)
    t_scale_factor = TIME_SCALE_FACTORS[time_unit]
    time_limits = profile.time_limits
    if not time_limits:
        # Default x-range: the whole recording, in display units.
        time_limits = [0, dataset.duration * t_scale_factor]
    accel_limits = profile.accel_limits
    gyro_limits = profile.gyro_limits
    temp_limits = profile.temp_limits
    auto = profile.auto
    import matplotlib.pyplot as plt
    fig_1 = plt.figure(1, [16, 12])
    fig_1.suptitle('IMU Analytics')
    fig_1.subplots_adjust(wspace=0.4, hspace=0.2)
    # Row 1: accel x/y/z and |accel|.
    ax_accel_x = fig_1.add_subplot(241)
    ax_accel_x.set_title('accel_x')
    ax_accel_x.set_xlabel(t_name)
    ax_accel_x.set_ylabel('accel_x (m/s^2)')
    ax_accel_x.axis('auto')
    ax_accel_x.set_xlim(time_limits)
    if not auto and accel_limits and accel_limits[0]:
        ax_accel_x.set_ylim(accel_limits[0])
    ax_accel_y = fig_1.add_subplot(242)
    ax_accel_y.set_title('accel_y')
    ax_accel_y.set_xlabel(t_name)
    ax_accel_y.set_ylabel('accel_y (m/s^2)')
    ax_accel_y.axis('auto')
    ax_accel_y.set_xlim(time_limits)
    if not auto and accel_limits and accel_limits[1]:
        ax_accel_y.set_ylim(accel_limits[1])
    ax_accel_z = fig_1.add_subplot(243)
    ax_accel_z.set_title('accel_z')
    ax_accel_z.set_xlabel(t_name)
    ax_accel_z.set_ylabel('accel_z (m/s^2)')
    ax_accel_z.axis('auto')
    ax_accel_z.set_xlim(time_limits)
    if not auto and accel_limits and accel_limits[2]:
        ax_accel_z.set_ylim(accel_limits[2])
    ax_accel = fig_1.add_subplot(244)
    ax_accel.set_title('accel hypot(x,y,z)')
    ax_accel.set_xlabel(t_name)
    ax_accel.set_ylabel('accel (m/s^2)')
    ax_accel.axis('auto')
    ax_accel.set_xlim(time_limits)
    if not auto and accel_limits and accel_limits[3]:
        ax_accel.set_ylim(accel_limits[3])
    # Row 2: gyro x/y/z (unit label follows the display unit) and temperature.
    ax_gyro_ylabels = {
        ANGLE_DEGREES: 'deg/sec',
        ANGLE_RADIANS: 'rad/sec'
    }
    ax_gyro_ylabel = ax_gyro_ylabels[profile.gyro_show_unit]
    ax_gyro_x = fig_1.add_subplot(245)
    ax_gyro_x.set_title('gyro_x')
    ax_gyro_x.set_xlabel(t_name)
    ax_gyro_x.set_ylabel('gyro_x ({})'.format(ax_gyro_ylabel))
    ax_gyro_x.axis('auto')
    ax_gyro_x.set_xlim(time_limits)
    if not auto and gyro_limits and gyro_limits[0]:
        ax_gyro_x.set_ylim(gyro_limits[0])
    ax_gyro_y = fig_1.add_subplot(246)
    ax_gyro_y.set_title('gyro_y')
    ax_gyro_y.set_xlabel(t_name)
    ax_gyro_y.set_ylabel('gyro_y ({})'.format(ax_gyro_ylabel))
    ax_gyro_y.axis('auto')
    ax_gyro_y.set_xlim(time_limits)
    if not auto and gyro_limits and gyro_limits[1]:
        ax_gyro_y.set_ylim(gyro_limits[1])
    ax_gyro_z = fig_1.add_subplot(247)
    ax_gyro_z.set_title('gyro_z')
    ax_gyro_z.set_xlabel(t_name)
    ax_gyro_z.set_ylabel('gyro_z ({})'.format(ax_gyro_ylabel))
    ax_gyro_z.axis('auto')
    ax_gyro_z.set_xlim(time_limits)
    if not auto and gyro_limits and gyro_limits[2]:
        ax_gyro_z.set_ylim(gyro_limits[2])
    ax_temp = None
    if dataset.has_temp:
        ax_temp = fig_1.add_subplot(248)
        ax_temp.set_title('temperature')
        ax_temp.set_xlabel(t_name)
        ax_temp.set_ylabel('temperature (degree Celsius)')
        ax_temp.axis('auto')
        ax_temp.set_xlim(time_limits)
        if not auto and temp_limits:
            ax_temp.set_ylim(temp_limits)
    def gryo_converter(x, degrees, radians):
        # Convert gyro samples from the data unit into the display unit,
        # using the caller-supplied converters (math.* or np.*).
        if profile.gyro_show_unit == profile.gyro_data_unit:
            return x
        if profile.gyro_show_unit == ANGLE_DEGREES and \
                profile.gyro_data_unit == ANGLE_RADIANS:
            return degrees(x)
        if profile.gyro_show_unit == ANGLE_RADIANS and \
                profile.gyro_data_unit == ANGLE_DEGREES:
            return radians(x)
        sys.exit('Error: gryo_converter wrong logic')
    dataset.plot(t_scale_factor, gryo_converter,
                 ax_accel_x, ax_accel_y, ax_accel_z, ax_accel,
                 ax_gyro_x, ax_gyro_y, ax_gyro_z, ax_temp)
    outdir = profile.outdir
    if outdir:
        figpath = os.path.join(outdir, 'imu_analytics.png')
        print('save figure to:\n {}'.format(figpath))
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        fig_1.savefig(figpath, dpi=100)
    # Either show briefly (non-blocking) or block until the window is closed.
    show_secs = profile.show_secs
    if show_secs > 0:
        plt.show(block=False)
        plt.pause(show_secs)
        plt.close()
    else:
        plt.show()
def _parse_args():
def limits_type(string, num=1):
if not string:
return None
if num < 1:
sys.exit('Error: limits_type must be greater than one pair')
pairs = string.split(':')
pairs_len = len(pairs)
if pairs_len == 1:
values = pairs[0].split(',')
if len(values) != 2:
sys.exit('Error: limits_type must be two values'
' as \'min,max\' for each pair')
results = (float(values[0]), float(values[1]))
if num > 1:
return [results for i in xrange(num)]
else:
return results
elif pairs_len == num:
results = []
for i in xrange(num):
if pairs[i]:
values = pairs[i].split(',')
if len(values) != 2:
sys.exit('Error: limits_type must be two values'
' as \'min,max\' for each pair')
results.append((float(values[0]), float(values[1])))
else:
results.append(None)
return results
else:
sys.exit('Error: | |
# -*- coding: utf-8 -*-
# create a function from_table_to_page_object to generate code for page objects
# name (str), id (str), set (bool), get(bool), click(bool), advance(bool), check(bool)
# from this table, generate code in .py file
# separate page objects from higher level functions
# define a decorator to try again if it hits StaleElement. usable in click(), set_() and check()
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import element_to_be_clickable, any_of
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, StaleElementReferenceException, ElementClickInterceptedException
from re import search, VERBOSE
#==================================================================================================================
class PageObject():
    '''
    Base class holding the interactions shared by all page objects:
    waiting for an element, typing into it, clicking it, and checking
    its current value.
    '''
    def __init__(self, driver):
        # every page object shares one driver and a default 15-second wait
        self.driver = driver
        self.wait = WebDriverWait(self.driver, 15)

    def get(self, id_):
        '''Wait (default timeout) until the element is clickable, return it.'''
        return self.wait.until(element_to_be_clickable((By.ID, id_)))

    def set_(self, id_, value):
        '''Clear the input identified by id_ and type value into it.'''
        try:
            elem = self.get(id_)
            elem.clear()
            elem.send_keys(value)
        except StaleElementReferenceException:
            # element was re-rendered between lookup and use; retry
            print('hit stale element')
            self.set_(id_, value)

    def set_date(self, id_, date):
        '''Set the input's value via JavaScript.

        Needed for masked inputs (dates and similar) that reject plain
        send_keys.  NOTE(review): rename pending — other inputs besides
        dates also require this method.
        '''
        elem = self.get(id_)
        elem.clear()
        self.driver.execute_script("arguments[0].value = arguments[1]",
            elem, date)

    def check(self, id_, expected_value):
        '''Return True iff the input's current value equals expected_value.'''
        try:
            elem = self.get(id_)
            return elem.get_attribute('value') == expected_value
        except StaleElementReferenceException:
            print('hit stale element')
            # BUG FIX: previously called the nonexistent self.check_ (raising
            # AttributeError) and discarded the result; retry and propagate.
            return self.check(id_, expected_value)

    def advanced(self, id_):
        '''Return True once the element identified by id_ is clickable.'''
        self.get(id_)
        return True

    def click(self, id_):
        '''Click the (clickable) element identified by id_.'''
        elem = self.get(id_)
        elem.click()
#==================================================================================================================
class SiafiAuth(PageObject):
    '''Page object for Siafi's login flow: CPF, password and captcha.'''
    authentication_url = 'https://siafi.tesouro.gov.br'
    cpf_input_id = 'j_username'
    password_input_id = '<PASSWORD>'
    captcha_input_id = 'j_captcha'
    already_open_session_btn_id = 'formRemoverContexto:botaoConfirmar'
    accept_terms_btn_id = 'frmTemplateAcesso:btnConcordar'
    search_input_btn_id = 'frmMenu:acessoRapido'

    def __init__(self, driver):
        super().__init__(driver)

    def go_to_authentication_page(self):
        '''Navigate the browser to the authentication URL.'''
        self.driver.get(self.authentication_url)

    def set_cpf(self, cpf):
        self.set_(self.cpf_input_id, cpf)

    def set_password(self, password):
        self.set_(self.password_input_id, password)

    def put_focus_on_captcha_input(self):
        self.click(self.captcha_input_id)

    def detect_captcha_resolution(self):
        '''Block until the user solves the captcha, then dismiss the
        "already open session" and/or "accept terms" dialogs.'''
        session_btn = element_to_be_clickable(
            (By.ID, self.already_open_session_btn_id))
        terms_btn = element_to_be_clickable(
            (By.ID, self.accept_terms_btn_id))
        # give the human up to 100 seconds to solve the captcha
        self.wait = WebDriverWait(self.driver, timeout = 100)
        # whichever of the two buttons appears first
        elem = self.wait.until(any_of(session_btn, terms_btn))
        # restore the default 15-second timeout
        self.wait = WebDriverWait(self.driver, 15)
        clicked_id = elem.get_attribute('id')
        if clicked_id == self.accept_terms_btn_id:
            # terms page showed up directly: accept and we are done
            elem.click()
        elif clicked_id == self.already_open_session_btn_id:
            # confirm closing the previous session, then accept the terms
            elem.click()
            self.wait.until(terms_btn).click()

    def advanced_to_initial_page(self):
        return self.advanced(self.search_input_btn_id)
#==================================================================================================================
class SiafiInitialPage(PageObject):
    '''Siafi landing page after login: quick-access transaction search.'''
    search_input_id = 'frmMenu:acessoRapido'
    submit_btn_id = 'frmMenu:botaoAcessoRapidoVerificaTipoTransacao'
    dh_input_id = 'form_manterDocumentoHabil:codigoTipoDocHabil_input'
    def __init__(self, driver):
        super().__init__(driver)
    def set_search_text(self, search_text):
        '''Type a transaction name into the quick-access search box.'''
        self.set_(self.search_input_id, search_text)
    def check_search_text(self, expected_text):
        '''Return True when the search box currently holds expected_text.'''
        return self.check(self.search_input_id, expected_text)
    def submit_search(self):
        # standardize this. call it click
        self.click(self.submit_btn_id)
    def advanced_to_incdh(self):
        '''Return True once the INCDH page (DH type input) is reachable.'''
        return self.advanced(self.dh_input_id)
#==================================================================================================================
class IncdhInitialPage(SiafiInitialPage):
    '''INCDH entry page: choose the document type (DH) and confirm.'''
    confirm_btn_id = 'form_manterDocumentoHabil:btnConfirmarTipoDoc'
    dh_title_id = 'form_manterDocumentoHabil:tituloTipoDocHabil'
    ug_input_id = 'form_manterDocumentoHabil:pagadoraRecebedora'

    def __init__(self, driver):
        super().__init__(driver)

    def set_dh(self, dh):
        # dh_input_id is inherited from SiafiInitialPage
        self.set_(self.dh_input_id, dh)

    def check_dh_title(self, expected_text):
        '''Wait until the DH title text equals expected_text; True on success.'''
        def title_matches(driver):
            title = driver.find_element(By.ID, self.dh_title_id)
            return title.get_attribute('innerHTML') == expected_text
        self.wait.until(title_matches)
        return True

    def submit(self):
        # standardize this. call it click
        self.click(self.confirm_btn_id)

    def advanced_to_dh_basic_data(self):
        return self.advanced(self.ug_input_id)
#==================================================================================================================
class BasicData(PageObject):
    '''"Dados Basicos" tab of the INCDH transaction: due date, process
    number, value, recipient and observation fields.'''
    due_date_input_id = 'form_manterDocumentoHabil:dataVencimento_calendarInputDate'
    process_num_input_id = 'form_manterDocumentoHabil:processo_input'
    validation_date_input_id = 'form_manterDocumentoHabil:dataAteste_calendarInputDate'
    value_input_id = 'form_manterDocumentoHabil:valorPrincipalDocumento_input'
    recipient_input_id = 'form_manterDocumentoHabil:credorDevedor_input' # may throw StaleElement when checking
    observation_textarea_id = 'form_manterDocumentoHabil:observacao'
    doc_emitter_input_id = 'form_manterDocumentoHabil:tableDocsOrigem:0:emitenteDocOrigem_input'
    recipient_name_id = 'form_manterDocumentoHabil:nomeCredorDevedor'
    confirm_basic_data_btn_id = 'form_manterDocumentoHabil:btnConfirmarDadosBasicos'
    pco_tab_id = 'form_manterDocumentoHabil:abaPrincipalComOrcamentoId'
    pco_situation_input_id = 'form_manterDocumentoHabil:campo_situacao_input'
    def __init__(self, driver):
        super().__init__(driver)
    def set_due_date(self, due_date):
        # JS-based setter: the date input rejects plain send_keys
        self.set_date(self.due_date_input_id, due_date)
    def set_process_num(self, process_num):
        self.set_(self.process_num_input_id, process_num)
    def set_validation_date(self, validation_date):
        self.set_date(self.validation_date_input_id, validation_date)
    def set_value(self, value):
        self.set_(self.value_input_id, value)
    def set_recipient(self, recipient):
        self.set_(self.recipient_input_id, recipient)
    def set_observation(self, observation):
        self.set_(self.observation_textarea_id, observation)
    def is_recipient_name_loaded(self):
        '''Return True once Siafi fills in the recipient's name; False on timeout.'''
        # standardize this. call it check
        condition = lambda d: d.find_element(By.ID, self.recipient_name_id).get_attribute('innerHTML') != ''
        try:
            self.wait.until(condition)
            return True
        except TimeoutException:
            print("Recipient is not registered yet")
            return False
    def check_due_date(self, expected_due_date):
        return self.check(self.due_date_input_id, expected_due_date)
    def check_process_num(self, expected_process_num):
        return self.check(self.process_num_input_id, expected_process_num)
    def check_validation_date(self, expected_validation_date):
        return self.check(self.validation_date_input_id, expected_validation_date)
    def check_value(self, expected_value):
        return self.check(self.value_input_id, expected_value)
    def check_recipient(self, expected_recipient):
        return self.check(self.recipient_input_id, expected_recipient)
    def check_observation(self, expected_observation):
        return self.check(self.observation_textarea_id, expected_observation)
    def click_confirm_basic_data_btn(self):
        self.click(self.confirm_basic_data_btn_id)
    def is_pco_clickable(self):
        return self.advanced(self.pco_tab_id)
    def click_pco_tab(self):
        # may throw StaleElementReferenceException, which is not treated so far
        self.click(self.pco_tab_id)
    def advanced_to_pco(self):
        '''Return True once the PCO tab content (situation input) is reachable.'''
        return self.advanced(self.pco_situation_input_id)
    # it does not have a method to evaluate errors in Siafi's validation
    # errors are class="error"
    # also, it throws TimeoutException without saying anything. encapsulate that to return a meaningful message
#==================================================================================================================
class SourceDoc(PageObject):
    '''"Documentos de Origem" table of the INCDH transaction.

    All locators address row 0, so the class assumes a single source doc.'''
    include_source_doc_btn_id = 'form_manterDocumentoHabil:tableDocsOrigem_painel_incluir'
    doc_emitter_input_id = 'form_manterDocumentoHabil:tableDocsOrigem:0:emitenteDocOrigem_input'
    doc_date_input_id = 'form_manterDocumentoHabil:tableDocsOrigem:0:dataEmissaoDocOrigem_calendarInputDate'
    doc_num_input_id = 'form_manterDocumentoHabil:tableDocsOrigem:0:numeroDocOrigem_input'
    doc_value_input_id = 'form_manterDocumentoHabil:tableDocsOrigem:0:valorDocOrigem_input'
    doc_confirm_btn_id = 'form_manterDocumentoHabil:tableDocsOrigem_painel_confirmar'
    first_doc_checkbtn_id = 'form_manterDocumentoHabil:tableDocsOrigem:0:tableDocsOrigem_check_selecao'
    def __init__(self, driver):
        super().__init__(driver)
    def click_include_source_doc(self):
        self.click(self.include_source_doc_btn_id)
    def did_it_include_source_doc(self):
        '''Return True once the new row's emitter input is clickable.'''
        # standardize this. there is checking an input and there is checking if it advanced
        # assumes there is only one source doc
        return self.advanced(self.doc_emitter_input_id)
    def set_doc_emitter(self, doc_emitter):
        self.set_(self.doc_emitter_input_id, doc_emitter)
    def set_doc_date(self, doc_date):
        # JS-based setter: the date input rejects plain send_keys
        self.set_date(self.doc_date_input_id, doc_date)
    def set_doc_num(self, doc_num):
        self.set_(self.doc_num_input_id, doc_num)
    def set_doc_value(self, doc_value):
        self.set_(self.doc_value_input_id, doc_value)
    def check_doc_emitter(self, expected_doc_emitter):
        return self.check(self.doc_emitter_input_id, expected_doc_emitter)
    def check_doc_date(self, expected_doc_date):
        return self.check(self.doc_date_input_id, expected_doc_date)
    def check_doc_num(self, expected_doc_num):
        return self.check(self.doc_num_input_id, expected_doc_num)
    def check_doc_value(self, expected_doc_value):
        return self.check(self.doc_value_input_id, expected_doc_value)
    def click_confirm_btn(self):
        self.click(self.doc_confirm_btn_id)
    def did_it_confirm_source_doc(self):
        '''Return True once the confirmed row's checkbox is clickable.'''
        return self.advanced(self.first_doc_checkbtn_id)
#==================================================================================================================
class Pco(PageObject):
    '''"Principal Com Orcamento" (PCO) tab: budget situation, commitment
    note (NE) and account classification fields.  Locators address row 0
    of the PCO list.'''
    situation_input_id = 'form_manterDocumentoHabil:campo_situacao_input'
    confirm_situation_btn_id = 'form_manterDocumentoHabil:botao_ConfirmarSituacao'
    ne_input_id = 'form_manterDocumentoHabil:lista_PCO:0:PCO_item_num_empenho_input'
    subelement_input_id = 'form_manterDocumentoHabil:lista_PCO:0:PCO_item_num_subitem_input'
    ledger_account_input_id = 'form_manterDocumentoHabil:lista_PCO:0:PCO_item_campoClassificacaoA_input_classificacao_contabil'
    benefits_account_input_id = 'form_manterDocumentoHabil:lista_PCO:0:PCO_item_campoClassificacaoB_input_classificacao_contabil'
    value_input_id = 'form_manterDocumentoHabil:lista_PCO:0:PCO_item_valor_item_input'
    confirm_btn_id = 'form_manterDocumentoHabil:lista_PCO_painel_confirmar'
    pco_created_checkbox_id = 'form_manterDocumentoHabil:lista_PCO:0:painel_collapse_PCO_selecao'
    payment_data_tab_id = 'form_manterDocumentoHabil:abaDadosPagRecId'
    payment_data_table_checkbox_id = 'form_manterDocumentoHabil:lista_DPgtoOB:siafiTableCheck_DP_OB_cabecalho'
    def __init__(self, driver):
        super().__init__(driver)
    def set_situation_code(self, situation_code):
        self.set_(self.situation_input_id, situation_code)
    def check_situation_code(self, expected_situation_code):
        return self.check(self.situation_input_id, expected_situation_code)
    def click_confirm_situation_btn(self):
        self.click(self.confirm_situation_btn_id)
    def set_ne(self, ne):
        self.set_(self.ne_input_id, ne)
    def set_subelement(self, subelement):
        # deliberately uses the JS-based setter; this masked input rejects
        # plain send_keys (see the note on PageObject.set_date)
        self.set_date(self.subelement_input_id, subelement)
    def set_ledger_account(self, ledger_account):
        # JS-based setter for the same reason as set_subelement
        self.set_date(self.ledger_account_input_id, ledger_account)
    def set_benefits_account(self, benefits_account):
        # JS-based setter for the same reason as set_subelement
        self.set_date(self.benefits_account_input_id, benefits_account)
    def set_value(self, value):
        self.set_(self.value_input_id, value)
    def check_ne(self, expected_ne):
        return self.check(self.ne_input_id, expected_ne)
    def check_subelement(self, expected_subelement):
        return self.check(self.subelement_input_id, expected_subelement)
    def check_ledger_account(self, expected_ledger_account):
        return self.check(self.ledger_account_input_id, expected_ledger_account)
    def check_benefits_account(self, expected_benefits_account):
        return self.check(self.benefits_account_input_id, expected_benefits_account)
    def check_value(self, expected_value):
        return self.check(self.value_input_id, expected_value)
    def click_confirm_btn(self):
        self.click(self.confirm_btn_id)
    def is_pco_created(self):
        '''Return True once the created row's checkbox is clickable.'''
        return self.advanced(self.pco_created_checkbox_id)
    def click_payment_data_tab(self):
        self.click(self.payment_data_tab_id)
    def advanced_to_payment_data(self):
        '''Return True once the payment-data table header checkbox is reachable.'''
        return self.advanced(self.payment_data_table_checkbox_id)
class PaymentData(PageObject):
    '''"Dados de Pagamento" tab: recipient/value rows and the pre-doc
    button.  Locators address row 0 of the payment list.'''
    include_row_btn_id = 'form_manterDocumentoHabil:lista_DPgtoOB_painel_incluir'
    recipient_input_id = 'form_manterDocumentoHabil:lista_DPgtoOB:0:codigoFavorecido_input'
    value_input_id = 'form_manterDocumentoHabil:lista_DPgtoOB:0:valorPredoc_input'
    confirm_row_btn_id = 'form_manterDocumentoHabil:lista_DPgtoOB_painel_confirmar'
    first_row_checkbox_id = 'form_manterDocumentoHabil:lista_DPgtoOB:0:siafiTableCheck_DP_OB_selecao'
    predoc_btn_id = 'form_manterDocumentoHabil:lista_DPgtoOB:0:btnPredoc'
    bank_code_input_id = 'form_manterDocumentoHabil:favorecido_banco_input'
    def __init__(self, driver):
        super().__init__(driver)
    def click_include_row_btn(self):
        self.click(self.include_row_btn_id)
    def did_it_include_row(self): # buggy
        # NOTE(review): flagged buggy by the author; the failure mode is not
        # visible from this file — verify against the live page
        return self.advanced(self.recipient_input_id)
    def set_recipient(self, recipient):
        self.set_(self.recipient_input_id, recipient)
    def set_value(self, value):
        self.set_(self.value_input_id, value)
    def check_recipient(self, expected_recipient):
        return self.check(self.recipient_input_id, expected_recipient)
    def check_value(self, expected_value):
        return self.check(self.value_input_id, expected_value)
    def click_confirm_row(self):
        self.click(self.confirm_row_btn_id)
    def is_first_row_created(self):
        '''Return True once the confirmed row's checkbox is clickable.'''
        return self.advanced(self.first_row_checkbox_id)
    def click_pre_doc(self):
        self.click(self.predoc_btn_id)
    def advanced_to_pre_doc(self):
        '''Return True once the pre-doc panel (bank code input) is reachable.'''
        return self.advanced(self.bank_code_input_id)
class PreDoc(PageObject):
    '''Pre-doc panel: recipient and government bank account details.'''
    bank_code_input_id = 'form_manterDocumentoHabil:favorecido_banco_input'
    branch_num_input_id = 'form_manterDocumentoHabil:favorecido_agencia_input'
    account_num_input_id = 'form_manterDocumentoHabil:favorecido_conta_input'
    gov_bank_code_input_id = 'form_manterDocumentoHabil:pagador_banco_input'
    gov_branch_num_input_id = 'form_manterDocumentoHabil:pagador_agencia_input'
    observation_textarea_id = 'form_manterDocumentoHabil:observacaoPredoc'
    confirm_btn_id = 'form_manterDocumentoHabil:btnConfirmarPredoc'
    cost_center_btn_id = 'form_manterDocumentoHabil:abaCentroCustoId' # gets intercepted
    cost_center_month_input_id = 'form_manterDocumentoHabil:cvMesReferenciaCentroCusto_input'
    def __init__(self, driver):
        super().__init__(driver)
    def set_bank_code(self, bank_code):
        self.set_(self.bank_code_input_id, bank_code)
    def set_branch_num(self, branch_num):
        self.set_(self.branch_num_input_id, branch_num)
    def set_account_num(self, account_num):
        self.set_(self.account_num_input_id, account_num)
    def set_gov_bank_code(self, gov_bank_code):
        self.set_(self.gov_bank_code_input_id, gov_bank_code)
    def set_gov_branch_num(self, gov_branch_num):
        self.set_(self.gov_branch_num_input_id, gov_branch_num)
    def set_observation(self, observation):
        self.set_(self.observation_textarea_id, observation)
    def check_bank_code(self, expected_bank_code):
        return self.check(self.bank_code_input_id, expected_bank_code)
    def check_branch_num(self, expected_branch_num):
        return self.check(self.branch_num_input_id, expected_branch_num)
    def check_account_num(self, expected_account_num):
        return self.check(self.account_num_input_id, expected_account_num)
    def check_gov_bank_code(self, expected_gov_bank_code):
        return self.check(self.gov_bank_code_input_id, expected_gov_bank_code)
    def check_gov_branch_num(self, expected_gov_branch_num):
        return self.check(self.gov_branch_num_input_id, expected_gov_branch_num)
    def check_observation(self, expected_observation):
        return self.check(self.observation_textarea_id, expected_observation)
    def click_confirm_btn(self):
        self.click(self.confirm_btn_id)
    def is_predoc_confirmed(self):
        '''Return True once the cost-center tab button becomes clickable.'''
        return self.advanced(self.cost_center_btn_id)
    def click_cost_center(self):
        '''Click the cost-center tab, retrying while an overlay intercepts
        the click.

        FIX: the previous implementation looped forever; per the original
        TODO, retries are now bounded and the exception is re-raised when
        the button never becomes clickable.
        '''
        attempts = 50  # ~5 seconds worst case at 0.1s per retry
        for attempt in range(attempts):
            try:
                self.click(self.cost_center_btn_id)
                return
            except ElementClickInterceptedException:
                print('click intercepted')
                if attempt == attempts - 1:
                    # give callers a real failure instead of hanging
                    raise
                time.sleep(0.1)
    def advanced_to_cost_center(self):
        '''Return True once the cost-center month input is reachable.'''
        return self.advanced(self.cost_center_month_input_id)
class CostCenter(PageObject):
    '''Cost-center tab: link the document's value to a reference month/year.'''
    first_row_checkbox_id = 'form_manterDocumentoHabil:consolidado_dataTable:0:consolidado_subTable:0:consolidado_checkBox__'
    month_input_id = 'form_manterDocumentoHabil:cvMesReferenciaCentroCusto_input'
    year_input_id = 'form_manterDocumentoHabil:cvAnoReferenciaCentroCusto_input'
    include_btn_id = 'form_manterDocumentoHabil:btnIncluirNovoVinculoCentroCusto'
    total_cost_text_id = 'form_manterDocumentoHabil:centroCusto_informado_toogle_panel_valor_total'
    def __init__(self, driver):
        super().__init__(driver)
    def click_first_row_checkbox(self):
        self.click(self.first_row_checkbox_id)
    def set_month(self, month):
        self.set_(self.month_input_id, month)
    def set_year(self, year):
        self.set_(self.year_input_id, year)
    def check_month(self, expected_month):
        return self.check(self.month_input_id, expected_month)
    def check_year(self, expected_year):
        return self.check(self.year_input_id, expected_year)
    def click_confirm(self):
        self.click(self.include_btn_id)
    def check_total_cost(self, expected_value):
        '''Wait until the displayed total equals expected_value; True on success.'''
        # compares the rendered innerHTML, so expected_value must match the
        # page's own number formatting
        condition = lambda d: d.find_element(By.ID, self.total_cost_text_id).get_attribute('innerHTML') == expected_value
        self.wait.until(condition)
        return True
class Register(PageObject):
register_dh_btn_id = 'form_manterDocumentoHabil:btnRegistrar'
notification_text_id = 'form_manterDocumentoHabil:tableNsGeradas:0:outOrigem'
return_btn_id = 'form_manterDocumentoHabil:btnRetornarResultadoRegistrar'
dh_num_text_id = 'form_manterDocumentoHabil:numeroDocumentoHabil_outputText'
def __init__(self, driver):
super().__init__(driver)
def click_register_dh(self):
self.click(self.register_dh_btn_id)
def advanced_to_registered_panel(self):
return self.advanced(self.notification_text_id)
def extract_string_in_class_legend(self): # delegate to some string processing class
elem = | |
'404151004',
'404152006',
'404153001',
'404154007',
'404155008',
'404157000',
'404169008',
'404172001',
'413389003',
'413441006',
'413442004',
'413537009',
'413656006',
'413842007',
'413843002',
'413847001',
'414166008',
'414780005',
'414785000',
'414791003',
'415112005',
'415283002',
'415284008',
'415285009',
'415286005',
'415287001',
'420302007',
'420519005',
'420788006',
'421246008',
'421283008',
'421835000',
'422052002',
'422853008',
'425688002',
'425749006',
'425869007',
'425941003',
'426071002',
'426124006',
'426217000',
'426248008',
'426336007',
'426370008',
'426642002',
'426885008',
'427056005',
'427141003',
'427642009',
'427658007',
'430338009',
'441559006',
'441962003',
'442537007',
'443487006',
'444597005',
'444910004',
'444911000',
'445227008',
'445269007',
'445406001',
'445448008',
'447100004',
'447656001',
'447658000',
'447766003',
'447805007',
'447806008',
'447989004',
'448212009',
'448213004',
'448217003',
'448220006',
'448231003',
'448254007',
'448269008',
'448317000',
'448319002',
'448354009',
'448371005',
'448372003',
'448376000',
'448384001',
'448386004',
'448387008',
'448447004',
'448465000',
'448468003',
'448553002',
'448555009',
'448560008',
'448561007',
'448607004',
'448609001',
'448663003',
'448666006',
'448672006',
'448709005',
'448738008',
'448774004',
'448865007',
'448867004',
'448995000',
'449053004',
'449058008',
'449059000',
'449063007',
'449065000',
'449072004',
'449074003',
'449075002',
'449108003',
'449173006',
'449176003',
'449177007',
'449216004',
'449217008',
'449218003',
'449219006',
'449220000',
'449221001',
'449222008',
'449292003',
'449307001',
'449318001',
'449386007',
'449418000',
'449419008',
'450521000124109',
'58961005',
'61291000119103',
'61301000119102',
'61311000119104',
'61321000119106',
'698646006',
'699657009',
'699818003',
'702446006',
'702476004',
'702785000',
'702786004',
'702977001',
'703387000',
'703626001',
'705061009',
'709471005',
'713325002',
'713483007',
'713516007',
'713718006',
'713897006',
'714251006',
'714463003',
'715664005',
'715950008',
'716788007',
'718195003',
'718200007',
'721302006',
'721303001',
'721304007',
'721305008',
'721306009',
'721308005',
'721310007',
'721313009',
'721314003',
'721555001',
'721762007',
'722795004',
'722953004',
'722954005',
'723889003',
'724644005',
'724645006',
'724647003',
'724648008',
'724649000',
'724650000',
'725390002',
'725437002',
'726721002',
'733598001',
'733627006',
'734066005',
'84811000119107',
'84831000119102',
'91854005',
'91855006',
'91856007',
'91857003',
'91858008',
'91860005',
'91861009',
'92508006',
'92509003',
'92510008',
'92511007',
'92512000',
'92513005',
'92514004',
'92515003',
'92516002',
'92811003',
'92812005',
'92813000',
'92814006',
'92817004',
'92818009',
'93133006',
'93134000',
'93135004',
'93136003',
'93137007',
'93138002',
'93139005',
'93140007',
'93141006',
'93142004',
'93143009',
'93144003',
'93145002',
'93146001',
'93147005',
'93148000',
'93149008',
'93150008',
'93151007',
'93152000',
'93169003',
'93182006',
'93183001',
'93184007',
'93185008',
'93186009',
'93187000',
'93188005',
'93189002',
'93190006',
'93191005',
'93192003',
'93193008',
'93194002',
'93195001',
'93196000',
'93197009',
'93198004',
'93199007',
'93200005',
'93201009',
'93202002',
'93203007',
'93204001',
'93205000',
'93206004',
'93207008',
'93208003',
'93450001',
'93451002',
'93487009',
'93488004',
'93489007',
'93492006',
'93493001',
'93494007',
'93495008',
'93496009',
'93497000',
'93498005',
'93500006',
'93501005',
'93505001',
'93506000',
'93507009',
'93509007',
'93510002',
'93514006',
'93515007',
'93516008',
'93518009',
'93519001',
'93520007',
'93521006',
'93522004',
'93523009',
'93524003',
'93525002',
'93526001',
'93527005',
'93528000',
'93530003',
'93531004',
'93532006',
'93533001',
'93534007',
'93536009',
'93537000',
'93541001',
'93542008',
'93543003',
'93545005',
'93546006',
'93547002',
'93548007',
'93549004',
'93550004',
'93551000',
'93552007',
'93554008',
'93555009',
'94071006',
'94148006',
'94686001',
'94687005',
'94688000',
'94690004',
'94707004',
'94708009',
'94709001',
'94710006',
'94711005',
'94712003',
'94714002',
'94715001',
'94716000',
'94718004',
'94719007',
'95186006',
'95187002',
'95188007',
'95192000',
'95193005',
'95194004',
'95209008',
'95210003',
'95224004',
'95225003',
'95226002',
'95230004',
'95231000',
'95260009',
'95261008',
'95263006',
'95264000'
}
class Mammography(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent mammography tests.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Diagnostic Study, Performed.
    **Inclusion Criteria:** Includes only relevant concepts associated with screening or diagnostic mammography. This is a grouping of HCPCS and LOINC codes.
    **Exclusion Criteria:** No exclusions.
    """
    OID = '2.16.840.1.113883.3.464.1003.108.12.1018'
    VALUE_SET_NAME = 'Mammography'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # HCPCS Level II procedure codes for screening/diagnostic mammography
    HCPCSLEVELII = {
        'G0202',
        'G0204',
        'G0206'
    }
    # LOINC codes for mammography studies
    LOINC = {
        '24604-1',
        '24605-8',
        '24606-6',
        '24610-8',
        '26175-0',
        '26176-8',
        '26177-6',
        '26287-3',
        '26289-9',
        '26291-5',
        '26346-7',
        '26347-5',
        '26348-3',
        '26349-1',
        '26350-9',
        '26351-7',
        '36319-2',
        '36625-2',
        '36626-0',
        '36627-8',
        '36642-7',
        '36962-9',
        '37005-6',
        '37006-4',
        '37016-3',
        '37017-1',
        '37028-8',
        '37029-6',
        '37030-4',
        '37037-9',
        '37038-7',
        '37052-8',
        '37053-6',
        '37539-4',
        '37542-8',
        '37543-6',
        '37551-9',
        '37552-7',
        '37553-5',
        '37554-3',
        '37768-9',
        '37769-7',
        '37770-5',
        '37771-3',
        '37772-1',
        '37773-9',
        '37774-7',
        '37775-4',
        '38070-9',
        '38071-7',
        '38072-5',
        '38090-7',
        '38091-5',
        '38807-4',
        '38820-7',
        '38854-6',
        '38855-3',
        '39150-8',
        '39152-4',
        '39153-2',
        '39154-0',
        '42168-5',
        '42169-3',
        '42174-3',
        '42415-0',
        '42416-8',
        '46335-6',
        '46336-4',
        '46337-2',
        '46338-0',
        '46339-8',
        '46342-2',
        '46350-5',
        '46351-3',
        '46354-7',
        '46355-4',
        '46356-2',
        '46380-2',
        '48475-8',
        '48492-3',
        '69150-1',
        '69251-7',
        '69259-0',
        '72137-3',
        '72138-1',
        '72139-9',
        '72140-7',
        '72141-5',
        '72142-3',
        '86462-9',
        '86463-7'
    }
class MarfansSyndrome(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent a diagnosis of Marfan's syndrome.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with diagnosis codes for Marfan's syndrome with or without cardiovascular, ocular, skeletal, or epithelial manifestations. This is a grouping of ICD-9-CM, ICD-10-CM and SNOMED CT codes.
    **Exclusion Criteria:** No exclusions.
    """
    OID = '2.16.840.1.113883.3.464.1003.113.12.1048'
    VALUE_SET_NAME = "Marfan's Syndrome"
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes (stored without the decimal point)
    ICD10CM = {
        'Q8740',
        'Q87410',
        'Q87418',
        'Q8742',
        'Q8743'
    }
    # ICD-9-CM diagnosis codes (stored without the decimal point)
    ICD9CM = {
        '75982'
    }
    # SNOMED CT concept identifiers
    SNOMEDCT = {
        '15960021000119107',
        '15993551000119100',
        '16055631000119106',
        '19346006',
        '234035006',
        '57201002',
        '763839005'
    }
class Measles(ValueSet):
    """
    **Clinical Focus:** This value set contains concepts that represent measles infections.
    **Data Element Scope:** This value set may use the Quality Data Model (QDM) category related to Diagnosis.
    **Inclusion Criteria:** Includes only relevant concepts associated with measles infections. This is a grouping of ICD-10-CM and SNOMED CT codes.
    **Exclusion Criteria:** No exclusions.
    """
    OID = '2.16.840.1.113883.3.464.1003.110.12.1053'
    VALUE_SET_NAME = 'Measles'
    EXPANSION_VERSION = 'eCQM Update 2020-05-07'
    # ICD-10-CM diagnosis codes (stored without the decimal point)
    ICD10CM = {
        'B050',
        'B051',
        'B052',
        'B053',
        'B054',
        'B0581',
        'B0589',
        'B059'
    }
    # SNOMED CT concept identifiers
    SNOMEDCT = {
        '105841000119101',
        '111873003',
        '14189004',
        '161419000',
        '186561002',
        '186562009',
        '195900001',
        '240483006',
        '240484000',
        '28463004',
        '371111005',
        '38921001',
        '406592004',
        '60013002',
        '74918002'
    }
class MentalHealthDiagnoses(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent a mental health diagnoses.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) attribute related to Principal Diagnosis.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying mental health diagnoses.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.105.12.1004'
VALUE_SET_NAME = 'Mental Health Diagnoses'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
ICD10CM = {
'F0390',
'F0391',
'F200',
'F201',
'F202',
'F203',
'F205',
'F2081',
'F2089',
'F209',
'F21',
'F22',
'F23',
'F24',
'F250',
'F251',
'F258',
'F259',
'F28',
'F29',
'F3010',
'F3011',
'F3012',
'F3013',
'F302',
'F303',
'F304',
'F308',
'F309',
'F310',
'F3110',
'F3111',
'F3112',
'F3113',
'F312',
'F3130',
'F3131',
'F3132',
'F314',
'F315',
'F3160',
'F3161',
'F3162',
'F3163',
'F3164',
'F3170',
'F3171',
'F3172',
'F3173',
'F3174',
'F3175',
'F3176',
'F3177',
'F3178',
'F3181',
'F3189',
'F319',
'F320',
'F321',
'F322',
'F323',
'F324',
'F325',
'F328',
'F3281',
'F3289',
'F329',
'F330',
'F331',
'F332',
'F333',
'F3340',
'F3341',
'F3342',
'F338',
'F339',
'F340',
'F341',
'F348',
'F3481',
'F3489',
'F349',
'F39',
'F4000',
'F4001',
'F4002',
'F4010',
'F4011',
'F40210',
'F40218',
'F40220',
'F40228',
'F40230',
'F40231',
'F40232',
'F40233',
'F40240',
'F40241',
'F40242',
'F40243',
'F40248',
'F40290',
'F40291',
'F40298',
'F408',
'F409',
'F410',
'F411',
'F413',
'F418',
'F419',
'F42',
'F422',
'F423',
'F424',
'F428',
'F429',
'F430',
'F4310',
'F4311',
'F4312',
'F4320',
'F4321',
'F4322',
'F4323',
'F4324',
'F4325',
'F4329',
'F438',
'F439',
'F440',
'F441',
'F442',
'F444',
'F445',
'F446',
'F447',
'F4481',
'F4489',
'F449',
'F450',
'F451',
'F4520',
'F4521',
'F4522',
'F4529',
'F4541',
'F4542',
'F458',
'F459',
'F481',
'F482',
'F488',
'F489',
'F5000',
'F5001',
'F5002',
'F502',
'F508',
'F5081',
'F5082',
'F5089',
'F509',
'F5101',
'F5102',
'F5103',
'F5104',
'F5105',
'F5109',
'F5111',
'F5112',
'F5113',
'F5119',
'F513',
'F514',
'F515',
'F518',
'F519',
'F520',
'F521',
'F5221',
'F5222',
'F5231',
'F5232',
'F524',
'F525',
'F526',
'F528',
'F529',
'F53',
'F530',
'F531',
'F59',
'F600',
'F601',
'F602',
'F603',
'F604',
'F605',
'F606',
'F607',
'F6081',
'F6089',
'F609',
'F630',
'F631',
'F632',
'F633',
'F6381',
'F6389',
'F639',
'F640',
'F641',
'F642',
'F648',
'F649',
'F650',
'F651',
'F652',
'F653',
'F654',
'F6550',
'F6551',
'F6552',
'F6581',
'F6589',
'F659',
'F66',
'F6810',
'F6811',
'F6812',
'F6813',
'F688',
'F68A',
'F69',
'F800',
'F801',
'F802',
'F804',
'F8081',
'F8082',
'F8089',
'F809',
'F810',
'F812',
'F8181',
'F8189',
'F819',
'F82',
'F840',
'F842',
'F843',
'F845',
'F848',
'F849',
'F88',
'F89',
'F900',
'F901',
'F902',
'F908',
'F909',
'F910',
'F911',
'F912',
'F913',
'F918',
'F919',
'F930',
'F938',
'F939',
'F940',
'F941',
'F942',
'F948',
'F949',
'F950',
'F951',
'F952',
'F958',
'F959',
'F980',
'F981',
'F9821',
'F9829',
'F983',
'F984',
'F985',
'F988',
'F989',
'F99'
}
SNOMEDCT = {
'101421000119107',
'10211000132109',
'10278007',
'10327003',
'10349009',
'104851000119103',
'10532003',
'105421000119105',
'10586006',
'106021000119105',
'10743001000119103',
'10760421000119102',
'10760461000119107',
'10783000',
'10811121000119102',
'10811161000119107',
'10835871000119104',
'10875004',
'109006',
'109805003',
'10981006',
'109956006',
'111477005',
'111480006',
'111482003',
'111483008',
'111484002',
'111485001',
'111486000',
'111487009',
'111490003',
'111491004',
'111492006',
'1145003',
'11806006',
'11941006',
'1196001',
'12277671000119109',
'12277711000119108',
'12348006',
'12367551000119100',
'126943008',
'12939007',
'129562004',
'129602009',
'129604005',
'129605006',
'129606007',
'12969000',
'130121000119104',
'133091000119105',
'133121000119109',
'13313007',
'133991000119109',
'13438001',
'13581000',
'13601005',
'13670005',
'13746004',
'1376001',
'1380006',
'1383008',
'14070001',
'14077003',
'14144000',
'14183003',
'141991000119109',
'142001000119106',
'142011000119109',
'14291003',
'14495005',
'1499003',
'15193003',
'153071000119108',
'15639000',
'15662003',
'1581000119101',
'15840001',
'1591000119103',
'15921731000119106',
'15945005',
'15977008',
'162004',
'16219201000119101',
'162218007',
'162313000',
'16265701000119107',
'16265951000119109',
'16266831000119100',
'16266991000119108',
'162722001',
'16276361000119109',
'16295005',
'16506000',
'16805009',
'1686006',
'16966009',
'16990005',
'17155009',
'17226007',
'17262008',
'1740001000004102',
'17496003',
'17782008',
'17961008',
'18003009',
'18085000',
'1816003',
'18186001',
'18193002',
'18260003',
'18393005',
'18478005',
'1855002',
'18573003',
'187921002',
'18818009',
'18941000',
'191447007',
'191449005',
'191451009',
'191452002',
'191454001',
'191455000',
'191457008',
'191458003',
'191459006',
'191461002',
'191463004',
'191464005',
'191465006',
'191466007',
'191471000',
'191478006',
'191483003',
'191484009',
'191485005',
'191486006',
'191493005',
'191494004',
'191495003',
'191496002',
'191499009',
'191519005',
'191525009',
'191526005',
'191527001',
'191530008',
'191531007',
'191538001',
'191539009',
'191542003',
'191547009',
'191548004',
'191554003',
'191555002',
'191559008',
'191561004',
'191562006',
'191563001',
'191564007',
'191565008',
'191567000',
'191569002',
'191570001',
'191571002',
'191572009',
'191574005',
'191577003',
'191583000',
'191584006',
'191586008',
'191588009',
'191590005',
'191592002',
'191593007',
'191595000',
'191597008',
'191601008',
'191602001',
'191604000',
'191606003',
'191610000',
'191611001',
'191613003',
'191615005',
'191616006',
'191618007',
'191620005',
'191621009',
'191623007',
'191625000',
'191627008',
'191629006',
'191630001',
'191632009',
'191634005',
'191636007',
| |
self._scalar_dict[tag] = []
self._scalar_dict[tag].append([timestamp, global_step, float(scalar_value)])
    def get_logdir(self):
        """Return the logging directory used by this `SummaryWriter`'s
        underlying event file writer."""
        return self._file_writer.get_logdir()
def add_scalar(self, tag, value, global_step=None):
"""Adds scalar data to the event file.
Parameters
----------
tag : str
Name for the scalar plot.
value : float, tuple, list, or dict
If value is a float, the corresponding curve would have no name attached in the
plot.
If value is a tuple or list, it must have two elements with the first one
representing the name of the value and the second one as the float value. The
name of the value will be attached to the corresponding curve in the plot. This
is useful when users want to draw multiple curves in the same plot. It internally
calls `_add_scalars`.
If value is a dict, it's a mapping from strs to float values, with strs
representing the names of the float values. This is convenient when users want
to log a collection of float values with different names for visualizing them in
the same plot without repeatedly calling `add_scalar` for each value. It internally
calls `_add_scalars`.
global_step : int
Global step value to record.
Examples
--------
>>> import numpy as np
>>> from mxboard import SummaryWriter
>>> xs = np.arange(start=0, stop=2 * np.pi, step=0.01)
>>> y_sin = np.sin(xs)
>>> y_cos = np.cos(xs)
>>> y_exp_sin = np.exp(y_sin)
>>> y_exp_cos = np.exp(y_cos)
>>> y_sin2 = y_sin * y_sin
>>> with SummaryWriter(logdir='./logs') as sw:
>>> for x, y1, y2, y3, y4, y5 in zip(xs, y_sin, y_cos, y_exp_sin, y_exp_cos, y_sin2):
>>> sw.add_scalar('curves', {'sin': y1, 'cos': y2}, x * 100)
>>> sw.add_scalar('curves', ('exp(sin)', y3), x * 100)
>>> sw.add_scalar('curves', ['exp(cos)', y4], x * 100)
>>> sw.add_scalar('curves', y5, x * 100)
"""
if isinstance(value, (tuple, list, dict)):
if isinstance(value, (tuple, list)):
if len(value) != 2:
raise ValueError('expected two elements in value, while received %d'
% len(value))
value = {value[0]: value[1]}
self._add_scalars(tag, value, global_step)
else:
self._file_writer.add_summary(scalar_summary(tag, value), global_step)
self._append_to_scalar_dict(self.get_logdir() + '/' + tag,
value, global_step, time.time())
def _add_scalars(self, tag, scalar_dict, global_step=None):
"""Adds multiple scalars to summary. This enables drawing multiple curves in one plot.
Parameters
----------
tag : str
Name for the plot.
scalar_dict : dict
Values to be saved.
global_step : int
Global step value to record.
"""
timestamp = time.time()
fw_logdir = self._file_writer.get_logdir()
for scalar_name, scalar_value in scalar_dict.items():
fw_tag = fw_logdir + '/' + tag + '/' + scalar_name
if fw_tag in self._all_writers.keys():
fw = self._all_writers[fw_tag]
else:
fw = FileWriter(logdir=fw_tag, max_queue=self._max_queue,
flush_secs=self._flush_secs, filename_suffix=self._filename_suffix,
verbose=self._verbose)
self._all_writers[fw_tag] = fw
fw.add_summary(scalar_summary(tag, scalar_value), global_step)
self._append_to_scalar_dict(fw_tag, scalar_value, global_step, timestamp)
def export_scalars(self, path):
"""Exports to the given path an ASCII file containing all the scalars written
so far by this instance, with the following format:
{writer_id : [[timestamp, step, value], ...], ...}
"""
if os.path.exists(path) and os.path.isfile(path):
logging.warning('%s already exists and will be overwritten by scalar dict', path)
with open(path, "w") as f:
json.dump(self._scalar_dict, f)
def clear_scalar_dict(self):
"""Empties scalar dictionary."""
self._scalar_dict = {}
def add_histogram(self, tag, values, global_step=None, bins='default'):
"""Add histogram data to the event file.
Note: This function internally calls `asnumpy()` if `values` is an MXNet NDArray.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet engine.
Parameters
----------
tag : str
Name for the `values`.
values : MXNet `NDArray` or `numpy.ndarray`
Values for building histogram.
global_step : int
Global step value to record.
bins : int or sequence of scalars or str
If `bins` is an int, it defines the number equal-width bins in the range
`(values.min(), values.max())`.
If `bins` is a sequence, it defines the bin edges, including the rightmost edge,
allowing for non-uniform bin width.
If `bins` is a str equal to 'default', it will use the bin distribution
defined in TensorFlow for building histogram.
Ref: https://www.tensorflow.org/programmers_guide/tensorboard_histograms
The rest of supported strings for `bins` are 'auto', 'fd', 'doane', 'scott',
'rice', 'sturges', and 'sqrt'. etc. See the documentation of `numpy.histogram`
for detailed definitions of those strings.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
"""
if bins == 'default':
bins = self._get_default_bins()
self._file_writer.add_summary(histogram_summary(tag, values, bins), global_step)
def add_image(self, tag, image, global_step=None):
"""Add image data to the event file.
This function supports input as a 2D, 3D, or 4D image.
If the input image is 2D, a channel axis is prepended as the first dimension
and image will be replicated three times and concatenated along the channel axis.
If the input image is 3D, it will be replicated three times and concatenated along
the channel axis. If the input image is 4D, which is a batch images, all the
images will be spliced as a sprite image for display.
Note: This function requires the ``pillow`` package.
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet engine.
Parameters
----------
tag : str
Name for the `image`.
image : MXNet `NDArray` or `numpy.ndarray`
Image is one of the following formats: (H, W), (C, H, W), (N, C, H, W).
If the input is a batch of images, a grid of images is made by stitching them
together.
If data type is float, values must be in range [0, 1], and then they are
rescaled to range [0, 255]. Note that this does not change the values of the
input `image`. A copy of the input `image` is created instead.
If data type is 'uint8`, values are unchanged.
global_step : int
Global step value to record.
"""
self._file_writer.add_summary(image_summary(tag, image), global_step)
def add_audio(self, tag, audio, sample_rate=44100, global_step=None):
"""Add audio data to the event file.
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet engine.
Parameters
----------
tag : str
Name for the `audio`.
audio : MXNet `NDArray` or `numpy.ndarray`
Audio data squeezable to a 1D tensor. The values of the tensor are in the range
`[-1, 1]`.
sample_rate : int
Sample rate in Hz.
global_step : int
Global step value to record.
"""
self._file_writer.add_summary(audio_summary(tag, audio, sample_rate=sample_rate),
global_step)
def add_text(self, tag, text, global_step=None):
"""Add text data to the event file.
Parameters
----------
tag : str
Name for the `text`.
text : str
Text to be saved to the event file.
global_step : int
Global step value to record.
"""
self._file_writer.add_summary(text_summary(tag, text), global_step)
if tag not in self._text_tags:
self._text_tags.append(tag)
extension_dir = self.get_logdir() + '/plugins/tensorboard_text/'
if not os.path.exists(extension_dir):
os.makedirs(extension_dir)
with open(extension_dir + 'tensors.json', 'w') as fp:
json.dump(self._text_tags, fp)
def add_embedding(self, tag, embedding, labels=None, images=None, global_step=None):
"""Adds embedding projector data to the event file. It will also create a config file
used by the embedding projector in TensorBoard. The folder containing the embedding
data is named using the formula:
If global_step is not None, the folder name is `tag + '_' + str(global_step).zfill(6)`;
else, the folder name is `tag`.
For example, tag = 'mnist', global_step = 12, the folder's name is 'mnist_000012';
when global_step = None, the folder's name is 'mnist'.
See the following reference for the meanings of labels and images.
Ref: https://www.tensorflow.org/versions/r1.2/get_started/embedding_viz
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet engine.
Parameters
----------
tag : str
Name for the `embedding`.
embedding : MXNet `NDArray` or `numpy.ndarray`
A matrix whose each row is the feature vector of a data point.
labels : MXNet `NDArray` or `numpy.ndarray` or a list of elements convertible to str.
Labels corresponding to the data points in the `embedding`. If the labels | |
gas = CEOSGas(PRMIX, eos_kwargs=eos_kwargs, HeatCapacityGases=properties.HeatCapacityGases)
>>> liquid = CEOSLiquid(PRMIX, eos_kwargs=eos_kwargs, HeatCapacityGases=properties.HeatCapacityGases)
>>> flasher = FlashVL(constants, properties, liquid=liquid, gas=gas)
>>> zs = [0.965, 0.018, 0.017]
>>> PT = flasher.flash(T=110.0, P=1e5, zs=zs)
>>> PT.VF, PT.gas.zs, PT.liquid0.zs
(0.10365, [0.881788, 2.6758e-05, 0.11818], [0.97462, 0.02007, 0.005298])
A few more flashes with the same system to showcase the functionality
of the :obj:`flash <Flash.flash>` interface:
>>> flasher.flash(P=1e5, VF=1, zs=zs).T
133.6
>>> flasher.flash(T=133, VF=0, zs=zs).P
518367.4
>>> flasher.flash(P=PT.P, H=PT.H(), zs=zs).T
110.0
>>> flasher.flash(P=PT.P, S=PT.S(), zs=zs).T
110.0
>>> flasher.flash(T=PT.T, H=PT.H(), zs=zs).T
110.0
>>> flasher.flash(T=PT.T, S=PT.S(), zs=zs).T
110.0
References
----------
.. [1] Michelsen, <NAME>., and <NAME>. Thermodynamic Models:
Fundamentals & Computational Aspects. Tie-Line Publications, 2007.
.. [2] <NAME>., <NAME>, and <NAME>. The
Properties of Gases and Liquids. 5th edition. New York: McGraw-Hill
Professional, 2000.
.. [3] Gmehling, Jürgen, <NAME>, <NAME>, and <NAME>.
Chemical Thermodynamics for Process Simulation. <NAME> & Sons, 2019.
'''
PT_SS_MAXITER = 5000
PT_SS_TOL = 1e-13
# Settings for near-boundary conditions
PT_SS_POLISH_TOL = 1e-25
PT_SS_POLISH = True
PT_SS_POLISH_VF = 1e-6 # 5e-8
PT_SS_POLISH_MAXITER = 1000
SS_2P_STAB_HIGHEST_COMP_DIFF = False
SS_2P_STAB_COMP_DIFF_MIN = None
PT_methods = [PT_SS, PT_SS_MEHRA, PT_SS_GDEM3, PT_NEWTON_lNKVF]
PT_algorithms = [sequential_substitution_2P, sequential_substitution_Mehra_2P,
sequential_substitution_GDEM3_2P, nonlin_2P_newton]
PT_STABILITY_MAXITER = 500 # 30 good professional default; 500 used in source DTU
PT_STABILITY_XTOL = 5E-9 # 1e-12 was too strict; 1e-10 used in source DTU; 1e-9 set for some points near critical where convergence stopped; even some more stopped at higher Ts
SS_ACCELERATION = False
SS_acceleration_method = None
VF_guess_methods = [WILSON_GUESS, IDEAL_PSAT, TB_TC_GUESS]
dew_bubble_flash_algos = [dew_bubble_Michelsen_Mollerup, dew_bubble_newton_zs,
SS_VF_simultaneous]
dew_T_flash_algos = bubble_T_flash_algos = dew_bubble_flash_algos
dew_P_flash_algos = bubble_P_flash_algos = dew_bubble_flash_algos
VF_flash_algos = [SS_VF_simultaneous]
DEW_BUBBLE_QUASI_NEWTON_XTOL = 1e-8
DEW_BUBBLE_NEWTON_XTOL = 1e-5
DEW_BUBBLE_QUASI_NEWTON_MAXITER = 200
DEW_BUBBLE_NEWTON_MAXITER = 200
TPV_HSGUA_BISECT_XTOL = 1e-9
TPV_HSGUA_BISECT_YTOL = 1e-6
TPV_HSGUA_BISECT_YTOL_ONLY = True
TPV_HSGUA_NEWTON_XTOL = 1e-9
TPV_HSGUA_NEWTON_MAXITER = 1000
TPV_HSGUA_NEWTON_SOLVER = 'hybr'
HSGUA_NEWTON_ANALYTICAL_JAC = True
solids = None
skip_solids = True
K_composition_independent = False
max_liquids = 1
max_phases = 2
def __init__(self, constants, correlations, gas, liquid, settings=default_settings):
self.constants = constants
self.correlations = correlations
self.liquid = liquid
self.liquids = liquids = [liquid]
self.gas = gas
self.settings = settings
self.N = constants.N
self.cmps = constants.cmps
self.stab = StabilityTester(Tcs=constants.Tcs, Pcs=constants.Pcs, omegas=constants.omegas)
self.flash_pure = FlashPureVLS(constants=constants, correlations=correlations,
gas=gas, liquids=[liquid], solids=[],
settings=settings)
self.K_composition_independent = gas.composition_independent and liquid.composition_independent
self.ideal_gas_basis = gas.ideal_gas_basis and liquid.ideal_gas_basis
if gas is None:
raise ValueError("Gas model is required")
if liquid is None:
raise ValueError("Liquid model is required")
#
self.phases = [gas, liquid]
liquids_to_unique_liquids = []
unique_liquids, unique_liquid_hashes = [], []
for i, l in enumerate(liquids):
h = l.model_hash()
if h not in unique_liquid_hashes:
unique_liquid_hashes.append(h)
unique_liquids.append(l)
liquids_to_unique_liquids.append(i)
else:
liquids_to_unique_liquids.append(unique_liquid_hashes.index(h))
if gas:
gas_hash = gas.model_hash(True)
gas_to_unique_liquid = None
for i, l in enumerate(liquids):
h = l.model_hash(True)
if gas_hash == h:
gas_to_unique_liquid = liquids_to_unique_liquids[i]
break
self.gas_to_unique_liquid = gas_to_unique_liquid
self.liquids_to_unique_liquids = liquids_to_unique_liquids
self.unique_liquids = unique_liquids
self.unique_liquid_count = len(unique_liquids)
self.unique_phases = [gas] + unique_liquids
self.unique_phase_count = 1 + self.unique_liquid_count
self.unique_liquid_hashes = unique_liquid_hashes
def flash_TVF(self, T, VF, zs, solution=None, hot_start=None):
return self.flash_TVF_2P(T, VF, zs, self.liquid, self.gas, solution=solution, hot_start=hot_start)
    def flash_TVF_2P(self, T, VF, zs, liquid, gas, solution=None, hot_start=None):
        """Two-phase flash at fixed temperature `T` and vapor fraction `VF`,
        iterating on pressure.

        Returns a tuple ``(P, l, g, iterations, err)`` where `l` and `g` are
        the converged liquid and gas phase objects.

        Only the integral cases VF == 0.0 (bubble point) and VF == 1.0
        (dew point) are implemented here; intermediate vapor fractions raise
        NotImplementedError.
        """
        if self.K_composition_independent:
            # Assume pressure independent for guess
            P, xs, ys, iterations, err = solve_T_VF_IG_K_composition_independent(VF, T, zs, gas, liquid, xtol=1e-10)
            l, g = liquid.to(T=T, P=P, zs=xs), gas.to(T=T, P=P, zs=ys)
            return P, l, g, iterations, err
        constants, correlations = self.constants, self.correlations
        dew_bubble_xtol = self.DEW_BUBBLE_QUASI_NEWTON_XTOL
        dew_bubble_newton_xtol = self.DEW_BUBBLE_NEWTON_XTOL
        dew_bubble_maxiter = self.DEW_BUBBLE_QUASI_NEWTON_MAXITER
        if hot_start is not None:
            # Reuse a previous solution as the initial guess.
            P, xs, ys = hot_start.P, hot_start.liquid0.zs, hot_start.gas.zs
        else:
            # Try each guess method in order until one succeeds.
            # NOTE(review): if every guess method raises, P/xs/ys stay
            # unbound and a NameError follows below — TODO confirm intended.
            for method in self.VF_guess_methods:
                try:
                    # The Newton guess method uses its own (looser) tolerance.
                    if method is dew_bubble_newton_zs:
                        xtol = dew_bubble_newton_xtol
                    else:
                        xtol = dew_bubble_xtol
                    _, P, _, xs, ys = TP_solve_VF_guesses(zs=zs, method=method, constants=constants,
                                                          correlations=correlations, T=T, VF=VF,
                                                          xtol=xtol, maxiter=dew_bubble_maxiter)
                    break
                except Exception as e:
                    print(e)
        if VF == 1.0:
            # Dew point: incipient liquid; iterate its composition from xs.
            dew = True
            integral_VF = True
            comp_guess = xs
            algos = self.dew_T_flash_algos
        elif VF == 0.0:
            # Bubble point: incipient gas; iterate its composition from ys.
            dew = False
            integral_VF = True
            comp_guess = ys
            algos = self.bubble_T_flash_algos
        else:
            integral_VF = False
            algos = self.VF_flash_algos
        if integral_VF:
            # Try each flash algorithm in order until one converges.
            # NOTE(review): if all algorithms raise, `sln` is unbound below.
            for algo in algos:
                try:
                    sln = algo(P, fixed_val=T, zs=zs, liquid_phase=liquid, gas_phase=gas,
                               iter_var='P', fixed_var='T', V_over_F=VF,
                               maxiter=dew_bubble_maxiter, xtol=dew_bubble_xtol,
                               comp_guess=comp_guess)
                    break
                except Exception as e:
                    print(e)
                    continue
            guess, comp_guess, iter_phase, const_phase, iterations, err = sln
            # The iterated phase is the incipient one: liquid at the dew
            # point, gas at the bubble point.
            if dew:
                l, g = iter_phase, const_phase
            else:
                l, g = const_phase, iter_phase
            return guess, l, g, iterations, err
        else:
            raise NotImplementedError("TODO")
def flash_PVF(self, P, VF, zs, solution=None, hot_start=None):
return self.flash_PVF_2P(P, VF, zs, self.liquid, self.gas, solution=solution, hot_start=hot_start)
    def flash_PVF_2P(self, P, VF, zs, liquid, gas, solution=None, hot_start=None):
        """Two-phase flash at fixed pressure `P` and vapor fraction `VF`,
        iterating on temperature.

        Returns a tuple ``(T, l, g, iterations, err)`` where `l` and `g` are
        the converged liquid and gas phase objects.

        Only the integral cases VF == 0.0 (bubble point) and VF == 1.0
        (dew point) are implemented here; intermediate vapor fractions raise
        NotImplementedError.
        """
        if self.K_composition_independent:
            # Assume pressure independent for guess
            T, xs, ys, iterations, err = solve_P_VF_IG_K_composition_independent(VF, P, zs, gas, liquid, xtol=1e-10)
            l, g = liquid.to(T=T, P=P, zs=xs), gas.to(T=T, P=P, zs=ys)
            return T, l, g, iterations, err
        constants, correlations = self.constants, self.correlations
        dew_bubble_xtol = self.DEW_BUBBLE_QUASI_NEWTON_XTOL
        dew_bubble_maxiter = self.DEW_BUBBLE_QUASI_NEWTON_MAXITER
        dew_bubble_newton_xtol = self.DEW_BUBBLE_NEWTON_XTOL
        if hot_start is not None:
            # Reuse a previous solution as the initial guess.
            T, xs, ys = hot_start.T, hot_start.liquid0.zs, hot_start.gas.zs
        else:
            # Try each guess method in order until one succeeds.
            # NOTE(review): if every guess method raises, T/xs/ys stay
            # unbound and a NameError follows below — TODO confirm intended.
            for method in self.VF_guess_methods:
                try:
                    # The Newton guess method uses its own (looser) tolerance.
                    if method is dew_bubble_newton_zs:
                        xtol = dew_bubble_newton_xtol
                    else:
                        xtol = dew_bubble_xtol
                    T, _, _, xs, ys = TP_solve_VF_guesses(zs=zs, method=method, constants=constants,
                                                          correlations=correlations, P=P, VF=VF,
                                                          xtol=xtol, maxiter=dew_bubble_maxiter)
                    break
                except Exception as e:
                    print(e)
        if VF == 1.0:
            # Dew point: incipient liquid; iterate its composition from xs.
            dew = True
            integral_VF = True
            comp_guess = xs
            algos = self.dew_P_flash_algos
        elif VF == 0.0:
            # Bubble point: incipient gas; iterate its composition from ys.
            dew = False
            integral_VF = True
            comp_guess = ys
            algos = self.bubble_P_flash_algos
        else:
            integral_VF = False
            algos = self.VF_flash_algos
        if integral_VF:
            # Try each flash algorithm in order until one converges.
            # NOTE(review): if all algorithms raise, `sln` is unbound below.
            for algo in algos:
                try:
                    sln = algo(T, fixed_val=P, zs=zs, liquid_phase=liquid, gas_phase=gas,
                               iter_var='T', fixed_var='P', V_over_F=VF,
                               maxiter=dew_bubble_maxiter, xtol=dew_bubble_xtol,
                               comp_guess=comp_guess)
                    break
                except Exception as e:
                    print(e)
                    continue
            guess, comp_guess, iter_phase, const_phase, iterations, err = sln
            # The iterated phase is the incipient one: liquid at the dew
            # point, gas at the bubble point.
            if dew:
                l, g = iter_phase, const_phase
            else:
                l, g = const_phase, iter_phase
            return guess, l, g, iterations, err
        else:
            raise NotImplementedError("TODO")
def stability_test_Michelsen(self, T, P, zs, min_phase, other_phase,
existing_comps=None, skip=None,
expect_liquid=False, expect_aqueous=False,
handle_iffy=False, lowest_dG=False,
highest_comp_diff=False, min_comp_diff=None,
all_solutions=False):
existing_phases = len(existing_comps) if existing_comps is not None else 0
gen = self.stab.incipient_guesses(T, P, zs, expect_liquid=expect_liquid,
expect_aqueous=expect_aqueous, existing_phases=existing_phases) #random=10000 has yet to help
always_stable = True
stable = True
if skip is not None:
(gen() for i in range(skip))
iffy_solution = None
lowest_solution, dG_min = None, -1e100
comp_diff_solution, comp_diff_max = None, 0.0
if existing_comps is None:
existing_comps = [zs]
if all_solutions:
all_solutions_list = []
for i, trial_comp in enumerate(gen):
try:
sln = stabiliy_iteration_Michelsen(min_phase, trial_comp, test_phase=other_phase,
maxiter=self.PT_STABILITY_MAXITER, xtol=self.PT_STABILITY_XTOL)
sum_zs_test, Ks, zs_test, V_over_F, trial_zs, appearing_zs, dG_RT = sln
lnK_2_tot = 0.0
for k in self.cmps:
lnK = log(Ks[k])
lnK_2_tot += lnK*lnK
sum_criteria = abs(sum_zs_test - 1.0)
if sum_criteria < 1e-9 or lnK_2_tot < 1e-7 or zs == trial_zs:
continue
if existing_comps:
existing_phase = False
min_diff = 1e100
for existing_comp in existing_comps:
diff = sum([abs(existing_comp[i] - appearing_zs[i]) for i in self.cmps])/self.N
min_diff = min(min_diff, diff)
if diff < 1e-4:
existing_phase = True
break
diffs2 = [abs(1.0-(existing_comp[i]/appearing_zs[i])) for i in self.cmps]
diff2 = sum(diffs2)/self.N
if diff2 < .02:
existing_phase = True
break
# Continue stability testing if min_diff is too low?
if existing_phase:
continue
# some stability test-driven VFs are converged to about the right solution - but just a little on the other side
# For those cases, we need to let SS determine the result
stable = V_over_F < -1e-6 or V_over_F > (1.0 + 1e-6) #not (0.0 < V_over_F < 1.0)
if not stable:
always_stable = stable
if all_solutions:
stab_guess_name = self.stab.incipient_guess_name(i, expect_liquid=expect_liquid)
all_solutions_list.append((trial_zs, appearing_zs, V_over_F, stab_guess_name, i, sum_criteria, lnK_2_tot))
if not stable:
if highest_comp_diff:
if min_diff > comp_diff_max:
if min_comp_diff is not None and min_diff > min_comp_diff and not all_solutions:
highest_comp_diff = highest_comp_diff = False
break
comp_diff_solution = (trial_zs, appearing_zs, V_over_F, i, sum_criteria, lnK_2_tot)
comp_diff_max = min_diff
continue
if lowest_dG:
if dG_RT > dG_min:
dG_min = dG_RT
lowest_solution = (trial_zs, appearing_zs, V_over_F, i, sum_criteria, lnK_2_tot)
continue
if handle_iffy and sum_criteria < 1e-5:
iffy_solution = (trial_zs, appearing_zs, V_over_F, i, sum_criteria, lnK_2_tot)
# continue
elif all_solutions:
continue
else:
break
except UnconvergedError:
pass
if all_solutions:
return all_solutions_list
if not always_stable:
if not lowest_dG and not highest_comp_diff and not handle_iffy:
pass
elif highest_comp_diff:
trial_zs, appearing_zs, V_over_F, i, sum_criteria, lnK_2_tot = comp_diff_solution
elif lowest_dG:
trial_zs, appearing_zs, V_over_F, i, sum_criteria, lnK_2_tot = lowest_solution
elif handle_iffy:
trial_zs, | |
score is the best
returns: chosen_play: tuple of:
play: tuple of two ints
play3d: tuple of three ints
score: float
diag: list of tuples of tree ints
"""
# initiallization
o_scores = []
n_o_scores = []
d_scores = []
n_d_scores = []
diags = []
centroid = np.array([(self.size - 1) / 2. for _ in range(3)])
# getting list of scores
for play, play3d in zip(self.valid_pos, self.valid_3dpos):
o_score, n_o_score, d_score, n_d_score, diag = self.get_score(play3d)
o_scores.append(o_score)
n_o_scores.append(n_o_score)
d_scores.append(d_score)
n_d_scores.append(n_d_score)
diags.append([item for item in diag])
# eliminate everything that does not have the max score
max_score = max(max(o_scores), max(d_scores))
o_indexes = [i for i in range(len(o_scores)) if o_scores[i] == max_score]
d_indexes = [i for i in range(len(d_scores)) if d_scores[i] == max_score]
o_scores = [o_scores[i] for i in o_indexes]
n_o_scores = [n_o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [self.valid_pos[i] for i in o_indexes]
o_plays3d = [self.valid_3dpos[i] for i in o_indexes]
d_scores = [d_scores[i] for i in d_indexes]
n_d_scores = [n_d_scores[i] for i in d_indexes]
d_plays = [self.valid_pos[i] for i in d_indexes]
d_plays3d = [self.valid_3dpos[i] for i in d_indexes]
# Select the play
if max_score == self.win - 0.5 and len(o_scores) > 0: # this play is winner
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
if max_score == self.win - 0.5 and len(d_scores) > 0: # this avoids a winner play
return d_plays[0], d_plays3d[0], d_scores[0], None
if len(o_scores) == 1 and len(d_scores) == 0: # will play the best offensive move
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
if len(o_scores) == 0 and len(d_scores) == 1: # will play the best defensive move
return d_plays[0], d_plays3d[0], d_scores[0], None
if len(o_scores) > 1 and len(d_scores) == 0: # will play an offensive move but there is more than one
# first select based on the number of diags giving the score
max_n = max(n_o_scores)
o_indexes = [i for i in range(len(n_o_scores)) if n_o_scores[i] == max_n]
o_scores = [o_scores[i] for i in o_indexes]
n_o_scores = [n_o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
if len(o_scores) == 1: # this is the best
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
else: # there is more than one option that tied, chose the one more centered
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in o_plays3d]
mindist = min(dists)
o_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
o_scores = [o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
index = random.randrange(len(o_indexes))
return o_plays[index], o_plays3d[index], o_scores[index], diags[index]
if len(o_scores) == 0 and len(d_scores) > 1: # we will play an defensive move but there is more than one
# first select based on the number of diags giving the score
max_n = max(n_d_scores)
d_indexes = [i for i in range(len(n_d_scores)) if n_d_scores[i] == max_n]
d_scores = [d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
if len(d_scores) == 1: # this is the best
return d_plays[0], d_plays3d[0], d_scores[0], None
else: # there is more than one option that tied, chose the one more centered
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in d_plays3d]
mindist = min(dists)
d_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
d_scores = [d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
index = random.randrange(len(d_indexes))
return d_plays[index], d_plays3d[index], d_scores[index], None
if len(o_scores) > 0 and len(d_scores) > 0: # there are offensive and defensive scores tied
# remove all options that do not have the maximum number of diags giving that score
max_n = max(max(n_o_scores), max(n_d_scores))
o_indexes = [i for i in range(len(n_o_scores)) if n_o_scores[i] == max_n]
d_indexes = [i for i in range(len(n_d_scores)) if n_d_scores[i] == max_n]
o_scores = [o_scores[i] for i in o_indexes]
n_o_scores = [n_o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
d_scores = [d_scores[i] for i in d_indexes]
n_d_scores = [n_d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
if len(o_scores) > 0 and len(d_scores) == 0: # will play an offensive move
if len(o_scores) == 1: # there is only one option
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
else: # there there is more than one option, chose based on centrality
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in o_plays3d]
mindist = min(dists)
o_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
o_scores = [o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
index = random.randrange(len(o_indexes))
return o_plays[index], o_plays3d[index], o_scores[index], diags[index]
if len(o_scores) == 0 and len(d_scores) > 0: # will play a defensive move
if len(d_scores) == 1: # we chose this one
return d_plays[0], d_plays3d[0], d_scores[0], None # diags[0] is useless
else: # there are ties, chose based on centrality
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in d_plays3d]
mindist = min(dists)
d_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
d_scores = [d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
index = random.randrange(len(d_indexes))
return d_plays[index], d_plays3d[index], d_scores[index], None # diags[0] is useless
if len(o_scores) > 0 and len(d_scores) > 0: # there are ties, play the offensive move
if len(o_scores) == 1: # there is only one option
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
else: # there there is more than one option, chose based on centrality
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in o_plays3d]
mindist = min(dists)
o_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
o_scores = [o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
index = random.randrange(len(o_indexes))
return o_plays[index], o_plays3d[index], o_scores[index], diags[index]
else:
raise(ValueError, 'this should not have happened')
else:
raise (ValueError, 'this should not have happened')
def get_pl(self):
"""tis method gets the state for each player"""
self.pl1 = self.state == 1
self.pl2 = self.state == 2
self.empty = self.state == 0
self.game_over = self.empty.sum() == 0
def clone(self):
"""clone the current instance except the children (to make it faster)"""
newself = copy.deepcopy(self)
newself.children = []
return newself
def run_play(self, play=None):
"""update a state with a play. If play is none it will find the best play.
if it is a play it will update the state if the play is valid
play: tuple of two ints or None
returns: success: bool (whether the state was updated or not)"""
if self.game_over:
return False
if play is None:
play, play3d, score, diag = self.get_best_score_play()
if self.play[play] >= self.size: # the play is ilegal
return False
play3d = (play[0], play[1], self.play[play]) # 3d position played
offensive_score, num_offensive_score, defensive_score, num_defensive_score, best_diag = self.get_score(play3d)
self.last_play = play # last play in this state
self.last_3dplay = play3d
self.play[play] += 1 # updates play
self.state[play3d] = self.next_turn # updates state
if self.next_turn == 1: # update the position of the player in this turn
self.pl1[play3d] = 1
else:
self.pl2[play3d] = 1
self.empty[play3d] = 0 # update the empty states
self.next_turn, self.previous_turn = self.previous_turn, self.next_turn # swaps turns
self.get_valid_pos() # updates valid_pos and valid_3dpos
if offensive_score == self.win - 0.5: # updates winner
self.winner = self.previous_turn
self.winning_diag = best_diag
# self.get_winner() #todo eliminate after testing
self.game_over = self.empty.sum() == 0 or self.winner > 0 # updates game over
self.history.append({'turn': self.previous_turn, 'play':self.last_play, 'play3d': | |
#---------------------------------------------------------------------------------------#
# Name : FLP.py
# Author: T.K.R.Arvind
# Date : 27th December 2020
# CLI : FLP.py
#
# This program is for floating point computation and related operations and is
# mainly written to cross-check the results produced by the hardware implementation.
# So part of this code results or operations are done as bit wise computations.
# This has four main features written with minimal library functions:
# a) Floating Point addition
# b) Floating point subtraction
# c) Float to decimal conversion
# d) Decimal to Float conversion
# It rounds FLP numbers using the 'ROUND to EVEN, TIE to ZERO' method
#---------------------------------------------------------------------------------------#
import sys
import math
import argparse #for command line interface only
#========================== 1)Floating point addition subtraction ========================#
# This takes in five parameter as inputs
# 1) floata : floating point a
# 2) floatb : floating point b
# 3) Ewidth : width of the exponent
# 4) sign   : indicates addition/subtraction
# 5) display: determines whether to print intermediate step results
# It returns the resultant of two numbers
#
#=======================================================================================#
#
def FLPADD(floata,floatb,Ewidth,sign,display):
floata = floata.lower()
floatb = floatb.lower()
if(floata == "nan" or floata == "inf" or floata == "minf" or floatb == "nan" or floatb == "inf" or floatb == "minf"):
print("\npass special values in Hex")
sys.exit(1)
flpa = floata.replace("0x",'')
flpb = floatb.replace("0x",'')
N = len(flpa) # taking the value of (N-bit)/4 number and should be integer
if(N != len(flpb)):
print("Length of the N-bit numbers dont match")
sys.exit(1)
if((math.log(N,2)).is_integer()): #checking whether input is incomplete
N = N*4 # number of bits in N-bit binary floating point representation
if(N <= Ewidth+1):
print("Exponent width is more than number of bits")
sys.exit(1)
else:
print("\nInput Numbers are incomplete or not a floating value.")
sys.exit(1)
ina = ''
inb = ''
try:
ina = (bin(int(flpa,16))).lstrip('0b').zfill(N) #converting Hexa inputs to binary numbers
inb = (bin(int(flpb,16))).lstrip('0b').zfill(N)
except ValueError:
print("\nInput Numbers are not a floating value.")
sys.exit(1)
if(display):
print('\n#--------- Inputs ------------#')
print('Floating point A :',ina[0]+'_'+ina[1:Ewidth+1]+'_'+ina[Ewidth+1:])
print('Floating point B :',inb[0]+'_'+inb[1:Ewidth+1]+'_'+inb[Ewidth+1:])
print('Is sutraction :',str(sign))
#--------------------------- alignment starts here --------------------------------#
# In alignment the code checks for special values and returns special values. It finds the
# maximum and minimum and aligns the minimum value with respect to the difference in
# the exponenet value or depending on other factor.
#----------------------------------------------------------------------------------#
opn = int(ina[0],2)^int(inb[0],2)^sign # 1 means subtraction
expa = int(ina[1:Ewidth+1],2) # storing biased exponent A
expb = int(inb[1:Ewidth+1],2) # storing biased exponent B
mana = ('0' if expa==0 else '1') +ina[Ewidth+1:] # storing mantissa A without hidden bit
manb = ('0' if expb==0 else '1') +inb[Ewidth+1:] # storing mantissa B without hidden bit
bias = 2**(Ewidth-1)-1
EInf = 2**(Ewidth)-1
HBManWidth = len(mana) #includes hidden bit in length
InfWoSign = '1'*Ewidth + '0'* (HBManWidth-1)
NaN = '1'*N
#processing the special values like Nan and Inf here
if(ina == NaN or inb == NaN): #If any are NaN just return N
return NaN
elif(ina[1:] == InfWoSign and inb[1:] != InfWoSign): #if any one is infinity return infinity with sign
return ina
elif(inb[1:] == InfWoSign and ina[1:] != InfWoSign):
return inb
elif(ina[1:] == InfWoSign):
if(inb == ina[0]+InfWoSign): #both are same signed infinity
if(sign == 0): #addition operation
return ina
else:
return NaN
else:
if(sign == 1): #both are different signed infinity
return ina #addition operation
else:
return NaN
newsign = int(inb[0],2) ^ sign
Newinb = str(newsign) + inb[1:] #taking subtraction into effect
swap = 0 # indicator to swap the inputs to assign MaxMan etc
if(expb > expa):
swap = 1
elif(expa == expb):
if(int(manb,2) > int(mana,2)):
swap = 1
MaxMan = mana
MinMan = manb
MaxExp = expa
MinExp = expb
sign = '1' if(ina[0]=='1') else '0'
if(swap == 1):
MaxMan = manb
MinMan = mana
MaxExp = expb
MinExp = expa
sign = '1' if(Newinb[0]=='1') else '0' #taking subtraction into consideration
#finding the shift value with which the number id shifted
delE = 0
if(MinExp==0 and MaxExp>0): #right shift be E-1 when one is denormal
delE = MaxExp-1
else:
delE = MaxExp-MinExp
MaxMan += '000'
MinMan += '000' #simply filling with GRS bits
if(delE <= HBManWidth+4): #just not to run of processors bit :)
ShiftedMan = '0'*delE + MinMan[0:]
else:
ShiftedMan = '0'*(HBManWidth+4) + MinMan[0:]
AlignedMan = ''
if(len(ShiftedMan[0:HBManWidth+2]) < (HBManWidth+2)): #if width of aligned is less than or equal to (hb+Manwidth+GR) bit
AlignedMan = (ShiftedMan[0:HBManWidth+2]).rjust(HBManWidth+2,'0')
else:
AlignedMan = ShiftedMan[0:HBManWidth+2]
if (int(ShiftedMan[HBManWidth+2:HBManWidth+4],2)>0): # calculating stickybit values
AlignedMan += '1'
else:
AlignedMan += '0'
if(display):
print('\n#------- Alignment -----------#')
print('is subtraction? :',opn)
print('Rshift Mantissa :',delE)
print('mantissa to shift:',MinMan[0]+'_'+MinMan[1:-3]+'_'+MinMan[-3:])
print('Aligned mantissa :',AlignedMan[0]+'_'+AlignedMan[1:-3]+'_'+AlignedMan[-3:])
print('Maximum mantissa :',MaxMan[0]+'_'+MaxMan[1:-3]+'_'+MaxMan[-3:])
#--------------------------- arithmetic starts here --------------------------------#
# The computation of numbers happens here in two's complement method
#-----------------------------------------------------------------------------------#
FA = len(AlignedMan) # number of Full adder needed
complementedMan = '' # This is just a initialisation not to be confused
if(opn == 1): #1's complement only if subtraction
for i in range(0,len(AlignedMan)):
if(AlignedMan[i] == '0'):
complementedMan += '1'
else:
complementedMan += '0'
else:
complementedMan = AlignedMan
cin = opn
partialsum = 0
arithmeticResult = ''
for i in range(FA-1,-1,-1): #moving from lsbth position to 0
partialsum += cin
partialsum += 1 if(complementedMan[i] =='1') else 0
partialsum += 1 if(MaxMan[i] =='1') else 0
arithmeticResult = str(partialsum %2) + arithmeticResult
cin = 1 if(partialsum >1) else 0
partialsum = 0
if (opn==1): #it cannot produce a negative result as it is swap even for delexp = 0
arithmeticResult = '0' + arithmeticResult
else:
arithmeticResult = str(cin)+ arithmeticResult
if(display):
print('\n#------- Arithmetic ----------#')
print('lsb cin '+str(cin))
print('Mantissa Min Val :', AlignedMan[0]+'_'+AlignedMan[1:-3]+'_'+AlignedMan[-3:])
print('Complemented Val :', complementedMan[0]+'_'+complementedMan[1:-3]+'_'+ complementedMan[-3:])
print('Mantissa Max Val :', MaxMan[0]+'_'+MaxMan[1:-3]+'_'+MaxMan[-3:])
print('Arithmetic Val :', arithmeticResult[:2]+'_'+arithmeticResult[2:-3]+'_'+arithmeticResult[-3:])
#--------------------------- Normalisation starts here --------------------------------#
# This normalises the arithmetic results by left shifting or right shifting the mantissa
# and reflecting its effect on the exponent.
#--------------------------------------------------------------------------------------#
NormalisedMan = ''
NormalisedExp = MaxExp
preshift = 2*N #some big number
try:
preshift = arithmeticResult[1:].index('1') #starting from hidden so neglected carry bit
except ValueError:
preshift = 2*N #some big number
if(opn == 0 ): #only if addition and has produced a carry
if(arithmeticResult[0]=='1'):
NormalisedMan = arithmeticResult[1:]#removing carry bit as it becomes a hiddn bit
NormalisedExp +=1
if(NormalisedExp >= 2**(Ewidth)-1 ):#if exp goes to Infinity return inf
Inf = sign +'1'*Ewidth
return Inf.rjust(N,'0')
else:
NormalisedMan = arithmeticResult[2:]#removing hidden bit as it is the first bit
elif(MaxExp == 0 ):
NormalisedMan = arithmeticResult[2:]
elif(MaxExp > preshift):
NormalisedExp -= preshift
NormalisedMan = arithmeticResult[preshift+2:]#removing the hidden bit present at preshift
else:
NormalisedMan = arithmeticResult[MaxExp+1:]
NormalisedExp = 0;
t =(len(MaxMan)) #if Normalised man is less than Mantissa +GRS
if(len(NormalisedMan) <= t ): #K is less than mantissa and GRS bits then pad 0
NormalisedMan = NormalisedMan.ljust(t-1,'0')
NormalisedExp = bin(NormalisedExp).lstrip('0b').rjust(Ewidth,'0')
PreRound = '0' + NormalisedExp + NormalisedMan
if(display):
print('\n#----- Normalisation ---------#')
print('Max-Exp Value :', str(MaxExp))
print('preshift value :', str(preshift))
print('NormalisedExp is :', NormalisedExp)
print('NormalisedMan is :', NormalisedMan)
print('Pre Rounding is :',PreRound[0]+'_'+PreRound[1:Ewidth+1]+'_'+PreRound[Ewidth+1:N]+'_'+PreRound[N:])
#--------------------------- Rounding starts here --------------------------------#
Round = RND2EVNTIE20(PreRound,N,0.0,display)
if(Round[0] == '1'): #during rouning there is a possibility that exp becomed infinity
Inf = sign +'1'*EWidth
return Inf.rjust(N,'0')
else:#fixing the sign
Round = sign+Round[1:]
return Round
#
#=======================================================================================#
#================================= 2)LeadZerofinder ===================================#
# This function takes two parameters as inputs
# 1) fraction : fraction for which l index to be found
# 2) MaxIter : Maxiteration to be performed in fraction
# It returns two values as outputs
# 1) fraction : resultant fraction after Lead One is found
# 2) Npos : Position at which Lead one is present
#
#=====================================================================================#
#
def LEAD0FINDER(fraction, MaxIter=10):
    """Locate the leading 1 bit of a binary fraction.

    Repeatedly doubles *fraction*; the first doubling that makes it reach
    1.0 or more marks the position of the lead one bit, which is then
    stripped from the fraction.

    :param float fraction: fraction whose lead-one position is sought
    :param int MaxIter: maximum number of doublings to attempt
    :return: (remaining fraction after the lead one is removed, position).
             If no lead one is found within MaxIter doublings, the fraction
             doubled MaxIter times and MaxIter + 1 are returned.
    :rtype: tuple[float, int]
    """
    # Renamed the counter from `iter` (shadowed the builtin) to `position`;
    # the redundant duplicated tail `return` of the original is folded away.
    position = 1
    while position <= MaxIter:
        fraction *= 2
        if fraction >= 1:
            # Lead one found: strip it and report its position.
            return fraction - 1, position
        position += 1
    # No lead one within MaxIter doublings.
    return fraction, position
#=======================================================================================#
| |
the last header of the raw chunk is before the checkpoint height
then it has been checked for validity.
'''
headers_obj = app_state.headers
checkpoint = headers_obj.checkpoint
coin = headers_obj.coin
end_height = start_height + len(raw_chunk) // HEADER_SIZE
def extract_header(height):
start = (height - start_height) * HEADER_SIZE
return raw_chunk[start: start + HEADER_SIZE]
def verify_chunk_contiguous_and_set(next_raw_header, to_height):
# Set headers backwards from a proven header, verifying the prev_hash links.
for height in reversed(range(start_height, to_height)):
raw_header = extract_header(height)
if coin.header_prev_hash(next_raw_header) != coin.header_hash(raw_header):
raise MissingHeader('prev_hash does not connect')
headers_obj.set_one(height, raw_header)
next_raw_header = raw_header
try:
# For pre-checkpoint headers with a verified proof, just set the headers after
# verifying the prev_hash links
if end_height < checkpoint.height:
# Set the last proven header
last_header = extract_header(end_height - 1)
headers_obj.set_one(end_height - 1, last_header)
verify_chunk_contiguous_and_set(last_header, end_height - 1)
return headers_obj.longest_chain()
# For chunks prior to but connecting to the checkpoint, no proof is required
verify_chunk_contiguous_and_set(checkpoint.raw_header, checkpoint.height)
# Process any remaining headers forwards from the checkpoint
chain = None
for height in range(max(checkpoint.height + 1, start_height), end_height):
_header, chain = headers_obj.connect(extract_header(height))
return chain or headers_obj.longest_chain()
finally:
headers_obj.flush()
async def _negotiate_protocol(self):
    '''Negotiate the protocol version with the server via server.version.

    Raises: RPCError, TaskTimeout
    '''
    method = 'server.version'
    supported = [version_string(PROTOCOL_MIN), version_string(PROTOCOL_MAX)]
    try:
        reply = await self.send_request(method, (PACKAGE_VERSION, supported))
        server_string, protocol_string = reply
        self.logger.debug(f'server string: {server_string}')
        self.logger.debug(f'negotiated protocol: {protocol_string}')
        self.ptuple = protocol_tuple(protocol_string)
        # The server must pick a version inside our supported range.
        assert PROTOCOL_MIN <= self.ptuple <= PROTOCOL_MAX
    except (AssertionError, ValueError) as exc:
        raise DisconnectSessionError(f'{method} failed: {exc}', blacklist=True)
async def _get_checkpoint_headers(self):
    '''Fetch any checkpoint headers we are still missing, chunk by chunk.

    Raises: RPCError, TaskTimeout
    '''
    while True:
        height, needed = self._required_checkpoint_headers()
        if not needed:
            break
        logger.info(f'{needed:,d} checkpoint headers needed')
        await self._request_chunk(height, needed)
async def _request_chunk(self, height, count):
    '''Request a chunk of raw headers from the server and connect them.

    Returns the greatest height successfully connected (might be lower than expected
    because of a small server response).
    Raises: RPCError, TaskTimeout, DisconnectSessionError'''
    self.logger.info(f'requesting {count:,d} headers from height {height:,d}')
    method = 'blockchain.block.headers'
    cp_height = app_state.headers.checkpoint.height
    # A merkle proof is only requested (cp_height non-zero) when the whole
    # chunk lies below the checkpoint.
    if height + count >= cp_height:
        cp_height = 0
    try:
        result = await self.send_request(method, (height, count, cp_height))
        rec_count = result['count']
        last_height = height + rec_count - 1
        if count != rec_count:
            self.logger.info(f'received just {rec_count:,d} headers')
        raw_chunk = bytes.fromhex(result['hex'])
        assert len(raw_chunk) == HEADER_SIZE * rec_count
        if cp_height:
            # Verify the chunk's final header against the checkpoint root.
            hex_root = result['root']
            branch = [hex_str_to_hash(item) for item in result['branch']]
            self._check_header_proof(hex_root, branch, raw_chunk[-HEADER_SIZE:], last_height)
        self.chain = self._connect_chunk(height, raw_chunk)
    except (AssertionError, KeyError, TypeError, ValueError,
            IncorrectBits, InsufficientPoW, MissingHeader) as e:
        # Any malformed response or unconnectable header drops (and
        # blacklists) the server.
        raise DisconnectSessionError(f'{method} failed: {e}', blacklist=True)
    self.logger.info(f'connected {rec_count:,d} headers up to height {last_height:,d}')
    return last_height
async def _subscribe_headers(self):
    '''Subscribe to header notifications and process the server's current tip.

    Raises: RPCError, TaskTimeout, DisconnectSessionError'''
    # Register the handler before sending the request so that notifications
    # arriving immediately after subscription are dispatched.
    self._handlers[HEADERS_SUBSCRIBE] = self._on_new_tip
    tip = await self.send_request(HEADERS_SUBSCRIBE)
    await self._on_new_tip(tip)
def _secs_to_next_ping(self):
    '''Seconds remaining until a keep-alive ping is due (5 minutes after the
    last send).  May be negative when a ping is overdue.'''
    next_ping_at = self.last_send + 300
    return next_ping_at - time.time()
async def _ping_loop(self):
    '''Keep the connection alive with periodic server.ping requests.

    Raises: RPCError, TaskTimeout
    '''
    method = 'server.ping'
    while True:
        await sleep(self._secs_to_next_ping())
        # Other traffic may have reset the timer while we slept; only ping
        # when one really is (almost) due.
        ping_due = self._secs_to_next_ping() < 1
        if ping_due:
            self.logger.debug(f'sending {method}')
            await self.send_request(method)
def _check_header_proof(self, hex_root, branch, raw_header, height):
    '''Verify a header's merkle proof against the hard-coded verification root.

    Raises: DisconnectSessionError
    '''
    expected_root = Net.VERIFICATION_BLOCK_MERKLE_ROOT
    # The server must claim the same root we were built to verify against.
    if hex_root != expected_root:
        raise DisconnectSessionError(f'bad header merkle root {hex_root} expected '
                                     f'{expected_root}', blacklist=True)
    parsed_header = Net.COIN.deserialized_header(raw_header, height)
    root_hash = _root_from_proof(parsed_header.hash, branch, height)
    proven_root = hash_to_hex_str(root_hash)
    # The branch must actually prove this header hashes up to that root.
    if proven_root != expected_root:
        raise DisconnectSessionError(f'invalid header proof {proven_root} expected '
                                     f'{expected_root}', blacklist=True)
    self.logger.debug(f'good header proof for height {height}')
async def _on_new_tip(self, json_tip):
    '''Handle a headers-subscription notification carrying the server's tip.

    Raises: RPCError, TaskTimeout, DisconnectSessionError'''
    try:
        raw_header = bytes.fromhex(json_tip['hex'])
        height = json_tip['height']
        assert isinstance(height, int), "height must be an integer"
    except Exception as e:
        # Malformed notification payload.
        raise DisconnectSessionError(f'error connecting tip: {e} {json_tip}')
    if height < Net.CHECKPOINT.height:
        raise DisconnectSessionError(f'server tip height {height:,d} below checkpoint')
    # Invalidate our view of this server's chain until the tip connects.
    self.chain = None
    self.tip = None
    tip = Net.COIN.deserialized_header(raw_header, height)
    while True:
        try:
            self.tip, self.chain = self._connect_header(tip.height, tip.raw)
            self.logger.debug(f'connected tip at height {height:,d}')
            self._network.check_main_chain_event.set()
            return
        except (IncorrectBits, InsufficientPoW) as e:
            # A provably invalid header means a hostile or broken server.
            raise DisconnectSessionError(f'bad header provided: {e}', blacklist=True)
        except MissingHeader:
            pass
        # Try to connect and then re-check.  Note self.tip might have changed.
        await self._catch_up_to_tip_throttled(tip)
async def _catch_up_to_tip_throttled(self, tip):
    '''Catch up to *tip*, ensuring only one session works on a given tip.

    Raises: DisconnectSessionError, BatchError, TaskTimeout'''
    # Avoid thundering herd effect by having one session catch up per tip
    done_event = SVSession._connecting_tips.get(tip.raw)
    if done_event:
        # Another session owns this tip; just wait for it to finish.
        self.logger.debug(f'another session is connecting my tip {tip.hex_str()}')
        await done_event.wait()
    else:
        self.logger.debug(f'connecting my own tip {tip.hex_str()}')
        SVSession._connecting_tips[tip.raw] = app_state.async_.event()
        try:
            await self._catch_up_to_tip(tip)
        finally:
            # Wake all waiters even if the catch-up failed.
            SVSession._connecting_tips.pop(tip.raw).set()
async def _catch_up_to_tip(self, tip):
    '''Connect our header store to the server's tip, fetching what is missing.

    Raises: DisconnectSessionError, BatchError, TaskTimeout
    '''
    headers_obj = app_state.headers
    cp_height = headers_obj.checkpoint.height
    max_height = max(chain.height for chain in headers_obj.chains())
    # Probe heights walking down from the tip with exponentially growing
    # strides, plus the first post-checkpoint height.
    probe_heights = [cp_height + 1]
    stride = 1
    cursor = min(tip.height, max_height)
    while cursor > cp_height:
        probe_heights.append(cursor)
        cursor -= stride
        stride *= 2
    connected = await self._request_headers_at_heights(probe_heights)
    # Then fetch the remaining headers forwards in full chunks.
    while connected < tip.height:
        connected = await self._request_chunk(connected + 1, 2016)
async def _subscribe_to_script_hash(self, script_hash: str) -> None:
    '''Subscribe to status notifications for a script hash and process the
    initial status the server returns.

    Raises: RPCError, TaskTimeout'''
    status = await self.send_request(SCRIPTHASH_SUBSCRIBE, [script_hash])
    # NOTE(review): the handler defined in this class is _on_status_changed;
    # confirm _on_queue_status_changed is defined elsewhere, else this raises
    # AttributeError.
    await self._on_queue_status_changed(script_hash, status)
async def _unsubscribe_from_script_hash(self, script_hash: str) -> bool:
    '''Unsubscribe from a script hash, returning the server's response.'''
    return await self.send_request(SCRIPTHASH_UNSUBSCRIBE, [script_hash])
async def _on_status_changed(self, script_hash: str, status: str) -> None:
    '''Handle a script hash status notification: when the status is new for
    any subscribed account, fetch the history and push it to those accounts.

    Raises: DisconnectSessionError on a malformed history response.'''
    keydata = self._keyinstance_map.get(script_hash)
    if keydata is None:
        self.logger.error(f'received status notification for unsubscribed {script_hash}')
        return
    keyinstance_id, script_type = keydata
    # Accounts needing a notification.
    accounts = [account for account, subs in self._subs_by_account.items()
                if script_hash in subs and
                _history_status(account.get_key_history(keyinstance_id, script_type)) != status]
    if not accounts:
        return
    # Status has changed; get history
    result = await self.request_history(script_hash)
    self.logger.debug(f'received history of {keyinstance_id} length {len(result)}')
    try:
        history = [(item['tx_hash'], item['height']) for item in result]
        tx_fees = {item['tx_hash']: item['fee'] for item in result if 'fee' in item}
        # Check that txids are unique
        assert len(set(tx_hash for tx_hash, tx_height in history)) == len(history), \
            f'server history for {keyinstance_id} has duplicate transactions'
    except (AssertionError, KeyError) as e:
        # Put the notification back so it can be retried before dropping
        # this session.
        self._network._on_status_queue.put_nowait((script_hash, status))  # re-queue
        raise DisconnectSessionError(f'bad history returned: {e}')
    # Check the status; it can change legitimately between initial notification and
    # history request
    hstatus = _history_status(history)
    if hstatus != status:
        self.logger.warning(
            f'history status mismatch {hstatus} vs {status} for {keyinstance_id}')
    for account in accounts:
        if history != account.get_key_history(keyinstance_id, script_type):
            self.logger.debug("_on_status_changed new=%s old=%s", history,
                              account.get_key_history(keyinstance_id, script_type))
            await account.set_key_history(keyinstance_id, script_type, history, tx_fees)
async def _main_server_batch(self):
    '''Fetch main-server metadata (banner, donation address, peer list) as
    one batch and store it on the server's state.

    Raises: DisconnectSessionError, BatchError, TaskTimeout'''
    async with timeout_after(10):
        async with self.send_batch(raise_errors=True) as batch:
            batch.add_request('server.banner')
            batch.add_request('server.donation_address')
            batch.add_request('server.peers.subscribe')
    # batch.results is populated once the batch context manager exits.
    server = self.server
    try:
        server.state.banner = _require_string(batch.results[0])
        server.state.donation_address = _require_string(batch.results[1])
        server.state.peers = self._parse_peers_subscribe(batch.results[2])
        self._network.trigger_callback('banner')
    except AssertionError as e:
        # The _require_* validators raise AssertionError on bad types.
        raise DisconnectSessionError(f'main server requests bad batch response: {e}')
def _parse_peers_subscribe(self, result):
    '''Parse a server.peers.subscribe response into a list of SVServer objects.'''
    peers = []
    for entry in _require_list(result):
        entry = _require_list(entry)
        host = _require_string(entry[1])
        # entry[2] holds feature strings such as "s50002" (SSL) / "t50001" (TCP).
        for feature in entry[2]:
            if not re.match(r"[st]\d*", _require_string(feature)):
                continue
            protocol = feature[0]
            port = feature[1:]
            try:
                peers.append(SVServer.unique(host, port, protocol))
            except ValueError:
                # Malformed port etc.; skip this endpoint.
                pass
    self.logger.info(f'{len(peers)} servers returned from server.peers.subscribe')
    return peers
async def _request_headers_at_heights(self, heights):
    '''Requests the headers as a batch and connects them, lowest height first.

    Return the greatest connected height (-1 if none connected).
    Raises: DisconnectSessionError, BatchError, TaskTimeout
    '''
    async def _request_header_batch(batch_heights):
        # Fetch one batch of single headers and connect each in turn,
        # advancing good_height in the enclosing scope.
        nonlocal good_height
        self.logger.debug(f'requesting {len(batch_heights):,d} headers '
                          f'at heights {batch_heights}')
        async with timeout_after(10):
            async with self.send_batch(raise_errors=True) as batch:
                for height in batch_heights:
                    # Pre-checkpoint heights are requested with a merkle proof.
                    batch.add_request(method,
                                      (height, cp_height if height <= cp_height else 0))
        try:
            for result, height in zip(batch.results, batch_heights):
                if height <= cp_height:
                    hex_root = result['root']
                    branch = [hex_str_to_hash(item) for item in result['branch']]
                    raw_header = bytes.fromhex(result['header'])
                    self._check_header_proof(hex_root, branch, raw_header, height)
                else:
                    raw_header = bytes.fromhex(result)
                _header, self.chain = self._connect_header(height, raw_header)
                good_height = height
        except MissingHeader:
            # A header failing to connect is tolerated here; the caller falls
            # back to chunk requests from the last good height.
            hex_str = hash_to_hex_str(Net.COIN.header_hash(raw_header))
            self.logger.info(f'failed to connect at height {height:,d}, '
                             f'hash {hex_str} last good {good_height:,d}')
        except (AssertionError, KeyError, TypeError, ValueError) as e:
            raise DisconnectSessionError(f'bad {method} response: {e}')
    heights = sorted(set(heights))
    cp_height = Net.CHECKPOINT.height
    method = 'blockchain.block.header'
    good_height = -1
    # All pre-checkpoint heights carry proofs, so they must all connect.
    min_good_height = max((height for height in heights if height <= cp_height), default=-1)
    for chunk in chunks(heights, 100):
        await _request_header_batch(chunk)
    if good_height < min_good_height:
        raise DisconnectSessionError(f'cannot connect to checkpoint', blacklist=True)
    return good_height
async def handle_request(self, request):
    '''Dispatch an incoming server message; only notifications have handlers
    registered, anything else is rejected by handler_invocation.'''
    if isinstance(request, Notification):
        handler = self._handlers.get(request.method)
    else:
        handler = None
    return await handler_invocation(handler, request)()
async def connection_lost(self):
    '''Extend the base class handler to signal waiters that this session
    has closed.'''
    await super().connection_lost()
    self._closed_event.set()
#
# API exposed to the rest of this file
#
async def disconnect(self, reason, *, blacklist=False):
    '''Close the session, optionally blacklisting its server.'''
    if not blacklist:
        self.logger.error(f'disconnecting: {reason}')
    else:
        # Record the blacklisting time so this server is avoided for a while.
        self.server.state.last_blacklisted = time.time()
        self.logger.error(f'disconnecting and blacklisting: {reason}')
    await self.close()
async def run(self):
'''Called when a connection is established to manage the connection.
Raises: RPCError, BatchError, TaskTimeout, DisconnectSessionError
'''
# Negotiate the protocol before doing anything else
await self._negotiate_protocol()
# Checkpoint headers are essential to attempting tip connection
await self._get_checkpoint_headers()
# Then subscribe headers and connect the server's tip
await self._subscribe_headers()
# Only once the tip is connected to our set of chains do we consider the
# session good and add it to | |
from __future__ import annotations
import discord
from discord import Embed, HTTPException, Forbidden, NotFound, Client, Message
from ....cfg import bbData, cfg
from .... import lib
from .. import criminal
from ....botState import logger
import asyncio
from .. import bounty
from typing import Dict, Union, List
from ....baseClasses import serializable
def makeBountyEmbed(bounty : bounty.Bounty) -> Embed:
    """Construct a discord.Embed for listing in a bountyBoardChannel

    :param Bounty bounty: The bounty to describe in this embed
    :return: A discord.Embed describing statistics about the passed bounty
    :rtype: discord.Embed
    """
    embed = Embed(title=bounty.criminal.name,
                  colour=bbData.factionColours[bounty.faction] if bounty.faction in bbData.factionColours else \
                         bbData.factionColours["neutral"])
    embed.set_footer(text=bounty.faction.title())
    embed.set_thumbnail(url=bounty.criminal.icon)
    embed.add_field(name="**Reward:**", value=lib.stringTyping.commaSplitNum(bounty.reward) + " Credits")
    # Hoist the answer's position out of the loop (was recomputed with
    # route.index() every iteration); enumerate also keeps positions correct
    # should the route ever contain a duplicated system name.
    answerIndex = bounty.route.index(bounty.answer)
    routeStr = ""
    for systemIndex, system in enumerate(bounty.route):
        if bounty.systemChecked(system):
            # Strike through systems already visited ...
            routeStr += "~~"
            if 0 < answerIndex - systemIndex < cfg.closeBountyThreshold:
                # ... bolding those checked shortly before the criminal's location.
                routeStr += "**" + system + "**"
            else:
                routeStr += system
            routeStr += "~~"
        else:
            routeStr += system
        routeStr += ", "
    # Trim the trailing ", " from the route listing.
    embed.add_field(name="**Route:**", value=routeStr[:-2], inline=False)
    embed.add_field(name="-", value="> ~~Already checked systems~~\n> **Criminal spotted here recently**")
    return embed
# Icon shown beside the "No Bounties Available" embed author line.
stopwatchIcon = 'https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/120/twitter/259/stopwatch_23f1.png'
# Embed posted to a bounty board channel whenever it holds no listings.
noBountiesEmbed = Embed(description='> Please check back later, or use the `notify bounties` ' \
                                    + 'command to be notified when they spawn!',
                        colour=discord.Colour.dark_orange())
noBountiesEmbed.set_author(name='No Bounties Available', icon_url=stopwatchIcon)
class bountyBoardChannel(serializable.Serializable):
"""A channel which stores a continuously updating listing message for every active bounty.
Initialisation atts: These attributes are used only when loading in the BBC from dictionary-serialised format.
They must be used to initialise the BBC before the BBC can be used.
:var messagesToBeLoaded: A dictionary of bounty listings to be loaded into the BBC, where keys are message IDs,
and values are criminal dicts
:vartype messagesToBeLoaded: dict[int, dict]
:var channelIDToBeLoaded: The discord channel ID of the channel where this BBC is active, to be loaded into the BBC
:vartype channelIDToBeLoaded: int
:var noBountiesMsgToBeLoaded: The id of the message to be loaded indicating that the BBC is empty, if one exists
:vartype noBountiesMsgToBeLoaded: int
Runtime atts: These are the attributes that contribute to the BBC's runtime functionality, unlike initialisation atts.
:var bountyMessages: A dictionary associating faction names with the bounty listings associated with that faction.
Listings are stored as a dictionary of the listing's criminal to the
message ID of the listing message.
:vartype bountyMessages: dict[str, dict[criminal, int]]
:var noBountiesMessage: Either a reference to a discord.message indicating that the BBC is empty,
or None if no empty board message exists
:vartype noBountiesMessage: discord.message or None
:var channel: The channel where this BBC's listings are to be posted
:vartype channel: discord.TextChannel
"""
def __init__(self, channelIDToBeLoaded : int, messagesToBeLoaded : Dict[int, dict],
             noBountiesMsgToBeLoaded : Union[int, None]):
    """
    :param int channelIDToBeLoaded: The discord channel ID of the channel where this BBC is active,
                                    to be loaded into the BBC
    :param messagesToBeLoaded: A dictionary of bounty listings to be loaded into the BBC, where keys are message IDs,
                               and values are criminal dicts
    :type messagesToBeLoaded: dict[int, dict]
    :param int noBountiesMsgToBeLoaded: The id of the message to be loaded indicating that the BBC is empty, if one exists
    """
    # Deserialised state, consumed by init() once a logged-in client exists.
    self.messagesToBeLoaded = messagesToBeLoaded
    self.channelIDToBeLoaded = channelIDToBeLoaded
    self.noBountiesMsgToBeLoaded = noBountiesMsgToBeLoaded
    # dict of "faction": {criminal: discord.Message listing} - populated by init()
    self.bountyMessages = {}
    # discord message object to be filled when no bounties exist
    self.noBountiesMessage = None
    # discord channel object
    self.channel = None
async def init(self, client : Client, factions : List[str]):
    """Initialise the BBC's attributes to allow it to function.
    Initialisation is done here rather than in the constructor as initialisation can only be done asynchronously.

    Fetches the channel, re-fetches every serialised listing message, and
    either fetches or sends the "no bounties" placeholder message.  All
    discord fetches/sends retry transient HTTP errors cfg.httpErrRetries
    times with cfg.httpErrRetryDelaySeconds between attempts.

    :param discord.Client client: A logged in client instance used to fetch the BBC's message and channel instances
    :param list[str] factions: A list of faction names with which bounties can be associated
    """
    for fac in factions:
        self.bountyMessages[fac] = {}
    self.channel = client.get_channel(self.channelIDToBeLoaded)
    # Re-fetch each serialised listing message by its stored message ID.
    for id in self.messagesToBeLoaded:
        crim = criminal.Criminal.fromDict(self.messagesToBeLoaded[id])
        try:
            msg = await self.channel.fetch_message(id)
            self.bountyMessages[crim.faction][crim] = msg
        except HTTPException:
            # Possibly transient: retry before logging the failure.
            succeeded = False
            for tryNum in range(cfg.httpErrRetries):
                try:
                    msg = await self.channel.fetch_message(id)
                    self.bountyMessages[crim.faction][crim] = msg
                    succeeded = True
                except HTTPException:
                    await asyncio.sleep(cfg.httpErrRetryDelaySeconds)
                    continue
                break
            if not succeeded:
                logger.log("BBC", "init", "HTTPException thrown when fetching listing for criminal: " + crim.name,
                           category='bountyBoards', eventType="LISTING_LOAD-HTTPERR")
        except Forbidden:
            logger.log("BBC", "init", "Forbidden exception thrown when fetching listing for criminal: " + crim.name,
                       category='bountyBoards', eventType="LISTING_LOAD-FORBIDDENERR")
        except NotFound:
            # The listing was deleted while the bot was offline; drop it.
            logger.log("BBC", "init", "Listing message for criminal no longer exists: " + crim.name,
                       category='bountyBoards', eventType="LISTING_LOAD-NOT_FOUND")
    # -1 marks "no placeholder message was serialised".
    if self.noBountiesMsgToBeLoaded == -1:
        self.noBountiesMessage = None
        if self.isEmpty():
            # Board is empty but has no placeholder; send a fresh one.
            try:
                self.noBountiesMessage = await self.channel.send(embed=noBountiesEmbed)
            except HTTPException:
                succeeded = False
                for tryNum in range(cfg.httpErrRetries):
                    try:
                        self.noBountiesMessage = await self.channel.send(embed=noBountiesEmbed)
                        succeeded = True
                    except HTTPException:
                        await asyncio.sleep(cfg.httpErrRetryDelaySeconds)
                        continue
                    break
                if not succeeded:
                    logger.log("BBC", "init", "HTTPException thrown when sending no bounties message",
                               category='bountyBoards', eventType="NOBTYMSG_LOAD-HTTPERR")
                    self.noBountiesMessage = None
            except Forbidden:
                logger.log("BBC", "init", "Forbidden exception thrown when sending no bounties message",
                           category='bountyBoards', eventType="NOBTYMSG_LOAD-FORBIDDENERR")
                self.noBountiesMessage = None
    else:
        # A placeholder message ID was serialised; re-fetch the message.
        try:
            self.noBountiesMessage = await self.channel.fetch_message(self.noBountiesMsgToBeLoaded)
        except HTTPException:
            succeeded = False
            for tryNum in range(cfg.httpErrRetries):
                try:
                    self.noBountiesMessage = await self.channel.fetch_message(self.noBountiesMsgToBeLoaded)
                    succeeded = True
                except HTTPException:
                    await asyncio.sleep(cfg.httpErrRetryDelaySeconds)
                    continue
                break
            if not succeeded:
                logger.log("BBC", "init", "HTTPException thrown when fetching no bounties message",
                           category='bountyBoards', eventType="NOBTYMSG_LOAD-HTTPERR")
        except Forbidden:
            logger.log("BBC", "init", "Forbidden exception thrown when fetching no bounties message",
                       category='bountyBoards', eventType="NOBTYMSG_LOAD-FORBIDDENERR")
        except NotFound:
            logger.log("BBC", "init", "No bounties message no longer exists", category='bountyBoards',
                       eventType="NOBTYMSG_LOAD-NOT_FOUND")
            self.noBountiesMessage = None
def hasMessageForBounty(self, bounty : bounty.Bounty) -> bool:
    """Decide whether this BBC stores a listing for the given bounty

    :param Bounty bounty: The bounty to check for listing existence
    :return: True if this BBC stores a listing for bounty, False otherwise
    :rtype: bool
    """
    factionListings = self.bountyMessages[bounty.criminal.faction]
    return bounty.criminal in factionListings
def getMessageForBounty(self, bounty : bounty.Bounty) -> Message:
    """Return the message acting as a listing for the given bounty

    :param Bounty bounty: The bounty to fetch a listing for
    :return: This BBC's message listing the given bounty
    :rtype: discord.Message
    :raise KeyError: If no listing is stored for the bounty's criminal
    """
    return self.bountyMessages[bounty.criminal.faction][bounty.criminal]
def isEmpty(self) -> bool:
    """Decide whether this BBC stores any bounty listings

    :return: False if this BBC stores any bounty listings, True otherwise
    :rtype: bool
    """
    # Empty iff every faction's listing dict is empty (falsy).
    return not any(self.bountyMessages.values())
async def addBounty(self, bounty : bounty.Bounty, message : Message):
    """Treat the given message as a listing for the given bounty, and store it in the database.
    If the BBC was previously empty, remove the empty bounty board message if one exists.
    If a HTTP error is thrown when attempting to remove the empty board message,
    wait and retry the removal for the number of times defined in cfg

    :param Bounty bounty: The bounty to associate with the given message
    :param discord.Message message: The message acting as a listing for the given bounty
    :raise KeyError: If the bounty is already listed in this BBC
    """
    removeMsg = False
    if self.isEmpty():
        removeMsg = True
    if self.hasMessageForBounty(bounty):
        # Log before raising - in the original the log call sat after the
        # raise and was unreachable dead code.
        logger.log("BBC", "addBty",
                   "Attempted to add a bounty to a bountyboardchannel, but the bounty is already listed: " \
                   + bounty.criminal.name, category='bountyBoards', eventType="LISTING_ADD-EXSTS")
        raise KeyError("BNTY_BRD_CH-ADD-BNTY_EXSTS: Attempted to add a bounty to a bountyboardchannel, " \
                       + "but the bounty is already listed")
    self.bountyMessages[bounty.criminal.faction][bounty.criminal] = message
    if removeMsg:
        # The board was empty before this listing; take down the placeholder.
        try:
            await self.noBountiesMessage.delete()
        except HTTPException:
            # Possibly transient: retry the deletion a few times.
            succeeded = False
            for tryNum in range(cfg.httpErrRetries):
                try:
                    await self.noBountiesMessage.delete()
                    succeeded = True
                except HTTPException:
                    await asyncio.sleep(cfg.httpErrRetryDelaySeconds)
                    continue
                break
            if not succeeded:
                print("addBounty HTTPException")
        except Forbidden:
            print("addBounty Forbidden")
        except AttributeError:
            # noBountiesMessage was None - no placeholder to delete.
            print("addBounty no message")
async def removeBounty(self, bounty : bounty.Bounty):
"""Remove the listing message stored for the given bounty from the database.
This does not attempt to delete the message from discord.
If the BBC is now empty, send an empty bounty board message.
If a HTTP error is thrown when sending the empty BBC message,
wait and retry the removal for the number of times defined in cfg
:param Bounty bounty: The bounty whose listing should be removed from the database
:raise KeyError: If the database does not store a listing for the given bounty
"""
if not self.hasMessageForBounty(bounty):
| |
import logging
import string
import datetime
import json
import uuid
import urllib.request, urllib.parse, urllib.error
import base64
import requests
import os.path
import sys
import random
import re
from pyramid.config import Configurator
from pyramid.renderers import JSONP, render
from pyramid.response import Response
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.view import view_config
from src.sgd.frontend.yeastgenome.views.misc_views import not_found
from src.sgd.frontend.frontend_interface import FrontendInterface
class YeastgenomeFrontend(FrontendInterface):
def __init__(self, backend_url, heritage_url, log_directory):
    """
    :param backend_url: base URL of the SGD backend REST service
    :param heritage_url: base URL of the heritage site
    :param log_directory: directory receiving the 'yeastgenome' request log
    """
    self.backend_url = backend_url
    self.heritage_url = heritage_url
    # set_up_logging is provided elsewhere in this module.
    self.log = set_up_logging(log_directory, 'yeastgenome')
    # Locus cache, cleared daily by check_date().
    self.locuses = dict()
    # Timestamp of the last cache reset, compared by check_date().
    self.now = datetime.datetime.now()
def get_renderer(self, method_name):
    """Return the renderer for a view method: None for raw responses,
    'jsonp' for JSONP endpoints, otherwise the method's Jinja2 template path."""
    raw_response_methods = ('home', 'download_table', 'download_citations')
    jsonp_methods = ('header', 'footer', 'enrichment')
    if method_name in raw_response_methods:
        return None
    if method_name in jsonp_methods:
        return 'jsonp'
    return 'src:sgd/frontend/yeastgenome/static/templates/' + method_name + '.jinja2'
def response_wrapper(self, method_name, request):
request_id = str(uuid.uuid4())
callback = None if 'callback' not in request.GET else request.GET['callback']
self.log.info(request_id + ' ' + method_name + ('' if 'identifier' not in request.matchdict else ' ' + request.matchdict['identifier']))
def f(data):
self.log.info(request_id + ' end')
if callback is not None:
return Response(body="%s(%s)" % (callback, data), content_type='application/json')
else:
if data is not None:
return data
else:
return HTTPNotFound()
return f
def check_date(self):
new_time = datetime.datetime.now()
if new_time.date() != self.now.date() and new_time.hour >= 3:
self.locuses = dict()
self.now = new_time
return True
def locus_list(self, list_name):
return self.get_obj('locus_list', None, obj_url=self.backend_url + '/locus/' + list_name)
def get_obj(self, obj_type, obj_repr, obj_url=None):
if obj_url is None:
obj_url = self.backend_url + '/' + obj_type + '/' + obj_repr
try:
obj = get_json(obj_url)
except:
return HTTPNotFound()
if obj is None:
return None
# basic info
return {
obj_type: obj,
obj_type + '_js': json.dumps(obj)
}
def strain(self, strain_repr):
obj = self.get_obj('strain', strain_repr)
# get the genbank url and add to obj
genbank_url = None
for url in obj['strain']['urls']:
if url['category'] == 'genbank':
genbank_url = url['link']
obj['genbank_url'] = genbank_url
return obj
def ecnumber(self, biocon_repr):
return self.get_obj('ecnumber', biocon_repr)
def dataset(self, bioitem_repr):
dataset = self.get_obj('dataset', bioitem_repr)
return dataset
def keyword(self, keyword_repr):
return self.get_obj('keyword', keyword_repr)
def experiment(self, experiment_repr):
return self.get_obj('experiment', experiment_repr)
def observable(self, biocon_repr):
return self.get_obj('observable', biocon_repr)
def phenotype_ontology(self):
return self.get_obj('ontology', None, obj_url=self.backend_url + '/observable/ypo')
def go(self, biocon_repr):
return self.get_obj('go_term', None, obj_url=self.backend_url + '/go/' + biocon_repr)
def go_ontology(self, biocon_repr):
return self.get_obj('ontology', None, obj_url=self.backend_url + '/go/' + biocon_repr)
def disease(self, biocon_repr):
return self.get_obj('disease', None, obj_url=self.backend_url + '/disease/' + biocon_repr)
def disease_ontology(self, biocon_repr):
return self.get_obj('ontology', None, obj_url=self.backend_url + '/disease/' + biocon_repr)
def chemical(self, chemical_repr):
return self.get_obj('chemical', chemical_repr)
def domain(self, domain_repr):
return self.get_obj('domain', domain_repr)
def reserved_name(self, reserved_name_repr):
obj = self.get_obj('reservedname', reserved_name_repr)
# Redirect to underlying locus page if the reservedname has a locus
if 'reservedname_js' in obj:
js_dict = json.loads(obj['reservedname_js'])
if js_dict['locus']:
return HTTPFound(js_dict['locus']['link'])
return obj
def author(self, author_repr):
return self.get_obj('author', author_repr)
def contig(self, contig_repr):
return self.get_obj('contig', contig_repr)
def redirect(self, page, params):
if page == 'interaction':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/interaction')
else:
page = urllib.request.urlopen(self.heritage_url + '/cgi-bin/interaction_search').read()
return Response(page)
elif page == 'literature':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/literature')
elif page == 'protein':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/protein')
elif page == 'homology':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/homology')
elif page == 'expression':
del params['type']
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/expression')
elif page == 'locus':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0])
elif page == 'phenotype':
if 'phenotype' in params:
old_phenotype = params['phenotype'].split(':')
if len(old_phenotype) > 1:
new_phenotype = (old_phenotype[1] + ' ' + old_phenotype[0]).strip().replace(' ', '_').replace('/', '-')
if 'property_value' in params:
if 'chemicals' in new_phenotype:
new_phenotype = new_phenotype.replace('chemicals', params['property_value'].replace(' ', '_').replace('|', '_and_'))
elif 'chemical_compound' in new_phenotype:
new_phenotype = new_phenotype.replace('chemical_compound', params['property_value'].replace(' ', '_'))
else:
new_phenotype = old_phenotype[0]
return HTTPFound('/phenotype/' + new_phenotype)
elif 'dbid' in params:
return HTTPFound('/locus/' + params['dbid'] + '/phenotype')
elif 'observable' in params:
return HTTPFound('/observable/' + params['observable'].replace(' ', '_'))
elif 'property_value' in params:
return HTTPFound('/chemical/' + params['property_value'].replace(' ', '_'))
elif page == 'go':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/go')
elif page == 'go_term':
if len(params) > 0:
if list(params.values())[0].startswith('GO:'):
return HTTPFound('/go/' + list(params.values())[0])
else:
return HTTPFound('/go/GO:' + str(int(list(params.values())[0])).zfill(7))
elif page == 'disease':
if len(params) > 0:
return HTTPFound('/locus/' + list(params.values())[0] + '/disease')
elif page == 'do_term':
if len(params) > 0:
if list(params.values())[0].startswith('DO:'):
return HTTPFound('/disease/' + list(params.values())[0])
else:
return HTTPFound('/disease/DO:' + str(int(list(params.values())[0])).zfill(7))
elif page == 'reference':
if 'author' in params:
return HTTPFound('/author/' + list(params.values())[0].replace(' ', '_'))
elif 'topic' in params:
topic = list(params.values())[0]
page = urllib.request.urlopen(self.heritage_url + '/cgi-bin/reference/reference.pl?topic=' + topic + '&rm=multi_ref_result').read()
return Response(page)
elif 'rm' in params and 'topic_group' in params and 'page' in params:
page = urllib.request.urlopen(self.heritage_url + '/cgi-bin/reference/reference.pl?rm=' + params['rm'] + '&topic_group=' + params['topic_group'] + '&page=' + params['page']).read()
return Response(page)
elif 'doi' in params:
return HTTPFound('/reference/doi:' + list(params.values())[0].replace(' ', '_').replace('/', '-').lower())
elif len(params) > 0:
return HTTPFound('/reference/' + list(params.values())[0].replace(' ', '_').replace('/', '-'))
else:
return Response(status_int=500, body='Invalid URL.')
def header(self):
header_str = render('templates/header.jinja2', {})
return {'header': header_str}
def footer(self):
footer_str = render('templates/footer.jinja2', {})
return {'footer': footer_str}
def download_table(self, response, header_info, data, display_name):
headers = response.headers
date = datetime.datetime.now().strftime("%m/%d/%Y")
description = "!\n!Date: " + date + '\n' + "!From: Saccharomyces Genome Database (SGD) \n!URL: http://www.yeastgenome.org/ \n!Contact Email: <EMAIL> \n!Funding: NHGRI at US NIH, grant number 5-U41-HG001315 \n!"
cutoff = 1
if header_info[1] == 'Analyze ID':
if header_info[2] == '':
cutoff = 3
else:
cutoff = 2
table_header = description + '\n\n' + '\t'.join(header_info[cutoff:])
for row in data:
try:
[clean_cell(cell) for cell in row[cutoff:]]
except:
print(row)
response.text = table_header + '\n' + '\n'.join(['\t'.join([clean_cell(cell) for cell in row[cutoff:]]) for row in data])
exclude = set([x for x in string.punctuation if x != ' ' and x != '_'])
display_name = ''.join(ch for ch in display_name if ch not in exclude).replace(' ', '_')
headers['Content-Type'] = 'text/plain; charset=utf-8'
headers['Content-Disposition'] = str('attachment; filename=' + display_name.replace(' ', '_').replace('(', '').replace(')', '') + '.txt')
headers['Content-Description'] = 'File Transfer'
return response
def download_image(self, response, data, display_name):
headers = response.headers
response.body = base64.b64decode(data[22:])
exclude = set([x for x in string.punctuation if x != ' ' and x != '_'])
display_name = ''.join(ch for ch in display_name if ch not in exclude).replace(' ', '_')
headers['Content-Type'] = 'image/png;'
headers['Content-Disposition'] = str('attachment; filename=' + display_name + '.png')
headers['Content-Description'] = 'File Transfer'
return response
def download_citations(self, response, reference_ids, display_name):
reference_ids = list(set(reference_ids))
references = get_json(self.backend_url + '/reference_list', data={'reference_ids': reference_ids})
headers = response.headers
exclude = set([x for x in string.punctuation if x != ' ' and x != '_'])
display_name = ''.join(ch for ch in display_name if ch not in exclude).replace(' ', '_')
response.text = '\n' + '\n\n'.join([ref['text'] for ref in references])
headers['Content-Type'] = 'text/plain'
headers['Content-Disposition'] = str('attachment; filename=' + display_name + '.nbib')
headers['Content-Description'] = 'File Transfer'
return response
def download_sequence(self, response, sequence, filename, header):
headers = response.headers
response.text = '>' + header + '\n' + clean_cell('\n'.join([sequence[i:i+60] for i in range(0, len(sequence), 60)]))
headers['Content-Type'] = 'text/plain'
headers['Content-Disposition'] = str('attachment; filename=' + '"' + filename + '"')
headers['Content-Description'] = 'File Transfer'
return response
def analyze(self, list_name, bioent_ids):
bioent_ids = list(set([int(x) for x in bioent_ids if x is not None]))
bioents = get_json(self.backend_url + '/bioentity_list', data={'bioent_ids': bioent_ids})
if bioents is None:
return Response(status_int=500, body='Bioents could not be found.')
page = {
#Basic Info
'list_name_html': list_name,
'list_name': clean_cell(list_name).replace(' ', '_'),
'bioents': json.dumps(bioents),
}
return page
def enrichment(self, bioent_ids):
enrichment_results = get_json(self.backend_url + '/go_enrichment', data={'bioent_ids': bioent_ids})
return enrichment_results
def backend(self, url_repr, request, args=None):
relative_url = '/' + ('/'.join(url_repr))
backend_url = self.backend_url
full_url = backend_url + relative_url
if request.method == 'POST':
request_data = request.json_body
else:
request_data = None
if args is not None and len(args) > 0:
full_url += '?' + request.query_string
self.log.info(full_url)
try:
return get_json(full_url, request_data)
# prevent from returning 200 for failed backend requests
except ValueError:
return Response('null', status=404)
def yeastgenome_frontend(backend_url, heritage_url, log_directory, **configs):
chosen_frontend = YeastgenomeFrontend(backend_url, heritage_url, log_directory)
settings = dict(configs)
settings.setdefault('jinja2.i18n.domain', 'myproject')
configurator = Configurator(settings=settings)
configurator.add_translation_dirs('locale/')
configurator.include('pyramid_jinja2')
# set global template var asset_root to read from cloudfront or local, depending on .ini value, default to False
production_assets = configs.get('production_assets', False)
if production_assets == 'True':
file_path = os.path.dirname(os.path.realpath(__file__)) | |
- 40: I1ii11iIi11i * iIii1I11I1II1 % OoOoOO00
if 50 - 50: i11iIiiIii + ooOoO0o
if 41 - 41: I1IiiI * OoO0O00 + IiII / OoO0O00 . I1Ii111
if 2 - 2: O0 % o0oOOo0O0Ooo
O000oOOoOOO = self . eid . afi if ( self . eid . instance_id == 0 ) else LISP_AFI_LCAF
if ( O000oOOoOOO < 0 ) : O000oOOoOOO = LISP_AFI_LCAF
iiI1 = ( self . group . is_null ( ) == False )
if ( iiI1 ) : O000oOOoOOO = LISP_AFI_LCAF
if 64 - 64: OoOoOO00
iIiiii = ( self . signature_count << 12 ) | self . map_version
iIi1iii1 = 0 if self . eid . is_binary ( ) == False else self . eid . mask_len
if 25 - 25: II111iiii + I11i
IIii1i = struct . pack ( "IBBHHH" , socket . htonl ( self . record_ttl ) ,
self . rloc_count , iIi1iii1 , socket . htons ( OOo000 ) ,
socket . htons ( iIiiii ) , socket . htons ( O000oOOoOOO ) )
if 97 - 97: O0 + OOooOOo % OoOoOO00 * I11i . iIii1I11I1II1
if 94 - 94: oO0o
if 53 - 53: ooOoO0o + iII111i * i1IIi + I1IiiI
if 89 - 89: I1IiiI / II111iiii - OoOoOO00 % o0oOOo0O0Ooo
if ( iiI1 ) :
IIii1i += self . eid . lcaf_encode_sg ( self . group )
return ( IIii1i )
if 1 - 1: OoooooooOO . I11i / OoOoOO00 + o0oOOo0O0Ooo % i1IIi
if 1 - 1: OoooooooOO - OoO0O00 - OoooooooOO / iII111i
if 70 - 70: Ii1I + I1ii11iIi11i . II111iiii * i11iIiiIii
if 87 - 87: Ii1I / I1Ii111 % OoOoOO00 * I1ii11iIi11i - OoooooooOO / OoOoOO00
if 24 - 24: I11i . OOooOOo * i1IIi . I1ii11iIi11i / ooOoO0o / O0
if ( self . eid . afi == LISP_AFI_GEO_COORD and self . eid . instance_id == 0 ) :
IIii1i = IIii1i [ 0 : - 2 ]
IIii1i += self . eid . address . encode_geo ( )
return ( IIii1i )
if 62 - 62: o0oOOo0O0Ooo % II111iiii
if 22 - 22: oO0o - o0oOOo0O0Ooo
if 89 - 89: OOooOOo
if 34 - 34: iII111i . OOooOOo
if 13 - 13: OoO0O00 * OOooOOo + oO0o
if ( O000oOOoOOO == LISP_AFI_LCAF ) :
IIii1i += self . eid . lcaf_encode_iid ( )
return ( IIii1i )
if 21 - 21: i11iIiiIii . Ii1I % i1IIi * Ii1I . oO0o + Ii1I
if 92 - 92: i1IIi + OoO0O00 * I11i
if 70 - 70: Oo0Ooo
if 93 - 93: iII111i . I1ii11iIi11i . Oo0Ooo . oO0o . OoooooooOO
if 51 - 51: O0 - iII111i
IIii1i += self . eid . pack_address ( )
return ( IIii1i )
if 65 - 65: O0 / II111iiii * IiII % Ii1I + o0oOOo0O0Ooo
if 43 - 43: I1Ii111 + OoO0O00 * OoooooooOO
def decode ( self , packet ) :
# Decode a LISP mapping record header from the wire (network byte order).
# Returns the remaining packet bytes, or None if too short to parse.
# NOTE: the no-op "if N - N:" lines are obfuscator filler — leave them.
O00oO00oOO00O = "IBBHHH"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 85 - 85: iII111i + OOooOOo
# Fixed header: TTL, rloc-count, mask-len, flags word, version word, EID AFI.
self . record_ttl , self . rloc_count , self . eid . mask_len , OOo000 , self . map_version , self . eid . afi = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
if 36 - 36: OoO0O00 % II111iiii * O0 + II111iiii - oO0o - i1IIi
if 53 - 53: Ii1I - OOooOOo
if 75 - 75: iII111i % O0 - I11i - I1ii11iIi11i + I1IiiI - I1IiiI
self . record_ttl = socket . ntohl ( self . record_ttl )
OOo000 = socket . ntohs ( OOo000 )
# Flags word layout: action in bits 15-13, authoritative bit 12, ddt-incomplete bit 11.
self . action = ( OOo000 >> 13 ) & 0x7
self . authoritative = True if ( ( OOo000 >> 12 ) & 1 ) else False
self . ddt_incomplete = True if ( ( OOo000 >> 11 ) & 1 ) else False
# Version word layout: signature count in the top 4 bits, map-version in the low 12.
self . map_version = socket . ntohs ( self . map_version )
self . signature_count = self . map_version >> 12
self . map_version = self . map_version & 0xfff
self . eid . afi = socket . ntohs ( self . eid . afi )
self . eid . instance_id = 0
packet = packet [ ooOoooOoo0oO : : ]
if 87 - 87: i1IIi % Ii1I % i1IIi + iIii1I11I1II1
if 23 - 23: iIii1I11I1II1 * I11i . I1Ii111 - o0oOOo0O0Ooo
if 66 - 66: I1IiiI * I1Ii111 / i11iIiiIii / OOooOOo
if 19 - 19: ooOoO0o % iIii1I11I1II1 * OoooooooOO
# LCAF-encoded EIDs may carry a (source, group) pair; propagate instance-id to the group.
if ( self . eid . afi == LISP_AFI_LCAF ) :
packet , O0o00oOOOO00 = self . eid . lcaf_decode_eid ( packet )
if ( O0o00oOOOO00 ) : self . group = O0o00oOOOO00
self . group . instance_id = self . eid . instance_id
return ( packet )
if 53 - 53: OoOoOO00 . oO0o - OOooOOo . II111iiii * i11iIiiIii + OOooOOo
if 99 - 99: I1ii11iIi11i % Oo0Ooo
# Plain AFI: decode the EID address directly.
packet = self . eid . unpack_address ( packet )
return ( packet )
if 31 - 31: o0oOOo0O0Ooo - II111iiii * OOooOOo . OOooOOo - oO0o
if 57 - 57: OOooOOo / i11iIiiIii / I1Ii111 - Oo0Ooo . iIii1I11I1II1
def print_eid_tuple(self):
    """Return a human-readable string for this record's (EID, group) pair."""
    return lisp_print_eid_tuple(self.eid, self.group)
if 84 - 84: IiII
if 42 - 42: O0 . I1Ii111 / I11i
if 69 - 69: OoOoOO00 / I1Ii111 * I1IiiI
if 76 - 76: O0 + II111iiii * OoO0O00
if 1 - 1: o0oOOo0O0Ooo
if 34 - 34: o0oOOo0O0Ooo + OOooOOo . OoO0O00 + I1IiiI + OoooooooOO
if 90 - 90: Ii1I / OoOoOO00 - iIii1I11I1II1 / i1IIi * I1Ii111 - ooOoO0o
if 2 - 2: iII111i * I11i * ooOoO0o + i11iIiiIii + oO0o
if 81 - 81: o0oOOo0O0Ooo * OoO0O00
if 18 - 18: i11iIiiIii / o0oOOo0O0Ooo - oO0o . I11i * i1IIi
if 67 - 67: Ii1I
if 64 - 64: OoOoOO00 + iII111i * OoOoOO00 - I1IiiI * OoooooooOO
if 27 - 27: II111iiii + i11iIiiIii
if 32 - 32: i1IIi
if 76 - 76: II111iiii % ooOoO0o - I1ii11iIi11i
if 50 - 50: II111iiii / I1IiiI . Ii1I % i11iIiiIii
if 66 - 66: oO0o / OOooOOo / iII111i
if 5 - 5: I1Ii111 . oO0o
if 77 - 77: iII111i / i11iIiiIii
if 20 - 20: O0 . I11i
if 67 - 67: OoOoOO00 - ooOoO0o - iIii1I11I1II1
if 31 - 31: II111iiii + o0oOOo0O0Ooo * i11iIiiIii . o0oOOo0O0Ooo
if 73 - 73: oO0o / OOooOOo * II111iiii % OoooooooOO - i1IIi - ooOoO0o
if 43 - 43: o0oOOo0O0Ooo + Ii1I % OoO0O00 . I1Ii111 + i1IIi
if 85 - 85: Oo0Ooo % I1ii11iIi11i / OOooOOo
if 65 - 65: ooOoO0o + IiII - OoOoOO00 % II111iiii - iIii1I11I1II1
if 39 - 39: I1IiiI + I1ii11iIi11i - i11iIiiIii
if 43 - 43: iIii1I11I1II1
if 73 - 73: OoOoOO00 + o0oOOo0O0Ooo
if 58 - 58: i1IIi * I1ii11iIi11i % iII111i . OoO0O00 % IiII % I11i
if 63 - 63: I1ii11iIi11i % ooOoO0o % I1ii11iIi11i
# IANA protocol number for UDP, used as the ECM inner-header protocol below.
LISP_UDP_PROTOCOL = 17
# Default IP TTL for Encapsulated Control Messages.
LISP_DEFAULT_ECM_TTL = 128
if 71 - 71: Ii1I
class lisp_ecm ( ) :
def __init__ ( self , sport ) :
self . security = False
self . ddt = False
self . to_etr = False
self . to_ms = False
self . length = 0
self . ttl = LISP_DEFAULT_ECM_TTL
self . protocol = LISP_UDP_PROTOCOL
self . ip_checksum = 0
self . source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . udp_sport = sport
self . | |
<gh_stars>1-10
"""
This module provides functionality to generate simulations of confined systems.
"""
import os
import numpy as np
import mdevaluate as md
from .utils import save_open, write_gro
from .mdgenerate import env
# Format template for one [ position_restraints ] entry:
# atom index, function type, then the three force-constant parameters.
POSRE_LINE = '{ind:>6d} {func} {params[0]:e} {params[1]:e} {params[2]:e}\n'
def write_posre(indices, itp_file='psore.itp', func_type=1, params=(1e6, 1e6, 1e6)):
    """
    Write indices of constrained atoms to a gromacs itp file.

    Args:
        indices: List of constraint atom indices, in gromacs format (starting at 1).
        itp_file (opt.): Filename of the itp file.
            NOTE(review): default 'psore.itp' looks like a typo for
            'posre.itp'; kept as-is for backward compatibility — confirm.
        func_type (opt.): Function type of the constraint.
        params (opt.): Parameters of the position restraint potential.
    """
    with save_open(itp_file) as itp:
        itp.write('[ position_restraints ]\n')
        # One formatted line per constrained atom, same func/params for all.
        itp.writelines(
            POSRE_LINE.format(ind=atom_index, func=func_type, params=params)
            for atom_index in indices
        )
class ResizedBoxFrame(md.coordinates.CoordinateFrame):
    """CoordinateFrame whose ``box`` and ``residue_ids`` can be overridden.

    Both properties fall back to the parent-class values until an override
    is assigned (see ``resize_to_box``), so a plain view of an existing
    frame behaves exactly like the original.
    """
    # Override storage; None means "delegate to the parent frame".
    _box = None
    _residue_ids = None
    @property
    def box(self):
        # Return the override if set, otherwise the inherited box.
        if self._box is None:
            return super().box
        else:
            return self._box
    @box.setter
    def box(self, val):
        # Accept plain edge lengths (3,) and promote them to a diagonal matrix.
        if val.shape == (3,):
            self._box = np.eye(3) * val
        else:
            self._box = val
    @property
    def residue_ids(self):
        # Return the override if set, otherwise the inherited residue ids.
        if self._residue_ids is None:
            return super().residue_ids
        else:
            return self._residue_ids
    @residue_ids.setter
    def residue_ids(self, val):
        self._residue_ids = val
def resize_to_box(frame, box, len_mol):
    """
    Cut a smaller box out of the frame.

    Args:
        frame: Source coordinate frame (flat array of atom positions).
        box: Target box, either edge lengths of shape (3,) or a full (3, 3)
            box matrix (only its diagonal is used).
        len_mol: Number of atoms per molecule.

    Returns:
        ResizedBoxFrame containing only the molecules whose center of mass
        lies inside ``box``; its ``residue_ids`` are the 1-based indices of
        the selected molecules in the source frame.
    """
    if box.shape == (3, 3):
        box = box.diagonal()
    nr_res = len(frame) // len_mol
    residues = md.pbc.whole(frame % frame.box.diagonal(), len_res=len_mol).reshape(nr_res, len_mol, 3)
    masses = frame.masses.reshape(nr_res, len_mol, 1)
    # Normalize so each molecule's mass weights sum to one. Done
    # out-of-place: the original `masses /= ...` divided the reshaped view
    # in place and thereby mutated the frame's own mass array.
    masses = masses / (masses.sum() / nr_res)
    com = (residues * masses).sum(axis=1)
    # Keep molecules whose center of mass is inside the target box.
    res_in_box = np.where((com <= box).all(axis=1))
    new_frame = residues[res_in_box].view(ResizedBoxFrame).reshape(-1, 3)
    new_frame.box = box
    new_frame.residue_ids = np.zeros(len(new_frame), dtype=int)
    for i in range(len_mol):
        new_frame.residue_ids[i::len_mol] = res_in_box[0] + 1
    return new_frame
def make_spherical_conf(trajectory, constrained_subset, step, outfile, radius,
                        resize_box=False, method='residue', **kwargs):
    """
    Generate an initial configuration of spherically pinned molecules.

    Args:
        trajectory (mdevaluate.Coordinates):
            Bulk simulation from which the configuration is taken
        constrained_subset (dict):
            Definition of a subset of the atoms defining the constraints
        step: Timestep at which the configuration is taken from the source trajectory
        outfile: Output file of the new configuration
        radius: Radius of the spherical confinement; molecules at r >= radius
            (relative to the box center) are pinned.
        resize_box:
            If the simulation box should be resized according to the size of
            confinement. When this is True, the box size is set to
            L = 2*radius + 3.0, capped at the bulk box size.
        method: Method by which molecules are constrained. Only 'residue'
            (rename constrained molecules to kwargs['constrained_residue'])
            is implemented; any other value raises NotImplementedError.
        **kwargs: Requires 'constrained_residue' and 'name'; optional
            'box_buffer' enlarges the box written to the gro file.

    Returns:
        Tuple of number of unconstrained and constrained molecules.
    """
    subset = trajectory.subset(**constrained_subset)
    coords = subset[step]
    # Check if it is a water system with only one residue
    residue_ids = trajectory.atoms.residue_ids
    atom_names = trajectory.atoms.atom_names
    len_mol = len(set(atom_names))
    nr_mol = len(atom_names) // len_mol
    if len(set(residue_ids)) == 1:
        # NOTE(review): this writes new ids into the trajectory's residue_ids
        # array in place — presumably intentional, confirm.
        residue_ids[0::len_mol] = residue_ids[1::len_mol] = residue_ids[2::len_mol] = range(1, nr_mol + 1)
        coords = coords.view(ResizedBoxFrame)
        coords.residue_ids = residue_ids[0::len_mol]
    if resize_box:
        L = min(2*radius + 3.0, coords.box.max())
        coords = resize_to_box(coords, np.array([L, L, L]), len(coords)//nr_mol)
    # Wrap into the box, then center it at the origin for the radius test.
    coords %= coords.box.diagonal()
    coords -= coords.box.diagonal()/2
    r = md.coordinates.spherical_radius(coords, origin=0)
    # constr_res holds the residue id for pinned molecules, -1 for mobile ones.
    constr_res = np.where(r >= radius, coords.residue_ids, -np.ones(r.shape, dtype=int))
    unconstrained = sum(constr_res == -1)
    constrained = len(constr_res) - unconstrained
    if method == 'residue':
        atoms = []
        if resize_box:
            frame = trajectory[step]
            atom_crds = md.pbc.whole(frame % frame.box.diagonal(), len_res=len_mol)
        else:
            atom_crds = trajectory[step] % trajectory[step].box.diagonal()
        for atm, res, resnr, x in zip(atom_names,
                                      trajectory.atoms.residue_names,
                                      residue_ids,
                                      atom_crds):
            atm_dict = {'atm': atm, 'resnr': resnr, 'x': x}
            # Skip atoms of molecules that were cut away by resize_to_box.
            if resnr not in coords.residue_ids:
                continue
            if resnr in constr_res:
                atm_dict['res'] = kwargs['constrained_residue']
            else:
                atm_dict['res'] = res
            atoms.append(atm_dict)
        # Group by residue name, then renumber residues consecutively.
        atoms = sorted(atoms, key=lambda x: x['res'])
        cur_resnr = -1
        i = 0
        for atm in atoms:
            if atm['resnr'] != cur_resnr:
                cur_resnr = atm['resnr']
                i += 1
            atm['resnr'] = i
        #TODO: write correct box in gro file!!! (do energy minimazation w/o posres first)
        write_gro(outfile, atoms, kwargs['name'], coords.box.diagonal()+kwargs.get('box_buffer', 0))
    else:
        raise NotImplementedError('method={} not implemented at the moment.'.format(method))
    return unconstrained, constrained
def make_slit_conf(trajectory, constrained_subset, step, outfile, thickness, **kwargs):
    """
    Generate an initial configuration of a slit pore of pinned molecules.

    Args:
        trajectory (mdevaluate.Coordinates):
            Bulk simulation from which the configuration is taken
        constrained_subset (dict):
            Definition of a subset of the atoms defining the constraints
        step: Timestep at which the configuration is taken from the source trajectory
        outfile: Output file of the new configuration
        thickness: Molecules with z-coordinate below this value are pinned
            (renamed to kwargs['constrained_residue']); the rest stay mobile.
        **kwargs: Requires 'constrained_residue' and 'name'.

    Returns:
        Tuple of number of unconstrained and constrained molecules.
    """
    subset = trajectory.subset(**constrained_subset)
    coords = subset[step]
    # Check if it is a water system with only one residue
    residue_ids = trajectory.atoms.residue_ids
    atom_names = trajectory.atoms.atom_names
    len_mol = len(set(atom_names))
    nr_mol = len(atom_names) // len_mol
    if len(set(residue_ids)) == 1:
        # NOTE(review): writes new ids into the trajectory's residue_ids in place.
        residue_ids[0::len_mol] = residue_ids[1::len_mol] = residue_ids[2::len_mol] = range(1, nr_mol + 1)
        coords = coords.view(ResizedBoxFrame)
        coords.residue_ids = residue_ids[0::len_mol]
    coords %= coords.box.diagonal()
    z = coords[:, 2]
    # constr_res holds the residue id for pinned molecules, -1 for mobile ones.
    constr_res = np.where(z < thickness, coords.residue_ids, -np.ones(z.shape, dtype=int))
    unconstrained = sum(constr_res == -1)
    constrained = len(constr_res) - unconstrained
    atoms = []
    atom_crds = (trajectory[step] % trajectory[step].box.diagonal()).whole
    for atm, res, resnr, x in zip(atom_names,
                                  trajectory.atoms.residue_names,
                                  residue_ids,
                                  atom_crds):
        atm_dict = {'atm': atm, 'resnr': resnr, 'x': x}
        if resnr not in coords.residue_ids:
            continue
        if resnr in constr_res:
            atm_dict['res'] = kwargs['constrained_residue']
        else:
            atm_dict['res'] = res
        atoms.append(atm_dict)
    # Group by residue name, then renumber residues consecutively.
    atoms = sorted(atoms, key=lambda x: x['res'])
    cur_resnr = -1
    i = 0
    for atm in atoms:
        if atm['resnr'] != cur_resnr:
            cur_resnr = atm['resnr']
            i += 1
        atm['resnr'] = i
    write_gro(outfile, atoms, kwargs['name'], coords.box.diagonal())
    return unconstrained, constrained
def make_cylindrical_conf(trajectory, constrained_subset, step, outfile, radius, **kwargs):
    """
    Generate an initial configuration of a cylindrical pore of pinned molecules.

    Args:
        trajectory (mdevaluate.Coordinates):
            Bulk simulation from which the configuration is taken
        constrained_subset (dict):
            Definition of a subset of the atoms defining the constraints
        step: Timestep at which the configuration is taken from the source trajectory
        outfile: Output file of the new configuration
        radius: Radius of the cylindrical pore; molecules whose radial
            distance from the box center (in the x-y plane) exceeds this
            value are pinned (renamed to kwargs['constrained_residue']).
        **kwargs: Requires 'constrained_residue' and 'name'.

    Returns:
        Tuple of number of unconstrained and constrained molecules.
    """
    subset = trajectory.subset(**constrained_subset)
    coords = subset[step]
    # Check if it is a water system with only one residue
    residue_ids = trajectory.atoms.residue_ids
    atom_names = trajectory.atoms.atom_names
    len_mol = len(set(atom_names))
    nr_mol = len(atom_names) // len_mol
    if len(set(residue_ids)) == 1:
        # NOTE(review): writes new ids into the trajectory's residue_ids in place.
        residue_ids[0::len_mol] = residue_ids[1::len_mol] = residue_ids[2::len_mol] = range(1, nr_mol + 1)
        coords = coords.view(ResizedBoxFrame)
        coords.residue_ids = residue_ids[0::len_mol]
    # Wrap into the box, then center it at the origin for the radius test.
    coords %= coords.box.diagonal()
    coords -= coords.box.diagonal()/2
    r, _ = md.coordinates.polar_coordinates(coords[:, 0], coords[:, 1])
    # constr_res holds the residue id for pinned molecules, -1 for mobile ones.
    constr_res = np.where(r > radius, coords.residue_ids, -np.ones(r.shape, dtype=int))
    unconstrained = sum(constr_res == -1)
    constrained = len(constr_res) - unconstrained
    atoms = []
    atom_crds = (trajectory[step] % trajectory[step].box.diagonal()).whole
    for atm, res, resnr, x in zip(atom_names,
                                  trajectory.atoms.residue_names,
                                  residue_ids,
                                  atom_crds):
        atm_dict = {'atm': atm, 'resnr': resnr, 'x': x}
        if resnr not in coords.residue_ids:
            continue
        if resnr in constr_res:
            atm_dict['res'] = kwargs['constrained_residue']
        else:
            atm_dict['res'] = res
        atoms.append(atm_dict)
    # Group by residue name, then renumber residues consecutively.
    atoms = sorted(atoms, key=lambda x: x['res'])
    cur_resnr = -1
    i = 0
    for atm in atoms:
        if atm['resnr'] != cur_resnr:
            cur_resnr = atm['resnr']
            i += 1
        atm['resnr'] = i
    write_gro(outfile, atoms, kwargs['name'], coords.box.diagonal())
    return unconstrained, constrained
def write_water_top(topout, **kwargs):
    """
    Write a water topology with constrained molecules.

    Args:
        topout: Output file of the topology
        name: Name of the system
        nr_sol: Number of unconstrained molecules (SOL)
        nr_wal: Number of constrained molecules (WAL)
    """
    with save_open(topout) as top:
        # Render the jinja2 water topology template with the given context.
        template = env.get_template('water.top')
        top.write(template.render(**kwargs))
def generate_spherical_water(outdir, trajectory, step, radius, resize_box=False, **kwargs):
"""
Generate gromacs topology for water in spherical neutral confinement.
Args:
outdir: Output directory of the new topology
trajectory: Trajectory from which the starting configuration is taken
step: | |
'type': 'str'},
'ssl_configuration': {'key': 'sslConfiguration', 'type': 'SslConfiguration'},
'aks_networking_configuration': {'key': 'aksNetworkingConfiguration', 'type': 'AksNetworkingConfiguration'},
}
    def __init__(
        self,
        *,
        cluster_fqdn: Optional[str] = None,
        agent_count: Optional[int] = None,
        agent_vm_size: Optional[str] = None,
        ssl_configuration: Optional["SslConfiguration"] = None,
        aks_networking_configuration: Optional["AksNetworkingConfiguration"] = None,
        **kwargs
    ):
        """Initialize AKS properties from keyword-only arguments.

        ``system_services`` is populated by the service (read-only), so it is
        initialized to None rather than accepted as a parameter.
        """
        super(AKSProperties, self).__init__(**kwargs)
        self.cluster_fqdn = cluster_fqdn
        # Read-only: filled in by the server, ignored on requests.
        self.system_services = None
        self.agent_count = agent_count
        self.agent_vm_size = agent_vm_size
        self.ssl_configuration = ssl_configuration
        self.aks_networking_configuration = aks_networking_configuration
class AmlCompute(Compute):
    """An Azure Machine Learning compute.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :param compute_type: Required. The type of compute.Constant filled by server. Possible values
     include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine", "HDInsight",
     "Databricks", "DataLakeAnalytics".
    :type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
    :param compute_location: Location for the underlying compute.
    :type compute_location: str
    :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
     Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown", "Updating",
     "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
    :vartype provisioning_state: str or
     ~azure.mgmt.machinelearningservices.models.ProvisioningState
    :param description: The description of the Machine Learning compute.
    :type description: str
    :ivar created_on: The date and time when the compute was created.
    :vartype created_on: ~datetime.datetime
    :ivar modified_on: The date and time when the compute was last modified.
    :vartype modified_on: ~datetime.datetime
    :param resource_id: ARM resource id of the underlying compute.
    :type resource_id: str
    :ivar provisioning_errors: Errors during provisioning.
    :vartype provisioning_errors:
     list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
    :ivar is_attached_compute: Indicating whether the compute was provisioned by user and brought
     from outside if true, or machine learning service provisioned it if false.
    :vartype is_attached_compute: bool
    :param properties: AML Compute properties.
    :type properties: ~azure.mgmt.machinelearningservices.models.AmlComputeProperties
    """
    # Server-controlled (readonly) and mandatory fields for serialization checks.
    _validation = {
        'compute_type': {'required': True},
        'provisioning_state': {'readonly': True},
        'created_on': {'readonly': True},
        'modified_on': {'readonly': True},
        'provisioning_errors': {'readonly': True},
        'is_attached_compute': {'readonly': True},
    }
    # Maps Python attribute names to REST wire names and msrest types.
    _attribute_map = {
        'compute_type': {'key': 'computeType', 'type': 'str'},
        'compute_location': {'key': 'computeLocation', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
        'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
        'properties': {'key': 'properties', 'type': 'AmlComputeProperties'},
    }
    def __init__(
        self,
        *,
        compute_location: Optional[str] = None,
        description: Optional[str] = None,
        resource_id: Optional[str] = None,
        properties: Optional["AmlComputeProperties"] = None,
        **kwargs
    ):
        """Initialize an AmlCompute; fixes the polymorphic discriminator to 'AmlCompute'."""
        super(AmlCompute, self).__init__(compute_location=compute_location, description=description, resource_id=resource_id, **kwargs)
        self.compute_type = 'AmlCompute'  # type: str
        self.properties = properties
class AmlComputeNodeInformation(msrest.serialization.Model):
    """Per-node details reported for an AmlCompute cluster.

    Every attribute is populated by the server and will be ignored when
    included in a request.

    :ivar node_id: ID of the compute node.
    :vartype node_id: str
    :ivar private_ip_address: Private IP address of the compute node.
    :vartype private_ip_address: str
    :ivar public_ip_address: Public IP address of the compute node.
    :vartype public_ip_address: str
    :ivar port: SSH port number of the node.
    :vartype port: int
    :ivar node_state: State of the compute node. Possible values include:
     "idle", "running", "preparing", "unusable", "leaving", "preempted".
    :vartype node_state: str or ~azure.mgmt.machinelearningservices.models.NodeState
    :ivar run_id: ID of the Experiment running on the node, if any else null.
    :vartype run_id: str
    """

    _validation = {
        'node_id': {'readonly': True},
        'private_ip_address': {'readonly': True},
        'public_ip_address': {'readonly': True},
        'port': {'readonly': True},
        'node_state': {'readonly': True},
        'run_id': {'readonly': True},
    }

    _attribute_map = {
        'node_id': {'key': 'nodeId', 'type': 'str'},
        'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
        'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
        'node_state': {'key': 'nodeState', 'type': 'str'},
        'run_id': {'key': 'runId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AmlComputeNodeInformation, self).__init__(**kwargs)
        # All fields are server-populated; a freshly built instance is unset.
        for _name in ('node_id', 'private_ip_address', 'public_ip_address',
                      'port', 'node_state', 'run_id'):
            setattr(self, _name, None)
class ComputeNodesInformation(msrest.serialization.Model):
    """Base model for node information of a Machine Learning compute.

    You probably want one of the sub-classes rather than this class directly;
    the only known sub-class is AmlComputeNodesInformation, selected via the
    ``compute_type`` discriminator.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :param compute_type: Required. The type of compute. Constant filled by
     server. Possible values include: "AKS", "AmlCompute", "ComputeInstance",
     "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
     "DataLakeAnalytics".
    :type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
    :ivar next_link: The continuation token.
    :vartype next_link: str
    """

    _validation = {
        'compute_type': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'compute_type': {'key': 'computeType', 'type': 'str'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    _subtype_map = {
        'compute_type': {'AmlCompute': 'AmlComputeNodesInformation'}
    }

    def __init__(self, **kwargs):
        super(ComputeNodesInformation, self).__init__(**kwargs)
        self.compute_type = None  # type: Optional[str]  # set by subclasses
        self.next_link = None  # server-populated continuation token
class AmlComputeNodesInformation(ComputeNodesInformation):
    """Node information for an AmlCompute compute target.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order to
    send to Azure.

    :param compute_type: Required. The type of compute. Constant filled by
     server. Possible values include: "AKS", "AmlCompute", "ComputeInstance",
     "DataFactory", "VirtualMachine", "HDInsight", "Databricks",
     "DataLakeAnalytics".
    :type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
    :ivar next_link: The continuation token.
    :vartype next_link: str
    :ivar nodes: The collection of returned AmlCompute nodes details.
    :vartype nodes: list[~azure.mgmt.machinelearningservices.models.AmlComputeNodeInformation]
    """

    _validation = {
        'compute_type': {'required': True},
        'next_link': {'readonly': True},
        'nodes': {'readonly': True},
    }

    _attribute_map = {
        'compute_type': {'key': 'computeType', 'type': 'str'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'nodes': {'key': 'nodes', 'type': '[AmlComputeNodeInformation]'},
    }

    def __init__(self, **kwargs):
        super(AmlComputeNodesInformation, self).__init__(**kwargs)
        # Fixed polymorphic discriminator for this subclass.
        self.compute_type = 'AmlCompute'  # type: str
        self.nodes = None  # read-only, filled in by deserialization
class AmlComputeProperties(msrest.serialization.Model):
"""AML Compute properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_size: Virtual Machine Size.
:type vm_size: str
:param vm_priority: Virtual Machine priority. Possible values include: "Dedicated",
"LowPriority".
:type vm_priority: str or ~azure.mgmt.machinelearningservices.models.VmPriority
:param scale_settings: Scale settings for AML Compute.
:type scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
:param user_account_credentials: Credentials for an administrator user account that will be
created on each compute node.
:type user_account_credentials:
~azure.mgmt.machinelearningservices.models.UserAccountCredentials
:param subnet: Virtual network subnet resource ID the compute nodes belong to.
:type subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
:param remote_login_port_public_access: State of the public SSH port. Possible values are:
Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled -
Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified -
Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined,
else is open all public nodes. It can be default only during cluster creation time, after
creation it will be either enabled or disabled. Possible values include: "Enabled", "Disabled",
"NotSpecified". Default value: "NotSpecified".
:type remote_login_port_public_access: str or
~azure.mgmt.machinelearningservices.models.RemoteLoginPortPublicAccess
:ivar allocation_state: Allocation state of the compute. Possible values are: steady -
Indicates that the compute is not resizing. There are no changes to the number of compute nodes
in the compute in progress. A compute enters this state when it is created and when no
operations are being performed on the compute to change the number of compute nodes. resizing -
Indicates that the compute is resizing; that is, compute nodes are being added to or removed
from the compute. Possible values include: "Steady", "Resizing".
:vartype allocation_state: str or ~azure.mgmt.machinelearningservices.models.AllocationState
:ivar allocation_state_transition_time: The time at which the compute entered its current
allocation state.
:vartype allocation_state_transition_time: ~datetime.datetime
:ivar errors: Collection of errors encountered by various compute nodes during node setup.
:vartype errors: list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar current_node_count: The number of compute nodes currently assigned to the compute.
:vartype current_node_count: int
:ivar target_node_count: The target number of compute nodes for the compute. If the
allocationState is resizing, this property denotes the target node count for the ongoing resize
operation. If the allocationState is steady, this property denotes the target node count for
the previous resize operation.
:vartype target_node_count: int
:ivar node_state_counts: Counts of various node states on the compute.
:vartype node_state_counts: ~azure.mgmt.machinelearningservices.models.NodeStateCounts
"""
_validation = {
'allocation_state': {'readonly': True},
'allocation_state_transition_time': {'readonly': True},
'errors': {'readonly': True},
'current_node_count': {'readonly': True},
'target_node_count': {'readonly': True},
'node_state_counts': {'readonly': True},
}
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'vm_priority': {'key': 'vmPriority', 'type': 'str'},
'scale_settings': {'key': 'scaleSettings', 'type': 'ScaleSettings'},
'user_account_credentials': {'key': 'userAccountCredentials', 'type': 'UserAccountCredentials'},
'subnet': {'key': 'subnet', 'type': 'ResourceId'},
'remote_login_port_public_access': {'key': 'remoteLoginPortPublicAccess', 'type': 'str'},
| |
import datetime
import uuid
import copy
from django.test import Client, TestCase
from django.contrib.auth.models import User, Permission
from django.urls import reverse
from majora2 import models
from majora2 import forms
from majora2.test.test_basic_api import BasicAPIBase, OAuthAPIClientBase
from tatl import models as tmodels
import sys
import json
default_central_sample_id = "HOOT-00001"

# Evaluate "today" exactly once. The original called datetime.date.today()
# separately for collection_date, received_date and admission_date, so a test
# run importing this module just before midnight could produce a payload whose
# dates disagree with each other (and with assertions made after midnight).
_today = datetime.date.today().strftime("%Y-%m-%d")

# Canonical biosample submission payload; tests deep-copy this and tweak
# individual fields, so never mutate it in place.
default_payload = {
    "biosamples": [
        {
            "adm1": "UK-ENG",
            "central_sample_id": default_central_sample_id,
            "collection_date": _today,
            "is_surveillance": False,
            "received_date": _today,
            "adm2": "Birmingham",
            "source_age": 30,
            "source_sex": "M",
            "adm2_private": "B20",
            "biosample_source_id": "ABC12345",
            "collecting_org": "Hypothetical University of Hooting",
            "root_sample_id": "PHA_12345",
            "sample_type_collected": "swab",
            "sample_type_received": "primary",
            "sender_sample_id": "LAB12345",
            "swab_site": "nose-throat",
            "collection_pillar": 1,
            "is_hcw": True,
            "is_hospital_patient": True,
            "is_icu_patient": False,
            "admitted_with_covid_diagnosis": True,
            "employing_hospital_name": "Hoot Point Hospital",
            "employing_hospital_trust_or_board": "Hoot Point Hospital Trust",
            "admission_date": _today,
            "admitted_hospital_name": "Hooting Hospital",
            "admitted_hospital_trust_or_board": "Hooting Hospital Trust",
            "is_care_home_worker": False,
            "is_care_home_resident": False,
            "anonymised_care_home_code": None,
            "metadata": {
                "test": {
                    "bubo": "bubo",
                    "hoots": 8,
                    "hooting": False,
                },
                "majora": {
                    "mask": "creepy",
                }
            },
            "metrics": {
                "ct": {
                    "records": {
                        1: {
                            "test_platform": "INHOUSE",
                            "test_target": "S",
                            "test_kit": "INHOUSE",
                            "ct_value": 20,
                        },
                        2: {
                            "test_platform": "INHOUSE",
                            "test_target": "E",
                            "test_kit": "INHOUSE",
                            "ct_value": 21,
                        },
                    }
                }
            },
        },
    ],
    "client_name": "pytest",
    "client_version": 1,
}
def _test_biosample(self, bs, payload):
    """Assert that BiosampleArtifact *bs* matches ``payload["biosamples"][0]``.

    ``self`` is the calling TestCase instance; this is a module-level helper
    (rather than a mixin method) so multiple test classes can share it.
    """
    # Fixed values
    self.assertEqual("United Kingdom", bs.created.collection_location_country)
    self.assertEqual("2697049", bs.taxonomy_identifier)  # presumably the SARS-CoV-2 taxid — TODO confirm
    self.assertEqual(payload["biosamples"][0].get("adm1"), bs.created.collection_location_adm1)
    self.assertEqual(payload["biosamples"][0]["central_sample_id"], bs.dice_name)
    self.assertEqual(datetime.datetime.strptime(payload["biosamples"][0]["collection_date"], "%Y-%m-%d").date(), bs.created.collection_date)
    # COG-UK supplement fields are only checked when the sampling process has
    # a supplement attached (not every payload creates one).
    if hasattr(bs.created, "coguk_supp"):
        self.assertEqual(payload["biosamples"][0].get("is_surveillance"), bs.created.coguk_supp.is_surveillance)
        self.assertEqual(payload["biosamples"][0].get("collection_pillar"), bs.created.coguk_supp.collection_pillar)
        self.assertEqual(payload["biosamples"][0].get("is_hcw"), bs.created.coguk_supp.is_hcw)
        self.assertEqual(payload["biosamples"][0].get("is_hospital_patient"), bs.created.coguk_supp.is_hospital_patient)
        self.assertEqual(payload["biosamples"][0].get("is_icu_patient"), bs.created.coguk_supp.is_icu_patient)
        self.assertEqual(payload["biosamples"][0].get("admitted_with_covid_diagnosis"), bs.created.coguk_supp.admitted_with_covid_diagnosis)
        self.assertEqual(payload["biosamples"][0].get("employing_hospital_name"), bs.created.coguk_supp.employing_hospital_name)
        self.assertEqual(payload["biosamples"][0].get("employing_hospital_trust_or_board"), bs.created.coguk_supp.employing_hospital_trust_or_board)
        # admission_date may be absent or None; strptime(None) raises TypeError
        admission_date = None
        try:
            admission_date = datetime.datetime.strptime(payload["biosamples"][0].get("admission_date"), "%Y-%m-%d").date()
        except TypeError:
            pass
        self.assertEqual(admission_date, bs.created.coguk_supp.admission_date)
        self.assertEqual(payload["biosamples"][0].get("admitted_hospital_name"), bs.created.coguk_supp.admitted_hospital_name)
        self.assertEqual(payload["biosamples"][0].get("admitted_hospital_trust_or_board"), bs.created.coguk_supp.admitted_hospital_trust_or_board)
        self.assertEqual(payload["biosamples"][0].get("is_care_home_worker"), bs.created.coguk_supp.is_care_home_worker)
        self.assertEqual(payload["biosamples"][0].get("is_care_home_resident"), bs.created.coguk_supp.is_care_home_resident)
        self.assertEqual(payload["biosamples"][0].get("anonymised_care_home_code"), bs.created.coguk_supp.anonymised_care_home_code)
    # received_date may be absent or None; same TypeError guard as above
    received_date = None
    try:
        received_date = datetime.datetime.strptime(payload["biosamples"][0].get("received_date"), "%Y-%m-%d").date()
    except TypeError:
        pass
    self.assertEqual(received_date, bs.created.received_date)
    # adm2 may be absent or None; .upper() on None raises AttributeError
    adm2 = None
    try:
        adm2 = payload["biosamples"][0].get("adm2").upper() #adm2 coerced to upper
    except AttributeError:
        pass
    self.assertEqual(adm2, bs.created.collection_location_adm2)
    self.assertEqual(payload["biosamples"][0].get("source_age"), bs.created.source_age)
    self.assertEqual(payload["biosamples"][0].get("source_sex", ""), bs.created.source_sex)
    self.assertEqual(payload["biosamples"][0].get("adm2_private"), bs.created.private_collection_location_adm2)
    # Biosample Source group membership: exactly one when a source id was
    # sent, none otherwise.
    biosample_sources = []
    for record in bs.created.records.all():
        if record.in_group and record.in_group.kind == "Biosample Source":
            biosample_sources.append(record.in_group.secondary_id)
    if payload["biosamples"][0].get("biosample_source_id"):
        self.assertEqual(payload["biosamples"][0]["biosample_source_id"], biosample_sources[0])
        self.assertEqual(payload["biosamples"][0]["biosample_source_id"], bs.primary_group.dice_name)
        self.assertEqual(len(biosample_sources), 1)
    else:
        self.assertEqual(len(biosample_sources), 0)
        self.assertEqual(None, bs.primary_group)
    self.assertEqual(payload["biosamples"][0].get("collecting_org"), bs.created.collected_by)
    self.assertEqual(self.user, bs.created.submission_user)
    self.assertEqual(self.user.profile.institute.name, bs.created.submitted_by)
    self.assertEqual(self.user.profile.institute, bs.created.submission_org)
    self.assertEqual(payload["biosamples"][0].get("root_sample_id"), bs.root_sample_id)
    self.assertEqual(payload["biosamples"][0].get("sample_type_collected", ""), bs.sample_type_collected)
    self.assertEqual(payload["biosamples"][0].get("sample_type_received"), bs.sample_type_current)
    self.assertEqual(payload["biosamples"][0].get("sender_sample_id"), bs.sender_sample_id)
    self.assertEqual(payload["biosamples"][0].get("swab_site"), bs.sample_site)
    # Metadata: every (tag, name) pair sent must be stored, and vice versa
    expected_n_metadata = 0
    for tag_name, tag_data in payload["biosamples"][0]["metadata"].items():
        expected_n_metadata += len(tag_data.keys())
    self.assertEqual(bs.metadata.count(), expected_n_metadata)
    record_tests = 0
    for record in bs.metadata.all():
        self.assertEqual(str(payload["biosamples"][0]["metadata"][record.meta_tag][record.meta_name]), record.value) # all metadata is str atm
        record_tests += 1
    self.assertEqual(record_tests, expected_n_metadata)
    # Metrics: one metric object per tag, one record per entry under it
    expected_n_metrics_objects = 0
    expected_n_metrics_records = 0
    for tag_name, tag_data in payload["biosamples"][0]["metrics"].items():
        expected_n_metrics_objects += 1
        expected_n_metrics_records += len(tag_data["records"])
    n_records = 0
    self.assertEqual(bs.metrics.count(), expected_n_metrics_objects)
    for metric in bs.metrics.all():
        for record in metric.metric_records.all():
            n_records += 1
    self.assertEqual(n_records, expected_n_metrics_records)
    record_tests = 0
    if expected_n_metrics_objects > 0:
        # NOTE(review): only the "ct" tag's records are value-checked here
        for i, metric in payload["biosamples"][0]["metrics"]["ct"]["records"].items():
            self.assertIsNotNone(models.TemporaryMajoraArtifactMetricRecord_ThresholdCycle.objects.filter(
                artifact_metric__artifact=bs,
                test_platform = metric["test_platform"],
                test_kit = metric["test_kit"],
                test_target = metric["test_target"],
                ct_value = metric["ct_value"]
            ).first())
            record_tests += 1
    self.assertEqual(record_tests, expected_n_metrics_records)
class BiosampleArtifactTest(BasicAPIBase):
def setUp(self):
    """Give every test an authenticated deep copy of the shared payload."""
    super().setUp()
    self.default_central_sample_id = default_central_sample_id
    payload = copy.deepcopy(default_payload)
    payload["username"] = self.user.username
    payload["token"] = self.key.key
    self.default_payload = payload
def _add_biosample(self, payload, expected_errors=0, update=False, empty=False, expected_http=200):
    """POST *payload* to the biosample add/update/addempty endpoint.

    Returns a tuple of (BiosampleArtifact or None, decoded JSON or None).
    On a 200 response the reported error count is asserted against
    *expected_errors*, dumping the response to stderr on mismatch.
    """
    if update:
        endpoint = "api.artifact.biosample.update"
    elif empty:
        endpoint = "api.artifact.biosample.addempty"
    else:
        endpoint = "api.artifact.biosample.add"
    response = self.c.post(reverse(endpoint), payload, secure=True, content_type="application/json")
    self.assertEqual(expected_http, response.status_code)

    j = None
    if expected_http == 200:
        j = response.json()
        if j["errors"] != expected_errors:
            # Dump the full response to aid debugging before failing
            sys.stderr.write(json.dumps(j, indent=4, sort_keys=True) + '\n')
        self.assertEqual(expected_errors, j["errors"])

    try:
        bs = models.BiosampleArtifact.objects.get(central_sample_id=self.default_central_sample_id)
    except models.BiosampleArtifact.DoesNotExist:
        bs = None
    return bs, j
def test_add_biosample(self):
    """A full payload creates exactly one biosample matching what was sent."""
    payload = copy.deepcopy(self.default_payload)
    count_before = models.BiosampleArtifact.objects.count()
    bs, _ = self._add_biosample(payload)
    self.assertEqual(models.BiosampleArtifact.objects.count(), count_before + 1)
    _test_biosample(self, bs, payload)
def test_biosample_pha_update(self):
    """A PHA update sets root_biosample_source_id and changes nothing else."""
    # create a biosample
    payload = copy.deepcopy(self.default_payload)
    self._add_biosample(payload)
    # Minimal update payload: only the PHA-settable field is supplied
    update_payload = {
        "username": self.user.username,
        "token": self.key.key,
        "biosamples": [
            {
                "central_sample_id": "HOOT-00001",
                "root_biosample_source_id": "HOOTER-1",
            },
        ],
        "client_name": "pytest",
        "client_version": 1,
    }
    self._add_biosample(update_payload, update=True)
    bs = models.BiosampleArtifact.objects.get(central_sample_id=self.default_central_sample_id)
    self.assertEqual(update_payload["biosamples"][0]["root_biosample_source_id"], bs.root_biosample_source_id)
    _test_biosample(self, bs, payload) # determine nothing has changed from the initial payload
def test_biosample_update(self):
    """A full re-submission with all-new values replaces every mutable field."""
    # create a biosample
    payload = copy.deepcopy(self.default_payload)
    self._add_biosample(payload)
    # Complete replacement payload: every field differs from default_payload
    update_payload = {
        "username": self.user.username,
        "token": self.key.key,
        "biosamples": [
            {
                "adm1": "UK-WLS",
                "central_sample_id": self.default_central_sample_id,
                "collection_date": datetime.date.today().strftime("%Y-%m-%d"),
                "is_surveillance": True,
                "received_date": datetime.date.today().strftime("%Y-%m-%d"),
                "adm2": "Swansea",
                "source_age": 31,
                "source_sex": "F",
                "adm2_private": "SA4",
                "biosample_source_id": "XYZ12345",
                "collecting_org": "Parliament of Hooters",
                "root_sample_id": "PHA_67890",
                "sample_type_collected": "BAL",
                "sample_type_received": "primary",
                "sender_sample_id": "LAB67890",
                "swab_site": None,
                "collection_pillar": 2,
                "is_hcw": False,
                "is_hospital_patient": True,
                "is_icu_patient": True,
                "admitted_with_covid_diagnosis": False,
                "employing_hospital_name": None,
                "employing_hospital_trust_or_board": None,
                "admission_date": datetime.date.today().strftime("%Y-%m-%d"),
                "admitted_hospital_name": "HOSPITAL",
                "admitted_hospital_trust_or_board": "HOSPITAL",
                "is_care_home_worker": True,
                "is_care_home_resident": True,
                "anonymised_care_home_code": "CC-X00",
                "metadata": {
                    "test": {
                        "bubo": "bubo",
                        "hoots": 8,
                        "hooting": False,
                    },
                    "majora": {
                        "mask": "creepy",
                    }
                },
                "metrics": {
                    "ct": {
                        "records": {
                            1: {
                                "test_platform": "INHOUSE",
                                "test_target": "S",
                                "test_kit": "INHOUSE",
                                "ct_value": 20,
                            },
                            2: {
                                "test_platform": "INHOUSE",
                                "test_target": "E",
                                "test_kit": "INHOUSE",
                                "ct_value": 21,
                            },
                        }
                    }
                },
            },
        ],
        "client_name": "pytest",
        "client_version": 1,
    }
    bs, j = self._add_biosample(update_payload)
    with self.assertRaises(AssertionError):
        # Check that the biosample has changed from the initial
        _test_biosample(self, bs, payload)
    _test_biosample(self, bs, update_payload)
    # Check the supp has been updated and not recreated
    self.assertEqual(models.COGUK_BiosourceSamplingProcessSupplement.objects.count(), 1)
def test_biosample_add_overwrite_metadata(self):
    """Re-adding with changed metadata overwrites it but leaves metrics alone."""
    initial = copy.deepcopy(self.default_payload)
    bs, j = self._add_biosample(initial)

    changed = copy.deepcopy(self.default_payload)
    changed["biosamples"][0]["metadata"]["test"]["hooting"] = True
    changed["biosamples"][0]["metadata"]["majora"]["mask"] = "cute"
    changed["biosamples"][0]["metrics"] = {}
    bs, j = self._add_biosample(changed)

    with self.assertRaises(AssertionError):
        # The stored biosample must now differ from the first payload
        _test_biosample(self, bs, initial)
    # Sending empty metrics must not have deleted the stored ones
    changed["biosamples"][0]["metrics"] = initial["biosamples"][0]["metrics"]
    _test_biosample(self, bs, changed)

    # Verify the tatl audit record for the second request
    expected_context = {
        "changed_fields": [],
        "nulled_fields": [],
        "changed_metadata": ["metadata:test.hooting", "metadata:majora.mask"],
        "flashed_metrics": [],
    }
    self._test_update_biosample_tatl(j["request"], expected_context)
def test_biosample_add_overwrite_metrics(self):
    """Re-adding with a changed ct value flashes the metric records."""
    initial = copy.deepcopy(self.default_payload)
    bs, j = self._add_biosample(initial)

    changed = copy.deepcopy(self.default_payload)
    changed["biosamples"][0]["metrics"]["ct"]["records"][2]["ct_value"] = 30
    bs, j = self._add_biosample(changed)

    with self.assertRaises(AssertionError):
        # The stored biosample must now differ from the first payload
        _test_biosample(self, bs, initial)
    _test_biosample(self, bs, changed)

    # Verify the tatl audit record for the second request
    expected_context = {
        "changed_fields": [],
        "nulled_fields": [],
        "changed_metadata": [],
        "flashed_metrics": ["ct"],
    }
    self._test_update_biosample_tatl(j["request"], expected_context)
def test_biosample_add_update_nostomp(self):
    """Re-adding with one changed field must not clobber the others."""
    bs, j = self._add_biosample(copy.deepcopy(self.default_payload))
    repeat = copy.deepcopy(self.default_payload)
    repeat["biosamples"][0]["collection_pillar"] = 2
    bs, j = self._add_biosample(repeat)
    _test_biosample(self, bs, repeat)  # stored artifact matches second payload
def test_biosample_add_update_nuke_stomp(self):
    """Explicitly sending None/"" values nukes the corresponding fields."""
    #NOTE Some fields become "" empty string when sending None
    #TODO it would be nice if that behaviour was consistent
    # create a biosample
    payload = copy.deepcopy(self.default_payload)
    bs, j = self._add_biosample(payload)
    # Same sample, but with every nullable field explicitly blanked
    stomp_payload = {
        "username": self.user.username,
        "token": self.key.key,
        "biosamples": [
            {
                "adm1": "UK-ENG",
                "central_sample_id": self.default_central_sample_id,
                "collection_date": datetime.date.today().strftime("%Y-%m-%d"),
                "is_surveillance": False,
                "received_date": None,
                "adm2": None,
                "source_age": None,
                "source_sex": "",
                "adm2_private": None,
                "biosample_source_id": "ABC12345", # can't nuke biosample_source_id once it has been set
                "collecting_org": None,
                "root_sample_id": None,
                "sample_type_collected": "",
                "sample_type_received": None,
                "sender_sample_id": None,
                "swab_site": None,
                "collection_pillar": None,
                "is_hcw": None,
                "is_hospital_patient": None,
                "is_icu_patient": None,
                "admitted_with_covid_diagnosis": None,
                "employing_hospital_name": None,
                "employing_hospital_trust_or_board": None,
                "admission_date": None,
                "admitted_hospital_name": None,
                "admitted_hospital_trust_or_board": None,
                "is_care_home_worker": None,
                "is_care_home_resident": None,
                "anonymised_care_home_code": None,
                "metadata": {},
                "metrics": {},
            },
        ],
        "client_name": "pytest",
        "client_version": 1,
    }
    bs, j = self._add_biosample(stomp_payload)
    # Add the metadata and metrics back to show that blanking them does nothing
    stomp_payload["biosamples"][0]["metadata"] = payload["biosamples"][0]["metadata"]
    stomp_payload["biosamples"][0]["metrics"] = payload["biosamples"][0]["metrics"]
    _test_biosample(self, bs, stomp_payload) # compare object to payload
    # Check the supp has been updated and not recreated
    self.assertEqual(models.COGUK_BiosourceSamplingProcessSupplement.objects.count(), 1)
def test_biosample_minimal_add_metrics_update(self):
    """Metadata and metrics can be attached later via the update endpoint."""
    # Add a minimal biosample and update it with some metrics
    payload = {
        "username": self.user.username,
        "token": self.key.key,
        "biosamples": [
            {
                "adm1": "UK-ENG",
                "central_sample_id": self.default_central_sample_id,
                "collection_date": datetime.date.today().strftime("%Y-%m-%d"),
                "is_surveillance": False,
                "is_hcw": True,
                "metadata": {},
                "metrics": {},
            },
        ],
        "client_name": "pytest",
        "client_version": 1,
    }
    bs, j = self._add_biosample(payload)
    _test_biosample(self, bs, payload)
    # Update payload carries only identifiers plus new metadata and metrics
    new_payload = {
        "username": self.user.username,
        "token": self.key.key,
        "biosamples": [
            {
                "central_sample_id": self.default_central_sample_id,
                "metadata": {
                    "test": {
                        "bubo": "bubo",
                        "hoots": 8,
                        "hooting": False,
                    },
                    "majora": {
                        "mask": "creepy",
                    }
                },
                "metrics": {
                    "ct": {
                        "records": {
                            1: {
                                "test_platform": "INHOUSE",
                                "test_target": "S",
                                "test_kit": "INHOUSE",
                                "ct_value": 20,
                            },
                            2: {
                                "test_platform": "INHOUSE",
                                "test_target": "E",
                                "test_kit": "INHOUSE",
                                "ct_value": 21,
                            },
                        }
                    }
                },
            },
        ],
    }
    bs, j = self._add_biosample(new_payload, update=True)
    # Stored artifact should equal the minimal payload plus the new
    # metadata and metrics
    update_payload = copy.deepcopy(payload)
    update_payload["biosamples"][0]["metadata"] = new_payload["biosamples"][0]["metadata"]
    update_payload["biosamples"][0]["metrics"] = new_payload["biosamples"][0]["metrics"]
    _test_biosample(self, bs, update_payload)
def test_biosample_full_add_partial_update(self):
    """A partial update can flip placeholder fields on a fully-added sample."""
    payload = copy.deepcopy(self.default_payload)
    bs, j = self._add_biosample(payload)
    _test_biosample(self, bs, payload)
    # Flip two fields through the update endpoint, then re-verify everything
    for field, value in (("is_surveillance", True), ("collection_pillar", 2)):
        payload["biosamples"][0][field] = value
    bs, j = self._add_biosample(payload, update=True)
    _test_biosample(self, bs, payload)
def test_biosample_minimal_add_partial_update(self):
# Add a minimal biosample and update a few additional fields
payload = {
"username": self.user.username,
"token": self.key.key,
"biosamples": [
{
"adm1": "UK-ENG",
"central_sample_id": self.default_central_sample_id,
"collection_date": datetime.date.today().strftime("%Y-%m-%d"),
"is_surveillance": False,
"is_hcw": True,
"metadata": {},
"metrics": {},
},
],
"client_name": "pytest",
"client_version": 1,
}
bs, j = self._add_biosample(payload)
_test_biosample(self, bs, payload)
new_payload = copy.deepcopy(payload)
del new_payload["biosamples"][0]["adm1"]
del | |
# (c) 2021 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from textwrap import dedent
from ansible_collections.cisco.iosxr.tests.unit.compat.mock import patch
from ansible_collections.cisco.iosxr.plugins.modules import iosxr_ntp_global
from ansible_collections.cisco.iosxr.tests.unit.modules.utils import (
set_module_args,
)
from .iosxr_module import TestIosxrModule
class TestIosxrNtpGlobalModule(TestIosxrModule):
module = iosxr_ntp_global
def setUp(self):
    """Patch the connection and fact-gathering layers so no device is needed."""
    super(TestIosxrNtpGlobalModule, self).setUp()
    # Patch the connection factory used by the resource-module base engine
    self.mock_get_resource_connection = patch(
        "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module_base."
        "get_resource_connection"
    )
    self.get_resource_connection = (
        self.mock_get_resource_connection.start()
    )
    # Patch facts get_config so tests inject running config via return_value
    self.mock_get_config = patch(
        "ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.facts.ntp_global.ntp_global."
        "Ntp_globalFacts.get_config"
    )
    self.get_config = self.mock_get_config.start()
def tearDown(self):
    """Stop the patchers started in setUp so the mocks don't leak.

    Bug fix: the original called ``.stop()`` on ``self.get_resource_connection``
    and ``self.get_config`` — those are the MagicMocks returned by
    ``patcher.start()``, so ``.stop()`` was just a no-op mock attribute call
    and the patches were never actually removed. ``stop()`` must be called on
    the patcher objects themselves.
    """
    super(TestIosxrNtpGlobalModule, self).tearDown()
    self.mock_get_resource_connection.stop()
    self.mock_get_config.stop()
def test_iosxr_ntp_global_merged_idempotent(self):
    """Merging a config identical to the running config issues no commands."""
    self.maxDiff = None
    # NOTE(review): the leading indentation inside this running-config sample
    # was lost during extraction and has been reconstructed per IOS-XR
    # convention (one space per config level) — confirm against the original.
    run_cfg = dedent(
        """\
        ntp
         max-associations 10
         interface GigabitEthernet0/0/0/0 vrf siteB
          multicast key 1
         !
         interface GigabitEthernet0/0/0/0
          broadcast client
          multicast client 192.168.127.12
          multicast destination 192.168.127.12
         !
         authentication-key 1 md5 encrypted testkey
         authentication-key 2 md5 encrypted 071B245F5A5B
         authenticate
         trusted-key 1
         trusted-key 2
         ipv4 dscp af11
         ipv6 precedence routine
         peer vrf siteC 192.0.2.1 iburst
         server vrf siteD 192.0.2.2 burst
         server 192.0.2.2 version 2 key 1 minpoll 4 maxpoll 5 prefer burst iburst source GigabitEthernet0/0/0/0
         drift file apphost
         drift aging time 0
         master 1
         access-group vrf siteA ipv4 peer PeerAcl2
         access-group vrf siteA ipv4 serve ServeAcl2
         access-group ipv4 peer PeerAcl1
         access-group ipv4 serve ServeAcl1
         access-group ipv4 serve-only ServeOnlyAcl1
         access-group ipv4 query-only QueryOnlyAcl1
         access-group ipv6 peer PeerAcl2
         source vrf siteE GigabitEthernet0/0/0/0
         source GigabitEthernet0/0/0/0
         passive
         broadcastdelay 1
         update-calendar
         log-internal-sync
        !
        """
    )
    self.get_config.return_value = run_cfg
    # The structured config below mirrors run_cfg exactly, so the merge
    # should be a no-op.
    set_module_args(
        dict(
            config=dict(
                access_group=dict(
                    ipv4=dict(
                        peer="PeerAcl1",
                        query_only="QueryOnlyAcl1",
                        serve="ServeAcl1",
                        serve_only="ServeOnlyAcl1",
                    ),
                    ipv6=dict(peer="PeerAcl2"),
                    vrfs=[
                        dict(
                            ipv4=dict(peer="PeerAcl2", serve="ServeAcl2"),
                            name="siteA",
                        )
                    ],
                ),
                authenticate=True,
                authentication_keys=[
                    dict(id=1, key="testkey", encryption=True),
                    dict(id=2, key="<KEY>", encryption=True),
                ],
                broadcastdelay=1,
                drift=dict(aging_time=0, file="apphost"),
                interfaces=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        multicast_client="192.168.127.12",
                        multicast_destination="192.168.127.12",
                        broadcast_client=True,
                    ),
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        multicast_key=1,
                        vrf="siteB",
                    ),
                ],
                ipv4=dict(dscp="af11"),
                ipv6=dict(precedence="routine"),
                log_internal_sync=True,
                master=dict(stratum=1),
                max_associations=10,
                passive=True,
                peers=[dict(iburst=True, peer="192.0.2.1", vrf="siteC")],
                servers=[
                    dict(burst=True, server="192.0.2.2", vrf="siteD"),
                    dict(
                        iburst=True,
                        burst=True,
                        server="192.0.2.2",
                        key_id=1,
                        maxpoll=5,
                        minpoll=4,
                        prefer=True,
                        source="GigabitEthernet0/0/0/0",
                        version=2,
                    ),
                ],
                source_interface="GigabitEthernet0/0/0/0",
                source_vrfs=[
                    dict(name="GigabitEthernet0/0/0/0", vrf="siteE")
                ],
                trusted_keys=[dict(key_id=1), dict(key_id=2)],
                update_calendar=True,
            ),
            state="merged",
        )
    )
    self.execute_module(changed=False, commands=[])
def test_iosxr_ntp_global_merged(self):
    """Merging onto an empty running config emits one command per option."""
    self.maxDiff = None
    set_module_args(
        dict(
            config=dict(
                access_group=dict(
                    ipv4=dict(
                        peer="PeerAcl1",
                        query_only="QueryOnlyAcl1",
                        serve="ServeAcl1",
                        serve_only="ServeOnlyAcl1",
                    ),
                    ipv6=dict(peer="PeerAcl2"),
                    vrfs=[
                        dict(
                            ipv4=dict(peer="PeerAcl2", serve="ServeAcl2"),
                            name="siteA",
                        )
                    ],
                ),
                authenticate=True,
                authentication_keys=[
                    dict(id=1, key="testkey", encryption=True),
                    dict(id=2, key="<KEY>", encryption=True),
                ],
                broadcastdelay=1,
                drift=dict(aging_time=0, file="apphost"),
                interfaces=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        multicast_client="192.168.127.12",
                        multicast_destination="192.168.127.12",
                        broadcast_client=True,
                    ),
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        multicast_key=1,
                        vrf="siteB",
                    ),
                ],
                ipv4=dict(dscp="af11"),
                ipv6=dict(precedence="routine"),
                log_internal_sync=True,
                master=dict(stratum=1),
                max_associations=10,
                passive=True,
                peers=[dict(iburst=True, peer="192.0.2.1", vrf="siteC")],
                servers=[
                    dict(burst=True, server="192.0.2.2", vrf="siteD"),
                    dict(
                        iburst=True,
                        burst=True,
                        server="192.0.2.2",
                        key_id=1,
                        maxpoll=5,
                        minpoll=4,
                        prefer=True,
                        source="GigabitEthernet0/0/0/0",
                        version=2,
                    ),
                ],
                source_interface="GigabitEthernet0/0/0/0",
                source_vrfs=[
                    dict(name="GigabitEthernet0/0/0/0", vrf="siteE")
                ],
                trusted_keys=[dict(key_id=1), dict(key_id=2)],
                update_calendar=True,
            ),
            state="merged",
        )
    )
    # Expected command set; ordering is not significant (compared sorted)
    commands = [
        "ntp authentication-key 1 md5 encrypted testkey",
        "ntp authentication-key 2 md5 encrypted 071B245F5A5B",
        "ntp peer vrf siteC 192.0.2.1 iburst",
        "ntp server vrf siteD 192.0.2.2 burst",
        "ntp server 192.0.2.2 burst iburst key 1 minpoll 4 maxpoll 5 prefer version 2 source GigabitEthernet0/0/0/0",
        "ntp trusted-key 1",
        "ntp trusted-key 2",
        "ntp interface GigabitEthernet0/0/0/0 broadcast client",
        "ntp interface GigabitEthernet0/0/0/0 multicast destination 192.168.127.12",
        "ntp interface GigabitEthernet0/0/0/0 multicast client 192.168.127.12",
        "ntp interface GigabitEthernet0/0/0/0 vrf siteB multicast key 1",
        "ntp vrf siteE source GigabitEthernet0/0/0/0",
        "ntp access-group vrf siteA ipv4 serve ServeAcl2",
        "ntp access-group vrf siteA ipv4 peer PeerAcl2",
        "ntp access-group ipv4 peer PeerAcl1",
        "ntp access-group ipv4 serve ServeAcl1",
        "ntp access-group ipv4 serve-only ServeOnlyAcl1",
        "ntp access-group ipv4 query-only QueryOnlyAcl1",
        "ntp access-group ipv6 peer PeerAcl2",
        "ntp authenticate",
        "ntp log-internal-sync",
        "ntp broadcastdelay 1",
        "ntp drift aging time 0",
        "ntp drift file apphost",
        "ntp ipv4 dscp af11",
        "ntp ipv6 precedence routine",
        "ntp max-associations 10",
        "ntp master 1",
        "ntp passive",
        "ntp update-calendar",
        "ntp source GigabitEthernet0/0/0/0",
    ]
    result = self.execute_module(changed=True)
    self.assertEqual(sorted(result["commands"]), sorted(commands))
    def test_iosxr_ntp_global_deleted(self):
        """state=deleted must emit one ``no`` command for every configured NTP line."""
        self.maxDiff = None
        # Existing device configuration that should be removed in full.
        run_cfg = dedent(
            """\
            ntp
            max-associations 10
            interface GigabitEthernet0/0/0/0 vrf siteB
            multicast key 1
            !
            interface GigabitEthernet0/0/0/0
            broadcast client
            multicast client 192.168.127.12
            multicast destination 192.168.127.12
            !
            authentication-key 1 md5 encrypted testkey
            authentication-key 2 md5 encrypted 071B245F5A5B
            authenticate
            trusted-key 1
            trusted-key 2
            ipv4 dscp af11
            ipv6 precedence routine
            peer vrf siteC 192.0.2.1 iburst
            server vrf siteD 192.0.2.2 burst
            server 192.0.2.2 version 2 key 1 minpoll 4 maxpoll 5 prefer burst iburst source GigabitEthernet0/0/0/0
            drift file apphost
            drift aging time 0
            master 1
            access-group vrf siteA ipv4 peer PeerAcl3
            access-group vrf siteA ipv4 serve ServeAcl2
            access-group ipv4 peer PeerAcl1
            access-group ipv4 serve ServeAcl1
            access-group ipv4 serve-only ServeOnlyAcl1
            access-group ipv4 query-only QueryOnlyAcl1
            access-group ipv6 peer PeerAcl2
            source vrf siteE GigabitEthernet0/0/0/0
            source GigabitEthernet0/0/0/0
            passive
            broadcastdelay 1
            update-calendar
            log-internal-sync
            !
            """
        )
        self.get_config.return_value = run_cfg
        set_module_args(dict(state="deleted"))
        # One negation per configured line above.
        commands = [
            "no ntp authentication-key 1 md5 encrypted testkey",
            "no ntp authentication-key 2 md5 encrypted 071B245F5A5B",
            "no ntp peer vrf siteC 192.0.2.1 iburst",
            "no ntp server vrf siteD 192.0.2.2 burst",
            "no ntp server 192.0.2.2 burst iburst key 1 minpoll 4 maxpoll 5 prefer version 2 source GigabitEthernet0/0/0/0",
            "no ntp trusted-key 1",
            "no ntp trusted-key 2",
            "no ntp interface GigabitEthernet0/0/0/0 vrf siteB",
            "no ntp interface GigabitEthernet0/0/0/0",
            "no ntp vrf siteE source GigabitEthernet0/0/0/0",
            "no ntp access-group vrf siteA ipv4 serve ServeAcl2",
            "no ntp access-group vrf siteA ipv4 peer PeerAcl3",
            "no ntp access-group ipv4 peer PeerAcl1",
            "no ntp access-group ipv4 serve ServeAcl1",
            "no ntp access-group ipv4 serve-only ServeOnlyAcl1",
            "no ntp access-group ipv4 query-only QueryOnlyAcl1",
            "no ntp access-group ipv6 peer PeerAcl2",
            "no ntp authenticate",
            "no ntp log-internal-sync",
            "no ntp broadcastdelay 1",
            "no ntp drift aging time 0",
            "no ntp drift file apphost",
            "no ntp ipv4 dscp af11",
            "no ntp ipv6 precedence routine",
            "no ntp max-associations 10",
            "no ntp master 1",
            "no ntp passive",
            "no ntp update-calendar",
            "no ntp source GigabitEthernet0/0/0/0",
        ]
        result = self.execute_module(changed=True)
        # Command ordering is not significant, so compare sorted lists.
        self.assertEqual(sorted(result["commands"]), sorted(commands))
    def test_iosxr_ntp_global_replaced(self):
        """state=replaced should emit only the delta between device and desired config."""
        self.maxDiff = None
        # Existing device configuration.
        run_cfg = dedent(
            """\
            ntp
            max-associations 10
            interface GigabitEthernet0/0/0/0 vrf siteB
            multicast key 1
            !
            interface GigabitEthernet0/0/0/0
            broadcast client
            multicast client 192.168.127.12
            multicast destination 192.168.127.12
            !
            authentication-key 1 md5 encrypted testkey
            authentication-key 2 md5 encrypted 071B245F5A5B
            authenticate
            trusted-key 1
            trusted-key 2
            ipv4 dscp af11
            ipv6 precedence routine
            peer vrf siteC 192.0.2.1 iburst
            server vrf siteD 192.0.2.2 burst
            server 192.0.2.2 version 2 key 1 minpoll 4 maxpoll 5 prefer burst iburst source GigabitEthernet0/0/0/0
            drift file apphost
            drift aging time 0
            master 1
            access-group vrf siteA ipv4 peer PeerAcl3
            access-group vrf siteA ipv4 serve ServeAcl2
            access-group ipv4 peer PeerAcl1
            access-group ipv4 serve ServeAcl1
            access-group ipv4 serve-only ServeOnlyAcl1
            access-group ipv4 query-only QueryOnlyAcl1
            access-group ipv6 peer PeerAcl2
            source vrf siteE GigabitEthernet0/0/0/0
            source GigabitEthernet0/0/0/0
            passive
            broadcastdelay 1
            update-calendar
            log-internal-sync
            !
            """
        )
        self.get_config.return_value = run_cfg
        # Desired config differs in: query-only ACL, key 1 text, interface name,
        # first server address, second server source interface and ipv4 dscp.
        set_module_args(
            dict(
                config=dict(
                    access_group=dict(
                        ipv4=dict(
                            peer="PeerAcl1",
                            query_only="QueryOnlyAcl2",
                            serve="ServeAcl1",
                            serve_only="ServeOnlyAcl1",
                        ),
                        ipv6=dict(peer="PeerAcl2"),
                        vrfs=[
                            dict(
                                ipv4=dict(peer="PeerAcl2", serve="ServeAcl2"),
                                name="siteA",
                            )
                        ],
                    ),
                    authenticate=True,
                    authentication_keys=[
                        dict(id=1, key="testkey1", encryption=True),
                        dict(id=2, key="<KEY>", encryption=True),
                    ],
                    broadcastdelay=1,
                    drift=dict(aging_time=0, file="apphost"),
                    interfaces=[
                        dict(
                            name="GigabitEthernet0/0/0/1",
                            multicast_client="192.168.127.12",
                            multicast_destination="192.168.127.12",
                            broadcast_client=True,
                        ),
                        dict(
                            name="GigabitEthernet0/0/0/0",
                            multicast_key=1,
                            vrf="siteB",
                        ),
                    ],
                    ipv4=dict(dscp="af12"),
                    ipv6=dict(precedence="routine"),
                    log_internal_sync=True,
                    master=dict(stratum=1),
                    max_associations=10,
                    passive=True,
                    peers=[dict(iburst=True, peer="192.0.2.1", vrf="siteC")],
                    servers=[
                        dict(burst=True, server="192.0.2.3", vrf="siteD"),
                        dict(
                            iburst=True,
                            burst=True,
                            server="192.0.2.2",
                            key_id=1,
                            maxpoll=5,
                            minpoll=4,
                            prefer=True,
                            source="GigabitEthernet0/0/0/1",
                            version=2,
                        ),
                    ],
                    source_interface="GigabitEthernet0/0/0/0",
                    source_vrfs=[
                        dict(name="GigabitEthernet0/0/0/0", vrf="siteE")
                    ],
                    trusted_keys=[dict(key_id=1), dict(key_id=2)],
                    update_calendar=True,
                ),
                state="replaced",
            )
        )
        # Only the changed attributes should generate commands.
        commands = [
            "no ntp server vrf siteD 192.0.2.2 burst",
            "no ntp interface GigabitEthernet0/0/0/0",
            "ntp authentication-key 1 md5 encrypted testkey1",
            "ntp server vrf siteD 192.0.2.3 burst",
            "ntp server 192.0.2.2 burst iburst key 1 minpoll 4 maxpoll 5 prefer version 2 source GigabitEthernet0/0/0/1",
            "ntp interface GigabitEthernet0/0/0/1 broadcast client",
            "ntp interface GigabitEthernet0/0/0/1 multicast destination 192.168.127.12",
            "ntp interface GigabitEthernet0/0/0/1 multicast client 192.168.127.12",
            "ntp access-group ipv4 query-only QueryOnlyAcl2",
            "ntp access-group vrf siteA ipv4 peer PeerAcl2",
            "ntp access-group vrf siteA ipv4 serve ServeAcl2",
            "ntp ipv4 dscp af12",
        ]
        result = self.execute_module(changed=True)
        # Command ordering is not significant, so compare sorted lists.
        self.assertEqual(sorted(result["commands"]), sorted(commands))
def test_iosxr_logging_global_rendered(self):
self.maxDiff = None
set_module_args(
dict(
config=dict(
access_group=dict(
ipv4=dict(
peer="PeerAcl1",
query_only="QueryOnlyAcl1",
serve="ServeAcl1",
serve_only="ServeOnlyAcl1",
),
ipv6=dict(peer="PeerAcl2"),
vrfs=[
dict(
ipv4=dict(peer="PeerAcl2", serve="ServeAcl2"),
name="siteA",
)
],
),
authenticate=True,
authentication_keys=[
dict(id=1, key="testkey", encryption=True),
dict(id=2, key="<KEY>", encryption=True),
],
| |
"""
pyLUT
Authored by <NAME>
Updated and modified by <NAME>
MIT License
"""
#####Import necessary modules#####
import os
import math
import struct
import sys  # bug fix: the import guards below call sys.exit() but sys was never imported
"""
The following modules are installed via pip. The program will check if they exist.
If the module doesn't exist, then the program exits early.
"""
#NumPy
try:
    import numpy as np
    print("NumPy loaded!")
except ImportError:
    print("NumPy not installed! Please install via: pip install numpy")
    sys.exit("Exiting program")
#MatPlotLib
try:
    import matplotlib
    # Select the Qt5 backend *before* pyplot is imported so it reliably takes effect.
    matplotlib.use('Qt5Agg')
    # matplotlib : general plot
    from matplotlib.pyplot import *
    # matplotlib : for 3D plot
    # mplot3d has to be imported for 3d projection
    import mpl_toolkits.mplot3d
    #matplotlib and PyQt5
    from matplotlib.colors import *
    from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
    from matplotlib.figure import Figure
    print("MatPlotLIb loaded!")
except ImportError:
    print("matplotlib not installed. Run: pip install matplotlib")
    sys.exit("Exiting program")
#KDTree
try:
    import kdtree
    print("KDTree loaded!")
except ImportError:
    print("KDTree doesn't exist! Please install via: pip install KDTree")
    sys.exit("Exiting program")
#Progress Bar
try:
    from progress.bar import Bar
    print("Progress bar loaded!")
except ImportError:  # consistency: ModuleNotFoundError is a subclass of ImportError
    print("Progress bar not installed. Please install via: pip install progress")
    sys.exit("Exiting program")
def EmptyLatticeOfSize(cubeSize):
    """Allocate a zero-filled cubeSize x cubeSize x cubeSize object array."""
    shape = (cubeSize,) * 3
    return np.zeros(shape, object)
def Indices01(cubeSize):
    """Return cubeSize floats evenly spaced over [0.0, 1.0] inclusive."""
    step = 1.0 / float(cubeSize - 1)
    return [float(position) * step for position in range(cubeSize)]
def Indices(cubeSize, maxVal):
    """Return cubeSize integer lattice indices spanning 0..maxVal (truncated)."""
    # Inlined Indices01: normalized positions scaled up to maxVal.
    step = 1.0 / float(cubeSize - 1)
    return [int(float(position) * step * maxVal) for position in range(cubeSize)]
def RemapIntTo01(val, maxVal):
    """Map an integer channel value onto the [0, 1] float range."""
    return float(val) / float(maxVal)
def Remap01ToInt(val, maxVal):
    """Map a [0, 1] float onto 0..maxVal, rounding half away from zero."""
    # Inlined iround: round half away from zero, then truncate to int.
    scaled = float(val) * float(maxVal)
    if scaled > 0:
        return int(scaled + .5)
    return int(scaled - .5)
def iround(num):
    """Round half away from zero and return the result as an int."""
    offset = .5 if num > 0 else -.5
    return int(num + offset)
def LerpColor(beginning, end, value01):
    """Channel-wise linear interpolation between two Colors (value01 in [0, 1])."""
    if value01 < 0 or value01 > 1:
        raise NameError("Improper Lerp")
    red = Lerp1D(beginning.r, end.r, value01)
    green = Lerp1D(beginning.g, end.g, value01)
    blue = Lerp1D(beginning.b, end.b, value01)
    return Color(red, green, blue)
def Lerp3D(beginning, end, value01):
    """Linearly interpolate each of the first three components of two sequences."""
    if value01 < 0 or value01 > 1:
        raise NameError("Improper Lerp")
    return [
        Lerp1D(beginning[0], end[0], value01),
        Lerp1D(beginning[1], end[1], value01),
        Lerp1D(beginning[2], end[2], value01),
    ]
def Lerp1D(beginning, end, value01):
    """Linearly interpolate between two scalars.

    Args:
        beginning: value returned when value01 == 0.
        end: value returned when value01 == 1.
        value01: interpolation parameter; must lie in [0, 1].

    Raises:
        NameError: if value01 is outside [0, 1] (NameError kept for
            backward compatibility with the other Lerp helpers here).
    """
    if value01 < 0 or value01 > 1:
        raise NameError("Improper Lerp")
    # Renamed from `range` to avoid shadowing the builtin.
    span = float(end) - float(beginning)
    return float(beginning) + span * float(value01)
def Distance3D(a, b):
    """Euclidean distance between two 3-component points."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    return math.sqrt(dx * dx + dy * dy + dz * dz)
def Clamp(value, min, max):
    """Clamp value into the closed interval [min, max].

    Note: the parameter names shadow the builtins; they are kept so
    existing keyword callers keep working.

    Raises:
        NameError: if min > max.
    """
    if min > max:
        raise NameError("Invalid Clamp Values")
    if value < min:
        # Bug fix: return the bound itself. The old code returned int(min),
        # which truncated float bounds (e.g. int(0.5) -> 0) and corrupted
        # Color.ClampColor with fractional limits. Integer bounds behave
        # exactly as before.
        return min
    if value > max:
        return max
    return value
def Checksum(data):
    """Sum the unsigned byte values of ``data``.

    Accepts bytes/bytearray (whose items are ints on Python 3) as well as
    iterables of single-byte bytes objects.

    Bug fix: struct.unpack returns a 1-tuple; the old code added the tuple
    itself to the running integer sum, which raised TypeError on first use.
    """
    total = 0
    for item in data:
        if isinstance(item, int):
            # Iterating bytes on Python 3 already yields unsigned ints.
            total += item
        else:
            total += struct.unpack("<B", item)[0]
    return total
def ToIntArray(string):
    """Return the ordinal (code point) of every character in ``string``."""
    return [ord(character) for character in string]
class Color:
    """
    RGB floating point representation of a color. 0 is absolute black, 1 is absolute white.
    Access channel data by color.r, color.g, or color.b.
    """
    def __init__(self, r, g, b):
        self.r = r
        self.g = g
        self.b = b
    def Clamped01(self):
        """Return a copy with every channel clamped into [0, 1]."""
        return Color(Clamp(float(self.r), 0, 1), Clamp(float(self.g), 0, 1), Clamp(float(self.b), 0, 1))
    @staticmethod
    def FromRGBInteger(r, g, b, bitdepth):
        """
        Instantiates a floating point color from RGB integers at a bitdepth.
        """
        maxBits = 2**bitdepth - 1
        return Color(RemapIntTo01(r, maxBits), RemapIntTo01(g, maxBits), RemapIntTo01(b, maxBits))
    @staticmethod
    def FromFloatArray(array):
        """
        Creates Color from a list or tuple of 3 floats.
        """
        return Color(array[0], array[1], array[2])
    @staticmethod
    def FromRGBIntegerArray(array, bitdepth):
        """
        Creates Color from a list or tuple of 3 RGB integers at a specified bitdepth.
        """
        maxBits = 2**bitdepth - 1
        return Color(RemapIntTo01(array[0], maxBits), RemapIntTo01(array[1], maxBits), RemapIntTo01(array[2], maxBits))
    def ToFloatArray(self):
        """
        Creates a tuple of 3 floating point RGB values from the floating point color.
        """
        return (self.r, self.g, self.b)
    def ToRGBIntegerArray(self, bitdepth):
        """
        Creates a tuple of 3 RGB integer values at specified bitdepth from the floating point color.
        """
        maxVal = (2**bitdepth - 1)
        return (Remap01ToInt(self.r, maxVal), Remap01ToInt(self.g, maxVal), Remap01ToInt(self.b, maxVal))
    def ClampColor(self, min, max):
        """
        Returns a clamped color, bounded per channel by the Colors min and max.
        """
        return Color(Clamp(self.r, min.r, max.r), Clamp(self.g, min.g, max.g), Clamp(self.b, min.b, max.b))
    def DistanceToColor(self, color):
        """Euclidean distance to another Color in RGB space.

        Bug fix: the original signature was ``DistanceToColor(color)`` with no
        ``self``, so instance calls bound the receiver to ``color`` and the
        ``self`` references in the body raised NameError.
        """
        if isinstance(color, Color):
            return Distance3D(self.ToFloatArray(), color.ToFloatArray())
        return NotImplemented
    def __add__(self, color):
        return Color(self.r + color.r, self.g + color.g, self.b + color.b)
    def __sub__(self, color):
        return Color(self.r - color.r, self.g - color.g, self.b - color.b)
    def __mul__(self, color):
        # Supports both scalar multiplication and channel-wise color product.
        if not isinstance(color, Color):
            mult = float(color)
            return Color(self.r * mult, self.g * mult, self.b * mult)
        return Color(self.r * color.r, self.g * color.g, self.b * color.b)
    def __eq__(self, color):
        if isinstance(color, Color):
            return self.r == color.r and self.g == color.g and self.b == color.b
        return NotImplemented
    def __ne__(self, color):
        result = self.__eq__(color)
        if result is NotImplemented:
            return result
        return not result
    def __str__(self):
        return "(" + str(self.r) + ", " + str(self.g) + ", " + str(self.b) + ")"
    def FormattedAsFloat(self, format = '{:1.6f}'):
        """Return the three channels formatted with ``format``, space separated."""
        return format.format(self.r) + " " + format.format(self.g) + " " + format.format(self.b)
    def FormattedAsInteger(self, maxVal):
        """Return the channels as right-justified integers scaled to maxVal."""
        rjustValue = len(str(maxVal)) + 1
        return str(Remap01ToInt(self.r, maxVal)).rjust(rjustValue) + " " + str(Remap01ToInt(self.g, maxVal)).rjust(rjustValue) + " " + str(Remap01ToInt(self.b, maxVal)).rjust(rjustValue)
class LUT:
"""
A class that represents a 3D LUT with a 3D numpy array.
The idea is that the modifications are non-volatile, meaning that every modification method returns a new LUT object.
This class, treated like an object in LUTOMETRY, stores the values of the loaded LUT.
"""
    def __init__(self, lattice, name = "Untitled LUT", resolution = 33):
        """Wrap a 3D numpy lattice of Color objects.

        Args:
            lattice: numpy 3D object array of Color, indexed [r, g, b].
            name: human-readable LUT name.
            resolution: edge length of the cube (lattice is resolution^3).
        """
        #NumPy 3D array representing the 3D LUT's values
        self.lattice = lattice
        #Cube size stored as resolution. cubeSize is an array version of the LUT size
        self.resolution = resolution
        self.cubeSize = range(0, self.resolution)
        # NOTE(review): debug print left in; consider removing or routing to logging.
        print(self.cubeSize)
        """
        LUT is of size (cubeSize, cubeSize, cubeSize) and index positions are from 0 to cubeSize-1
        """
        self.name = str(name)
        """
        Every LUT has a name!
        """
        # Empty per-channel caches; presumably populated later by plotting
        # code outside this view — not filled anywhere in __init__.
        self.red_values = []
        self.green_values = []
        self.blue_values = []
        self.colors = []
    def Resize(self, newCubeSize):
        """
        Scales the lattice to a new cube size.

        Returns self unchanged when the size already matches, otherwise a new
        LUT resampled via interpolated lattice lookups.
        """
        if newCubeSize == self.resolution:
            return self
        newLattice = EmptyLatticeOfSize(newCubeSize)
        # Maps a new-lattice index back into the old lattice's coordinate space.
        ratio = float(self.resolution - 1.0) / float(newCubeSize - 1.0)
        for x in range(newCubeSize):
            for y in range(newCubeSize):
                for z in range(newCubeSize):
                    newLattice[x, y, z] = self.ColorAtInterpolatedLatticePoint(x*ratio, y*ratio, z*ratio)
        newUserLUT = LUT(newLattice, name = self.name + "_Resized"+str(newCubeSize))
        # Patch resolution/cubeSize afterwards: the constructor defaults to 33.
        newUserLUT.resolution = newCubeSize
        newUserLUT.cubeSize = range(0, newUserLUT.resolution)
        return newUserLUT
    def _ResizeAndAddToData(self, newCubeSize, data, progress = False):
        """
        Resamples the lattice at newCubeSize and feeds every sample into
        ``data`` (a kdtree-like structure with an ``add`` method).

        Each entry maps the LUT's output colour (key) to the normalized input
        coordinate that produced it (payload). Returns ``data``.
        """
        # NOTE(review): newLattice is allocated but never used here.
        newLattice = EmptyLatticeOfSize(newCubeSize)
        ratio = float(self.resolution - 1.0) / float(newCubeSize-1.0)
        maxVal = newCubeSize-1
        bar = Bar("Building search tree", max = maxVal, suffix='%(percent)d%% - %(eta)ds remain')
        try:
            for x in range(newCubeSize):
                if progress:
                    bar.next()
                for y in range(newCubeSize):
                    for z in range(newCubeSize):
                        data.add(self.ColorAtInterpolatedLatticePoint(x*ratio, y*ratio, z*ratio).ToFloatArray(), (RemapIntTo01(x,maxVal), RemapIntTo01(y,maxVal), RemapIntTo01(z,maxVal)))
        except KeyboardInterrupt:
            # Leave the progress bar in a clean state before propagating.
            bar.finish()
            raise KeyboardInterrupt
        bar.finish()
        return data
    def Reverse(self, progress = False):
        """
        Reverses a LUT. Warning: This can take a long time depending on if the input/output is a bijection.

        Builds a kd-tree of (output colour -> input coordinate) pairs and, for
        every lattice point, looks up the nearest recorded output colour.
        """
        tree = self.KDTree(progress)
        newLattice = EmptyLatticeOfSize(self.resolution)
        maxVal = self.resolution - 1
        bar = Bar("Searching for matches", max = maxVal, suffix='%(percent)d%% - %(eta)ds remain')
        try:
            for x in range(self.resolution):
                if progress:
                    bar.next()
                for y in range(self.resolution):
                    for z in range(self.resolution):
                        # NOTE(review): assumes kdtree.search_nn() returns a node
                        # exposing `.aux`; recent kdtree releases return a
                        # (node, distance) tuple — verify against the pinned version.
                        newLattice[x, y, z] = Color.FromFloatArray(tree.search_nn((RemapIntTo01(x,maxVal), RemapIntTo01(y,maxVal), RemapIntTo01(z,maxVal))).aux)
        except KeyboardInterrupt:
            bar.finish()
            raise KeyboardInterrupt
        bar.finish()
        return LUT(newLattice, name = self.name +"_Reverse")
    def KDTree(self, progress = False):
        """Build a 3D kd-tree of (output colour -> input coordinate) samples.

        The lattice is oversampled at 3x the current resolution to densify
        the search space used by Reverse().
        """
        tree = kdtree.create(dimensions=3)
        tree = self._ResizeAndAddToData(self.resolution*3, tree, progress)
        return tree
def CombineWithLUT(self, otherLUT):
"""
Combines LUT with another LUT.
"""
if self.resolution is not otherLUT.cubeSize:
raise NameError("Lattice Sizes not equivalent")
cubeSize = self.resolution
newLattice = EmptyLatticeOfSize(cubeSize)
for x in range(cubeSize):
for y in range(cubeSize):
for z in range(cubeSize):
selfColor = self.lattice[x, y, z].Clamped01()
newLattice[x, y, z] = otherLUT.ColorFromColor(selfColor)
return LUT(newLattice, name = self.name + "+" + otherLUT.name)
    def ClampColor(self, min, max):
        """
        Returns a new RGB clamped LUT.

        Args:
            min: Color holding the per-channel lower bounds.
            max: Color holding the per-channel upper bounds.
        """
        cubeSize = self.resolution
        newLattice = EmptyLatticeOfSize(cubeSize)
        for x in range(cubeSize):
            for y in range(cubeSize):
                for z in range(cubeSize):
                    newLattice[x, y, z] = self.ColorAtLatticePoint(x, y, z).ClampColor(min, max)
        # Note: the resulting LUT gets the default name/resolution.
        return LUT(newLattice)
def _LatticeTo3DLString(self, bitdepth):
"""
Used for internal creating of 3DL files.
"""
string = ""
cubeSize = self.resolution
for currentCubeIndex in range(0, cubeSize**3):
redIndex = currentCubeIndex / (cubeSize*cubeSize)
greenIndex = ( (currentCubeIndex % (cubeSize*cubeSize)) / (cubeSize) )
blueIndex = currentCubeIndex % cubeSize
latticePointColor = self.lattice[redIndex, greenIndex, blueIndex].Clamped01()
string += latticePointColor.FormattedAsInteger(2**bitdepth-1) + "\n"
return string
def ToLustre3DLFile(self, fileOutPath, bitdepth = 12):
cubeSize = self.resolution
inputDepth = math.log(cubeSize-1, 2)
if int(inputDepth) != inputDepth:
raise NameError("Invalid | |
from __future__ import division
from collections import deque
import os
import warnings
import numpy as np
import keras.backend as K
import keras.layers as layers
import keras.optimizers as optimizers
from rl.core import Agent
from rl.util import *
def mean_q(y_true, y_pred):
    """Keras metric: batch mean of the per-sample maximum predicted Q-value."""
    return K.mean(K.max(y_pred, axis=-1))
# Deep DPG as described by Lillicrap et al. (2015)
# http://arxiv.org/pdf/1509.02971v2.pdf
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.646.4324&rep=rep1&type=pdf
class UBDDPGAgent(Agent):
"""Write me
"""
def __init__(self, nb_actions, actor, critic, nb_players, critic_action_inputs, memory,
gamma=.99, batch_size=32, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
train_interval=1, memory_interval=1, delta_range=None, delta_clip=np.inf,
random_process=None, custom_model_objects={}, target_model_update=.001, **kwargs):
assert len(critic_action_inputs) == nb_players
if hasattr(actor.output, '__len__') and len(actor.output) != nb_players:
raise ValueError((
'Actor "{}" does not have the right number of ',
'outputs. DDPG expects an actor that has {} outputs.'
).format(actor, nb_players))
# if hasattr(critic.output, '__len__') and len(critic.output) > 1:
# raise ValueError('Critic "{}" has more than one output. DDPG expects a critic that has a single output.'.format(critic))
for critic_action_input in critic_action_inputs:
if critic_action_input not in critic.input:
raise ValueError('Critic "{}" does not have designated action input "{}".'.format(critic, critic_action_input))
if not hasattr(critic.input, '__len__') or len(critic.input) < 2:
raise ValueError('Critic "{}" does not have enough inputs. The critic must have at least two inputs, one for the action and one for the observation.'.format(critic))
super(UBDDPGAgent, self).__init__(**kwargs)
# Soft vs hard target model updates.
if target_model_update < 0:
raise ValueError('`target_model_update` must be >= 0.')
elif target_model_update >= 1:
# Hard update every `target_model_update` steps.
target_model_update = int(target_model_update)
else:
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
target_model_update = float(target_model_update)
if delta_range is not None:
warnings.warn('`delta_range` is deprecated. Please use `delta_clip` instead, which takes a single scalar. For now we\'re falling back to `delta_range[1] = {}`'.format(delta_range[1]))
delta_clip = delta_range[1]
# Parameters.
self.nb_actions = nb_actions
self.nb_steps_warmup_actor = nb_steps_warmup_actor
self.nb_steps_warmup_critic = nb_steps_warmup_critic
self.random_process = random_process
self.delta_clip = delta_clip
self.gamma = gamma
self.target_model_update = target_model_update
self.batch_size = batch_size
self.train_interval = train_interval
self.memory_interval = memory_interval
self.custom_model_objects = custom_model_objects
# Related objects.
self.actor = actor
self.critic = critic
self.nb_players = nb_players
self.critic_action_inputs = critic_action_inputs
self.critic_action_input_idxes = [
self.critic.input.index(critic_action_input)
for critic_action_input in critic_action_inputs
]
self.memory = memory
# State.
self.compiled = False
self.reset_states()
    @property
    def uses_learning_phase(self):
        """True if either underlying model depends on the Keras learning phase."""
        return self.actor.uses_learning_phase or self.critic.uses_learning_phase
    def compile(self, optimizer, metrics=[]):
        """Build target networks, the critic training graph and the actor update fn.

        Args:
            optimizer: a single optimizer (cloned for the critic) or a
                two-element [actor_optimizer, critic_optimizer] sequence.
            metrics: extra Keras metrics for the critic, optionally given as
                a pair [actor_metrics, critic_metrics].
        """
        metrics += [mean_q]

        if type(optimizer) in (list, tuple):
            if len(optimizer) != 2:
                raise ValueError('More than two optimizers provided. Please only provide a maximum of two optimizers, the first one for the actor and the second one for the critic.')
            actor_optimizer, critic_optimizer = optimizer
        else:
            # One optimizer given: clone it so actor and critic do not share state.
            actor_optimizer = optimizer
            critic_optimizer = clone_optimizer(optimizer)
        if type(actor_optimizer) is str:
            actor_optimizer = optimizers.get(actor_optimizer)
        if type(critic_optimizer) is str:
            critic_optimizer = optimizers.get(critic_optimizer)
        assert actor_optimizer != critic_optimizer

        if len(metrics) == 2 and hasattr(metrics[0], '__len__') and hasattr(metrics[1], '__len__'):
            actor_metrics, critic_metrics = metrics
        else:
            actor_metrics = critic_metrics = metrics
        # NOTE(review): actor_metrics is never used below.

        def clipped_error(y_true, y_pred):
            # Per-sample Huber loss, averaged over the last (player) axis.
            y_true = K.squeeze(y_true, axis=-1)
            y_pred = K.squeeze(y_pred, axis=-1)
            loss = K.mean(
                # K.random_uniform(shape=(self.batch_size, self.nb_players), minval=0., maxval=1.) *
                huber_loss(y_true, y_pred, self.delta_clip),
                axis=-1)
            return loss

        # Compile target networks. We only use them in feed-forward mode, hence we can pass any
        # optimizer and loss since we never use it anyway.
        self.target_actor = clone_model(self.actor, self.custom_model_objects)
        self.target_actor.compile(optimizer='sgd', loss='mse')
        self.target_critic = clone_model(self.critic, self.custom_model_objects)
        self.target_critic.compile(optimizer='sgd', loss='mse')

        # We also compile the actor. We never optimize the actor using Keras but instead compute
        # the policy gradient ourselves. However, we need the actor in feed-forward mode, hence
        # we also compile it with any optimzer and loss.
        self.actor.compile(optimizer='sgd', loss='mse')

        # Compile the critic.
        if self.target_model_update < 1.:
            # We use the `AdditionalUpdatesOptimizer` to efficiently soft-update the target model.
            critic_updates = get_soft_target_model_updates(self.target_critic, self.critic, self.target_model_update)
            critic_optimizer = AdditionalUpdatesOptimizer(critic_optimizer, critic_updates)
        self.critic.compile(
            optimizer=critic_optimizer,
            loss=[clipped_error]*self.nb_players,
            metrics=critic_metrics)

        # Combine actor and critic so that we can get the policy gradient.
        # Assuming critic's state inputs are the same as actor's.
        critic_inputs = []
        actor_inputs = []
        for i in self.critic.input:
            if i in self.critic_action_inputs:
                # Placeholder; replaced below with the matching actor head.
                critic_inputs.append([])
            else:
                critic_inputs.append(i)
                actor_inputs.append(i)
        actor_outputs = self.actor(actor_inputs)
        if not isinstance(actor_outputs, (list,)):
            actor_outputs = [actor_outputs]
        assert len(actor_outputs) == self.nb_players
        for input_idx, actor_output in zip(self.critic_action_input_idxes, actor_outputs):
            critic_inputs[input_idx] = actor_output
        # critic_outputs = layers.Maximum()(self.critic(critic_inputs))
        critic_outputs = self.critic(critic_inputs)
        if not isinstance(critic_outputs, (list,)):
            critic_outputs = [critic_outputs]
        assert len(critic_outputs) == self.nb_players
        # Policy gradient: each head maximizes its own Q == minimizes -mean(Q).
        actor_losses = [None]* self.nb_players
        for input_idx, critic_output in zip(self.critic_action_input_idxes, critic_outputs):
            actor_losses[input_idx] = -K.mean(critic_output)
        updates = actor_optimizer.get_updates(
            params=self.actor.trainable_weights,
            loss=actor_losses)
        if self.target_model_update < 1.:
            # Include soft target model updates.
            updates += get_soft_target_model_updates(self.target_actor, self.actor, self.target_model_update)
        updates += self.actor.updates  # include other updates of the actor, e.g. for BN

        # Finally, combine it all into a callable function.
        if K.backend() == 'tensorflow':
            self.actor_train_fn = K.function(actor_inputs + [K.learning_phase()],
                                             actor_outputs, updates=updates)
        else:
            if self.uses_learning_phase:
                actor_inputs += [K.learning_phase()]
            self.actor_train_fn = K.function(actor_inputs, actor_outputs, updates=updates)
        self.actor_optimizer = actor_optimizer
        self.compiled = True
def load_weights(self, filepath):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.load_weights(actor_filepath)
self.critic.load_weights(critic_filepath)
self.update_target_models_hard()
def save_weights(self, filepath, overwrite=False):
filename, extension = os.path.splitext(filepath)
actor_filepath = filename + '_actor' + extension
critic_filepath = filename + '_critic' + extension
self.actor.save_weights(actor_filepath, overwrite=overwrite)
self.critic.save_weights(critic_filepath, overwrite=overwrite)
    def update_target_models_hard(self):
        """Copy the online networks' weights into the target networks verbatim."""
        self.target_critic.set_weights(self.critic.get_weights())
        self.target_actor.set_weights(self.actor.get_weights())
    # TODO: implement pickle
    def reset_states(self):
        """Reset exploration noise, episode bookkeeping and model RNN states."""
        if self.random_process is not None:
            self.random_process.reset_states()
        self.recent_action = None
        self.recent_observation = None
        # Model state resets only make sense once the networks exist.
        if self.compiled:
            self.actor.reset_states()
            self.critic.reset_states()
            self.target_actor.reset_states()
            self.target_critic.reset_states()
def process_state_batch(self, batch):
batch = np.array(batch)
if self.processor is None:
return batch
return self.processor.process_state_batch(batch)
    def select_action(self, state):
        """Greedy-over-players action selection with optional exploration noise.

        Runs every actor head on the state, scores each head's action with the
        critic, and returns the action of the highest-valued head (flattened).
        During training, random-process noise is added if configured.
        """
        batch = self.process_state_batch([state])
        actions = self.actor.predict_on_batch(batch)
        if self.nb_players == 1:
            # Single-head actors return the array directly; normalize to a list.
            actions = [actions]
        assert len(actions) == self.nb_players
        assert actions[0].shape == (1, self.nb_actions)

        # Build the critic input list: state tensor(s) plus one action per head.
        if len(self.critic.inputs) > (self.nb_players+1):  # state is a list
            state_batch_with_action = batch[:]
        else:
            state_batch_with_action = [batch]
        for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
            state_batch_with_action.insert(input_idx, actions[action_idx])
        q_values = [
            qv.flatten()
            for qv in self.critic.predict_on_batch(state_batch_with_action)
        ]
        assert q_values[0].shape == (1, )
        assert len(q_values) == self.nb_players

        # Pick the head whose critic value is largest.
        action_best = actions[np.argmax(q_values)].flatten()
        assert action_best.shape == (self.nb_actions, )

        # Apply noise, if a random process is set.
        if self.training and self.random_process is not None:
            noise = self.random_process.sample()
            assert noise.shape == action_best.shape
            action_best += noise
        return action_best
def forward(self, observation):
# Select an action.
state = self.memory.get_recent_state(observation)
action = self.select_action(state) # TODO: move this into policy
# Book-keeping.
self.recent_observation = observation
self.recent_action = action
return action
    @property
    def layers(self):
        """All actor layers followed by all critic layers (shallow copies)."""
        return self.actor.layers[:] + self.critic.layers[:]
    @property
    def metrics_names(self):
        """Critic metric names plus any processor-provided metric names."""
        names = self.critic.metrics_names[:]
        if self.processor is not None:
            names += self.processor.metrics_names[:]
        return names
def backward(self, reward, terminal=False):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
self.memory.append(self.recent_observation, self.recent_action, reward, terminal,
training=self.training)
metrics = [np.nan for _ in self.metrics_names]
if not self.training:
# We're done here. No need to update the experience memory since we only use the working
# memory to obtain the state over the most recent observations.
return metrics
# Train the network on a single stochastic batch.
can_train_either = self.step > self.nb_steps_warmup_critic or self.step > self.nb_steps_warmup_actor
if can_train_either and self.step % self.train_interval == 0:
experiences = self.memory.sample(self.batch_size)
assert len(experiences) == self.batch_size
# Start by extracting the necessary parameters (we use a vectorized implementation).
state0_batch = []
reward_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for e in experiences:
state0_batch.append(e.state0)
state1_batch.append(e.state1)
reward_batch.append(e.reward)
action_batch.append(e.action)
terminal1_batch.append(0. if e.terminal1 else 1.)
# Prepare and validate parameters.
state0_batch = self.process_state_batch(state0_batch)
state1_batch = self.process_state_batch(state1_batch)
terminal1_batch = np.array(terminal1_batch)
reward_batch = np.array(reward_batch)
action_batch = np.array(action_batch)
assert reward_batch.shape == (self.batch_size,)
assert terminal1_batch.shape == reward_batch.shape
assert action_batch.shape == (self.batch_size, self.nb_actions)
# Update critic, if warm up is over.
if self.step > self.nb_steps_warmup_critic:
target_actions = self.target_actor.predict_on_batch(state1_batch)
if not isinstance(target_actions, (list,)):
target_actions = [target_actions]
assert len(target_actions) == self.nb_players
assert target_actions[0].shape == (self.batch_size, self.nb_actions)
if len(self.critic.inputs) > (self.nb_players+1): # state is a list
# if len(self.critic.inputs) >= 3:
state1_batch_with_action = state1_batch[:]
else:
state1_batch_with_action = [state1_batch]
# state1_batch_with_action.insert(self.critic_action_input_idx, target_actions)
for action_idx, input_idx in enumerate(self.critic_action_input_idxes):
state1_batch_with_action.insert(input_idx, target_actions[action_idx])
target_q_values = self.target_critic.predict_on_batch(state1_batch_with_action)
if not isinstance(target_q_values, (list,)):
target_q_values = [target_q_values]
target_q_values = [ tqv.flatten() for tqv in target_q_values]
assert target_q_values[0].shape == | |
are converted to | while resolving future annotations so we can't consistently
# assert the message.
@pytest.mark.anyio()
async def test_call_with_async_di_with_defaulting_union_type_dependency(context: alluka.BasicContext):
    """A Union-typed injection resolves to whichever union member is registered."""
    mock_type: typing.Any = mock.Mock()
    mock_other_type: typing.Any = mock.Mock()
    mock_value = mock.Mock()
    # Only the second union member is registered as a dependency.
    context.injection_client.set_type_dependency(mock_other_type, mock_value)

    async def callback(
        bar: int, baz: str, cope: int = alluka.inject(type=typing.Union[mock_type, mock_other_type, None])
    ) -> float:
        assert bar == 123
        assert baz == "ok"
        assert cope is mock_value
        return 243.234

    result = await context.call_with_async_di(callback, 123, "ok")

    assert result == 243.234
@pytest.mark.anyio()
async def test_call_with_async_di_with_defaulting_union_type_dependency_not_found(context: alluka.BasicContext):
    """An Optional-typed injection falls back to None when nothing is registered."""
    mock_type: typing.Any = mock.Mock()

    async def callback(
        bar: float, baz: int, cope: typing.Optional[int] = alluka.inject(type=typing.Optional[mock_type])
    ) -> float:
        assert bar == 123.321
        assert baz == 543
        assert cope is None
        return 321.123

    result = await context.call_with_async_di(callback, 123.321, 543)

    assert result == 321.123
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_type_dependency(context: alluka.BasicContext):
    """Annotated[..., alluka.inject(type=...)] parameters resolve to registered values."""
    mock_value = GlobalStubType()
    mock_other_value = GlobalOtherStubType()

    async def callback(
        rawr: int,
        xd: float,
        meowmeow: typing.Annotated[str, alluka.inject(type=GlobalStubType)],
        imacow: typing.Annotated[int, alluka.inject(type=GlobalOtherStubType)],
    ) -> str:
        assert rawr == 69
        assert xd == "rew"
        assert meowmeow is mock_value
        assert imacow is mock_other_value
        return "meow"

    context.injection_client.set_type_dependency(GlobalStubType, mock_value).set_type_dependency(
        GlobalOtherStubType, mock_other_value
    )

    result = await context.call_with_async_di(callback, rawr=69, xd="rew")

    assert result == "meow"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_type_dependency_inferred_from_type(context: alluka.BasicContext):
    """A bare alluka.inject() infers the dependency type from the annotation itself."""
    mock_value = GlobalStubType()
    mock_other_value = GlobalOtherStubType()

    async def callback(
        meow: int,
        nyaa: float,
        meowmeow: typing.Annotated[GlobalStubType, alluka.inject()],
        imacow: typing.Annotated[GlobalOtherStubType, alluka.inject()],
    ) -> str:
        assert meow == 2222
        assert nyaa == "xxxxx"
        assert meowmeow is mock_value
        assert imacow is mock_other_value
        return "wewewewew"

    context.injection_client.set_type_dependency(GlobalStubType, mock_value).set_type_dependency(
        GlobalOtherStubType, mock_other_value
    )

    result = await context.call_with_async_di(callback, meow=2222, nyaa="xxxxx")

    assert result == "wewewewew"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_type_dependency_not_found(context: alluka.BasicContext):
    """A missing annotated dependency should raise MissingDependencyError for that type."""
    other_dep = GlobalOtherStubType()
    context.injection_client.set_type_dependency(GlobalOtherStubType, other_dep)

    async def callback(
        meow: int,
        nyaa: float,
        meowmeow: typing.Annotated[int, alluka.inject(type=GlobalStubType)],
        imacow: typing.Annotated[str, alluka.inject(type=GlobalOtherStubType)],
    ) -> str:
        raise NotImplementedError

    with pytest.raises(alluka.MissingDependencyError) as exc_info:
        await context.call_with_async_di(callback, meow=2222, nyaa="xxxxx")

    assert exc_info.value.dependency_type is GlobalStubType
    assert exc_info.value.message == f"Couldn't resolve injected type(s) {GlobalStubType} to actual value"
# These tests cover the `X | Y` union syntax introduced in Python 3.10.
if sys.version_info >= (3, 10):  # TODO: do we want to dupe other test cases for |?

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_annotated_3_10_union_type_dependency(context: alluka.BasicContext):
        """A PEP 604 union should resolve to whichever member type is registered."""
        registered = GlobalOtherStubType()
        context.injection_client.set_type_dependency(GlobalOtherStubType, registered)

        async def callback(
            yeee: str,
            nyaa: bool,
            yeet: typing.Annotated[str, alluka.inject(type=GlobalStubType | GlobalOtherStubType)],
        ) -> str:
            assert yeee == "yeee"
            assert nyaa is True
            assert yeet is registered
            return "hey"

        assert await context.call_with_async_di(callback, yeee="yeee", nyaa=True) == "hey"

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_annotated_3_10_union_type_dependency_not_found(
        context: alluka.BasicContext,
    ):
        """A PEP 604 union with no member registered should raise MissingDependencyError."""

        async def callback(
            bar: int, baz: str, cope: typing.Annotated[int, alluka.inject(type=GlobalOtherStubType | GlobalStubType)]
        ) -> float:
            raise NotImplementedError

        with pytest.raises(alluka.MissingDependencyError) as exc_info:
            await context.call_with_async_di(callback, 123, "ok")

        assert exc_info.value.dependency_type == GlobalOtherStubType | GlobalStubType
        # 3.10.1/2+ and 3.11 may re-order the | union types while resolving them
        # from a string future annotation so we can't reliably assert the message.

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_annotated_3_10_union_type_dependency_defaulting(
        context: alluka.BasicContext,
    ):
        """An `X | Y | None` union should prefer a registered member over defaulting to None."""
        registered = GlobalStubType()
        context.injection_client.set_type_dependency(GlobalStubType, registered)

        async def callback(
            bar: int,
            baz: str,
            cope: typing.Annotated[int, alluka.inject(type=GlobalOtherStubType | GlobalStubType | None)],
        ) -> float:
            assert bar == 123
            assert baz == "ok"
            assert cope is registered
            return 451.123

        assert await context.call_with_async_di(callback, 123, "ok") == 451.123

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_annotated_3_10_union_type_dependency_defaulting_not_found(
        context: alluka.BasicContext,
    ):
        """An `X | Y | None` union with nothing registered should default to None."""

        async def callback(
            bar: int,
            baz: str,
            cope: typing.Annotated[int, alluka.inject(type=GlobalOtherStubType | GlobalStubType | None)],
        ) -> float:
            assert bar == 123
            assert baz == "ok"
            assert cope is None
            return 451.123

        assert await context.call_with_async_di(callback, 123, "ok") == 451.123

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_annotated_3_10_union_type_dependency_natural_defaulting(
        context: alluka.BasicContext,
    ):
        """A registered dependency should take precedence over the parameter's own default."""
        registered = GlobalStubType()
        context.injection_client.set_type_dependency(GlobalStubType, registered)

        # NOTE(review): the union repeats GlobalStubType — possibly meant
        # `GlobalOtherStubType | GlobalStubType`; behaviour kept as-is.
        async def callback(
            bar: int,
            baz: str,
            cope: typing.Annotated[int, alluka.inject(type=GlobalStubType | GlobalStubType | None)] = 123,
        ) -> float:
            assert bar == 123
            assert baz == "ok"
            assert cope is registered
            return 451.123

        assert await context.call_with_async_di(callback, 123, "ok") == 451.123

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_annotated_3_10_union_type_dependency_natural_defaulting_not_found(
        context: alluka.BasicContext,
    ):
        """With nothing registered, the parameter's own default value should be used."""

        # NOTE(review): the union repeats GlobalStubType — possibly meant
        # `GlobalOtherStubType | GlobalStubType`; behaviour kept as-is.
        async def callback(
            bar: int, baz: str, cope: typing.Annotated[int, alluka.inject(type=GlobalStubType | GlobalStubType)] = 43123
        ) -> float:
            assert bar == 123
            assert baz == "ok"
            assert cope == 43123
            return 451.123

        assert await context.call_with_async_di(callback, 123, "ok") == 451.123
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_union_type_dependency(context: alluka.BasicContext):
    """A typing.Union annotation should resolve to whichever member type is registered."""
    registered = GlobalOtherStubType()
    context.injection_client.set_type_dependency(GlobalOtherStubType, registered)

    async def callback(
        meow: int,
        meowmeow: typing.Annotated[typing.Union[GlobalStubType, GlobalOtherStubType], alluka.inject()],
    ) -> str:
        assert meow == 1233212
        assert meowmeow is registered
        return "yay"

    assert await context.call_with_async_di(callback, 1233212) == "yay"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_union_type_dependency_not_found(context: alluka.BasicContext):
    """A typing.Union dependency with no member registered should raise MissingDependencyError."""

    async def callback(
        yeee: str,
        nyaa: bool,
        yeet: typing.Annotated[int, alluka.inject(type=typing.Union[GlobalStubType, GlobalOtherStubType])],
    ) -> str:
        raise NotImplementedError

    with pytest.raises(alluka.MissingDependencyError) as exc_info:
        await context.call_with_async_di(callback, yeee="yeee", nyaa=True)

    assert exc_info.value.dependency_type == typing.Union[GlobalStubType, GlobalOtherStubType]
    # On 3.10.1/2+ typing.Unions are converted to | while resolving future
    # annotations, so we can't consistently assert the message.
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_defaulting_type_dependency(context: alluka.BasicContext):
    """An Optional dependency should resolve to the registered value when one exists."""
    registered = GlobalStubType()
    context.injection_client.set_type_dependency(GlobalStubType, registered)

    async def callback(
        eaaaa: str,
        nyaa: bool,
        yeet: typing.Annotated[str, alluka.inject(type=typing.Optional[GlobalStubType])],
    ) -> str:
        assert eaaaa == "easd"
        assert nyaa is False
        assert yeet is registered
        return "aaaaa"

    assert await context.call_with_async_di(callback, "easd", nyaa=False) == "aaaaa"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_defaulting_type_dependency_not_found(context: alluka.BasicContext):
    """An unregistered Optional dependency should fall back to None."""

    async def callback(
        eaaaa: str,
        nyaa: bool,
        yeet: typing.Annotated[str, alluka.inject(type=typing.Optional[GlobalStubType])],
    ) -> str:
        assert eaaaa == "easd"
        assert nyaa is False
        assert yeet is None
        return "aaaaa"

    assert await context.call_with_async_di(callback, "easd", nyaa=False) == "aaaaa"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_natural_defaulting_type_dependency(context: alluka.BasicContext):
    """A registered dependency should take precedence over the parameter's own default."""
    registered = GlobalStubType()
    context.injection_client.set_type_dependency(GlobalStubType, registered)

    async def callback(
        eaaaa: str,
        nyaa: bool,
        yeet: typing.Annotated[str, alluka.inject(type=GlobalStubType)] = "default",
    ) -> str:
        assert eaaaa == "easd"
        assert nyaa is False
        assert yeet is registered
        return "aaaaa"

    assert await context.call_with_async_di(callback, "easd", nyaa=False) == "aaaaa"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_natural_defaulting_type_dependency_not_found(
    context: alluka.BasicContext,
):
    """With no registered value, the parameter's own default should be used."""

    async def callback(
        eaaaa: str,
        nyaa: bool,
        yeet: typing.Annotated[int, alluka.inject(type=GlobalStubType)] = 123,
    ) -> str:
        assert eaaaa == "easd"
        assert nyaa is False
        assert yeet == 123
        return "aaaaa"

    assert await context.call_with_async_di(callback, "easd", nyaa=False) == "aaaaa"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_defaulting_union_type_dependency(context: alluka.BasicContext):
    """A typing.Union dependency should resolve to the registered member."""
    registered = GlobalOtherStubType()
    context.injection_client.set_type_dependency(GlobalOtherStubType, registered)

    async def callback(
        vvvvv: int,
        value: typing.Annotated[str, alluka.inject(type=typing.Union[GlobalStubType, GlobalOtherStubType])],
    ) -> str:
        assert vvvvv == 123
        assert value is registered
        return "ea sports"

    assert await context.call_with_async_di(callback, 123) == "ea sports"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_defaulting_union_type_dependency_not_found(
    context: alluka.BasicContext,
):
    """An unregistered Optional dependency should fall back to None."""

    async def callback(
        vvvvv: int,
        value: typing.Annotated[str, alluka.inject(type=typing.Optional[GlobalStubType])],
    ) -> str:
        assert vvvvv == 123
        assert value is None
        return "yeeee"

    assert await context.call_with_async_di(callback, 123) == "yeeee"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_natural_defaulting_union_type_dependency(
    context: alluka.BasicContext,
):
    """A registered union member should take precedence over the parameter's own default."""
    registered = GlobalOtherStubType()
    context.injection_client.set_type_dependency(GlobalOtherStubType, registered)

    async def callback(
        vvvvv: int,
        value: typing.Annotated[str, alluka.inject(type=typing.Union[GlobalStubType, GlobalOtherStubType])] = "default",
    ) -> str:
        assert vvvvv == 123
        assert value is registered
        return "ea sports"

    assert await context.call_with_async_di(callback, 123) == "ea sports"
@pytest.mark.anyio()
async def test_call_with_async_di_with_annotated_natural_defaulting_union_type_dependency_not_found(
    context: alluka.BasicContext,
):
    """With no union member registered, the parameter's own default should be used."""

    async def callback(
        vvvvv: int,
        value: typing.Annotated[
            str, alluka.inject(type=typing.Union[GlobalStubType, GlobalOtherStubType, None])
        ] = "default 2",
    ) -> str:
        assert vvvvv == 123
        assert value == "default 2"
        return "yeeee"

    assert await context.call_with_async_di(callback, 123) == "yeeee"
@pytest.mark.anyio()
async def test_call_with_async_di_with_shorthand_annotated_type_dependency(context: alluka.BasicContext):
    """The ``alluka.Injected[...]`` shorthand should behave like ``Annotated[..., inject()]``."""
    stub_dep = GlobalStubType()
    other_dep = GlobalOtherStubType()
    context.injection_client.set_type_dependency(GlobalStubType, stub_dep)
    context.injection_client.set_type_dependency(GlobalOtherStubType, other_dep)

    async def callback(
        rawr: int, xd: float, meowmeow: alluka.Injected[GlobalStubType], other: alluka.Injected[GlobalOtherStubType]
    ) -> str:
        assert rawr == 1233212
        assert xd == "seee"
        assert meowmeow is stub_dep
        assert other is other_dep
        return "eeesss"

    assert await context.call_with_async_di(callback, 1233212, xd="seee") == "eeesss"
@pytest.mark.anyio()
async def test_call_with_async_di_with_shorthand_annotated_type_dependency_not_found(context: alluka.BasicContext):
    """A missing ``alluka.Injected[...]`` dependency should raise MissingDependencyError."""
    other_dep = GlobalOtherStubType()
    context.injection_client.set_type_dependency(GlobalOtherStubType, other_dep)

    async def callback(
        meow: int,
        nyaa: float,
        meowmeow: alluka.Injected[GlobalStubType],
        imacow: alluka.Injected[GlobalOtherStubType],
    ) -> str:
        raise NotImplementedError

    with pytest.raises(alluka.MissingDependencyError) as exc_info:
        await context.call_with_async_di(callback, meow=2222, nyaa="xxxxx")

    assert exc_info.value.dependency_type is GlobalStubType
    assert exc_info.value.message == f"Couldn't resolve injected type(s) {GlobalStubType} to actual value"
# These tests cover the `X | Y` union syntax introduced in Python 3.10.
if sys.version_info >= (3, 10):  # TODO: do we want to dupe other test cases for |?

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_shorthand_annotated_3_10_union_type_dependency(
        context: alluka.BasicContext,
    ):
        """``Injected[X | Y]`` should resolve to whichever member type is registered."""
        registered = GlobalOtherStubType()
        context.injection_client.set_type_dependency(GlobalOtherStubType, registered)

        async def callback(
            yeee: str,
            nyaa: bool,
            yeet: alluka.Injected[GlobalStubType | GlobalOtherStubType],
        ) -> str:
            assert yeee == "yeee"
            assert nyaa is True
            assert yeet is registered
            return "hey"

        assert await context.call_with_async_di(callback, yeee="yeee", nyaa=True) == "hey"

    @pytest.mark.anyio()
    async def test_call_with_async_di_with_shorthand_annotated_3_10_union_type_dependency_not_found(
        context: alluka.BasicContext,
    ):
        """``Injected[X | Y]`` with no member registered should raise MissingDependencyError."""

        async def callback(bar: int, baz: str, cope: alluka.Injected[GlobalOtherStubType | GlobalStubType]) -> float:
            raise NotImplementedError

        with pytest.raises(alluka.MissingDependencyError) as exc_info:
            await context.call_with_async_di(callback, 123, "ok")

        assert exc_info.value.dependency_type == GlobalOtherStubType | GlobalStubType
        # 3.10.1/2+ and 3.11 may re-order the | union types while resolving
        # them from a string future annotation, so the message isn't asserted.
# | |
"""
Extract, combine, and correct chamber sensor data.
For pre-processing only, not intended for general-purpose use.
Hyytiälä COS campaign, April-November 2016
(c) 2016-2017 <NAME> <<EMAIL>>
Revision history
----------------
26 May 2016, W.S.
- The two PAR sensors are now called 'PAR_ch_1' and 'PAR_ch_2', because their
association with the chambers changed throughout the campaign.
29 Aug 2016, W.S.
- Continue to the next day's file in the loop when the current day's file
is not found. This is to skip the day 28 Aug 2016 for missing data.
16 Jan 2017, W.S.
- Running options are now controlled by an external config file.
- Code review and small edits
- Ad hoc filtering criteria added
- Daily plot option added, which is controlled by the preprocessing config
"""
import argparse
import glob
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import preproc_config # preprocessing config file, in the same directory
def IQR_bounds_func(x):
    """Return ad hoc (lower, upper) IQR-based bounds for thermocouple data.

    Bounds are deliberately asymmetric — ``q1 - 2 * IQR`` below and
    ``q3 + 5 * IQR`` above.  Used only in this script.  Returns
    ``(nan, nan)`` when *x* contains no finite samples.
    """
    if not np.sum(np.isfinite(x)) > 0:
        # No finite values -> percentiles are meaningless.
        return np.nan, np.nan
    q1, q3 = np.nanpercentile(x, [25, 75])
    iqr = q3 - q1
    return q1 - 2 * iqr, q3 + 5 * iqr
# define terminal argument parser
parser = argparse.ArgumentParser(
    description='Extract, combine, and correct chamber sensor data.')
parser.add_argument('-s', '--silent', dest='flag_silent_mode',
                    action='store_true',
                    help='silent mode: run without printing daily summary')
args = parser.parse_args()

# echo program starting
print('Subsetting, gapfilling and downsampling the biomet sensor data...')
dt_start = datetime.datetime.now()
print(datetime.datetime.strftime(dt_start, '%Y-%m-%d %X'))
print('numpy version = ' + np.__version__)
print('pandas version = ' + pd.__version__)
if preproc_config.run_options['plot_sensor_data']:
    print('Plotting option is enabled. Will generate daily plots.')

# settings
pd.options.display.float_format = '{:.2f}'.format
# let pandas dataframe displays float with 2 decimal places
plt.rcParams.update({'mathtext.default': 'regular'})  # sans-serif math
plt.style.use('ggplot')

sensor_dir = preproc_config.data_dir['sensor_data_raw']
output_dir = preproc_config.data_dir['sensor_data_reformatted']

# get file list of sensor data
lc_sensor_flist = glob.glob(
    sensor_dir + '/sm_cop/*.cop')  # leaf chamber sensors
sc_sensor_flist = glob.glob(
    sensor_dir + '/sm_mpr/*.mpr')  # soil chamber sensors

# current fractional day of year; local time is UTC+2
doy_today = (datetime.datetime.utcnow() -
             datetime.datetime(2016, 1, 1)).total_seconds() / 86400. + 2. / 24.
if preproc_config.run_options['process_recent_period']:
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `int` is the drop-in replacement (same truncation behaviour).
    doy_start = int(doy_today -
                    preproc_config.run_options['traceback_in_days'])
    doy_end = int(np.ceil(doy_today))
else:
    doy_start = 97  # campaign starts on 7 Apr 2016
    doy_end = 315  # campaign ends on 10 Nov 2016 (plus one for `range()`)

year_start = 2016  # starting year for converting day of year values

# data fields in the leaf chamber sensor data file (*.cop)
# correspondence between chamber number and sensor number was changing
# throughout the campaign. refer to the metadata table for the information.
# 0 - time; 1 - PAR_ch_1; 2 - PAR_ch_2;
# 8 - ambient T; 10 - T_ch_1;
# 11 - T_ch_2; 12 - T_ch_3;
# data fields in the soil chamber sensor data file (*.mpr)
# 0 - time; 5 - soil chamber 1 (T_ch_4); 6 - soil chamber 2 (T_ch_5)
# 7 - soil chamber 3 (T_ch_6)
for doy in range(doy_start, doy_end):
run_date_str = (datetime.datetime(2016, 1, 1) +
datetime.timedelta(doy + 0.5)).strftime('%y%m%d')
current_lc_sensor_files = [s for s in lc_sensor_flist if run_date_str in s]
current_sc_sensor_files = [s for s in sc_sensor_flist if run_date_str in s]
# reading leaf chamber sensor data
df_lc_sensor = None
if len(current_lc_sensor_files) > 0:
for entry in current_lc_sensor_files:
df_lc_sensor_loaded = pd.read_csv(
entry, sep='\\s+', usecols=[0, 1, 2, 8, 10, 11, 12],
names=['datetime', 'PAR_ch_1', 'PAR_ch_2', 'T_amb',
'T_ch_1', 'T_ch_2', 'T_ch_3'],
dtype={'datetime': str, 'PAR_ch_1': np.float64,
'PAR_ch_2': np.float64, 'T_amb': np.float64,
'T_ch_1': np.float64, 'T_ch_2': np.float64,
'T_ch_3': np.float64},
parse_dates={'timestamp': [0]},
date_parser=lambda s: np.datetime64(
'%s-%s-%s %s:%s:%s' % (s[0:4], s[4:6], s[6:8],
s[8:10], s[10:12], s[12:14])),
engine='c', na_values='-')
if df_lc_sensor is None:
df_lc_sensor = df_lc_sensor_loaded
else:
df_lc_sensor = pd.concat([df_lc_sensor, df_lc_sensor_loaded],
ignore_index=True)
del df_lc_sensor_loaded
else:
print('Leaf chamber sensor data file not found on day 20%s' %
run_date_str)
continue
# reading soil chamber sensor data
df_sc_sensor = None
if len(current_sc_sensor_files) > 0:
for entry in current_sc_sensor_files:
df_sc_sensor_loaded = pd.read_csv(
entry, sep='\\s+', usecols=[0, 5, 6, 7],
names=['datetime', 'T_ch_4', 'T_ch_5', 'T_ch_6'],
dtype={'datetime': str, 'T_ch_4': np.float64,
'T_ch_5': np.float64, 'T_ch_6': np.float64},
parse_dates={'timestamp': [0]},
date_parser=lambda s: np.datetime64(
'%s-%s-%s %s:%s:%s' % (s[0:4], s[4:6], s[6:8],
s[8:10], s[10:12], s[12:14])),
engine='c')
if df_sc_sensor is None:
df_sc_sensor = df_sc_sensor_loaded
else:
df_sc_sensor = pd.concat([df_sc_sensor, df_sc_sensor_loaded],
ignore_index=True)
del df_sc_sensor_loaded
else:
print('Soil chamber sensor data file not found on day 20%s' %
run_date_str)
continue
# convert day of year number
doy_lc_sensor = \
(df_lc_sensor['timestamp'] - pd.Timestamp('%s-01-01' % year_start)) / \
pd.Timedelta(days=1)
# parse datetime strings
# doy_lc_sensor = np.zeros(df_lc_sensor.shape[0]) * np.nan
# for i in range(df_lc_sensor.shape[0]):
# dt_str = df_lc_sensor.loc[i, 'datetime']
# if len(dt_str) == 14:
# # accelerate datetime parsing with manual operations
# dt_converted = datetime.datetime(
# int(dt_str[0:4]), int(dt_str[4:6]), int(dt_str[6:8]),
# int(dt_str[8:10]), int(dt_str[10:12]), int(dt_str[12:14]))
# doy_lc_sensor[i] = \
# (dt_converted -
# datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
# # doy_lc_sensor[i] = (
# # datetime.datetime.strptime(dt_str, '%Y%m%d%H%M%S') -
# # datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
# else:
# doy_lc_sensor[i] = np.nan
# indices for insertion, range 0 to 17279
ind_lc_sensor = (doy_lc_sensor - doy) * 86400. / 5.
ind_lc_sensor = np.round(ind_lc_sensor).astype(np.int64)
# convert day of year number
doy_sc_sensor = \
(df_sc_sensor['timestamp'] - pd.Timestamp('%s-01-01' % year_start)) / \
pd.Timedelta(days=1)
# doy_sc_sensor = np.zeros(df_sc_sensor.shape[0]) * np.nan
# for i in range(df_sc_sensor.shape[0]):
# dt_str = df_sc_sensor.loc[i, 'datetime']
# if len(dt_str) == 14:
# # accelerate datetime parsing with manual operations
# dt_converted = datetime.datetime(
# int(dt_str[0:4]), int(dt_str[4:6]), int(dt_str[6:8]),
# int(dt_str[8:10]), int(dt_str[10:12]), int(dt_str[12:14]))
# doy_sc_sensor[i] = \
# (dt_converted -
# datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
# # doy_sc_sensor[i] = (
# # datetime.datetime.strptime(dt_str, '%Y%m%d%H%M%S') -
# # datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
# else:
# doy_sc_sensor[i] = np.nan
# indices for insertion, range 0 to 17279
ind_sc_sensor = (doy_sc_sensor - doy) * 86400. / 5.
ind_sc_sensor = np.round(ind_sc_sensor).astype(np.int64)
# corrections for PAR and TC values
# parameters from <NAME> <<EMAIL>>, 13 April 2016
# correction factor for 'PAR_ch_2' was updated 27 October 2016,
# according to <NAME> <<EMAIL>>
df_lc_sensor['PAR_ch_1'] *= 200. # was 210-220
df_lc_sensor['PAR_ch_2'] *= 205. # was 200
df_lc_sensor['T_ch_1'] = df_lc_sensor['T_ch_1'] * 0.94 + 0.75
df_lc_sensor['T_ch_2'] = df_lc_sensor['T_ch_2'] * 0.96 - 0.20
if doy < 103:
# before 13 April 2016, but not including that day
df_lc_sensor['T_ch_3'] = df_lc_sensor['T_ch_3'] * 0.98 - 0.89
else:
# TC in the large leaf chamber reinstalled 13 April 2016 11:20 am
# before that, temperature data were corrupt at this channel
df_lc_sensor['T_ch_3'] = df_lc_sensor['T_ch_3'] * 0.97 - 0.39
# mask corrupt data
# 1. 'T_ch_3' data between April 8 and 13 of 2016 were corrupt
if doy == 98:
break_pt = (datetime.datetime(2016, 4, 8, 9, 33, 41) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
df_lc_sensor.loc[doy_lc_sensor > break_pt, 'T_ch_3'] = np.nan
del break_pt
elif 98 < doy < 103:
df_lc_sensor['T_ch_3'] = np.nan
elif doy == 103:
break_pt = (datetime.datetime(2016, 4, 13, 11, 20, 24) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
df_lc_sensor.loc[doy_lc_sensor < break_pt, 'T_ch_3'] = np.nan
del break_pt
# 2. no soil chamber sensors before 12 April 2016 10:37:09 am
if doy < 102:
df_sc_sensor[['T_ch_4', 'T_ch_5', 'T_ch_6']] = np.nan
elif doy == 102:
break_pt = (datetime.datetime(2016, 4, 12, 10, 37, 9) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
df_sc_sensor.loc[doy_sc_sensor < break_pt,
['T_ch_4', 'T_ch_5', 'T_ch_6']] = np.nan
del break_pt
# 3. remove 'PAR_ch_2' data before before 8 April 2016 09:40:25 am
if doy == 97:
df_lc_sensor['PAR_ch_2'] = np.nan
elif doy == 98:
break_pt = (datetime.datetime(2016, 4, 8, 9, 40, 25) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
df_lc_sensor.loc[doy_lc_sensor < break_pt, 'PAR_ch_2'] = np.nan
del break_pt
# 4. 'PAR_ch_2' data from 08:40 to 09:41 on 7 June 2016 were corrupt
if doy == 158:
break_pt1 = (datetime.datetime(2016, 6, 7, 8, 40) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
break_pt2 = (datetime.datetime(2016, 6, 7, 9, 41) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
df_lc_sensor.loc[
(doy_lc_sensor > break_pt1) & (doy_lc_sensor < break_pt2) &
(df_lc_sensor['PAR_ch_1'].values < 400.), 'PAR_ch_1'] = np.nan
df_lc_sensor.loc[
(doy_lc_sensor > break_pt1) & (doy_lc_sensor < break_pt2) &
(df_lc_sensor['PAR_ch_2'].values < 400.), 'PAR_ch_2'] = np.nan
del break_pt1, break_pt2
# 5. power failure for leaf chamber sensor logger
# no data from 30 Aug 2016 13:44:36 to 5 Sep 2016 11:22:44
if doy == 242:
break_pt = (datetime.datetime(2016, 8, 30, 13, 44, 36) -
datetime.datetime(2016, 1, 1)).total_seconds() / 86400.
df_lc_sensor.loc[doy_lc_sensor > break_pt, 1:] = np.nan
if 242 < doy < 248:
df_lc_sensor.loc[:, 1:] = np.nan
if doy == 248:
break_pt = | |
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.study_get_tenant_study_statistics_with_http_info(tenant_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id: (required)
:param str start_date:
:param str end_date:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GetTenantStudyStatisticsQueryResult, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'tenant_id',
'start_date',
'end_date'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method study_get_tenant_study_statistics" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'tenant_id' is set
if self.api_client.client_side_validation and ('tenant_id' not in local_var_params or # noqa: E501
local_var_params['tenant_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tenant_id` when calling `study_get_tenant_study_statistics`") # noqa: E501
collection_formats = {}
path_params = {}
if 'tenant_id' in local_var_params:
path_params['tenantId'] = local_var_params['tenant_id'] # noqa: E501
query_params = []
if 'start_date' in local_var_params and local_var_params['start_date'] is not None: # noqa: E501
query_params.append(('startDate', local_var_params['start_date'])) # noqa: E501
if 'end_date' in local_var_params and local_var_params['end_date'] is not None: # noqa: E501
query_params.append(('endDate', local_var_params['end_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/studies/statistics/{tenantId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GetTenantStudyStatisticsQueryResult', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def study_merge_study(self, tenant_id, study_id, **kwargs): # noqa: E501
"""study_merge_study # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.study_merge_study(tenant_id, study_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id: (required)
:param str study_id: (required)
:param bool force_merge_from_baseline:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.study_merge_study_with_http_info(tenant_id, study_id, **kwargs) # noqa: E501
    def study_merge_study_with_http_info(self, tenant_id, study_id, **kwargs):  # noqa: E501
        """study_merge_study  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.study_merge_study_with_http_info(tenant_id, study_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str tenant_id: (required)
        :param str study_id: (required)
        :param bool force_merge_from_baseline:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: `locals()` must be captured before any other local is defined
        # so that it contains exactly the declared arguments plus `kwargs`.
        local_var_params = locals()

        # Endpoint-specific parameters, then the generic request options.
        all_params = [
            'tenant_id',
            'study_id',
            'force_merge_from_baseline'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )

        # Reject unknown keyword arguments and flatten accepted ones into
        # `local_var_params` for uniform lookup below.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method study_merge_study" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'tenant_id' is set
        if self.api_client.client_side_validation and ('tenant_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['tenant_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `tenant_id` when calling `study_merge_study`")  # noqa: E501
        # verify the required parameter 'study_id' is set
        if self.api_client.client_side_validation and ('study_id' not in local_var_params or  # noqa: E501
                                                       local_var_params['study_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `study_id` when calling `study_merge_study`")  # noqa: E501

        collection_formats = {}

        # Path placeholders substituted into '/studies/{tenantId}/{studyId}/merge'.
        path_params = {}
        if 'tenant_id' in local_var_params:
            path_params['tenantId'] = local_var_params['tenant_id']  # noqa: E501
        if 'study_id' in local_var_params:
            path_params['studyId'] = local_var_params['study_id']  # noqa: E501

        # Optional query-string parameter; omitted entirely when None.
        query_params = []
        if 'force_merge_from_baseline' in local_var_params and local_var_params['force_merge_from_baseline'] is not None:  # noqa: E501
            query_params.append(('forceMergeFromBaseline', local_var_params['force_merge_from_baseline']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/studies/{tenantId}/{studyId}/merge', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def study_merge_study_deprecated(self, tenant_id, user_id, study_id, **kwargs): # noqa: E501
"""study_merge_study_deprecated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.study_merge_study_deprecated(tenant_id, user_id, study_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id: (required)
:param str user_id: (required)
:param str study_id: (required)
:param bool force_merge_from_baseline:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.study_merge_study_deprecated_with_http_info(tenant_id, user_id, study_id, **kwargs) # noqa: E501
def study_merge_study_deprecated_with_http_info(self, tenant_id, user_id, study_id, **kwargs): # noqa: E501
"""study_merge_study_deprecated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.study_merge_study_deprecated_with_http_info(tenant_id, user_id, study_id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str tenant_id: (required)
:param str user_id: (required)
:param str study_id: (required)
:param bool force_merge_from_baseline:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'tenant_id',
'user_id',
'study_id',
'force_merge_from_baseline'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method study_merge_study_deprecated" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'tenant_id' is set
if self.api_client.client_side_validation and ('tenant_id' not in local_var_params or # noqa: E501
local_var_params['tenant_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `tenant_id` when calling `study_merge_study_deprecated`") # noqa: E501
# verify the required parameter 'user_id' is set
if self.api_client.client_side_validation and ('user_id' not in local_var_params or # noqa: E501
local_var_params['user_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `user_id` when calling `study_merge_study_deprecated`") # noqa: E501
# verify the required parameter 'study_id' is set
if self.api_client.client_side_validation and ('study_id' not in local_var_params or # noqa: E501
local_var_params['study_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `study_id` when calling `study_merge_study_deprecated`") # noqa: E501
collection_formats = {}
path_params = {}
if 'tenant_id' in local_var_params:
path_params['tenantId'] = local_var_params['tenant_id'] # noqa: E501
if 'user_id' in local_var_params:
path_params['userId'] = local_var_params['user_id'] # noqa: E501
if 'study_id' in local_var_params:
path_params['studyId'] = local_var_params['study_id'] # noqa: E501
query_params = []
if 'force_merge_from_baseline' in local_var_params and local_var_params['force_merge_from_baseline'] is not None: # noqa: E501
query_params.append(('forceMergeFromBaseline', local_var_params['force_merge_from_baseline'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/studies/{tenantId}/{userId}/{studyId}/merge', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
| |
test info'
'For example: "LRGM Mic", "LRMG Mic"', default=''),
Arg('mic_jack_type', str, 'Microphone jack Type: nocheck, lrgm, lrmg',
default='nocheck'),
Arg('audiofuntest_run_delay', (int, float),
'Delay between consecutive calls to audiofuntest', default=None),
Arg(
'tests_to_conduct', list,
'A list of dicts. A dict should contain at least one key named\n'
'**type** indicating the test type, which can be **audiofun**,\n'
'**sinewav**, or **noise**.\n'
'\n'
'If type is **audiofun**, the dict can optionally contain:\n'
' - **iteration**: Iterations to run the test.\n'
' - **threshold**: The minimum success rate to pass the test.\n'
' - **input_channels**: A list of input channels to be tested.\n'
' - **output_channels**: A list of output channels to be tested.\n'
' - **volume_gain**: The volume gain set to audiofuntest for \n'
' controlling the volume of generated audio frames. The \n'
' range is from 0 to 100.\n'
' - **input_gain**: The volume gain for sox recorder command.\n'
' The value should be in "dB", you can see the value \n'
' suggested by CRAS with command \n'
' `cras_test_client --dump_sever_info`, check the "Gain" \n'
' column.'
' - **capture_rate**: The capturing sample rate use for testing. \n'
' The value should be determined by output device.\n'
' - **sample_format**: The sample format for audiofuntest. \n'
' See -t section in audiofuntest manual.\n'
' - **player_format**: The sample format for output device.\n'
' - **min_frequency**: The minimum frequency set to audiofuntest.\n'
' - **max_frequency**: The maximum frequency set to audiofuntest.\n'
'\n'
'If type is **sinewav**, the dict can optionally contain:\n'
' - **duration**: The test duration, in seconds.\n'
' - **input_channels**: A list of input channels to be tested.\n'
' - **freq_threshold**: Acceptable frequency margin.\n'
' - **rms_threshold**: **[min, max]** that will make\n'
' sure the following inequality is true: *min <= recorded\n'
' audio RMS (root mean square) value <= max*, otherwise,\n'
' fail the test. Both of **min** and **max** can be set to\n'
' None, which means no limit.\n'
' - **amplitude_threshold**: **[min, max]** and it will\n'
' make sure the inequality is true: *min <= minimum measured\n'
' amplitude <= maximum measured amplitude <= max*,\n'
' otherwise, fail the test. Both of **min** and **max** can\n'
' be set to None, which means no limit.\n'
' - **max_delta_threshold**: **[min, max]** and it will\n'
' make sure the inequality is true: *min <= maximum measured\n'
' delta <= max*, otherwise, fail the test. Both of **min** \n'
' and **max** can be set to None, which means no limit.\n'
'\n'
'If type is **noise**, the dict can optionally contain:\n'
' - **duration**: The test duration, in seconds.\n'
' - **rms_threshold**: **[min, max]** that will make\n'
' sure the following inequality is true: *min <= recorded\n'
' audio RMS (root mean square) value <= max*, otherwise,\n'
' fail the test. Both of **min** and **max** can be set to\n'
' None, which means no limit.\n'
' - **amplitude_threshold**: **[min, max]** and it will\n'
' make sure the inequality is true: *min <= minimum measured\n'
' amplitude <= maximum measured amplitude <= max*,\n'
' otherwise, fail the test. Both of **min** and **max** can\n'
' be set to None, which means no limit.'
' - **max_delta_threshold**: **[min, max]** and it will\n'
' make sure the inequality is true: *min <= maximum measured\n'
' delta <= max*, otherwise, fail the test. Both of **min** \n'
' and **max** can be set to None, which means no limit.\n',
schema=_ARG_TESTS_TO_CONDUCT_SCHEMA),
Arg('keep_raw_logs', bool,
'Whether to attach the audio by Testlog when the test fail.',
default=True)
]
def setUp(self):
  """Prepares the DUT for the audio loop test.

  Loads the audio config, normalizes the output-volume candidates, resolves
  input/output cards and PCM devices (directly or via the UCM config), and
  optionally verifies the cras service state.
  """
  self._dut = device_utils.CreateDUTInterface()
  if self.args.audio_conf:
    self._dut.audio.LoadConfig(self.args.audio_conf)
  # Normalize output_volume to a list of candidates so the rest of the test
  # can always iterate over it.
  self._output_volumes = self.args.output_volume
  if not isinstance(self._output_volumes, list):
    self._output_volumes = [self._output_volumes]
  self._output_volume_index = 0
  # The test results under each output volume candidate.
  # If any one of tests to conduct fails, test fails under that output
  # volume candidate. If test fails under all output volume candidates,
  # the whole test fails.
  self._test_results = [True] * len(self._output_volumes)
  self._test_message = []
  # Map the mic_jack_type arg string onto the enum; 'nocheck' disables the
  # jack-type check entirely.
  self._mic_jack_type = {
      'nocheck': None,
      'lrgm': base.MicJackType.lrgm,
      'lrmg': base.MicJackType.lrmg
  }[self.args.mic_jack_type]
  if self.args.initial_actions is None:
    self._dut.audio.Initialize()
  else:
    for card, action in self.args.initial_actions:
      # Non-numeric card identifiers are names to be resolved to an index.
      if card.isdigit() is False:
        card = self._dut.audio.GetCardIndexByName(card)
      if action is None:
        self._dut.audio.Initialize(card)
      else:
        self._dut.audio.ApplyAudioConfig(action, card)
  # Transfer input and output device format
  self._in_card = self._dut.audio.GetCardIndexByName(self.args.input_dev[0])
  self._in_channel_map = _DEFAULT_TEST_INPUT_CHANNELS
  if self.args.input_dev[1].isdigit():
    self._in_device = self.args.input_dev[1]
  else:
    # Detect _in_device from ucm config.
    self._in_device = self._dut.audio.config_mgr.GetPCMId(
        'CapturePCM', self.args.input_dev[1], self._in_card)
    channels_from_ucm_config = self._dut.audio.config_mgr.GetChannelMap(
        self.args.input_dev[1], self._in_card)
    if channels_from_ucm_config is not None:
      self._in_channel_map = channels_from_ucm_config
  self._out_card = self._dut.audio.GetCardIndexByName(self.args.output_dev[0])
  if self.args.output_dev[1].isdigit():
    self._out_device = self.args.output_dev[1]
  else:
    # Detect _out_device from ucm config.
    self._out_device = self._dut.audio.config_mgr.GetPCMId(
        'PlaybackPCM', self.args.output_dev[1], self._out_card)
  # Backward compatible for non-porting case, which use ALSA device name.
  # only works on chromebook device
  # TODO(mojahsu) Remove them later.
  self._alsa_input_device = 'hw:%s,%s' % (self._in_card, self._in_device)
  self._alsa_output_device = 'hw:%s,%s' % (self._out_card, self._out_device)
  self._current_test_args = None
  if self.args.check_cras:
    # Check cras status
    if self.args.cras_enabled:
      cras_status = 'start/running'
    else:
      cras_status = 'stop/waiting'
    self.assertIn(
        cras_status,
        self._dut.CallOutput(['status', 'cras']),
        'cras status is wrong (expected status: %s). '
        'Please make sure that you have appropriate setting for '
        '\'"disable_services": ["cras"]\' in the test item.' % cras_status)
  self._dut_temp_dir = self._dut.temp.mktemp(True, '', 'audio_loop')
  # If the test fails, attach the audio file; otherwise, remove it.
  self._audio_file_path = []
  ucm_config_mgr = self._dut.audio.ucm_config_mgr
  self._default_input_gain = ucm_config_mgr.GetDefaultInputGain(self._in_card)
def tearDown(self):
  """Restores DUT mixer controls and deletes the temporary directory."""
  dut = self._dut
  dut.audio.RestoreMixerControls()
  dut.CheckCall(['rm', '-rf', self._dut_temp_dir])
def runTest(self):
  """Runs every configured sub-test under each output-volume candidate.

  Passes as soon as all sub-tests succeed for one volume candidate; fails
  only if every candidate fails. Recorded audio files are attached via
  Testlog on failure (when keep_raw_logs is set) or deleted otherwise.
  """
  # If autostart, JS triggers start_run_test event.
  # Otherwise, it binds start_run_test with 's' key pressed.
  self.ui.CallJSFunction('init',
                         self.args.require_dongle, self.args.test_title)
  if self.args.autostart:
    self.ui.RunJS('window.template.innerHTML = "";')
  else:
    self.ui.WaitKeysOnce('S')
  self.CheckDongleStatus()
  self.SetupAudio()
  self.CheckConformance()
  # Run each tests to conduct under each output volume candidate.
  for self._output_volume_index, output_volume in enumerate(
      self._output_volumes):
    if output_volume is not None:
      # With a dongle the headphone path is in use; otherwise the speaker.
      if self.args.require_dongle:
        self._dut.audio.SetHeadphoneVolume(output_volume, self._out_card)
      else:
        self._dut.audio.SetSpeakerVolume(output_volume, self._out_card)
    for test in self.args.tests_to_conduct:
      self._current_test_args = test
      if test['type'] == 'audiofun':
        self.AudioFunTest()
      elif test['type'] == 'sinewav':
        self.SinewavTest()
      elif test['type'] == 'noise':
        self.NoiseTest()
      else:
        raise ValueError('Test type "%s" not supported.' % test['type'])
    if self.MayPassTest():
      self.ui.CallJSFunction('testPassResult')
      self.Sleep(0.5)
      # Pass: the recorded audio is no longer needed.
      for file_path in self._audio_file_path:
        os.unlink(file_path)
      return
  if self.args.keep_raw_logs:
    for file_path in self._audio_file_path:
      testlog.AttachFile(
          path=file_path,
          mime_type='audio/x-raw',
          name=os.path.basename(file_path),
          description='recorded audio of the test',
          delete=True)
  else:
    for file_path in self._audio_file_path:
      os.unlink(file_path)
  self.FailTest()
def AppendErrorMessage(self, error_message):
  """Marks the current output-volume run as failed and records the message.

  Args:
    error_message: description of the failure; also logged to the console.
  """
  volume_index = self._output_volume_index
  self._test_results[volume_index] = False
  self._test_message.extend([
      'Under output volume %r' % self._output_volumes[volume_index],
      error_message,
  ])
  session.console.error(error_message)
def _MatchPatternLines(self, in_stream, re_pattern, num_lines=None):
"""Try to match the re pattern in the given number of lines.
Try to read lines one-by-one from input stream and perform re matching.
Stop when matching successes or reaching the number of lines limit.
Args:
in_stream: input stream to read from.
re_pattern: re pattern used for matching.
num_lines: maximum number of lines to stop for matching.
None for read until end of input stream.
"""
num_read = 0
while True:
line = in_stream.readline()
if not line:
return None
num_read += 1
m = re_pattern.match(line)
if m is not None:
return m
if num_lines is not None and num_read >= num_lines:
return None
def _ParseSingleRunOutput(self, audiofun_output, input_channels):
  """Parses one audiofuntest iteration's per-channel success rates.

  Sample single run output:
    O: channel = 0, success = 1, fail = 0, rate = 100.0
    X: channel = 1, success = 0, fail = 1, rate = 0.0

  Args:
    audiofun_output: output stream of audiofuntest to parse from.
    input_channels: a list of mic channels used for testing; one output
        line is expected per channel, in this order.

  Returns:
    A dict mapping channel to its success rate, or None when the expected
    per-channel line is missing (an error message is appended in that case).
  """
  rates = {}
  for channel in input_channels:
    match = self._MatchPatternLines(
        audiofun_output, _AUDIOFUNTEST_SUCCESS_RATE_RE, 1)
    if match is None or int(match.group(1)) != channel:
      self.AppendErrorMessage(
          'Failed to get expected %d channel output from audiofuntest'
          % channel)
      return None
    rates[channel] = float(match.group(2))
  return rates
def AudioFunTestWithOutputChannel(self, capture_rate, input_channels,
output_channel):
"""Runs audiofuntest program to get the frequency from microphone
immediately according to speaker and microphone setting.
Sample audiofuntest message:
Config values.
Player parameter: aplay -r 48000 -f s16 -t raw -c 2 -B 0 -
Recorder parameter: arecord -r 48000 -f s16 -t raw -c 2 -B 0 -
Player FIFO name:
| |
did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
    """
    This takes self.sb_guess and fits to each maxima to get the details of
    each sideband. It's really ugly, but it works. The error of the
    sideband area is approximated from the data, not the curve fit. All
    else is from the curve fit. Which is definitely underestimating the
    error, but we don't care too much about those errors (at this point).
    self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
    Temporary stuff:
    sb_fits = holder of the fitting results until all spectra have been fit
    window = an integer that determines the "radius" of the fit window, proportional to thz_freq.
    Attributes created:
    self.sb_results = the money maker. Column order:
    [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
    [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
    self.full_dict = a dictionary similar to sb_results, but now the keys
    are the sideband orders. Column ordering is otherwise the same.
    :param plot: Do you want to see the fits plotted with the data?
    :type plot: bool
    :param verbose: Do you want to see the details AND the initial guess fits?
    :type verbose: bool
    :return: None
    """
    # print "Trying to fit these"
    sb_fits = []
    if verbose:
        print("=" * 15)
        print()
        print("Fitting CCD Sidebands")
        print(os.path.basename(self.fname))
        print()
        print("=" * 15)
    # pretty sure you want this up here so things don't break
    # when no sidebands found
    self.full_dict = {}
    thz_freq = self.parameters["thz_freq"]
    window = 15 + int(15 * thz_freq / 0.0022)  # Adjust the fit window based on the sideband spacing
    # The 15's are based on empirical knowledge that for
    # 540 GHz (2.23 meV), the best window size is 30 and
    # that it seems like the window size should grow slowly?
    for elem, peakIdx in enumerate(self.sb_index):  # Have to do this because guess_sidebands
        # doesn't out put data in the most optimized way
        # Clamp the fit window at the array edges.
        # NOTE(review): 1600 looks like a hard-coded detector length — confirm
        # it matches self.proc_data's row count.
        if peakIdx < window:
            data_temp = self.proc_data[:peakIdx + window, :]
        elif (1600 - peakIdx) < window:
            data_temp = self.proc_data[peakIdx - window:, :]
        else:
            data_temp = self.proc_data[peakIdx - window:peakIdx + window, :]
        width_guess = 0.0001 + 0.000001 * self.sb_list[elem]  # so the width guess gets wider as order goes up
        # Initial guess: [center, area, width, background offset]
        p0 = np.array([self.sb_guess[elem, 0],
                       self.sb_guess[elem, 1] * width_guess,
                       width_guess,
                       0.1])
        # print "Let's fit this shit!"
        if verbose:
            print("Fitting SB {}. Peak index: {}, {}th peak in spectra".format(
                self.sb_list[elem], peakIdx, elem
            ))
            # print "\nnumber:", elem, num
            # print "data_temp:", data_temp
            # print "p0:", p0
            print(' '*20 +"p0 = " + np.array_str(p0, precision=4))
        # plot_guess = True  # This is to disable plotting the guess function
        if verbose and plot:
            plt.figure('CCD data')
            linewidth = 3
            x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
            if elem != 0:
                try:
                    plt.plot(x_vals, gauss(x_vals, *p0),
                             plt.gca().get_lines()[-1].get_color() + '--'  # I don't really know. Mostly
                             # just looked around at what functions
                             # matplotlib has...
                             , linewidth=linewidth)
                except:  # to prevent weird mac issues with the matplotlib things?
                    # NOTE(review): bare except also swallows KeyboardInterrupt;
                    # consider `except Exception:`.
                    plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
            else:
                plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
        try:
            # 11/1/16
            # needed to bump maxfev up to 2k because a sideband wasn't being fit
            # Fix for sb 106
            # 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
            coeff, var_list = curve_fit(
                gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev = 2000)
        except Exception as e:
            if verbose:
                print("\tThe fit failed:")
                print("\t\t", e)
                print("\tFitting region: {}->{}".format(peakIdx-window, peakIdx+window))
            # print "I couldn't fit", elem
            # print "It's sideband", num
            # print "In file", self.fname
            # print "because", e
            # print "wanted to fit xindx", peakIdx, "+-", window
            # Mark this sideband as unfit; it is filtered out below.
            self.sb_list[elem] = None
            continue  # This will ensure the rest of the loop is not run without an actual fit.
        coeff[1] = abs(coeff[1])  # The amplitude could be negative if the linewidth is negative
        coeff[2] = abs(coeff[2])  # The linewidth shouldn't be negative
        if verbose:
            print("\tFit successful: ", end=' ')
            print("p = " + np.array_str(coeff, precision=4))
        # print "coeffs:", coeff
        # print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
        # Keep only physically plausible linewidths.
        # NOTE(review): 10e-4 is 1e-3 and 10e-6 is 1e-5 — the scientific
        # notation here is misleading; confirm the intended bounds.
        if 10e-4 > coeff[2] > 10e-6:
            try:
                sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.diag(var_list)))))
            except RuntimeWarning:
                sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.abs(np.diag(var_list))))))
            # the var_list wasn't approximating the error well enough, even when using sigma and absoluteSigma
            # self.sb_guess[elem, 2] is the relative error as calculated by the guess_sidebands method
            # coeff[1] is the area from the fit. Therefore, the product should be the absolute error
            # of the integrated area of the sideband. The other errors are still underestimated.
            #
            # 1/12/18 note: So it looks like what hunter did is calculate an error estimate
            # for the strength/area by the quadrature sum of errors of the points in the peak
            # (from like 813 in guess_sidebands:
            # error_est = np.sqrt(sum([i ** 2 for i in error[found_index - 1:found_index + 2]])) / (
            # Where the error is what comes from the CCD by averaging 4 spectra. As far as I can tell,
            # it doesn't currently pull in the dark counts or anything like that, except maybe
            # indirectly since it'll cause the variations in the peaks
            sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
            if verbose:
                print("\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
                    self.sb_guess[elem, 2], coeff[1] * self.sb_guess[elem, 2]
                ))
                print()
            # print "The rel. error guess is", self.sb_guess[elem, 2]
            # print "The abs. error guess is", coeff[1] * self.sb_guess[elem, 2]
            # The error from self.sb_guess[elem, 2] is a relative error
        if plot and verbose:
            plt.figure('CCD data')
            linewidth = 5
            x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
            if elem != 0:
                try:
                    plt.plot(x_vals, gauss(x_vals, *coeff),
                             plt.gca().get_lines()[-1].get_color() + '--'  # I don't really know. Mostly
                             # just looked around at what functions
                             # matplotlib has...
                             , linewidth=linewidth)
                except:  # to prevent weird mac issues with the matplotlib things?
                    plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
            else:
                plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
    sb_fits_temp = np.asarray(sb_fits)
    reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
    # Reorder the list to put the error of the i-th parameter as the i+1th.
    try:
        sb_fits = sb_fits_temp[:, reorder]
        # if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
    except:
        # NOTE(review): this bare except masks any indexing error as
        # "no sidebands"; consider catching IndexError explicitly.
        raise RuntimeError("No sidebands to fit?")
    # Going to label the appropriate row with the sideband
    self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
    # NOTE(review): sb_names is never used afterwards — candidate for removal.
    sb_names = np.vstack(self.sb_list)
    # Sort by SB order
    sorter = np.argsort(sb_fits[:, 0])
    self.sb_results = np.array(sb_fits[sorter, :7])
    if verbose:
        print("\tsb_results:")
        print("\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).format(
            "SB", "Cen.En.", "", "Area", "", "Width",""))
        for line in self.sb_results:
            print('\t\t[' + ("{:^5.0f}"+ "{:<12.4g}"*(line.size-1)).format(*line) + ']')
        print('-'*19)
    self.full_dict = {}
    for sb in self.sb_results:
        self.full_dict[sb[0]] = np.asarray(sb[1:])
def infer_frequencies(self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
    """
    Fits the results from fit_sidebands to a line to get the relevant laser
    frequencies.

    The wavenumber values are always recorded in self.parameters; the
    returned pair is expressed in the requested units.

    :param nir_units: What units do you want this to output?
    :type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
    :param thz_units: What units do you want this to output for the THz?
    :type thz_units: 'GHz', 'wavenumber', 'meV'
    :param bad_points: How many more-positive order sidebands shall this ignore?
    :type bad_points: int
    :return: freqNIR, freqTHz, the frequencies in the appropriate units
    """
    # Always store wavenumber values so the parameters dict stays
    # unit-consistent regardless of what the caller requested.
    nir_wavenumber, thz_wavenumber = calc_laser_frequencies(
        self, "wavenumber", "wavenumber", bad_points)
    self.parameters["calculated NIR freq (cm-1)"] = "{}".format(nir_wavenumber)
    self.parameters["calculated THz freq (cm-1)"] = "{}".format(thz_wavenumber)
    # Convert again, this time into the units the caller asked for.
    return calc_laser_frequencies(self, nir_units, thz_units, bad_points)
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
| |
== 0)
# Machine-generated Pyomo constraints; do not hand-edit.
# c5837-c5844: linking/disaggregation constraints — each b70xx indicator
# equals the sum of its three successor binaries.
m.c5837 = Constraint(expr= - m.b7050 + m.b7154 + m.b7155 + m.b7156 == 0)
m.c5838 = Constraint(expr= - m.b7051 + m.b7157 + m.b7158 + m.b7159 == 0)
m.c5839 = Constraint(expr= - m.b7052 + m.b7160 + m.b7161 + m.b7162 == 0)
m.c5840 = Constraint(expr= - m.b7053 + m.b7163 + m.b7164 + m.b7165 == 0)
m.c5841 = Constraint(expr= - m.b7054 + m.b7166 + m.b7167 + m.b7168 == 0)
m.c5842 = Constraint(expr= - m.b7055 + m.b7169 + m.b7170 + m.b7171 == 0)
m.c5843 = Constraint(expr= - m.b7056 + m.b7172 + m.b7173 + m.b7174 == 0)
m.c5844 = Constraint(expr= - m.b7057 + m.b7175 + m.b7176 + m.b7177 == 0)
# c5845 onward: pairwise mutual-exclusion constraints (at most one of each
# binary pair may be 1), generated over consecutive groups of 12 variables.
m.c5845 = Constraint(expr= m.b7058 + m.b7062 <= 1)
m.c5846 = Constraint(expr= m.b7059 + m.b7062 <= 1)
m.c5847 = Constraint(expr= m.b7060 + m.b7062 <= 1)
m.c5848 = Constraint(expr= m.b7058 + m.b7066 <= 1)
m.c5849 = Constraint(expr= m.b7059 + m.b7066 <= 1)
m.c5850 = Constraint(expr= m.b7060 + m.b7066 <= 1)
m.c5851 = Constraint(expr= m.b7061 + m.b7065 <= 1)
m.c5852 = Constraint(expr= m.b7062 + m.b7065 <= 1)
m.c5853 = Constraint(expr= m.b7063 + m.b7065 <= 1)
m.c5854 = Constraint(expr= m.b7061 + m.b7066 <= 1)
m.c5855 = Constraint(expr= m.b7062 + m.b7066 <= 1)
m.c5856 = Constraint(expr= m.b7063 + m.b7066 <= 1)
m.c5857 = Constraint(expr= m.b7061 + m.b7069 <= 1)
m.c5858 = Constraint(expr= m.b7062 + m.b7069 <= 1)
m.c5859 = Constraint(expr= m.b7063 + m.b7069 <= 1)
m.c5860 = Constraint(expr= m.b7064 + m.b7068 <= 1)
m.c5861 = Constraint(expr= m.b7065 + m.b7068 <= 1)
m.c5862 = Constraint(expr= m.b7066 + m.b7068 <= 1)
m.c5863 = Constraint(expr= m.b7064 + m.b7069 <= 1)
m.c5864 = Constraint(expr= m.b7065 + m.b7069 <= 1)
m.c5865 = Constraint(expr= m.b7066 + m.b7069 <= 1)
m.c5866 = Constraint(expr= m.b7070 + m.b7074 <= 1)
m.c5867 = Constraint(expr= m.b7071 + m.b7074 <= 1)
m.c5868 = Constraint(expr= m.b7072 + m.b7074 <= 1)
m.c5869 = Constraint(expr= m.b7070 + m.b7078 <= 1)
m.c5870 = Constraint(expr= m.b7071 + m.b7078 <= 1)
m.c5871 = Constraint(expr= m.b7072 + m.b7078 <= 1)
m.c5872 = Constraint(expr= m.b7073 + m.b7077 <= 1)
m.c5873 = Constraint(expr= m.b7074 + m.b7077 <= 1)
m.c5874 = Constraint(expr= m.b7075 + m.b7077 <= 1)
m.c5875 = Constraint(expr= m.b7073 + m.b7078 <= 1)
m.c5876 = Constraint(expr= m.b7074 + m.b7078 <= 1)
m.c5877 = Constraint(expr= m.b7075 + m.b7078 <= 1)
m.c5878 = Constraint(expr= m.b7073 + m.b7081 <= 1)
m.c5879 = Constraint(expr= m.b7074 + m.b7081 <= 1)
m.c5880 = Constraint(expr= m.b7075 + m.b7081 <= 1)
m.c5881 = Constraint(expr= m.b7076 + m.b7080 <= 1)
m.c5882 = Constraint(expr= m.b7077 + m.b7080 <= 1)
m.c5883 = Constraint(expr= m.b7078 + m.b7080 <= 1)
m.c5884 = Constraint(expr= m.b7076 + m.b7081 <= 1)
m.c5885 = Constraint(expr= m.b7077 + m.b7081 <= 1)
m.c5886 = Constraint(expr= m.b7078 + m.b7081 <= 1)
m.c5887 = Constraint(expr= m.b7082 + m.b7086 <= 1)
m.c5888 = Constraint(expr= m.b7083 + m.b7086 <= 1)
m.c5889 = Constraint(expr= m.b7084 + m.b7086 <= 1)
m.c5890 = Constraint(expr= m.b7082 + m.b7090 <= 1)
m.c5891 = Constraint(expr= m.b7083 + m.b7090 <= 1)
m.c5892 = Constraint(expr= m.b7084 + m.b7090 <= 1)
m.c5893 = Constraint(expr= m.b7085 + m.b7089 <= 1)
m.c5894 = Constraint(expr= m.b7086 + m.b7089 <= 1)
m.c5895 = Constraint(expr= m.b7087 + m.b7089 <= 1)
m.c5896 = Constraint(expr= m.b7085 + m.b7090 <= 1)
m.c5897 = Constraint(expr= m.b7086 + m.b7090 <= 1)
m.c5898 = Constraint(expr= m.b7087 + m.b7090 <= 1)
m.c5899 = Constraint(expr= m.b7085 + m.b7093 <= 1)
m.c5900 = Constraint(expr= m.b7086 + m.b7093 <= 1)
m.c5901 = Constraint(expr= m.b7087 + m.b7093 <= 1)
m.c5902 = Constraint(expr= m.b7088 + m.b7092 <= 1)
m.c5903 = Constraint(expr= m.b7089 + m.b7092 <= 1)
m.c5904 = Constraint(expr= m.b7090 + m.b7092 <= 1)
m.c5905 = Constraint(expr= m.b7088 + m.b7093 <= 1)
m.c5906 = Constraint(expr= m.b7089 + m.b7093 <= 1)
m.c5907 = Constraint(expr= m.b7090 + m.b7093 <= 1)
m.c5908 = Constraint(expr= m.b7094 + m.b7098 <= 1)
m.c5909 = Constraint(expr= m.b7095 + m.b7098 <= 1)
m.c5910 = Constraint(expr= m.b7096 + m.b7098 <= 1)
m.c5911 = Constraint(expr= m.b7094 + m.b7102 <= 1)
m.c5912 = Constraint(expr= m.b7095 + m.b7102 <= 1)
m.c5913 = Constraint(expr= m.b7096 + m.b7102 <= 1)
m.c5914 = Constraint(expr= m.b7097 + m.b7101 <= 1)
m.c5915 = Constraint(expr= m.b7098 + m.b7101 <= 1)
m.c5916 = Constraint(expr= m.b7099 + m.b7101 <= 1)
m.c5917 = Constraint(expr= m.b7097 + m.b7102 <= 1)
m.c5918 = Constraint(expr= m.b7098 + m.b7102 <= 1)
m.c5919 = Constraint(expr= m.b7099 + m.b7102 <= 1)
m.c5920 = Constraint(expr= m.b7097 + m.b7105 <= 1)
m.c5921 = Constraint(expr= m.b7098 + m.b7105 <= 1)
m.c5922 = Constraint(expr= m.b7099 + m.b7105 <= 1)
m.c5923 = Constraint(expr= m.b7100 + m.b7104 <= 1)
m.c5924 = Constraint(expr= m.b7101 + m.b7104 <= 1)
m.c5925 = Constraint(expr= m.b7102 + m.b7104 <= 1)
m.c5926 = Constraint(expr= m.b7100 + m.b7105 <= 1)
m.c5927 = Constraint(expr= m.b7101 + m.b7105 <= 1)
m.c5928 = Constraint(expr= m.b7102 + m.b7105 <= 1)
m.c5929 = Constraint(expr= m.b7106 + m.b7110 <= 1)
m.c5930 = Constraint(expr= m.b7107 + m.b7110 <= 1)
m.c5931 = Constraint(expr= m.b7108 + m.b7110 <= 1)
m.c5932 = Constraint(expr= m.b7106 + m.b7114 <= 1)
m.c5933 = Constraint(expr= m.b7107 + m.b7114 <= 1)
m.c5934 = Constraint(expr= m.b7108 + m.b7114 <= 1)
m.c5935 = Constraint(expr= m.b7109 + m.b7113 <= 1)
m.c5936 = Constraint(expr= m.b7110 + m.b7113 <= 1)
m.c5937 = Constraint(expr= m.b7111 + m.b7113 <= 1)
m.c5938 = Constraint(expr= m.b7109 + m.b7114 <= 1)
m.c5939 = Constraint(expr= m.b7110 + m.b7114 <= 1)
m.c5940 = Constraint(expr= m.b7111 + m.b7114 <= 1)
m.c5941 = Constraint(expr= m.b7109 + m.b7117 <= 1)
m.c5942 = Constraint(expr= m.b7110 + m.b7117 <= 1)
m.c5943 = Constraint(expr= m.b7111 + m.b7117 <= 1)
m.c5944 = Constraint(expr= m.b7112 + m.b7116 <= 1)
m.c5945 = Constraint(expr= m.b7113 + m.b7116 <= 1)
m.c5946 = Constraint(expr= m.b7114 + m.b7116 <= 1)
m.c5947 = Constraint(expr= m.b7112 + m.b7117 <= 1)
m.c5948 = Constraint(expr= m.b7113 + m.b7117 <= 1)
m.c5949 = Constraint(expr= m.b7114 + m.b7117 <= 1)
m.c5950 = Constraint(expr= m.b7118 + m.b7122 <= 1)
m.c5951 = Constraint(expr= m.b7119 + m.b7122 <= 1)
m.c5952 = Constraint(expr= m.b7120 + m.b7122 <= 1)
m.c5953 = Constraint(expr= m.b7118 + m.b7126 <= 1)
m.c5954 = Constraint(expr= m.b7119 + m.b7126 <= 1)
m.c5955 = Constraint(expr= m.b7120 + m.b7126 <= 1)
m.c5956 = Constraint(expr= m.b7121 + m.b7125 <= 1)
m.c5957 = Constraint(expr= m.b7122 + m.b7125 <= 1)
m.c5958 = Constraint(expr= m.b7123 + m.b7125 <= 1)
m.c5959 = Constraint(expr= m.b7121 + m.b7126 <= 1)
m.c5960 = Constraint(expr= m.b7122 + m.b7126 <= 1)
m.c5961 = Constraint(expr= m.b7123 + m.b7126 <= 1)
m.c5962 = Constraint(expr= m.b7121 + m.b7129 <= 1)
m.c5963 = Constraint(expr= m.b7122 + m.b7129 <= 1)
m.c5964 = Constraint(expr= m.b7123 + m.b7129 <= 1)
m.c5965 = Constraint(expr= m.b7124 + m.b7128 <= 1)
m.c5966 = Constraint(expr= m.b7125 + m.b7128 <= 1)
m.c5967 = Constraint(expr= m.b7126 + m.b7128 <= 1)
m.c5968 = Constraint(expr= m.b7124 + m.b7129 <= 1)
m.c5969 = Constraint(expr= m.b7125 + m.b7129 <= 1)
m.c5970 = Constraint(expr= m.b7126 + m.b7129 <= 1)
m.c5971 = Constraint(expr= m.b7130 + m.b7134 <= 1)
m.c5972 = Constraint(expr= m.b7131 + m.b7134 <= 1)
m.c5973 = Constraint(expr= m.b7132 + m.b7134 <= 1)
m.c5974 = Constraint(expr= m.b7130 + m.b7138 <= 1)
m.c5975 = Constraint(expr= m.b7131 + m.b7138 <= 1)
m.c5976 = Constraint(expr= m.b7132 + m.b7138 <= 1)
m.c5977 = Constraint(expr= m.b7133 + m.b7137 <= 1)
m.c5978 = Constraint(expr= m.b7134 + m.b7137 <= 1)
m.c5979 = Constraint(expr= m.b7135 + m.b7137 <= 1)
m.c5980 = Constraint(expr= m.b7133 + m.b7138 <= 1)
m.c5981 = Constraint(expr= m.b7134 + m.b7138 <= 1)
m.c5982 = Constraint(expr= m.b7135 + m.b7138 <= 1)
m.c5983 = Constraint(expr= m.b7133 + m.b7141 <= 1)
m.c5984 = Constraint(expr= m.b7134 + m.b7141 <= 1)
m.c5985 = Constraint(expr= m.b7135 + m.b7141 <= 1)
m.c5986 = Constraint(expr= m.b7136 + m.b7140 <= 1)
m.c5987 = Constraint(expr= m.b7137 + m.b7140 <= 1)
m.c5988 = Constraint(expr= m.b7138 + m.b7140 <= 1)
m.c5989 = Constraint(expr= m.b7136 + m.b7141 <= 1)
m.c5990 = Constraint(expr= m.b7137 + m.b7141 <= 1)
m.c5991 = Constraint(expr= m.b7138 + m.b7141 <= 1)
m.c5992 = Constraint(expr= m.b7142 + m.b7146 <= 1)
m.c5993 = Constraint(expr= m.b7143 + m.b7146 <= 1)
m.c5994 = Constraint(expr= m.b7144 + m.b7146 <= 1)
m.c5995 = Constraint(expr= m.b7142 + m.b7150 <= 1)
m.c5996 = Constraint(expr= m.b7143 + m.b7150 <= 1)
m.c5997 = Constraint(expr= m.b7144 + m.b7150 <= 1)
m.c5998 = Constraint(expr= m.b7145 + m.b7149 <= 1)
m.c5999 = Constraint(expr= m.b7146 + m.b7149 <= 1)
m.c6000 = Constraint(expr= m.b7147 + m.b7149 <= 1)
m.c6001 = Constraint(expr= m.b7145 + m.b7150 <= 1)
m.c6002 = Constraint(expr= m.b7146 + m.b7150 <= 1)
m.c6003 = Constraint(expr= m.b7147 + m.b7150 <= 1)
m.c6004 = Constraint(expr= m.b7145 + m.b7153 <= 1)
m.c6005 = Constraint(expr= m.b7146 + m.b7153 <= 1)
m.c6006 = Constraint(expr= m.b7147 + m.b7153 <= 1)
m.c6007 = Constraint(expr= m.b7148 + m.b7152 <= 1)
m.c6008 = Constraint(expr= m.b7149 + m.b7152 <= 1)
m.c6009 = Constraint(expr= m.b7150 + m.b7152 <= 1)
m.c6010 = Constraint(expr= m.b7148 + m.b7153 <= 1)
m.c6011 = Constraint(expr= m.b7149 + m.b7153 <= 1)
m.c6012 = Constraint(expr= m.b7150 + m.b7153 <= 1)
m.c6013 = Constraint(expr= m.b7154 + m.b7158 <= | |
(self.request,)
class PersistentTimestampEmptyError(BadRequestError):
    """Raised when the persistent timestamp supplied to the request is empty."""

    def __init__(self, request):
        self.request = request
        message = 'Persistent timestamp empty' + self._fmt_request(request)
        super(Exception, self).__init__(message)

    def __reduce__(self):
        return type(self), (self.request,)
class PersistentTimestampInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Persistent timestamp invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PersistentTimestampOutdatedError(ServerError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Persistent timestamp outdated' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneCodeEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The phone code is missing' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneCodeExpiredError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The confirmation code has expired' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneCodeHashEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The phone code hash is missing' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneCodeInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The phone code entered was invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneMigrateError(InvalidDCError):
def __init__(self, request, capture=0):
self.request = request
self.new_dc = int(capture)
super(Exception, self).__init__('The phone number a user is trying to use for authorization is associated with DC {new_dc}'.format(new_dc=self.new_dc) + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request, self.new_dc)
class PhoneNumberAppSignupForbiddenError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__("You can't sign up using this app" + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneNumberBannedError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The used phone number has been banned from Telegram and cannot be used anymore. Maybe check https://www.telegram.org/faq_spam' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneNumberFloodError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('You asked for the code too many times.' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneNumberInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The phone number is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneNumberOccupiedError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The phone number is already in use' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhoneNumberUnoccupiedError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The phone number is not yet being used' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhonePasswordFloodError(AuthKeyError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('You have tried logging in too many times' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhonePasswordProtectedError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('This phone is password protected' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoContentUrlEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The content from the URL used as a photo appears to be empty or has caused another HTTP error' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoCropSizeSmallError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Photo is too small' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoExtInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The extension of the photo is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoIdInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Photo id is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Photo invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoInvalidDimensionsError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The photo dimensions are invalid (hint: `pip install pillow` for `send_file` to resize images)' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoSaveFileInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The photo you tried to send cannot be saved by Telegram. A reason may be that it exceeds 10MB. Try resizing it locally' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PhotoThumbUrlEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The URL used as a thumbnail appears to be empty or has caused another HTTP error' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PinRestrictedError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__("You can't pin messages in private chats with other people" + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PollAnswersInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The poll did not have enough answers or had too many' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PollOptionDuplicateError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('A duplicate option was sent in the same poll' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PollOptionInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('A poll option used invalid data (the data may be too long)' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PollQuestionInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The poll question was either empty or too long' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PollUnsupportedError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('This layer does not support polls in the issued method' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PrivacyKeyInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The privacy key is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PrivacyTooLongError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Cannot add that many entities in a single request' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PrivacyValueInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The privacy value is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class PtsChangeEmptyError(ServerError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('No PTS change' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QueryIdEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The query ID is empty' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QueryIdInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The query ID is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QueryTooShortError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The query string is too short' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QuizCorrectAnswersEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('A quiz must specify one correct answer' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QuizCorrectAnswersTooMuchError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('There can only be one correct answer' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QuizCorrectAnswerInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The correct answer is not an existing answer' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class QuizMultipleInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('A poll cannot be both multiple choice and quiz' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class RandomIdDuplicateError(ServerError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('You provided a random ID that was already used' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class RandomIdInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('A provided random ID is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class RandomLengthInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Random length invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class RangesInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Invalid range provided' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ReactionEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('No reaction provided' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ReactionInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Invalid reaction provided (only emoji are allowed)' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class RegIdGenerateFailedError(ServerError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Failure while generating registration ID' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ReplyMarkupGameEmptyError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The provided reply markup for the game is empty' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ReplyMarkupInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The provided reply markup is invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ReplyMarkupTooLongError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The data embedded in the reply markup buttons was too much' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ResultsTooMuchError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('You sent too many results, see https://core.telegram.org/bots/api#answerinlinequery for the current limit' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ResultIdDuplicateError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Duplicated IDs on the sent results. Make sure to use unique IDs' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ResultIdInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('The given result cannot be used to send the selection to the bot' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class ResultTypeInvalidError(BadRequestError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Result type invalid' + self._fmt_request(self.request))
def __reduce__(self):
return type(self), (self.request,)
class RightForbiddenError(ForbiddenError):
def __init__(self, request):
self.request = request
super(Exception, self).__init__('Either your admin rights do not allow you to | |
= get_close_matches('_'.join(tproduct),strippedMolecules)
rclose2 = [x.split('_') for x in rclose]
rclose2 = ['_'.join([y for y in x if y != '']) for x in rclose2]
pclose2 = [x.split('_') for x in pclose]
pclose2 = ['_'.join([y for y in x if y != '']) for x in pclose2]
trueReactant = None
trueProduct = None
try:
trueReactant = rclose[rclose2.index('_'.join(treactant))]
trueProduct = pclose[pclose2.index('_'.join(tproduct))]
except:
pass
if trueReactant and trueProduct:
pairedMolecules[stoch2].append((trueReactant,trueProduct))
pairedMolecules2[stoch].append((trueProduct,trueReactant))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
if sum(len(x) for x in reactantString+productString)> 0 and self.conservationOfMass:
return None,None
else:
return pairedMolecules,pairedMolecules2
    def approximateMatching(self,ruleList,differenceParameter=[]):
        # NOTE(review): mutable default `differenceParameter=[]` appears safe
        # here because it is only deepcopy'd, never mutated — confirm callers.
        def curateString(element,differences,symbolList = ['#','&',';','@','!','?'],equivalenceDict={}):
            '''
            remove compound differencese (>2 characters) and instead represent them with symbols
            returns transformed string,an equivalence dictionary and unused symbols
            '''
            # Each known lexical difference (a prefix like 'X_' or suffix like
            # '_X') is replaced by a fresh one-character symbol so the
            # SequenceMatcher below can treat it as junk when aligning names.
            tmp = element
            for difference in differences:
                if difference in element:
                    if difference.startswith('_'):
                        if difference not in equivalenceDict:
                            symbol = symbolList.pop()
                            equivalenceDict[difference] = symbol
                        else:
                            symbol = equivalenceDict[difference]
                        # suffix difference: substitute when followed by '_' or end
                        tmp = re.sub(r'{0}(_|$)'.format(difference),r'{0}\1'.format(symbol),tmp)
                    elif difference.endswith('_'):
                        if difference not in equivalenceDict:
                            symbol = symbolList.pop()
                            equivalenceDict[difference] = symbol
                        else:
                            symbol = equivalenceDict[difference]
                        # prefix difference: substitute when preceded by '_' or start
                        tmp = re.sub(r'(_|^){0}'.format(difference),r'{0}\1'.format(symbol),tmp)
            return tmp,symbolList,equivalenceDict
        '''
        given a transformation of the kind a+ b -> ~a_~b, where ~a and ~b are some
        slightly modified version of a and b, this function will return a list of
        lexical changes that a and b must undergo to become ~a and ~b.
        '''
        # NOTE(review): the string above is a free-standing expression, not the
        # function docstring (curateString's def precedes it); left as-is.
        flag = True
        # Only handle the binding-like case: exactly one product that is not '0'.
        if len(ruleList[1]) == 1 and ruleList[1] != '0':
            differences = deepcopy(differenceParameter)
            tmpRuleList = deepcopy(ruleList)
            # Iterate until no new difference strings are discovered.
            while flag:
                flag = False
                sym = ['#','&',';','@','!','?']
                dic = {}
                for idx,_ in enumerate(tmpRuleList[0]):
                    tmpRuleList[0][idx],sym,dic = curateString(ruleList[0][idx],differences,sym,dic)
                tmpRuleList[1][0],sym,dic = curateString(ruleList[1][0],differences,sym,dic)
                # Pick the reactant ordering whose concatenation best matches
                # the product name (highest SequenceMatcher ratio).
                permutations = [x for x in itertools.permutations(ruleList[0])]
                tpermutations = [x for x in itertools.permutations(tmpRuleList[0])]
                score = [difflib.SequenceMatcher(None,'_'.join(x),ruleList[1][0]).ratio() \
                for x in permutations]
                maxindex = score.index(max(score))
                ruleList[0] = list(permutations[maxindex])
                tmpRuleList[0] = list(tpermutations[maxindex])
                # Symbols (and simple differences) are treated as junk chars.
                sym = [dic[x] for x in dic]
                sym.extend(differences)
                sym = [x for x in sym if '_' not in x]
                simplifiedDifference = difflib.SequenceMatcher(lambda x: x in sym,'-'.join(tmpRuleList[0]),tmpRuleList[1][0])
                matches = simplifiedDifference.get_matching_blocks()
                # Expect one matching block per reactant plus the terminator.
                if len(matches) != len(ruleList[0]) + 1:
                    return [[],[]],[[],[]]
                # Partition the product name into the spans matched by each reactant.
                productPartitions = []
                for idx,match in enumerate(matches):
                    if matches[idx][2] != 0:
                        productPartitions.append(tmpRuleList[1][0][
                        matches[idx][1]:matches[idx][1]+matches[idx][2]])
                reactantPartitions = tmpRuleList[0]
                #Don't count trailing underscores as part of the species name
                for idx,_ in enumerate(reactantPartitions):
                    reactantPartitions[idx] = reactantPartitions[idx].strip('_')
                for idx,_ in enumerate(productPartitions):
                    productPartitions[idx] = productPartitions[idx].strip('_')
                #greedymatching
                acc=0
                #FIXME:its not properly copying all the string
                # Extend each partition greedily over trailing junk symbols.
                for idx in range(0,len(matches)-1):
                    while matches[idx][2]+ acc < len(tmpRuleList[1][0]) \
                    and tmpRuleList[1][0][matches[idx][2]+ acc] in sym:
                        productPartitions[idx] += tmpRuleList[1][0][matches[idx][2] + acc]
                        acc += 1
                #idx = 0
                #while(tmpString[matches[0][2]+ idx] in sym):
                #    reactantfirstHalf += tmpString[matches[0][2] + idx]
                #    idx += 1
                # Undo the symbol substitution performed by curateString.
                for element in dic:
                    for idx in range(len(productPartitions)):
                        productPartitions[idx] = productPartitions[idx].replace(dic[element],element)
                        reactantPartitions[idx] = reactantPartitions[idx].replace(dic[element],element)
                zippedPartitions = zip(reactantPartitions,productPartitions)
                zippedPartitions = [sorted(x,key=len) for x in zippedPartitions]
                # ndiff lines starting '+ '/'- ' are the per-pair lexical deltas.
                bdifferences = [[z for z in y if '+ ' in z or '- ' in z] for y in \
                [difflib.ndiff(*x) for x in zippedPartitions]]
                processedDifferences = [''.join([y.strip('+ ') for y in x]) for x in bdifferences]
                for idx,processedDifference in enumerate(processedDifferences):
                    if processedDifference not in differences and \
                    '- ' not in processedDifference and bdifferences[idx] != []:
                        # Found a new addition-only difference: record it and rerun.
                        flag = True
                        differences.append(processedDifference)
        else:
            #TODO: dea with reactions of the kindd a+b -> c + d
            return [[],[]],[[],[]]
        return bdifferences,zippedPartitions
    def getReactionClassification(self,reactionDefinition,rules,equivalenceTranslator,
                                  indirectEquivalenceTranslator,
                                  translationKeys=[]):
        '''
        *reactionDefinition* is a list of conditions that must be met for a reaction
        to be classified a certain way
        *rules* is the list of reactions
        *equivalenceTranslator* is a dictinary containing all complexes that have been
        determined to be the same through naming conventions
        This method will go through the list of rules and the list of rule definitions
        and tell us which rules it can classify according to the rule definitions list
        provided
        '''
        # Maps each species tuple to the indices of the rules it appears in.
        ruleDictionary = self.species2Rules(rules)
        #determines a reaction's reactionStructure aka stoichoimetry
        ruleComplianceMatrix = np.zeros((len(rules),len(reactionDefinition['reactions'])))
        for (idx,rule) in enumerate(rules):
            reaction2 = rule #list(parseReactions(rule))
            ruleComplianceMatrix[idx] = self.identifyReactions2(reaction2,reactionDefinition)
        #initialize the tupleComplianceMatrix array with the same keys as ruleDictionary
        #the tuple complianceMatrix is basically there to make sure we evaluate
        #bidirectional reactions as one reaction
        tupleComplianceMatrix = {key:np.zeros((len(reactionDefinition['reactions']))) for key in ruleDictionary}
        #check which reaction conditions each tuple satisfies
        for element in ruleDictionary:
            for rule in ruleDictionary[element]:
                tupleComplianceMatrix[element] += ruleComplianceMatrix[rule]
        #now we will check for the nameConventionMatrix (same thing as before but for naming conventions)
        tupleNameComplianceMatrix = {key:{key2:0 for key2 in equivalenceTranslator} \
        for key in ruleDictionary}
        for rule in ruleDictionary:
            for namingConvention in equivalenceTranslator:
                # Direct equivalences: all members of the pair occur in the rule.
                for equivalence in equivalenceTranslator[namingConvention]:
                    if all(element in rule for element in equivalence):
                        tupleNameComplianceMatrix[rule][namingConvention] +=1
                        break
                # Indirect equivalences discovered via fuzzy matching.
                for equivalence in indirectEquivalenceTranslator[namingConvention]:
                    if all(element in rule for element in equivalence[0]):
                        tupleNameComplianceMatrix[rule][namingConvention] +=1
                        break
                #we can have more than one
                #elif appro
        #check if the reaction conditions each tuple satisfies are enough to get classified
        #as an specific named reaction type
        tupleDefinitionMatrix = {key:np.zeros((len(reactionDefinition['definitions']))) for key in ruleDictionary}
        for key,element in tupleComplianceMatrix.items():
            for idx,member in enumerate(reactionDefinition['definitions']):
                for alternative in member:
                    # 'r' alternatives are satisfied by stoichiometry conditions;
                    # 'n' alternatives by naming-convention equivalences.
                    if 'r' in alternative:
                        tupleDefinitionMatrix[key][idx] += np.all([element[reaction] for reaction in alternative[u'r']])
                    if 'n' in alternative and reactionDefinition['reactionsNames'][idx] in equivalenceTranslator:
                        tupleDefinitionMatrix[key][idx] += np.all([tupleNameComplianceMatrix[key][reactionDefinition['reactionsNames'][idx]]])
        #cotains which rules are equal to reactions defined in reactionDefinitions['definitions']
        #use the per tuple classification to obtain a per reaction classification
        ruleDefinitionMatrix = np.zeros((len(rules),len(reactionDefinition['definitions'])))
        for key,element in ruleDictionary.items():
            for rule in element:
                ruleDefinitionMatrix[rule] = self.checkCompliance(ruleComplianceMatrix[rule],
                tupleDefinitionMatrix[key],reactionDefinition['definitions'])
        #use reactionDefinitions reactionNames field to actually tell us what reaction
        #type each reaction is
        results = []
        for idx,element in enumerate(ruleDefinitionMatrix):
            nonZero = np.nonzero(element)[0]
            if(len(nonZero) == 0):
                results.append('None')
            #todo: need to do something if it matches more than one reaction
            else:
                classifications = [reactionDefinition['reactionsNames'][x] for x in nonZero]
                #FIXME: we should be able to support more than one transformation
                results.append(classifications[0])
        return results
def setConfigurationFile(self,configurationFile):
self.configurationFile = configurationFile
def getReactionProperties(self):
'''
if we are using a naming convention definition in the json file
this method will return the component and state names that this
reaction uses
'''
#TODO: once we transition completely to a naming convention delete
#this ----
reactionTypeProperties = {}
reactionDefinition = self.loadConfigFiles(self.configurationFile)
if self.speciesEquivalences != None:
self.userEquivalences = self.loadConfigFiles(self.speciesEquivalences)['reactionDefinition']
for reactionType,properties in zip(reactionDefinition['reactionsNames'],reactionDefinition['definitions']):
#if its a reaction defined by its naming convention
#xxxxxxxxxxxxxxxxxxx
for alternative in properties:
if 'n' in alternative.keys():
try:
site = reactionDefinition['reactionSite'][alternative['rsi']]
state = reactionDefinition['reactionState'][alternative['rst']]
except:
site = reactionType
state = reactionType[0]
reactionTypeProperties[reactionType] = [site,state]
#TODO: end of delete
reactionDefinition = self.namingConventions
for idx,reactionType in enumerate(reactionDefinition['modificationList']):
site = reactionDefinition['reactionSite'][reactionDefinition['definitions'][idx]['rsi']]
state = reactionDefinition['reactionState'][reactionDefinition['definitions'][idx]['rst']]
reactionTypeProperties[reactionType] = [site,state]
return reactionTypeProperties
    def processFuzzyReaction(self,reaction,translationKeys,conventionDict,indirectEquivalenceTranslator):
        # Derive the lexical differences between reactants and product, then
        # map one- or two-element difference tuples onto known naming
        # conventions, recording the hits in indirectEquivalenceTranslator.
        differences,pairedChemicals= self.approximateMatching(reaction,
                                                              translationKeys)
        #matching,matching2 = self.approximateMatching2(reaction,strippedMolecules,
        #                                               translationKeys)
        d1,d2 = differences[0],differences[1]
        firstMatch,secondMatch = pairedChemicals[0],pairedChemicals[1]
        matches = [firstMatch,secondMatch]
        for index,element in enumerate([d1,d2]):
            idx1=0
            idx2 = 1
            # Scan differences with a pair of indices: first try single-element
            # convention keys, then adjacent two-element keys.
            while idx2 <= len(element):
                if (element[idx1],) in conventionDict.keys():
                    pattern = conventionDict[(element[idx1],)]
                    indirectEquivalenceTranslator[pattern].append([[reaction[0][index],reaction[1][0]],reaction[0],matches[index],reaction[1]])
                elif (element[idx1].replace('-','+'),) in conventionDict.keys():
                    # Reversed ('-') form of a known '+' convention: flip the
                    # match orientation before recording.
                    matches[index].reverse()
                    transformedPattern = conventionDict[(element[idx1].replace('-','+'),) ]
                    indirectEquivalenceTranslator[transformedPattern].append([[reaction[1][0],reaction[0][index]],reaction[0],matches[index],reaction[1]])
                elif idx2 < len(element):
                    if tuple([element[idx1],element[idx2]]) in conventionDict.keys():
                        pattern = conventionDict[tuple([element[idx1],element[idx2]])]
                        indirectEquivalenceTranslator[pattern].append([[reaction[0][index],reaction[1][0]],reaction[0],matches[index],reaction[1]])
                        # consumed two elements: skip the second one as well
                        idx1 += 1
                        idx2 += 1
                    elif '-' in element[idx1] and '-' in element[idx2]:
                        if tuple([element[idx1].replace('-','+'),element[idx2].replace('-','+')]) in conventionDict.keys():
                            matches[index].reverse()
                            transformedPattern = conventionDict[tuple([element[idx1].replace('-','+'),element[idx2].replace('-','+')])]
                            indirectEquivalenceTranslator[transformedPattern].append([[reaction[1][0],reaction[0][index]],reaction[0],matches[index],reaction[1]])
                            idx1 += 1
                            idx2 += 1
                idx1+=1
                idx2+=1
def removeExactMatches(self, reactantList, productList):
"""
goes through the list of lists reactantList and productList and removes the intersection
"""
reactantFlat = Counter([y for x in reactantList for y in x])
productFlat = Counter([y for x in productList for y in x])
intersection = reactantFlat & productFlat
intersection2 = deepcopy(intersection)
newReactant = []
newProduct = []
for chemical in reactantList:
tmp = []
for element in chemical:
if intersection[element] > 0:
intersection[element] -= 1
else:
tmp.append(element)
newReactant.append(tmp)
for chemical in productList:
tmp = []
for element in chemical:
if intersection2[element] > 0:
intersection2[element] -= 1
else:
tmp.append(element)
newProduct.append(tmp)
return newReactant,newProduct
def findBiggestActionable(self,chemicalList, chemicalCandidatesList):
actionableList = []
for chemical,chemicalCandidates in zip(chemicalList,chemicalCandidatesList):
if len(chemicalCandidates) == 0:
return None
if len(chemicalCandidates) == 1:
actionableList.append([chemical])
continue
# find all combinations
scoreDict = | |
**State** *(string) --*
The state of the directory's registration with Amazon WorkSpaces
- **WorkspaceCreationProperties** *(dict) --*
The default creation properties for all WorkSpaces in the directory.
- **EnableWorkDocs** *(boolean) --*
Specifies whether the directory is enabled for Amazon WorkDocs.
- **EnableInternetAccess** *(boolean) --*
The public IP address to attach to all WorkSpaces that are created or rebuilt.
- **DefaultOu** *(string) --*
The organizational unit (OU) in the directory for the WorkSpace machine accounts.
- **CustomSecurityGroupId** *(string) --*
The identifier of any security groups to apply to WorkSpaces when they are created.
- **UserEnabledAsLocalAdministrator** *(boolean) --*
Specifies whether the WorkSpace user is an administrator on the WorkSpace.
- **ipGroupIds** *(list) --*
The identifiers of the IP access control groups associated with the directory.
- *(string) --*
:type DirectoryIds: list
:param DirectoryIds:
The identifiers of the directories. If the value is null, all directories are retrieved.
- *(string) --*
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
# Auto-generated documentation stub: the docstring mirrors the AWS API
# reference and the body is intentionally `pass` — the real pagination is
# implemented by the runtime Paginator machinery, not by this module.
class DescribeWorkspaceImages(Paginator):
    def paginate(self, ImageIds: List = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`WorkSpaces.Client.describe_workspace_images`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceImages>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              ImageIds=[
                  'string',
              ],
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'Images': [
                    {
                        'ImageId': 'string',
                        'Name': 'string',
                        'Description': 'string',
                        'OperatingSystem': {
                            'Type': 'WINDOWS'|'LINUX'
                        },
                        'State': 'AVAILABLE'|'PENDING'|'ERROR',
                        'RequiredTenancy': 'DEFAULT'|'DEDICATED',
                        'ErrorCode': 'string',
                        'ErrorMessage': 'string'
                    },
                ],
            }
        **Response Structure**
        - *(dict) --*
          - **Images** *(list) --*
            Information about the images.
            - *(dict) --*
              Describes a WorkSpace image.
              - **ImageId** *(string) --*
                The identifier of the image.
              - **Name** *(string) --*
                The name of the image.
              - **Description** *(string) --*
                The description of the image.
              - **OperatingSystem** *(dict) --*
                The operating system that the image is running.
                - **Type** *(string) --*
                  The operating system.
              - **State** *(string) --*
                The status of the image.
              - **RequiredTenancy** *(string) --*
                Specifies whether the image is running on dedicated hardware. When bring your own license (BYOL) is enabled, this value is set to DEDICATED.
              - **ErrorCode** *(string) --*
                The error code that is returned for the image.
              - **ErrorMessage** *(string) --*
                The text of the error message that is returned for the image.
        :type ImageIds: list
        :param ImageIds:
          The identifier of the image.
          - *(string) --*
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeWorkspaces(Paginator):
    # NOTE: auto-generated paginator stub (boto3/botocore docs build). The body is
    # intentionally `pass`; the real paginate() is provided by botocore at runtime.
    # The docstring below mirrors the generated AWS API documentation.
    def paginate(self, WorkspaceIds: List = None, DirectoryId: str = None, UserName: str = None, BundleId: str = None, PaginationConfig: Dict = None) -> Dict:
        """
        Creates an iterator that will paginate through responses from :py:meth:`WorkSpaces.Client.describe_workspaces`.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaces>`_
        **Request Syntax**
        ::
          response_iterator = paginator.paginate(
              WorkspaceIds=[
                  'string',
              ],
              DirectoryId='string',
              UserName='string',
              BundleId='string',
              PaginationConfig={
                  'MaxItems': 123,
                  'PageSize': 123,
                  'StartingToken': 'string'
              }
          )
        **Response Syntax**
        ::
          {
              'Workspaces': [
                  {
                      'WorkspaceId': 'string',
                      'DirectoryId': 'string',
                      'UserName': 'string',
                      'IpAddress': 'string',
                      'State': 'PENDING'|'AVAILABLE'|'IMPAIRED'|'UNHEALTHY'|'REBOOTING'|'STARTING'|'REBUILDING'|'MAINTENANCE'|'ADMIN_MAINTENANCE'|'TERMINATING'|'TERMINATED'|'SUSPENDED'|'UPDATING'|'STOPPING'|'STOPPED'|'ERROR',
                      'BundleId': 'string',
                      'SubnetId': 'string',
                      'ErrorMessage': 'string',
                      'ErrorCode': 'string',
                      'ComputerName': 'string',
                      'VolumeEncryptionKey': 'string',
                      'UserVolumeEncryptionEnabled': True|False,
                      'RootVolumeEncryptionEnabled': True|False,
                      'WorkspaceProperties': {
                          'RunningMode': 'AUTO_STOP'|'ALWAYS_ON',
                          'RunningModeAutoStopTimeoutInMinutes': 123,
                          'RootVolumeSizeGib': 123,
                          'UserVolumeSizeGib': 123,
                          'ComputeTypeName': 'VALUE'|'STANDARD'|'PERFORMANCE'|'POWER'|'GRAPHICS'|'POWERPRO'|'GRAPHICSPRO'
                      },
                      'ModificationStates': [
                          {
                              'Resource': 'ROOT_VOLUME'|'USER_VOLUME'|'COMPUTE_TYPE',
                              'State': 'UPDATE_INITIATED'|'UPDATE_IN_PROGRESS'
                          },
                      ]
                  },
              ],
          }
        **Response Structure**
        - *(dict) --*
          - **Workspaces** *(list) --*
            Information about the WorkSpaces.
            Because CreateWorkspaces is an asynchronous operation, some of the returned information could be incomplete.
            - *(dict) --*
              Describes a WorkSpace.
              - **WorkspaceId** *(string) --*
                The identifier of the WorkSpace.
              - **DirectoryId** *(string) --*
                The identifier of the AWS Directory Service directory for the WorkSpace.
              - **UserName** *(string) --*
                The user for the WorkSpace.
              - **IpAddress** *(string) --*
                The IP address of the WorkSpace.
              - **State** *(string) --*
                The operational state of the WorkSpace.
              - **BundleId** *(string) --*
                The identifier of the bundle used to create the WorkSpace.
              - **SubnetId** *(string) --*
                The identifier of the subnet for the WorkSpace.
              - **ErrorMessage** *(string) --*
                The text of the error message that is returned if the WorkSpace cannot be created.
              - **ErrorCode** *(string) --*
                The error code that is returned if the WorkSpace cannot be created.
              - **ComputerName** *(string) --*
                The name of the WorkSpace, as seen by the operating system.
              - **VolumeEncryptionKey** *(string) --*
                The KMS key used to encrypt data stored on your WorkSpace.
              - **UserVolumeEncryptionEnabled** *(boolean) --*
                Indicates whether the data stored on the user volume is encrypted.
              - **RootVolumeEncryptionEnabled** *(boolean) --*
                Indicates whether the data stored on the root volume is encrypted.
              - **WorkspaceProperties** *(dict) --*
                The properties of the WorkSpace.
                - **RunningMode** *(string) --*
                  The running mode. For more information, see `Manage the WorkSpace Running Mode <https://docs.aws.amazon.com/workspaces/latest/adminguide/running-mode.html>`__ .
                - **RunningModeAutoStopTimeoutInMinutes** *(integer) --*
                  The time after a user logs off when WorkSpaces are automatically stopped. Configured in 60 minute intervals.
                - **RootVolumeSizeGib** *(integer) --*
                  The size of the root volume.
                - **UserVolumeSizeGib** *(integer) --*
                  The size of the user storage.
                - **ComputeTypeName** *(string) --*
                  The compute type. For more information, see `Amazon WorkSpaces Bundles <http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles>`__ .
              - **ModificationStates** *(list) --*
                The modification states of the WorkSpace.
                - *(dict) --*
                  Describes a WorkSpace modification.
                  - **Resource** *(string) --*
                    The resource.
                  - **State** *(string) --*
                    The modification state.
        :type WorkspaceIds: list
        :param WorkspaceIds:
          The identifiers of the WorkSpaces. You cannot combine this parameter with any other filter.
          Because the CreateWorkspaces operation is asynchronous, the identifier it returns is not immediately available. If you immediately call DescribeWorkspaces with this identifier, no information is returned.
          - *(string) --*
        :type DirectoryId: string
        :param DirectoryId:
          The identifier of the directory. In addition, you can optionally specify a specific directory user (see ``UserName`` ). You cannot combine this parameter with any other filter.
        :type UserName: string
        :param UserName:
          The name of the directory user. You must specify this parameter with ``DirectoryId`` .
        :type BundleId: string
        :param BundleId:
          The identifier of the bundle. All WorkSpaces that are created from this bundle are retrieved. You cannot combine this parameter with any other filter.
        :type PaginationConfig: dict
        :param PaginationConfig:
          A dictionary that provides parameters to control pagination.
          - **MaxItems** *(integer) --*
            The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
          - **PageSize** *(integer) --*
            The size of each page.
          - **StartingToken** *(string) --*
            A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
        :rtype: dict
        :returns:
        """
        pass
class DescribeWorkspacesConnectionStatus(Paginator):
def paginate(self, WorkspaceIds: List = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate | |
STV_Drop_Index(USING PARAMETERS index ='{name}') OVER ();"
elif method == "text":
query = f"DROP TEXT INDEX {name};"
elif method == "schema":
query = f"DROP SCHEMA {name} CASCADE;"
if query:
try:
executeSQL(query, title="Deleting the relation.")
result = True
except:
if raise_error:
raise
result = False
elif method == "temp":
sql = """SELECT
table_schema, table_name
FROM columns
WHERE LOWER(table_name) LIKE '%_verticapy_tmp_%'
GROUP BY 1, 2;"""
all_tables = result = executeSQL(sql, print_time_sql=False, method="fetchall")
for elem in all_tables:
table = '"{}"."{}"'.format(
elem[0].replace('"', '""'), elem[1].replace('"', '""')
)
drop(table, method="table")
sql = """SELECT
table_schema, table_name
FROM view_columns
WHERE LOWER(table_name) LIKE '%_verticapy_tmp_%'
GROUP BY 1, 2;"""
all_views = executeSQL(sql, print_time_sql=False, method="fetchall")
for elem in all_views:
view = '"{}"."{}"'.format(
elem[0].replace('"', '""'), elem[1].replace('"', '""')
)
drop(view, method="view")
result = True
else:
result = True
return result
# ---#
def readSQL(query: str, time_on: bool = False, limit: int = 100):
    """
    ---------------------------------------------------------------------------
    Returns the result of a SQL query as a tablesample object.

    Parameters
    ----------
    query: str, optional
        SQL Query.
    time_on: bool, optional
        If set to True, displays the query elapsed time.
    limit: int, optional
        Maximum number of elements to display.

    Returns
    -------
    tablesample
        Result of the query.
    """
    check_types(
        [
            ("query", query, [str]),
            ("time_on", time_on, [bool]),
            ("limit", limit, [int, float]),
        ]
    )
    # Strip trailing semicolons and spaces so the query can be embedded
    # as a subquery below.
    query = query.rstrip("; ")
    count = executeSQL(
        "SELECT COUNT(*) FROM ({}) VERTICAPY_SUBTABLE".format(query),
        method="fetchfirstelem",
        print_time_sql=False,
    )
    # Temporarily override the display options. try/finally guarantees they
    # are restored on success AND on failure (the original duplicated the
    # restore logic in an `except` branch and again after the try block).
    sql_on_init = verticapy.options["sql_on"]
    time_on_init = verticapy.options["time_on"]
    try:
        verticapy.options["time_on"] = time_on
        verticapy.options["sql_on"] = False
        try:
            # Fast path: fetch only the first `limit` rows.
            result = to_tablesample("{} LIMIT {}".format(query, limit))
        except Exception:
            # Some statements do not accept a LIMIT clause; retry unmodified.
            result = to_tablesample(query)
    finally:
        verticapy.options["time_on"] = time_on_init
        verticapy.options["sql_on"] = sql_on_init
    result.count = count
    if verticapy.options["percent_bar"]:
        # Enrich the result with per-column types and non-missing percentages.
        vdf = vDataFrameSQL("({}) VERTICAPY_SUBTABLE".format(query))
        percent = vdf.agg(["percent"]).transpose().values
        for column in result.values:
            result.dtype[column] = vdf[column].ctype()
            result.percent[column] = percent[vdf.format_colnames(column)][0]
    return result
# ---#
def get_data_types(expr: str, column_name: str = ""):
    """
    ---------------------------------------------------------------------------
    Returns customized relation columns and the respective data types.
    This process creates a temporary table.

    Parameters
    ----------
    expr: str
        An expression in pure SQL.
    column_name: str, optional
        If not empty, it will return only the data type of the input column if it
        is in the relation.

    Returns
    -------
    list of tuples
        The list of the different columns and their respective type.
    """
    from verticapy.connect import current_cursor

    # Fast path: vertica_python cursors expose the column metadata of the last
    # executed statement, so no temporary table is needed.
    if isinstance(current_cursor(), vertica_python.vertica.cursor.Cursor):
        try:
            executeSQL(expr, print_time_sql=False)
            if column_name:
                # NOTE: mirrors the original behavior — the first column's
                # description is used regardless of `column_name`.
                description = current_cursor().description[0]
                return type_code_to_dtype(
                    type_code=description[1],
                    display_size=description[2],
                    precision=description[4],
                    scale=description[5],
                )
            ctype = []
            for elem in current_cursor().description:
                ctype += [
                    [
                        elem[0],
                        type_code_to_dtype(
                            type_code=elem[1],
                            display_size=elem[2],
                            precision=elem[4],
                            scale=elem[5],
                        ),
                    ]
                ]
            return ctype
        except Exception:
            # Deliberate best-effort: fall back to the temp-table strategy.
            # (Narrowed from a bare `except:` so Ctrl-C is not swallowed.)
            pass
    # Fallback: materialize the expression into a local temporary table and
    # read the types back from the `columns` system view.
    tmp_name, schema = gen_tmp_name(name="table"), "v_temp_schema"
    drop("{}.{}".format(schema, tmp_name), method="table")
    try:
        if schema == "v_temp_schema":
            executeSQL(
                "CREATE LOCAL TEMPORARY TABLE {} ON COMMIT PRESERVE ROWS AS {}".format(
                    tmp_name, expr
                ),
                print_time_sql=False,
            )
        else:
            executeSQL(
                "CREATE TEMPORARY TABLE {}.{} ON COMMIT PRESERVE ROWS AS {}".format(
                    schema, tmp_name, expr
                ),
                print_time_sql=False,
            )
        query = (
            "SELECT column_name, data_type FROM columns WHERE {0}table_name = '{1}'"
            " AND table_schema = '{2}' ORDER BY ordinal_position"
        ).format(
            f"column_name = '{column_name}' AND " if (column_name) else "",
            tmp_name,
            schema,
        )
        cursor = executeSQL(query, title="Getting the data types.")
        if column_name:
            ctype = cursor.fetchone()[1]
        else:
            ctype = cursor.fetchall()
    finally:
        # Drop the temp table even when the CREATE or the metadata query fails
        # (the original leaked the table if the metadata query raised).
        drop("{}.{}".format(schema, tmp_name), method="table")
    return ctype
# ---#
def insert_into(
    table_name: str,
    data: list,
    schema: str = "",
    column_names: list = [],
    copy: bool = True,
    genSQL: bool = False,
):
    """
    ---------------------------------------------------------------------------
    Inserts the dataset into an existing Vertica table.

    Parameters
    ----------
    table_name: str
        Name of the table to insert into.
    data: list
        The data to ingest.
    schema: str, optional
        Schema name.
    column_names: list, optional
        Name of the column(s) to insert into.
    copy: bool, optional
        If set to True, the batch insert is converted to a COPY statement
        with prepared statements. Otherwise, the INSERTs are performed
        sequentially.
    genSQL: bool, optional
        If set to True, the SQL code that would be used to insert the data
        is generated, but not executed.

    Returns
    -------
    int
        The number of rows ingested (or the list of generated SQL statements
        when genSQL is True).

    See Also
    --------
    pandas_to_vertica : Ingests a pandas DataFrame into the Vertica database.
    """
    check_types(
        [
            ("table_name", table_name, [str]),
            ("column_names", column_names, [list]),
            ("data", data, [list]),
            ("schema", schema, [str]),
            ("copy", copy, [bool]),
            ("genSQL", genSQL, [bool]),
        ]
    )
    if not (schema):
        schema = verticapy.options["temp_schema"]
    input_relation = "{}.{}".format(quote_ident(schema), quote_ident(table_name))
    if not (column_names):
        # Read the column names from the catalog when none were supplied.
        query = f"""SELECT
                        column_name
                    FROM columns
                    WHERE table_name = '{table_name}'
                        AND table_schema = '{schema}'
                    ORDER BY ordinal_position"""
        result = executeSQL(
            query,
            title=f"Getting the table {input_relation} column names.",
            method="fetchall",
        )
        column_names = [elem[0] for elem in result]
        assert column_names, MissingRelation(
            f"The table {input_relation} does not exist."
        )
    cols = [quote_ident(col) for col in column_names]
    if copy and not (genSQL):
        # Single parameterized INSERT: the driver converts the batch to a COPY.
        sql = "INSERT INTO {} ({}) VALUES ({})".format(
            input_relation,
            ", ".join(cols),
            ", ".join(["%s" for i in range(len(cols))]),
        )
        executeSQL(
            sql,
            title=(
                f"Insert new lines in the {table_name} table. The batch insert is "
                "converted into a COPY statement by using prepared statements."
            ),
            data=list(map(tuple, data)),
        )
        executeSQL("COMMIT;", title="Commit.")
        return len(data)

    def _format_value(elem):
        # SQL literal for a single cell. None and NaN (elem != elem) map to
        # NULL; single quotes in strings are doubled for escaping.
        if elem is None or elem != elem:
            return "NULL"
        if isinstance(elem, str):
            return "'{}'".format(elem.replace("'", "''"))
        return "'{}'".format(elem)

    # Row-by-row path (also used to generate the SQL when genSQL is True).
    sql, total_rows = [], 0
    header = "INSERT INTO {} ({}) VALUES ".format(input_relation, ", ".join(cols))
    for i, row in enumerate(data):
        # join() keeps the statement well-formed even for an empty row (the
        # original string slicing produced unbalanced parentheses there).
        query = header + "({});".format(", ".join(_format_value(elem) for elem in row))
        if genSQL:
            sql += [query]
        else:
            try:
                executeSQL(
                    query,
                    title="Insert a new line in the relation: {}.".format(
                        input_relation
                    ),
                )
                executeSQL("COMMIT;", title="Commit.")
                total_rows += 1
            except Exception as e:
                warning_message = "Line {} was skipped.\n{}".format(i, e)
                warnings.warn(warning_message, Warning)
    if genSQL:
        return sql
    else:
        return total_rows
# ---#
def pandas_to_vertica(
df,
name: str = "",
schema: str = "",
dtype: dict = {},
parse_nrows: int = 10000,
temp_path: str = "",
insert: bool = False,
):
"""
---------------------------------------------------------------------------
Ingests a pandas DataFrame into the Vertica database by creating a
CSV file and then using flex tables to load the data.
Parameters
----------
df: pandas.DataFrame
The pandas.DataFrame to ingest.
name: str, optional
Name of the new relation or the relation in which to insert the
data. If unspecified, a temporary local table is created. This
temporary table is dropped at the end of the local session.
schema: str, optional
Schema of the new relation. If empty, a temporary schema is used.
To modify the temporary schema, use the 'set_option' function.
dtype: dict, optional
Dictionary of input types. Providing a dictionary can increase
ingestion speed and precision. If specified, rather than parsing
the intermediate CSV and guessing the input types, VerticaPy uses
the specified input types instead.
parse_nrows: int, optional
If this parameter is greater than 0, VerticaPy creates and
ingests a temporary file containing 'parse_nrows' number
of rows to determine the input data types before ingesting
the intermediate CSV file containing the rest of the data.
This method of data type identification is less accurate,
but is much faster for large datasets.
temp_path: str, optional
The path to which to write the intermediate CSV file. This
is useful in cases where the user does not have write
permissions on the current directory.
insert: bool, optional
If set to True, the data are ingested into the input relation.
The column names of your table and the pandas.DataFrame must
match.
Returns
-------
vDataFrame
vDataFrame of the new relation.
See Also
--------
read_csv : Ingests a CSV file into the Vertica database.
read_json : Ingests a JSON file into the Vertica database.
"""
check_types(
[
("name", name, [str]),
("schema", schema, [str]),
("parse_nrows", parse_nrows, [int]),
("dtype", dtype, [dict]),
("temp_path", temp_path, [str]),
("insert", insert, [bool]),
]
)
if not (schema):
schema = verticapy.options["temp_schema"]
assert name or not (insert), ParameterError(
"Parameter 'name' can not be empty when parameter 'insert' is set to True."
)
if not (name):
tmp_name = gen_tmp_name(name="df")[1:-1]
else:
tmp_name = ""
path = "{0}{1}{2}.csv".format(
temp_path, "/" if (len(temp_path) > 1 and temp_path[-1] != "/") else "", name
)
try:
# Adding the quotes to STR pandas columns in order to simplify the ingestion.
# Not putting them can lead to wrong data ingestion.
str_cols = []
| |
# Repository: passteinforth/eNMRpy
"""
Here, you can find the SpecModel Class for creating a fit model to analyse the eNMR spectra.
You can also use the wrapper function set_peaks() to create a fit model from a matplotlib-based GUI input
You can also Simulate Spectra with Phase shifts.
"""
import numpy as np
import matplotlib.pyplot as plt
import lmfit
import pandas as pd
from collections import OrderedDict
def set_peaks(m, n=-1, timeout=-1, xlim=None, **plot_kwargs):
    """
    Interactively pick peak coordinates from a spectrum plot.

    m : measurement object
    returns: array of tuples [(v0, a0), ...] suitable for make_model()
    ______________________________________________________________
    should be used in a jupyter notebook like this:
        %matplotlib
        peaks = set_peaks(m)
        %matplotlib inline
        make_model(peaks)
    """
    # Plot the first slice of the measurement with the built-in method.
    m.plot_spec([0], xlim=xlim, **plot_kwargs)
    # Minimize bezels so the GUI uses the whole canvas.
    plt.tight_layout()
    # Render short usage instructions directly on the axes.
    plt.text(0.95, 0.95,
             'left mouse button: set coordinate \nright mouse button: delete last coordinate\nmiddle mouse button: exit\n',
             horizontalalignment='right', verticalalignment='top',
             transform=plt.gca().transAxes)
    # matplotlib's blocking input collects the clicked coordinates.
    return plt.ginput(n=n, timeout=timeout)
def peakpicker(x, y=None, inverse_order=False, width=10, threshold=1e5):
    """
    Automatically determine peak positions in an x-y spectrum.

    x: ppm/frequency array, or a Measurement object whose first slice is used
       (then ``y`` is taken from ``x.data[0]`` and ``x`` from ``x.ppm``).
    y: intensity array (ignored when a Measurement object is passed).
    inverse_order: inverts the order of peaks in the returned array;
        this can be helpful when passing the result to make_model().
    width: scan window size; must be an even integer.
    threshold: minimum (real) intensity for a point to count as a peak.

    returns: 2D np.array [[ppm, intensity], ...] which can be passed to make_model()
    """
    # Unpack a Measurement object when one was passed instead of a plain array.
    if type(x) != np.ndarray:
        y = x.data[0]
        x = x.ppm
    if width % 2 != 0:
        raise ValueError('width should be an even integer')

    def advindx(arr, min, max):
        """Slice arr[min:max] with wrap-around (repeating) boundaries."""
        if (min < 0) or (max > len(arr)):
            return np.append(arr[min:], arr[0:max])
        return arr[min:max]

    def check_peak(window, width=width):
        """True when the first width//2 steps rise and the remaining steps fall."""
        # Differences of the real parts of the first width+1 samples:
        # positive where adjacent values increase, negative where they decrease.
        # (The original built this with a np.append loop and a comment that
        # wrongly said "increase ... negative" for the second case.)
        rising = np.diff(np.real(np.asarray(window[:width + 1]))) > 0
        return all(rising[:width // 2]) and not any(rising[width // 2:])

    peaks = []
    for i in range(len(y)):
        window = advindx(y, i - width, i + width)
        # i - width//2 is the candidate apex index for this window.
        if check_peak(window, width) and (y[i - width // 2].real > threshold):
            peaks.append((x[i - width // 2], y[i - width // 2].real))
    if inverse_order:
        return np.array(peaks)[::-1]
    return np.array(peaks)
def make_model(peaks, print_params=True):
    '''
    Builds a SpecModel() whose parameters are initialised from a
    set_peaks()-style array of (position, height) tuples.
    '''
    n_peaks = len(peaks)
    model = SpecModel(n_peaks)
    # Chemical shift v<i> of each peak comes from the picked x-coordinate.
    model.set_initial_values(['v%i' % i for i in range(n_peaks)],
                             [p[0] for p in peaks])
    # Amplitude a<i> is estimated as the picked peak height divided by 100.
    model.set_initial_values(['a%i' % i for i in range(n_peaks)],
                             [p[1] / 100 for p in peaks])
    if print_params:
        # Show the resulting parameters in the output line.
        model.params.pretty_print()
    return model
# maybe should be handled as static method
def reduce_fitted_phases(Measurementobject, SpecModel):
    """
    Static method for the calculation of reduced phase shift values, where the slope is equal to the
    electrophoretic mobility when plotted against the applied voltage.
    This is especially helpful for the comparison of phase shifts obtained for different nuclei
    or under different experimental conditions.
    """
    m = Measurementobject
    # One reduced column per fitted phase parameter 'ph0'..'ph<n-1>' of the model.
    for k in ['ph%i'%i for i in range(SpecModel.n)]:
        # Scale the fitted phase by the measurement constants d, delta, Delta, g
        # and gamma -- presumably electrode distance, gradient pulse timings,
        # gradient strength and gyromagnetic ratio; confirm units in the
        # Measurement class.
        m.eNMRraw[k+'reduced'] = m.eNMRraw[k]*m.d/m.delta/m.Delta/m.g/m.gamma
        # Reference the reduced phases to the 0 V row.
        # NOTE(review): `.loc[...][0]` assumes a row labelled 0 exists among the
        # U == 0 rows -- verify against the DataFrame index.
        m.eNMRraw[k+'reduced'] -= m.eNMRraw.loc[m.eNMRraw['U / [V]']==0, k+'reduced'][0]
def fit_Measurement(obj_M, obj_S, fixed_parameters=None, plot=False, savepath=None, **plot_kwargs):
    '''
    Fits the series of voltage-dependent spectra contained in a typical eNMR measurement.

    obj_M: object of the class eNMR_Measurement
    obj_S: object of the class SpecModel
    fixed_parameters: list of parameter names to be fixed after the first fit
    plot: if True (and savepath is given), each fitted spectrum is saved as a PNG
    savepath: directory/prefix for the saved figures
    **plot_kwargs are passed to SpecModel.fit:
        peak_deconvolution=False, parse_complex='abs','real, or 'imag'
    '''
    i=0
    if fixed_parameters is not None:
        fp = [] # working list of parameter names to be fixed
        # iterates the list of parameters
        for k in obj_S.params.keys():
            # if the parameter name matches any string in fixed_parameters
            if any(k == np.array(fixed_parameters)):
                # append the parameter to the working list
                fp.append(k)
        # First fit of row 0 with ALL parameters free.
        # NOTE(review): unlike the loop below, this call does not pass
        # plot=plot -- confirm whether that is intentional.
        fig = obj_S.fit(obj_M.ppm, obj_M.data[0], **plot_kwargs)
        print('row 0 fitted including fixed_parameters being varied')
        ph_res = obj_S.get_result_values()
        for par in ph_res.keys():
            # saves the results from row 0 in the eNMRraw-DataFrame
            obj_M.eNMRraw.at[0, par] = ph_res[par]
        for p in fp: # fixes all variables listed in fixed_parameters
            # freeze the parameter at the value obtained from the row-0 fit
            obj_S.params[p].set(obj_S.result.params[p].value)
            obj_S.params[p].set(vary=False)
            print('%s not varied!'%p)
        if (plot is True) and (savepath is not None):
            fig.savefig(savepath+'%.1f'%obj_M.eNMRraw.loc[0, obj_M._x_axis]+'.png', dpi=300)
        i = 1 #counter set to one for the rest of the spectra to be fitted
    print('start fitting from row %i'%i)
    # Fit the remaining rows (all rows when no fixed_parameters were given).
    for row in range(i, obj_M.data[:,0].size):
        fig = obj_S.fit(obj_M.ppm, obj_M.data[row], plot=plot, **plot_kwargs)
        ph_res = obj_S.get_result_values()
        for par in ph_res.keys():
            #obj_M.eNMRraw.set_value(row, par, ph_res[par])
            obj_M.eNMRraw.at[row, par] = ph_res[par]
        if (plot is True) and (savepath is not None):
            fig.savefig(savepath+'%.1f'%obj_M.eNMRraw.loc[row, obj_M._x_axis]+'.png', dpi=300)
    #for p in fp: # reset all vary-Values
        #obj_S.params[p].set(vary=True)
    print('fitting finished')
def drop_errors(df):
    '''
    Drops all columns whose keys end with '_err' (created by the fitting model),
    after removing the standard experiment columns.
    Used by the plot_correlations functions.
    '''
    # Drop the vc/vd, voltage, gradient and outlier columns when present.
    # (Narrowed the inner bare `except:` to KeyError -- df.drop raises KeyError
    # for missing labels; a bare except also hid real errors.)
    try:
        sel = df.drop(['vc', 'U / [V]', 'g in T/m', 'outlier'], axis=1)
    except KeyError:
        try:
            sel = df.drop(['vd', 'U / [V]', 'g in T/m', 'outlier'], axis=1)
        except KeyError:
            print('no vd or vc found, no standard parameters dropped')
            sel = df
    # Boolean mask that is True for the non-error columns.
    _bool = np.array([k[-4:] != '_err' for k in sel.keys()])
    # Select only the non-error columns.
    return sel[np.array(sel.keys()[_bool])]
def plot_correlations_heatmap(df, method='pearson', without_errors=True, textcolor="#222222", **fig_kwargs):
    """
    Correlation-coefficient heatmap for any pandas DataFrame.

    method: 'pearson', 'kendall' or 'spearman'
    without_errors: drop the '_err' fit columns before correlating
    textcolor: color of the in-cell annotations
    **fig_kwargs: forwarded to plt.subplots()

    returns: (figure, correlation DataFrame)
    """
    if without_errors:
        df = drop_errors(df)
    corr = df.corr(method=method)
    col_labels = corr.keys()
    row_labels = np.array(corr.index)
    fig, ax = plt.subplots(**fig_kwargs)
    im = ax.imshow(corr, cmap='Spectral', vmin=-1, vmax=1)
    # One tick per column/row, labelled with the DataFrame keys.
    ax.set_xticks(np.arange(len(col_labels)))
    ax.set_yticks(np.arange(len(row_labels)))
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)
    # Rotate the x tick labels so long column names stay readable.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate every cell with its correlation coefficient.
    for row in range(len(row_labels)):
        for col in range(len(col_labels)):
            ax.text(col, row, '%.3f' % corr.iloc[row, col],
                    ha="center", va="center", color=textcolor)
    plt.colorbar(im)
    fig.tight_layout()
    return fig, corr
def lorentz_real(x, x0, _lambda, ph, amplitude):
    """
    Calculates the real part of a phase-shifted Lorentzian line.

    x: frequency/ppm axis
    x0: peak position
    _lambda: line half-width (absolute value is used)
    ph: phase angle in degrees; the conversion from degrees to radians
        happens here (the old docstring stated the direction backwards)
    amplitude: peak amplitude (absolute value is used)
    """
    def dispersion(x, x0, _lambda):
        return -(x-x0)/(_lambda**2+(x-x0)**2)
    def absorption(x, x0, _lambda):
        return _lambda/(_lambda**2+(x-x0)**2)
    # transforming the phase angle from degree to rad
    ph = (ph/360*2*np.pi)
    # the sign of amplitude and width does not change the line shape
    amplitude, _lambda = abs(amplitude), abs(_lambda)
    return amplitude*(absorption(x, x0, _lambda)*np.cos(ph)-dispersion(x, x0, _lambda)*np.sin(ph))
def lorentz_imag(x, x0, _lambda, ph, amplitude):
    """
    Calculates the imaginary part of a phase-shifted Lorentzian line.

    x: frequency/ppm axis
    x0: peak position
    _lambda: line half-width (absolute value is used)
    ph: phase angle in degrees; the conversion from degrees to radians
        happens here (the old docstring stated the direction backwards)
    amplitude: peak amplitude (absolute value is used)
    """
    def dispersion(x, x0, _lambda):
        return -(x-x0)/(_lambda**2+(x-x0)**2)
    def absorption(x, x0, _lambda):
        return _lambda/(_lambda**2+(x-x0)**2)
    # transforming the phase angle from degree to rad
    ph = (ph/360*2*np.pi)
    # the sign of amplitude and width does not change the line shape
    amplitude, _lambda = abs(amplitude), abs(_lambda)
    return amplitude*(dispersion(x, x0, _lambda)*np.cos(ph)+absorption(x, x0, _lambda)*np.sin(ph))
def gauss(x, x0, s, amp=1):
    """
    Gaussian distribution centred at ``x0`` with standard deviation ``s``,
    scaled by ``amp`` (amp=1 gives the normal-distribution normalisation).
    """
    norm = np.sqrt(np.pi*2)*s
    return amp*np.exp(-(x-x0)**2/(2*s**2))/norm
def makefunc_Lorentz_cmplx(n = 1):
'''
returns a function describing the complex lorentzians with real and imaginary part
n: number of lorentzians contained in the function
'''
s = 'lambda x, baseline'
for i in range(n):
s += ', v%i, l%i, ph%i, a%i'%(i, i, i, i)
s+=': baseline'
for i in range(n):
s += ' + lorentz_real(x, v%i, l%i, | |
# GitHub stars at scrape time: 0
'''
Written by: <NAME>
Last Update: October 16th 2020
'''
import numpy as np
import copy
import ctypes
import socket
import time
########## Parameters
# Hard-coded path to the Andor SDK DLL, only defined on the lab control PC
# "ph-photonbec5". NOTE(review): on any other host `dll_file` stays undefined
# and EMCCD.__init__ will raise NameError when loading the DLL -- confirm
# whether this module is ever imported on other machines.
if socket.gethostname() == "ph-photonbec5":
    dll_file = r"D:\Control\EMCCD\atmcd32d.dll"
class EMCCD():
##### Error Codes
emccd_return_codes = dict()
emccd_return_codes.update({20001: 'DRV_ERROR_CODES'})
emccd_return_codes.update({20002: 'DRV_SUCCESS'})
emccd_return_codes.update({20003: 'DRV_VXDNOTINSTALLED'})
emccd_return_codes.update({20004: 'DRV_ERROR_SCAN'})
emccd_return_codes.update({20005: 'DRV_ERROR_CHECK_SUM'})
emccd_return_codes.update({20006: 'DRV_ERROR_FILELOAD'})
emccd_return_codes.update({20007: 'DRV_UNKNOWN_FUNCTION'})
emccd_return_codes.update({20008: 'DRV_ERROR_VXD_INIT'})
emccd_return_codes.update({20009: 'DRV_ERROR_ADDRESS'})
emccd_return_codes.update({20010: 'DRV_ERROR_PAGELOCK'})
emccd_return_codes.update({20011: 'DRV_ERROR_PAGE_UNLOCK'})
emccd_return_codes.update({20012: 'DRV_ERROR_BOARDTEST'})
emccd_return_codes.update({20013: 'DRV_ERROR_ACK'})
emccd_return_codes.update({20014: 'DRV_ERROR_UP_FIFO'})
emccd_return_codes.update({20015: 'DRV_ERROR_PATTERN'})
emccd_return_codes.update({20017: 'DRV_ACQUISITION_ERRORS'})
emccd_return_codes.update({20018: 'DRV_ACQ_BUFFER'})
emccd_return_codes.update({20019: 'DRV_ACQ_DOWNFIFO_FULL'})
emccd_return_codes.update({20020: 'DRV_PROC_UNKNOWN_INSTRUCTION'})
emccd_return_codes.update({20021: 'DRV_ILLEGAL_OP_CODE'})
emccd_return_codes.update({20022: 'DRV_KINETIC_TIME_NOT_MET'})
emccd_return_codes.update({20023: 'DRV_ACCUM_TIME_NOT_MET'})
emccd_return_codes.update({20024: 'DRV_NO_NEW_DATA'})
emccd_return_codes.update({20025: 'PCI_DMA_FAIL'})
emccd_return_codes.update({20026: 'DRV_SPOOLERROR'})
emccd_return_codes.update({20027: 'DRV_SPOOLSETUPERROR'})
emccd_return_codes.update({20029: 'SATURATED'})
emccd_return_codes.update({20033: 'DRV_TEMPERATURE_CODES'})
emccd_return_codes.update({20034: 'DRV_TEMPERATURE_OFF'})
emccd_return_codes.update({20035: 'DRV_TEMP_NOT_STABILIZED'})
emccd_return_codes.update({20036: 'DRV_TEMPERATURE_STABILIZED'})
emccd_return_codes.update({20037: 'DRV_TEMPERATURE_NOT_REACHED'})
emccd_return_codes.update({20038: 'DRV_TEMPERATURE_OUT_RANGE'})
emccd_return_codes.update({20039: 'DRV_TEMPERATURE_NOT_SUPPORTED'})
emccd_return_codes.update({20040: 'DRV_TEMPERATURE_DRIFT'})
emccd_return_codes.update({20049: 'DRV_GENERAL_ERRORS'})
emccd_return_codes.update({20050: 'DRV_INVALID_AUX'})
emccd_return_codes.update({20051: 'DRV_COF_NOTLOADED'})
emccd_return_codes.update({20052: 'DRV_FPGAPROG'})
emccd_return_codes.update({20053: 'DRV_FLEXERROR'})
emccd_return_codes.update({20054: 'DRV_GPIBERROR'})
emccd_return_codes.update({20055: 'ERROR_DMA_UPLOAD'})
emccd_return_codes.update({20064: 'DRV_DATATYPE'})
emccd_return_codes.update({20065: 'DRV_DRIVER_ERRORS'})
emccd_return_codes.update({20066: 'DRV_P1INVALID'})
emccd_return_codes.update({20067: 'DRV_P2INVALID'})
emccd_return_codes.update({20068: 'DRV_P3INVALID'})
emccd_return_codes.update({20069: 'DRV_P4INVALID'})
emccd_return_codes.update({20070: 'DRV_INIERROR'})
emccd_return_codes.update({20071: 'DRV_COFERROR'})
emccd_return_codes.update({20072: 'DRV_ACQUIRING'})
emccd_return_codes.update({20073: 'DRV_IDLE'})
emccd_return_codes.update({20074: 'DRV_TEMPCYCLE'})
emccd_return_codes.update({20075: 'DRV_NOT_INITIALIZED'})
emccd_return_codes.update({20076: 'DRV_P5INVALID'})
emccd_return_codes.update({20077: 'DRV_P6INVALID'})
emccd_return_codes.update({20078: 'DRV_INVALID_MODE'})
emccd_return_codes.update({20079: 'DRV_INVALID_FILTER'})
emccd_return_codes.update({20080: 'DRV_I2CERRORS'})
emccd_return_codes.update({20081: 'DRV_DRV_I2CDEVNOTFOUND'})
emccd_return_codes.update({20082: 'DRV_I2CTIMEOUT'})
emccd_return_codes.update({20083: 'DRV_P7INVALID'})
emccd_return_codes.update({20089: 'DRV_USBERROR'})
emccd_return_codes.update({20090: 'DRV_IOCERROR'})
emccd_return_codes.update({20091: 'DRV_VRMVERSIONERROR'})
emccd_return_codes.update({20093: 'DRV_USB_INTERRUPT_ENDPOINT_ERROR'})
emccd_return_codes.update({20094: 'DRV_RANDOM_TRACK_ERROR'})
emccd_return_codes.update({20095: 'DRV_INVALID_TRIGGER_MODE'})
emccd_return_codes.update({20096: 'DRV_LOAD_FIRMWARE_ERROR'})
emccd_return_codes.update({20097: 'DRV_DIVIDE_BY_ZERO_ERROR'})
emccd_return_codes.update({20098: 'DRV_INVALID_RINGEXPOSURES'})
emccd_return_codes.update({20099: 'DRV_BINNING_ERROR'})
emccd_return_codes.update({20990: 'DRV_ERROR_NOCAMERA'})
emccd_return_codes.update({20991: 'DRV_NOT_SUPPORTED'})
emccd_return_codes.update({20992: 'DRV_NOT_AVAILABLE'})
emccd_return_codes.update({20115: 'DRV_ERROR_MAP'})
emccd_return_codes.update({20116: 'DRV_ERROR_UNMAP'})
emccd_return_codes.update({20117: 'DRV_ERROR_MDL'})
emccd_return_codes.update({20118: 'DRV_ERROR_UNMDL'})
emccd_return_codes.update({20119: 'DRV_ERROR_BUFFSIZE'})
emccd_return_codes.update({20121: 'DRV_ERROR_NOHANDLE'})
emccd_return_codes.update({20130: 'DRV_GATING_NOT_AVAILABLE'})
emccd_return_codes.update({20131: 'DRV_FPGA_VOLTAGE_ERROR'})
emccd_return_codes.update({20099: 'DRV_BINNING_ERROR'})
emccd_return_codes.update({20100: 'DRV_INVALID_AMPLIFIER'})
emccd_return_codes.update({20101: 'DRV_INVALID_COUNTCONVERT_MODE'})
##### Camera attributes
acquisition_modes = {"single scan":1, "accumulate":2, "kinetics":3, "fast kinetics":4, "run till abort":5}
output_amplifier_modes = {"EMCCD":0, "CCD":1}
read_modes = {"full vertical binning":0, "multi-track":1, "random-track":2, "single-track":3, "image":4}
shutter_modes = {"fully auto":0, "permanently open":1, "permanently closed":2, "open for FVB series":4, "open for any series":5}
trigger_modes = {"internal":0, "external":1, "external start":6, "external exposure (bulb)":7, "external FVB EM":9, "software trigger":10, "external charge shifting":12}
def __init__(self, VERBOSE=True, frontend=None):
    """Load the Andor SDK DLL, initialize it and calibrate camera parameters.

    Queries the cooling range, horizontal/vertical shift speeds, pre-amp
    gains and detector size from the SDK and caches them on the instance.

    Parameters:
        VERBOSE (bool): echo status messages to stdout.
        frontend: optional object exposing write_camera_message(message=...)
            that receives status strings (may be None).
    Raises:
        Exception: if any SDK call does not return 20002 (DRV_SUCCESS).
    """
    self.VERBOSE = VERBOSE
    self.frontend = frontend
    self.COOLER = False  # cooling currently switched on?
    self.STABLE_TEMPERATURE = False  # set point reached and stable?
    # Loads the dll (Windows-only; dll_file is a module-level path)
    self.dll = ctypes.WinDLL(dll_file)
    # Initializes EMCCD SDK; 20002 == DRV_SUCCESS
    self.printout(message="Initializing SDK:")
    # NOTE(review): Initialize() expects an install-directory string; a dummy
    # char appears to work for this camera — confirm against the SDK manual.
    dummy = ctypes.c_char()
    out = self.dll.Initialize(dummy)
    self.printout(code=out)
    if not out == 20002:
        raise Exception("Could not load SDK")
    # Retrieves the valid range of temperatures (deg C) the detector can be cooled to
    self.printout(message="Getting sensor temperature range:")
    Tmin = ctypes.c_int()
    Tmax = ctypes.c_int()
    out = self.dll.GetTemperatureRange(ctypes.pointer(Tmin), ctypes.pointer(Tmax))
    self.printout(code=out)
    if not out == 20002:
        raise Exception("Could not retrive detector temperature range: "+self.emccd_return_codes[out])
    self.Tmin = Tmin.value
    self.Tmax = Tmax.value
    self.printout(message="Temperature min = "+str(self.Tmin))
    self.printout(message="Temperature max = "+str(self.Tmax))
    # Gets horizontal shifting speeds from the camera, per amplifier type
    self.printout(message="Calibrating horizontal shifting speeds")
    self.horizontal_shifting_speeds = dict()  # {amp type: {index: speed in MHz}}
    for typ in [0, 1]:  # 0:electron multiplication, 1: conventional
        self.horizontal_shifting_speeds[typ] = dict()
        speeds = ctypes.c_int()
        # First argument is the AD channel; channel 0 is used throughout
        out = self.dll.GetNumberHSSpeeds(ctypes.c_int(0), ctypes.c_int(typ), ctypes.pointer(speeds))
        speeds = speeds.value
        if out == 20002:
            for i in range(0, speeds):
                speedMHz = ctypes.c_float()
                out1 = self.dll.GetHSSpeed(ctypes.c_int(0), ctypes.c_int(typ), ctypes.c_int(i), ctypes.pointer(speedMHz))
                if out1 == 20002:
                    self.horizontal_shifting_speeds[typ][i] = speedMHz.value
                else:
                    raise Exception("Could not retrieve horizontal shift speed: "+self.emccd_return_codes[out1])
        else:
            raise Exception("Could not retrieve number of horizontal shift speeds: "+self.emccd_return_codes[out])
    # Gets vertical shifting speeds from the camera
    self.printout(message="Calibrating vertical shifting speeds")
    self.vertical_shifting_speeds = dict()  # {index: microseconds per pixel shift}
    speeds = ctypes.c_int()
    out = self.dll.GetNumberVSSpeeds(ctypes.pointer(speeds))
    speeds = speeds.value
    if out == 20002:
        for i in range(0, speeds):
            speed_ms = ctypes.c_float()
            out1 = self.dll.GetVSSpeed(ctypes.c_int(i), ctypes.pointer(speed_ms))
            if out1 == 20002:
                self.vertical_shifting_speeds[i] = speed_ms.value
            else:
                raise Exception("Could not retrieve vertical shift speed: "+self.emccd_return_codes[out1])
    else:
        raise Exception("Could not retrieve number of vertical shift speeds: "+self.emccd_return_codes[out])
    # Gets pre-amp gain values
    self.printout(message="Calibrating pre-amp gain values")
    self.preamp_gain_values = dict()  # {index: gain factor}
    gains = ctypes.c_int()
    out = self.dll.GetNumberPreAmpGains(ctypes.pointer(gains))
    gains = gains.value
    if out == 20002:
        for i in range(0, gains):
            gain = ctypes.c_float()
            out1 = self.dll.GetPreAmpGain(ctypes.c_int(i), ctypes.pointer(gain))
            if out1 == 20002:
                self.preamp_gain_values[i] = gain.value
            else:
                raise Exception("Could not retrieve pre-amp gain value: "+self.emccd_return_codes[out1])
    else:
        raise Exception("Could not retrieve number of pre-amp gains: "+self.emccd_return_codes[out])
    # Gets the detector size, in pixels
    self.printout(message="Getting number of detector pixels")
    xpixels = ctypes.c_int()
    ypixels = ctypes.c_int()
    out = self.dll.GetDetector(ctypes.pointer(xpixels), ctypes.pointer(ypixels))
    if not out == 20002:
        raise Exception("Could not retrive number of pixels: "+self.emccd_return_codes[out])
    self.xpixels = xpixels.value
    self.ypixels = ypixels.value
    self.image_format = None  # set later when a read mode/ROI is configured
def printout(self, code=None, message=None):
    """Report a free-form message and/or an SDK return code.

    Output goes to the attached frontend (if any) and, when VERBOSE is
    set, to stdout. Codes are translated via emccd_return_codes.
    """
    if self.frontend is not None:
        if message is not None:
            self.frontend.write_camera_message(message=message)
        if code is not None:
            self.frontend.write_camera_message(message=self.emccd_return_codes[code])
    # Prints to the command line
    if self.VERBOSE:
        if message is not None:
            print("EMCCD object: "+message)
        if code is not None:
            print("EMCCD object: "+self.emccd_return_codes[code])
def SetTemperature(self, temperature):
    """Set the target detector temperature (Celsius).

    Cooling itself is toggled separately via CoolerON/CoolerOFF.

    Parameters:
        temperature (int): desired detector temperature, within
            [Tmin, Tmax] as reported by the SDK at init time.
    Returns:
        (bool, str): success flag and the SDK status string.
    Raises:
        Exception: if temperature is outside the valid range.
    """
    temperature = int(temperature)
    if temperature < self.Tmin or temperature > self.Tmax:
        raise Exception("Invalid temperature")
    self.printout(message="Setting temperature to "+str(temperature)+" C")
    status = self.dll.SetTemperature(ctypes.c_int(temperature))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def CoolerON(self):
    """Switch the detector cooling on.

    Updates self.COOLER on success.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Switching ON the cooling")
    status = self.dll.CoolerON()
    self.printout(code=status)
    if status == 20002:
        self.COOLER = True
        SUCCESS = True
    else:
        SUCCESS = False
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def CoolerOFF(self):
    """Switch the detector cooling off.

    Updates self.COOLER on success.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Switching OFF the cooling")
    status = self.dll.CoolerOFF()
    self.printout(code=status)
    if status == 20002:
        self.COOLER = False
        SUCCESS = True
    else:
        SUCCESS = False
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def StabilizeTemperature(self):
    """Block until the detector temperature has stabilized at the set point.

    Polls GetTemperature once per second while the SDK keeps returning
    code 20037 (temperature not yet reached, per the Andor SDK — confirm),
    then reports success only if the final code is 20036 (stabilized).

    Returns:
        (bool, str, int): success flag, SDK status string, and the last
        temperature reading in Celsius.
    """
    self.printout(message="Stabilizing detector temperature...")
    current_temperature_c = ctypes.c_int()
    status = self.dll.GetTemperature(ctypes.pointer(current_temperature_c))
    if status == 20037:
        while status == 20037:
            self.printout(message=" Current temperature: "+str(current_temperature_c.value))
            time.sleep(1)
            status = self.dll.GetTemperature(ctypes.pointer(current_temperature_c))
    self.printout(code=status)
    SUCCESS = status == 20036
    self.STABLE_TEMPERATURE = SUCCESS
    if SUCCESS:
        self.printout(message="Detector temperature has stabilized at set point")
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status]), current_temperature_c.value
def SetAcquisitionMode(self, mode):
    """Set the acquisition mode.

    Parameters:
        mode (str): one of "single scan", "accumulate", "kinetics",
            "fast kinetics", "run till abort".
    Returns:
        (bool, str): success flag and the SDK status string.
    Raises:
        Exception: if mode is not a recognized acquisition mode.
    """
    self.printout(message="Setting acquisition mode")
    # Simple dict membership replaces the original any([...]) scan;
    # error message typo ("Unkown") fixed.
    if mode not in self.acquisition_modes:
        raise Exception("Unknown acquisition mode")
    out = self.dll.SetAcquisitionMode(ctypes.c_int(self.acquisition_modes[mode]))
    self.printout(code=out)
    SUCCESS = out == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[out])
def SetOutputAmplifier(self, mode):
    """Select the output amplifier used when reading data from the head.

    Some EMCCD systems provide a second output amplifier; this chooses
    which register is used.

    Parameters:
        mode (str): "EMCCD": standard EMCCD gain register (default) /
            Conventional (Clara); "CCD": conventional CCD register /
            Extended NIR mode (Clara).
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting output amplifier mode")
    if not any(mode == known for known in self.output_amplifier_modes):
        raise Exception("Unkown output amplifier mode")
    status = self.dll.SetOutputAmplifier(ctypes.c_int(self.output_amplifier_modes[mode]))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetReadMode(self, mode):
    """Select the sensor readout mode.

    Parameters:
        mode (str): one of "full vertical binning", "multi-track",
            "random-track", "single-track", "image".
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting read mode")
    if not any(mode == known for known in self.read_modes):
        raise Exception("Unkown read mode")
    status = self.dll.SetReadMode(ctypes.c_int(self.read_modes[mode]))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetShutter(self, typ, mode, closingtime, openingtime):
    """Configure shutter behaviour.

    The typ parameter controls the TTL signal output to an external
    shutter; mode configures automatic vs. permanently open/closed
    operation. Opening/closing times are needed by the SDK to compute
    acquisition timings (see SHUTTER TRANSFER TIME in the manual).

    Parameters:
        typ (int): 0: output TTL low signal to open shutter;
            1: output TTL high signal to open shutter.
        mode (str): one of "fully auto", "permanently open",
            "permanently closed", "open for FVB series",
            "open for any series".
        closingtime (int): time the shutter takes to close (ms).
        openingtime (int): time the shutter takes to open (ms).
    Returns:
        (bool, str): success flag and the SDK status string.
    Raises:
        Exception: on invalid typ or unknown mode.
    """
    self.printout(message="Setting shutter mode")
    if typ not in (0, 1):
        raise Exception("Invalid shutter TTL type")
    # Dict membership replaces the original any([...]) scan; error
    # message typo ("Unkown") fixed.
    if mode not in self.shutter_modes:
        raise Exception("Unknown shutter mode")
    out = self.dll.SetShutter(
        ctypes.c_int(typ),
        ctypes.c_int(self.shutter_modes[mode]),
        ctypes.c_int(closingtime),
        ctypes.c_int(openingtime))
    self.printout(code=out)
    SUCCESS = out == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[out])
def SetExposureTime(self, time):
    """Set the exposure time to the nearest valid value >= the given one.

    The actual exposure used can be read back via GetAcquisitionTimings
    (see SECTION 5 - ACQUISITION MODES in the manual).

    Parameters:
        time (float): exposure time in seconds. NOTE(review): this
            parameter shadows the module-level `time`; kept for
            backward compatibility with positional callers.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting exposure time")
    status = self.dll.SetExposureTime(ctypes.c_float(time))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetTriggerMode(self, mode):
    """Select the trigger mode the camera operates in.

    Parameters:
        mode (str): one of "internal", "external", "external start",
            "external exposure (bulb)", "external FVB EM",
            "software trigger", "external charge shifting".
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting trigger mode")
    if not any(mode == known for known in self.trigger_modes):
        raise Exception("Unkown trigger mode")
    status = self.dll.SetTriggerMode(ctypes.c_int(self.trigger_modes[mode]))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetAccumulationCycleTime(self, time):
    """Set the accumulation cycle time to the nearest valid value >= given.

    The actual cycle time used is obtained via GetAcquisitionTimings
    (see SECTION 5 - ACQUISITION MODES in the manual).

    Parameters:
        time (float): accumulation cycle time in seconds.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting accumulation cycle time")
    status = self.dll.SetAccumulationCycleTime(ctypes.c_float(time))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetNumberAccumulations(self, number):
    """Set how many scans are accumulated in memory.

    Only takes effect in Accumulate or Kinetic Series acquisition mode.

    Parameters:
        number (int): number of scans to accumulate.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting number of accumulations")
    status = self.dll.SetNumberAccumulations(ctypes.c_int(number))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetNumberKinetics(self, number):
    """Set how many (possibly accumulated) scans a sequence takes.

    Only takes effect in Kinetic Series acquisition mode.

    Parameters:
        number (int): number of scans to store.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting number of kinetic scans")
    status = self.dll.SetNumberKinetics(ctypes.c_int(number))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetKineticCycleTime(self, time):
    """Set the kinetic cycle time to the nearest valid value >= given.

    The actual time used is obtained via GetAcquisitionTimings
    (see SECTION 5 - ACQUISITION MODES in the manual).

    Parameters:
        time (float): kinetic cycle time in seconds.
    Returns:
        (bool, str): success flag and the SDK status string.
    """
    self.printout(message="Setting kinetic cycle time")
    status = self.dll.SetKineticCycleTime(ctypes.c_float(time))
    self.printout(code=status)
    SUCCESS = status == 20002
    return SUCCESS, copy.deepcopy(self.emccd_return_codes[status])
def SetFrameTransferMode(self, FRAME_TRANSFER_MODE):
"""
This function will set whether an acquisition will readout in Frame Transfer Mode. If the
acquisition mode is Single Scan or Fast Kinetics this call will have no affect.
Parameters:
FRAME_TRANSFER_MODE (bool)
"""
dummy | |
# <gh_stars>1-10
# Copyright (c) 2012 The Foundry Visionmongers Ltd. All Rights Reserved.
import maya.cmds as cmds
import sys
import os.path
import scenegraphXML
# Make sure the Alembic plugin is loaded before any AbcExport call below
cmds.loadPlugin('AbcExport', quiet=True)
# Module level global used to hold animating bounds data from alembic callbacks
# (maps maya node path -> {frame: bounds list})
animBoundsData = {}
# Main function called to actually export from Maya to ScenegraphXML format
def maya2ScenegraphXML(mayaSelection, xmlFileName, startFrame=None, endFrame=None,
                       arbAttrs=None, geoFileOptions=''):
    """Export the given Maya nodes to ScenegraphXML format.

    Parameters:
        mayaSelection: list of Maya DAG paths to export.
        xmlFileName: output .xml path (the extension is optional).
        startFrame, endFrame: optional animation range.
        arbAttrs: optional list of arbitrary attribute names to export.
        geoFileOptions: extra options passed to the Alembic exporter.
    """
    # Fix: declare the module-level cache as global. The original rebound
    # a *local* animBoundsData, so bounds gathered by a previous export
    # were never cleared from the module-level dict.
    global animBoundsData
    # Strip xmlFileName into directory and file name components
    lastSlashPos = xmlFileName.rfind('/')
    if lastSlashPos == -1:
        fileDir = None
        fileStem = xmlFileName
    else:
        fileDir = xmlFileName[:lastSlashPos]
        fileStem = xmlFileName[lastSlashPos+1:]
    # Strip fileStem of .xml extension if it has one
    if fileStem.endswith('.xml'):
        fileStem = fileStem[:-4]
    # Clear the per-export animating-bounds cache
    animBoundsData = {}
    # Construct python classes to represent Maya Hierarchy using scenegraphXML.py
    sgxmlHandler = MayaSgxmlHandler(mayaSelection=mayaSelection,
                                    fileDir=fileDir,
                                    fileStem=fileStem,
                                    startFrame=startFrame,
                                    endFrame=endFrame,
                                    arbAttrs=arbAttrs,
                                    geoFileOptions=geoFileOptions)
    # Write the animating channel data first, then the scenegraphXML files
    # for the hierarchical components.
    sgxmlHandler.writeChannelData()
    sgxmlHandler.writeSgxml()
def addAnimBoundsData(mayaNode, frame, bounds):
    """Record *bounds* for *mayaNode* at *frame* in the module-level cache."""
    perFrame = animBoundsData.setdefault(mayaNode, {})
    perFrame[frame] = bounds
def maya2abcCallback(mayaNode, frame, bounds):
    """Alembic per-frame callback that stores animating bounds.

    Reorders the incoming [minx, miny, minz, maxx, maxy, maxz] into the
    per-axis [minx, maxx, miny, maxy, minz, maxz] layout used internally.
    """
    reordered = [bounds[0], bounds[3], bounds[1], bounds[4], bounds[2], bounds[5]]
    addAnimBoundsData(mayaNode, frame, reordered)
def maya2Abc(mayaParent, mayaSelection, filepath, startFrame=None, endFrame=None, abcOptions=''):
    """Export *mayaSelection* to an Alembic (.abc) file at *filepath*.

    Registers a per-frame python callback so that bounding boxes computed
    during export are captured into animBoundsData.

    Parameters:
        mayaParent: DAG path of the parent node (used as the bounds key).
        mayaSelection: list of root nodes to export.
        filepath: destination .abc file path.
        startFrame, endFrame: optional frame range.
        abcOptions: extra AbcExport job options.
    """
    # check that we actually have elements in the mayaSelection
    if mayaSelection:
        # create the directory if it doesn't exist (renamed from `dir`,
        # which shadowed the builtin)
        outDir = os.path.dirname(filepath)
        if not os.path.isdir(outDir):
            os.makedirs(outDir)
        abcCommandString = abcOptions + ' -root ' + ' -root '.join(mayaSelection) + ' -uvWrite -file ' + filepath
        # add Python callback to store bounding box information
        abcCommandString = '-pythonPerFrameCallback maya2scenegraphXML.maya2abcCallback(mayaNode="'+mayaParent+'",frame=#FRAME#,bounds=#BOUNDSARRAY#) ' + abcCommandString
        if startFrame is not None and endFrame is not None:
            abcCommandString = '-frameRange ' + str(startFrame) + ' ' + str(endFrame) + ' ' + abcCommandString
        # Fix: parenthesized print works on both Python 2 and 3; the bare
        # print statement is a SyntaxError under Python 3.
        print("Command string: %s" % abcCommandString)
        cmds.AbcExport(j=abcCommandString)
    else:
        print("Warning: %s doesn't have any children to export as a component" % mayaParent)
def getMayaNodeName(mayaPath):
    """Return the leaf node name of a '|'-delimited Maya DAG path."""
    trimmed = mayaPath.rstrip('|')
    return trimmed.rsplit('|', 1)[-1]
def mayaNode2FileStem(mayaPath):
    """Derive a flat file stem from a DAG path.

    Existing underscores are escaped as '__' before path separators '|'
    are flattened to '_', so the mapping stays unambiguous.
    """
    stem = mayaPath.lstrip('|')
    stem = stem.replace('_', '__')
    return stem.replace('|', '_')
def mayaNode2FilePath(mayaPath, directory, extension=None, relativePath=True):
    """Build a file path for a Maya node.

    An explicit sgxml_filepath attribute on the node wins; otherwise the
    path is derived from the DAG path ('_'->'__', '|' and ':' -> '_').
    """
    # an explicit per-node override takes precedence
    if cmds.listAttr(mayaPath, st=['sgxml_filepath']):
        return cmds.getAttr(mayaPath + '.sgxml_filepath', x=True)
    basePath = mayaPath.lstrip('|').replace('_', '__').replace('|', '_').replace(':', '_')
    if not relativePath:
        basePath = directory.rstrip('/') + '/' + basePath
    if extension is None:
        return basePath
    return basePath + '.' + extension
def getValidChildren(mayaElementPath):
    """Return child transform nodes that are not tagged sgxml_ignore."""
    childNodes = cmds.listRelatives(mayaElementPath, pa=True)
    if childNodes is None:
        return []
    validChildren = []
    for mayaChildPath in childNodes:
        # skip nodes explicitly marked to be ignored
        if cmds.listAttr(mayaChildPath, st=['sgxml_ignore']) is not None:
            continue
        # only transform nodes are valid instance candidates
        if cmds.nodeType(mayaChildPath) == 'transform':
            validChildren.append(mayaChildPath)
    return validChildren
def getAttrOrNone(mayaPath, attrName):
    """Return the attribute's value, or None if the node lacks it."""
    if not cmds.listAttr(mayaPath, st=[attrName]):
        return None
    return cmds.getAttr(mayaPath + '.' + attrName)
def getAttrOrZero(mayaPath, attrName):
    """Return the attribute's value, or 0 if the node lacks it."""
    if not cmds.listAttr(mayaPath, st=[attrName]):
        return 0
    return cmds.getAttr(mayaPath + '.' + attrName)
def isAnimated(mayaPath, attrName):
    """True if the attribute has an incoming connection (i.e. is driven)."""
    plug = mayaPath + '.' + attrName
    return cmds.connectionInfo(plug, isDestination=True)
def isXformAnimated(mayaPath):
    """True if any translate/rotate/scale channel of the node is driven.

    Replaces the original nine-term or-chain with any(); short-circuit
    order and the boolean result are identical.
    """
    return any(isAnimated(mayaPath, attr)
               for attr in ('tx', 'ty', 'tz',
                            'rx', 'ry', 'rz',
                            'sx', 'sy', 'sz'))
def getAnimBoundsData(mayaPath, frameNumber):
    """Look up (possibly animated) bounds recorded for a node.

    Returns the static bounds if only one frame was recorded, the bounds
    for *frameNumber* if animated, or None if no data exists for the node.

    Raises:
        ValueError: if bounds are animated but *frameNumber* is missing.
    """
    if mayaPath not in animBoundsData:
        return None
    curAnimBounds = animBoundsData[mayaPath]
    if len(curAnimBounds) == 1:
        # bounds are static: return the single recorded value.
        # list() keeps this valid on Python 3, where dict views
        # are not indexable (the original used .values()[0]).
        return list(curAnimBounds.values())[0]
    # bounds are animated, so a matching value for this frame must exist
    if frameNumber in curAnimBounds:
        return curAnimBounds[frameNumber]
    # Fix: `raise ValueError, '...'` is Python-2-only syntax; the call
    # form below is valid on both Python 2 and 3.
    raise ValueError('matching anim bounds value not found')
def deleteSgxmlAttr(mayaPath, attrName):
    # Remove attrName from the node if it exists; no-op otherwise.
    if cmds.listAttr(mayaPath, st=[attrName]):
        cmds.deleteAttr(mayaPath, at=attrName)
def deleteSgxmlAttrs(mayaPath):
    """Recursively strip all sgxml_* tagging attributes from a hierarchy.

    Children of a node tagged sgxml_ignore are left untouched (the node's
    own attributes are still removed). The ignore flag is read before the
    sgxml_ignore attribute itself is deleted.
    """
    ignore = bool(cmds.listAttr(mayaPath, st=['sgxml_ignore']))
    # One loop replaces fourteen copy-pasted deleteSgxmlAttr calls;
    # deletion order is preserved.
    for attrName in ('sgxml_assembly', 'sgxml_component', 'sgxml_reference',
                     'sgxml_nodeType', 'sgxml_filepath', 'sgxml_refType',
                     'sgxml_abcTransform', 'sgxml_ignore',
                     'sgxml_boundsWriteMode', 'sgxml_lodTag',
                     'sgxml_lodWeight', 'sgxml_proxyName',
                     'sgxml_proxyFile', 'sgxml_nodeGroupType'):
        deleteSgxmlAttr(mayaPath, attrName)
    childNodes = cmds.listRelatives(mayaPath, pa=True)
    if childNodes is not None and not ignore:
        for mayaChildPath in childNodes:
            deleteSgxmlAttrs(mayaChildPath)
def mayaAddStringAttribute(curItem, attrName, attrVal):
    """Create (if absent) and set a keyable string attribute on curItem.

    Does nothing if the attribute already exists or the node comes from
    a file reference.
    """
    if curItem is None:
        return
    # attribute already present: leave it (and its value) alone
    if cmds.listAttr(curItem, st=[attrName]):
        return
    # referenced nodes cannot be modified
    if cmds.referenceQuery(curItem, isNodeReferenced=True):
        return
    cmds.lockNode(curItem, lock=False)
    cmds.addAttr(curItem, ln=attrName, dt='string')
    cmds.setAttr(curItem + '.' + attrName, keyable=True)
    cmds.setAttr(curItem + '.' + attrName, attrVal, type='string')
def mayaAddStringAttributeToShape(curItem, attrName, attrVal):
    """Create and set a keyable string attribute on curItem's first shape.

    Does nothing if the item has no shapes, the attribute already exists,
    or the shape comes from a file reference.
    """
    shapes = cmds.listRelatives(curItem, shapes=True, pa=True)
    if shapes is None:
        return
    curShape = shapes[0]  # we assume there is only one shape
    if cmds.listAttr(curShape, st=[attrName]):
        return
    if cmds.referenceQuery(curShape, isNodeReferenced=True):
        return
    cmds.lockNode(curShape, lock=False)
    cmds.addAttr(curShape, ln=attrName, dt='string')
    cmds.setAttr(curShape + '.' + attrName, keyable=True)
    cmds.setAttr(curShape + '.' + attrName, attrVal, type='string')
def mayaAssignTag(selection, tagName, tagValue):
    """Apply a string tag to each item (defaults to the current selection)."""
    items = cmds.ls(selection=True) if selection is None else selection
    if items is not None:
        for curItem in items:
            mayaAddStringAttribute(curItem, tagName, tagValue)
def mayaAssignTagToShape(selection, tagName, tagValue):
    """Apply a string tag to each item's shape (defaults to the selection)."""
    items = cmds.ls(selection=True) if selection is None else selection
    if items is not None:
        for curItem in items:
            mayaAddStringAttributeToShape(curItem, tagName, tagValue)
class ChannelHandler:
    """Accumulates animated-channel bookkeeping prior to export as
    ScenegraphXML channel files.

    xmlChannelData holds the scenegraphXML-side channel container;
    mayaChannelData lists [mayaPath, attrName, channelIndex] triples.
    """

    def __init__(self, channelData):
        self.xmlChannelData = channelData
        self.mayaChannelData = []

    def addMayaChannel(self, mayaPath, attrName, channelIndex):
        # Each entry pairs a Maya plug with its channel-file index.
        self.mayaChannelData.append([mayaPath, attrName, channelIndex])
class MayaSgxmlHandler:
# creates python classes using scenegraphXML.py to represent Maya hierarchy data
def __init__(self, mayaSelection, fileDir, fileStem, startFrame=None, endFrame=None,
arbAttrs=None, geoFileOptions=None, boundsWriteMode='all', mayaParent=None):
self.mayaSelection = mayaSelection
self.mayaParent = mayaParent
self.fileDir = fileDir
self.fileStem = fileStem
self.startFrame = startFrame
self.endFrame = endFrame
self.arbAttrs = arbAttrs
self.geoFileOptions = geoFileOptions
self.boundsWriteMode = boundsWriteMode
self.childHandlers = []
self.mayaChannelData = []
self.numChannels = 0
self.rangeMaxBoundsList = []
# iterate over the hierachy and create scenegraphXML element to hold data for
# any Maya nodes that need to be written out to scenegraphXML
# create root element for scenegraphXML data
self.root = scenegraphXML.ScenegraphRoot()
# if required, create channel data handler for animation data
#if startFrame is not None and endFrame is not None:
chanPath = self.fileStem
if self.fileDir is not None:
chanPath = self.fileDir + '/' + chanPath
if self.isStatic():
frameNo = self.getStaticFrameNo()
self.root.channelData = scenegraphXML.ChannelData(frameNo, frameNo)
else:
self.root.channelData = scenegraphXML.ChannelData(startFrame, endFrame, chanPath)
# iterate through the Maya selection list creating a SgXML hierarchy for each
# instance and add them to the root SgXML element
for curMayaElement in self.mayaSelection:
curInstance = self.createSgXMLHierarchy(curMayaElement)
self.root.addInstance(curInstance)
def isStatic(self):
return self.startFrame == self.endFrame
def getStaticFrameNo(self):
if self.startFrame == None:
return 1
return self.startFrame
def createSgXMLHierarchy(self, mayaElementPath):
curNodeName = getMayaNodeName(mayaElementPath)
nodeType = getAttrOrNone(mayaElementPath, 'sgxml_nodeType')
newElement = None
nodeGroupType = getAttrOrNone(mayaElementPath, 'sgxml_nodeGroupType')
dirUsed = self.fileDir
stemUsed = ''
if nodeType == 'xmlReference':
# The hierarchy under here needs to be written out as another SgXML file.
# We create a new MayaSgxmlHandler for the sub hierarchy and add it to the
# list child handlers on this class so that we can later write our all the
# SgXML files together
# Allow to overwrite the name and path of the destination file
nodeFilepath = getAttrOrNone(mayaElementPath, 'sgxml_filepath')
if nodeFilepath:
fileDir = os.path.dirname(nodeFilepath)
if fileDir:
dirUsed = fileDir
fileBase = os.path.basename(nodeFilepath)
stemUsed = os.path.splitext(fileBase)[0]
else:
stemUsed = mayaNode2FileStem(mayaElementPath)
filepath = mayaNode2FilePath(mayaElementPath, self.fileDir, 'xml', relativePath=True)
newElement = scenegraphXML.Reference(curNodeName, refFile=filepath, groupType=nodeGroupType)
elementChildList = getValidChildren(mayaElementPath)
newChildHandler = MayaSgxmlHandler(mayaSelection=elementChildList,
fileDir=dirUsed,
fileStem=stemUsed,
startFrame=self.startFrame,
endFrame=self.endFrame,
arbAttrs=self.arbAttrs,
geoFileOptions=self.geoFileOptions,
boundsWriteMode=self.boundsWriteMode,
mayaParent=mayaElementPath)
self.childHandlers.append(newChildHandler)
elif nodeType == 'component' or nodeType == 'staticComponent':
# This is a component, so write out the maya node tree under this node as a
# alembic format .abc file
# (Would be nice to generalise this later)
refType = getAttrOrNone(mayaElementPath, 'sgxml_refType')
elementChildList = getValidChildren(mayaElementPath)
if not elementChildList and mayaElementPath:
elementChildList = [mayaElementPath]
if refType == | |
test_weibull_mocked(self):
self._test_sampling_func("weibull", a=1.0, size=(1,))
def test_zipf_mocked(self):
    # zipf(a, size) must be forwarded verbatim to the underlying generator.
    kwargs = dict(a=1.0, size=(1,))
    self._test_sampling_func("zipf", **kwargs)
@classmethod
def _test_sampling_func(cls, fname, *args, **kwargs):
    # Swap the RNG's generator for a mock and check that calling the
    # sampling function forwards all arguments unchanged and returns
    # whatever the generator produced.
    generator_mock = mock.MagicMock()
    getattr(generator_mock, fname).return_value = "foo"
    rng = iarandom.RNG(0)
    rng.generator = generator_mock
    observed = getattr(rng, fname)(*args, **kwargs)
    assert observed == "foo"
    getattr(generator_mock, fname).assert_called_once_with(*args, **kwargs)
#
# outdated methods from RandomState
#
def test_rand_mocked(self):
    # rand(*shape) is an outdated alias for random(...).
    shape = (1, 2, 3)
    self._test_sampling_func_alias("rand", "random", *shape)
def test_randint_mocked(self):
    # randint(low, high) is an outdated alias for integers(...).
    bounds = (0, 100)
    self._test_sampling_func_alias("randint", "integers", *bounds)
def test_randn_mocked(self):
    # Renamed from `randn`: without the "test" prefix unittest discovery
    # never ran this method. Name matches the sibling *_mocked tests and
    # avoids colliding with the integration test `test_randn` below.
    self._test_sampling_func_alias("randn", "standard_normal", 1, 2, 3)
def test_random_integers_mocked(self):
    # Renamed from `random_integers`: without the "test" prefix unittest
    # discovery never ran this method.
    self._test_sampling_func_alias("random_integers", "integers", 1, 2)
def test_random_sample_mocked(self):
    # Renamed from `random_sample`: without the "test" prefix unittest
    # discovery never ran this method.
    self._test_sampling_func_alias("random_sample", "uniform", (1, 2, 3))
def test_tomaxint_mocked(self):
    # Renamed from `tomaxint`: without the "test" prefix unittest
    # discovery never ran this method.
    self._test_sampling_func_alias("tomaxint", "integers", (1, 2, 3))
def test_rand(self):
    samples = iarandom.RNG(0).rand(10, 20, 3)
    # float32 samples of the requested shape, all within [0, 1] and
    # not degenerate (both interior values present).
    assert samples.dtype.name == "float32"
    assert samples.shape == (10, 20, 3)
    assert np.all(samples >= 0.0)
    assert np.all(samples <= 1.0)
    assert np.any(samples > 0.0)
    assert np.any(samples < 1.0)
def test_randint(self):
    samples = iarandom.RNG(0).randint(10, 100, size=(10, 20, 3))
    # int32 samples in [10, 99] (high bound exclusive), non-degenerate.
    assert samples.dtype.name == "int32"
    assert samples.shape == (10, 20, 3)
    assert np.all(samples >= 10)
    assert np.all(samples <= 99)
    assert np.any(samples > 10)
    assert np.any(samples < 99)
def test_randn(self):
    result = iarandom.RNG(0).randn(10, 50, 3)
    assert result.dtype.name == "float32"
    assert result.shape == (10, 50, 3)
    assert np.any(result > 0.5)
    assert np.any(result < -0.5)
    # Fix: the original used logical_or(result < 2.0, result > -2.0),
    # which is True for every float, making the assertion vacuous. For a
    # standard normal sample, well over half the values lie in (-2, 2).
    assert np.average(np.logical_and(result < 2.0, result > -2.0)) > 0.5
def test_random_integers(self):
    samples = iarandom.RNG(0).random_integers(10, 100, size=(10, 20, 3))
    # int32 samples in [10, 100] (high bound inclusive), non-degenerate.
    assert samples.dtype.name == "int32"
    assert samples.shape == (10, 20, 3)
    assert np.all(samples >= 10)
    assert np.all(samples <= 100)
    assert np.any(samples > 10)
    assert np.any(samples < 100)
def test_random_integers__no_high(self):
    # With a single bound, values are drawn from [1, bound] inclusive.
    samples = iarandom.RNG(0).random_integers(100, size=(10, 20, 3))
    assert samples.dtype.name == "int32"
    assert samples.shape == (10, 20, 3)
    assert np.all(samples >= 1)
    assert np.all(samples <= 100)
    assert np.any(samples > 1)
    assert np.any(samples < 100)
def test_random_sample(self):
    samples = iarandom.RNG(0).random_sample((10, 20, 3))
    # float64 samples in [0, 1], non-degenerate.
    assert samples.dtype.name == "float64"
    assert samples.shape == (10, 20, 3)
    assert np.all(samples >= 0.0)
    assert np.all(samples <= 1.0)
    assert np.any(samples > 0.0)
    assert np.any(samples < 1.0)
def test_tomaxint(self):
    samples = iarandom.RNG(0).tomaxint((10, 200, 3))
    # non-negative int32 samples spanning a wide range.
    assert samples.dtype.name == "int32"
    assert samples.shape == (10, 200, 3)
    assert np.all(samples >= 0)
    assert np.any(samples > 10000)
@classmethod
def _test_sampling_func_alias(cls, fname_alias, fname_subcall, *args,
                              **kwargs):
    # An outdated RandomState-style alias must delegate to its modern
    # counterpart and return that counterpart's result unchanged.
    rng = iarandom.RNG(0)
    subcall_mock = mock.Mock(return_value="foo")
    setattr(rng, fname_subcall, subcall_mock)
    observed = getattr(rng, fname_alias)(*args, **kwargs)
    assert observed == "foo"
    assert subcall_mock.call_count == 1
class Test_supports_new_numpy_rng_style(_Base):
    def test_call(self):
        # Must agree exactly with the module-level numpy version check.
        observed = iarandom.supports_new_numpy_rng_style()
        assert observed is IS_NP_117_OR_HIGHER
class Test_get_global_rng(_Base):
    def test_call(self):
        # After seeding globally, the global RNG must be equivalent to a
        # freshly constructed RNG with the same seed.
        iarandom.seed(0)
        global_rng = iarandom.get_global_rng()
        assert global_rng is not None
        assert global_rng.equals(iarandom.RNG(0))
class Test_seed(_Base):
    """Tests for imgaug.random.seed(), which reseeds the global RNG."""

    @mock.patch("imgaug.random._seed_np117_")
    @mock.patch("imgaug.random._seed_np116_")
    def test_mocked_call(self, mock_np116, mock_np117):
        # seed() must dispatch to exactly one numpy-version-specific backend.
        iarandom.seed(1)
        if IS_NP_117_OR_HIGHER:
            mock_np117.assert_called_once_with(1)
            assert mock_np116.call_count == 0
        else:
            mock_np116.assert_called_once_with(1)
            assert mock_np117.call_count == 0

    def test_integrationtest(self):
        # After seeding, the global RNG must equal a fresh RNG with that seed.
        iarandom.seed(1)
        assert iarandom.GLOBAL_RNG.equals(iarandom.RNG(1))

    def test_seed_affects_augmenters_created_after_its_call(self):
        # Re-seeding before creating an augmenter makes its output reproducible.
        image = np.full((50, 50, 3), 128, dtype=np.uint8)
        images_aug = []
        for _ in np.arange(5):
            iarandom.seed(100)
            aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
            images_aug.append(aug(image=image))
        # assert all images identical
        for other_image_aug in images_aug[1:]:
            assert np.array_equal(images_aug[0], other_image_aug)
        # but different seed must lead to different image
        iarandom.seed(101)
        aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
        image_aug = aug(image=image)
        assert not np.array_equal(images_aug[0], image_aug)

    def test_seed_affects_augmenters_created_before_its_call(self):
        # Re-seeding also controls augmenters that already exist: the test
        # seeds after construction and still gets reproducible outputs.
        image = np.full((50, 50, 3), 128, dtype=np.uint8)
        images_aug = []
        for _ in np.arange(5):
            aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
            iarandom.seed(100)
            images_aug.append(aug(image=image))
        # assert all images identical
        for other_image_aug in images_aug[1:]:
            assert np.array_equal(images_aug[0], other_image_aug)
        # but different seed must lead to different image
        aug = iaa.AdditiveGaussianNoise(scale=50, per_channel=True)
        iarandom.seed(101)
        image_aug = aug(image=image)
        assert not np.array_equal(images_aug[0], image_aug)
class Test_normalize_generator(_Base):
    @mock.patch("imgaug.random.normalize_generator_")
    def test_mocked_call(self, mock_subfunc):
        # The non-inplace wrapper must copy its input before delegating
        # to normalize_generator_ and return that function's result.
        mock_subfunc.return_value = "foo"
        inputs = ["bar"]
        observed = iarandom.normalize_generator(inputs)
        assert mock_subfunc.call_count == 1
        passed = mock_subfunc.call_args[0][0]
        assert passed is not inputs  # a copy was made...
        assert passed == inputs      # ...with equal content
        assert observed == "foo"
class Test_normalize_generator_(_Base):
    """Tests for the in-place generator normalization helper."""

    @mock.patch("imgaug.random._normalize_generator_np117_")
    @mock.patch("imgaug.random._normalize_generator_np116_")
    def test_mocked_call(self, mock_np116, mock_np117):
        # Exactly one numpy-version-specific backend must be invoked.
        mock_np116.return_value = "np116"
        mock_np117.return_value = "np117"
        result = iarandom.normalize_generator_(None)
        if IS_NP_117_OR_HIGHER:
            assert result == "np117"
            mock_np117.assert_called_once_with(None)
            assert mock_np116.call_count == 0
        else:
            assert result == "np116"
            mock_np116.assert_called_once_with(None)
            assert mock_np117.call_count == 0

    def test_called_with_none(self):
        # None normalizes to the global RNG's generator object.
        result = iarandom.normalize_generator_(None)
        assert result is iarandom.get_global_rng().generator

    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "SeedSequence does not exist in numpy <=1.16")
    def test_called_with_seed_sequence(self):
        # A SeedSequence is wrapped into a Generator with equal state.
        seedseq = np.random.SeedSequence(0)
        result = iarandom.normalize_generator_(seedseq)
        expected = np.random.Generator(
            iarandom.BIT_GENERATOR(np.random.SeedSequence(0)))
        assert iarandom.is_generator_equal_to(result, expected)

    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "BitGenerator does not exist in numpy <=1.16")
    def test_called_with_bit_generator(self):
        # A BitGenerator is wrapped without being copied.
        bgen = iarandom.BIT_GENERATOR(np.random.SeedSequence(0))
        result = iarandom.normalize_generator_(bgen)
        assert result.bit_generator is bgen

    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "Generator does not exist in numpy <=1.16")
    def test_called_with_generator(self):
        # An existing Generator is returned unchanged (in-place contract).
        gen = np.random.Generator(
            iarandom.BIT_GENERATOR(np.random.SeedSequence(0))
        )
        result = iarandom.normalize_generator_(gen)
        assert result is gen

    def test_called_with_random_state(self):
        # Legacy RandomState: converted to a Generator on numpy>=1.17,
        # passed through unchanged on older numpy.
        rs = np.random.RandomState(0)
        result = iarandom.normalize_generator_(rs)
        if IS_NP_117_OR_HIGHER:
            seed = iarandom.generate_seed_(np.random.RandomState(0))
            expected = iarandom.convert_seed_to_generator(seed)
            assert iarandom.is_generator_equal_to(result, expected)
        else:
            assert result is rs

    def test_called_int(self):
        # A plain int seed is converted to a generator deterministically.
        seed = 0
        result = iarandom.normalize_generator_(seed)
        expected = iarandom.convert_seed_to_generator(seed)
        assert iarandom.is_generator_equal_to(result, expected)
class Test_convert_seed_to_generator(_Base):
    """Tests for converting an int seed to a generator object."""
    # Mocks arrive bottom-up: mock_np116 belongs to the innermost patch.
    @mock.patch("imgaug.random._convert_seed_to_generator_np117")
    @mock.patch("imgaug.random._convert_seed_to_generator_np116")
    def test_mocked_call(self, mock_np116, mock_np117):
        # Exactly one version-specific backend must be invoked.
        mock_np116.return_value = "np116"
        mock_np117.return_value = "np117"
        result = iarandom.convert_seed_to_generator(1)
        if IS_NP_117_OR_HIGHER:
            assert result == "np117"
            mock_np117.assert_called_once_with(1)
            assert mock_np116.call_count == 0
        else:
            assert result == "np116"
            mock_np116.assert_called_once_with(1)
            assert mock_np117.call_count == 0
    def test_call(self):
        # Integration: numpy 1.17+ yields a Generator, older numpy a
        # RandomState; both are seeded deterministically from the same seed.
        gen = iarandom.convert_seed_to_generator(1)
        if IS_NP_117_OR_HIGHER:
            expected = np.random.Generator(
                iarandom.BIT_GENERATOR(np.random.SeedSequence(1)))
            assert iarandom.is_generator_equal_to(gen, expected)
        else:
            expected = np.random.RandomState(1)
            assert iarandom.is_generator_equal_to(gen, expected)
class Test_convert_seed_sequence_to_generator(_Base):
    """Tests for converting a SeedSequence into a Generator."""

    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "SeedSequence does not exist in numpy <=1.16")
    def test_call(self):
        # Converting a SeedSequence must produce the same generator state
        # as constructing a Generator from an identically-seeded sequence.
        produced = iarandom.convert_seed_sequence_to_generator(
            np.random.SeedSequence(1))
        reference = np.random.Generator(
            iarandom.BIT_GENERATOR(np.random.SeedSequence(1)))
        assert iarandom.is_generator_equal_to(produced, reference)
class Test_create_pseudo_random_generator_(_Base):
    """Tests for creating a pseudo-random generator from the global RNG."""

    def test_call(self):
        # Snapshot the global generator first: the call below advances it.
        global_gen_snapshot = copylib.deepcopy(
            iarandom.get_global_rng().generator)
        produced = iarandom.create_pseudo_random_generator_()
        # The result must match a generator seeded from that snapshot.
        reference = iarandom.convert_seed_to_generator(
            iarandom.generate_seed_(global_gen_snapshot))
        assert iarandom.is_generator_equal_to(produced, reference)
class Test_create_fully_random_generator(_Base):
    """Tests for creating a non-deterministically seeded generator."""
    # Mocks arrive bottom-up: mock_np116 belongs to the innermost patch.
    @mock.patch("imgaug.random._create_fully_random_generator_np117")
    @mock.patch("imgaug.random._create_fully_random_generator_np116")
    def test_mocked_call(self, mock_np116, mock_np117):
        # Exactly one version-specific backend must be invoked.
        mock_np116.return_value = "np116"
        mock_np117.return_value = "np117"
        result = iarandom.create_fully_random_generator()
        if IS_NP_117_OR_HIGHER:
            assert result == "np117"
            mock_np117.assert_called_once_with()
            assert mock_np116.call_count == 0
        else:
            assert result == "np116"
            mock_np116.assert_called_once_with()
            assert mock_np117.call_count == 0
    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "Function uses classes from numpy 1.17+")
    def test_np117_mocked(self):
        # Patch the bit generator so "fully random" becomes reproducible.
        dummy_bitgen = np.random.SFC64(1)
        with mock.patch("numpy.random.SFC64") as mock_bitgen:
            mock_bitgen.return_value = dummy_bitgen
            result = iarandom._create_fully_random_generator_np117()
            assert mock_bitgen.call_count == 1
            assert iarandom.is_generator_equal_to(
                result, np.random.Generator(dummy_bitgen))
    def test_np116_mocked(self):
        # Same idea for the legacy RandomState-based implementation.
        dummy_rs = np.random.RandomState(1)
        with mock.patch("numpy.random.RandomState") as mock_rs:
            mock_rs.return_value = dummy_rs
            result = iarandom._create_fully_random_generator_np116()
            assert mock_rs.call_count == 1
            assert iarandom.is_generator_equal_to(result, np.random.RandomState(1))
class Test_generate_seed_(_Base):
    """Tests for the single-seed convenience wrapper."""

    @mock.patch("imgaug.random.generate_seeds_")
    def test_mocked_call(self, mock_seeds):
        # generate_seed_ must delegate to generate_seeds_ with n=1.
        source_gen = iarandom.convert_seed_to_generator(0)
        iarandom.generate_seed_(source_gen)
        mock_seeds.assert_called_once_with(source_gen, 1)
class Test_generate_seeds_(_Base):
    """Tests for drawing multiple seeds from a generator."""

    @mock.patch("imgaug.random.polyfill_integers")
    def test_mocked_call(self, mock_integers):
        # Seed range and requested count must be forwarded verbatim.
        source_gen = iarandom.convert_seed_to_generator(0)
        iarandom.generate_seeds_(source_gen, 10)
        mock_integers.assert_called_once_with(
            source_gen, iarandom.SEED_MIN_VALUE, iarandom.SEED_MAX_VALUE,
            size=(10,))

    def test_call(self):
        # Integration: the result is an int32 numpy array of length n.
        source_gen = iarandom.convert_seed_to_generator(0)
        seeds = iarandom.generate_seeds_(source_gen, 2)
        assert len(seeds) == 2
        assert ia.is_np_array(seeds)
        assert seeds.dtype.name == "int32"
class Test_copy_generator(_Base):
    """Tests for copying RandomState/Generator objects."""
    @mock.patch("imgaug.random._copy_generator_np116")
    def test_mocked_call_with_random_state(self, mock_np116):
        # RandomState inputs are dispatched to the np116 implementation.
        mock_np116.return_value = "np116"
        gen = np.random.RandomState(1)
        gen_copy = iarandom.copy_generator(gen)
        assert gen_copy == "np116"
        mock_np116.assert_called_once_with(gen)
    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "Function uses classes from numpy 1.17+")
    @mock.patch("imgaug.random._copy_generator_np117")
    def test_mocked_call_with_generator(self, mock_np117):
        # Generator inputs are dispatched to the np117 implementation.
        mock_np117.return_value = "np117"
        gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
        gen_copy = iarandom.copy_generator(gen)
        assert gen_copy == "np117"
        mock_np117.assert_called_once_with(gen)
    def test_call_with_random_state(self):
        # The copy must be a distinct object with identical state.
        gen = np.random.RandomState(1)
        gen_copy = iarandom._copy_generator_np116(gen)
        assert gen is not gen_copy
        assert iarandom.is_generator_equal_to(gen, gen_copy)
    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "Function uses classes from numpy 1.17+")
    def test_call_with_generator(self):
        # Same contract for the numpy 1.17+ Generator path.
        gen = np.random.Generator(iarandom.BIT_GENERATOR(1))
        gen_copy = iarandom._copy_generator_np117(gen)
        assert gen is not gen_copy
        assert iarandom.is_generator_equal_to(gen, gen_copy)
class Test_copy_generator_unless_global_generator(_Base):
    """Tests for the copy-unless-global optimization."""
    @mock.patch("imgaug.random.get_global_rng")
    @mock.patch("imgaug.random.copy_generator")
    def test_mocked_gen_is_global(self, mock_copy, mock_get_global_rng):
        # The global generator must be returned as-is, without copying.
        gen = iarandom.convert_seed_to_generator(1)
        mock_copy.return_value = "foo"
        mock_get_global_rng.return_value = iarandom.RNG(gen)
        result = iarandom.copy_generator_unless_global_generator(gen)
        assert mock_get_global_rng.call_count == 1
        assert mock_copy.call_count == 0
        assert result is gen
    @mock.patch("imgaug.random.get_global_rng")
    @mock.patch("imgaug.random.copy_generator")
    def test_mocked_gen_is_not_global(self, mock_copy, mock_get_global_rng):
        # Any non-global generator must be copied via copy_generator().
        gen1 = iarandom.convert_seed_to_generator(1)
        gen2 = iarandom.convert_seed_to_generator(2)
        mock_copy.return_value = "foo"
        mock_get_global_rng.return_value = iarandom.RNG(gen2)
        result = iarandom.copy_generator_unless_global_generator(gen1)
        assert mock_get_global_rng.call_count == 1
        mock_copy.assert_called_once_with(gen1)
        assert result == "foo"
class Test_reset_generator_cache_(_Base):
    """Tests for resetting a generator's cached-value flags."""
    # Mocks arrive bottom-up: mock_np116 belongs to the innermost patch.
    @mock.patch("imgaug.random._reset_generator_cache_np117_")
    @mock.patch("imgaug.random._reset_generator_cache_np116_")
    def test_mocked_call(self, mock_np116, mock_np117):
        # Exactly one version-specific backend must be invoked.
        mock_np116.return_value = "np116"
        mock_np117.return_value = "np117"
        gen = iarandom.convert_seed_to_generator(1)
        result = iarandom.reset_generator_cache_(gen)
        if IS_NP_117_OR_HIGHER:
            assert result == "np117"
            mock_np117.assert_called_once_with(gen)
            assert mock_np116.call_count == 0
        else:
            assert result == "np116"
            mock_np116.assert_called_once_with(gen)
            assert mock_np117.call_count == 0
    @unittest.skipIf(not IS_NP_117_OR_HIGHER,
                     "Function uses classes from numpy 1.17+")
    def test_call_np117(self):
        # Build a generator whose state marks a cached uint32 value, then
        # verify that resetting restores the original cache-free state.
        gen = iarandom.convert_seed_to_generator(1)
        gen_without_cache_copy = copylib.deepcopy(gen)
        state = iarandom._get_generator_state_np117(gen)
        state["has_uint32"] = 1
        gen_with_cache = copylib.deepcopy(gen)
        iarandom.set_generator_state_(gen_with_cache, state)
        gen_with_cache_copy = copylib.deepcopy(gen_with_cache)
        gen_cache_reset = iarandom.reset_generator_cache_(gen_with_cache)
        assert iarandom.is_generator_equal_to(gen_cache_reset,
                                              gen_without_cache_copy)
        assert not iarandom.is_generator_equal_to(gen_cache_reset,
                                                  gen_with_cache_copy)
    def test_call_np116(self):
        # Legacy RandomState keeps the cached-gaussian flag at state[-2].
        gen = np.random.RandomState(1)
        gen_without_cache_copy = copylib.deepcopy(gen)
        state = iarandom._get_generator_state_np116(gen)
        state = list(state)
        state[-2] = 1
        gen_with_cache = copylib.deepcopy(gen)
        iarandom.set_generator_state_(gen_with_cache, tuple(state))
        gen_with_cache_copy = copylib.deepcopy(gen_with_cache)
        gen_cache_reset = iarandom.reset_generator_cache_(gen_with_cache)
        assert iarandom.is_generator_equal_to(gen_cache_reset,
                                              gen_without_cache_copy)
        assert not iarandom.is_generator_equal_to(gen_cache_reset,
                                                  gen_with_cache_copy)
class Test_derive_generator_(_Base):
    """Tests for deriving a single child generator."""

    @mock.patch("imgaug.random.derive_generators_")
    def test_mocked_call(self, mock_derive_gens):
        # Must delegate to derive_generators_ with n=1 and unwrap the
        # single-element result list.
        mock_derive_gens.return_value = ["foo"]
        parent = iarandom.convert_seed_to_generator(1)
        child = iarandom.derive_generator_(parent)
        mock_derive_gens.assert_called_once_with(parent, n=1)
        assert child == "foo"

    def test_integration(self):
        parent = iarandom.convert_seed_to_generator(1)
        parent_before = copylib.deepcopy(parent)
        child = iarandom.derive_generator_(parent)
        # The child differs from the parent's original state ...
        assert not iarandom.is_generator_equal_to(child, parent_before)
        # ... and deriving advanced the parent's state in-place.
        assert not iarandom.is_generator_equal_to(parent_before, parent)
class Test_derive_generators_(_Base):
    # Mocks arrive bottom-up: mock_np116 belongs to the innermost patch.
    @mock.patch("imgaug.random._derive_generators_np117_")
    @mock.patch("imgaug.random._derive_generators_np116_")
    def test_mocked_call(self, mock_np116, mock_np117):
        """Dispatch must follow the concrete type of the input generator."""
        mock_np116.return_value = "np116"
        mock_np117.return_value = "np117"
        gen = iarandom.convert_seed_to_generator(1)
        result = iarandom.derive_generators_(gen, 1)
        # convert_seed_to_generator returns a RandomState on numpy <=1.16,
        # so the branch taken mirrors the installed numpy version.
        if isinstance(gen, np.random.RandomState):
            assert result == "np116"
            mock_np116.assert_called_once_with(gen, n=1)
            assert mock_np117.call_count == 0
        else:
            assert result == "np117"
            mock_np117.assert_called_once_with(gen, n=1)
            assert mock_np116.call_count == 0
@unittest.skipIf(not | |
6, 9], s.get_spans(vals).tolist())
dst = s.open_dataset(bio, "w", "src")
ds = dst.create_dataframe('ds')
vals_f = s.create_numeric(ds, "vals", "int32")
vals_f.data.write(vals)
self.assertListEqual([0, 1, 3, 5, 6, 9], s.get_spans(s.get(ds['vals'])).tolist())
def test_get_spans_two_fields(self):
vals_1 = np.asarray(['a', 'a', 'a', 'b', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c'], dtype='S1')
vals_2 = np.asarray([5, 5, 6, 2, 2, 3, 4, 4, 7, 7, 7, 7], dtype=np.int32)
bio = BytesIO()
with session.Session() as s:
self.assertListEqual([0, 2, 3, 5, 6, 8, 12], s.get_spans(fields=(vals_1, vals_2)).tolist())
dst = s.open_dataset(bio, "w", "src")
ds = dst.create_dataframe('ds')
vals_1_f = s.create_fixed_string(ds, 'vals_1', 1)
vals_1_f.data.write(vals_1)
vals_2_f = s.create_numeric(ds, 'vals_2', 'int32')
vals_2_f.data.write(vals_2)
self.assertListEqual([0, 2, 3, 5, 6, 8, 12], s.get_spans(fields=(vals_1, vals_2)).tolist())
    def test_get_spans_index_string_field(self):
        """Spans over an indexed-string field written to a dataframe."""
        bio=BytesIO()
        with session.Session() as s:
            dst = s.open_dataset(bio, "w", "src")
            ds = dst.create_dataframe('ds')
            idx= s.create_indexed_string(ds,'idx')
            idx.data.write(['aa','bb','bb','c','c','c','d','d','e','f','f','f'])
            # NOTE(review): unlike the sibling tests there is no .tolist()
            # here -- presumably get_spans returns a plain list for indexed
            # string fields; confirm, else assertListEqual fails on type.
            self.assertListEqual([0,1,3,6,8,9,12],s.get_spans(idx))
    def test_get_spans_with_dest(self):
        """get_spans can write its result into a destination field."""
        vals = np.asarray([0, 1, 1, 3, 3, 6, 5, 5, 5], dtype=np.int32)
        bio = BytesIO()
        with session.Session() as s:
            # Raw ndarray input.
            self.assertListEqual([0, 1, 3, 5, 6, 9], s.get_spans(vals).tolist())
            dst = s.open_dataset(bio, "w", "src")
            ds = dst.create_dataframe('ds')
            vals_f = s.create_numeric(ds, "vals", "int32")
            vals_f.data.write(vals)
            # Field input.
            self.assertListEqual([0, 1, 3, 5, 6, 9], s.get_spans(s.get(ds['vals'])).tolist())
            # Destination field: spans are written into ds['span'].
            span_dest = ds.create_numeric('span','int32')
            s.get_spans(ds['vals'],dest=span_dest)
            self.assertListEqual([0, 1, 3, 5, 6, 9],ds['span'].data[:].tolist())
class TestSessionAggregate(unittest.TestCase):
def test_apply_spans_count(self):
idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
bio = BytesIO()
with session.Session() as s:
spans = s.get_spans(idx)
results = s.apply_spans_count(spans)
self.assertListEqual([1, 2, 3, 4], results.tolist())
dst = s.open_dataset(bio, "w", "ds")
ds = dst.create_dataframe('ds')
s.apply_spans_count(spans, dest=s.create_numeric(ds, 'result', 'int32'))
self.assertListEqual([1, 2, 3, 4], s.get(ds['result']).data[:].tolist())
    def test_apply_spans_first(self):
        """apply_spans_first picks each span's first value (array & field)."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            # ndarray source -> ndarray result.
            results = s.apply_spans_first(spans, vals)
            self.assertListEqual([0, 8, 6, 3], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.apply_spans_first(spans, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 8, 6, 3], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.apply_spans_first(spans, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 8, 6, 3], s.get(ds['result2']).data[:].tolist())
    def test_apply_spans_last(self):
        """apply_spans_last picks each span's last value (array & field)."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            # ndarray source -> ndarray result.
            results = s.apply_spans_last(spans, vals)
            self.assertListEqual([0, 2, 5, 9], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.apply_spans_last(spans, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 2, 5, 9], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.apply_spans_last(spans, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 2, 5, 9], s.get(ds['result2']).data[:].tolist())
    def test_apply_spans_min(self):
        """apply_spans_min picks each span's minimum (array & field)."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            # ndarray source -> ndarray result.
            results = s.apply_spans_min(spans, vals)
            self.assertListEqual([0, 2, 4, 1], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.apply_spans_min(spans, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 2, 4, 1], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.apply_spans_min(spans, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 2, 4, 1], s.get(ds['result2']).data[:].tolist())
    def test_apply_spans_max(self):
        """apply_spans_max picks each span's maximum (array & field)."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            # ndarray source -> ndarray result.
            results = s.apply_spans_max(spans, vals)
            self.assertListEqual([0, 8, 6, 9], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.apply_spans_max(spans, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 8, 6, 9], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.apply_spans_max(spans, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 8, 6, 9], s.get(ds['result2']).data[:].tolist())
    def test_apply_spans_concat(self):
        """Concatenate string values within each span, comma-separated."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = ['a', 'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b']
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            self.assertListEqual([0, 1, 3, 6, 10], spans.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            s.create_indexed_string(ds, 'vals').data.write(vals)
            s.apply_spans_concat(spans, s.get(ds['vals']), dest=s.create_indexed_string(ds, 'result'))
            # Index offsets and joined values of the resulting field.
            self.assertListEqual([0, 1, 4, 9, 16], s.get(ds['result']).indices[:].tolist())
            self.assertListEqual(['a', 'b,a', 'b,a,b', 'a,b,a,b'], s.get(ds['result']).data[:])
    def test_apply_spans_concat_2(self):
        """Values containing commas get double-quoted when concatenated."""
        idx = np.asarray([0, 0, 1, 2, 2, 3, 4, 4, 4, 4], dtype=np.int32)
        vals = ['a', 'b,c', 'd', 'e,f', 'g', 'h,i', 'j', 'k,l', 'm', 'n,o']
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            self.assertListEqual([0, 2, 3, 5, 6, 10], spans.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            s.create_indexed_string(ds, 'vals').data.write(vals)
            s.apply_spans_concat(spans, s.get(ds['vals']), dest=s.create_indexed_string(ds, 'result'))
            # Quoting lengthens the entries, reflected in the indices.
            self.assertListEqual([0, 7, 8, 15, 20, 35], s.get(ds['result']).indices[:].tolist())
            self.assertListEqual(['a,"b,c"', 'd', '"e,f",g', '"h,i"', 'j,"k,l",m,"n,o"'],
                                 s.get(ds['result']).data[:])
def test_apply_spans_concat_field(self):
idx = np.asarray([0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2], dtype=np.int32)
vals = ['a', "'b'", 'what', 'some, information', 'x',
'', 'foo', 'flop',
"'dun'", "'mun'", "'race, track?'", '', "for, too", 'z', 'now!']
# vals = ['a', 'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b']
bio = BytesIO()
with session.Session() as s:
spans = s.get_spans(idx)
# results = s.apply_spans_concat(spans, vals)
# self.assertListEqual([0, 8, 6, 9], results.tolist())
dst = s.open_dataset(bio, "w", "ds")
ds = dst.create_dataframe('ds')
# s.apply_spans_concat(spans, vals, dest=s.create_indexed_string(ds, 'result'))
# self.assertListEqual([0, 8, 6, 9], s.get(ds['result']).data[:].tolist())
s.create_indexed_string(ds, 'vals').data.write(vals)
s.apply_spans_concat(spans, s.get(ds['vals']), dest=s.create_indexed_string(ds, 'result'))
self.assertListEqual(['a,\'b\',what,"some, information",x', 'foo,flop',
'\'dun\',\'mun\',"\'race, track?\'","for, too",z,now!'],
s.get(ds['result']).data[:])
    def test_apply_spans_concat_small_chunk_size(self):
        """Concatenation must be chunk-size independent.

        Runs the same concat twice: once with default chunk sizes and once
        with tiny (16-element) source/destination chunks, expecting
        identical indices and values from both.
        """
        idx = np.asarray([0, 0, 0, 1, 1, 2, 2, 2, 3, 3,
                          4, 4, 4, 5, 5, 6, 6, 6, 7, 7,
                          8, 8, 8, 9, 9, 10, 10, 10, 11, 11,
                          12, 12, 12, 13, 13, 14, 14, 14, 15, 15,
                          16, 16, 16, 17, 17, 18, 18, 18, 19, 19])
        vals = ['a', 'b,c', '', 'd', 'e,f', '', 'g', 'h,i', '', 'j',
                'k,l', '', 'm', 'n,o', '', 'p', 'q,r', '', 's', 't,u',
                '', 'v', 'w,x', '', 'y', 'z,aa', '', 'ab', 'ac,ad', '',
                'ae', 'af,ag', '', 'ah', 'ai,aj', '', 'ak', 'al,am', '', 'an',
                'ao,ap', '', 'aq', 'ar,as', '', 'at', 'au,av', '', 'aw', 'ax,ay']
        bio = BytesIO()
        with session.Session() as s:
            spans = s.get_spans(idx)
            self.assertListEqual([0, 3, 5, 8, 10, 13, 15, 18, 20, 23, 25, 28,
                                  30, 33, 35, 38, 40, 43, 45, 48, 50], spans.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            s.create_indexed_string(ds, 'vals').data.write(vals)
            expected_indices = [0,
                                7, 14, 21, 22, 29, 34, 41, 48, 55, 56,
                                65, 72, 82, 92, 102, 104, 114, 121, 131, 141]
            expected_data = ['a,"b,c"', 'd,"e,f"', 'g,"h,i"', 'j', '"k,l",m',
                             '"n,o"', 'p,"q,r"', 's,"t,u"', 'v,"w,x"', 'y',
                             '"z,aa",ab', '"ac,ad"', 'ae,"af,ag"', 'ah,"ai,aj"', 'ak,"al,am"',
                             'an', '"ao,ap",aq', '"ar,as"', 'at,"au,av"', 'aw,"ax,ay"']
            # Default chunk sizes.
            s.apply_spans_concat(spans, s.get(ds['vals']), dest=s.create_indexed_string(ds, 'result'))
            self.assertListEqual(expected_indices, s.get(ds['result']).indices[:].tolist())
            self.assertListEqual(expected_data, s.get(ds['result']).data[:])
            # Tiny chunk sizes force multiple read/write passes.
            s.apply_spans_concat(spans, s.get(ds['vals']), dest=s.create_indexed_string(ds, 'result2'),
                                 src_chunksize=16, dest_chunksize=16)
            self.assertListEqual(expected_indices, s.get(ds['result2']).indices[:].tolist())
            self.assertListEqual(expected_data, s.get(ds['result2']).data[:])
def test_aggregate_count(self):
idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
bio = BytesIO()
with session.Session() as s:
results = s.aggregate_count(idx)
self.assertListEqual([1, 2, 3, 4], results.tolist())
dst = s.open_dataset(bio, "w", "ds")
ds = dst.create_dataframe('ds')
s.aggregate_count(idx, dest=s.create_numeric(ds, 'result', 'int32'))
self.assertListEqual([1, 2, 3, 4], s.get(ds['result']).data[:].tolist())
    def test_aggregate_first(self):
        """aggregate_first groups by key and keeps each group's first value."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            # ndarray source -> ndarray result.
            results = s.aggregate_first(idx, vals)
            self.assertListEqual([0, 8, 6, 3], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.aggregate_first(idx, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 8, 6, 3], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.aggregate_first(idx, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 8, 6, 3], s.get(ds['result2']).data[:].tolist())
    def test_aggregate_last(self):
        """aggregate_last groups by key and keeps each group's last value."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            # ndarray source -> ndarray result.
            results = s.aggregate_last(idx, vals)
            self.assertListEqual([0, 2, 5, 9], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.aggregate_last(idx, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 2, 5, 9], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.aggregate_last(idx, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 2, 5, 9], s.get(ds['result2']).data[:].tolist())
    def test_aggregate_min(self):
        """aggregate_min groups by key and keeps each group's minimum."""
        idx = np.asarray([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], dtype=np.int32)
        vals = np.asarray([0, 8, 2, 6, 4, 5, 3, 7, 1, 9], dtype=np.int64)
        bio = BytesIO()
        with session.Session() as s:
            # ndarray source -> ndarray result.
            results = s.aggregate_min(idx, vals)
            self.assertListEqual([0, 2, 4, 1], results.tolist())
            dst = s.open_dataset(bio, "w", "ds")
            ds = dst.create_dataframe('ds')
            # ndarray source -> field destination.
            s.aggregate_min(idx, vals, dest=s.create_numeric(ds, 'result', 'int64'))
            self.assertListEqual([0, 2, 4, 1], s.get(ds['result']).data[:].tolist())
            # Field source -> field destination.
            s.create_numeric(ds, 'vals', 'int64').data.write(vals)
            s.aggregate_min(idx, s.get(ds['vals']), dest=s.create_numeric(ds, 'result2', 'int64'))
            self.assertListEqual([0, 2, 4, 1], s.get(ds['result2']).data[:].tolist())
def test_aggregate_max(self):
idx = np.asarray([0, 1, 1, 2, 2, 2, | |
"""Abstract base class for API-type streams."""
import abc
import copy
import logging
from datetime import datetime
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import backoff
import requests
from singer.schema import Schema
from singer_sdk.authenticators import APIAuthenticatorBase, SimpleAuthenticator
from singer_sdk.exceptions import FatalAPIError, RetriableAPIError
from singer_sdk.helpers.jsonpath import extract_jsonpath
from singer_sdk.plugin_base import PluginBase as TapBaseClass
from singer_sdk.streams.core import Stream
# Default number of records to request per page from paginated endpoints.
DEFAULT_PAGE_SIZE = 1000
# Default HTTP request timeout; see `RESTStream.timeout`.
DEFAULT_REQUEST_TIMEOUT = 300  # 5 minutes
class RESTStream(Stream, metaclass=abc.ABCMeta):
    """Abstract base class for REST API streams."""
    # Default page size for paginated endpoints.
    _page_size: int = DEFAULT_PAGE_SIZE
    # Lazily (re)created HTTP session; see the `requests_session` property.
    _requests_session: Optional[requests.Session]
    # HTTP verb used for all requests of this stream.
    rest_method = "GET"
    #: JSONPath expression to extract records from the API response.
    records_jsonpath: str = "$[*]"
    #: Optional JSONPath expression to extract a pagination token from the API response.
    #: Example: `"$.next_page"`
    next_page_token_jsonpath: Optional[str] = None
    # Private constants. May not be supported in future releases:
    _LOG_REQUEST_METRICS: bool = True
    # Disabled by default for safety:
    _LOG_REQUEST_METRIC_URLS: bool = False
@property
@abc.abstractmethod
def url_base(self) -> str:
"""Return the base url, e.g. ``https://api.mysite.com/v3/``."""
pass
    def __init__(
        self,
        tap: TapBaseClass,
        name: Optional[str] = None,
        schema: Optional[Union[Dict[str, Any], Schema]] = None,
        path: Optional[str] = None,
    ) -> None:
        """Initialize the REST stream.

        Args:
            tap: Singer Tap this stream belongs to.
            schema: JSON schema for records in this stream.
            name: Name of this stream.
            path: URL path for this entity stream. When falsy, the
                class-level ``path`` attribute (if any) remains in effect.
        """
        super().__init__(name=name, schema=schema, tap=tap)
        if path:
            self.path = path
        # Base headers merged into every request; see `http_headers`.
        self._http_headers: dict = {}
        # HTTP session reused across requests; see `requests_session`.
        self._requests_session = requests.Session()
        # Caches for compiled JSONPath expressions (populated lazily).
        self._compiled_jsonpath = None
        self._next_page_token_compiled_jsonpath = None
@staticmethod
def _url_encode(val: Union[str, datetime, bool, int, List[str]]) -> str:
"""Encode the val argument as url-compatible string.
Args:
val: TODO
Returns:
TODO
"""
if isinstance(val, str):
result = val.replace("/", "%2F")
else:
result = str(val)
return result
def get_url(self, context: Optional[dict]) -> str:
"""Get stream entity URL.
Developers override this method to perform dynamic URL generation.
Args:
context: Stream partition or context dictionary.
Returns:
A URL, optionally targeted to a specific partition or context.
"""
url = "".join([self.url_base, self.path or ""])
vals = copy.copy(dict(self.config))
vals.update(context or {})
for k, v in vals.items():
search_text = "".join(["{", k, "}"])
if search_text in url:
url = url.replace(search_text, self._url_encode(v))
return url
# HTTP Request functions
@property
def requests_session(self) -> requests.Session:
"""Get requests session.
Returns:
The `requests.Session`_ object for HTTP requests.
.. _requests.Session:
https://docs.python-requests.org/en/latest/api/#request-sessions
"""
if not self._requests_session:
self._requests_session = requests.Session()
return self._requests_session
def validate_response(self, response: requests.Response) -> None:
"""Validate HTTP response.
By default, checks for error status codes (>400) and raises a
:class:`singer_sdk.exceptions.FatalAPIError`.
Tap developers are encouraged to override this method if their APIs use HTTP
status codes in non-conventional ways, or if they communicate errors
differently (e.g. in the response body).
.. image:: ../images/200.png
In case an error is deemed transient and can be safely retried, then this
method should raise an :class:`singer_sdk.exceptions.RetriableAPIError`.
Args:
response: A `requests.Response`_ object.
Raises:
FatalAPIError: If the request is not retriable.
RetriableAPIError: If the request is retriable.
.. _requests.Response:
https://docs.python-requests.org/en/latest/api/#requests.Response
"""
if 400 <= response.status_code < 500:
msg = (
f"{response.status_code} Client Error: "
f"{response.reason} for path: {self.path}"
)
raise FatalAPIError(msg)
elif 500 <= response.status_code < 600:
msg = (
f"{response.status_code} Server Error: "
f"{response.reason} for path: {self.path}"
)
raise RetriableAPIError(msg)
    def request_decorator(self, func: Callable) -> Callable:
        """Instantiate a decorator for handling request failures.

        Developers may override this method to provide custom backoff or retry
        handling.

        Args:
            func: Function to decorate.

        Returns:
            A decorated method that retries on RetriableAPIError and request
            read timeouts, with exponential backoff, up to 5 attempts.
        """
        decorator: Callable = backoff.on_exception(
            backoff.expo,
            (
                RetriableAPIError,
                requests.exceptions.ReadTimeout,
            ),
            max_tries=5,
            factor=2,
        )(func)
        return decorator
    def _request(
        self, prepared_request: requests.PreparedRequest, context: Optional[dict]
    ) -> requests.Response:
        """Send a prepared request, log request metrics, validate the response.

        Args:
            prepared_request: The fully prepared request to send.
            context: Stream partition or context dictionary.

        Returns:
            The validated `requests.Response` object.
        """
        response = self.requests_session.send(prepared_request, timeout=self.timeout)
        if self._LOG_REQUEST_METRICS:
            extra_tags = {}
            if self._LOG_REQUEST_METRIC_URLS:
                # URL logging is opt-in: URLs may embed sensitive values.
                extra_tags["url"] = prepared_request.path_url
            self._write_request_duration_log(
                endpoint=self.path,
                response=response,
                context=context,
                extra_tags=extra_tags,
            )
        # Raises FatalAPIError/RetriableAPIError for error status codes.
        self.validate_response(response)
        logging.debug("Response received successfully.")
        return response
    def get_url_params(
        self, context: Optional[dict], next_page_token: Optional[Any]
    ) -> Dict[str, Any]:
        """Return a dictionary of values to be used in URL parameterization.

        If paging is supported, developers may override with specific paging logic.

        Args:
            context: Stream partition or context dictionary.
            next_page_token: Token, page number or any request argument to request the
                next page of data.

        Returns:
            Dictionary of URL query parameters to use in the request.
        """
        # Default implementation: no query parameters.
        return {}
def prepare_request(
self, context: Optional[dict], next_page_token: Optional[Any]
) -> requests.PreparedRequest:
"""Prepare a request object.
If partitioning is supported, the `context` object will contain the partition
definitions. Pagination information can be parsed from `next_page_token` if
`next_page_token` is not None.
Args:
context: Stream partition or context dictionary.
next_page_token: Token, page number or any request argument to request the
next page of data.
Returns:
Build a request with the stream's URL, path, query parameters,
HTTP headers and authenticator.
"""
http_method = self.rest_method
url: str = self.get_url(context)
params: dict = self.get_url_params(context, next_page_token)
request_data = self.prepare_request_payload(context, next_page_token)
headers = self.http_headers
authenticator = self.authenticator
if authenticator:
headers.update(authenticator.auth_headers or {})
params.update(authenticator.auth_params or {})
request = self.requests_session.prepare_request(
requests.Request(
method=http_method,
url=url,
params=params,
headers=headers,
json=request_data,
),
)
return request
def request_records(self, context: Optional[dict]) -> Iterable[dict]:
"""Request records from REST endpoint(s), returning response records.
If pagination is detected, pages will be recursed automatically.
Args:
context: Stream partition or context dictionary.
Yields:
An item for every record in the response.
Raises:
RuntimeError: If a loop in pagination is detected. That is, when two
consecutive pagination tokens are identical.
"""
next_page_token: Any = None
finished = False
decorated_request = self.request_decorator(self._request)
while not finished:
prepared_request = self.prepare_request(
context, next_page_token=next_page_token
)
resp = decorated_request(prepared_request, context)
for row in self.parse_response(resp):
yield row
previous_token = copy.deepcopy(next_page_token)
next_page_token = self.get_next_page_token(
response=resp, previous_token=previous_token
)
if next_page_token and next_page_token == previous_token:
raise RuntimeError(
f"Loop detected in pagination. "
f"Pagination token {next_page_token} is identical to prior token."
)
# Cycle until get_next_page_token() no longer returns a value
finished = not next_page_token
    # Overridable:
    def prepare_request_payload(
        self, context: Optional[dict], next_page_token: Optional[Any]
    ) -> Optional[dict]:
        """Prepare the data payload for the REST API request.

        By default, no payload will be sent (return None).

        Developers may override this method if the API requires a custom payload along
        with the request. (This is generally not required for APIs which use the
        HTTP 'GET' method.)

        Args:
            context: Stream partition or context dictionary.
            next_page_token: Token, page number or any request argument to request the
                next page of data.

        Returns:
            Dictionary with the body to use for the request.
        """
        return None
def get_next_page_token(
self, response: requests.Response, previous_token: Optional[Any]
) -> Any:
"""Return token identifying next page or None if all records have been read.
Args:
response: A raw `requests.Response`_ object.
previous_token: Previous pagination reference.
Returns:
Reference value to retrieve next page.
.. _requests.Response:
https://docs.python-requests.org/en/latest/api/#requests.Response
"""
if self.next_page_token_jsonpath:
all_matches = extract_jsonpath(
self.next_page_token_jsonpath, response.json()
)
first_match = next(iter(all_matches), None)
next_page_token = first_match
else:
next_page_token = response.headers.get("X-Next-Page", None)
return next_page_token
    @property
    def http_headers(self) -> dict:
        """Return headers dict to be used for HTTP requests.

        If an authenticator is also specified, the authenticator's headers will be
        combined with `http_headers` when making HTTP requests.

        Returns:
            Dictionary of HTTP headers to use as a base for every request.
        """
        # NOTE(review): this returns the internal `_http_headers` dict
        # itself, not a copy -- callers that mutate the result mutate the
        # stream's state. Confirm that is intended before relying on it.
        result = self._http_headers
        if "user_agent" in self.config:
            result["User-Agent"] = self.config.get("user_agent")
        return result
    @property
    def timeout(self) -> int:
        """Return the request timeout limit in seconds.

        The default is the module-level ``DEFAULT_REQUEST_TIMEOUT`` constant
        (300 seconds); override this property to tune the limit per stream.

        Returns:
            The request timeout limit as number of seconds.
        """
        return DEFAULT_REQUEST_TIMEOUT
# Records iterator
def get_records(self, context: Optional[dict]) -> Iterable[Dict[str, Any]]:
"""Return a generator of row-type dictionary objects.
Each row emitted should be a dictionary of property names to their values.
Args:
context: Stream partition or context dictionary.
Yields:
One item per (possibly processed) record in the API.
"""
for record in self.request_records(context):
transformed_record = self.post_process(record, context)
if transformed_record is None:
# Record filtered out during post_process()
continue
yield transformed_record
def parse_response(self, response: requests.Response) -> Iterable[dict]:
"""Parse the response and return an iterator of result rows.
Args:
response: A raw `requests.Response`_ object.
Yields:
One item for every item found in the response.
.. _requests.Response:
https://docs.python-requests.org/en/latest/api/#requests.Response
"""
yield from extract_jsonpath(self.records_jsonpath, input=response.json())
# Abstract methods:
@property
def authenticator(self) -> Optional[APIAuthenticatorBase]:
"""Return or set the | |
# <reponame>samcom12/anuga_core
"""
Functions which can be useful when setting quantities.
"""
import copy
import os
import anuga.utilities.spatialInputUtil as su
def make_nearestNeighbour_quantity_function(
        quantity_xyValueIn,
        domain,
        threshold_distance=9.0e+100,
        background_value=9.0e+100,
        k_nearest_neighbours=1,
        method='average'
):
    """
    Make a function f(x, y) suitable for domain.set_quantity('myQuantity', f).

    For every point x, y in the domain, the quantity is set from the
    k nearest neighbours in quantity_xyValueIn (a 3-column array of
    x, y, quantity-value in georeferenced coordinates), UNLESS the distance
    from x, y to the nearest neighbour exceeds threshold_distance, in which
    case background_value is used instead.

    INPUTS:
    @param quantity_xyValueIn -- A 3 column array (or a single row) with
           'x, y, Value' defining the points used to set the new quantity
           values, in georeferenced coordinates
    @param domain -- The ANUGA domain
    @param threshold_distance -- Points greater than this distance from
           their nearest quantity_xyValue point are set to background_value
    @param background_value -- see 'threshold_distance'
    @param k_nearest_neighbours -- Number of nearest neighbours used
    @param method -- 'average' uses an inverse-distance-weighted average of
           the k nearest neighbours; 'min' uses their minimum; 'max' their
           maximum.

    OUTPUTS:
    A function f which can be passed to domain.set_quantity('myQuantity', f)
    """
    import numpy as np
    import scipy.spatial

    # Accept either a (n, 3) array or a single (3,) point; np.asarray also
    # permits plain (nested) lists without changing array-input behaviour.
    quantity_xyValue = np.asarray(quantity_xyValueIn)
    if len(quantity_xyValue.shape) == 1:
        # Treat the single-point case
        quantity_xyValue = quantity_xyValue.reshape((1, 3))

    # KD-tree for fast k-nearest-neighbour lookup on the data x, y columns
    kdtree = scipy.spatial.cKDTree(quantity_xyValue[:, 0:2])

    def quant_NN_fun(x, y):
        """
        Assign the quantity from the nearest point(s) in quantity_xyValue,
        UNLESS the evaluation point is more than 'threshold_distance' away
        from the nearest data point, in which case background_value is used.
        """
        import numpy as np

        x = np.asarray(x).reshape(1, -1)[0, :]
        y = np.asarray(y).reshape(1, -1)[0, :]

        # ANUGA stores x, y internally in non-georeferenced coordinates, so
        # shift by the lower-left georeference corner before querying.
        xll = domain.geo_reference.xllcorner
        yll = domain.geo_reference.yllcorner
        query_points = np.zeros(shape=(len(x), 2))
        query_points[:, 0] = x + xll
        query_points[:, 1] = y + yll

        # distances/indices have shape (n,) for k == 1, (n, k) otherwise
        distances_all, indices_all = kdtree.query(query_points,
                                                  k=k_nearest_neighbours)

        # Indices of evaluation points whose NEAREST neighbour is in range
        if k_nearest_neighbours == 1:
            near = (distances_all < threshold_distance).nonzero()[0]
        else:
            near = (distances_all[:, 0] < threshold_distance).nonzero()[0]

        # Initialise the output with the background value
        quantity_output = x * 0. + background_value
        if len(near) == 0:
            return quantity_output

        def neighbour_data(i):
            # Distances and quantity values of the i-th nearest neighbour
            # for all in-range points (k == 1 arrays are not column-indexed).
            if k_nearest_neighbours == 1:
                dists = distances_all[near]
                inds = indices_all[near]
            else:
                dists = distances_all[near, i]
                inds = indices_all[near, i]
            return dists, quantity_xyValue[inds, 2]

        if method == 'min':
            best = 9.0e+100
            for i in range(k_nearest_neighbours):
                _, values = neighbour_data(i)
                best = np.minimum(best, values)
            quantity_output[near] = best
        elif method == 'max':
            best = -9.0e+100
            for i in range(k_nearest_neighbours):
                _, values = neighbour_data(i)
                best = np.maximum(best, values)
            quantity_output[near] = best
        else:
            # Inverse-distance-weighted average; the tiny epsilon avoids a
            # zero division when a query point coincides with a data point.
            numerator = 0.
            denominator = 0.
            for i in range(k_nearest_neighbours):
                dists, values = neighbour_data(i)
                inverse_distance = 1.0 / (dists + 1.0e-100)
                numerator += values * inverse_distance
                denominator += inverse_distance
            quantity_output[near] = numerator / denominator

        return quantity_output

    # Return the quantity function
    return quant_NN_fun
###############################################################################
def composite_quantity_setting_function(poly_fun_pairs,
domain,
clip_range=None,
nan_treatment='exception',
nan_interpolation_region_polygon=None,
default_k_nearest_neighbours=1,
default_raster_interpolation='pixel',
verbose=True):
""" Make a 'composite function' to set quantities -- applies different
functions inside different polygon regions.
poly_fun_pairs = [ [p0, f0], [p1, f1], ...]
Where:
fi is a function,
or a constant,
or a '.txt' or '.csv' file with comma separated xyz data
and an optional header row which contains letters,
or the name of a gdal-compatible rasterFile
(not ending in .txt or .csv),
or a numpy array with 3 columns
pi is a polygon (anuga polygon format),
or a polygon filename (shapefile or a csv format that
anuga.read_polygon will read),
or None ( equivalent to a polygon with zero area),
or 'All' (equivalent to a polygon covering everything)
or 'Extent' in the case that fi is a rasterFile name
(equivalent to a polygon with the same extent as the raster)
IMPORTANT: When polygons overlap, the first elements of the list are
given priority. The approach is:
First f0 is applied to all points in p0, and we record
that these points have been 'set'
Next f1 is applied to all points in p1 which have not
been 'set', and then we record those points as being 'set'
Next f2 is applied to all points in p2 which have not
been 'set', and then we record those points as being 'set'
... etc
INPUT:
@param poly_fun_pairs = [ [p0, f0], [p1, f1], ...]
where fi(x,y) is a function returning quantity values at points,
or any of the special cases below
SPECIAL fi CASES:
fi = a constant in which case points in the polygon are
set to that value,
fi = a .txt or .csv file name containing x, y, z data,
with comma separators and an optional header row
containing letters (nearest neighbour interpolation is used)
fi = a string rasterFile name (not ending in .txt or .csv)
which can be passed to quantityRasterFun to make a function
fi = a numpy array with 3 columns (x,y,Value) in which case
nearest-neighbour interpolation is used on the points
pi are polygons where we want to use fi inside
(anuga polygon format) or any of the special cases below
SPECIAL pi CASES:
If pi is a filename ending in .shp or a csv format that
anuga.read_polygon can read, we assume it contains a polygon
we have to read
If any pi = 'All', then we assume that ALL unset points are set
using the function. This CAN ONLY happen in the last [fi,pi]
pair where pi is not None (since fi will be applied to
all remaining points -- so anything else is probably an
input mistake)
If any pi = None, then that [fi,pi] pair is skipped
If pi = 'Extent' and fi is the name of a raster file, then the
extent of the raster file is used to define the polygon
@param domain = ANUGA domain object
@param clip_range = List with the same length as poly_fun_pairs,
of the form:
[ [min0, max0], [min1, max1], ...]
After f0 is applied in p0, its values will be 'clipped' to the
range
[min0, max0]
, and similarly for the other fi
@param nan_treatment = 'exception' or 'fall_through' -- string determining
what to do if F(x,y) is nan. The default 'exception' raises an exception.
The value 'fall_through' allows the function to try lower-priority
poly,fun pairs (in sequence) to set the value.
@param nan_interpolation_region_polygon = None, or 'All', or a list
of csv or shp filenames containing polygons, or a list of
anuga polygon objects.
If it is not None, then all x,y points which evaluate to nan
on their **first preference** dataset are recorded, and as a
final step, the values at these x,y points
**which are inside the nan_interpolation_region_polygon**
are interpolated from the other x,y,F(x,y) values.
Nearest neighbour interpolation is used, with
k_nearest_neighbours taken from default_k_nearest_neighbours.
Note that if nan_treatment = 'exception', then nan's will cause
exceptions earlier on in this routine, so you will need
nan_treatment = 'fall_through' to use this option.
Example of why you might want this:
Say you have 2 elevation datasets (one defining the
topography above MSL, and the other defining the topography
below MSL). There | |
+ diff )
pointersUpdated += 1
print 'length of defined section:', headerInfo['rtEnd'] - headerInfo['rtStart'], ' lenght of actual section:', len(rtDataBytes)
datDataBytes[headerInfo['rtStart']:headerInfo['rtEnd']] = rtDataBytes # rtData (the global variable isn't later merged. so we need to do this here)
# Update offsets in the root/reference node tables
rootAndRefNodesTable = datDataBytes[headerInfo['rtEnd']:headerInfo['stringTableStart']]
for nodeByteOffset in xrange( 0, len(rootAndRefNodesTable), 8 ): # 8 bytes = 1 table entry
filePointer = rootAndRefNodesTable[nodeByteOffset:nodeByteOffset+4]
if filePointer >= offsetBytes:
newNodePointer = toInt( filePointer ) + diff
if newNodePointer - roundTo32( newNodePointer ) != 0: print 'Warning, root/ref node pointers must be aligned to 0x20 bytes!'
rootAndRefNodesTable[nodeByteOffset:nodeByteOffset+4] = toBytes( newNodePointer )
pointersUpdated += 1
datDataBytes[headerInfo['rtEnd']:headerInfo['stringTableStart']] = rootAndRefNodesTable
if diff < 0: # Remove bytes from the latter section
datDataBytes = datDataBytes[:offset] + datDataBytes[offset+diff:]
else: # Fill the newly extended space with zeros.
datDataBytes = datDataBytes[:offset] + bytearray( diff ) + datDataBytes[offset:]
globalDatFile.data = datDataBytes
globalDatFile.rtData = rtDataBytes
msg('RT Entries updated: ' + str(entriesUpdated) + '\nPointers updated: ' + str(pointersUpdated))
globalDatFile.unsavedChanges.append( uHex(diff) + ' of space added to file at offset ' + uHex(offset + 0x20) + '.' )
updateProgramStatus( 'Space Extension Complete' )
def generateTrimColors( fileIid, autonomousMode=False ):
	"""Generate CSP trim colors (a base color plus an accent color) for a costume file.

	The file's textures are decoded and pasted into one tall "super image",
	which is palettized externally via pngquant; the first palette entry
	becomes the base color, and the entry scoring highest on a combined
	saturation/luminance metric becomes the accent color.
	NOTE(review): this is Python 2 code (print statements, StringIO).

	fileIid: iid of the file in the disc tree to analyze.
	autonomousMode: if True, apply the colors immediately via updateTrimColors();
		otherwise show color swatches so the user can choose.
	"""
	#tic = time.clock()
	# Get the file's data and parse the file for basic info
	theDatFile = hsdFiles.datFileObj( source='disc' )
	theDatFile.load( fileIid, fileData=getFileDataFromDiscTreeAsBytes( iid=fileIid ) )
	hInfo = theDatFile.headerInfo
	# Quick failsafe to make sure the file is recognizable, avoiding large processing time
	if hInfo['rootNodeCount'] > 300 or hInfo['referenceNodeCount'] > 300 or hInfo['rtEntryCount'] > 45000 or len( theDatFile.rtData ) > 200000:
		msg( 'The file structure of ' + fileIid + ' could not be analyzed for trim color generation.' )
		return
	updateProgramStatus( 'Generating CSP Trim Colors....' )
	Gui.programStatusLabel.update()
	# Collect the textures in the file
	textures = {} # keys: imageDataOffset, values: pil images
	totalWidth = 0
	totalHeight = 0
	for imageDataOffset, imageHeaderOffset, _, _, width, height, imageType, _ in identifyTextures( theDatFile ):
		# Skip this texture if it's a shading layer, by checking the flags of the Texture Struct that this texture is attached to
		imageDataHeader = theDatFile.initSpecificStruct( hsdStructures.ImageObjDesc, imageHeaderOffset )
		if not imageDataHeader: continue
		for headerParentOffset in imageDataHeader.getParents():
			# Test for a Texture Struct
			textureStruct = theDatFile.initSpecificStruct( hsdStructures.TextureObjDesc, headerParentOffset, printWarnings=False )
			if textureStruct: break
		else: continue # Above loop didn't break; no texture struct found (must be part of an animation such as an eye)
		if textureStruct.getValues( 'GXTexGenSrc' ) != 4: # Checking layer flags
			#print 'Skipping texture', uHex( 0x20+imageDataOffset ), 'for trim color generation, as it appears to be a shading layer'
			continue
		try:
			imageDataLength = hsdStructures.ImageDataBlock.getDataLength( width, height, imageType )
			imageData = theDatFile.getData( imageDataOffset, imageDataLength )
			# Skip this texture if its data has been "blanked"
			if not any( imageData ): continue
			if imageType == 8 or imageType == 9 or imageType == 10:
				# Palette-based image types also need their palette data for decoding
				paletteData, paletteType = getPaletteData( theDatFile, imageDataOffset )
			else:
				paletteData = ''
				paletteType = None
			# Decode the texture data
			newImg = tplDecoder( '', (width, height), imageType, paletteType, imageData, paletteData )
			newImg.deblockify() # This decodes the image data, to create an rgbaPixelArray.
			textures[imageDataOffset] = Image.new( 'RGBA', (width, height) )
			textures[imageDataOffset].putdata( newImg.rgbaPixelArray )
			# Update the cumulative dimensions of the new super image
			if width > totalWidth:
				totalWidth = width
			totalHeight += height
		except:
			print 'Failed to decode texture at', uHex(0x20+imageDataOffset), 'for trim color generation'
	# Combine the images collected above into one super image (textures stacked vertically)
	yOffset = 0
	superImage = Image.new( 'RGBA', (totalWidth, totalHeight) )
	for texture in textures.values():
		superImage.paste( texture, (0, yOffset) )
		yOffset += texture.size[1]
	# Save the image data to a memory buffer so it can be sent to the color quantizer without creating a file.
	superImageBuffer = StringIO()
	superImage.save( superImageBuffer, 'png' )
	# Create a palette for the super image
	exitCode, outputStream = cmdChannel( '"' + pathToPngquant + '" --speed 3 13 - ', standardInput=superImageBuffer.getvalue() )
	superImageBuffer.close()
	if exitCode != 0:
		print 'Error while generating super image palette; exit code:', exitCode
		print outputStream
		msg( 'There was an error during color generation. Error code: '+str(exitCode) + '\n\nDetails:\n' + outputStream, 'Error Generating CSP Trim Colors.' )
		updateProgramStatus( 'Error Generating CSP Trim Colors.' )
		return
	# Get the palette generated for the super image
	palettedFileBuffer = StringIO( outputStream )
	pngImage = png.Reader( palettedFileBuffer )
	pngImage.read() # Needed for pulling the palette; its return value might be useful to print
	generatedPalette = pngImage.palette( alpha='force' )
	palettedFileBuffer.close()
	# Filter out the palette entry relating to the extra empty space in the super image (alpha of 0)
	generatedPalette = [ entry for entry in generatedPalette if entry[3] != 0 ]
	baseColor = generatedPalette[0]
	# Get a value to determine whether the base color is light or dark (value/brightness)
	baseColorValue = rgb2hsv( baseColor )[2]
	# Convert the colors to HSV format (excluding the base color)
	hsvList = [ rgb2hsv(color) for color in generatedPalette[1:] ]
	# Go through the colors and look for the highest combination of luminance and saturation in order to pick an accent color
	highestSatLum = 0
	highestSatLumColorIndex = 0
	for i, color in enumerate( hsvList ):
		_, saturation, value = color # first value is hue
		if baseColorValue >= .5: # Place higher weight on darker colors (values) instead
			satLum = saturation + 1 - value
		else: satLum = saturation + value
		if satLum > highestSatLum:
			highestSatLum = satLum
			highestSatLumColorIndex = i
	accentColor = generatedPalette[1:][highestSatLumColorIndex]
	filename = os.path.basename( fileIid )
	if autonomousMode:
		updateTrimColors( filename, colors=(rgb2hex(baseColor).replace('#', ''), rgb2hex(accentColor).replace('#', '')) )
	else: # Show the user some options for the accent color, and let them decide whether to add these colors to their game.
		updateProgramStatus( 'CSP Trim Colors Generated' )
		showColorSwatches( colors=generatedPalette, chosenColors=(baseColor, accentColor), filename=filename )
def updateTrimColors( filename, colors=() ):
tableOffset = 0x3a3c90
characterTableOffsets = { # First value is the start of that character's section (to the character name), relative to the start of the table
'ca': ( 0, 'gy', 're', 'wh', 'gr', 'bu' ), # Falcon
'dk': ( 0x70, 'bk', 're', 'bu', 'gr' ), # DK
'fx': ( 0xD0, 'or', 'la', 'gr' ), # Fox
'kb': ( 0x170, 'ye', 'bu', 're', 'gr', 'wh' ), # Kirby
'kp': ( 0x1E0, 're', 'bu', 'bk' ), # Bowser
'lk': ( 0x230, 're', 'bu', 'bk', 'wh' ), # Link
'lg': ( 0x290, 'wh', 'bu', 'pi' ), # Luigi
'mr': ( 0x2E0, 'ye', 'bk', 'bu', 'gr' ), # Mario
'ms': ( 0x340, 're', 'gr', 'bk', 'wh' ), # Marth
'mt': ( 0x3A0, 're', 'bu', 'gr' ), # Mewtwo
'ns': ( 0x3F0, 'ye', 'bu', 'gr' ), # Ness
'pe': ( 0x440, 'ye', 'wh', 'bu', 'gr' ), # Peach
'pk': ( 0x4A0, 're', 'bu', 'gr' ), # Pika
'nn': ( 0x4F0, 'ye', 'aq', 'wh' ), # Nana (updating either IC changes the colors for both)
'pp': ( 0x4F0, 'gr', 'or', 're' ), # Popo
'pr': ( 0x540, 're', 'bu', 'gr', 'ye' ), # Jiggs
'ss': ( 0x5A0, 'pi', 'bk', 'gr', 'la' ), # Samus
'ys': ( 0x600, 're', 'bu', 'ye', 'pi', 'aq' ), # Yoshi
'sk': ( 0x670, 're', 'bu', 'gr', 'wh' ), # Sheik (updating either Sheik/Zelda changes the colors for both)
'zd': ( 0x670, 're', 'bu', 'gr', 'wh' ), # Zelda
'fc': ( 0x6D0, 're', 'bu', 'gr' ), # Falco
'cl': ( 0x720, 're', 'bu', 'wh', 'bk' ), # Y. Link
'dr': ( 0x780, 're', 'bu', 'gr', 'bk' ), # Dr. Mario
'fe': ( 0x7E0, 're', 'bu', 'gr', 'ye' ), # Roy
'pc': ( 0x840, 're', 'bu', 'gr' ), # Pichu
'gn': ( 0x890, 're', 'bu', 'gr', 'la' ), # Ganon
'bo': ( 0x8F0, ), # M. Wireframe
'gl': ( 0x910, ), # F. Wireframe
'gk': ( 0x930, ) # Giga Bowser
}
# Parse the filename and make sure table location information for it is available
char = filename[2:4]
color = filename[4:6]
if char not in characterTableOffsets or ( color not in characterTableOffsets[char] and color != 'nr' and color != 'rl' and color != 'rr' ): # Last two are for Falcon's Red alt
print 'Unable to process CSP trim colors for', filename, 'due to an invalid filename.'
return False
# Calculate the offset of the color to be changed
if color == 'nr': rowNumber = 1
elif len( characterTableOffsets[char] ) == 1: rowNumber = 1 # These characters only have one set of alts
elif color == 'rl' or color == 'rr': rowNumber = 3 # Both are Falcon's red costume
else:
for i, colorCode in enumerate( characterTableOffsets[char] ):
if color == colorCode:
rowNumber = i + 1 # +1 accounts for the missing 'nr' (for the Neutral costume) in the tuple
break
else: # loop above didn't break; coloCode not found (shouldn't happen due to previous validation)
print 'Unable to process CSP trim colors for', filename, 'due to an invalid filename.'
return False
fileOffset = tableOffset + characterTableOffsets[char][0] + rowNumber * 0x10
if filename[-4:] == '.rat': fileOffset += 8
elif filename[-4:] == '.usd' and color == 'rr': fileOffset += 8 # For Falcon's Red Right alt
print 'CSP Trim colors generated for', filename + ':', colors, '| Being placed at offset:', uHex(fileOffset)
# Validate | |
# USAGE
# python opencv-optical-flow.py
# python opencv-optical-flow.py --video PATH/example_01.mp4
import numpy
import cv2
import argparse
import time
import random
import math
import imutils
from collections import Counter
TRACKER_POINTS = 500 # How many points will be used to track the optical flow
CRAZY_LINE_DISTANCE = 50 # Minimum line length (pixels) for a flow line to count as a "crazy" line
CRAZY_LINE_LIMIT = 100 * TRACKER_POINTS / 1000 # Number of crazy lines that signals a different shot (note: true division yields a float on Python 3)
ABSDIFF_ANGLE = 20 # Max tolerated difference (degrees) between a line's angle and the dominant flow angle
LINE_THICKNESS = 3 # Line thickness used when drawing onto the delta mask
CONTOUR_LIMIT = 10 # Contour-count threshold for detecting ZOOM, ZOOM + PAN, ZOOM + TILT, ZOOM + ROLL (not just PAN, TILT, ROLL)
TARGET_HEIGHT = 360 # Frame height the video is resized to before processing. Like 720p, 360p etc.
DELTA_LIMIT_DIVISOR = 3 # Divisor for detecting too much motion, i.e. delta > (height * width / DELTA_LIMIT_DIVISOR)
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
if args.get("video", None) is None: # If the video argument is None, then we are reading from webcam
	cap = cv2.VideoCapture(0)
	time.sleep(0.25)
else: # Otherwise, we are reading from a video file
	cap = cv2.VideoCapture(args["video"])
# Parameters for Lucas-Kanade Optical Flow (passed to cv2.calcOpticalFlowPyrLK)
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
while True: # On this level it gets only one frame
color = numpy.random.randint(0,255,(TRACKER_POINTS,3)) # Create some random colors
ret, old_frame = cap.read() # Take first frame
old_frame = imutils.resize(old_frame, height=TARGET_HEIGHT) # Resize frame to 360p. Alternative resizing method:
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY) # Convert previous frame to grayscale
height, width = old_frame.shape[:2] # Get video height and width (size)
# Create random points on frame
p1 = numpy.random.randint(width, size=(TRACKER_POINTS, 1, 2))
for y in p1: # Get y values one by one
if y[0][1] > height: # If there is a y value that greater than max height
y[0][1] = numpy.random.random_integers(height) # Random again this time with max height value
p1 = p1.astype(numpy.float32) # Change numpy array's data type to float32
mask = numpy.zeros_like(old_frame) # Create a mask image for drawing purposes (original frame)
mask_delta = numpy.zeros_like(old_frame) # Create a mask image for drawing purposes (delta frame)
mask_white = numpy.ones_like(old_frame) # Create a white mask image for cloning original frame
white_color = numpy.array([255,255,255]) # Define color white
total_crazy_lines = 0 # Crazy line counter
most_common_angle = None # Most common angle in general optical flow in shot
while True: # Loop over the frames of the video
mask_biggest_contour = numpy.zeros_like(old_frame) # Create a mask image for drawing purposes (biggest contour frame)
ret,frame = cap.read() # Take a new frame
frame = imutils.resize(frame, height=TARGET_HEIGHT) # Resize frame to 360p. Alternative resizing method:
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert it to grayscale
delta_value = 0 # Delta Value for storing max continuous contour area for current frame
p2, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p1, None, **lk_params) # Calculate optical flow (Lucas Kanade Optical Flow function of OpenCV)
if p2 is None: # If there are not any points that coming from Lucas Kanade Optical Flow function of OpenCV
break # Break the loop to reconstruct the optical flow
# Select good points
good_points2 = p2[st==1]
good_points1 = p1[st==1]
angles_array = [] # Angle list of all newly created tiny lines
for i,(good_point2,good_point1) in enumerate(list(zip(good_points2,good_points1))): # Get point pairs one by one
# Get coordinates of points
x2,y2 = good_point2.ravel() # New point
x1,y1 = good_point1.ravel() # Previous point
distance = math.hypot(x2 - x1, y2 - y1) # Length of the tiny line between these two points.
if distance >= CRAZY_LINE_DISTANCE: # If the line is not that "tiny" it's CRAZY! xD
total_crazy_lines += 1 # Then increase total crazy line counter's value
angle = math.atan2(y2-y1, x2-x1) # Calculate tangent value of the line (returns Radian)
angle = math.degrees(angle) # Radian to Degree
angle = round(angle) # Round up the degree
angles_array.append(angle) # Append the degree to Angle List
if most_common_angle != None: # If there is a most common angle value
if abs(most_common_angle - angle) > ABSDIFF_ANGLE: # If Absolute Difference between most common angle and the current line's angle greater than ABDSDIFF_ANGLE value, this line is inconsistent to current general optical flow
cv2.line(mask_delta, (x2,y2),(x1,y1), white_color, LINE_THICKNESS) # Then draw a white line to mask_delta
cv2.line(mask, (x2,y2),(x1,y1), color[i].tolist(), 1) # In every frame draw the colored lines to original frame for better understanding (optional)
if angles_array: # If angles_array is not empty
most_common_angle = Counter(angles_array).most_common()[0][0] # Find most common angle value in Angle List
mask_delta_gray = cv2.cvtColor(mask_delta, cv2.COLOR_BGR2GRAY) # Convert mask_delta to grayscale
thresh = cv2.threshold(mask_delta_gray, 12, 255, cv2.THRESH_BINARY)[1] # Apply OpenCV's threshold function to get binary frame
thresh = cv2.dilate(thresh, None, iterations=1) # Dlation to increase white region for surrounding pixels
# Find contours on thresholded image
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour_area_stack = [] # List of contour areas's values
contour_dictionary = {} # Dictionary of contours key = 'contour area' & value = 'contour coordinates (x,y,w,h)'
biggest_contour_coordinates = None # Biggest contour coordinate
frame_final_form = cv2.add(frame,mask_white)
# Loop over the contours
if cnts:
for c in cnts: # Contour in Contours
contour_area_stack.append(cv2.contourArea(c)) # Calculate contour area and append to contour stack
if cv2.contourArea(c) > args["min_area"]: # If contour area greater than min area
(x, y, w, h) = cv2.boundingRect(c) # Compute the bounding box for this contour
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) # Draw it on the frame
contour_dictionary[cv2.contourArea(c)] = (x, y, w, h) # Add a key - value pair to contour dictionary
delta_value = max(contour_area_stack) # Assign max contour area to delta value
if contour_dictionary: # If contour dictionary is not empty
biggest_contour_coordinates = contour_dictionary[delta_value] # Get coordinates of biggest contour
if biggest_contour_coordinates: # If we have the coordinates it means there is a contour in the frame at the same time
# Parse the coordinates
x = biggest_contour_coordinates[0]
y = biggest_contour_coordinates[1]
w = biggest_contour_coordinates[2]
h = biggest_contour_coordinates[3]
cv2.rectangle(mask_biggest_contour, (x, y), (x + w, y + h), (255, 255, 255), -1) # Draw only one white rectange
if delta_value > (height * width / DELTA_LIMIT_DIVISOR): # If delta value is too much
break # Then break
if len(contour_dictionary) > CONTOUR_LIMIT: # PROBABLY!!! There is a ZOOM, ZOOM + PAN, ZOOM + TILT, ZOOM + ROLL situation (Not just PAN, TILT, ROLL)
# Then restart angle calculation. This time divide frame to quarters
total_crazy_lines = 0 # Crazy line counter
most_common_angle1 = None # Most common angle in general optical flow in shot
angles_array1 = [] # Angle list of all newly created tiny lines
most_common_angle2 = None # Most common angle in general optical flow in shot
angles_array2 = [] # Angle list of all newly created tiny lines
most_common_angle3 = None # Most common angle in general optical flow in shot
angles_array3 = [] # Angle list of all newly created tiny lines
most_common_angle4 = None # Most common angle in general optical flow in shot
angles_array4 = [] # Angle list of all newly created tiny lines
mask_delta = numpy.zeros_like(old_frame) # Create a mask image for drawing purposes (delta frame)
for i,(good_point2,good_point1) in enumerate(list(zip(good_points2,good_points1))): # Get point pairs one by one
# Get coordinates of points
x2,y2 = good_point2.ravel() # New point
x1,y1 = good_point1.ravel() # Previous point
distance = math.hypot(x2 - x1, y2 - y1) # Length of the tiny line between these two points.
if distance >= CRAZY_LINE_DISTANCE: # If the line is not that "tiny" it's CRAZY! xD
total_crazy_lines += 1 # Then increase total crazy line counter's value
angle = math.atan2(y2-y1, x2-x1) # Calculate tangent value of the line (returns Radian)
angle = math.degrees(angle) # Radian to Degree
angle = round(angle) # Round up the degree
if (x1 <= (width / 2)) and (y1 <= (height / 2)):
angles_array1.append(angle) # Append the degree to Angle List
if most_common_angle1 != None: # If there is a most common angle value
if abs(most_common_angle1 - angle) > ABSDIFF_ANGLE: # If Absolute Difference between most common angle and the current line's angle greater than ABDSDIFF_ANGLE value, this line is inconsistent to current general optical flow
cv2.line(mask_delta, (x2,y2),(x1,y1), white_color, LINE_THICKNESS) # Then draw a white line to mask_delta
if (x1 >= (width / 2)) and (y1 <= (height / 2)):
angles_array2.append(angle) # Append the degree to Angle List
if most_common_angle2 != None: # If there is a most common angle | |
always need to do this, because it might be the case that some
transcriptions don't have a date set.
"""
gamma = get_user_gamma(user, self.blossom_api)
if before_time is not None:
# We need to get the offset from the API
offset_response = self.blossom_api.get(
"submission/",
params={
"completed_by__isnull": False,
"completed_by": get_user_id(user),
"complete_time__gte": before_time.isoformat(),
"page_size": 1,
},
)
if not offset_response.ok:
raise BlossomException(offset_response)
# We still need to calculate based on the total gamma
# It may be the case that not all transcriptions have a date set
# Then they are not included in the data nor in the API response
return gamma - rate_data.sum() - offset_response.json()["count"]
else:
# We can calculate the offset from the given data
return gamma - rate_data.sum()
def get_user_history(
self,
user: Optional[BlossomUser],
after_time: Optional[datetime],
before_time: Optional[datetime],
utc_offset: int,
) -> pd.DataFrame:
"""Get a data frame representing the history of the user.
:returns: The history data of the user.
"""
# Get all rate data
time_frame = get_data_granularity(user, after_time, before_time)
rate_data = self.get_all_rate_data(
user, time_frame, after_time, before_time, utc_offset
)
# Calculate the offset for all data points
offset = self.calculate_history_offset(user, rate_data, after_time, before_time)
# Aggregate the gamma score
history_data = get_history_data_from_rate_data(rate_data, offset)
return history_data
    @cog_ext.cog_slash(
        name="history",
        description="Display the history graph.",
        options=[
            create_option(
                name="users",
                description="The users to display the history graph for (max 5)."
                "Defaults to the user executing the command.",
                option_type=3,
                required=False,
            ),
            create_option(
                name="after",
                description="The start date for the history data.",
                option_type=3,
                required=False,
            ),
            create_option(
                name="before",
                description="The end date for the history data.",
                option_type=3,
                required=False,
            ),
        ],
    )
    async def history(
        self,
        ctx: SlashContext,
        usernames: str = "me",
        after: Optional[str] = None,
        before: Optional[str] = None,
    ) -> None:
        """Get the transcription history of the user.

        Plots the cumulative gamma over time for the requested users and
        posts the resulting figure, editing a placeholder message with
        progress while the data is being fetched.

        :param ctx: The slash command invocation context.
        :param usernames: The users to plot; defaults to the invoker.
            NOTE(review): the slash option is registered as "users" but this
            parameter is named "usernames" — confirm the library maps the
            option to this parameter as intended.
        :param after: Optional lower bound of the plotted time range.
        :param before: Optional upper bound of the plotted time range.
        """
        start = datetime.now()
        after_time, before_time, time_str = parse_time_constraints(after, before)
        # The display timezone is extracted from the invoking user's
        # display name.
        utc_offset = extract_utc_offset(ctx.author.display_name)
        # Give a quick response to let the user know we're working on it
        # We'll later edit this message with the actual content
        msg = await ctx.send(
            i18n["history"]["getting_history"].format(
                users=get_initial_username_list(usernames, ctx), time_str=time_str,
            )
        )
        users = get_user_list(usernames, ctx, self.blossom_api)
        if users:
            # Highest gamma first; colors are assigned from this order
            users.sort(key=lambda u: u["gamma"], reverse=True)
            colors = get_user_colors(users)
        min_gammas = []
        max_gammas = []
        fig: plt.Figure = plt.figure()
        ax: plt.Axes = fig.gca()
        fig.subplots_adjust(bottom=0.2)
        ax.set_xlabel(
            i18n["history"]["plot_xlabel"].format(
                timezone=utc_offset_to_str(utc_offset)
            )
        )
        ax.set_ylabel(i18n["history"]["plot_ylabel"])
        # Rotate the date labels so they don't overlap
        for label in ax.get_xticklabels():
            label.set_rotation(32)
            label.set_ha("right")
        ax.set_title(
            i18n["history"]["plot_title"].format(
                users=get_usernames(users, 2, escape=False)
            )
        )
        # NOTE(review): when users is falsy the loop runs once with None,
        # but colors is then undefined below — presumably get_user_list
        # never returns falsy without raising; confirm.
        for index, user in enumerate(users or [None]):
            if users and len(users) > 1:
                # Report per-user progress while fetching
                await msg.edit(
                    content=i18n["history"]["getting_history_progress"].format(
                        users=get_usernames(users),
                        time_str=time_str,
                        count=index + 1,
                        total=len(users),
                    )
                )
            history_data = self.get_user_history(
                user, after_time, before_time, utc_offset
            )
            color = colors[index]
            first_point = history_data.iloc[0]
            last_point = history_data.iloc[-1]
            # Track the extremes for the milestone lines below
            min_gammas.append(first_point.at["gamma"])
            max_gammas.append(last_point.at["gamma"])
            # Plot the graph
            ax.plot(
                "date", "gamma", data=history_data.reset_index(), color=color,
            )
            # Add a point for the last value
            ax.scatter(
                last_point.name, last_point.at["gamma"], color=color, s=4,
            )
            # Label the last value
            ax.annotate(
                int(last_point.at["gamma"]),
                xy=(last_point.name, last_point.at["gamma"]),
                color=color,
            )
        if users:
            # Show milestone lines
            min_value, max_value = min(min_gammas), max(max_gammas)
            delta = (max_value - min_value) * 0.4
            ax = add_milestone_lines(ax, ranks, min_value, max_value, delta)
        if users and len(users) > 1:
            ax.legend([get_username(user, escape=False) for user in users])
        discord_file = create_file_from_figure(fig, "history_plot.png")
        await msg.edit(
            content=i18n["history"]["response_message"].format(
                users=get_usernames(users),
                time_str=time_str,
                duration=get_duration_str(start),
            ),
            file=discord_file,
        )
    @cog_ext.cog_slash(
        name="rate",
        description="Display the rate graph.",
        options=[
            create_option(
                name="users",
                description="The users to display the rate graph for (max 5)."
                "Defaults to the user executing the command.",
                option_type=3,
                required=False,
            ),
            create_option(
                name="after",
                description="The start date for the rate data.",
                option_type=3,
                required=False,
            ),
            create_option(
                name="before",
                description="The end date for the rate data.",
                option_type=3,
                required=False,
            ),
        ],
    )
    async def rate(
        self,
        ctx: SlashContext,
        usernames: str = "me",
        after: Optional[str] = None,
        before: Optional[str] = None,
    ) -> None:
        """Get the transcription rate of the user.

        Plots the daily transcription count for the requested users and
        posts the resulting figure, editing a placeholder message with
        progress while the data is being fetched.

        :param ctx: The slash command invocation context.
        :param usernames: The users to plot; defaults to the invoker.
        :param after: Optional lower bound of the plotted time range.
        :param before: Optional upper bound of the plotted time range.
        """
        start = datetime.now()
        after_time, before_time, time_str = parse_time_constraints(after, before)
        # The display timezone is extracted from the invoking user's
        # display name.
        utc_offset = extract_utc_offset(ctx.author.display_name)
        # Give a quick response to let the user know we're working on it
        # We'll later edit this message with the actual content
        msg = await ctx.send(
            i18n["rate"]["getting_rate"].format(
                users=get_initial_username_list(usernames, ctx), time_str=time_str,
            )
        )
        users = get_user_list(usernames, ctx, self.blossom_api)
        if users:
            # Highest gamma first; colors are assigned from this order
            users.sort(key=lambda u: u["gamma"], reverse=True)
            colors = get_user_colors(users)
        max_rates = []
        fig: plt.Figure = plt.figure()
        ax: plt.Axes = fig.gca()
        fig.subplots_adjust(bottom=0.2)
        ax.set_xlabel(
            i18n["rate"]["plot_xlabel"].format(timezone=utc_offset_to_str(utc_offset))
        )
        ax.set_ylabel(i18n["rate"]["plot_ylabel"])
        # Rotate the date labels so they don't overlap
        for label in ax.get_xticklabels():
            label.set_rotation(32)
            label.set_ha("right")
        ax.set_title(
            i18n["rate"]["plot_title"].format(
                users=get_usernames(users, 2, escape=False)
            )
        )
        for index, user in enumerate(users or [None]):
            if users and len(users) > 1:
                # Report per-user progress while fetching
                await msg.edit(
                    content=i18n["rate"]["getting_rate"].format(
                        users=get_usernames(users),
                        count=index + 1,
                        total=len(users),
                        time_str=time_str,
                    )
                )
            # Daily granularity; the peak day is highlighted and labeled
            user_data = self.get_all_rate_data(
                user, "day", after_time, before_time, utc_offset
            )
            max_rate = user_data["count"].max()
            max_rates.append(max_rate)
            max_rate_point = user_data[user_data["count"] == max_rate].iloc[0]
            color = colors[index]
            # Plot the graph
            ax.plot(
                "date", "count", data=user_data.reset_index(), color=color,
            )
            # Add a point for the max value
            ax.scatter(
                max_rate_point.name, max_rate_point.at["count"], color=color, s=4,
            )
            # Label the max value
            ax.annotate(
                int(max_rate_point.at["count"]),
                xy=(max_rate_point.name, max_rate_point.at["count"]),
                color=color,
            )
        if users:
            # A milestone at every 100 rate
            milestones = [
                dict(threshold=i * 100, color=ranks[i + 2]["color"])
                for i in range(1, 8)
            ]
            ax = add_milestone_lines(ax, milestones, 0, max(max_rates), 40)
        if users and len(users) > 1:
            ax.legend([get_username(user, escape=False) for user in users])
        discord_file = create_file_from_figure(fig, "rate_plot.png")
        await msg.edit(
            content=i18n["rate"]["response_message"].format(
                usernames=get_usernames(users),
                time_str=time_str,
                duration=get_duration_str(start),
            ),
            file=discord_file,
        )
    async def _until_user_catch_up(
        self,
        ctx: SlashContext,
        msg: SlashMessage,
        user: BlossomUser,
        target_username: str,
        start: datetime,
        after_time: datetime,
        before_time: Optional[datetime],
        time_str: str,
    ) -> None:
        """Determine how long it will take the user to catch up with the target user.

        Compares both users' progress within the observation window and
        linearly extrapolates when (and at what gamma) the slower user
        overtakes the faster one, then edits *msg* with an embed.

        :raises InvalidArgumentException: If the target user cannot be
            resolved, or refers to the combined server.
        """
        # Try to find the target user
        try:
            target = get_user(target_username, ctx, self.blossom_api)
        except UserNotFound:
            # This doesn't mean the username is wrong
            # They could have also mistyped a rank
            # So we change the error message to something else
            raise InvalidArgumentException("goal", target_username)
        if not target:
            # Having the combined server as target doesn't make sense
            # Because it includes the current user, they could never reach it
            raise InvalidArgumentException("goal", target_username)
        if user["gamma"] > target["gamma"]:
            # Swap user and target, the target has to have more gamma
            # Otherwise the goal would have already been reached
            user, target = target, user
        user_progress = await _get_user_progress(
            user, after_time, before_time, blossom_api=self.blossom_api
        )
        target_progress = await _get_user_progress(
            target, after_time, before_time, blossom_api=self.blossom_api
        )
        # The observation window used to estimate both users' speeds
        time_frame = (before_time or start) - after_time
        if user_progress <= target_progress:
            # The target is at least as fast, so the gap never closes
            description = i18n["until"]["embed_description_user_never"].format(
                user=get_username(user),
                user_gamma=user["gamma"],
                user_progress=user_progress,
                target=get_username(target),
                target_gamma=target["gamma"],
                target_progress=target_progress,
                time_frame=get_timedelta_str(time_frame),
            )
        else:
            # Calculate time needed
            # gamma gap divided by the relative speed (gamma per second)
            seconds_needed = (target["gamma"] - user["gamma"]) / (
                (user_progress - target_progress) / time_frame.total_seconds()
            )
            relative_time = timedelta(seconds=seconds_needed)
            absolute_time = start + relative_time
            # The gamma the chasing user will have reached at that moment
            intersection_gamma = user["gamma"] + math.ceil(
                (user_progress / time_frame.total_seconds())
                * relative_time.total_seconds()
            )
            description = i18n["until"]["embed_description_user_prediction"].format(
                user=get_username(user),
                user_gamma=user["gamma"],
                user_progress=user_progress,
                target=get_username(target),
                target_gamma=target["gamma"],
                target_progress=target_progress,
                intersection_gamma=intersection_gamma,
                time_frame=get_timedelta_str(time_frame),
                relative_time=get_timedelta_str(relative_time),
                absolute_time=get_discord_time_str(absolute_time),
            )
        # Color the embed to match the target's rank
        color = get_rank(target["gamma"])["color"]
        await msg.edit(
            content=i18n["until"]["embed_message"].format(
                user=get_username(user),
                goal=get_username(target),
                time_str=time_str,
                duration=get_duration_str(start),
            ),
            embed=Embed(
                title=i18n["until"]["embed_title"].format(user=get_username(user)),
                description=description,
                color=discord.Colour.from_rgb(*get_rgb_from_hex(color)),
            ),
        )
@cog_ext.cog_slash(
name="until",
description="Determines the time required to reach the next milestone.",
options=[
create_option(
name="goal",
description="The gamma, flair rank or user to reach. "
"Defaults to the next rank.",
option_type=3,
required=False,
),
create_option(
name="username",
description="The user to make the prediction for. "
"Defaults to the user executing the command.",
option_type=3,
required=False,
),
create_option(
name="after",
description="The start date for the prediction data.",
option_type=3,
required=False,
),
create_option(
name="before",
description="The end date for the prediction data.",
option_type=3,
required=False,
),
],
)
async def _until(
self,
ctx: SlashContext,
goal: Optional[str] = None,
username: str = "me",
after: str = "1 week",
before: Optional[str] = None,
) -> None:
"""Determine how long it will take the user to reach the given goal."""
start = datetime.now(tz=pytz.utc)
after_time, before_time, time_str = parse_time_constraints(after, before)
if not after_time:
# We need a starting point for the calculations
raise InvalidArgumentException("after", after)
# Send a first message to show that the bot is responsive.
# We will edit this message later with the actual content.
msg = await ctx.send(
i18n["until"]["getting_prediction"].format(
user=get_initial_username(username, ctx), time_str=time_str,
)
)
user = get_user(username, ctx, self.blossom_api)
if goal is not None:
try:
# Check if the goal is a gamma value or rank name
goal_gamma, goal_str = parse_goal_str(goal)
except InvalidArgumentException:
# The goal could be a username
if not user:
# If the user is the combined server, a target user doesn't | |
self._s:
return False
self._s.pop()
state, param = self.backtrack, None
while len(self._s) > 0 and len(self.currstate().decsglstk) > 0:
state, param = state() if param is None else state(param)
if len(self._s) == 0:
return False
elif len(self.currstate().decsglstk) == 0:
return True
    def select(self):
        '''Selecting a clause for execution (ISO 7.7.7).

        Dispatches the current activator, in priority order, to a control
        construct, a built-in predicate, or a user-defined procedure from
        the knowledge base.  Returns the interpreter's next
        (state-function, argument) pair.
        '''
        #activator = convert_to_goal(self.curract())
        activator = self.curract()
        # 1. Control constructs
        p = search_control_construct(activator)
        if p:
            self.currstate().bi = 'ctrl'
            return self.execute_ctrl, p
        # 2. Built-in predicates
        p = search_builtin_predicate(activator)
        if p:
            self.currstate().bi = 'bip'
            return self.execute_bip, p
        # 3. User-defined procedures
        p = self._kb.search(activator)
        if p is None:
            # Unknown procedure: behavior is governed by the 'unknown' flag
            unknown = _FLAGS['unknown'].value
            if unknown == 'error':
                pi = Compound('/', Atomic(activator.name), Atomic(activator.arity))
                e = PrologExistenceError('procedure', pi)
                return self.throw, e.error
            elif unknown == 'warning':
                # Emit a warning, then make the goal fail
                pi = activator.predicate_indicator()
                message = 'Warning: the procedure {0} is undefined\n'.format(pi)
                self._write(message)
                self.currdecsgl().activator = Atomic.FAIL
                return self.select, None
            elif unknown == 'fail':
                self.currdecsgl().activator = Atomic.FAIL
                return self.select, None
            # NOTE(review): any other flag value falls through and returns
            # None implicitly — confirm this is unreachable.
        else:
            clauses = p.clauses()
            self.currstate().bi = ('up', clauses)
            return self.execute_up, clauses
def execute_ctrl(self, ctrl):
return ctrl().execute(self)
def reexecute_ctrl(self, ctrl):
return ctrl().reexecute(self)
    def execute_bip(self, bip):
        '''Executing a built-in predicate (ISO 7.7.12)'''
        predicate = bip(self._kb)
        # Remember the instance so backtracking can call reexecute() later
        self.curract().bip = predicate
        # Push a copy of the current state as the choice point
        ccs = copy(self.currstate())
        self._s.append(ccs)
        activator = self.curract()
        try:
            # Atoms carry no arguments; compounds keep them in value[1:]
            raw_args = [] if isatom(activator) else activator.value[1:]
            args = (deref(arg) for arg in raw_args)
            result = predicate.execute(*args)
        except PrologError as e:
            return self.throw, e.error
        # Rewrite the copied goal's activator with the predicate's outcome
        ccg = ccs.decsglstk[-1]
        ccg.activator = Atomic.TRUE if result else Atomic.FAIL
        if predicate.substitution:
            # Propagate the new bindings to the rest of the goal stack
            self.currstate().subst.update(predicate.substitution)
            for subgoal in self.currstate().decsglstk[:-1]:
                subgoal.activator.apply(predicate.substitution)
        return self.select, None
    def execute_up(self, clauses):
        '''Executing a user-defined procedure (ISO 7.7.10 and 7.7.11)'''
        if not clauses:
            # No clauses left to try: discard the state and backtrack
            self._s.pop()
            return self.backtrack, None
        # Rename the candidate clause apart before attempting unification
        c = renamed_copy(clauses[0], self.currstate().s_index)
        mgu = unify(c.head(), self.curract())
        if mgu is not None:
            c.body().apply(mgu)
            ccs = copy(self.currstate())
            # get a copy of the current goal in currstate
            ccg = ccs.decsglstk[-1]
            ccg.activator.apply(mgu)
            ccg.activator = c.body() # modified by applying MGU
            ccs.bi = 'nil'
            ccs.subst.update(mgu)
            # Remember the state index to cut back to (the parent state)
            ccs.decsglstk[-1].cutparent = self.currstate().s_index - 1
            self._s.append(ccs)
            # Propagate the bindings to the remaining subgoals
            for subgoal in self.currstate().decsglstk[:-1]:
                subgoal.activator.apply(mgu)
            return self.select, None
        else:
            # Head did not unify: move on to the next clause
            self.currstate().bi = ('up', clauses[1:])
            return self.execute_up, clauses[1:]
    def backtrack(self):
        '''Backtracking (ISO 7.7.8)'''
        if len(self._s) > 0:
            bi = self.currstate().bi
            if isinstance(bi, tuple):
                # User-defined procedure: retry with the remaining clauses
                clauses = bi[1]
                self.currstate().bi = ('up', clauses[1:])
                return self.execute_up, clauses[1:]
            elif bi == 'bip':
                # Built-in predicate: redo it if it supports reexecution
                cs = self.currstate()
                if hasattr(self.curract().bip, 'reexecute'):
                    ccs = copy(cs)
                    self._s.append(ccs)
                    predicate = self.curract().bip
                    ccg = ccs.decsglstk[-1]
                    # Atoms have no arguments; compounds keep them in value[1:]
                    raw_args = [] if isatom(ccg.activator) else ccg.activator.value[1:]
                    args = (deref(arg) for arg in raw_args)
                    if predicate.reexecute(*args):
                        if predicate.substitution:
                            # Propagate the new bindings to the goal stack
                            self.currstate().subst.update(predicate.substitution)
                            for subgoal in self.currstate().decsglstk[:-1]:
                                subgoal.activator.apply(predicate.substitution)
                        cg = cs.decsglstk[-1]
                        cg.activator.bip = ccg.activator.bip
                        ccg.activator = Atomic.TRUE
                    else:
                        # Reexecution failed: drop the copy and fail the goal
                        self._s.pop()
                        ccs = self.currstate()
                        ccg = ccs.decsglstk[-1]
                        ccg.activator = Atomic.FAIL
                else:
                    # Non-redoable built-in: simply fail the goal
                    cg = cs.decsglstk[-1]
                    cg.activator = Atomic.FAIL
                return self.select, None
            elif bi == 'ctrl':
                # Control construct: delegate to its reexecute handler
                p = search_control_construct(self.curract())
                return self.reexecute_ctrl, p
            elif bi == 'nil':
                return self.select, None
        # NOTE(review): returns None implicitly when the stack is empty or
        # bi has an unexpected value — the driver loop appears to check the
        # stack length before calling; confirm.
def throw(self, error):
error = Compound('error', error, Atomic(0))
self.currstate().decsglstk[-1].activator = Compound('throw', error)
self.currstate().bi = 'nil'
return self.select, None
    def _consult(self, theory):
        '''Read *theory* term by term, executing directives as they appear
        and asserting every other clause into the knowledge base.'''
        parser = PrologParser(theory)
        term = parser.read_term()
        while term:
            if term.isdirective():
                # term.value[1] holds the directive goal itself
                # (presumably the argument of ':-'/1 — confirm)
                d = term.value[1]
                directive = search_directives(d)
                if directive is None:
                    message = 'Warning: the directive {} is unknown\n'
                    self._write(message.format(d.predicate_indicator()))
                else:
                    # Invoke the handler with the directive's arguments
                    directive(self, *d.value[1:])
            else:
                self._kb.append(term)
            term = parser.read_term()
    def _clear(self):
        # Drop all consulted clauses by replacing the knowledge base wholesale.
        self._kb = Database()
    def _write(self, message):
        '''Write *message* to the Prolog standard output stream.'''
        # Local import — presumably to avoid a circular import with the
        # builtin package; confirm before hoisting to module level.
        from .builtin import io
        io.STANDARD_OUTPUT_STREAM.write(message)
def search_directives(procedure):
    '''Return the handler for a supported directive, keyed by the
    directive's predicate indicator, or None if it is unknown.'''
    known_directives = {'dynamic/1': dynamic_1}
    return known_directives.get(procedure.predicate_indicator())
def dynamic_1(engine, *indicators):
    '''A directive dynamic(PI) where PI is a predicate indicator,
    a predicate indicator sequence, or a predicate indicator list
    specifies that each user-defined procedure indicated by PI is
    dynamic.
    No procedure indicated by PI shall be a control construct or
    a built-in predicate.
    The first directive dynamic(PI) that specified a user-defined
    procedure P to be dynamic shall precede all clauses for P.
    Further, if P is defined to be a dynamic procedure in one
    Prolog text, then a directive dynamic(PI) indicating P shall
    occur in every Prolog text which contain clauses for P.'''
    for indicator in indicators:
        # indicator.value[1] is the name atom, indicator.value[2] the arity;
        # build a most-general goal name(_, ..., _) with that arity
        t = tuple(Variable('_') for i in range(indicator.value[2].value))
        witness = Compound(indicator.value[1].name, *t)
        # Dynamic declaration is rejected for control constructs and built-ins
        if not search_control_construct(witness) and not search_builtin_predicate(witness):
            # Only register the indicator if no clauses exist yet for it
            if not engine._kb.search(witness):
                pi = '{}/{}'.format(indicator.value[1], indicator.value[2])
                engine._kb.insert(pi)
def _merge_mgu(mgu, sub):
    '''Fold the sub-unifier *sub* into *mgu* in place.

    Returns False when *sub* is None (a failed sub-unification) or when a
    variable bound in both substitutions cannot be reconciled; True
    otherwise.

    NOTE: this mirrors the historical merge behavior exactly, including
    updating *mgu* with the whole of *sub* once per key that is not yet
    bound (rather than adding the single binding).
    '''
    if sub is None:
        return False
    for name, term in sub.items():
        if name in mgu:
            # The variable is already bound: reconcile the two bindings
            reconciled = unify(term, mgu[name])
            if reconciled is None:
                return False
            mgu.update(reconciled)
        else:
            mgu.update(sub)
    return True
def unify(c1, c2):
    '''Unifies two clauses, producing a Most General Unifier, yet
    avoiding to apply the MGU to each clause. The MGU is returned
    as a dictionary of Variable names and Term couples, or None if
    unification fails.'''
    from copy import deepcopy
    # Work on copies so neither input term is mutated
    c1 = deepcopy(c1)
    c2 = deepcopy(c2)
    # flat variables
    if isinstance(c1, Variable):  # TODO is this deref()?
        c1 = c1.binding()
    if isinstance(c2, Variable):  # TODO is this deref()?
        c2 = c2.binding()
    # atomic case
    if isinstance(c1, Atomic):
        if isinstance(c2, Variable):
            mgu = Substitution()
            mgu[c2.name] = c1
            return mgu
        elif isinstance(c2, Atomic):
            return Substitution() if c1 == c2 else None
        else:
            return None
    # variable case
    if isinstance(c1, Variable):
        mgu = Substitution()
        if isinstance(c2, Variable):
            if c1 == c2:
                return mgu
            # Prefer binding renamed (fresh) variables to original ones
            if isrenamed(c2.name):
                mgu[c2.name] = c1
                return mgu
            if c2 > c1:
                mgu[c2.name] = c1
            else:
                mgu[c1.name] = c2
            return mgu
        mgu[c1.name] = c2
        return mgu
    # compound case
    if isinstance(c1, Compound):
        if isinstance(c2, Variable):
            mgu = Substitution()
            mgu[c2.name] = c1
            return mgu
        elif isinstance(c2, Compound):
            if c1.predicate_indicator() != c2.predicate_indicator():
                return None
            # Unify argument by argument, merging the sub-unifiers
            mgu = Substitution()
            for a1, a2 in zip(c1.value[1:], c2.value[1:]):
                if not _merge_mgu(mgu, unify(a1, a2)):
                    return None
            return mgu
        elif isinstance(c2, List):
            # A compound './2' term against a List: unify head and tail
            if c1.predicate_indicator() != c2.predicate_indicator():
                return None
            mgu = Substitution()
            if not _merge_mgu(mgu, unify(c1.value[1], c2.head)):
                return None
            if not _merge_mgu(mgu, unify(c1.value[2], c2.tail)):
                return None
            return mgu
        else:
            return None
    # list case
    if isinstance(c1, List):
        if isinstance(c2, Variable):
            mgu = Substitution()
            mgu[c2.name] = c1
            return mgu
        elif isinstance(c2, List):
            # Two lists always share the same functor: unify head and tail
            mgu = Substitution()
            if not _merge_mgu(mgu, unify(c1.head, c2.head)):
                return None
            if not _merge_mgu(mgu, unify(c1.tail, c2.tail)):
                return None
            return mgu
        elif isinstance(c2, Compound):
            # A List against a compound './2' term: unify head and tail
            if c1.predicate_indicator() != c2.predicate_indicator():
                return None
            mgu = Substitution()
            if not _merge_mgu(mgu, unify(c1.head, c2.value[1])):
                return None
            if not _merge_mgu(mgu, unify(c1.tail, c2.value[2])):
                return None
            return mgu
        else:
            return None
    # NOTE: any other term type falls through and returns None implicitly,
    # matching the original behavior.
def renamed_copy(clause, index=None):
    '''Return a copy of *clause* with its variables renamed, delegating
    to the clause's own rename() method.'''
    fresh = clause.rename(index)
    return fresh
def copy(state):
    '''Return a deep copy of an interpreter *state*.

    Note: this deliberately shadows the stdlib ``copy`` name inside
    this module.'''
    import copy as _stdcopy
    return _stdcopy.deepcopy(state)
def deref(term):
    '''Follow a variable to the term it is bound to; any non-variable
    term is returned unchanged.'''
    return term.binding() if isinstance(term, Variable) else term
###
### TODO Place control constructs stuff in its own module?
### Use module.__dict__ to access a dictionary of name:class couples
### or getattr(__import__(module), name) to get the class object
###
class Fail_0:
    '''fail/0
    fail is false.'''
    def execute(self, engine):
        # Failure discards the current execution state and resumes
        # backtracking.
        states = engine._s
        states.pop()
        return engine.backtrack, None
class True_0:
    '''true/0
    true is true.'''
    def execute(self, engine):
        # Success: discard the satisfied goal and clear the builtin marker.
        del engine.currstate().decsglstk[-1]
        engine.currstate().bi = 'nil'
        return engine.select, None
class Call_1:
'''call/1
call(G) is true iff G represents a goal which is true.
When G contains ! as a subgoal, the effect of ! does
not extend outside G.'''
def execute(self, engine):
ccs = copy(engine.currstate())
ccs.bi = 'nil'
currdecsgl = ccs.decsglstk.pop()
g = currdecsgl.activator.value[1]
if isinstance(g, Variable) and g.isfree():
error = Atomic('instantiation_error')
return engine.throw, error
if isinstance(g, Atomic) and g._isnumber():
error = Compound('type_error', g)
return engine.throw, error
goal = convert_to_goal(g)
if not goal:
error = Compound('type_error', Atomic('callable'), g)
return engine.throw, error
n = engine.currstate().s_index - 1
| |
operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_default_column_ind: The operator to apply to the field default_column_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. default_column_ind: A flag indicating if this is default column/ For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_default_column_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_default_column_ind: If op_default_column_ind is specified, the field named in this input will be compared to the value in default_column_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_default_column_ind must be specified if op_default_column_ind is specified.
:type val_f_default_column_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_default_column_ind: If op_default_column_ind is specified, this value will be compared to the value in default_column_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_default_column_ind must be specified if op_default_column_ind is specified.
:type val_c_default_column_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_default_sort_desc_ind: The operator to apply to the field default_sort_desc_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. default_sort_desc_ind: A flag indicating if default sort order is descending. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_default_sort_desc_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_default_sort_desc_ind: If op_default_sort_desc_ind is specified, the field named in this input will be compared to the value in default_sort_desc_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_default_sort_desc_ind must be specified if op_default_sort_desc_ind is specified.
:type val_f_default_sort_desc_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_default_sort_desc_ind: If op_default_sort_desc_ind is specified, this value will be compared to the value in default_sort_desc_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_default_sort_desc_ind must be specified if op_default_sort_desc_ind is specified.
:type val_c_default_sort_desc_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: Attribute description For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_gui_type: The operator to apply to the field gui_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. gui_type: Data type used in GUI to handle the attribute. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_gui_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_gui_type: If op_gui_type is specified, the field named in this input will be compared to the value in gui_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_gui_type must be specified if op_gui_type is specified.
:type val_f_gui_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_gui_type: If op_gui_type is specified, this value will be compared to the value in gui_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_gui_type must be specified if op_gui_type is specified.
:type val_c_gui_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_hidden_ind: The operator to apply to the field hidden_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. hidden_ind: A flag indicating if attribute is hidden For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_hidden_ind: If op_hidden_ind is specified, the field named in this input will be compared to the value in hidden_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_hidden_ind must be specified if op_hidden_ind is specified.
:type val_f_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_hidden_ind: If op_hidden_ind is specified, this value will be compared to the value in hidden_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_hidden_ind must be specified if op_hidden_ind is specified.
:type val_c_hidden_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for this attribute. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as | |
score in pssm.search(splice_site, threshold=minthresh):
try:
pwm_scores[spliceend][splicetype].append((position, score))
except KeyError:
pwm_scores[spliceend][splicetype]=[(position, score)]
matching_end_classifications=None
for key in pwm_scores["acceptor"].keys():
if key in pwm_scores["donor"]:
thistuple = (key, pwm_scores["donor"][key], pwm_scores["acceptor"][key])
if matching_end_classifications is None:
matching_end_classifications = thistuple
else:
if (key.startswith("U12") and matching_end_classifications[0].startswith("U12")) or (key.startswith("U2") and matching_end_classifications[0].startswith("U2")):
ascore=None
dscore=None
for pos, score in thistuple[1]:
if ascore is None or score > ascore:
ascore = 2**score
for pos, score in thistuple[2]:
if dscore is None or score > dscore:
dscore = 2**score
thisscore = ascore+dscore
prevscore = 2**(matching_end_classifications[1][0][1])+2**(matching_end_classifications[2][0][1])
if prevscore<thisscore:
matching_end_classifications = thistuple
else:
U12score = None
U2score=None
if key.startswith("U12"):
for pos, score in thistuple[1]:
if U12score is None or score > U12score:
U12score = 2**score
for pos, score in matching_end_classifications[1]:
if U2score is None or score > U2score:
U2score = 2**score
if U12score>25*U2score:
matching_end_classifications = thistuple
elif matching_end_classifications[0].startswith("U12"):
for pos, score in matching_end_classifications[1]:
if U12score is None or score > U12score:
U12score = 2**score
for pos, score in thistuple[1]:
if U2score is None or score > U2score:
U2score = 2**score
if U12score<25*U2score:
matching_end_classifications = thistuple
return(matching_end_classifications, pwm_scores)
def charcterizeIntrons(read_iterator, genome, annotation_splices=None, splicepad=0,
min_intron_length=20, pwms=None, pwmscorethreshold=3.0, logger=None, LOG_EVERY_N=10000,
pwm_species=""):
"""Return a dictionary {readID:[(start, stop)...]}
Listing the intronic sites for each read (identified by 'N' in the cigar strings).
read_iterator can be the result of a .fetch(...) call.
Or it can be a generator filtering such reads. Example
samfile.find_introns((read for read in samfile.fetch(...) if read.is_reverse)
"""
    def locNearestCanonicals(donor, acceptor, padsize, refname, start, stop):
        """ given donor and acceptor regions, locate the nearest canonical splice and
        return the details """
        import re, numpy, itertools
        # find all GT dinucleotides in the donor region and AG dinucleotides
        # in the acceptor region, then express their positions relative to
        # the actually mapped splice (offset 0 == the mapped site itself)
        donor_canonical_matches = numpy.array([m.start() for m in re.finditer('(?=GT)', str(donor))])-padsize
        acceptor_canonical_matches = numpy.array([m.start() for m in re.finditer('(?=AG)', str(acceptor))])-padsize
        # every donor/acceptor offset combination is a candidate splice
        possible_canonical_pairs = list(itertools.product(donor_canonical_matches, acceptor_canonical_matches))
        # (0, 0) is the mapped splice itself, so it is excluded
        possible_canonical_splices = ["{}:{}-{}".format(refname, x[0]+start, x[1]+stop) for x in possible_canonical_pairs if x!=(0,0)]
        return(possible_canonical_splices)
def locNearestAnnotated(keystr, annotation_splices_dict, padsize, regex_match = re.compile("^(.+?):(.+?)-(.+?)$")):
""" locates any nearby annotated splice sites based on their positions using a
pre-seperated dictionary and binary search """
keyvals = regex_match.match(keystr).groups()
target_start = int(keyvals[1])
target_stop = int(keyvals[2])
nearest_splices=[]
if keyvals[0] in annotation_splices_dict.keys():
starts = annotation_splices_dict[keyvals[0]]["starts"]
stops = annotation_splices_dict[keyvals[0]]["stops"]
i = bisect_left(starts, target_start-padsize)
while i<1E10:
if i==len(starts):
break
if starts[i]>target_start+padsize:
break
if starts[i]>target_start-padsize and starts[i]<target_start+padsize and stops[i]>target_stop-padsize and stops[i]<target_stop+padsize:
nearest_splices.append("{}:{}-{}".format(keyvals[0], starts[i], stops[i]))
i+=1
return(nearest_splices)
# setup the output data structures
read_details={}
splice_details={}
splice_summary_numbers={"canonical_splices":0, "non_canonical_splices":0}
if annotation_splices is not None and pwms is not None:
splice_summary_numbers={"canonical_splices":0,
"non_canonical_splices":0,
"annotated_canonical_splices":0,
"annotated_canonical_undef_splices":0,
"annotated_canonical_U2_splices":0,
"annotated_canonical_U12_splices":0,
"annotated_non_canonical_splices":0,
"annotated_non_canonical_undef_splices":0,
"annotated_non_canonical_U2_splices":0,
"annotated_non_canonical_U12_splices":0,
"novel_canonical_splices":0,
"novel_canonical_undef_splices":0,
"novel_canonical_U2_splices":0,
"novel_canonical_U12_splices":0,
"novel_canonical_splices_with_nearby_annotated_canonical":0,
"novel_canonical_splices_with_nearby_annotated_non_canonical":0,
"novel_non_canonical_splices":0,
"novel_non_canonical_undef_splices":0,
"novel_non_canonical_U2_splices":0,
"novel_non_canonical_U12_splices":0,
"novel_non_canonical_splices_with_nearby_annotated_canonical":0,
"novel_non_canonical_splices_with_nearby_annotated_non_canonical":0,
"novel_non_canonical_splices_with_nearby_novel_canonical":0
}
elif annotation_splices is not None and pwms is None:
splice_summary_numbers={"canonical_splices":0,
"non_canonical_splices":0,
"annotated_canonical_splices":0,
"annotated_non_canonical_splices":0,
"novel_canonical_splices":0,
"novel_canonical_splices_with_nearby_annotated_canonical":0,
"novel_canonical_splices_with_nearby_annotated_non_canonical":0,
"novel_non_canonical_splices":0,
"novel_non_canonical_splices_with_nearby_annotated_canonical":0,
"novel_non_canonical_splices_with_nearby_annotated_non_canonical":0,
"novel_non_canonical_splices_with_nearby_novel_canonical":0,
}
elif annotation_splices is None and pwms is not None:
splice_summary_numbers={"canonical_splices":0,
"non_canonical_splices":0,
"canonical_undef_splices":0,
"canonical_U2_splices":0,
"canonical_U12_splices":0,
"non_canonical_undef_splices":0,
"non_canonical_U2_splices":0,
"non_canonical_U12_splices":0}
if annotation_splices is not None:
# split the annotation information by chromosome and position for efficient searching
regex_match = re.compile("^(.+?):(.+?)-(.+?)$")
split_annotation_splices = [regex_match.match(x).groups() for x in sorted(annotation_splices.keys())]
annotation_splices_dict = {}
for x,y,z in split_annotation_splices:
try:
annotation_splices_dict[x]["values"].append((int(y),int(z)))
except KeyError:
annotation_splices_dict[x]={"values":[]}
annotation_splices_dict[x]["values"].append((int(y),int(z)))
for key in annotation_splices_dict.keys():
annotation_splices_dict[key]["values"].sort(key=lambda r: r[0])
annotation_splices_dict[key]["starts"] = [r[0] for r in annotation_splices_dict[key]["values"]]
annotation_splices_dict[key]["stops"] = [r[1] for r in annotation_splices_dict[key]["values"]]
# Process the aligned reads looking for splices and classifying them
nlogs=1
counter=0
ts = time.time()
t0 = time.time()
for read in read_iterator:
current_read_pos = read.reference_start
thisread_details={"canonical_splices":[],
"non_canonical_splices":[],
"is_reverse":False}
if annotation_splices is not None:
annot_details={"annotated_splices":[],
"novel_splices":[],
"transcripts_matching":{}}
thisread_details.update(annot_details)
if pwms is not None:
classification_details={"unclassified_splices":[]}
for key in pwms.keys():
pwm = pwms[key]
spliceend=None
if "acceptor" in key:
spliceend = "acceptor"
elif "donor" in key:
spliceend = "donor"
splicetype = re.sub("_{}".format(spliceend),"",key)
classification_details["{}_splices".format(splicetype)]=[]
thisread_details.update(classification_details)
# identify and process each splice in the read
for tup in read.cigartuples:
if tup[0]==3 and tup[1]>min_intron_length:
# define the donor and acceptor splice regions in which we will search for alternative splices.
donor_splice_site=None
acceptor_splice_site=None
strand = "+"
if read.is_reverse:
strand = "-"
thisread_details["is_reverse"]=True
donor_splice_site = genome[read.reference_name][current_read_pos+tup[1]-2-splicepad:current_read_pos+tup[1]+splicepad].reverse_complement()
acceptor_splice_site = genome[read.reference_name][current_read_pos-splicepad:current_read_pos+2+splicepad].reverse_complement()
else:
acceptor_splice_site = genome[read.reference_name][current_read_pos+tup[1]-2-splicepad:current_read_pos+tup[1]+splicepad]
donor_splice_site = genome[read.reference_name][current_read_pos-splicepad:current_read_pos+2+splicepad]
# define the splice genomic coordinates and the terminal dinucleotides
keystr = "{}:{}-{}".format(read.reference_name,
current_read_pos,
current_read_pos+tup[1])
donor_splice_string = donor_splice_site.seq[splicepad:splicepad+2]
acceptor_splice_string = acceptor_splice_site.seq[splicepad:splicepad+2]
# if the splice has been seen before then we can just record that its been seen in a new read
# otherwise we have to classify it.
if keystr in splice_details:
splice_details[keystr]["reads"].append(read.query_name)
else:
splice_details[keystr]={"reads":[read.query_name],
"sites":(donor_splice_string, acceptor_splice_string)}
# classify splice as cannonical or not.
if donor_splice_string!="GT" or acceptor_splice_string!="AG":
splice_details[keystr]["is_canonical"]=False
else:
splice_details[keystr]["is_canonical"]=True
# classify the mapped site as U2/U12 - we only need to do this the first time this splice is seen
if pwms is not None:
classification, options = classifySplice(pwms, read.reference_name, current_read_pos,
current_read_pos+tup[1], strand, genome,
minthresh=pwmscorethreshold)
splice_details[keystr]["U2/U12_classification"]=classification
splice_details[keystr]["U2/U12_scores"]=options
# classify if splice is annotated or not
if annotation_splices is not None:
if keystr in annotation_splices.keys():
splice_details[keystr]["is_annotated"]=True
else:
splice_details[keystr]["is_annotated"]=False
# locate nearby annotated splice sites - the +1 here is so that we include the dinucleotide
# motif and then the pad region around it...
nearby_annotated_splices = locNearestAnnotated(keystr, annotation_splices_dict, splicepad+1)
# if the splice is not annotated, search for nearby splice sites
nearby_canonical_splices = locNearestCanonicals(donor_splice_site.seq, acceptor_splice_site.seq,
splicepad, read.reference_name, current_read_pos,
current_read_pos+tup[1])
annot_alt_cannonical=[]
annot_alt_cannonical_classification=[]
annot_alt_cannonical_scores=[]
novel_alt_cannonical=[]
novel_alt_cannonical_classification=[]
novel_alt_cannonical_scores=[]
for alt in nearby_canonical_splices:
annotated=False
if alt in annotation_splices.keys():
nearby_annotated_splices.remove(alt)
annotated=True
# classify the alternative splices as U2/U12
if pwms is not None:
match = re.match("(.+):([0-9]+)-([0-9]+)", alt)
classification, options = classifySplice(pwms, match.group(1), int(match.group(2)),
int(match.group(3)), strand, genome,
minthresh=pwmscorethreshold)
if annotated:
annot_alt_cannonical.append(alt)
annot_alt_cannonical_classification.append(classification)
annot_alt_cannonical_scores.append(options)
else:
novel_alt_cannonical.append(alt)
novel_alt_cannonical_classification.append(classification)
novel_alt_cannonical_scores.append(options)
splice_details[keystr]["annotated_alt_canonical"]=annot_alt_cannonical
splice_details[keystr]["annotated_alt_non_canonical"]=nearby_annotated_splices
splice_details[keystr]["annotated_alt_canonical_U2/U12_classification"]=annot_alt_cannonical_classification
splice_details[keystr]["annotated_alt_canonical_U2/U12_scores"]=annot_alt_cannonical_scores
splice_details[keystr]["novel_alt_canonical"]=novel_alt_cannonical
splice_details[keystr]["novel_alt_canonical_U2/U12_classification"]=novel_alt_cannonical_classification
splice_details[keystr]["novel_alt_canonical_U2/U12_scores"]=novel_alt_cannonical_scores
# build summary information
try:
if splice_details[keystr]["is_canonical"]:
splice_summary_numbers["canonical_splices"]+=1
else:
splice_summary_numbers["non_canonical_splices"]+=1
if "is_annotated" in splice_details[keystr].keys() and "U2/U12_classification" in splice_details[keystr].keys():
if splice_details[keystr]["is_annotated"] and splice_details[keystr]["is_canonical"]:
splice_summary_numbers["annotated_canonical_splices"]+=1
if splice_details[keystr]["U2/U12_classification"] is None:
splice_summary_numbers["annotated_canonical_undef_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U2"):
splice_summary_numbers["annotated_canonical_U2_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U12"):
splice_summary_numbers["annotated_canonical_U12_splices"]+=1
elif splice_details[keystr]["is_annotated"] and not splice_details[keystr]["is_canonical"]:
splice_summary_numbers["annotated_non_canonical_splices"]+=1
if splice_details[keystr]["U2/U12_classification"] is None:
splice_summary_numbers["annotated_non_canonical_undef_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U2"):
splice_summary_numbers["annotated_non_canonical_U2_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U12"):
splice_summary_numbers["annotated_non_canonical_U12_splices"]+=1
elif not splice_details[keystr]["is_annotated"] and splice_details[keystr]["is_canonical"]:
splice_summary_numbers["novel_canonical_splices"]+=1
if len(splice_details[keystr]["annotated_alt_canonical"])>0:
splice_summary_numbers["novel_canonical_splices_with_nearby_annotated_canonical"]+=1
if len(splice_details[keystr]["annotated_alt_non_canonical"])>0:
splice_summary_numbers["novel_canonical_splices_with_nearby_annotated_non_canonical"]+=1
if splice_details[keystr]["U2/U12_classification"] is None:
splice_summary_numbers["novel_canonical_undef_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U2"):
splice_summary_numbers["novel_canonical_U2_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U12"):
splice_summary_numbers["novel_canonical_U12_splices"]+=1
elif not splice_details[keystr]["is_annotated"] and not splice_details[keystr]["is_canonical"]:
splice_summary_numbers["novel_non_canonical_splices"]+=1
if len(splice_details[keystr]["annotated_alt_canonical"])>0:
splice_summary_numbers["novel_non_canonical_splices_with_nearby_annotated_canonical"]+=1
if len(splice_details[keystr]["annotated_alt_non_canonical"])>0:
splice_summary_numbers["novel_non_canonical_splices_with_nearby_annotated_non_canonical"]+=1
if len(splice_details[keystr]["novel_alt_canonical"])>0:
splice_summary_numbers["novel_non_canonical_splices_with_nearby_novel_canonical"]+=1
if splice_details[keystr]["U2/U12_classification"] is None:
splice_summary_numbers["novel_non_canonical_undef_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U2"):
splice_summary_numbers["novel_non_canonical_U2_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U12"):
splice_summary_numbers["novel_non_canonical_U12_splices"]+=1
elif "annotated" in splice_details[keystr].keys() and not "U2/U12_classification" in splice_details[keystr].keys():
if splice_details[keystr]["is_annotated"] and splice_details[keystr]["is_canonical"]:
splice_summary_numbers["annotated_canonical_splices"]+=1
elif splice_details[keystr]["is_annotated"] and not splice_details[keystr]["is_canonical"]:
splice_summary_numbers["annotated_non_canonical_splices"]+=1
elif not splice_details[keystr]["is_annotated"] and splice_details[keystr]["is_canonical"]:
splice_summary_numbers["novel_canonical_splices"]+=1
if len(splice_details[keystr]["annotated_alt_canonical"])>0:
splice_summary_numbers["novel_canonical_splices_with_nearby_annotated_canonical"]+=1
if len(splice_details[keystr]["annotated_alt_non_canonical"])>0:
splice_summary_numbers["novel_canonical_splices_with_nearby_annotated_non_canonical"]+=1
elif not splice_details[keystr]["is_annotated"] and not splice_details[keystr]["is_canonical"]:
splice_summary_numbers["novel_non_canonical_splices"]+=1
if len(splice_details[keystr]["annotated_alt_canonical"])>0:
splice_summary_numbers["novel_non_canonical_splices_with_nearby_annotated_canonical"]+=1
if len(splice_details[keystr]["annotated_alt_non_canonical"])>0:
splice_summary_numbers["novel_non_canonical_splices_with_nearby_annotated_non_canonical"]+=1
if len(splice_details[keystr]["novel_alt_canonical"])>0:
splice_summary_numbers["novel_non_canonical_splices_with_nearby_novel_canonical"]+=1
elif "annotated" not in splice_details[keystr].keys() and "U2/U12_classification" in splice_details[keystr].keys():
if splice_details[keystr]["is_canonical"]:
if splice_details[keystr]["U2/U12_classification"] is None:
splice_summary_numbers["canonical_undef_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U2"):
splice_summary_numbers["canonical_U2_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U12"):
splice_summary_numbers["canonical_U12_splices"]+=1
elif not splice_details[keystr]["is_canonical"]:
if splice_details[keystr]["U2/U12_classification"] is None:
splice_summary_numbers["non_canonical_undef_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U2"):
splice_summary_numbers["non_canonical_U2_splices"]+=1
elif splice_details[keystr]["U2/U12_classification"][0].startswith("U12"):
splice_summary_numbers["non_canonical_U12_splices"]+=1
except:
print("splice_details")
for key in sorted(splice_details.keys()):
print(key, splice_details[key])
print("splice_summary_numbers")
for key in sorted(splice_summary_numbers.keys()):
print(key, splice_summary_numbers[key])
raise
if splice_details[keystr]["is_canonical"] :
thisread_details["canonical_splices"].append(keystr)
else:
thisread_details["non_canonical_splices"].append(keystr)
if annotation_splices is not None:
if splice_details[keystr]["is_annotated"]:
thisread_details["annotated_splices"].append(keystr)
thisread_details["transcripts_matching"][keystr] = annotation_splices[keystr]["transcripts"]
else:
thisread_details["novel_splices"].append(keystr)
if pwms is not None:
if splice_details[keystr]["U2/U12_classification"] is not None:
thisread_details["{}_splices".format(splice_details[keystr]["U2/U12_classification"][0])].append(keystr)
else:
thisread_details["unclassified_splices"].append(keystr)
#print(read.query_name, current_read_pos, current_read_pos+tup[1], donor_splice_site.seq, acceptor_splice_site.seq, read_is_all_canonical)
current_read_pos += tup[1]
elif tup[0]==0 or tup[0]==2:
current_read_pos += tup[1]
read_details[read.query_name] = thisread_details
counter+=1
if (counter % LOG_EVERY_N)==0:
msg="processed {these} reads (dt = {sec:.2f}s) ...".format(these=(nlogs*LOG_EVERY_N), sec=time.time()-t0)
if logger is not None:
logger.info(msg)
else:
print(msg)
nlogs+=1
t0=time.time()
msg = "Finished processed {these} reads (dt = {sec:.2f}s).".format(these=(nlogs*LOG_EVERY_N)+counter,
sec=time.time()-t0)
return(read_details, splice_details, splice_summary_numbers)
def getAnnotationIntrons(annot, genome, chr_synonym_dic={}, logger=None, LOG_EVERY_N=10000):
""" return a dictionary with all the introns in a given annotation """
if logger is not None:
logger.info("parsing transcript exon structures....")
annot.clear_all()
annot.set_feature("exons")
exons | |
def enterParallel_path_description(self, ctx:SystemVerilogParser.Parallel_path_descriptionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#parallel_path_description.
def exitParallel_path_description(self, ctx:SystemVerilogParser.Parallel_path_descriptionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#full_path_description.
def enterFull_path_description(self, ctx:SystemVerilogParser.Full_path_descriptionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#full_path_description.
def exitFull_path_description(self, ctx:SystemVerilogParser.Full_path_descriptionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#list_of_path_inputs.
def enterList_of_path_inputs(self, ctx:SystemVerilogParser.List_of_path_inputsContext):
pass
# Exit a parse tree produced by SystemVerilogParser#list_of_path_inputs.
def exitList_of_path_inputs(self, ctx:SystemVerilogParser.List_of_path_inputsContext):
pass
# Enter a parse tree produced by SystemVerilogParser#list_of_path_outputs.
def enterList_of_path_outputs(self, ctx:SystemVerilogParser.List_of_path_outputsContext):
pass
# Exit a parse tree produced by SystemVerilogParser#list_of_path_outputs.
def exitList_of_path_outputs(self, ctx:SystemVerilogParser.List_of_path_outputsContext):
pass
# Enter a parse tree produced by SystemVerilogParser#specify_input_terminal_descriptor.
def enterSpecify_input_terminal_descriptor(self, ctx:SystemVerilogParser.Specify_input_terminal_descriptorContext):
pass
# Exit a parse tree produced by SystemVerilogParser#specify_input_terminal_descriptor.
def exitSpecify_input_terminal_descriptor(self, ctx:SystemVerilogParser.Specify_input_terminal_descriptorContext):
pass
# Enter a parse tree produced by SystemVerilogParser#specify_output_terminal_descriptor.
def enterSpecify_output_terminal_descriptor(self, ctx:SystemVerilogParser.Specify_output_terminal_descriptorContext):
pass
# Exit a parse tree produced by SystemVerilogParser#specify_output_terminal_descriptor.
def exitSpecify_output_terminal_descriptor(self, ctx:SystemVerilogParser.Specify_output_terminal_descriptorContext):
pass
# Enter a parse tree produced by SystemVerilogParser#input_identifier.
def enterInput_identifier(self, ctx:SystemVerilogParser.Input_identifierContext):
pass
# Exit a parse tree produced by SystemVerilogParser#input_identifier.
def exitInput_identifier(self, ctx:SystemVerilogParser.Input_identifierContext):
pass
# Enter a parse tree produced by SystemVerilogParser#output_identifier.
def enterOutput_identifier(self, ctx:SystemVerilogParser.Output_identifierContext):
pass
# Exit a parse tree produced by SystemVerilogParser#output_identifier.
def exitOutput_identifier(self, ctx:SystemVerilogParser.Output_identifierContext):
pass
# Enter a parse tree produced by SystemVerilogParser#path_delay_value.
def enterPath_delay_value(self, ctx:SystemVerilogParser.Path_delay_valueContext):
pass
# Exit a parse tree produced by SystemVerilogParser#path_delay_value.
def exitPath_delay_value(self, ctx:SystemVerilogParser.Path_delay_valueContext):
pass
# Enter a parse tree produced by SystemVerilogParser#list_of_path_delay_expressions.
def enterList_of_path_delay_expressions(self, ctx:SystemVerilogParser.List_of_path_delay_expressionsContext):
pass
# Exit a parse tree produced by SystemVerilogParser#list_of_path_delay_expressions.
def exitList_of_path_delay_expressions(self, ctx:SystemVerilogParser.List_of_path_delay_expressionsContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t_path_delay_expression.
def enterT_path_delay_expression(self, ctx:SystemVerilogParser.T_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t_path_delay_expression.
def exitT_path_delay_expression(self, ctx:SystemVerilogParser.T_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#trise_path_delay_expression.
def enterTrise_path_delay_expression(self, ctx:SystemVerilogParser.Trise_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#trise_path_delay_expression.
def exitTrise_path_delay_expression(self, ctx:SystemVerilogParser.Trise_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tfall_path_delay_expression.
def enterTfall_path_delay_expression(self, ctx:SystemVerilogParser.Tfall_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tfall_path_delay_expression.
def exitTfall_path_delay_expression(self, ctx:SystemVerilogParser.Tfall_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tz_path_delay_expression.
def enterTz_path_delay_expression(self, ctx:SystemVerilogParser.Tz_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tz_path_delay_expression.
def exitTz_path_delay_expression(self, ctx:SystemVerilogParser.Tz_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t01_path_delay_expression.
def enterT01_path_delay_expression(self, ctx:SystemVerilogParser.T01_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t01_path_delay_expression.
def exitT01_path_delay_expression(self, ctx:SystemVerilogParser.T01_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t10_path_delay_expression.
def enterT10_path_delay_expression(self, ctx:SystemVerilogParser.T10_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t10_path_delay_expression.
def exitT10_path_delay_expression(self, ctx:SystemVerilogParser.T10_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t0z_path_delay_expression.
def enterT0z_path_delay_expression(self, ctx:SystemVerilogParser.T0z_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t0z_path_delay_expression.
def exitT0z_path_delay_expression(self, ctx:SystemVerilogParser.T0z_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tz1_path_delay_expression.
def enterTz1_path_delay_expression(self, ctx:SystemVerilogParser.Tz1_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tz1_path_delay_expression.
def exitTz1_path_delay_expression(self, ctx:SystemVerilogParser.Tz1_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t1z_path_delay_expression.
def enterT1z_path_delay_expression(self, ctx:SystemVerilogParser.T1z_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t1z_path_delay_expression.
def exitT1z_path_delay_expression(self, ctx:SystemVerilogParser.T1z_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tz0_path_delay_expression.
def enterTz0_path_delay_expression(self, ctx:SystemVerilogParser.Tz0_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tz0_path_delay_expression.
def exitTz0_path_delay_expression(self, ctx:SystemVerilogParser.Tz0_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t0x_path_delay_expression.
def enterT0x_path_delay_expression(self, ctx:SystemVerilogParser.T0x_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t0x_path_delay_expression.
def exitT0x_path_delay_expression(self, ctx:SystemVerilogParser.T0x_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tx1_path_delay_expression.
def enterTx1_path_delay_expression(self, ctx:SystemVerilogParser.Tx1_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tx1_path_delay_expression.
def exitTx1_path_delay_expression(self, ctx:SystemVerilogParser.Tx1_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#t1x_path_delay_expression.
def enterT1x_path_delay_expression(self, ctx:SystemVerilogParser.T1x_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#t1x_path_delay_expression.
def exitT1x_path_delay_expression(self, ctx:SystemVerilogParser.T1x_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tx0_path_delay_expression.
def enterTx0_path_delay_expression(self, ctx:SystemVerilogParser.Tx0_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tx0_path_delay_expression.
def exitTx0_path_delay_expression(self, ctx:SystemVerilogParser.Tx0_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#txz_path_delay_expression.
def enterTxz_path_delay_expression(self, ctx:SystemVerilogParser.Txz_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#txz_path_delay_expression.
def exitTxz_path_delay_expression(self, ctx:SystemVerilogParser.Txz_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#tzx_path_delay_expression.
def enterTzx_path_delay_expression(self, ctx:SystemVerilogParser.Tzx_path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#tzx_path_delay_expression.
def exitTzx_path_delay_expression(self, ctx:SystemVerilogParser.Tzx_path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#path_delay_expression.
def enterPath_delay_expression(self, ctx:SystemVerilogParser.Path_delay_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#path_delay_expression.
def exitPath_delay_expression(self, ctx:SystemVerilogParser.Path_delay_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#edge_sensitive_path_declaration.
def enterEdge_sensitive_path_declaration(self, ctx:SystemVerilogParser.Edge_sensitive_path_declarationContext):
pass
# Exit a parse tree produced by SystemVerilogParser#edge_sensitive_path_declaration.
def exitEdge_sensitive_path_declaration(self, ctx:SystemVerilogParser.Edge_sensitive_path_declarationContext):
pass
# Enter a parse tree produced by SystemVerilogParser#parallel_edge_sensitive_path_description.
def enterParallel_edge_sensitive_path_description(self, ctx:SystemVerilogParser.Parallel_edge_sensitive_path_descriptionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#parallel_edge_sensitive_path_description.
def exitParallel_edge_sensitive_path_description(self, ctx:SystemVerilogParser.Parallel_edge_sensitive_path_descriptionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#full_edge_sensitive_path_description.
def enterFull_edge_sensitive_path_description(self, ctx:SystemVerilogParser.Full_edge_sensitive_path_descriptionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#full_edge_sensitive_path_description.
def exitFull_edge_sensitive_path_description(self, ctx:SystemVerilogParser.Full_edge_sensitive_path_descriptionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#data_source_expression.
def enterData_source_expression(self, ctx:SystemVerilogParser.Data_source_expressionContext):
pass
# Exit a parse tree produced by SystemVerilogParser#data_source_expression.
def exitData_source_expression(self, ctx:SystemVerilogParser.Data_source_expressionContext):
pass
# Enter a parse tree produced by SystemVerilogParser#edge_identifier.
def enterEdge_identifier(self, ctx:SystemVerilogParser.Edge_identifierContext):
pass
# Exit a parse tree produced by SystemVerilogParser#edge_identifier.
def exitEdge_identifier(self, ctx:SystemVerilogParser.Edge_identifierContext):
pass
# Enter a parse tree produced by SystemVerilogParser#state_dependent_path_declaration.
def enterState_dependent_path_declaration(self, ctx:SystemVerilogParser.State_dependent_path_declarationContext):
pass
# Exit a parse tree produced by SystemVerilogParser#state_dependent_path_declaration.
def exitState_dependent_path_declaration(self, ctx:SystemVerilogParser.State_dependent_path_declarationContext):
pass
# Enter a parse tree produced by SystemVerilogParser#polarity_operator.
def enterPolarity_operator(self, ctx:SystemVerilogParser.Polarity_operatorContext):
pass
# Exit a parse tree produced by SystemVerilogParser#polarity_operator.
def exitPolarity_operator(self, ctx:SystemVerilogParser.Polarity_operatorContext):
pass
# Enter a parse tree produced by SystemVerilogParser#system_timing_check.
def enterSystem_timing_check(self, ctx:SystemVerilogParser.System_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#system_timing_check.
def exitSystem_timing_check(self, ctx:SystemVerilogParser.System_timing_checkContext):
pass
# Enter a parse tree produced by SystemVerilogParser#setup_timing_check.
def enterSetup_timing_check(self, ctx:SystemVerilogParser.Setup_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#setup_timing_check.
def exitSetup_timing_check(self, ctx:SystemVerilogParser.Setup_timing_checkContext):
pass
# Enter a parse tree produced by SystemVerilogParser#hold_timing_check.
def enterHold_timing_check(self, ctx:SystemVerilogParser.Hold_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#hold_timing_check.
def exitHold_timing_check(self, ctx:SystemVerilogParser.Hold_timing_checkContext):
pass
# Enter a parse tree produced by SystemVerilogParser#setuphold_timing_check.
def enterSetuphold_timing_check(self, ctx:SystemVerilogParser.Setuphold_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#setuphold_timing_check.
def exitSetuphold_timing_check(self, ctx:SystemVerilogParser.Setuphold_timing_checkContext):
pass
# Enter a parse tree produced by SystemVerilogParser#recovery_timing_check.
def enterRecovery_timing_check(self, ctx:SystemVerilogParser.Recovery_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#recovery_timing_check.
def exitRecovery_timing_check(self, ctx:SystemVerilogParser.Recovery_timing_checkContext):
pass
# Enter a parse tree produced by SystemVerilogParser#removal_timing_check.
def enterRemoval_timing_check(self, ctx:SystemVerilogParser.Removal_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#removal_timing_check.
def exitRemoval_timing_check(self, ctx:SystemVerilogParser.Removal_timing_checkContext):
pass
# Enter a parse tree produced by SystemVerilogParser#recrem_timing_check.
def enterRecrem_timing_check(self, ctx:SystemVerilogParser.Recrem_timing_checkContext):
pass
# Exit a parse tree produced by SystemVerilogParser#recrem_timing_check.
def exitRecrem_timing_check(self, ctx:SystemVerilogParser.Recrem_timing_checkContext):
pass
# NOTE: auto-generated ANTLR listener stubs (continued); intentional no-ops.
# Enter a parse tree produced by SystemVerilogParser#skew_timing_check.
def enterSkew_timing_check(self, ctx:SystemVerilogParser.Skew_timing_checkContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#skew_timing_check.
def exitSkew_timing_check(self, ctx:SystemVerilogParser.Skew_timing_checkContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#timeskew_timing_check.
def enterTimeskew_timing_check(self, ctx:SystemVerilogParser.Timeskew_timing_checkContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#timeskew_timing_check.
def exitTimeskew_timing_check(self, ctx:SystemVerilogParser.Timeskew_timing_checkContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#fullskew_timing_check.
def enterFullskew_timing_check(self, ctx:SystemVerilogParser.Fullskew_timing_checkContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#fullskew_timing_check.
def exitFullskew_timing_check(self, ctx:SystemVerilogParser.Fullskew_timing_checkContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#period_timing_check.
def enterPeriod_timing_check(self, ctx:SystemVerilogParser.Period_timing_checkContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#period_timing_check.
def exitPeriod_timing_check(self, ctx:SystemVerilogParser.Period_timing_checkContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#width_timing_check.
def enterWidth_timing_check(self, ctx:SystemVerilogParser.Width_timing_checkContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#width_timing_check.
def exitWidth_timing_check(self, ctx:SystemVerilogParser.Width_timing_checkContext) -> None:
    pass
# NOTE: auto-generated ANTLR listener stubs (continued); intentional no-ops.
# Enter a parse tree produced by SystemVerilogParser#nochange_timing_check.
def enterNochange_timing_check(self, ctx:SystemVerilogParser.Nochange_timing_checkContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#nochange_timing_check.
def exitNochange_timing_check(self, ctx:SystemVerilogParser.Nochange_timing_checkContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#timecheck_condition.
def enterTimecheck_condition(self, ctx:SystemVerilogParser.Timecheck_conditionContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#timecheck_condition.
def exitTimecheck_condition(self, ctx:SystemVerilogParser.Timecheck_conditionContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#controlled_reference_event.
def enterControlled_reference_event(self, ctx:SystemVerilogParser.Controlled_reference_eventContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#controlled_reference_event.
def exitControlled_reference_event(self, ctx:SystemVerilogParser.Controlled_reference_eventContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#data_event.
def enterData_event(self, ctx:SystemVerilogParser.Data_eventContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#data_event.
def exitData_event(self, ctx:SystemVerilogParser.Data_eventContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#delayed_data.
def enterDelayed_data(self, ctx:SystemVerilogParser.Delayed_dataContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#delayed_data.
def exitDelayed_data(self, ctx:SystemVerilogParser.Delayed_dataContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#delayed_reference.
def enterDelayed_reference(self, ctx:SystemVerilogParser.Delayed_referenceContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#delayed_reference.
def exitDelayed_reference(self, ctx:SystemVerilogParser.Delayed_referenceContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#end_edge_offset.
def enterEnd_edge_offset(self, ctx:SystemVerilogParser.End_edge_offsetContext) -> None:
    pass

# Exit a parse tree produced by SystemVerilogParser#end_edge_offset.
def exitEnd_edge_offset(self, ctx:SystemVerilogParser.End_edge_offsetContext) -> None:
    pass

# Enter a parse tree produced by SystemVerilogParser#event_based_flag.
def enterEvent_based_flag(self, ctx:SystemVerilogParser.Event_based_flagContext) -> None:
    pass
# Exit a parse | |
import discord
from discord import Option, OptionChoice
from discord.ext import commands, pages
import random
from Utilities import Checks, Vars, PlayerObject, ItemObject
from Utilities.Analytics import stringify_gains
from Utilities.Finances import Transaction
from Utilities.AyeshaBot import Ayesha
class OfferView(discord.ui.View):
    """Accept/Decline confirmation view shown to the target of an offer.

    Help me. Same code as Profile.ConfirmButton. Bad code moment
    """
    def __init__(self, target : discord.Member):
        # 15-second window for the target to respond before the view times out
        super().__init__(timeout=15.0)
        # Outcome flag: True = accepted, False = declined,
        # None = timed out with no response.
        self.value = None
        self.target = target

    @discord.ui.button(label="Accept", style=discord.ButtonStyle.green)
    async def accept(self, button : discord.ui.Button,
            interaction : discord.Interaction):
        self.value = True
        self.stop()

    @discord.ui.button(label="Decline", style=discord.ButtonStyle.red)
    async def decline(self, button : discord.ui.Button,
            interaction : discord.Interaction):
        self.value = False
        self.stop()

    async def interaction_check(self, interaction: discord.Interaction) -> bool:
        # Only the offer's target may press the buttons.
        return interaction.user.id == self.target.id
class Items(commands.Cog):
"""View and manipulate your inventory"""
def __init__(self, bot : Ayesha):
    # Bot reference; the cog's commands use bot.db (connection pool) below.
    self.bot = bot
# EVENTS
@commands.Cog.listener()
async def on_ready(self) -> None:
    # Startup log line confirming the cog loaded.
    print("Items is ready.")
# AUXILIARY FUNCTIONS
def create_embed(self, start, inv, got_eq):
    """Build one page of the weapon-inventory paginator.

    Shows up to 5 weapons from `inv` starting at index `start`. When
    `got_eq` is True, `inv[0]` is the player's equipped weapon and its
    field name is tagged with "[EQUIPPED]" (first page only).

    Returns the populated discord.Embed.
    """
    embed = discord.Embed(title="Your Inventory", color=Vars.ABLUE)
    iteration = 0
    while start < len(inv) and iteration < 5:
        weapon = inv[start]
        # Only the field NAME differs between the equipped weapon and the
        # rest; build it once instead of duplicating the whole add_field
        # call (the value text was copy-pasted verbatim before).
        name = f"{weapon['weapon_name']}: `{weapon['item_id']}` "
        if got_eq and start == 0:
            name += "[EQUIPPED]"
        embed.add_field(
            name=name,
            value=(
                f"**Attack:** {weapon['attack']}, **Crit:** "
                f"{weapon['crit']}, **Type:** "
                f"{weapon['weapontype']}, **Rarity:** "
                f"{weapon['rarity']}"
            ),
            inline=False)
        iteration += 1
        start += 1
    return embed
def create_armor_embed(self, start, inv):
    """Build a single armory page showing up to 5 pieces of armor.

    `inv` holds armor records; the page covers inv[start:start+5].
    Returns the populated discord.Embed.
    """
    embed = discord.Embed(title="Your Armory", color=Vars.ABLUE)
    last = min(start + 5, len(inv))
    for index in range(start, last):
        piece = inv[index]
        material = piece['armor_type']
        slot = piece['armor_slot']
        embed.add_field(
            name=f"{material} {slot}: `{piece['armor_id']}`",
            value=f"**Defense:** {Vars.ARMOR_DEFENSE[slot][material]}%",
            inline=False)
    return embed
def create_accessory_embed(self, start, inv):
    """Build a single wardrobe page showing up to 5 accessories.

    `inv` holds accessory objects exposing .name, .id and .bonus.
    Returns the populated discord.Embed.
    """
    embed = discord.Embed(title="Your Wardrobe", color=Vars.ABLUE)
    for accessory in inv[start:start + 5]:
        embed.add_field(
            name=f"{accessory.name}: `{accessory.id}`",
            value=accessory.bonus,
            inline=False)
    return embed
# COMMANDS
@commands.slash_command()
@commands.check(Checks.is_player)
async def inventory(self, ctx,
        order : Option(str, description="Order by ATK or CRIT",
            default="ID",
            choices=[
                OptionChoice(name="attack"),
                OptionChoice(name="crit"),
                OptionChoice(name="ID")],
            required=False),
        rarity : Option(str, description="Get only a specific rarity",
            choices=[OptionChoice(name=r) for r in Vars.RARITIES.keys()],
            required=False),
        weapontype : Option(str,
            description="Get only a specific weapon type",
            choices=[OptionChoice(name=t) for t in Vars.WEAPON_TYPES],
            required=False)):
    """View your inventory."""
    await ctx.defer()
    async with self.bot.db.acquire() as conn:
        # The equipped weapon (if any) is fetched separately so it can be
        # pinned to the top of the first page.
        psql1 = """
            WITH thing AS (
                SELECT equipped_item
                FROM players
                WHERE user_id = $1
            )
            SELECT items.item_id, items.weapontype, items.user_id,
                items.attack, items.crit, items.weapon_name,
                items.rarity
            FROM items
            INNER JOIN thing ON items.item_id = thing.equipped_item;
        """
        psql2 = """
            SELECT item_id, weapontype, user_id,
                attack, crit, weapon_name, rarity
            FROM items
            WHERE user_id = $1
        """
        # Append the optional filters once, numbering placeholders as the
        # argument list grows. BUGFIX: the old copy-pasted branches omitted
        # the space before ORDER BY in the weapontype-only case, producing
        # invalid SQL ("... weapontype = $2ORDER BY item_id;").
        params = [ctx.author.id]
        if rarity is not None:
            params.append(rarity)
            psql2 += f" AND rarity = ${len(params)} "
        if weapontype is not None:
            params.append(weapontype)
            psql2 += f" AND weapontype = ${len(params)} "
        # `order` is constrained to the fixed slash-command choices
        # ("attack"/"crit"/"ID"), so interpolating it cannot inject SQL.
        if order == "ID":
            psql2 += "ORDER BY item_id;"
        else:
            psql2 += f"ORDER BY {order} DESC;"
        inv = await conn.fetch(psql2, *params)
        equip = await conn.fetchrow(psql1, ctx.author.id)
        got_eq = equip is not None
        # Allows me to put equip and rest of items in one thing
        inventory = list(inv)
        if len(inventory) == 0:
            return await ctx.respond("Your inventory is empty!")
        if got_eq:
            inventory.insert(0, equip)
        # The create_embed function writes embeds; 5 per page
        embeds = [self.create_embed(i, inventory, got_eq)
                  for i in range(0, len(inventory), 5)]
        paginator = pages.Paginator(pages=embeds, timeout=30)
        await paginator.respond(ctx.interaction)
@commands.slash_command()
@commands.check(Checks.is_player)
async def armory(self, ctx,
        slot : Option(str,
            description="Get only a specific armor slot",
            required=False,
            choices=[OptionChoice(s) for s in Vars.ARMOR_DEFENSE]),
        material : Option(str,
            description="Get only a specific armor material",
            required=False,
            choices=[OptionChoice(m) for m in Vars.ARMOR_DEFENSE["Boots"]])):
    """Armories held both weapons and armor, but here only armor :P"""
    await ctx.defer()
    async with self.bot.db.acquire() as conn:
        query = """
            SELECT armor_id, armor_type, armor_slot
            FROM armor
            WHERE user_id = $1
        """
        # Append the optional filters, numbering the placeholders as the
        # argument list grows ($2, $3, ...).
        args = [ctx.author.id]
        if material is not None:
            args.append(material)
            query += f" AND armor_type = ${len(args)} "
        if slot is not None:
            args.append(slot)
            query += f" AND armor_slot = ${len(args)} "
        query += " ORDER BY armor_id;"
        armory = await conn.fetch(query, *args)
        if len(armory) == 0:
            return await ctx.respond("Your armory is empty!")
        # 5 pieces of armor per embed page.
        embeds = [self.create_armor_embed(i, armory)
                  for i in range(0, len(armory), 5)]
        if len(embeds) == 1:
            await ctx.respond(embed=embeds[0])
        else:
            paginator = pages.Paginator(pages=embeds, timeout=30)
            await paginator.respond(ctx.interaction)
@commands.slash_command()
@commands.check(Checks.is_player)
async def wardrobe(self, ctx,
        prefix : Option(str,
            description="Sort for a specific effect",
            required=False,
            choices=[OptionChoice(p) for p in Vars.ACCESSORY_BONUS]),
        material : Option(str,
            description="Sort for a specific core material",
            required=False,
            choices=[OptionChoice(m) for m in Vars.ACCESSORY_BONUS['Lucky']]
        )):
    """Your wardrobe contains all your accessories. View them here."""
    # NOTE(review): unlike /inventory and /armory this command never calls
    # ctx.defer(); confirm the fetch + per-row accessory hydration below
    # always completes within the interaction response window.
    async with self.bot.db.acquire() as conn:
        # Base query; one optional filter branch is appended below, each
        # branch numbering its own placeholders.
        psql = """
            SELECT accessory_id
            FROM accessories
            WHERE user_id = $1
        """
        if prefix is not None and material is not None:
            psql += """
                AND prefix = $2 AND accessory_type = $3
                ORDER BY accessory_id;
            """
            inv = await conn.fetch(psql, ctx.author.id, prefix, material)
        elif prefix is None and material is not None:
            psql += """
                AND accessory_type = $2
                ORDER BY accessory_id;
            """
            inv = await conn.fetch(psql, ctx.author.id, material)
        elif prefix is not None and material is None:
            psql += """
                AND prefix = $2
                ORDER BY accessory_id;
            """
            inv = await conn.fetch(psql, ctx.author.id, prefix)
        else:
            psql += " ORDER BY accessory_id;"
            inv = await conn.fetch(psql, ctx.author.id)
        if len(inv) == 0:
            return await ctx.respond("Your wardrobe is empty!")
        # Hydrate each row into an accessory object (provides .name/.bonus
        # for the embed builder).
        inv = [await ItemObject.get_accessory_by_id(
                conn, record['accessory_id'])
            for record in inv] # Turn them into objects (for the name)
        embeds = [self.create_accessory_embed(i, inv)
            for i in range(0, len(inv), 5)]
        if len(embeds) == 1:
            await ctx.respond(embed=embeds[0])
        else:
            paginator = pages.Paginator(pages=embeds, timeout=30)
            await paginator.respond(ctx.interaction)
@commands.slash_command()
@commands.check(Checks.is_player)
async def equip(self, ctx,
        equip : Option(str,
            description="Equip either a weapon or armor",
            choices=[
                OptionChoice("Equip a Weapon"),
                OptionChoice("Equip Armor"),
                OptionChoice("Equip an Accessory")]),
        id : Option(int,
            description="The ID of the item you want to equip.",
            required=False)):
    """Equip an item using its ID (get from /inventory if weapon, /armory if armor)"""
    # Passing no `id` unequips the chosen slot instead of equipping.
    async with self.bot.db.acquire() as conn:
        player = await PlayerObject.get_player_by_id(conn, ctx.author.id)
        if equip == "Equip a Weapon" and id is not None:
            await player.equip_item(conn, id)
            await ctx.respond((
                f"Equipped item `{player.equipped_item.weapon_id}`: "
                f"{player.equipped_item.name} (ATK: "
                f"{player.equipped_item.attack}, CRIT: "
                f"{player.equipped_item.crit})"))
        elif equip == "Equip Armor" and id is not None:
            armor = await player.equip_armor(conn, id)
            await ctx.respond((
                f"Equipped armor `{armor.id}`: {armor.name} "
                f"(DEF: `{armor.defense}%`)"))
        elif equip == "Equip an Accessory" and id is not None:
            await player.equip_accessory(conn, id)
            await ctx.respond((
                f"Equipped accessory `{player.accessory.id}`: "
                f"{player.accessory.name}: {player.accessory.bonus}."))
        elif equip == "Equip a Weapon" and id is None:
            await player.unequip_item(conn)
            await ctx.respond("Unequipped your item.")
        elif equip == "Equip Armor" and id is None:
            await player.unequip_armor(conn)
            await ctx.respond("Unequipped all your armor.")
        else:
            # Remaining case: "Equip an Accessory" with no id.
            await player.unequip_accessory(conn)
            await ctx.respond("Unequipped your accessory.")
@commands.slash_command()
@commands.check(Checks.is_player)
async def merge(self, ctx, item : Option(int,
description="The ID of the item you want to strengthen."),
fodder : Option(int,
description="The ID of the item you want to destroy.")):
"""Merge an item into another to boost its ATK by 1."""
if item == fodder:
return await ctx.respond("You cannot merge an item with itself.")
async with | |
#/************************************************************************************************************************
# Copyright (c) 2016, Imagination Technologies Limited and/or its affiliated group companies.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#************************************************************************************************************************/
# Tests related to client-side operations only
import unittest
import subprocess
import time
from collections import namedtuple
import overlord
import common
import config
import tools_common
from tools_common import CustomObject
from tools_common import CustomResource
from test_awa_client_define import client_define
from test_awa_client_get import client_get
from test_awa_client_set import client_set
from test_awa_client_delete import client_delete
from test_awa_client_subscribe import client_subscribe
class TestClient(tools_common.AwaTest):
# NOTE(review): every expected-output literal in these tests encodes the awa
# client tools' exact formatting, including leading spaces before resource
# lines; whitespace in this copy of the file may have been mangled, so verify
# any failing comparison against real tool output before editing the literals.
def test_set_get_single_resource_string(self):
    # test that a single string resource can be set and retrieved
    manufacturer = "ACME Corp."
    expectedStdout = "Object1000[/1000/0]:\n Resource100[/1000/0/100]: %s\n" % (manufacturer,)
    expectedStderr = ""
    expectedCode = 0

    result = client_set(self.config, "/1000/0/100=\"%s\"" % (manufacturer,))
    self.assertEqual(expectedCode, result.code)

    result = client_get(self.config, "/1000/0/100")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_single_resource_integer(self):
    # test that a single integer resource can be set and retrieved
    value = 3
    expectedStdout = "Object1000[/1000/0]:\n Resource101[/1000/0/101]: %d\n" % (value,)
    expectedStderr = ""
    expectedCode = 0

    result = client_set(self.config, "--verbose /1000/0/101=%i" % (value,))
    self.assertEqual(expectedCode, result.code)

    result = client_get(self.config, "/1000/0/101")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_single_resource_float(self):
    # test that a single float resource can be set and retrieved
    value = 3.5
    expectedStdout = "Object1000[/1000/0]:\n Resource102[/1000/0/102]: %g\n" % (value,)
    expectedStderr = ""
    expectedCode = 0

    result = client_set(self.config, "/1000/0/102=%f" % (value,))
    self.assertEqual(expectedCode, result.code)

    result = client_get(self.config, "/1000/0/102")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_single_resource_boolean_true(self):
    # test that a single boolean resource can be set and retrieved
    value = True
    # --verbose makes the set tool echo the whole IPC session transcript
    expectedStdout = "Session IPC configured for UDP: address 127.0.0.1, port %d\nSession connected\nSet Boolean /1000/0/103 <- True\nSet operation completed successfully.\nSession disconnected\n" % (self.config.clientIpcPort,)
    expectedStderr = ""
    expectedCode = 0

    result = client_set(self.config, "--verbose /1000/0/103=%r" % (value,))
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    expectedStdout = "Object1000[/1000/0]:\n Resource103[/1000/0/103]: True\n"
    result = client_get(self.config, "/1000/0/103")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)
def test_set_get_single_resource_boolean_false(self):
    # test that a single boolean resource can be set and retrieved
    value = False
    expectedStdout = ""
    expectedStderr = ""
    expectedCode = 0

    result = client_set(self.config, "/1000/0/103=%r" % (value,))
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    expectedStdout = "Object1000[/1000/0]:\n Resource103[/1000/0/103]: %r\n" % (value,)
    result = client_get(self.config, "/1000/0/103")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_single_resource_boolean_numbers(self):
    # test correct values for setting boolean resource with integers: 0,1,2 (False,True,True)
    for value in range(3):
        expectedStdout = ""
        expectedStderr = ""
        expectedCode = 0

        result = client_set(self.config, "/1000/0/103=%d" % (value,))
        self.assertEqual(expectedStdout, result.stdout)
        self.assertEqual(expectedStderr, result.stderr)
        self.assertEqual(expectedCode, result.code)

        # any non-zero integer must read back as True
        expectedStdout = "Object1000[/1000/0]:\n Resource103[/1000/0/103]: %r\n" % (bool(value),)
        result = client_get(self.config, "/1000/0/103")
        self.assertEqual(expectedStdout, result.stdout)
        self.assertEqual(expectedStderr, result.stderr)
        self.assertEqual(expectedCode, result.code)

def test_set_get_single_resource_time(self):
    # test that a single time resource (64 bit integer) can be set and retrieved
    value = 1442972971
    expectedStdout = "Object1000[/1000/0]:\n Resource104[/1000/0/104]: %d\n" % (value,)
    expectedStderr = ""
    expectedCode = 0

    result = client_set(self.config, "/1000/0/104=%d" % (value,))
    self.assertEqual(expectedCode, result.code)

    result = client_get(self.config, "/1000/0/104")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_single_resource_objlink(self):
    # test that a single object link resource can be set and retrieved
    link = "/3/0"
    result = client_set(self.config, "/1000/0/106=\"%s\"" % (link,))
    self.assertEqual(0, result.code)

    # object links are printed as ObjectLink[objectID:instanceID]
    expectedStdout = "Object1000[/1000/0]:\n Resource106[/1000/0/106]: ObjectLink[%s]\n" % (link.strip('/').replace('/', ':'))
    expectedStderr = ""
    expectedCode = 0

    result = client_get(self.config, "/1000/0/106")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_single_resource_none_executable(self):
    # test that a single none resource (executable) cannot be set
    value = 12345
    expectedStdout = ""
    # the failure is reported on stderr, but the tool still exits with 0
    expectedStderr = "Resource /1000/0/107 is of type None and cannot be set\n"
    expectedCode = 0

    result = client_set(self.config, "/1000/0/107=%d" % (value,))
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_get_single_resource_none_executable(self):
    # test that a single none resource (executable) cannot be retrieved
    expectedStdout = "Object1000[/1000/0]:\n Resource107[/1000/0/107]: [Executable]\n"
    expectedStderr = ""
    expectedCode = 0

    result = client_get(self.config, "/1000/0/107")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)
def test_set_get_multiple_resources_same_instance(self):
    # test that multiple resources from the same instance can be set and retrieved with a single command
    manufacturer = "ACME Corp."
    modelNumber = "1234567890"
    memoryFree = 55
    # NOTE(review): the indentation inside this multi-line literal must
    # match the client tool's output exactly; whitespace may have been
    # mangled in this copy -- confirm against real tool output.
    expectedStdout = \
"""Device[/3/0]:
 Manufacturer[/3/0/0]: %s
 ModelNumber[/3/0/1]: %s
 MemoryFree[/3/0/10]: %d
""" % (manufacturer, modelNumber, memoryFree)
    expectedStderr = ""
    expectedCode = 0

    client_set(self.config, "/3/0/0=\"%s\"" % (manufacturer,))
    client_set(self.config, "/3/0/1=\"%s\"" % (modelNumber,))
    client_set(self.config, "/3/0/10=%d" % (memoryFree,))

    result = client_get(self.config, "/3/0/0", "/3/0/1", "/3/0/10")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_multiple_resources_different_instances(self):
    # test that multiple resources from different instances can be set and retrieved with a single command
    manufacturer = "ACME Corp."
    modelNumber = "1234567890"
    # NOTE(review): see whitespace caveat on the literal above.
    expectedStdout = \
"""Device[/3/0]:
 Manufacturer[/3/0/0]: %s
Object1000[/1000/0]:
 Resource100[/1000/0/100]: %s
""" % (manufacturer, modelNumber)
    expectedStderr = ""
    expectedCode = 0

    client_set(self.config, "/3/0/0=\"%s\"" % (manufacturer,), "/1000/0/100=\"%s\"" % (modelNumber,))

    result = client_get(self.config, "/3/0/0", "/1000/0/100")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_invalid_value(self):
    # set MemoryFree (int) as string value. Currently will set the value to 0
    expectedStdout = "Device[/3/0]:\n MemoryFree[/3/0/10]: 15\n"
    expectedStderr = ""
    expectedCode = 0
    result = client_get(self.config, "/3/0/10")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    # setting a non-numeric value succeeds (exit 0) but stores 0
    expectedStdout = ""
    expectedStderr = ""
    expectedCode = 0
    result = client_set(self.config, "/3/0/10=abc")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    expectedStdout = "Device[/3/0]:\n MemoryFree[/3/0/10]: 0\n"
    expectedStderr = ""
    expectedCode = 0
    result = client_get(self.config, "/3/0/10")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_get_delete_get(self):
    # test that we can no longer get the value of a resource after it has been deleted
    expectedStdout = ""
    expectedStderr = ""
    expectedCode = 0
    result = client_set(self.config, "/1000/0/100=abc")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    expectedStdout = "Object1000[/1000/0]:\n Resource100[/1000/0/100]: abc\n"
    expectedStderr = ""
    expectedCode = 0
    result = client_get(self.config, "/1000/0/100")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    expectedStdout = ""
    expectedStderr = ""
    expectedCode = 0
    result = client_delete(self.config, "/1000/0/100")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    # after deletion the path is gone and the get fails with exit code 1
    expectedStdout = ""
    expectedStderr = "AwaClientGetOperation_Perform failed\nFailed to retrieve /1000/0/100: AwaError_PathNotFound\n"
    expectedCode = 1
    result = client_get(self.config, "/1000/0/100")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

def test_set_multiple_instances_on_single_instance_object(self):
    #test that we can't create multiple object instances on a single instance object
    expectedStdout = ""
    expectedStderr = ""
    expectedCode = 0
    result = client_set(self.config, "/1000/0/100=abc")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)

    # creating instance 1 of a single-instance object must fail
    expectedStdout = ""
    expectedStderr = "AwaClientSetOperation_Perform failed\nFailed to set on path /1000/1: AwaError_CannotCreate\n"
    expectedCode = 1
    result = client_set(self.config, "--create /1000/1")
    self.assertEqual(expectedStdout, result.stdout)
    self.assertEqual(expectedStderr, result.stderr)
    self.assertEqual(expectedCode, result.code)
def test_set_multiple_instances_on_single_instance_resource(self):
#test that we can't set multiple resource instances on a single instance resource
expectedStdout = ""
expectedStderr = "Error: resource /1000/0/100 is not an array; do not specify a resource instance ID\n"
expectedCode = 0
result = client_set(self.config, "/1000/0/100/0=abc")
self.assertEqual(expectedStdout, result.stdout)
self.assertEqual(expectedStderr, result.stderr)
self.assertEqual(expectedCode, result.code)
expectedStdout = ""
expectedStderr = "Error: resource /1000/0/100 is not an array; do not specify a resource instance ID\n"
expectedCode = 0
result = client_set(self.config, "/1000/0/100/1=abc")
self.assertEqual(expectedStdout, | |
# <gh_stars>1-10  (dataset artifact, not Python -- commented out so the module stays importable)
from itertools import groupby
import pandas as pd
from pymatgen import MPRester, Structure
from pymatgen.core.composition import Composition
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.core.units import FloatWithUnit
from pymatgen.analysis.elasticity import ElasticTensor
import pymatgen.core.periodic_table as ptable
# Module-level Materials Project client shared by the functions below.
# NOTE: instantiated at import time, so importing this module requires a
# configured MP API key (and network access when the functions are called).
mpr = MPRester()
def redenth_act(compstr):
    """
    Finds redox enthalpies for a perovskite solid solution, both for the solid solution and for the endmembers

    dh_min and dh_max are based on the redox enthalpy of the endmembers. Ideally, the theoretical redox enthalpy of
    the solid solution corresponds to the weigthed average of dh_min and dh_max. If not, and "combined" is selected
    in the data use variable, dh_min and dh_max are corrected using the actual theoretical redox enthalpy of the
    solid solution.

    :param compstr: composition as a string

    :return:
    theo_solid_solution: theoretical redox enthalpy for the solid solution, if available on the Materials Project
    dh_min: minimum redox enthalpy of the solid solution, based on the endmember redox enthalpy
    dh_max: maximum redox enthalpy of the solid solution, based on the endmember redox enthalpy
    conc_act: concentration of the redox-active species (from find_active)
    """
    dh_min = None
    dh_max = None

    # calculate redox enthalpies of endmembers
    try:
        dhs = calc_dh_endm(compstr)
        # only if both are found the values shall be used
        if (not dhs[0]) or (not dhs[1]):
            raise TypeError()
        dh_min = dhs[1]
        dh_max = dhs[0]
    # this happens if either the brownmillerite or the perovskite data is not on the Materials Project
    except (TypeError, IndexError):
        pass

    theo_solid_solution = None
    # calculate redox enthalpies for complete perovskite -> brownmillerite reduction
    try:
        theo_solid_solution = find_theo_redenth(compstr)
    # this happens if either the brownmillerite or the perovskite data is not on the Materials Project
    except IndexError:
        pass

    splitcomp = split_comp(compstr)

    # use a step function first to calculate the total redox enthalpy from perovskite to
    # brownmillerite as expected according to the endmember redox enthalpies
    conc_act = find_active(mat_comp=splitcomp)[1]
    # BUGFIX: this weighted mean previously ran unconditionally; when the
    # endmember lookup above failed, dh_min/dh_max were still None and the
    # multiplication raised an uncaught TypeError.
    if dh_min is not None and dh_max is not None:
        red_enth_mean_endm = (conc_act * dh_min) + ((1 - conc_act) * dh_max)
    else:
        red_enth_mean_endm = None

    if theo_solid_solution:
        if not red_enth_mean_endm:
            difference = float('inf')
        else:
            difference = theo_solid_solution - red_enth_mean_endm
        # large disagreement (> 30 kJ) or a pure endmember: fall back to the
        # solid-solution value for both bounds
        if abs(difference) > 30000 or not splitcomp[-1]:
            dh_min = theo_solid_solution
            dh_max = theo_solid_solution
        else:
            dh_min = dh_min + difference
            dh_max = dh_max + difference

    return theo_solid_solution, dh_min, dh_max, conc_act
def calc_dh_endm(compstr):
    """
    Calculates the maximum and minimum redox enthalpy of a solid solution based on the redox enthalpies of its
    endmembers

    Uses the average redox enthalpy of A_1 B_1 O3 and A_2 B_1 O3, depending on the concentration of the two
    A species
    Calculates the same for A_1 B_2 O3 and A_2 B_2 O3
    Whichever is higher is the upper limit for the redox enthalpy of the solid solution dh_max
    The other one is the lower limit dh_min

    :param compstr: composition as a string
    :return: dh_max, dh_min
    """
    endm = find_endmembers(compstr)
    # endm[0]/endm[1]: the two B_1 endmembers; endm[2]/endm[3]: the two B_2
    # endmembers; endm[4]/endm[5]: presumably the A-site concentrations used
    # as weights (per the docstring) -- TODO confirm in find_endmembers.
    dh_1 = find_theo_redenth(endm[0]) * endm[4] + find_theo_redenth(endm[1]) * \
        endm[5]
    # BUGFIX: the second term previously re-used endm[2]; the A_2 B_2 O3
    # endmember is endm[3] (see docstring).
    dh_2 = find_theo_redenth(endm[2]) * endm[4] + find_theo_redenth(endm[3]) * \
        endm[5]

    if dh_1 > dh_2:
        dh_max = dh_1
        dh_min = dh_2
    else:
        dh_max = dh_2
        dh_min = dh_1

    return dh_max, dh_min
def find_theo_redenth(compstr):
    """
    Finds theoretical redox enthalpies from the Materials Project from perovskite to brownmillerite
    based partially on https://github.com/materialsproject/pymatgen/blob/b3e972e293885c5b3c69fb3e9aa55287869d4d84/
    examples/Calculating%20Reaction%20Energies%20with%20the%20Materials%20API.ipynb

    :param compstr: composition as a string

    :return:
    red_enth: redox enthalpy in kJ/mol O
    """
    compstr_perovskite = compstr.split("O")[0] + "O3"

    comp_spl = split_comp(compstr)
    # build the chemical-system list (e.g. ["Ca", "Mn", "O"]) from whichever
    # of the four species slots split_comp filled
    chem_sys = ""
    for i in range(len(comp_spl)):
        if comp_spl[i] is not None:
            chem_sys = chem_sys + comp_spl[i][0] + "-"
    chem_sys = chem_sys + "O"
    chem_sys = chem_sys.split("-")

    # network call: all MP entries in this chemical system
    all_entries = mpr.get_entries_in_chemsys(chem_sys)

    # This method simply gets the lowest energy entry for all entries with the same composition.
    def get_most_stable_entry(formula):
        relevant_entries = [entry for entry in all_entries if
            entry.composition.reduced_formula == Composition(formula).reduced_formula]
        relevant_entries = sorted(relevant_entries, key=lambda e: e.energy_per_atom)
        return relevant_entries[0]

    # alternating element / stoichiometry tokens of the input formula
    formula_spl = [''.join(g) for _, g in groupby(str(compstr), str.isalpha)]
    # perov_formula is a LIST of characters: `+= str(...)` extends a list
    # with the string's characters, which "".join() below reassembles.
    perov_formula = []
    for k in range(len(formula_spl)):
        try:
            # scale stoichiometries x8 so fractional values become integers
            # (perovskite supercell, normalized to O24 below)
            perov_formula += str(int(float(formula_spl[k]) * 8))
        except ValueError:
            perov_formula += str(formula_spl[k])
    perov_formula = "".join(perov_formula)
    perov_formula = str(perov_formula).split("O")[0] + "O24"
    perovskite = get_most_stable_entry(perov_formula)

    brownm_formula = []
    for k in range(len(formula_spl)):
        try:
            # scale x32 for the brownmillerite supercell (normalized to O80)
            brownm_formula += str(int(float(formula_spl[k]) * 32))
        except ValueError:
            brownm_formula += str(formula_spl[k])
    brownm_formula = "".join(brownm_formula)
    brownm_formula = str(brownm_formula).split("O")[0] + "O80"
    brownmillerite = get_most_stable_entry(brownm_formula)

    # for oxygen: do not use the most stable phase O8 but the most stable O2 phase
    def get_oxygen():
        relevant_entries = [entry for entry in all_entries if
            entry.composition == Composition("O2")]
        relevant_entries = sorted(relevant_entries, key=lambda e: e.energy_per_atom)
        return relevant_entries[0]
    oxygen = get_oxygen()

    reaction = ComputedReaction([perovskite], [brownmillerite, oxygen])
    energy = FloatWithUnit(reaction.calculated_reaction_energy, "eV atom^-1")

    # figure out the stoichiometry of O2 in the reaction equation in order to normalize the energies per mol of O
    # NOTE(review): `reaction.as_dict` is not called here (no parentheses).
    # If as_dict is a method, str() of the bound method contains no " O2"
    # token, float() raises ValueError, and o_stoich silently falls back
    # to 1. Confirm against the pymatgen version in use -- str(reaction)
    # may be what was intended.
    try:
        o_stoich = float(str(str(reaction.as_dict).split(" O2")[0]).split()[-1])
    except ValueError:
        o_stoich = 1
    # energy in J/mol per mol of O2
    ener = (float(energy.to("kJ mol^-1")) * 1000) / o_stoich
    # per mol of O
    ener = ener / 2

    return ener
def split_comp(compstr):
    """
    Split a perovskite solid-solution composition string into its components.

    Chemical composition: (am_1, am_2)(tm_1, tm_2)Ox

    :param compstr: composition as a string
    :return: am_1, am_2, tm_1, tm_2; each of these output variables is a
        [species, stoichiometry] pair such as ["Fe", 0.6], or None when the
        corresponding slot is not occupied
    """
    am_1 = am_2 = tm_1 = tm_2 = None
    # Tokenize into alternating letter/number runs, e.g. "Sr0.5" -> ["Sr", "0.5"].
    tokens = [''.join(run) for _, run in groupby(str(compstr), str.isalpha)]
    for idx, token in enumerate(tokens):
        try:
            species = ptable.Element(token)
            # Alkali / alkaline-earth / rare-earth metals occupy the A site.
            if species.is_alkaline or species.is_alkali or species.is_rare_earth_metal:
                if am_1 is None:
                    am_1 = [token, float(tokens[idx + 1])]
                elif am_2 is None:
                    am_2 = [token, float(tokens[idx + 1])]
            # Transition metals (excluding rare earths) occupy the B site.
            if species.is_transition_metal and not species.is_rare_earth_metal:
                if tm_1 is None:
                    tm_1 = [token, float(tokens[idx + 1])]
                elif tm_2 is None:
                    tm_2 = [token, float(tokens[idx + 1])]
        # stoichiometry tokens raise ValueError in ptable.Element; skip them
        except ValueError:
            pass
    return am_1, am_2, tm_1, tm_2
def find_active(mat_comp):
    """
    Finds the more redox-active species in a perovskite solid solution

    Args:
        mat_comp:
            The materials composition data, as generated by split_comp:
            a 4-item sequence (am_1, am_2, tm_1, tm_2) where each item is a
            [species, stoichiometry] pair or None.

    Returns:
        act_spec:
            more redox active species
        act:
            stoichiometry of the more redox active species

    Raises:
        ValueError: if the charge of an A-site species is unknown, or if the
            B-site reducibility cannot be predicted for this composition.
    """
    # calculate charge of the A site metals
    charge_sum = 0
    for i in range(2):
        if mat_comp[i]:
            if ptable.Element(mat_comp[i][0]).is_alkali:
                charge_sum += mat_comp[i][1]
            elif ptable.Element(mat_comp[i][0]).is_alkaline:
                charge_sum += 2 * mat_comp[i][1]
            elif (ptable.Element(mat_comp[i][0]).is_lanthanoid or (
                    mat_comp[i][0] == "Bi")) and mat_comp[i][0] != "Ce":
                charge_sum += 3 * mat_comp[i][1]
            elif mat_comp[i][0] == "Ce":
                # Ce is treated as 4+. Bug fix: this line previously read
                # "mat_compp[i][1]" (misspelled), raising NameError for any
                # Ce-containing composition.
                charge_sum += 4 * mat_comp[i][1]
            else:
                raise ValueError("Charge of A site species unknown.")
    red_order = None
    # charge on B sites 4+
    # experimentally well-established order of A2+B4+O3 perovskite reducibility: Ti - Mn - Fe - Co - Cu
    if round((6 - charge_sum), 2) == 4:
        red_order = ["Ti", "Mn", "Fe", "Co", "Cu"]
    # charge on B sites 3+
    # order of binary oxide reducibility according to Materials Project (A2O3 -> AO + O2)
    if round((6 - charge_sum), 2) == 3:
        red_order = ["Sc", "Ti", "V", "Cr", "Fe", "Mn", "Cu", "Co", "Ni", "Ag"]  # changed Ni<->Ag order according to DFT results
    # charge on B sites 5+
    # order of binary oxide reducibility according to Materials Project (A2O3 -> AO + O2)
    if round((6 - charge_sum), 2) == 5:
        red_order = ["Ta", "Nb", "W", "Mo", "V", "Cr"]
    act_a = None
    if red_order:
        for i in range(len(red_order)):
            if mat_comp[2][0] == red_order[i]:
                # NOTE: the slice below excludes the last (most reducible)
                # entry; the correction block at the end compensates for it.
                more_reducible = red_order[i + 1:-1]
                if mat_comp[3] is not None and (mat_comp[3][0] in more_reducible):
                    act_a = mat_comp[3]
                else:
                    act_a = mat_comp[2]
    if act_a is None:
        raise ValueError("B species reducibility unknown, preferred reduction of species not predicted")
    # correct bug for the most reducible species
    if act_a[0] == red_order[-2] and (red_order[-1] in str(mat_comp)):
        act_a[0] = red_order[-1]
        act_a[1] = 1 - act_a[1]
    return act_a[0], act_a[1]
def find_endmembers(compstr):
"""
Finds the endmembers of a solid solution (A_1 A_2)(B_1 B_2) O3 of four perovskite species:
A_1 B_1 O3
A_2 B_1 O3
A_1 B_2 O3
A_2 B_2 O3
:return:
endmember_1a, endmember_1b: two endmembers A_1 B_1 O3 and A_2 B_1 O3 with the same transition metal but
different A species
endmember_2a, endmember_2b: two endmembers A_1 B_2 O3 and A_2 B_2 O3 with the same transition metal but
different A species
a_conc: concentration of the A | |
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
elif isinstance(elem, WedgeElement):
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
lattice.append(elem.name, "marker", name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
elif isinstance(elem, BPMElement):
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
lattice.append(elem.name, "bpm", name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
elif isinstance(elem, (BLMElement, BLElement, BCMElement)):
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
lattice.append(elem.name, "marker", name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
elif isinstance(elem, PMElement):
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
pm_angle = self._get_config(elem.dtype, CONFIG_PM_ANGLE, DEFAULT_PM_ANGLE)
if pm_angle == '-45':
elem.sign = -1.0
else:
elem.sign = 1.0
lattice.append(elem.name, "marker", name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0), ('aper', elem.aperture / 2.0))
elif isinstance(elem, CavityElement):
phase = 0.0
if settings is not None:
try:
phase = settings[elem.name][elem.fields.phase_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.phase_phy}' setting not found for element: {elem.name}")
amplitude = 0.0
if settings is not None:
try:
amplitude = settings[elem.name][elem.fields.amplitude_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.amplitude_phy}' setting not found for element: {elem.name}")
frequency = 0.0
if settings is not None:
try:
frequency = settings[elem.name][elem.fields.frequency]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.frequency}' setting not found for element: {elem.name}")
# element name-wise has higher priority
cav_type = None
conf_attr = 'dtype'
cav_type_from_dtype = self._get_config(elem.dtype, CONFIG_FLAME_CAV_TYPE, None)
cav_type_from_name = self._get_config(elem.name, CONFIG_FLAME_CAV_TYPE, None)
if cav_type_from_dtype is not None:
cav_type = cav_type_from_dtype
conf_attr = 'dtype'
if cav_type_from_name is not None:
cav_type = cav_type_from_name
conf_attr = 'name'
l = self._get_config(getattr(elem, conf_attr), CONFIG_FLAME_CAV_LENGTH, elem.length)
if cav_type is None:
raise RuntimeError(f"FlameLatticeFactory: Cavity type not found: {elem.dtype}")
elif cav_type == 'Generic':
cav_conf = self._get_config(getattr(elem, conf_attr), CONFIG_FLAME_CAV_CONF, None)
if cav_conf is None:
raise RuntimeError(f"FlameLatticeFactory: Generic cavity data file not found: {elem.dtype}")
lattice.append(elem.name, "rfcavity",
('cavtype', cav_type), ('f', frequency),
('phi', phase), ('scl_fac', amplitude),
('L', float(l)), ('aper', elem.aperture / 2.0),
('datafile', cav_conf),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
else:
lattice.append(elem.name, "rfcavity",
('cavtype', cav_type), ('f', frequency),
('phi', phase), ('scl_fac', amplitude),
('L', float(l)), ('aper', elem.aperture / 2.0),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
elif isinstance(elem, SolCorElement):
field = 0.0
if settings is not None:
try:
field = settings[elem.name][elem.fields.field_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.field_phy}' setting not found for element: {elem.name}")
hkick = 0.0
if settings is not None:
try:
hkick = settings[elem.h.name][elem.h.fields.angle_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.h.fields.angle_phy}' setting not found for element: {elem.name}")
vkick = 0.0
if settings is not None:
try:
vkick = settings[elem.v.name][elem.v.fields.angle_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.v.fields.angle_phy}' setting not found for element: {elem.name}")
# error = self._get_error(elem)
lattice.append(elem.name + "_1", "solenoid", ('L', elem.length / 2.0),
('aper', elem.aperture / 2.0), ('B', field),
name=elem.name, etype="SOL")
lattice.append(elem.h.name, "orbtrim", ('theta_x', hkick),
name=elem.h.name, etype=elem.h.ETYPE)
lattice.append(elem.v.name, "orbtrim", ('theta_y', vkick),
name=elem.v.name, etype=elem.v.ETYPE)
lattice.append(elem.name + "_2", "solenoid", ('L', elem.length / 2.0),
('aper', elem.aperture / 2.0), ('B', field),
name=elem.name, etype="SOL")
elif isinstance(elem, QuadElement):
gradient = 0.0
if settings is not None:
try:
gradient = settings[elem.name][elem.fields.gradient_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.gradient_phy}' setting not found for element: {elem.name}")
# error = self._get_error(elem)
lattice.append(elem.name, "quadrupole", ('L', elem.length),
('aper', elem.aperture / 2.0), ('B2', gradient),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
elif isinstance(elem, SextElement):
field = 0.0
if settings is not None:
try:
field = settings[elem.name][elem.fields.field_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.field_phy}' setting not found for element: {elem.name}")
step = self._get_config(elem.dtype, CONFIG_FLAME_SEXT_STEP,
DEFAULT_FLAME_SEXT_STEP)
dstkick = self._get_config(elem.dtype, CONFIG_FLAME_SEXT_DSTKICK,
DEFAULT_FLAME_SEXT_DSTKICK)
lattice.append(elem.name, "sextupole",
('L', elem.length), ('B3', field),
('dstkick', int(dstkick)), ('step', int(step)),
('aper', elem.aperture / 2.0),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
elif isinstance(elem, HCorElement):
hkick = 0.0
if settings is not None:
try:
hkick = settings[elem.name][elem.fields.angle_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.angle_phy}' setting not found for element: {elem.name}")
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
kick_gauge = self._get_config(elem.dtype, CONFIG_FLAME_COR_GAUGE, None)
if kick_gauge == "tm_kick":
lattice.append(elem.name, "orbtrim",
('realpara', 1), ('tm_xkick', hkick),
name=elem.name, etype=elem.ETYPE)
else:
lattice.append(elem.name, "orbtrim", ('theta_x', hkick),
name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
elif isinstance(elem, VCorElement):
vkick = 0.0
if settings is not None:
try:
vkick = settings[elem.name][elem.fields.angle_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.angle_phy}' setting not found for element: {elem.name}")
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
kick_gauge = self._get_config(elem.dtype, CONFIG_FLAME_COR_GAUGE, None)
if kick_gauge == "tm_kick":
lattice.append(elem.name, "orbtrim",
('realpara', 1), ('tm_ykick', vkick),
name=elem.name, etype=elem.ETYPE)
else:
lattice.append(elem.name, "orbtrim", ('theta_y', vkick),
name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
elif isinstance(elem, RotElement):
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
xyrotate = self._get_config(elem.name, CONFIG_FLAME_ROT_ANG, 0)
lattice.append(elem.name, "orbtrim",
("xyrotate", float(xyrotate)),
name=elem.name, etype=elem.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
elif isinstance(elem, CorElement):
hkick = 0.0
if settings is not None:
try:
hkick = settings[elem.h.name][elem.h.fields.angle_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.h.fields.angle_phy}' setting not found for element: {elem.name}")
vkick = 0.0
if settings is not None:
try:
vkick = settings[elem.v.name][elem.v.fields.angle_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.v.fields.angle_phy}' setting not found for element: {elem.name}")
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 1), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
kick_gauge = self._get_config(elem.dtype, CONFIG_FLAME_COR_GAUGE, None)
if kick_gauge == "tm_kick":
lattice.append(elem.h.name, "orbtrim",
('realpara', 1), ('tm_xkick', hkick),
name=elem.h.name, etype=elem.h.ETYPE)
lattice.append(elem.v.name, "orbtrim",
('realpara', 1), ('tm_ykick', vkick),
name=elem.v.name, etype=elem.v.ETYPE)
else:
lattice.append(elem.h.name, "orbtrim", ('theta_x', hkick),
name=elem.h.name, etype=elem.h.ETYPE)
lattice.append(elem.v.name, "orbtrim", ('theta_y', vkick),
name=elem.v.name, etype=elem.v.ETYPE)
if elem.length != 0.0:
lattice.append(_drift_name(elem.name, 2), "drift",
('L', elem.length / 2.0),
('aper', elem.apertureX / 2.0))
elif isinstance(elem, BendElement):
field = 0.0
if settings is not None:
try:
field = settings[elem.name][elem.fields.field_phy]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.field_phy}' setting not found for element: {elem.name}")
angle = 0.0
if settings is not None:
try:
angle = settings[elem.name][elem.fields.angle]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.angle}' setting not found for element: {elem.name}")
entr_angle = 0.0
if settings is not None:
try:
entr_angle = settings[elem.name][elem.fields.entrAngle]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.entrAngle}' setting not found for element: {elem.name}")
exit_angle = 0.0
if settings is not None:
try:
exit_angle = settings[elem.name][elem.fields.exitAngle]
except KeyError:
raise RuntimeError(
f"FlameLatticeFactory: '{elem.fields.exitAngle}' setting not found for element: {elem.name}")
split = self._get_config_split(elem.dtype)
if split < 3:
raise RuntimeError(f"FlameLatticeFactory: '{elem.name}' split must be greater than 3.")
focusing_comp = self._get_config(elem.dtype, CONFIG_FLAME_BEND_FOCUSING, None)
if focusing_comp is not None:
_LOGGER.debug(f"FlameLatticeFactory: focusing component of {elem.name} is defined.")
k = float(focusing_comp)
lattice.append(elem.name + "_1", "sbend", ('L', elem.length / split),
('aper', elem.aperture / 2.0), ('phi', angle / split),
('phi1', entr_angle), ('phi2', 0.0), ('bg', field),
('K', k), *align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
for i in range(2, split):
lattice.append(elem.name + "_" + str(i), "sbend", ('L', elem.length / split),
('aper', elem.aperture / 2.0), ('phi', angle / split),
('phi1', 0.0), ('phi2', 0.0), ('bg', field),
('K', k), *align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
lattice.append(elem.name + "_" + str(split), "sbend", ('L', elem.length / split),
('aper', elem.aperture / 2.0), ('phi', angle / split),
('phi1', 0.0), ('phi2', exit_angle), ('bg', field),
('K', k), *align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
else:
lattice.append(elem.name + "_1", "sbend", ('L', elem.length / split),
('aper', elem.aperture / 2.0), ('phi', angle / split),
('phi1', entr_angle), ('phi2', 0.0), ('bg', field),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
for i in range(2, split):
lattice.append(elem.name + "_" + str(i), "sbend", ('L', elem.length / split),
('aper', elem.aperture / 2.0), ('phi', angle / split),
('phi1', 0.0), ('phi2', 0.0), ('bg', field),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
lattice.append(elem.name + "_" + str(split), "sbend", ('L', elem.length / split),
('aper', elem.aperture / 2.0), ('phi', angle / split),
('phi1', 0.0), ('phi2', exit_angle), ('bg', field),
*align_error_conf.items(),
name=elem.name, etype=elem.ETYPE)
elif isinstance(elem, StripElement):
stripper_charge = self._get_config_array(elem.name, CONFIG_FLAME_STRIPPER_CHARGE, None, conv=float)
if stripper_charge is None:
raise RuntimeError(f"FlameLatticeFactory: Stripper charge not found: {elem.name}")
stripper_charge = numpy.array(stripper_charge)
stripper_count = self._get_config_array(elem.name, CONFIG_FLAME_STRIPPER_COUNT, None, conv=float)
if stripper_count is None:
raise RuntimeError(f"FlameLatticeFactory: Stripper count not found: {elem.name}")
stripper_count = numpy.array(stripper_count)
| |
for polygon in polylist:
count = 0
for point in polygon:
points.append(point)
count += 1
polycounts.append(count)
bounds = self._getBounds(points)
if self._useShort(bounds):
e = cls16(points, polycounts, bounds)
else:
e = cls(points, polycounts, bounds)
if not self._append(e):
return 0
return 1
def _appendHandle(self, e):
handle = self.dc.addObject(e)
if not self._append(e):
self.dc.popObject()
return 0
e.handle = handle
return handle
def GetStockObject(self, obj):
"""
Retrieve the handle for a predefined graphics object. Stock objects
include (at least) the following:
- WHITE_BRUSH
- LTGRAY_BRUSH
- GRAY_BRUSH
- DKGRAY_BRUSH
- BLACK_BRUSH
- NULL_BRUSH
- HOLLOW_BRUSH
- WHITE_PEN
- BLACK_PEN
- NULL_PEN
- OEM_FIXED_FONT
- ANSI_FIXED_FONT
- ANSI_VAR_FONT
- SYSTEM_FONT
- DEVICE_DEFAULT_FONT
- DEFAULT_PALETTE
- SYSTEM_FIXED_FONT
- DEFAULT_GUI_FONT
@param obj: number of stock object.
@return: handle of stock graphics object.
@rtype: int
@type obj: int
"""
if obj >= 0 and obj <= STOCK_LAST:
return obj | 0x80000000
raise IndexError("Undefined stock object.")
def SelectObject(self, handle):
"""
Make the given graphics object current.
@param handle: handle of graphics object to make current.
@return:
the handle of the current graphics object which obj replaces.
@rtype: int
@type handle: int
"""
return self._append(emr._SELECTOBJECT(self.dc, handle))
def DeleteObject(self, handle):
"""
Delete the given graphics object. Note that, now, only those contexts
into which the object has been selected get a delete object
records.
@param handle: handle of graphics object to delete.
@return: true if the object was successfully deleted.
@rtype: int
@type handle: int
"""
e = emr._DELETEOBJECT(self.dc, handle)
self.dc.removeObject(handle)
return self._append(e)
def CreatePen(self, style, width, color):
"""
Create a pen, used to draw lines and path outlines.
@param style: the style of the new pen, one of:
- PS_SOLID
- PS_DASH
- PS_DOT
- PS_DASHDOT
- PS_DASHDOTDOT
- PS_NULL
- PS_INSIDEFRAME
- PS_USERSTYLE
- PS_ALTERNATE
@param width: the width of the new pen.
@param color: (r,g,b) tuple or the packed integer L{color<RGB>} of the new pen.
@return: handle to the new pen graphics object.
@rtype: int
@type style: int
@type width: int
@type color: int
"""
return self._appendHandle(emr._CREATEPEN(style, width, _normalizeColor(color)))
def CreateSolidBrush(self, color):
"""
Create a solid brush used to fill polygons.
@param color: the L{color<RGB>} of the solid brush.
@return: handle to brush graphics object.
@rtype: int
@type color: int
"""
return self._appendHandle(
emr._CREATEBRUSHINDIRECT(color=_normalizeColor(color))
)
def CreateHatchBrush(self, hatch, color):
"""
Create a hatched brush used to fill polygons.
B{Note:} Currently appears unsupported in OpenOffice.
@param hatch: integer representing type of fill:
- HS_HORIZONTAL
- HS_VERTICAL
- HS_FDIAGONAL
- HS_BDIAGONAL
- HS_CROSS
- HS_DIAGCROSS
@type hatch: int
@param color: the L{color<RGB>} of the 'on' pixels of the brush.
@return: handle to brush graphics object.
@rtype: int
@type color: int
"""
return self._appendHandle(
emr._CREATEBRUSHINDIRECT(hatch=hatch, color=_normalizeColor(color))
)
def SetBkColor(self, color):
"""
Set the background color used for any transparent regions in fills or
hatched brushes.
B{Note:} Currently appears sporadically supported in OpenOffice.
@param color: background L{color<RGB>}.
@return: previous background L{color<RGB>}.
@rtype: int
@type color: int
"""
e = emr._SETBKCOLOR(_normalizeColor(color))
if not self._append(e):
return 0
return 1
def SetBkMode(self, mode):
"""
Set the background mode for interaction between transparent areas in
the region to be drawn and the existing background.
The choices for mode are:
- TRANSPARENT
- OPAQUE
B{Note:} Currently appears sporadically supported in OpenOffice.
@param mode: background mode.
@return: previous background mode.
@rtype: int
@type mode: int
"""
e = emr._SETBKMODE(mode)
if not self._append(e):
return 0
return 1
def SetPolyFillMode(self, mode):
"""
Set the polygon fill mode. Generally these modes produce
different results only when the edges of the polygons overlap
other edges.
@param mode: fill mode with the following options:
- ALTERNATE - fills area between odd and even numbered sides
- WINDING - fills all area as long as a point is between any two sides
@return: previous fill mode.
@rtype: int
@type mode: int
"""
e = emr._SETPOLYFILLMODE(mode)
if not self._append(e):
return 0
return 1
def SetMapMode(self, mode):
"""
Set the window mapping mode. This is the mapping between pixels in page space to pixels in device space. Page space is the coordinate system that is used for all the drawing commands -- it is how pixels are identified and figures are placed in the metafile. They are integer units.
Device space is the coordinate system of the final output, measured in physical dimensions such as mm, inches, or twips. It is this coordinate system that provides the scaling that makes metafiles into a scalable graphics format.
- MM_TEXT: each unit in page space is mapped to one pixel
- MM_LOMETRIC: 1 page unit = .1 mm in device space
- MM_HIMETRIC: 1 page unit = .01 mm in device space
- MM_LOENGLISH: 1 page unit = .01 inch in device space
- MM_HIENGLISH: 1 page unit = .001 inch in device space
- MM_TWIPS: 1 page unit = 1/20 point (or 1/1440 inch)
- MM_ISOTROPIC: 1 page unit = user defined ratio, but axes equally scaled
- MM_ANISOTROPIC: 1 page unit = user defined ratio, axes may be independently scaled
@param mode: window mapping mode.
@return: previous window mapping mode, or zero if error.
@rtype: int
@type mode: int
"""
e = emr._SETMAPMODE(mode)
if not self._append(e):
return 0
return 1
def SetViewportOrgEx(self, xv, yv):
"""
Set the origin of the viewport, which translates the origin of the
coordinate system by (xv,yv). A pixel drawn at (x,y) in the new
coordinate system will be displayed at (x+xv,y+yv) in terms of the
previous coordinate system.
Contrast this with L{SetWindowOrgEx}, which seems to be the opposite
translation. So, if in addition, the window origin is set to (xw,yw)
using L{SetWindowOrgEx}, a pixel drawn at (x,y) will be displayed at
(x-xw+xv,y-yw+yv) in terms of the original coordinate system.
@param xv: new x position of the viewport origin.
@param yv: new y position of the viewport origin.
@return: previous viewport origin
@rtype: 2-tuple (x,y) if successful, or None if unsuccessful
@type xv: int
@type yv: int
"""
e = emr._SETVIEWPORTORGEX(xv, yv)
if not self._append(e):
return None
old = (self.dc.viewport_x, self.dc.viewport_y)
self.dc.viewport_x = xv
self.dc.viewport_y = yv
return old
def GetViewportOrgEx(self):
"""
Get the origin of the viewport.
@return: returns the current viewport origin.
@rtype: 2-tuple (x,y)
"""
return (self.dc.viewport_x, self.dc.viewport_y)
def SetWindowOrgEx(self, xw, yw):
"""
Set the origin of the window, which translates the origin of the
coordinate system by (-xw,-yw). A pixel drawn at (x,y) in the new
coordinate system will be displayed at (x-xw,y-yw) in terms of the
previous coordinate system.
Contrast this with L{SetViewportOrgEx}, which seems to be the opposite
translation. So, if in addition, the viewport origin is set to
(xv,yv) using L{SetViewportOrgEx}, a pixel drawn at (x,y) will be
displayed at (x-xw+xv,y-yw+yv) in terms of the original coordinate
system.
@param xw: new x position of the window origin.
@param yw: new y position of the window origin.
@return: previous window origin
@rtype: 2-tuple (x,y) if successful, or None if unsuccessful
@type xw: int
@type yw: int
"""
e = emr._SETWINDOWORGEX(xw, yw)
if not self._append(e):
return None
old = (self.dc.window_x, self.dc.window_y)
self.dc.window_x = xw
self.dc.window_y = yw
return old
def GetWindowOrgEx(self):
"""
Get the origin of the window.
@return: returns the current window origin.
@rtype: 2-tuple (x,y)
"""
return (self.dc.window_x, self.dc.window_y)
def SetViewportExtEx(self, x, y):
"""
Set the dimensions of the viewport in device units. Device units are
physical dimensions, in millimeters. The total extent is equal to the
width is millimeters multiplied by the density of pixels per
millimeter in that dimension.
Note: this is only usable when L{SetMapMode} has been set to
MM_ISOTROPIC or MM_ANISOTROPIC.
@param x: new width of the viewport.
@param y: new height of the viewport.
@return: returns the previous size of the viewport.
@rtype: 2-tuple (width,height) if successful, or None if unsuccessful
@type x: int
@type y: int
"""
e = emr._SETVIEWPORTEXTEX(x, y)
if not self._append(e):
return None
old = (self.dc.viewport_ext_x, self.dc.viewport_ext_y)
self.dc.viewport_ext_x | |
# Source repository: Mikuana/oops_fhir
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["OperationOutcomeCodes"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class OperationOutcomeCodes:
"""
Operation Outcome Codes
Operation Outcome codes used by FHIR test servers (see Implementation
file translations.xml)
Status: draft - Version: 4.0.1
Copyright None
http://terminology.hl7.org/CodeSystem/operation-outcome
"""
delete_multiple_matches = CodeSystemConcept(
{
"code": "DELETE_MULTIPLE_MATCHES",
"designation": [
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Fout: er is meer dan één resultaat voor de conditionele delete",
}
],
"display": "Error: Multiple matches exist for the conditional delete",
}
)
"""
Error: Multiple matches exist for the conditional delete
"""
msg_auth_required = CodeSystemConcept(
{
"code": "MSG_AUTH_REQUIRED",
"designation": [
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Autenticazione richiesta prima di usare questo servizio",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Wymagana autentykacja przed użyciem tego serwisu",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Vous devez être authentifié avant de pouvoir utiliser ce service",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "U moet zich authenticeren voor gebruik van deze service",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "使用此服务前需认证",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Debe autenticarse antes de poder usar este servicio",
},
],
"display": "You must authenticate before you can use this service",
}
)
"""
You must authenticate before you can use this service
"""
msg_bad_format = CodeSystemConcept(
{
"code": "MSG_BAD_FORMAT",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Неверный синтакс: "%s" должен быть %s',
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Sintassi Errata: "%s" deve essere un %s\'',
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Błąd składni: "%s" powinno być %s\'',
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Erreur de Syntaxe : "%s" doit être un %s',
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Verkeerde syntax: "%s" moet een %s zijn',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '句法错误: "%s" 必须是一个 %s\'',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Sintaxis Incorrecta: "%s" debe de ser un %s\'',
},
],
"display": 'Bad Syntax: "%s" must be a %s\'',
}
)
"""
Bad Syntax: "%s" must be a %s'
"""
msg_bad_syntax = CodeSystemConcept(
{
"code": "MSG_BAD_SYNTAX",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Неверный синтакс: %s",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Sintassi errata in %s",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Błąd składni w %s",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Erreur de Syntaxe dans %s",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Verkeerde syntax in %s",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "%s 中句法错误",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Sintaxis Incorrecta en %s",
},
],
"display": "Bad Syntax in %s",
}
)
"""
Bad Syntax in %s
"""
# HL7 FHIR operation-outcome message concept: a feed entry whose content type
# could not be parsed. Each "designation" entry carries a localized display
# string; "%s" is substituted with the offending content type at runtime.
msg_cant_parse_content = CodeSystemConcept(
    {
        "code": "MSG_CANT_PARSE_CONTENT",
        "designation": [
            {
                "language": "it",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Impossibile effettuare il parsing del feed (tipo del contenuto della entry = "%s")',
            },
            {
                "language": "pl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Błąd parsowania (typ zawartości wejściowej = "%s")',
            },
            {
                "language": "fr",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Impossible d'analyser le flux (type de contenu de l'entrée = \"%s\")",
            },
            {
                "language": "nl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Kan feed niet verwerken (contenttype inhoud = "%s")',
            },
            {
                "language": "zh",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": '无法解析feed (条目的内容类型 = "%s")',
            },
            {
                "language": "es",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'No se pudo parsear el feed (el tipo de contenido de la entry = "%s")',
            },
        ],
        "display": 'Unable to parse feed (entry content type = "%s")',
    }
)
"""
Unable to parse feed (entry content type = "%s")
"""
# Message concept: the feed's root XML element was not recognized, so the
# feed could not be parsed at all. "%s" is the root element name.
msg_cant_parse_root = CodeSystemConcept(
    {
        "code": "MSG_CANT_PARSE_ROOT",
        "designation": [
            {
                "language": "ru",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Не удалось разобрать данные (корневой элемент = "%s")',
            },
            {
                "language": "it",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Impossibile effettuare il parsing del feed (nome elemento root = "%s")',
            },
            {
                "language": "pl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Błąd parsowania (nazwa elementu root = "%s")',
            },
            {
                "language": "fr",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Impossible d'analyser le flux (nom de l'élément racine = \"%s\")",
            },
            {
                "language": "nl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'Kan feed niet verwerken (rootelementnaam = "%s")',
            },
            {
                "language": "zh",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": '无法解析feed (根元素名 = "%s")',
            },
            {
                "language": "es",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": 'No se pudo parsear el feed (nombre del elemento raiz = "%s")',
            },
        ],
        "display": 'Unable to parse feed (root element name = "%s")',
    }
)
"""
Unable to parse feed (root element name = "%s")
"""
# Message concept: a new resource was successfully created.
# Only a Dutch translation is provided in the source terminology.
msg_created = CodeSystemConcept(
    {
        "code": "MSG_CREATED",
        "designation": [
            {
                "language": "nl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Nieuwe resource gemaakt",
            }
        ],
        "display": "New resource created",
    }
)
"""
New resource created
"""
# Message concept: a Date value was not in the required XML date format.
# "%s" is the rejected date literal.
msg_date_format = CodeSystemConcept(
    {
        "code": "MSG_DATE_FORMAT",
        "designation": [
            {
                "language": "ru",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Значение Date %s в неверном формате (требуется Xml Date формат)",
            },
            {
                "language": "it",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Il valore %s per la data non è nel formato corretto (richiesto il Formato Data Xml)",
            },
            {
                "language": "pl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Niepoprawny format wartości daty %s (wymagany format XML)",
            },
            {
                "language": "fr",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Le format de la date %s est incorrect (format Date Xml attendu)",
            },
            {
                "language": "nl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "De Datum-waarde %s heeft niet de juiste structuur (Xml Date vereist)",
            },
            {
                "language": "zh",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "日期的值 %s 格式不正确 (要求是Xml Date格式)",
            },
            {
                "language": "es",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "El valor de la fecha %s no está en el formato correcto (se requiere un formato de fecha Xml)",
            },
        ],
        "display": "The Date value %s is not in the correct format (Xml Date Format required)",
    }
)
"""
The Date value %s is not in the correct format (Xml Date Format required)
"""
# Message concept: the requested resource exists but has been deleted.
msg_deleted = CodeSystemConcept(
    {
        "code": "MSG_DELETED",
        "designation": [
            {
                "language": "ru",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Данный ресурс был удалён",
            },
            {
                "language": "it",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Questa risorsa è stata cancellata",
            },
            {
                "language": "pl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Ten zasób został usunięty",
            },
            {
                "language": "fr",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "La ressource a été supprimée",
            },
            {
                "language": "nl",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Deze resource is verwijderd",
            },
            {
                "language": "zh",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "该资源已删除",
            },
            {
                "language": "es",
                "use": {
                    "code": "display",
                    "system": "http://terminology.hl7.org/CodeSystem/designation-usage",
                },
                "value": "Este recurso ha sido borrado",
            },
        ],
        "display": "This resource has been deleted",
    }
)
"""
This resource has been deleted
"""
msg_deleted_done = CodeSystemConcept(
{
"code": "MSG_DELETED_DONE",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Ресурс удалён",
},
{
"language": "it",
"use": {
"code": | |
from pathlib import Path
import pytest
from graviton.blackbox import (
DryRunEncoder as Encoder,
DryRunExecutor as Executor,
DryRunProperty as DRProp,
DryRunInspector as Inspector,
ExecutionMode,
mode_has_property,
)
from graviton.invariant import Invariant
from tests.clients import get_algod
# Root of the test fixtures; TEAL sources and CSV reports live under ./tests.
TESTS_DIR = Path.cwd() / "tests"
def fac_with_overflow(n):
    """Return n! computed iteratively, saturating at 20!.

    For n above 20 the constant 20! is returned instead of the true
    factorial (the saturation point, mirroring the overflow behavior the
    on-chain program under test exhibits — hence the name). For n below 2
    (including negatives) the result is 1.
    """
    if n > 20:
        return 2432902008176640000  # == 20!
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def fib(n):
    """Return the n-th Fibonacci number, with fib(0) == 0 and fib(1) == 1."""
    previous, current = 0, 1
    steps = 0
    while steps < n:
        previous, current = current, previous + current
        steps += 1
    return previous
def fib_cost(args):
    """Return the expected dry-run opcode cost of the Fibonacci program.

    The model is a flat base cost of 17 plus 31 units per fib(n - 1) for
    every n from 1 through args[0]. The Fibonacci sequence is generated
    inline so the cost accumulates in a single pass.
    """
    total = 17  # base cost independent of the input
    previous, current = 0, 1  # previous == fib(k - 1) at step k
    for _ in range(1, args[0] + 1):
        total += 31 * previous
        previous, current = current, previous + current
    return total
def test_singleton_invariants():
    """Exercise every Inspector property on four dry-runs of one TEAL program.

    The same squaring subroutine is executed as an app, an app with logging,
    a logicsig, and a deliberately invalid logging logicsig; each dry-run
    property (cost, logs, scratch, stack, status, errors) is then asserted
    exactly for all four runs.
    """
    algod = get_algod()
    algod_status = algod.status()
    assert algod_status
    # TEAL template: the first format slot selects the argument source
    # (app args vs. lsig args); the second optionally injects extra code
    # after the squaring subroutine returns.
    teal_fmt = """#pragma version 6
{} 0
btoi
callsub square_0
{}
return
// square
square_0:
store 0
load 0
pushint 2 // 2
exp
retsub"""
    teal_app, teal_lsig = list(
        map(lambda s: teal_fmt.format(s, ""), ["txna ApplicationArgs", "arg"])
    )
    # Logging variants: `log` is rejected in signature mode (see the error
    # message asserted at the bottom), so bad_teal_lsig is expected to fail.
    teal_app_log, bad_teal_lsig = list(
        map(
            lambda s: teal_fmt.format(
                s,
                """store 1
load 1
itob
log
load 1""",
            ),
            ["txna ApplicationArgs", "arg"],
        )
    )
    x = 9
    args = [x]
    app_res, app_log_res = list(
        map(
            lambda teal: Executor.dryrun_app(algod, teal, args),
            [teal_app, teal_app_log],
        )
    )
    lsig_res, bad_lsig_res = list(
        map(
            lambda teal: Executor.dryrun_logicsig(algod, teal, args),
            [teal_lsig, bad_teal_lsig],
        )
    )
    assert isinstance(app_res, Inspector)
    assert isinstance(app_log_res, Inspector)
    assert isinstance(lsig_res, Inspector)
    assert isinstance(bad_lsig_res, Inspector)
    assert app_res.mode == ExecutionMode.Application
    assert app_log_res.mode == ExecutionMode.Application
    assert lsig_res.mode == ExecutionMode.Signature
    assert bad_lsig_res.mode == ExecutionMode.Signature

    def prop_assert(dr_resp, actual, expected):
        # On failure, surface the full dry-run report to ease debugging.
        assert expected == actual, dr_resp.report(
            args, f"expected {expected} but got {actual}"
        )

    prop_assert(app_res, app_res.cost(), 9)
    prop_assert(app_log_res, app_log_res.cost(), 14)
    prop_assert(lsig_res, lsig_res.cost(), None)
    prop_assert(app_res, app_res.last_log(), None)
    prop_assert(app_log_res, app_log_res.last_log(), (x**2).to_bytes(8, "big").hex())
    prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x**2))
    prop_assert(lsig_res, lsig_res.last_log(), None)
    prop_assert(app_res, app_res.final_scratch(), {0: x})
    prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x**2})
    prop_assert(lsig_res, lsig_res.final_scratch(), {0: x})
    prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x**2})
    prop_assert(app_res, app_res.stack_top(), x**2)
    prop_assert(app_log_res, app_log_res.stack_top(), x**2)
    prop_assert(lsig_res, lsig_res.stack_top(), x**2)
    prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x**2))
    prop_assert(app_res, app_res.max_stack_height(), 2)
    prop_assert(app_log_res, app_log_res.max_stack_height(), 2)
    prop_assert(lsig_res, lsig_res.max_stack_height(), 2)
    prop_assert(bad_lsig_res, bad_lsig_res.max_stack_height(), 2)
    prop_assert(app_res, app_res.status(), "PASS")
    prop_assert(app_log_res, app_log_res.status(), "PASS")
    prop_assert(lsig_res, lsig_res.status(), "PASS")
    prop_assert(bad_lsig_res, bad_lsig_res.status(), "REJECT")
    prop_assert(app_res, app_res.passed(), True)
    prop_assert(app_log_res, app_log_res.passed(), True)
    prop_assert(lsig_res, lsig_res.passed(), True)
    prop_assert(bad_lsig_res, bad_lsig_res.passed(), False)
    prop_assert(app_res, app_res.rejected(), False)
    prop_assert(app_log_res, app_log_res.rejected(), False)
    prop_assert(lsig_res, lsig_res.rejected(), False)
    prop_assert(bad_lsig_res, bad_lsig_res.rejected(), True)
    prop_assert(app_res, app_res.error(), False)
    prop_assert(app_log_res, app_log_res.error(), False)
    prop_assert(lsig_res, lsig_res.error(), False)
    prop_assert(bad_lsig_res, bad_lsig_res.error(), True)
    # error() also supports substring matching against the error message:
    assert bad_lsig_res.error(
        contains="logic 0 failed at line 7: log not allowed in current mode"
    )
    prop_assert(bad_lsig_res, bad_lsig_res.error(contains="log not allowed"), True)
    prop_assert(bad_lsig_res, bad_lsig_res.error(contains="WRONG PATTERN"), False)
    prop_assert(app_res, app_res.error_message(), None)
    prop_assert(app_log_res, app_log_res.error_message(), None)
    prop_assert(lsig_res, lsig_res.error_message(), None)
    assert (
        "logic 0 failed at line 7: log not allowed in current mode"
        in bad_lsig_res.error_message()
    )
# Scenario table consumed by test_app_with_report: maps a TEAL file basename
# to the inputs it is dry-run with and, per DryRunProperty, the invariant the
# results must satisfy. An invariant may be:
#   * a constant - asserted verbatim against every run;
#   * a dict     - keyed by the input tuple, giving a per-input expectation;
#   * a lambda   - taking (args) or (args, actual), either computing the
#                  expected value or returning a bool verdict directly.
APP_SCENARIOS = {
    "app_exp": {
        "inputs": [()],
        # since only a single input, just assert a constant in each case
        "invariants": {
            DRProp.cost: 11,
            DRProp.lastLog: Encoder.hex(2**10),
            # dicts have a special meaning as invariants. So in the case of "finalScratch"
            # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around
            DRProp.finalScratch: lambda _: {0: 2**10},
            DRProp.stackTop: 2**10,
            DRProp.maxStackHeight: 2,
            DRProp.status: "PASS",
            DRProp.passed: True,
            DRProp.rejected: False,
            DRProp.errorMessage: None,
        },
    },
    "app_square_byref": {
        "inputs": [(i,) for i in range(100)],
        "invariants": {
            DRProp.cost: lambda _, actual: 20 < actual < 22,
            DRProp.lastLog: Encoder.hex(1337),
            # due to dry-run artifact of not reporting 0-valued scratchvars,
            # we have a special case for n=0:
            DRProp.finalScratch: lambda args, actual: (
                {2, 1337, (args[0] ** 2 if args[0] else 2)}
            ).issubset(set(actual.values())),
            DRProp.stackTop: 1337,
            DRProp.maxStackHeight: 3,
            DRProp.status: "PASS",
            DRProp.passed: True,
            DRProp.rejected: False,
            DRProp.errorMessage: None,
        },
    },
    "app_square": {
        "inputs": [(i,) for i in range(100)],
        "invariants": {
            DRProp.cost: 14,
            DRProp.lastLog: {
                # since execution REJECTS for 0, expect last log for this case to be None
                (i,): Encoder.hex(i * i) if i else None
                for i in range(100)
            },
            DRProp.finalScratch: lambda args: (
                {0: args[0], 1: args[0] ** 2} if args[0] else {}
            ),
            DRProp.stackTop: lambda args: args[0] ** 2,
            DRProp.maxStackHeight: 2,
            DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT",
            DRProp.passed: lambda i: i[0] > 0,
            DRProp.rejected: lambda i: i[0] == 0,
            DRProp.errorMessage: None,
        },
    },
    "app_swap": {
        "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")],
        "invariants": {
            DRProp.cost: 27,
            DRProp.lastLog: Encoder.hex(1337),
            DRProp.finalScratch: lambda args: {
                0: 4,
                1: 5,
                2: Encoder.hex0x(args[0]),
                3: 1337,
                4: Encoder.hex0x(args[1]),
                5: Encoder.hex0x(args[0]),
            },
            DRProp.stackTop: 1337,
            DRProp.maxStackHeight: 2,
            DRProp.status: "PASS",
            DRProp.passed: True,
            DRProp.rejected: False,
            DRProp.errorMessage: None,
        },
    },
    "app_string_mult": {
        "inputs": [("xyzw", i) for i in range(100)],
        "invariants": {
            DRProp.cost: lambda args: 30 + 15 * args[1],
            DRProp.lastLog: (
                lambda args: Encoder.hex(args[0] * args[1]) if args[1] else None
            ),
            # due to dryrun 0-scratchvar artifact, special case for i == 0:
            DRProp.finalScratch: lambda args: (
                {
                    0: 5,
                    1: args[1],
                    2: args[1] + 1,
                    3: Encoder.hex0x(args[0]),
                    4: Encoder.hex0x(args[0] * args[1]),
                    5: Encoder.hex0x(args[0] * args[1]),
                }
                if args[1]
                else {
                    0: 5,
                    2: args[1] + 1,
                    3: Encoder.hex0x(args[0]),
                }
            ),
            DRProp.stackTop: lambda args: len(args[0] * args[1]),
            DRProp.maxStackHeight: lambda args: 3 if args[1] else 2,
            DRProp.status: lambda args: ("PASS" if 0 < args[1] < 45 else "REJECT"),
            DRProp.passed: lambda args: 0 < args[1] < 45,
            DRProp.rejected: lambda args: 0 >= args[1] or args[1] >= 45,
            DRProp.errorMessage: None,
        },
    },
    "app_oldfac": {
        "inputs": [(i,) for i in range(25)],
        "invariants": {
            DRProp.cost: lambda args, actual: (
                actual - 40 <= 17 * args[0] <= actual + 40
            ),
            DRProp.lastLog: lambda args: (
                Encoder.hex(fac_with_overflow(args[0])) if args[0] < 21 else None
            ),
            DRProp.finalScratch: lambda args: (
                {0: args[0], 1: fac_with_overflow(args[0])}
                if 0 < args[0] < 21
                else (
                    {0: min(21, args[0])}
                    if args[0]
                    else {1: fac_with_overflow(args[0])}
                )
            ),
            DRProp.stackTop: lambda args: fac_with_overflow(args[0]),
            DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]),
            DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT",
            DRProp.passed: lambda args: args[0] < 21,
            DRProp.rejected: lambda args: args[0] >= 21,
            DRProp.errorMessage: lambda args, actual: (
                actual is None if args[0] < 21 else "overflowed" in actual
            ),
        },
    },
    "app_slow_fibonacci": {
        "inputs": [(i,) for i in range(18)],
        "invariants": {
            DRProp.cost: lambda args: (fib_cost(args) if args[0] < 17 else 70_000),
            DRProp.lastLog: lambda args: (
                Encoder.hex(fib(args[0])) if 0 < args[0] < 17 else None
            ),
            DRProp.finalScratch: lambda args, actual: (
                actual == {0: args[0], 1: fib(args[0])}
                if 0 < args[0] < 17
                else (True if args[0] >= 17 else actual == {})
            ),
            # we declare to "not care" about the top of the stack for n >= 17
            DRProp.stackTop: lambda args, actual: (
                actual == fib(args[0]) if args[0] < 17 else True
            ),
            # similarly, we don't care about max stack height for n >= 17
            DRProp.maxStackHeight: lambda args, actual: (
                actual == max(2, 2 * args[0]) if args[0] < 17 else True
            ),
            DRProp.status: lambda args: "PASS" if 0 < args[0] < 8 else "REJECT",
            DRProp.passed: lambda args: 0 < args[0] < 8,
            DRProp.rejected: lambda args: 0 >= args[0] or args[0] >= 8,
            DRProp.errorMessage: lambda args, actual: (
                actual is None
                if args[0] < 17
                else "dynamic cost budget exceeded" in actual
            ),
        },
    },
}
@pytest.mark.parametrize("filebase", APP_SCENARIOS.keys())
def test_app_with_report(filebase: str):
    """Dry-run one TEAL app scenario, dump a CSV report, and check invariants.

    The TEAL source is read from ./tests/teal/<filebase>.teal, dry-run over
    every input tuple declared in APP_SCENARIOS[filebase], and each declared
    dry-run property is validated against its invariant.
    """
    mode, scenario = ExecutionMode.Application, APP_SCENARIOS[filebase]
    # 0. Validate that the scenarios are well defined:
    inputs, invariants = Invariant.inputs_and_invariants(
        scenario, mode, raw_predicates=True  # type: ignore
    )
    algod = get_algod()
    # 1. Read the TEAL from ./tests/teal/*.teal
    path = TESTS_DIR / "teal"
    case_name = filebase
    tealpath = path / f"{filebase}.teal"
    with open(tealpath, "r") as f:
        teal = f.read()
    print(
        f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is:
-------
{teal}
-------"""
    )
    # 2. Run the requests to obtain sequence of Dryrun responses:
    dryrun_results = Executor.dryrun_app_on_sequence(algod, teal, inputs)  # type: ignore
    # 3. Generate statistical report of all the runs:
    csvpath = path / f"{filebase}.csv"
    with open(csvpath, "w") as f:
        f.write(Inspector.csv_report(inputs, dryrun_results))
    print(f"Saved Dry Run CSV report to {csvpath}")
    # 4. Sequential invariants (if provided any)
    for i, type_n_invariant in enumerate(invariants.items()):
        dr_property, invariant = type_n_invariant
        # Fail fast if the scenario declares a property this mode cannot report.
        assert mode_has_property(
            mode, dr_property
        ), f"assert_type {dr_property} is not applicable for {mode}. Please REMOVE or MODIFY"
        invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{dr_property}")
        print(
            f"{i+1}. Semantic invariant for {case_name}-{mode}: {dr_property} <<{invariant}>>"
        )
        invariant.validates(dr_property, inputs, dryrun_results)
# NOTE: logic sig dry runs are missing some information when compared with app dry runs.
# Therefore, certain invariants don't make sense for logic | |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# this file. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
# <NAME>
#
# Based on rulesets developed by:
# <NAME>
# <NAME>
# <NAME>
import random
import numpy as np
import datetime
def WMUH_config(BIM):
"""
Rules to identify a HAZUS WMUH configuration based on BIM data
Parameters
----------
BIM: dictionary
Information about the building characteristics.
Returns
-------
config: str
A string that identifies a specific configration within this buidling
class.
"""
year = BIM['year_built'] # just for the sake of brevity
# Secondary Water Resistance (SWR)
SWR = 0 # Default
if year > 2000:
if BIM['roof_shape'] == 'flt':
SWR = 'null' # because SWR is not a question for flat roofs
elif BIM['roof_shape'] in ['gab','hip']:
SWR = int(random.random() < 0.6)
elif year > 1987:
if BIM['roof_shape'] == 'flt':
SWR = 'null' # because SWR is not a question for flat roofs
elif (BIM['roof_shape'] == 'gab') or (BIM['roof_shape'] == 'hip'):
if BIM['roof_slope'] < 0.33:
SWR = int(True)
else:
SWR = int(BIM['avg_jan_temp'] == 'below')
else:
# year <= 1987
if BIM['roof_shape'] == 'flt':
SWR = 'null' # because SWR is not a question for flat roofs
else:
SWR = int(random.random() < 0.3)
# Roof cover & Roof quality
# Roof cover and quality do not apply to gable and hip roofs
if BIM['roof_shape'] in ['gab', 'hip']:
roof_cover = 'null'
roof_quality = 'null'
# NJ Building Code Section 1507 (in particular 1507.10 and 1507.12) address
# Built Up Roofs and Single Ply Membranes. However, the NJ Building Code
# only addresses installation and material standards of different roof
# covers, but not in what circumstance each must be used.
# SPMs started being used in the 1960s, but different types continued to be
# developed through the 1980s. Today, single ply membrane roofing is the
# most popular flat roof option. BURs have been used for over 100 years,
# and although they are still used today, they are used less than SPMs.
# Since there is no available ruleset to be taken from the NJ Building
# Code, the ruleset is based off this information.
# We assume that all flat roofs built before 1975 are BURs and all roofs
# built after 1975 are SPMs.
# Nothing in NJ Building Code or in the Hazus manual specifies what
# constitutes “good” and “poor” roof conditions, so ruleset is dependant
# on the age of the roof and average lifespan of BUR and SPM roofs.
# We assume that the average lifespan of a BUR roof is 30 years and the
# average lifespan of a SPM is 35 years. Therefore, BURs installed before
# 1990 are in poor condition, and SPMs installed before 1985 are in poor
# condition.
else:
if year >= 1975:
roof_cover = 'spm'
if BIM['year_built'] >= (datetime.datetime.now().year - 35):
roof_quality = 'god'
else:
roof_quality = 'por'
else:
# year < 1975
roof_cover = 'bur'
if BIM['year_built'] >= (datetime.datetime.now().year - 30):
roof_quality = 'god'
else:
roof_quality = 'por'
# Roof Deck Attachment (RDA)
# IRC 2009-2015:
# Requires 8d nails (with spacing 6”/12”) for sheathing thicknesses between
# ⅜”-1”, see Table 2304.10, Line 31. Fastener selection is contingent on
# thickness of sheathing in building codes.
# Wind Speed Considerations taken from Table 2304.6.1, Maximum Nominal
# Design Wind Speed, Vasd, Permitted For Wood Structural Panel Wall
# Sheathing Used to Resist Wind Pressures. Typical wall stud spacing is 16
# inches, according to table 2304.6.3(4). NJ code defines this with respect
# to exposures B and C only. These are mapped to HAZUS categories based on
# roughness length in the ruleset herein.
# The base rule was then extended to the exposures closest to suburban and
# light suburban, even though these are not considered by the code.
if year > 2009:
if BIM['terrain'] >= 35: # suburban or light trees
if BIM['V_ult'] > 168.0:
RDA = '8s' # 8d @ 6"/6" 'D'
else:
RDA = '8d' # 8d @ 6"/12" 'B'
else: # light suburban or open
if BIM['V_ult'] > 142.0:
RDA = '8s' # 8d @ 6"/6" 'D'
else:
RDA = '8d' # 8d @ 6"/12" 'B'
# IRC 2000-2006:
# Table 2304.9.1, Line 31 of the 2006
# NJ IBC requires 8d nails (with spacing 6”/12”) for sheathing thicknesses
# of ⅞”-1”. Fastener selection is contingent on thickness of sheathing in
# building codes. Table 2308.10.1 outlines the required rating of approved
# uplift connectors, but does not specify requirements that require a
# change of connector at a certain wind speed.
# Thus, all RDAs are assumed to be 8d @ 6”/12”.
elif year > 2000:
RDA = '8d' # 8d @ 6"/12" 'B'
# BOCA 1996:
# The BOCA 1996 Building Code Requires 8d nails (with spacing 6”/12”) for
# roof sheathing thickness up to 1". See Table 2305.2, Section 4.
# Attachment requirements are given based on sheathing thickness, basic
# wind speed, and the mean roof height of the building.
elif year > 1996:
if (BIM['V_ult'] >= 103 ) and (BIM['mean_roof_height'] >= 25.0):
RDA = '8s' # 8d @ 6"/6" 'D'
else:
RDA = '8d' # 8d @ 6"/12" 'B'
# BOCA 1993:
# The BOCA 1993 Building Code Requires 8d nails (with spacing 6”/12”) for
# sheathing thicknesses of 19/32 inches or greater, and 6d nails (with
# spacing 6”/12”) for sheathing thicknesses of ½ inches or less.
# See Table 2305.2, Section 4.
elif year > 1993:
if BIM['sheathing_t'] <= 0.5:
RDA = '6d' # 6d @ 6"/12" 'A'
else:
RDA = '8d' # 8d @ 6"/12" 'B'
else:
# year <= 1993
if BIM['sheathing_t'] <= 0.5:
RDA = '6d' # 6d @ 6"/12" 'A'
else:
RDA = '8d' # 8d @ 6"/12" 'B'
# Roof-Wall Connection (RWC)
# IRC 2000-2015:
# 1507.2.8.1 High Wind Attachment. Underlayment applied in areas subject
# to high winds (Vasd greater than 110 mph as determined in accordance
# with Section 1609.3.1) shall be applied with corrosion-resistant
# fasteners in accordance with the manufacturer’s instructions. Fasteners
# are to be applied along the overlap not more than 36 inches on center.
# Underlayment installed where Vasd, in accordance with section 1609.3.1
# equals or exceeds 120 mph shall be attached in | |
"""EdgeTransformer class to convert edges to edge embeddings."""
from typing import List, Union, Optional
import numpy as np
import pandas as pd
from userinput.utils import closest
from ensmallen import express_measures
from embiggen.utils.abstract_models import format_list
from embiggen.embedding_transformers.node_transformer import NodeTransformer
def get_hadamard_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their element-wise product.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the Hadamard (element-wise) product.
    """
    return source_node_embedding * destination_node_embedding
def get_sum_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via element-wise addition.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the element-wise sum of the two embeddings.
    """
    return source_node_embedding + destination_node_embedding
def get_average_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their element-wise mean.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the element-wise average of the two embeddings.
    """
    # Sum then halve; always produces a floating-point result.
    return (source_node_embedding + destination_node_embedding) / 2.0
def get_l1_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their signed element-wise difference.

    Note that the result keeps the sign of each component; see
    the absolute-L1 variant for the unsigned form.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the L1 edge embedding (source minus destination).
    """
    return source_node_embedding - destination_node_embedding
def get_absolute_l1_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their absolute element-wise difference.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the absolute L1 edge embedding.
    """
    return np.abs(source_node_embedding - destination_node_embedding)
def get_squared_l2_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their squared element-wise difference.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the squared L2 edge embedding.
    """
    # The float exponent promotes integer inputs to floating point,
    # matching the behavior of np.power(..., 2.0).
    return (source_node_embedding - destination_node_embedding) ** 2.0
def get_l2_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via the square root of their squared difference.

    This is computed per component (not reduced over the feature axis);
    see get_l2_distance for the scalar per-edge distance.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the L2 edge embedding.
    """
    return np.sqrt(
        (source_node_embedding - destination_node_embedding) ** 2.0
    )
def get_l2_distance(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Compute the Euclidean (L2) distance between the paired embeddings.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy column vector (one row per edge) with the L2 distances.
    """
    difference = source_node_embedding - destination_node_embedding
    # Reduce over the feature axis, then shape as a single column.
    return np.sqrt((difference ** 2.0).sum(axis=1)).reshape((-1, 1))
def get_cosine_similarity(
    embedding: np.ndarray,
    source_node_ids: np.ndarray,
    destination_node_ids: np.ndarray
) -> np.ndarray:
    """Compute the cosine similarity between the indexed embedding rows.

    Parameters
    --------------------------
    embedding: np.ndarray
        Embedding matrix, one row per node.
    source_node_ids: np.ndarray
        IDs (row indices) of the edge source nodes.
    destination_node_ids: np.ndarray
        IDs (row indices) of the edge destination nodes.

    Returns
    --------------------------
    Numpy column vector (one row per edge) with the cosine similarities.
    """
    # The unchecked kernel expects C-contiguous index arrays;
    # ascontiguousarray is a no-op when the input is already contiguous.
    source_node_ids = np.ascontiguousarray(source_node_ids)
    destination_node_ids = np.ascontiguousarray(destination_node_ids)
    return express_measures.cosine_similarity_from_indices_unchecked(
        matrix=embedding,
        sources=source_node_ids,
        destinations=destination_node_ids
    ).reshape((-1, 1))
def get_concatenate_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings by horizontal concatenation.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the source and destination embeddings side by side.
    """
    embedding_pair = (source_node_embedding, destination_node_embedding)
    return np.hstack(embedding_pair)
def get_min_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their element-wise minimum.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the element-wise minimum of the two embeddings.
    """
    return np.minimum(
        source_node_embedding,
        destination_node_embedding
    )
def get_max_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Combine the two node embeddings via their element-wise maximum.

    Parameters
    --------------------------
    source_node_embedding: np.ndarray
        Embedding vector(s) of the edge source node.
    destination_node_embedding: np.ndarray
        Embedding vector(s) of the edge destination node.

    Returns
    --------------------------
    Numpy array with the element-wise maximum of the two embeddings.
    """
    return np.maximum(
        source_node_embedding,
        destination_node_embedding
    )
def get_indices_edge_embedding(
    source_node_embedding: np.ndarray,
    destination_node_embedding: np.ndarray
) -> np.ndarray:
    """Pair the raw values column-wise instead of computing an embedding.

    Used as the fallback when no edge-embedding method is requested: each
    output row holds the (source, destination) pair.
    """
    stacked = np.vstack((source_node_embedding, destination_node_embedding))
    return stacked.T
class EdgeTransformer:
"""EdgeTransformer class to convert edges to edge embeddings."""
methods = {
"Hadamard": get_hadamard_edge_embedding,
"Sum": get_sum_edge_embedding,
"Average": get_average_edge_embedding,
"L1": get_l1_edge_embedding,
"AbsoluteL1": get_absolute_l1_edge_embedding,
"SquaredL2": get_squared_l2_edge_embedding,
"L2": get_l2_edge_embedding,
"Concatenate": get_concatenate_edge_embedding,
"Min": get_min_edge_embedding,
"Max": get_max_edge_embedding,
"L2Distance": get_l2_distance,
"CosineSimilarity": get_cosine_similarity,
None: get_indices_edge_embedding,
}
def __init__(
self,
method: str = "Hadamard",
aligned_node_mapping: bool = False,
):
"""Create new EdgeTransformer object.
Parameters
------------------------
method: str = "Hadamard",
Method to use for the embedding.
If None is used, we return instead the numeric tuples.
Can either be 'Hadamard', 'Min', 'Max', 'Sum', 'Average',
'L1', 'AbsoluteL1', 'SquaredL2', 'L2' or 'Concatenate'.
aligned_node_mapping: bool = False,
This parameter specifies whether the mapping of the embeddings nodes
matches the internal node mapping of the given graph.
If these two mappings do not match, the generated edge embedding
will be meaningless.
"""
if isinstance(method, str) and method.lower() not in [
None if method_name is None else method_name.lower()
for method_name in EdgeTransformer.methods
]:
raise ValueError((
"Given method '{}' is not supported. "
"Supported methods are {}, or alternatively a lambda. "
"Maybe you meant {}?"
).format(
method,
format_list(
[method for method in EdgeTransformer.methods.keys() if method is not None]),
closest(method, [
method_name
for method_name in EdgeTransformer.methods
if method_name is not None
])
))
self._transformer = NodeTransformer(
numeric_node_ids=method is None,
aligned_node_mapping=aligned_node_mapping,
)
self._method_name = method
if self._method_name is None:
self._method = EdgeTransformer.methods[None]
else:
self._method = {
None if method_name is None else method_name.lower(): callback
for method_name, callback in EdgeTransformer.methods.items()
}[self._method_name.lower()]
@property
def numeric_node_ids(self) -> bool:
"""Return whether the transformer returns numeric node IDs."""
return self._transformer.numeric_node_ids
@property
def method(self) -> str:
"""Return the used edge embedding method."""
return self._method_name
def fit(self, node_feature: Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]]):
"""Fit the model.
Parameters
-------------------------
node_feature: Union[pd.DataFrame, np.ndarray, List[Union[pd.DataFrame, np.ndarray]]],
Node feature to use to fit the transformer.
Raises
-------------------------
ValueError
If the given method is None there is no need to call the fit method.
"""
if self._method is None:
raise ValueError(
"There is no need to call the fit when edge method is None, "
"as the transformer will exclusively return the numeric node "
"indices and not any node feature."
)
self._transformer.fit(node_feature)
def transform(
self,
sources: Union[List[str], List[int]],
destinations: Union[List[str], List[int]],
edge_features: Optional[Union[np.ndarray, List[np.ndarray]]] = None,
) -> np.ndarray:
"""Return embedding for given edges using provided method.
Parameters
--------------------------
sources:Union[List[str], List[int]]
List of source nodes whose embedding is to be returned.
destinations:Union[List[str], List[int]]
List of destination nodes whose embedding is to be returned.
edge_features: Optional[Union[np.ndarray, List[np.ndarray]]] = None
Optional edge features to be used as input concatenated
to the obtained edge embedding. The shape must be equal
to the number of directed edges in the provided graph.
Raises
--------------------------
ValueError
If embedding is not fitted.
ValueError
If the edge features are provided and do not have the correct shape.
Returns
--------------------------
Numpy array of embeddings.
"""
if self.method == "CosineSimilarity":
if (
not isinstance(sources, np.ndarray) or
not isinstance(destinations, np.ndarray)
):
raise NotImplementedError(
"The Cosine Similarity is currently implemented exclusively for "
"numpy arrays of type uint32, but you have provided objects of type "
f"{type(sources)} and {type(destinations)}. "
)
if (
sources.dtype != np.uint32 or
destinations.dtype != np.uint32
):
raise NotImplementedError(
"The Cosine Similarity is currently implemented exclusively for "
"numpy arrays of type uint32, but you have provided objects of type "
f"{sources.dtype} and {destinations.dtype}. "
)
if self._transformer._node_feature.dtype != | |
numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from pysurvival.models.simulations import SimulationModel
from pysurvival.models.multi_task import LinearMultiTaskModel
from pysurvival.utils.metrics import concordance_index
#%matplotlib inline # To use with Jupyter notebooks
#### 2 - Generating the dataset from a Weibull parametric model
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'Weibull',
risk_type = 'linear',
censored_parameter = 10.0,
alpha = .01, beta = 3.0 )
# Generating N random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features = 3)
# Showing a few data-points
time_column = 'time'
event_column = 'event'
dataset.head(2)
#### 3 - Creating the modeling dataset
# Defining the features
features = sim.features
# Building training and testing sets #
index_train, index_test = train_test_split( range(N), test_size = 0.2)
data_train = dataset.loc[index_train].reset_index( drop = True )
data_test = dataset.loc[index_test].reset_index( drop = True )
# Creating the X, T and E input
X_train, X_test = data_train[features], data_test[features]
T_train, T_test = data_train['time'].values, data_test['time'].values
E_train, E_test = data_train['event'].values, data_test['event'].values
#### 4 - Initializing a MTLR model and fitting the data.
# Building a Linear model
mtlr = LinearMultiTaskModel(bins=50)
mtlr.fit(X_train, T_train, E_train, lr=5e-3, init_method='orthogonal')
# Building a Neural MTLR
# structure = [ {'activation': 'Swish', 'num_units': 150}, ]
# mtlr = NeuralMultiTaskModel(structure=structure, bins=150)
# mtlr.fit(X_train, T_train, E_train, lr=5e-3, init_method='adam')
#### 5 - Cross Validation / Model Performances
c_index = concordance_index(mtlr, X_test, T_test, E_test) #0.95
print('C-index: {:.2f}'.format(c_index))
"""
# Checking data format (i.e.: transforming into numpy array)
X, T, E = utils.check_data(X, T, E)
input_shape = []
# Extracting data parameters
if isinstance(X, list):
nb_inputs = len(X)
for data in X:
nb_units, num_vars = data.shape
input_shape.append(num_vars)
# Scaling data
if self.auto_scaler:
for index, data in enumerate(X):
X[index] = self.scaler.fit_transform(data)
else:
nb_inputs = 1
nb_units, self.num_vars = X.shape
input_shape.append(self.num_vars)
# Scaling data
if self.auto_scaler:
X = self.scaler.fit_transform(X)
# Building the time axis, time buckets and output Y
X_cens, X_uncens, Y_cens, Y_uncens \
= self.compute_XY(X, T, E, is_min_time_zero, extra_pct_time)
# Initializing the model
model = nn.NeuralNet(input_shape, self.num_times, self.structure,
init_method, dropout, batch_normalization,
bn_and_dropout)
# Creating the Triangular matrix
Triangle = np.tri(self.num_times, self.num_times + 1, dtype=np.float32)
Triangle = torch.FloatTensor(Triangle)
if torch.cuda.is_available():
model = model.cuda()
Triangle = Triangle.cuda()
# Performing order 1 optimization
model, loss_values = opt.optimize(self.loss_function, model, optimizer,
lr, num_epochs, verbose, X_cens=X_cens, X_uncens=X_uncens,
Y_cens=Y_cens, Y_uncens=Y_uncens, Triangle=Triangle,
l2_reg=l2_reg, l2_smooth=l2_smooth, max_norm=max_norm,
min_clamp_value=min_clamp_value, max_clamp_value=max_clamp_value)
# Saving attributes
self.model = model.eval()
self.loss_values = loss_values
return self
    def predict(self, x, t=None):
        """ Predicting the hazard, density and survival functions

            Parameters:
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            * `t`: **double** *(default=None)* --
                time at which the prediction should be performed.
                If None, then return the function for all available t.

            Returns:
            ----------
            * (hazard, density, survival) -- three numpy arrays over the
              time buckets when t is None, otherwise the three values at
              the bucket closest to t.
        """
        # Convert x into the right format
        x = utils.check_data(x)
        # Scaling the data with the scaler fitted during training, then
        # moving it onto the GPU when one is available.
        if self.auto_scaler:
            if x.ndim == 1:
                # Single sample: reshape to (1, n_features) before scaling.
                x = self.scaler.transform(x.reshape(1, -1))
            elif x.ndim == 2:
                x = self.scaler.transform(x)
            x = torch.FloatTensor(x)
            if torch.cuda.is_available():
                x = x.cuda()
        else:
            # No scaler: only ensure x has 2 dimensions and convert to torch.
            # NOTE(review): the list branch presumably corresponds to
            # multi-input models (see the list handling in fit) — confirm.
            if isinstance(x, list):
                for index in range(len(x)):
                    if x[index].ndim == 1:
                        x[index] = np.reshape(x[index], (1, -1))
                    # Transforming into pytorch objects
                    x[index] = torch.FloatTensor(x[index])
                    if torch.cuda.is_available():
                        x[index] = x[index].cuda()
            else:
                if x.ndim == 1:
                    x = np.reshape(x, (1, -1))
                # Transforming into pytorch objects
                x = torch.FloatTensor(x)
                if torch.cuda.is_available():
                    x = x.cuda()
        # Predicting using linear/nonlinear function; scores are pulled
        # back to CPU numpy for the closed-form computations below.
        score_torch = self.model(x)
        score = score_torch.data.cpu().numpy()
        # Creating the lower-triangular time matrices used to cumulate
        # scores (Triangle1) and densities (Triangle2) over time buckets.
        Triangle1 = np.tri(self.num_times, self.num_times + 1)
        Triangle2 = np.tri(self.num_times + 1, self.num_times + 1)
        # Calculating the score, density, hazard and Survival:
        # phi are unnormalized bucket weights; dividing by their row sum
        # yields a per-sample probability density over time buckets.
        phi = np.exp(np.dot(score, Triangle1))
        div = np.repeat(np.sum(phi, 1).reshape(-1, 1), phi.shape[1], axis=1)
        density = (phi / div)
        # Survival is the (reverse) cumulative density; hazard follows from
        # the identity hazard(t) = density(t) / survival(t+).
        Survival = np.dot(density, Triangle2)
        hazard = density[:, :-1] / Survival[:, 1:]
        # Returning the full functions or just one time point
        if t is None:
            return hazard, density, Survival
        else:
            # Pick the time bucket whose lower bound is closest to t.
            # NOTE(review): only the bucket lower bound a_j_1 is compared to
            # t; the upper bound a_j is ignored — confirm this is intended.
            min_abs_value = [abs(a_j_1 - t) for (a_j_1, a_j) in self.time_buckets]
            index = np.argmin(min_abs_value)
            return hazard[:, index], density[:, index], Survival[:, index]
def predict_risk(self, x, use_log=False):
""" Computing the risk score
Parameters:
-----------
* `x` : **array-like** *shape=(n_samples, n_features)* --
array-like representing the datapoints.
x should not be standardized before, the model
will take care of it
* `use_log`: **bool** *(default=True)* --
Applies the log function to the risk values
"""
risk = super(BaseMultiTaskModel, self).predict_risk(x)
if use_log:
return np.log(risk)
else:
return risk
class LinearMultiTaskModel(BaseMultiTaskModel):
    """ LinearMultiTaskModel is the original Multi-Task model,
        a.k.a the Multi-Task Logistic Regression model (MTLR).
        It was first introduced in
        Learning Patient-Specific Cancer Survival Distributions
        as a Sequence of Dependent Regressors

        Reference:
        ----------
        * http://www.cs.cornell.edu/~cnyu/papers/nips11_survival.pdf

        Parameters:
        ----------
        * bins: int
            Number of subdivisions of the time axis

        * auto_scaler: boolean (default=True)
            Determines whether a sklearn scaler should be automatically
            applied
    """

    def __init__(self, bins=100, auto_scaler=True):
        # A linear MTLR is simply the base model with no hidden structure.
        super().__init__(
            structure=None, bins=bins, auto_scaler=auto_scaler)

    def fit(self, X, T, E, init_method='glorot_uniform', optimizer='adam',
            lr=1e-4, num_epochs=1000, l2_reg=1e-2, l2_smooth=1e-2,
            verbose=True, extra_pct_time=0.1, is_min_time_zero=True, max_norm=1.0,
            min_clamp_value=1e-8, max_clamp_value=torch.finfo(torch.float32).max-1):
        """Fit the linear MTLR model.

        Since the model has no hidden layers, every neural-network-only
        regularization knob (dropout, batch normalization) is pinned to
        its disabled value before delegating to the base class.
        """
        super().fit(
            X=X, T=T, E=E,
            init_method=init_method, optimizer=optimizer,
            lr=lr, num_epochs=num_epochs,
            dropout=None, l2_reg=l2_reg, l2_smooth=l2_smooth,
            batch_normalization=False, bn_and_dropout=False,
            verbose=verbose, extra_pct_time=extra_pct_time,
            is_min_time_zero=is_min_time_zero, max_norm=max_norm,
            min_clamp_value=min_clamp_value,
            max_clamp_value=max_clamp_value,
        )
        return self
class NeuralMultiTaskModel(BaseMultiTaskModel):
    """ NeuralMultiTaskModel is the Neural Multi-Task Logistic Regression
        model (N-MTLR) was developed by <NAME>. in
        Deep Neural Networks for Survival Analysis Based on a
        Multi-Task Framework,
        allowing the use of Neural Networks within the original design.

        Reference:
        ----------
        * https://arxiv.org/pdf/1801.05512

        Parameters:
        ----------
        * `structure`: **list of dictionaries** --
            Provides the structure of the MLP built within the N-MTLR.
            ex: `structure = [ {'activation': 'ReLU', 'num_units': 128}, ]`.
            Each dictionary corresponds to a fully connected hidden layer:
            * `units` is the number of hidden units in this layer
            * `activation` is the activation function that will be used.
            The list of all available activation functions can be found :
                * Atan
                * BentIdentity
                * BipolarSigmoid
                * CosReLU
                * ELU
                * Gaussian
                * Hardtanh
                * Identity
                * InverseSqrt
                * LeakyReLU
                * LeCunTanh
                * LogLog
                * LogSigmoid
                * ReLU
                * SELU
                * Sigmoid
                * Sinc
                * SinReLU
                * Softmax
                * Softplus
                * Softsign
                * Swish
                * Tanh
            In case there are more than one dictionary,
            each hidden layer will be applied in the resulting MLP,
            using the order it is provided in the structure:
            ex: structure = [ {'activation': 'relu', 'num_units': 128},
                              {'activation': 'tanh', 'num_units': 128}, ]

        * `bins`: **int** *(default=100)* --
            Number of subdivisions of the time axis

        * `auto_scaler`: **boolean** *(default=True)* --
            Determines whether a sklearn scaler should be automatically applied
    """

    def __init__(self, structure, bins=100, auto_scaler=True):
        # Checking the validity of structure (normalizes the layer dicts;
        # raises on malformed entries — see nn.check_mlp_structure).
        structure = nn.check_mlp_structure(structure)
        # print(structure)
        # Initializing the instance
        super(NeuralMultiTaskModel, self).__init__(
            structure=structure, bins=bins, auto_scaler=auto_scaler)

    def __repr__(self):
        """ Representing the class object as a human-readable summary of
            the network structure, e.g.
            ``NeuralMultiTaskModel( Layer(1): activation = ReLU, units = 128 )``.
        """
        if self.structure is None:
            # Fall back to the base representation (which sets self.name).
            super(NeuralMultiTaskModel, self).__repr__()
            return self.name
        else:
            S = len(self.structure)
            self.name = self.__class__.__name__
            # Width of the class name, used to align continuation lines.
            empty = len(self.name)
            self.name += '( '
            for i, s in enumerate(self.structure):
                # A structure entry may itself be a list of layer dicts.
                if isinstance(s, list):
                    for s_ in s:
                        n = 'Layer({}): '.format(i + 1)
                        # NOTE(review): `activation` is computed but unused;
                        # the raw s_['activation'] string is printed instead.
                        activation = nn.activation_function(s_['activation'],
                            return_text=True)
                        n += 'activation = {}, '.format(s_['activation'])
                        if 'num_units' in s_.keys():
                            n += 'units = {} '.format(s_['num_units'])
                        if i != S - 1:
                            # Not the last layer: separator + aligned newline.
                            self.name += n + '; \n'
                            self.name += empty * ' ' + '  '
                        else:
                            self.name += n
                        # NOTE(review): this closing ')' is appended on every
                        # inner iteration, not once after the loop — looks
                        # like an indentation regression; confirm upstream.
                        self.name = self.name + ')'
                else:
                    n = 'Layer({}): '.format(i + 1)
                    # NOTE(review): `activation` is computed but unused here
                    # as well; the raw s['activation'] string is printed.
                    activation = nn.activation_function(s['activation'],
                        return_text=True)
                    n += 'activation = {}, '.format(s['activation'])
                    if 'num_units' in s.keys():
                        n += 'units = {} '.format(s['num_units'])
                    if i != S - 1:
                        # Not the last layer: separator + aligned newline.
                        self.name += n + '; \n'
                        self.name += empty * ' ' + '  '
                    else:
                        self.name += n
                    # NOTE(review): appended once per layer, so multi-layer
                    # structures get multiple ')' — confirm against upstream.
                    self.name = self.name + ')'
            return self.name
def norm_diff(W):
""" Special norm function for the last layer of the MTLR |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.